// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_inode_item.h"
#include "xfs_quota.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_bmap_util.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_reflink.h"
#include "xfs_ialloc.h"
#include "xfs_ag.h"
#include "xfs_log_priv.h"

#include <linux/iversion.h>

/* Radix tree tags for incore inode tree. */

/* inode is to be reclaimed */
#define XFS_ICI_RECLAIM_TAG	0
/* Inode has speculative preallocations (posteof or cow) to clean. */
#define XFS_ICI_BLOCKGC_TAG	1

/*
 * The goal for walking incore inodes. These can correspond with incore inode
 * radix tree tags when convenient. Avoid existing XFS_IWALK namespace.
 */
enum xfs_icwalk_goal {
	/* Goals directly associated with tagged inodes. */
	XFS_ICWALK_BLOCKGC	= XFS_ICI_BLOCKGC_TAG,
	XFS_ICWALK_RECLAIM	= XFS_ICI_RECLAIM_TAG,
};

static int xfs_icwalk(struct xfs_mount *mp,
		enum xfs_icwalk_goal goal, struct xfs_icwalk *icw);
static int xfs_icwalk_ag(struct xfs_perag *pag,
		enum xfs_icwalk_goal goal, struct xfs_icwalk *icw);

/*
 * Private inode cache walk flags for struct xfs_icwalk. Must not
 * coincide with XFS_ICWALK_FLAGS_VALID.
 */

/* Stop scanning after icw_scan_limit inodes. */
#define XFS_ICWALK_FLAG_SCAN_LIMIT	(1U << 28)

#define XFS_ICWALK_FLAG_RECLAIM_SICK	(1U << 27)
#define XFS_ICWALK_FLAG_UNION		(1U << 26) /* union filter algorithm */

#define XFS_ICWALK_PRIVATE_FLAGS	(XFS_ICWALK_FLAG_SCAN_LIMIT | \
					 XFS_ICWALK_FLAG_RECLAIM_SICK | \
					 XFS_ICWALK_FLAG_UNION)
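
/*
 * Note: xfs_icwalk() contains a BUILD_BUG_ON() that verifies at compile time
 * that these private flags do not overlap the XFS_ICWALK_FLAGS_VALID space.
 */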

/*
 * Allocate and initialise an xfs_inode.
 */
struct xfs_inode *
xfs_inode_alloc(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	struct xfs_inode	*ip;

	/*
	 * XXX: If this didn't occur in transactions, we could drop GFP_NOFAIL
	 * and return NULL here on ENOMEM.
	 */
	ip = alloc_inode_sb(mp->m_super, xfs_inode_cache, GFP_KERNEL | __GFP_NOFAIL);

	if (inode_init_always(mp->m_super, VFS_I(ip))) {
		kmem_cache_free(xfs_inode_cache, ip);
		return NULL;
	}

	/* VFS doesn't initialise i_mode or i_state! */
	VFS_I(ip)->i_mode = 0;
	VFS_I(ip)->i_state = 0;
	mapping_set_large_folios(VFS_I(ip)->i_mapping);

	XFS_STATS_INC(mp, vn_active);
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(ip->i_ino == 0);

	/* initialise the xfs inode */
	ip->i_ino = ino;
	ip->i_mount = mp;
	memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
	ip->i_cowfp = NULL;
	memset(&ip->i_af, 0, sizeof(ip->i_af));
	ip->i_af.if_format = XFS_DINODE_FMT_EXTENTS;
	memset(&ip->i_df, 0, sizeof(ip->i_df));
	ip->i_flags = 0;
	ip->i_delayed_blks = 0;
	ip->i_diflags2 = mp->m_ino_geo.new_diflags2;
	ip->i_nblocks = 0;
	ip->i_forkoff = 0;
	ip->i_sick = 0;
	ip->i_checked = 0;
	INIT_WORK(&ip->i_ioend_work, xfs_end_io);
	INIT_LIST_HEAD(&ip->i_ioend_list);
	spin_lock_init(&ip->i_ioend_lock);
	ip->i_next_unlinked = NULLAGINO;
	ip->i_prev_unlinked = 0;

	return ip;
}

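/*
 * RCU callback to free an inode once the grace period has expired. Tear down
 * the data fork (for regular files, directories and symlinks), the attr and
 * CoW forks, and the inode log item before freeing the inode back to the
 * inode slab cache.
 */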
STATIC void
xfs_inode_free_callback(
	struct rcu_head		*head)
{
	struct inode		*inode = container_of(head, struct inode, i_rcu);
	struct xfs_inode	*ip = XFS_I(inode);

	switch (VFS_I(ip)->i_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
	case S_IFLNK:
		xfs_idestroy_fork(&ip->i_df);
		break;
	}

	xfs_ifork_zap_attr(ip);

	if (ip->i_cowfp) {
		xfs_idestroy_fork(ip->i_cowfp);
		kmem_cache_free(xfs_ifork_cache, ip->i_cowfp);
	}
	if (ip->i_itemp) {
		ASSERT(!test_bit(XFS_LI_IN_AIL,
				 &ip->i_itemp->ili_item.li_flags));
		xfs_inode_item_destroy(ip);
		ip->i_itemp = NULL;
	}

	kmem_cache_free(xfs_inode_cache, ip);
}

static void
__xfs_inode_free(
	struct xfs_inode	*ip)
{
	/* asserts to verify all state is correct here */
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!ip->i_itemp || list_empty(&ip->i_itemp->ili_item.li_bio_list));
	XFS_STATS_DEC(ip->i_mount, vn_active);

	call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
}

void
xfs_inode_free(
	struct xfs_inode	*ip)
{
	ASSERT(!xfs_iflags_test(ip, XFS_IFLUSHING));

	/*
	 * Because we use RCU freeing we need to ensure the inode always
	 * appears to be reclaimed with an invalid inode number when in the
	 * free state. The ip->i_flags_lock provides the barrier against lookup
	 * races.
	 */
	spin_lock(&ip->i_flags_lock);
	ip->i_flags = XFS_IRECLAIM;
	ip->i_ino = 0;
	spin_unlock(&ip->i_flags_lock);

	__xfs_inode_free(ip);
}

/*
 * Queue background inode reclaim work if there are reclaimable inodes and there
 * isn't reclaim work already scheduled or in progress.
 */
static void
xfs_reclaim_work_queue(
	struct xfs_mount	*mp)
{
	rcu_read_lock();
	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
		queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work,
			msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
	}
	rcu_read_unlock();
}

/*
 * Background scanning to trim preallocated space. This is queued based on the
 * 'speculative_prealloc_lifetime' tunable (5m by default).
 */
static inline void
xfs_blockgc_queue(
	struct xfs_perag	*pag)
{
	struct xfs_mount	*mp = pag->pag_mount;

	if (!xfs_is_blockgc_enabled(mp))
		return;

	rcu_read_lock();
	if (radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_BLOCKGC_TAG))
		queue_delayed_work(pag->pag_mount->m_blockgc_wq,
				   &pag->pag_blockgc_work,
				   msecs_to_jiffies(xfs_blockgc_secs * 1000));
	rcu_read_unlock();
}

/* Set a tag on both the AG incore inode tree and the AG radix tree. */
static void
xfs_perag_set_inode_tag(
	struct xfs_perag	*pag,
	xfs_agino_t		agino,
	unsigned int		tag)
{
	struct xfs_mount	*mp = pag->pag_mount;
	bool			was_tagged;

	lockdep_assert_held(&pag->pag_ici_lock);

	was_tagged = radix_tree_tagged(&pag->pag_ici_root, tag);
	radix_tree_tag_set(&pag->pag_ici_root, agino, tag);

	if (tag == XFS_ICI_RECLAIM_TAG)
		pag->pag_ici_reclaimable++;

	if (was_tagged)
		return;

	/* propagate the tag up into the perag radix tree */
	spin_lock(&mp->m_perag_lock);
	radix_tree_tag_set(&mp->m_perag_tree, pag->pag_agno, tag);
	spin_unlock(&mp->m_perag_lock);

	/* start background work */
	switch (tag) {
	case XFS_ICI_RECLAIM_TAG:
		xfs_reclaim_work_queue(mp);
		break;
	case XFS_ICI_BLOCKGC_TAG:
		xfs_blockgc_queue(pag);
		break;
	}

	trace_xfs_perag_set_inode_tag(pag, _RET_IP_);
}

/* Clear a tag on both the AG incore inode tree and the AG radix tree. */
static void
xfs_perag_clear_inode_tag(
	struct xfs_perag	*pag,
	xfs_agino_t		agino,
	unsigned int		tag)
{
	struct xfs_mount	*mp = pag->pag_mount;

	lockdep_assert_held(&pag->pag_ici_lock);

	/*
	 * Reclaim can signal (with a null agino) that it cleared its own tag
	 * by removing the inode from the radix tree.
	 */
	if (agino != NULLAGINO)
		radix_tree_tag_clear(&pag->pag_ici_root, agino, tag);
	else
		ASSERT(tag == XFS_ICI_RECLAIM_TAG);

	if (tag == XFS_ICI_RECLAIM_TAG)
		pag->pag_ici_reclaimable--;

	if (radix_tree_tagged(&pag->pag_ici_root, tag))
		return;

	/* clear the tag from the perag radix tree */
	spin_lock(&mp->m_perag_lock);
	radix_tree_tag_clear(&mp->m_perag_tree, pag->pag_agno, tag);
	spin_unlock(&mp->m_perag_lock);

	trace_xfs_perag_clear_inode_tag(pag, _RET_IP_);
}

/*
 * When we recycle a reclaimable inode, we need to re-initialise the VFS inode
 * part of the structure. This is made more complex by the fact we store
 * information about the on-disk values in the VFS inode and so we can't just
 * overwrite the values unconditionally. Hence we save the parameters we
 * need to retain across reinitialisation, and rewrite them into the VFS inode
 * after reinitialisation even if it fails.
 */
static int
xfs_reinit_inode(
	struct xfs_mount	*mp,
	struct inode		*inode)
{
	int			error;
	uint32_t		nlink = inode->i_nlink;
	uint32_t		generation = inode->i_generation;
	uint64_t		version = inode_peek_iversion(inode);
	umode_t			mode = inode->i_mode;
	dev_t			dev = inode->i_rdev;
	kuid_t			uid = inode->i_uid;
	kgid_t			gid = inode->i_gid;

	error = inode_init_always(mp->m_super, inode);

	set_nlink(inode, nlink);
	inode->i_generation = generation;
	inode_set_iversion_queried(inode, version);
	inode->i_mode = mode;
	inode->i_rdev = dev;
	inode->i_uid = uid;
	inode->i_gid = gid;
	mapping_set_large_folios(inode->i_mapping);
	return error;
}

/*
 * Carefully nudge an inode whose VFS state has been torn down back into a
 * usable state. Drops the i_flags_lock and the rcu read lock.
 */
static int
xfs_iget_recycle(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip) __releases(&ip->i_flags_lock)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct inode		*inode = VFS_I(ip);
	int			error;

	trace_xfs_iget_recycle(ip);

	if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
		return -EAGAIN;

	/*
	 * We need to make it look like the inode is being reclaimed to prevent
	 * the actual reclaim workers from stomping over us while we recycle
	 * the inode. We can't clear the radix tree tag yet as it requires
	 * pag_ici_lock to be held exclusive.
	 */
	ip->i_flags |= XFS_IRECLAIM;

	spin_unlock(&ip->i_flags_lock);
	rcu_read_unlock();

	ASSERT(!rwsem_is_locked(&inode->i_rwsem));
	error = xfs_reinit_inode(mp, inode);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	if (error) {
		/*
		 * Re-initializing the inode failed, and we are in deep
		 * trouble. Try to re-add it to the reclaim list.
		 */
		rcu_read_lock();
		spin_lock(&ip->i_flags_lock);
		ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
		ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
		spin_unlock(&ip->i_flags_lock);
		rcu_read_unlock();

		trace_xfs_iget_recycle_fail(ip);
		return error;
	}

	spin_lock(&pag->pag_ici_lock);
	spin_lock(&ip->i_flags_lock);

	/*
	 * Clear the per-lifetime state in the inode as we are now effectively
	 * a new inode and need to return to the initial state before reuse
	 * occurs.
	 */
	ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
	ip->i_flags |= XFS_INEW;
	xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
			XFS_ICI_RECLAIM_TAG);
	inode->i_state = I_NEW;
	spin_unlock(&ip->i_flags_lock);
	spin_unlock(&pag->pag_ici_lock);

	return 0;
}

/*
 * If we are allocating a new inode, then check what was returned is
 * actually a free, empty inode. If we are not allocating an inode,
 * then check we didn't find a free inode.
 *
 * Returns:
 *	0		if the inode free state matches the lookup context
 *	-ENOENT		if the inode is free and we are not allocating
 *	-EFSCORRUPTED	if there is any state mismatch at all
 */
static int
xfs_iget_check_free_state(
	struct xfs_inode	*ip,
	int			flags)
{
	if (flags & XFS_IGET_CREATE) {
		/* should be a free inode */
		if (VFS_I(ip)->i_mode != 0) {
			xfs_warn(ip->i_mount,
"Corruption detected! Free inode 0x%llx not marked free! (mode 0x%x)",
				ip->i_ino, VFS_I(ip)->i_mode);
			return -EFSCORRUPTED;
		}

		if (ip->i_nblocks != 0) {
			xfs_warn(ip->i_mount,
"Corruption detected! Free inode 0x%llx has blocks allocated!",
				ip->i_ino);
			return -EFSCORRUPTED;
		}
		return 0;
	}

	/* should be an allocated inode */
	if (VFS_I(ip)->i_mode == 0)
		return -ENOENT;

	return 0;
}

/* Make all pending inactivation work start immediately. */
static bool
xfs_inodegc_queue_all(
	struct xfs_mount	*mp)
{
	struct xfs_inodegc	*gc;
	int			cpu;
	bool			ret = false;

	for_each_cpu(cpu, &mp->m_inodegc_cpumask) {
		gc = per_cpu_ptr(mp->m_inodegc, cpu);
		if (!llist_empty(&gc->list)) {
			mod_delayed_work_on(cpu, mp->m_inodegc_wq, &gc->work, 0);
			ret = true;
		}
	}

	return ret;
}

/* Wait for all queued work and collect errors */
static int
xfs_inodegc_wait_all(
	struct xfs_mount	*mp)
{
	int			cpu;
	int			error = 0;

	flush_workqueue(mp->m_inodegc_wq);
	for_each_cpu(cpu, &mp->m_inodegc_cpumask) {
		struct xfs_inodegc	*gc;

		gc = per_cpu_ptr(mp->m_inodegc, cpu);
		if (gc->error && !error)
			error = gc->error;
		gc->error = 0;
	}

	return error;
}

/*
 * Check the validity of the inode we just found in the cache.
 */
static int
xfs_iget_cache_hit(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip,
	xfs_ino_t		ino,
	int			flags,
	int			lock_flags) __releases(RCU)
{
	struct inode		*inode = VFS_I(ip);
	struct xfs_mount	*mp = ip->i_mount;
	int			error;

	/*
	 * check for re-use of an inode within an RCU grace period due to the
	 * radix tree nodes not being updated yet. We monitor for this by
	 * setting the inode number to zero before freeing the inode structure.
	 * If the inode has been reallocated and set up, then the inode number
	 * will not match, so check for that, too.
	 */
	spin_lock(&ip->i_flags_lock);
	if (ip->i_ino != ino)
		goto out_skip;

	/*
	 * If we are racing with another cache hit that is currently
	 * instantiating this inode or currently recycling it out of
	 * reclaimable state, wait for the initialisation to complete
	 * before continuing.
	 *
	 * If we're racing with the inactivation worker we also want to wait.
	 * If we're creating a new file, it's possible that the worker
	 * previously marked the inode as free on disk but hasn't finished
	 * updating the incore state yet. The AGI buffer will be dirty and
	 * locked to the icreate transaction, so a synchronous push of the
	 * inodegc workers would result in deadlock. For a regular iget, the
	 * worker is running already, so we might as well wait.
	 *
	 * XXX(hch): eventually we should do something equivalent to
	 *	wait_on_inode to wait for these flags to be cleared
	 *	instead of polling for it.
	 */
	if (ip->i_flags & (XFS_INEW | XFS_IRECLAIM | XFS_INACTIVATING))
		goto out_skip;

	if (ip->i_flags & XFS_NEED_INACTIVE) {
		/* Unlinked inodes cannot be re-grabbed. */
		if (VFS_I(ip)->i_nlink == 0) {
			error = -ENOENT;
			goto out_error;
		}
		goto out_inodegc_flush;
	}

	/*
	 * Check the inode free state is valid. This also detects lookup
	 * racing with unlinks.
	 */
	error = xfs_iget_check_free_state(ip, flags);
	if (error)
		goto out_error;

	/* Skip inodes that have no vfs state. */
	if ((flags & XFS_IGET_INCORE) &&
	    (ip->i_flags & XFS_IRECLAIMABLE))
		goto out_skip;

	/* The inode fits the selection criteria; process it. */
	if (ip->i_flags & XFS_IRECLAIMABLE) {
		/* Drops i_flags_lock and RCU read lock. */
		error = xfs_iget_recycle(pag, ip);
		if (error == -EAGAIN)
			goto out_skip;
		if (error)
			return error;
	} else {
		/* If the VFS inode is being torn down, pause and try again. */
		if (!igrab(inode))
			goto out_skip;

		/* We've got a live one. */
		spin_unlock(&ip->i_flags_lock);
		rcu_read_unlock();
		trace_xfs_iget_hit(ip);
	}

	if (lock_flags != 0)
		xfs_ilock(ip, lock_flags);

	if (!(flags & XFS_IGET_INCORE))
		xfs_iflags_clear(ip, XFS_ISTALE);
	XFS_STATS_INC(mp, xs_ig_found);

	return 0;

out_skip:
	trace_xfs_iget_skip(ip);
	XFS_STATS_INC(mp, xs_ig_frecycle);
	error = -EAGAIN;
out_error:
	spin_unlock(&ip->i_flags_lock);
	rcu_read_unlock();
	return error;

out_inodegc_flush:
	spin_unlock(&ip->i_flags_lock);
	rcu_read_unlock();
	/*
	 * Do not wait for the workers, because the caller could hold an AGI
	 * buffer lock. We're just going to sleep in a loop anyway.
	 */
	if (xfs_is_inodegc_enabled(mp))
		xfs_inodegc_queue_all(mp);
	return -EAGAIN;
}

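/*
 * The inode was not found in the cache: allocate a new incore inode, look up
 * its on-disk location, read the inode from disk if necessary, and insert it
 * into the per-AG radix tree marked XFS_INEW so that concurrent lookups wait
 * for initialisation to complete.
 */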
static int
xfs_iget_cache_miss(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	xfs_trans_t		*tp,
	xfs_ino_t		ino,
	struct xfs_inode	**ipp,
	int			flags,
	int			lock_flags)
{
	struct xfs_inode	*ip;
	int			error;
	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ino);
	int			iflags;

	ip = xfs_inode_alloc(mp, ino);
	if (!ip)
		return -ENOMEM;

	error = xfs_imap(pag, tp, ip->i_ino, &ip->i_imap, flags);
	if (error)
		goto out_destroy;

	/*
	 * For version 5 superblocks, if we are initialising a new inode and we
	 * are not utilising the XFS_FEAT_IKEEP inode cluster mode, we can
	 * simply build the new inode core with a random generation number.
	 *
	 * For version 4 (and older) superblocks, log recovery is dependent on
	 * the i_flushiter field being initialised from the current on-disk
	 * value and hence we must also read the inode off disk even when
	 * initializing new inodes.
	 */
	if (xfs_has_v3inodes(mp) &&
	    (flags & XFS_IGET_CREATE) && !xfs_has_ikeep(mp)) {
		VFS_I(ip)->i_generation = get_random_u32();
	} else {
		struct xfs_buf		*bp;

		error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp);
		if (error)
			goto out_destroy;

		error = xfs_inode_from_disk(ip,
				xfs_buf_offset(bp, ip->i_imap.im_boffset));
		if (!error)
			xfs_buf_set_ref(bp, XFS_INO_REF);
		xfs_trans_brelse(tp, bp);

		if (error)
			goto out_destroy;
	}

	trace_xfs_iget_miss(ip);

	/*
	 * Check the inode free state is valid. This also detects lookup
	 * racing with unlinks.
	 */
	error = xfs_iget_check_free_state(ip, flags);
	if (error)
		goto out_destroy;

	/*
	 * Preload the radix tree so we can insert safely under the
	 * write spinlock. Note that we cannot sleep inside the preload
	 * region. Since we can be called from transaction context, don't
	 * recurse into the file system.
	 */
	if (radix_tree_preload(GFP_NOFS)) {
		error = -EAGAIN;
		goto out_destroy;
	}

	/*
	 * Because the inode hasn't been added to the radix-tree yet it can't
	 * be found by another thread, so we can do the non-sleeping lock here.
	 */
	if (lock_flags) {
		if (!xfs_ilock_nowait(ip, lock_flags))
			BUG();
	}

	/*
	 * These values must be set before inserting the inode into the radix
	 * tree as the moment it is inserted a concurrent lookup (allowed by the
	 * RCU locking mechanism) can find it and that lookup must see that this
	 * is an inode currently under construction (i.e. that XFS_INEW is set).
	 * The ip->i_flags_lock that protects the XFS_INEW flag forms the
	 * memory barrier that ensures this detection works correctly at lookup
	 * time.
	 */
	iflags = XFS_INEW;
	if (flags & XFS_IGET_DONTCACHE)
		d_mark_dontcache(VFS_I(ip));
	ip->i_udquot = NULL;
	ip->i_gdquot = NULL;
	ip->i_pdquot = NULL;
	xfs_iflags_set(ip, iflags);

	/* insert the new inode */
	spin_lock(&pag->pag_ici_lock);
	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
	if (unlikely(error)) {
		WARN_ON(error != -EEXIST);
		XFS_STATS_INC(mp, xs_ig_dup);
		error = -EAGAIN;
		goto out_preload_end;
	}
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();

	*ipp = ip;
	return 0;

out_preload_end:
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
out_destroy:
	__destroy_inode(VFS_I(ip));
	xfs_inode_free(ip);
	return error;
}

/*
 * Look up an inode by number in the given file system. The inode is looked up
 * in the cache held in each AG. If the inode is found in the cache, initialise
 * the vfs inode if necessary.
 *
 * If it is not in core, read it in from the file system's device, add it to the
 * cache and initialise the vfs inode.
 *
 * The inode is locked according to the value of the lock_flags parameter.
 * Inode lookup is only done during metadata operations and not as part of the
 * data IO path. Hence we only allow locking of the XFS_ILOCK during lookup.
 */
int
xfs_iget(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_ino_t		ino,
	uint			flags,
	uint			lock_flags,
	struct xfs_inode	**ipp)
{
	struct xfs_inode	*ip;
	struct xfs_perag	*pag;
	xfs_agino_t		agino;
	int			error;

	ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0);

	/* reject inode numbers outside existing AGs */
	if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
		return -EINVAL;

	XFS_STATS_INC(mp, xs_ig_attempts);

	/* get the perag structure and ensure that it's inode capable */
	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
	agino = XFS_INO_TO_AGINO(mp, ino);

again:
	error = 0;
	rcu_read_lock();
	ip = radix_tree_lookup(&pag->pag_ici_root, agino);

	if (ip) {
		error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
		if (error)
			goto out_error_or_again;
	} else {
		rcu_read_unlock();
		if (flags & XFS_IGET_INCORE) {
			error = -ENODATA;
			goto out_error_or_again;
		}
		XFS_STATS_INC(mp, xs_ig_missed);

		error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
							flags, lock_flags);
		if (error)
			goto out_error_or_again;
	}
	xfs_perag_put(pag);

	*ipp = ip;

	/*
	 * If we have a real type for an on-disk inode, we can setup the inode
	 * now. If it's a new inode being created, xfs_init_new_inode will
	 * handle it.
	 */
	if (xfs_iflags_test(ip, XFS_INEW) && VFS_I(ip)->i_mode != 0)
		xfs_setup_existing_inode(ip);
	return 0;

out_error_or_again:
	if (!(flags & (XFS_IGET_INCORE | XFS_IGET_NORETRY)) &&
	    error == -EAGAIN) {
		delay(1);
		goto again;
	}
	xfs_perag_put(pag);
	return error;
}

/*
 * Grab the inode for reclaim exclusively.
 *
 * We have found this inode via a lookup under RCU, so the inode may have
 * already been freed, or it may be in the process of being recycled by
 * xfs_iget(). In both cases, the inode will have XFS_IRECLAIM set. If the inode
 * has been fully recycled by the time we get the i_flags_lock, XFS_IRECLAIMABLE
 * will not be set. Hence we need to check for both these flag conditions to
 * avoid inodes that are no longer reclaim candidates.
 *
 * Note: checking for other state flags here, under the i_flags_lock or not, is
 * racy and should be avoided. Those races should be resolved only after we have
 * ensured that we are able to reclaim this inode and the world can see that we
 * are going to reclaim it.
 *
 * Return true if we grabbed it, false otherwise.
 */
static bool
xfs_reclaim_igrab(
	struct xfs_inode	*ip,
	struct xfs_icwalk	*icw)
{
	ASSERT(rcu_read_lock_held());

	spin_lock(&ip->i_flags_lock);
	if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
	    __xfs_iflags_test(ip, XFS_IRECLAIM)) {
		/* not a reclaim candidate. */
		spin_unlock(&ip->i_flags_lock);
		return false;
	}

	/* Don't reclaim a sick inode unless the caller asked for it. */
	if (ip->i_sick &&
	    (!icw || !(icw->icw_flags & XFS_ICWALK_FLAG_RECLAIM_SICK))) {
		spin_unlock(&ip->i_flags_lock);
		return false;
	}

	__xfs_iflags_set(ip, XFS_IRECLAIM);
	spin_unlock(&ip->i_flags_lock);
	return true;
}

/*
 * Inode reclaim is non-blocking, so the default action if progress cannot be
 * made is to "requeue" the inode for reclaim by unlocking it and clearing the
 * XFS_IRECLAIM flag. If we are in a shutdown state, we don't care about
 * blocking anymore and hence we can wait for the inode to be able to reclaim
 * it.
 *
 * We do no IO here - if callers require inodes to be cleaned they must push the
 * AIL first to trigger writeback of dirty inodes. This enables writeback to be
 * done in the background in a non-blocking manner, and enables memory reclaim
 * to make progress without blocking.
 */
static void
xfs_reclaim_inode(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag)
{
	xfs_ino_t		ino = ip->i_ino; /* for radix_tree_delete */

	if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
		goto out;
	if (xfs_iflags_test_and_set(ip, XFS_IFLUSHING))
		goto out_iunlock;

	/*
	 * Check for log shutdown because aborting the inode can move the log
	 * tail and corrupt in memory state. This is fine if the log is shut
	 * down, but if the log is still active and only the mount is shut down
	 * then the in-memory log tail movement caused by the abort can be
	 * incorrectly propagated to disk.
	 */
	if (xlog_is_shutdown(ip->i_mount->m_log)) {
		xfs_iunpin_wait(ip);
		xfs_iflush_shutdown_abort(ip);
		goto reclaim;
	}
	if (xfs_ipincount(ip))
		goto out_clear_flush;
	if (!xfs_inode_clean(ip))
		goto out_clear_flush;

	xfs_iflags_clear(ip, XFS_IFLUSHING);
reclaim:
	trace_xfs_inode_reclaiming(ip);

	/*
	 * Because we use RCU freeing we need to ensure the inode always appears
	 * to be reclaimed with an invalid inode number when in the free state.
	 * We do this as early as possible under the ILOCK so that
	 * xfs_iflush_cluster() and xfs_ifree_cluster() can be guaranteed to
	 * detect races with us here. By doing this, we guarantee that once
	 * xfs_iflush_cluster() or xfs_ifree_cluster() has locked XFS_ILOCK that
	 * it will see either a valid inode that will serialise correctly, or it
	 * will see an invalid inode that it can skip.
	 */
	spin_lock(&ip->i_flags_lock);
	ip->i_flags = XFS_IRECLAIM;
	ip->i_ino = 0;
	ip->i_sick = 0;
	ip->i_checked = 0;
	spin_unlock(&ip->i_flags_lock);

	ASSERT(!ip->i_itemp || ip->i_itemp->ili_item.li_buf == NULL);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	XFS_STATS_INC(ip->i_mount, xs_ig_reclaims);
	/*
	 * Remove the inode from the per-AG radix tree.
	 *
	 * Because radix_tree_delete won't complain even if the item was never
	 * added to the tree assert that it's been there before to catch
	 * problems with the inode life time early on.
	 */
	spin_lock(&pag->pag_ici_lock);
	if (!radix_tree_delete(&pag->pag_ici_root,
				XFS_INO_TO_AGINO(ip->i_mount, ino)))
		ASSERT(0);
	xfs_perag_clear_inode_tag(pag, NULLAGINO, XFS_ICI_RECLAIM_TAG);
	spin_unlock(&pag->pag_ici_lock);

	/*
	 * Here we do an (almost) spurious inode lock in order to coordinate
	 * with inode cache radix tree lookups. This is because the lookup
	 * can reference the inodes in the cache without taking references.
	 *
	 * We make that OK here by ensuring that we wait until the inode is
	 * unlocked after the lookup before we go ahead and free it.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	ASSERT(!ip->i_udquot && !ip->i_gdquot && !ip->i_pdquot);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	ASSERT(xfs_inode_clean(ip));

	__xfs_inode_free(ip);
	return;

out_clear_flush:
	xfs_iflags_clear(ip, XFS_IFLUSHING);
out_iunlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
out:
	xfs_iflags_clear(ip, XFS_IRECLAIM);
}

/* Reclaim sick inodes if we're unmounting or the fs went down. */
static inline bool
xfs_want_reclaim_sick(
	struct xfs_mount	*mp)
{
	return xfs_is_unmounting(mp) || xfs_has_norecovery(mp) ||
	       xfs_is_shutdown(mp);
}

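/*
 * Reclaim all reclaimable inodes, blocking until done. Push the AIL
 * synchronously on each pass so that dirty inodes become clean and hence
 * reclaimable, and loop until no AG carries the reclaim tag any more.
 */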
void
xfs_reclaim_inodes(
	struct xfs_mount	*mp)
{
	struct xfs_icwalk	icw = {
		.icw_flags	= 0,
	};

	if (xfs_want_reclaim_sick(mp))
		icw.icw_flags |= XFS_ICWALK_FLAG_RECLAIM_SICK;

	while (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
		xfs_ail_push_all_sync(mp->m_ail);
		xfs_icwalk(mp, XFS_ICWALK_RECLAIM, &icw);
	}
}

/*
 * The shrinker infrastructure determines how many inodes we should scan for
 * reclaim. We want as many clean inodes ready to reclaim as possible, so we
 * push the AIL here. We also want to proactively free up memory if we can to
 * minimise the amount of work memory reclaim has to do so we kick the
 * background reclaim if it isn't already scheduled.
 */
long
xfs_reclaim_inodes_nr(
	struct xfs_mount	*mp,
	unsigned long		nr_to_scan)
{
	struct xfs_icwalk	icw = {
		.icw_flags	= XFS_ICWALK_FLAG_SCAN_LIMIT,
		.icw_scan_limit	= min_t(unsigned long, LONG_MAX, nr_to_scan),
	};

	if (xfs_want_reclaim_sick(mp))
		icw.icw_flags |= XFS_ICWALK_FLAG_RECLAIM_SICK;

	/* kick background reclaimer and push the AIL */
	xfs_reclaim_work_queue(mp);
	xfs_ail_push_all(mp->m_ail);

	xfs_icwalk(mp, XFS_ICWALK_RECLAIM, &icw);
	return 0;
}

/*
 * Return the number of reclaimable inodes in the filesystem for
 * the shrinker to determine how much to reclaim.
 */
long
xfs_reclaim_inodes_count(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		ag = 0;
	long			reclaimable = 0;

	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
		ag = pag->pag_agno + 1;
		reclaimable += pag->pag_ici_reclaimable;
		xfs_perag_put(pag);
	}
	return reclaimable;
}

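/*
 * An intersection-based inode filter: the inode must match all of the id
 * criteria (uid, gid, project id) that are set in @icw.
 */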
STATIC bool
xfs_icwalk_match_id(
	struct xfs_inode	*ip,
	struct xfs_icwalk	*icw)
{
	if ((icw->icw_flags & XFS_ICWALK_FLAG_UID) &&
	    !uid_eq(VFS_I(ip)->i_uid, icw->icw_uid))
		return false;

	if ((icw->icw_flags & XFS_ICWALK_FLAG_GID) &&
	    !gid_eq(VFS_I(ip)->i_gid, icw->icw_gid))
		return false;

	if ((icw->icw_flags & XFS_ICWALK_FLAG_PRID) &&
	    ip->i_projid != icw->icw_prid)
		return false;

	return true;
}

/*
 * A union-based inode filtering algorithm. Process the inode if any of the
 * criteria match. This is for global/internal scans only.
 */
STATIC bool
xfs_icwalk_match_id_union(
	struct xfs_inode	*ip,
	struct xfs_icwalk	*icw)
{
	if ((icw->icw_flags & XFS_ICWALK_FLAG_UID) &&
	    uid_eq(VFS_I(ip)->i_uid, icw->icw_uid))
		return true;

	if ((icw->icw_flags & XFS_ICWALK_FLAG_GID) &&
	    gid_eq(VFS_I(ip)->i_gid, icw->icw_gid))
		return true;

	if ((icw->icw_flags & XFS_ICWALK_FLAG_PRID) &&
	    ip->i_projid == icw->icw_prid)
		return true;

	return false;
}

/*
 * Is this inode @ip eligible for eof/cow block reclamation, given some
 * filtering parameters @icw?  The inode is eligible if @icw is null or
 * if the predicate functions match.
 */
static bool
xfs_icwalk_match(
	struct xfs_inode	*ip,
	struct xfs_icwalk	*icw)
{
	bool			match;

	if (!icw)
		return true;

	if (icw->icw_flags & XFS_ICWALK_FLAG_UNION)
		match = xfs_icwalk_match_id_union(ip, icw);
	else
		match = xfs_icwalk_match_id(ip, icw);
	if (!match)
		return false;

	/* skip the inode if the file size is too small */
	if ((icw->icw_flags & XFS_ICWALK_FLAG_MINFILESIZE) &&
	    XFS_ISIZE(ip) < icw->icw_min_file_size)
		return false;

	return true;
}

/*
 * This is a fast pass over the inode cache to try to get reclaim moving on as
 * many inodes as possible in a short period of time. It kicks itself every few
 * seconds, as well as being kicked by the inode cache shrinker when memory
 * goes low.
 */
void
xfs_reclaim_worker(
	struct work_struct	*work)
{
	struct xfs_mount	*mp = container_of(to_delayed_work(work),
					struct xfs_mount, m_reclaim_work);

	xfs_icwalk(mp, XFS_ICWALK_RECLAIM, NULL);
	xfs_reclaim_work_queue(mp);
}

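/*
 * Try to free post-EOF speculative preallocations for this inode. Takes the
 * IOLOCK and records it in @lockflags so that the caller can drop all the
 * blockgc scan locks in one place once both the eofblocks and cowblocks
 * scans of the inode are complete.
 */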
STATIC int
xfs_inode_free_eofblocks(
	struct xfs_inode	*ip,
	struct xfs_icwalk	*icw,
	unsigned int		*lockflags)
{
	bool			wait;

	wait = icw && (icw->icw_flags & XFS_ICWALK_FLAG_SYNC);

	if (!xfs_iflags_test(ip, XFS_IEOFBLOCKS))
		return 0;

	/*
	 * If the mapping is dirty the operation can block and wait for some
	 * time. Unless we are waiting, skip it.
	 */
	if (!wait && mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY))
		return 0;

	if (!xfs_icwalk_match(ip, icw))
		return 0;

	/*
	 * If the caller is waiting, return -EAGAIN to keep the background
	 * scanner moving and revisit the inode in a subsequent pass.
	 */
	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
		if (wait)
			return -EAGAIN;
		return 0;
	}
	*lockflags |= XFS_IOLOCK_EXCL;

	if (xfs_can_free_eofblocks(ip, false))
		return xfs_free_eofblocks(ip);

	/* inode could be preallocated or append-only */
	trace_xfs_inode_free_eofblocks_invalid(ip);
	xfs_inode_clear_eofblocks_tag(ip);
	return 0;
}

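/*
 * Set an in-memory block gc flag (XFS_IEOFBLOCKS or XFS_ICOWBLOCKS) on the
 * inode and tag it in the per-AG tree so that the background blockgc worker
 * can find it.
 */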
static void
xfs_blockgc_set_iflag(
	struct xfs_inode	*ip,
	unsigned long		iflag)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_perag	*pag;

	ASSERT((iflag & ~(XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0);

	/*
	 * Don't bother locking the AG and looking up in the radix trees
	 * if we already know that we have the tag set.
	 */
	if (ip->i_flags & iflag)
		return;
	spin_lock(&ip->i_flags_lock);
	ip->i_flags |= iflag;
	spin_unlock(&ip->i_flags_lock);

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);

	xfs_perag_set_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
			XFS_ICI_BLOCKGC_TAG);

	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}

void
xfs_inode_set_eofblocks_tag(
	xfs_inode_t	*ip)
{
	trace_xfs_inode_set_eofblocks_tag(ip);
	return xfs_blockgc_set_iflag(ip, XFS_IEOFBLOCKS);
}

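/*
 * Clear an in-memory block gc flag, and drop the per-AG blockgc tag once
 * neither XFS_IEOFBLOCKS nor XFS_ICOWBLOCKS remains set on the inode.
 */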
static void
xfs_blockgc_clear_iflag(
	struct xfs_inode	*ip,
	unsigned long		iflag)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_perag	*pag;
	bool			clear_tag;

	ASSERT((iflag & ~(XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0);

	spin_lock(&ip->i_flags_lock);
	ip->i_flags &= ~iflag;
	clear_tag = (ip->i_flags & (XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0;
	spin_unlock(&ip->i_flags_lock);

	if (!clear_tag)
		return;

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);

	xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
			XFS_ICI_BLOCKGC_TAG);

	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}

void
xfs_inode_clear_eofblocks_tag(
	xfs_inode_t	*ip)
{
	trace_xfs_inode_clear_eofblocks_tag(ip);
	return xfs_blockgc_clear_iflag(ip, XFS_IEOFBLOCKS);
}

/*
 * Set ourselves up to free CoW blocks from this file. If it's already clean
 * then we can bail out quickly, but otherwise we must back off if the file
 * is undergoing some kind of write.
 */
static bool
xfs_prep_free_cowblocks(
	struct xfs_inode	*ip)
{
	/*
	 * Just clear the tag if we have an empty cow fork or none at all. It's
	 * possible the inode was fully unshared since it was originally tagged.
	 */
	if (!xfs_inode_has_cow_data(ip)) {
		trace_xfs_inode_free_cowblocks_invalid(ip);
		xfs_inode_clear_cowblocks_tag(ip);
		return false;
	}

	/*
	 * If the mapping is dirty or under writeback we cannot touch the
	 * CoW fork. Leave it alone if we're in the midst of a directio.
	 */
	if ((VFS_I(ip)->i_state & I_DIRTY_PAGES) ||
	    mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY) ||
	    mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_WRITEBACK) ||
	    atomic_read(&VFS_I(ip)->i_dio_count))
		return false;

	return true;
}

/*
 * Automatic CoW Reservation Freeing
 *
 * These functions automatically garbage collect leftover CoW reservations
 * that were made on behalf of a cowextsize hint when we start to run out
 * of quota or when the reservations sit around for too long. If the file
 * has dirty pages or is undergoing writeback, its CoW reservations will
 * be retained.
 *
 * The actual garbage collection piggybacks off the same code that runs
 * the speculative EOF preallocation garbage collector.
 */
STATIC int
xfs_inode_free_cowblocks(
	struct xfs_inode	*ip,
	struct xfs_icwalk	*icw,
	unsigned int		*lockflags)
{
	bool			wait;
	int			ret = 0;

	wait = icw && (icw->icw_flags & XFS_ICWALK_FLAG_SYNC);

	if (!xfs_iflags_test(ip, XFS_ICOWBLOCKS))
		return 0;

	if (!xfs_prep_free_cowblocks(ip))
		return 0;

	if (!xfs_icwalk_match(ip, icw))
		return 0;

	/*
	 * If the caller is waiting, return -EAGAIN to keep the background
	 * scanner moving and revisit the inode in a subsequent pass.
	 */
	if (!(*lockflags & XFS_IOLOCK_EXCL) &&
	    !xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
		if (wait)
			return -EAGAIN;
		return 0;
	}
	*lockflags |= XFS_IOLOCK_EXCL;

	if (!xfs_ilock_nowait(ip, XFS_MMAPLOCK_EXCL)) {
		if (wait)
			return -EAGAIN;
		return 0;
	}
	*lockflags |= XFS_MMAPLOCK_EXCL;

	/*
	 * Check again, nobody else should be able to dirty blocks or change
	 * the reflink iflag now that we have the first two locks held.
	 */
	if (xfs_prep_free_cowblocks(ip))
		ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, false);
	return ret;
}

void
xfs_inode_set_cowblocks_tag(
	xfs_inode_t	*ip)
{
	trace_xfs_inode_set_cowblocks_tag(ip);
	return xfs_blockgc_set_iflag(ip, XFS_ICOWBLOCKS);
}

void
xfs_inode_clear_cowblocks_tag(
	xfs_inode_t	*ip)
{
	trace_xfs_inode_clear_cowblocks_tag(ip);
	return xfs_blockgc_clear_iflag(ip, XFS_ICOWBLOCKS);
}

/* Disable post-EOF and CoW block auto-reclamation. */
void
xfs_blockgc_stop(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno;

	if (!xfs_clear_blockgc_enabled(mp))
		return;

	for_each_perag(mp, agno, pag)
		cancel_delayed_work_sync(&pag->pag_blockgc_work);
	trace_xfs_blockgc_stop(mp, __return_address);
}

/* Enable post-EOF and CoW block auto-reclamation. */
void
xfs_blockgc_start(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno;

	if (xfs_set_blockgc_enabled(mp))
		return;

	trace_xfs_blockgc_start(mp, __return_address);
	for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
		xfs_blockgc_queue(pag);
}

/* Don't try to run block gc on an inode that's in any of these states. */
#define XFS_BLOCKGC_NOGRAB_IFLAGS	(XFS_INEW | \
					 XFS_NEED_INACTIVE | \
					 XFS_INACTIVATING | \
					 XFS_IRECLAIMABLE | \
					 XFS_IRECLAIM)
/*
 * Decide if the given @ip is eligible for garbage collection of speculative
 * preallocations, and grab it if so. Returns true if it's ready to go or
 * false if we should just ignore it.
 */
static bool
xfs_blockgc_igrab(
	struct xfs_inode	*ip)
{
	struct inode		*inode = VFS_I(ip);

	ASSERT(rcu_read_lock_held());

	/* Check for stale RCU freed inode */
	spin_lock(&ip->i_flags_lock);
	if (!ip->i_ino)
		goto out_unlock_noent;

	if (ip->i_flags & XFS_BLOCKGC_NOGRAB_IFLAGS)
		goto out_unlock_noent;
	spin_unlock(&ip->i_flags_lock);

	/* nothing to sync during shutdown */
	if (xfs_is_shutdown(ip->i_mount))
		return false;

	/* If we can't grab the inode, it must be on its way to reclaim. */
	if (!igrab(inode))
		return false;

	/* inode is valid */
	return true;

out_unlock_noent:
	spin_unlock(&ip->i_flags_lock);
	return false;
}

/* Scan one incore inode for block preallocations that we can remove. */
static int
xfs_blockgc_scan_inode(
	struct xfs_inode	*ip,
	struct xfs_icwalk	*icw)
{
	unsigned int		lockflags = 0;
	int			error;

	error = xfs_inode_free_eofblocks(ip, icw, &lockflags);
	if (error)
		goto unlock;

	error = xfs_inode_free_cowblocks(ip, icw, &lockflags);
unlock:
	if (lockflags)
		xfs_iunlock(ip, lockflags);
	xfs_irele(ip);
	return error;
}

/* Background worker that trims preallocated space. */
void
xfs_blockgc_worker(
	struct work_struct	*work)
{
	struct xfs_perag	*pag = container_of(to_delayed_work(work),
					struct xfs_perag, pag_blockgc_work);
	struct xfs_mount	*mp = pag->pag_mount;
	int			error;

	trace_xfs_blockgc_worker(mp, __return_address);

	error = xfs_icwalk_ag(pag, XFS_ICWALK_BLOCKGC, NULL);
	if (error)
		xfs_info(mp, "AG %u preallocation gc worker failed, err=%d",
				pag->pag_agno, error);
	xfs_blockgc_queue(pag);
}

/*
 * Try to free space in the filesystem by purging inactive inodes, eofblocks
 * and cowblocks.
 */
int
xfs_blockgc_free_space(
	struct xfs_mount	*mp,
	struct xfs_icwalk	*icw)
{
	int			error;

	trace_xfs_blockgc_free_space(mp, icw, _RET_IP_);

	error = xfs_icwalk(mp, XFS_ICWALK_BLOCKGC, icw);
	if (error)
		return error;

	return xfs_inodegc_flush(mp);
}

/*
 * Reclaim all the free space that we can by scheduling the background blockgc
 * and inodegc workers immediately and waiting for them all to clear.
 */
int
xfs_blockgc_flush_all(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno;

	trace_xfs_blockgc_flush_all(mp, __return_address);

	/*
	 * For each blockgc worker, move its queue time up to now. If it
	 * wasn't queued, it will not be requeued. Then flush whatever's
	 * left.
	 */
	for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
		mod_delayed_work(pag->pag_mount->m_blockgc_wq,
				&pag->pag_blockgc_work, 0);

	for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
		flush_delayed_work(&pag->pag_blockgc_work);

	return xfs_inodegc_flush(mp);
}

/*
 * Run cow/eofblocks scans on the supplied dquots. We don't know exactly which
 * quota caused an allocation failure, so we make a best effort by including
 * each quota under low free space conditions (less than 1% free space) in the
 * scan.
 *
 * Callers must not hold any inode's ILOCK. If requesting a synchronous scan
 * (XFS_ICWALK_FLAG_SYNC), the caller also must not hold any inode's IOLOCK or
 * MMAPLOCK.
 */
int
xfs_blockgc_free_dquots(
	struct xfs_mount	*mp,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp,
	struct xfs_dquot	*pdqp,
	unsigned int		iwalk_flags)
{
	struct xfs_icwalk	icw = {0};
	bool			do_work = false;

	if (!udqp && !gdqp && !pdqp)
		return 0;

	/*
	 * Run a scan to free blocks using the union filter to cover all
	 * applicable quotas in a single scan.
	 */
	icw.icw_flags = XFS_ICWALK_FLAG_UNION | iwalk_flags;

	if (XFS_IS_UQUOTA_ENFORCED(mp) && udqp && xfs_dquot_lowsp(udqp)) {
		icw.icw_uid = make_kuid(mp->m_super->s_user_ns, udqp->q_id);
		icw.icw_flags |= XFS_ICWALK_FLAG_UID;
		do_work = true;
	}

	if (XFS_IS_GQUOTA_ENFORCED(mp) && gdqp && xfs_dquot_lowsp(gdqp)) {
		icw.icw_gid = make_kgid(mp->m_super->s_user_ns, gdqp->q_id);
		icw.icw_flags |= XFS_ICWALK_FLAG_GID;
		do_work = true;
	}

	if (XFS_IS_PQUOTA_ENFORCED(mp) && pdqp && xfs_dquot_lowsp(pdqp)) {
		icw.icw_prid = pdqp->q_id;
		icw.icw_flags |= XFS_ICWALK_FLAG_PRID;
		do_work = true;
	}

	if (!do_work)
		return 0;

	return xfs_blockgc_free_space(mp, &icw);
}

/* Run cow/eofblocks scans on the quotas attached to the inode. */
int
xfs_blockgc_free_quota(
	struct xfs_inode	*ip,
	unsigned int		iwalk_flags)
{
	return xfs_blockgc_free_dquots(ip->i_mount,
			xfs_inode_dquot(ip, XFS_DQTYPE_USER),
			xfs_inode_dquot(ip, XFS_DQTYPE_GROUP),
			xfs_inode_dquot(ip, XFS_DQTYPE_PROJ), iwalk_flags);
}

/* XFS Inode Cache Walking Code */

/*
 * The inode lookup is done in batches to keep the amount of lock traffic and
 * radix tree lookups to a minimum. The batch size is a trade off between
 * lookup reduction and stack usage. This is in the reclaim path, so we can't
 * be too greedy.
 */
#define XFS_LOOKUP_BATCH	32

/*
 * Decide if we want to grab this inode in anticipation of doing work towards
 * the goal.
 */
static inline bool
xfs_icwalk_igrab(
	enum xfs_icwalk_goal	goal,
	struct xfs_inode	*ip,
	struct xfs_icwalk	*icw)
{
	switch (goal) {
	case XFS_ICWALK_BLOCKGC:
		return xfs_blockgc_igrab(ip);
	case XFS_ICWALK_RECLAIM:
		return xfs_reclaim_igrab(ip, icw);
	default:
		return false;
	}
}

/*
 * Process an inode. Each processing function must handle any state changes
 * made by the icwalk igrab function. Return -EAGAIN to skip an inode.
 */
static inline int
xfs_icwalk_process_inode(
	enum xfs_icwalk_goal	goal,
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	struct xfs_icwalk	*icw)
{
	int			error = 0;

	switch (goal) {
	case XFS_ICWALK_BLOCKGC:
		error = xfs_blockgc_scan_inode(ip, icw);
		break;
	case XFS_ICWALK_RECLAIM:
		xfs_reclaim_inode(ip, pag);
		break;
	}
	return error;
}

/*
 * For a given per-AG structure @pag and a goal, grab qualifying inodes and
 * process them in some manner.
 */
static int
xfs_icwalk_ag(
	struct xfs_perag	*pag,
	enum xfs_icwalk_goal	goal,
	struct xfs_icwalk	*icw)
{
	struct xfs_mount	*mp = pag->pag_mount;
	uint32_t		first_index;
	int			last_error = 0;
	int			skipped;
	bool			done;
	int			nr_found;

restart:
	done = false;
	skipped = 0;
	if (goal == XFS_ICWALK_RECLAIM)
		first_index = READ_ONCE(pag->pag_ici_reclaim_cursor);
	else
		first_index = 0;
	nr_found = 0;
	do {
		struct xfs_inode *batch[XFS_LOOKUP_BATCH];
		int		error = 0;
		int		i;

		rcu_read_lock();

		nr_found = radix_tree_gang_lookup_tag(&pag->pag_ici_root,
				(void **) batch, first_index,
				XFS_LOOKUP_BATCH, goal);
		if (!nr_found) {
			done = true;
			rcu_read_unlock();
			break;
		}

		/*
		 * Grab the inodes before we drop the lock. If we found
		 * nothing, nr_found == 0 and the loop will be skipped.
		 */
		for (i = 0; i < nr_found; i++) {
			struct xfs_inode *ip = batch[i];

			if (done || !xfs_icwalk_igrab(goal, ip, icw))
				batch[i] = NULL;

			/*
			 * Update the index for the next lookup. Catch
			 * overflows into the next AG range which can occur if
			 * we have inodes in the last block of the AG and we
			 * are currently pointing to the last inode.
			 *
			 * Because we may see inodes that are from the wrong AG
			 * due to RCU freeing and reallocation, only update the
			 * index if it lies in this AG. It was a race that led
			 * us to see this inode, so another lookup from the
			 * same index will not find it again.
			 */
			if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
				continue;
			first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
			if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
				done = true;
		}

		/* unlock now we've grabbed the inodes. */
		rcu_read_unlock();

		for (i = 0; i < nr_found; i++) {
			if (!batch[i])
				continue;
			error = xfs_icwalk_process_inode(goal, batch[i], pag,
					icw);
			if (error == -EAGAIN) {
				skipped++;
				continue;
			}
			if (error && last_error != -EFSCORRUPTED)
				last_error = error;
		}

		/* bail out if the filesystem is corrupted. */
		if (error == -EFSCORRUPTED)
			break;

		cond_resched();

		if (icw && (icw->icw_flags & XFS_ICWALK_FLAG_SCAN_LIMIT)) {
			icw->icw_scan_limit -= XFS_LOOKUP_BATCH;
			if (icw->icw_scan_limit <= 0)
				break;
		}
	} while (nr_found && !done);

	if (goal == XFS_ICWALK_RECLAIM) {
		if (done)
			first_index = 0;
		WRITE_ONCE(pag->pag_ici_reclaim_cursor, first_index);
	}

	if (skipped) {
		delay(1);
		goto restart;
	}
	return last_error;
}

/* Walk all incore inodes to achieve a given goal. */
static int
xfs_icwalk(
	struct xfs_mount	*mp,
	enum xfs_icwalk_goal	goal,
	struct xfs_icwalk	*icw)
{
	struct xfs_perag	*pag;
	int			error = 0;
	int			last_error = 0;
	xfs_agnumber_t		agno;

	for_each_perag_tag(mp, agno, pag, goal) {
		error = xfs_icwalk_ag(pag, goal, icw);
		if (error) {
			last_error = error;
			if (error == -EFSCORRUPTED) {
				xfs_perag_rele(pag);
				break;
			}
		}
	}
	return last_error;
	BUILD_BUG_ON(XFS_ICWALK_PRIVATE_FLAGS & XFS_ICWALK_FLAGS_VALID);
}

#ifdef DEBUG
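/*
 * Debug check that an inode being scheduled for reclaim does not still carry
 * delayed allocation extents in the given fork; warn about each one found.
 */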
static void
xfs_check_delalloc(
	struct xfs_inode	*ip,
	int			whichfork)
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_bmbt_irec	got;
	struct xfs_iext_cursor	icur;

	if (!ifp || !xfs_iext_lookup_extent(ip, ifp, 0, &icur, &got))
		return;
	do {
		if (isnullstartblock(got.br_startblock)) {
			xfs_warn(ip->i_mount,
	"ino %llx %s fork has delalloc extent at [0x%llx:0x%llx]",
				ip->i_ino,
				whichfork == XFS_DATA_FORK ? "data" : "cow",
				got.br_startoff, got.br_blockcount);
		}
	} while (xfs_iext_next_extent(ifp, &icur, &got));
}
#else
#define xfs_check_delalloc(ip, whichfork)	do { } while (0)
#endif

/* Schedule the inode for reclaim. */
static void
xfs_inodegc_set_reclaimable(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_perag	*pag;

	if (!xfs_is_shutdown(mp) && ip->i_delayed_blks) {
		xfs_check_delalloc(ip, XFS_DATA_FORK);
		xfs_check_delalloc(ip, XFS_COW_FORK);
		ASSERT(0);
	}

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);
	spin_lock(&ip->i_flags_lock);

	trace_xfs_inode_set_reclaimable(ip);
	ip->i_flags &= ~(XFS_NEED_INACTIVE | XFS_INACTIVATING);
	ip->i_flags |= XFS_IRECLAIMABLE;
	xfs_perag_set_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
			XFS_ICI_RECLAIM_TAG);

	spin_unlock(&ip->i_flags_lock);
	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}

/*
 * Free all speculative preallocations and possibly even the inode itself.
 * This is the last chance to make changes to an otherwise unreferenced file
 * before incore reclamation happens.
 */
static int
xfs_inodegc_inactivate(
	struct xfs_inode	*ip)
{
	int			error;

	trace_xfs_inode_inactivating(ip);
	error = xfs_inactive(ip);
	xfs_inodegc_set_reclaimable(ip);
	return error;
}

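/*
 * Per-CPU background worker for deferred inode inactivation: detach this
 * CPU's queue of inodes, inactivate each one, and record the first error
 * encountered so that a later xfs_inodegc_wait_all() can collect it.
 */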
void
xfs_inodegc_worker(
	struct work_struct	*work)
{
	struct xfs_inodegc	*gc = container_of(to_delayed_work(work),
						struct xfs_inodegc, work);
	struct llist_node	*node = llist_del_all(&gc->list);
	struct xfs_inode	*ip, *n;
	struct xfs_mount	*mp = gc->mp;
	unsigned int		nofs_flag;

	/*
	 * Clear the cpu mask bit and ensure that we have seen the latest
	 * update of the gc structure associated with this CPU. This matches
	 * with the release semantics used when setting the cpumask bit in
	 * xfs_inodegc_queue.
	 */
	cpumask_clear_cpu(gc->cpu, &mp->m_inodegc_cpumask);
	smp_mb__after_atomic();

	WRITE_ONCE(gc->items, 0);

	if (!node)
		return;

	/*
	 * We can allocate memory here while doing writeback on behalf of
	 * memory reclaim. To avoid memory allocation deadlocks set the
	 * task-wide nofs context for the following operations.
	 */
	nofs_flag = memalloc_nofs_save();

	ip = llist_entry(node, struct xfs_inode, i_gclist);
	trace_xfs_inodegc_worker(mp, READ_ONCE(gc->shrinker_hits));

	WRITE_ONCE(gc->shrinker_hits, 0);
	llist_for_each_entry_safe(ip, n, node, i_gclist) {
		int	error;

		xfs_iflags_set(ip, XFS_INACTIVATING);
		error = xfs_inodegc_inactivate(ip);
		if (error && !gc->error)
			gc->error = error;
	}

	memalloc_nofs_restore(nofs_flag);
}

/*
 * Expedite all pending inodegc work to run immediately. This does not wait for
 * completion of the work.
 */
void
xfs_inodegc_push(
	struct xfs_mount	*mp)
{
	if (!xfs_is_inodegc_enabled(mp))
		return;
	trace_xfs_inodegc_push(mp, __return_address);
	xfs_inodegc_queue_all(mp);
}

/*
 * Force all currently queued inode inactivation work to run immediately and
 * wait for the work to finish.
 */
int
xfs_inodegc_flush(
	struct xfs_mount	*mp)
{
	xfs_inodegc_push(mp);
	trace_xfs_inodegc_flush(mp, __return_address);
	return xfs_inodegc_wait_all(mp);
}

/*
 * Flush all the pending work and then disable the inode inactivation background
 * workers and wait for them to stop. Caller must hold sb->s_umount to
 * coordinate changes in the inodegc_enabled state.
 */
void
xfs_inodegc_stop(
	struct xfs_mount	*mp)
{
	bool			rerun;

	if (!xfs_clear_inodegc_enabled(mp))
		return;

	/*
	 * Drain all pending inodegc work, including inodes that could be
	 * queued by racing xfs_inodegc_queue or xfs_inodegc_shrinker_scan
	 * threads that sample the inodegc state just prior to us clearing it.
	 * The inodegc flag state prevents new threads from queuing more
	 * inodes, so we queue pending work items and flush the workqueue until
	 * all inodegc lists are empty. IOWs, we cannot use drain_workqueue
	 * here because it does not allow other unserialized mechanisms to
	 * reschedule inodegc work while this draining is in progress.
	 */
	xfs_inodegc_queue_all(mp);
	do {
		flush_workqueue(mp->m_inodegc_wq);
		rerun = xfs_inodegc_queue_all(mp);
	} while (rerun);

	trace_xfs_inodegc_stop(mp, __return_address);
}

/*
 * Enable the inode inactivation background workers and schedule deferred inode
 * inactivation work if there is any. Caller must hold sb->s_umount to
 * coordinate changes in the inodegc_enabled state.
 */
void
xfs_inodegc_start(
	struct xfs_mount	*mp)
{
	if (xfs_set_inodegc_enabled(mp))
		return;

	trace_xfs_inodegc_start(mp, __return_address);
	xfs_inodegc_queue_all(mp);
}
1963
1964#ifdef CONFIG_XFS_RT
1965static inline bool
1966xfs_inodegc_want_queue_rt_file(
1967 struct xfs_inode *ip)
1968{
1969 struct xfs_mount *mp = ip->i_mount;
1970
1971 if (!XFS_IS_REALTIME_INODE(ip))
1972 return false;
1973
1974 if (__percpu_counter_compare(&mp->m_frextents,
1975 mp->m_low_rtexts[XFS_LOWSP_5_PCNT],
1976 XFS_FDBLOCKS_BATCH) < 0)
1977 return true;
1978
1979 return false;
1980}
1981#else
1982# define xfs_inodegc_want_queue_rt_file(ip) (false)
1983#endif /* CONFIG_XFS_RT */
1984
1985/*
1986 * Schedule the inactivation worker when:
1987 *
1988 * - We've accumulated more than one inode cluster buffer's worth of inodes.
1989 * - There is less than 5% free space left.
1990 * - Any of the quotas for this inode are near an enforcement limit.
1991 */
1992static inline bool
1993xfs_inodegc_want_queue_work(
1994 struct xfs_inode *ip,
1995 unsigned int items)
1996{
1997 struct xfs_mount *mp = ip->i_mount;
1998
1999 if (items > mp->m_ino_geo.inodes_per_cluster)
2000 return true;
2001
2002 if (__percpu_counter_compare(&mp->m_fdblocks,
2003 mp->m_low_space[XFS_LOWSP_5_PCNT],
2004 XFS_FDBLOCKS_BATCH) < 0)
2005 return true;
2006
2007 if (xfs_inodegc_want_queue_rt_file(ip))
2008 return true;
2009
2010 if (xfs_inode_near_dquot_enforcement(ip, XFS_DQTYPE_USER))
2011 return true;
2012
2013 if (xfs_inode_near_dquot_enforcement(ip, XFS_DQTYPE_GROUP))
2014 return true;
2015
2016 if (xfs_inode_near_dquot_enforcement(ip, XFS_DQTYPE_PROJ))
2017 return true;
2018
2019 return false;
2020}
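/*
 * A minimal sketch (an illustrative helper, not called anywhere in XFS) of
 * the low-space test used above: __percpu_counter_compare() returns a
 * negative value when the counter sits below the limit, with @batch
 * bounding the error of the cheap approximate percpu sum before it falls
 * back to a precise summation.
 */
static inline bool
example_counter_below_limit(
	struct percpu_counter	*counter,
	s64			limit)
{
	return __percpu_counter_compare(counter, limit,
			XFS_FDBLOCKS_BATCH) < 0;
}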
2021
2022/*
2023 * Upper bound on the number of inodes in each AG that can be queued for
2024 * inactivation at any given time, to avoid monopolizing the workqueue.
2025 */
2026#define XFS_INODEGC_MAX_BACKLOG (4 * XFS_INODES_PER_CHUNK)
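/*
 * XFS_INODES_PER_CHUNK is 64, so this works out to a cap of 256 inodes per
 * percpu queue before xfs_inodegc_want_flush_work() starts throttling the
 * frontend.
 */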
2027
2028/*
2029 * Make the frontend wait for inactivations when:
2030 *
2031 * - Memory shrinkers queued the inactivation worker and it hasn't finished.
2032 * - The queue depth exceeds the maximum allowable percpu backlog.
2033 *
2034 * Note: If the current thread is running a transaction, we don't ever want to
2035 * wait for other transactions because that could introduce a deadlock.
2036 */
2037static inline bool
2038xfs_inodegc_want_flush_work(
2039 struct xfs_inode *ip,
2040 unsigned int items,
2041 unsigned int shrinker_hits)
2042{
2043 if (current->journal_info)
2044 return false;
2045
2046 if (shrinker_hits > 0)
2047 return true;
2048
2049 if (items > XFS_INODEGC_MAX_BACKLOG)
2050 return true;
2051
2052 return false;
2053}
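/*
 * A concrete (hypothetical) instance of the deadlock the journal_info
 * check above avoids: a thread that already holds a transaction blocks
 * here waiting on the inodegc worker, while the worker's inactivation
 * transactions cannot reserve log space until the blocked thread's
 * transaction commits and releases it.
 */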
2054
2055/*
2056 * Queue a background inactivation worker if there are inodes that need to be
2057 * inactivated and higher level xfs code hasn't disabled the background
2058 * workers.
2059 */
2060static void
2061xfs_inodegc_queue(
2062 struct xfs_inode *ip)
2063{
2064 struct xfs_mount *mp = ip->i_mount;
2065 struct xfs_inodegc *gc;
2066 int items;
2067 unsigned int shrinker_hits;
2068 unsigned int cpu_nr;
2069 unsigned long queue_delay = 1;
2070
2071 trace_xfs_inode_set_need_inactive(ip);
2072 spin_lock(&ip->i_flags_lock);
2073 ip->i_flags |= XFS_NEED_INACTIVE;
2074 spin_unlock(&ip->i_flags_lock);
2075
2076 cpu_nr = get_cpu();
2077 gc = this_cpu_ptr(mp->m_inodegc);
2078 llist_add(&ip->i_gclist, &gc->list);
2079 items = READ_ONCE(gc->items);
2080 WRITE_ONCE(gc->items, items + 1);
2081 shrinker_hits = READ_ONCE(gc->shrinker_hits);
2082
2083 /*
2084 * Ensure the list add is always seen by anyone who finds the cpumask
2085 * bit set. This effectively gives the cpumask bit set operation
2086 * release ordering semantics.
2087 */
2088 smp_mb__before_atomic();
2089 if (!cpumask_test_cpu(cpu_nr, &mp->m_inodegc_cpumask))
2090 cpumask_test_and_set_cpu(cpu_nr, &mp->m_inodegc_cpumask);
2091
2092 /*
2093 * We queue the work while holding the current CPU so that the work
2094 * is scheduled to run on this CPU.
2095 */
2096 if (!xfs_is_inodegc_enabled(mp)) {
2097 put_cpu();
2098 return;
2099 }
2100
2101 if (xfs_inodegc_want_queue_work(ip, items))
2102 queue_delay = 0;
2103
2104 trace_xfs_inodegc_queue(mp, __return_address);
2105 mod_delayed_work_on(current_cpu(), mp->m_inodegc_wq, &gc->work,
2106 queue_delay);
2107 put_cpu();
2108
2109 if (xfs_inodegc_want_flush_work(ip, items, shrinker_hits)) {
2110 trace_xfs_inodegc_throttle(mp, __return_address);
2111 flush_delayed_work(&gc->work);
2112 }
2113}
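/*
 * The release ordering established above pairs with the consumer side: a
 * scanner such as xfs_inodegc_queue_all() that observes the cpumask bit
 * set is then guaranteed to observe the llist_add() that preceded it, so
 * no queued inode can be missed. A sketch of the assumed reading side:
 *
 *	for_each_cpu(cpu, &mp->m_inodegc_cpumask) {
 *		gc = per_cpu_ptr(mp->m_inodegc, cpu);
 *		if (!llist_empty(&gc->list))
 *			... schedule gc->work on @cpu ...
 *	}
 */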
2114
2115/*
2116 * We set the inode flag atomically with the radix tree tag. Once we get tag
2117 * lookups on the radix tree, this inode flag can go away.
2118 *
2119 * We always use background reclaim here because even if the inode is clean, it
2120 * still may be under IO and hence we have to wait for IO completion to occur
2121 * before we can reclaim the inode. The background reclaim path handles this
2122 * more efficiently than we can here, so simply let background reclaim tear down
2123 * all inodes.
2124 */
2125void
2126xfs_inode_mark_reclaimable(
2127 struct xfs_inode *ip)
2128{
2129 struct xfs_mount *mp = ip->i_mount;
2130 bool need_inactive;
2131
2132 XFS_STATS_INC(mp, vn_reclaim);
2133
2134 /*
2135 * We should never get here with any of the reclaim flags already set.
2136 */
2137 ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_ALL_IRECLAIM_FLAGS));
2138
2139 need_inactive = xfs_inode_needs_inactive(ip);
2140 if (need_inactive) {
2141 xfs_inodegc_queue(ip);
2142 return;
2143 }
2144
2145 /* Going straight to reclaim, so drop the dquots. */
2146 xfs_qm_dqdetach(ip);
2147 xfs_inodegc_set_reclaimable(ip);
2148}
2149
2150/*
2151 * Register a phony shrinker so that we can run background inodegc sooner when
2152 * there's memory pressure. Inactivation does not itself free any memory but
2153 * it does make inodes reclaimable, which eventually frees memory.
2154 *
2155 * The count function, seek value, and batch value are crafted to trigger the
2156 * scan function during the second round of scanning. Hopefully this means
2157 * that we reclaimed enough memory that initiating metadata transactions won't
2158 * make things worse.
2159 */
2160#define XFS_INODEGC_SHRINKER_COUNT (1UL << DEF_PRIORITY)
2161#define XFS_INODEGC_SHRINKER_BATCH ((XFS_INODEGC_SHRINKER_COUNT / 2) + 1)
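/*
 * Worked numbers, assuming DEF_PRIORITY is 12 and that the VM's handling
 * of a zero ->seeks shrinker accumulates roughly freeable/2 objects per
 * pass: COUNT is 4096 and BATCH is 2049, so the first pass banks ~2048
 * objects, just under the batch, and the second pass crosses it and
 * invokes ->scan_objects().
 */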
2162
2163static unsigned long
2164xfs_inodegc_shrinker_count(
2165 struct shrinker *shrink,
2166 struct shrink_control *sc)
2167{
2168 struct xfs_mount *mp = shrink->private_data;
2169 struct xfs_inodegc *gc;
2170 int cpu;
2171
2172 if (!xfs_is_inodegc_enabled(mp))
2173 return 0;
2174
2175 for_each_cpu(cpu, &mp->m_inodegc_cpumask) {
2176 gc = per_cpu_ptr(mp->m_inodegc, cpu);
2177 if (!llist_empty(&gc->list))
2178 return XFS_INODEGC_SHRINKER_COUNT;
2179 }
2180
2181 return 0;
2182}
2183
2184static unsigned long
2185xfs_inodegc_shrinker_scan(
2186 struct shrinker *shrink,
2187 struct shrink_control *sc)
2188{
2189 struct xfs_mount *mp = shrink->private_data;
2190 struct xfs_inodegc *gc;
2191 int cpu;
2192 bool no_items = true;
2193
2194 if (!xfs_is_inodegc_enabled(mp))
2195 return SHRINK_STOP;
2196
2197 trace_xfs_inodegc_shrinker_scan(mp, sc, __return_address);
2198
2199 for_each_cpu(cpu, &mp->m_inodegc_cpumask) {
2200 gc = per_cpu_ptr(mp->m_inodegc, cpu);
2201 if (!llist_empty(&gc->list)) {
2202 unsigned int h = READ_ONCE(gc->shrinker_hits);
2203
2204 WRITE_ONCE(gc->shrinker_hits, h + 1);
2205 mod_delayed_work_on(cpu, mp->m_inodegc_wq, &gc->work, 0);
2206 no_items = false;
2207 }
2208 }
2209
2210 /*
2211 * If there are no inodes to inactivate, we don't want the shrinker
2212 * to think there's deferred work to call us back about.
2213 */
2214 if (no_items)
2215 return LONG_MAX;
2216
2217 return SHRINK_STOP;
2218}
2219
2220/* Register a shrinker so we can accelerate inodegc and throttle queuing. */
2221int
2222xfs_inodegc_register_shrinker(
2223 struct xfs_mount *mp)
2224{
2225 mp->m_inodegc_shrinker = shrinker_alloc(SHRINKER_NONSLAB,
2226 "xfs-inodegc:%s",
2227 mp->m_super->s_id);
2228 if (!mp->m_inodegc_shrinker)
2229 return -ENOMEM;
2230
2231 mp->m_inodegc_shrinker->count_objects = xfs_inodegc_shrinker_count;
2232 mp->m_inodegc_shrinker->scan_objects = xfs_inodegc_shrinker_scan;
2233 mp->m_inodegc_shrinker->seeks = 0;
2234 mp->m_inodegc_shrinker->batch = XFS_INODEGC_SHRINKER_BATCH;
2235 mp->m_inodegc_shrinker->private_data = mp;
2236
2237 shrinker_register(mp->m_inodegc_shrinker);
2238
2239 return 0;
2240}
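/*
 * A minimal teardown sketch for symmetry (a hypothetical helper, not the
 * actual unmount path): the shrinker allocated above would be released
 * with shrinker_free().
 */
static inline void
example_inodegc_unregister_shrinker(
	struct xfs_mount	*mp)
{
	shrinker_free(mp->m_inodegc_shrinker);
}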