1/*
2 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18#include "xfs.h"
19#include "xfs_fs.h"
20#include "xfs_format.h"
21#include "xfs_log_format.h"
22#include "xfs_trans_resv.h"
23#include "xfs_sb.h"
24#include "xfs_mount.h"
25#include "xfs_inode.h"
26#include "xfs_error.h"
27#include "xfs_trans.h"
28#include "xfs_trans_priv.h"
29#include "xfs_inode_item.h"
30#include "xfs_quota.h"
31#include "xfs_trace.h"
32#include "xfs_icache.h"
33#include "xfs_bmap_util.h"
34#include "xfs_dquot_item.h"
35#include "xfs_dquot.h"
36#include "xfs_reflink.h"
37
38#include <linux/kthread.h>
39#include <linux/freezer.h>
40#include <linux/iversion.h>
41
42/*
43 * Allocate and initialise an xfs_inode.
44 */
45struct xfs_inode *
46xfs_inode_alloc(
47 struct xfs_mount *mp,
48 xfs_ino_t ino)
49{
50 struct xfs_inode *ip;
51
52 /*
53 * if this didn't occur in transactions, we could use
54 * KM_MAYFAIL and return NULL here on ENOMEM. Set the
55 * code up to do this anyway.
56 */
57 ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP);
58 if (!ip)
59 return NULL;
60 if (inode_init_always(mp->m_super, VFS_I(ip))) {
61 kmem_zone_free(xfs_inode_zone, ip);
62 return NULL;
63 }
64
65 /* VFS doesn't initialise i_mode! */
66 VFS_I(ip)->i_mode = 0;
67
68 XFS_STATS_INC(mp, vn_active);
69 ASSERT(atomic_read(&ip->i_pincount) == 0);
70 ASSERT(!xfs_isiflocked(ip));
71 ASSERT(ip->i_ino == 0);
72
73 /* initialise the xfs inode */
74 ip->i_ino = ino;
75 ip->i_mount = mp;
76 memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
77 ip->i_afp = NULL;
78 ip->i_cowfp = NULL;
79 ip->i_cnextents = 0;
80 ip->i_cformat = XFS_DINODE_FMT_EXTENTS;
81 memset(&ip->i_df, 0, sizeof(xfs_ifork_t));
82 ip->i_flags = 0;
83 ip->i_delayed_blks = 0;
84 memset(&ip->i_d, 0, sizeof(ip->i_d));
85
86 return ip;
87}
88
89STATIC void
90xfs_inode_free_callback(
91 struct rcu_head *head)
92{
93 struct inode *inode = container_of(head, struct inode, i_rcu);
94 struct xfs_inode *ip = XFS_I(inode);
95
96 switch (VFS_I(ip)->i_mode & S_IFMT) {
97 case S_IFREG:
98 case S_IFDIR:
99 case S_IFLNK:
100 xfs_idestroy_fork(ip, XFS_DATA_FORK);
101 break;
102 }
103
104 if (ip->i_afp)
105 xfs_idestroy_fork(ip, XFS_ATTR_FORK);
106 if (ip->i_cowfp)
107 xfs_idestroy_fork(ip, XFS_COW_FORK);
108
109 if (ip->i_itemp) {
110 ASSERT(!(ip->i_itemp->ili_item.li_flags & XFS_LI_IN_AIL));
111 xfs_inode_item_destroy(ip);
112 ip->i_itemp = NULL;
113 }
114
115 kmem_zone_free(xfs_inode_zone, ip);
116}
117
118static void
119__xfs_inode_free(
120 struct xfs_inode *ip)
121{
122 /* asserts to verify all state is correct here */
123 ASSERT(atomic_read(&ip->i_pincount) == 0);
124 XFS_STATS_DEC(ip->i_mount, vn_active);
125
126 call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
127}
128
129void
130xfs_inode_free(
131 struct xfs_inode *ip)
132{
133 ASSERT(!xfs_isiflocked(ip));
134
135 /*
136 * Because we use RCU freeing we need to ensure the inode always
137 * appears to be reclaimed with an invalid inode number when in the
138 * free state. The ip->i_flags_lock provides the barrier against lookup
139 * races.
140 */
141 spin_lock(&ip->i_flags_lock);
142 ip->i_flags = XFS_IRECLAIM;
143 ip->i_ino = 0;
144 spin_unlock(&ip->i_flags_lock);
145
146 __xfs_inode_free(ip);
147}
148
149/*
150 * Queue a new inode reclaim pass if there are reclaimable inodes and there
151 * isn't a reclaim pass already in progress. By default it runs every 5s based
152 * on the xfs periodic sync default of 30s. Perhaps this should have its own
153 * tunable, but that can be done if this method proves to be ineffective or too
154 * aggressive.
155 */
156static void
157xfs_reclaim_work_queue(
158 struct xfs_mount *mp)
159{
160
161 rcu_read_lock();
162 if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
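  /*
   * With the default xfs_syncd_centisecs of 3000 (the 30s default noted
   * in the comment above), this works out to 3000 / 6 * 10 = 5000ms,
   * i.e. the ~5s reclaim interval.
   */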
163 queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work,
164 msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
165 }
166 rcu_read_unlock();
167}
168
169/*
170 * This is a fast pass over the inode cache to try to get reclaim moving on as
171 * many inodes as possible in a short period of time. It kicks itself every few
172 * seconds, as well as being kicked by the inode cache shrinker when memory
173 * goes low. It scans as quickly as possible avoiding locked inodes or those
174 * already being flushed, and once done schedules a future pass.
175 */
176void
177xfs_reclaim_worker(
178 struct work_struct *work)
179{
180 struct xfs_mount *mp = container_of(to_delayed_work(work),
181 struct xfs_mount, m_reclaim_work);
182
183 xfs_reclaim_inodes(mp, SYNC_TRYLOCK);
184 xfs_reclaim_work_queue(mp);
185}
186
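/*
 * Account for a newly reclaimable inode in this AG. On the 0 -> 1 transition
 * we also tag the AG in the per-mount perag radix tree and kick the
 * background inode reclaim worker.
 */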
187static void
188xfs_perag_set_reclaim_tag(
189 struct xfs_perag *pag)
190{
191 struct xfs_mount *mp = pag->pag_mount;
192
193 lockdep_assert_held(&pag->pag_ici_lock);
194 if (pag->pag_ici_reclaimable++)
195 return;
196
197 /* propagate the reclaim tag up into the perag radix tree */
198 spin_lock(&mp->m_perag_lock);
199 radix_tree_tag_set(&mp->m_perag_tree, pag->pag_agno,
200 XFS_ICI_RECLAIM_TAG);
201 spin_unlock(&mp->m_perag_lock);
202
203 /* schedule periodic background inode reclaim */
204 xfs_reclaim_work_queue(mp);
205
206 trace_xfs_perag_set_reclaim(mp, pag->pag_agno, -1, _RET_IP_);
207}
208
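/*
 * Drop the count of reclaimable inodes in this AG. Once it reaches zero,
 * clear the AG's tag in the per-mount perag radix tree.
 */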
209static void
210xfs_perag_clear_reclaim_tag(
211 struct xfs_perag *pag)
212{
213 struct xfs_mount *mp = pag->pag_mount;
214
215 lockdep_assert_held(&pag->pag_ici_lock);
216 if (--pag->pag_ici_reclaimable)
217 return;
218
219 /* clear the reclaim tag from the perag radix tree */
220 spin_lock(&mp->m_perag_lock);
221 radix_tree_tag_clear(&mp->m_perag_tree, pag->pag_agno,
222 XFS_ICI_RECLAIM_TAG);
223 spin_unlock(&mp->m_perag_lock);
224 trace_xfs_perag_clear_reclaim(mp, pag->pag_agno, -1, _RET_IP_);
225}
226
227
228/*
229 * We set the inode flag atomically with the radix tree tag.
230 * Once we get tag lookups on the radix tree, this inode flag
231 * can go away.
232 */
233void
234xfs_inode_set_reclaim_tag(
235 struct xfs_inode *ip)
236{
237 struct xfs_mount *mp = ip->i_mount;
238 struct xfs_perag *pag;
239
240 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
241 spin_lock(&pag->pag_ici_lock);
242 spin_lock(&ip->i_flags_lock);
243
244 radix_tree_tag_set(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ip->i_ino),
245 XFS_ICI_RECLAIM_TAG);
246 xfs_perag_set_reclaim_tag(pag);
247 __xfs_iflags_set(ip, XFS_IRECLAIMABLE);
248
249 spin_unlock(&ip->i_flags_lock);
250 spin_unlock(&pag->pag_ici_lock);
251 xfs_perag_put(pag);
252}
253
254STATIC void
255xfs_inode_clear_reclaim_tag(
256 struct xfs_perag *pag,
257 xfs_ino_t ino)
258{
259 radix_tree_tag_clear(&pag->pag_ici_root,
260 XFS_INO_TO_AGINO(pag->pag_mount, ino),
261 XFS_ICI_RECLAIM_TAG);
262 xfs_perag_clear_reclaim_tag(pag);
263}
264
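/*
 * Wait for the XFS_INEW flag to be cleared, i.e. for another thread to finish
 * instantiating this inode.
 */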
265static void
266xfs_inew_wait(
267 struct xfs_inode *ip)
268{
269 wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_INEW_BIT);
270 DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_INEW_BIT);
271
272 do {
273 prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
274 if (!xfs_iflags_test(ip, XFS_INEW))
275 break;
276 schedule();
277 } while (true);
278 finish_wait(wq, &wait.wq_entry);
279}
280
281/*
282 * When we recycle a reclaimable inode, we need to re-initialise the VFS inode
283 * part of the structure. This is made more complex by the fact we store
284 * information about the on-disk values in the VFS inode and so we can't just
285 * overwrite the values unconditionally. Hence we save the parameters we
286 * need to retain across reinitialisation, and rewrite them into the VFS inode
287 * after reinitialisation even if it fails.
288 */
289static int
290xfs_reinit_inode(
291 struct xfs_mount *mp,
292 struct inode *inode)
293{
294 int error;
295 uint32_t nlink = inode->i_nlink;
296 uint32_t generation = inode->i_generation;
297 uint64_t version = inode_peek_iversion(inode);
298 umode_t mode = inode->i_mode;
299 dev_t dev = inode->i_rdev;
300
301 error = inode_init_always(mp->m_super, inode);
302
303 set_nlink(inode, nlink);
304 inode->i_generation = generation;
305 inode_set_iversion_queried(inode, version);
306 inode->i_mode = mode;
307 inode->i_rdev = dev;
308 return error;
309}
310
311/*
312 * Check the validity of the inode we just found in the cache
313 */
314static int
315xfs_iget_cache_hit(
316 struct xfs_perag *pag,
317 struct xfs_inode *ip,
318 xfs_ino_t ino,
319 int flags,
320 int lock_flags) __releases(RCU)
321{
322 struct inode *inode = VFS_I(ip);
323 struct xfs_mount *mp = ip->i_mount;
324 int error;
325
326 /*
327 * check for re-use of an inode within an RCU grace period due to the
328 * radix tree nodes not being updated yet. We monitor for this by
329 * setting the inode number to zero before freeing the inode structure.
330 * If the inode has been reallocated and set up, then the inode number
331 * will not match, so check for that, too.
332 */
333 spin_lock(&ip->i_flags_lock);
334 if (ip->i_ino != ino) {
335 trace_xfs_iget_skip(ip);
336 XFS_STATS_INC(mp, xs_ig_frecycle);
337 error = -EAGAIN;
338 goto out_error;
339 }
340
341
342 /*
343 * If we are racing with another cache hit that is currently
344 * instantiating this inode or currently recycling it out of
345 * reclaimable state, wait for the initialisation to complete
346 * before continuing.
347 *
348 * XXX(hch): eventually we should do something equivalent to
349 * wait_on_inode to wait for these flags to be cleared
350 * instead of polling for it.
351 */
352 if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) {
353 trace_xfs_iget_skip(ip);
354 XFS_STATS_INC(mp, xs_ig_frecycle);
355 error = -EAGAIN;
356 goto out_error;
357 }
358
359 /*
360 * If lookup is racing with unlink return an error immediately.
361 */
362 if (VFS_I(ip)->i_mode == 0 && !(flags & XFS_IGET_CREATE)) {
363 error = -ENOENT;
364 goto out_error;
365 }
366
367 /*
368 * If IRECLAIMABLE is set, we've torn down the VFS inode already.
369 * Need to carefully get it back into usable state.
370 */
371 if (ip->i_flags & XFS_IRECLAIMABLE) {
372 trace_xfs_iget_reclaim(ip);
373
374 if (flags & XFS_IGET_INCORE) {
375 error = -EAGAIN;
376 goto out_error;
377 }
378
379 /*
380 * We need to set XFS_IRECLAIM to prevent xfs_reclaim_inode
381 * from stomping over us while we recycle the inode. We can't
382 * clear the radix tree reclaimable tag yet as it requires
383 * pag_ici_lock to be held exclusive.
384 */
385 ip->i_flags |= XFS_IRECLAIM;
386
387 spin_unlock(&ip->i_flags_lock);
388 rcu_read_unlock();
389
390 error = xfs_reinit_inode(mp, inode);
391 if (error) {
392 bool wake;
393 /*
394 * Re-initializing the inode failed, and we are in deep
395 * trouble. Try to re-add it to the reclaim list.
396 */
397 rcu_read_lock();
398 spin_lock(&ip->i_flags_lock);
399 wake = !!__xfs_iflags_test(ip, XFS_INEW);
400 ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
401 if (wake)
402 wake_up_bit(&ip->i_flags, __XFS_INEW_BIT);
403 ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
404 trace_xfs_iget_reclaim_fail(ip);
405 goto out_error;
406 }
407
408 spin_lock(&pag->pag_ici_lock);
409 spin_lock(&ip->i_flags_lock);
410
411 /*
412 * Clear the per-lifetime state in the inode as we are now
413 * effectively a new inode and need to return to the initial
414 * state before reuse occurs.
415 */
416 ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
417 ip->i_flags |= XFS_INEW;
418 xfs_inode_clear_reclaim_tag(pag, ip->i_ino);
419 inode->i_state = I_NEW;
420
421 ASSERT(!rwsem_is_locked(&inode->i_rwsem));
422 init_rwsem(&inode->i_rwsem);
423
424 spin_unlock(&ip->i_flags_lock);
425 spin_unlock(&pag->pag_ici_lock);
426 } else {
427 /* If the VFS inode is being torn down, pause and try again. */
428 if (!igrab(inode)) {
429 trace_xfs_iget_skip(ip);
430 error = -EAGAIN;
431 goto out_error;
432 }
433
434 /* We've got a live one. */
435 spin_unlock(&ip->i_flags_lock);
436 rcu_read_unlock();
437 trace_xfs_iget_hit(ip);
438 }
439
440 if (lock_flags != 0)
441 xfs_ilock(ip, lock_flags);
442
443 if (!(flags & XFS_IGET_INCORE))
444 xfs_iflags_clear(ip, XFS_ISTALE | XFS_IDONTCACHE);
445 XFS_STATS_INC(mp, xs_ig_found);
446
447 return 0;
448
449out_error:
450 spin_unlock(&ip->i_flags_lock);
451 rcu_read_unlock();
452 return error;
453}
454
455
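/*
 * Inode cache miss: allocate a new in-core inode, read it in from disk and
 * insert it into the per-AG radix tree with XFS_INEW set, so that concurrent
 * RCU lookups see it as still under construction.
 */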
456static int
457xfs_iget_cache_miss(
458 struct xfs_mount *mp,
459 struct xfs_perag *pag,
460 xfs_trans_t *tp,
461 xfs_ino_t ino,
462 struct xfs_inode **ipp,
463 int flags,
464 int lock_flags)
465{
466 struct xfs_inode *ip;
467 int error;
468 xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ino);
469 int iflags;
470
471 ip = xfs_inode_alloc(mp, ino);
472 if (!ip)
473 return -ENOMEM;
474
475 error = xfs_iread(mp, tp, ip, flags);
476 if (error)
477 goto out_destroy;
478
479 if (!xfs_inode_verify_forks(ip)) {
480 error = -EFSCORRUPTED;
481 goto out_destroy;
482 }
483
484 trace_xfs_iget_miss(ip);
485
486
487 /*
488 * If we are allocating a new inode, then check what was returned is
489 * actually a free, empty inode. If we are not allocating an inode,
490 * then check we didn't find a free inode.
491 */
492 if (flags & XFS_IGET_CREATE) {
493 if (VFS_I(ip)->i_mode != 0) {
494 xfs_warn(mp,
495"Corruption detected! Free inode 0x%llx not marked free on disk",
496 ino);
497 error = -EFSCORRUPTED;
498 goto out_destroy;
499 }
500 if (ip->i_d.di_nblocks != 0) {
501 xfs_warn(mp,
502"Corruption detected! Free inode 0x%llx has blocks allocated!",
503 ino);
504 error = -EFSCORRUPTED;
505 goto out_destroy;
506 }
507 } else if (VFS_I(ip)->i_mode == 0) {
508 error = -ENOENT;
509 goto out_destroy;
510 }
511
512 /*
513 * Preload the radix tree so we can insert safely under the
514 * write spinlock. Note that we cannot sleep inside the preload
515 * region. Since we can be called from transaction context, don't
516 * recurse into the file system.
517 */
518 if (radix_tree_preload(GFP_NOFS)) {
519 error = -EAGAIN;
520 goto out_destroy;
521 }
522
523 /*
524 * Because the inode hasn't been added to the radix-tree yet it can't
525 * be found by another thread, so we can do the non-sleeping lock here.
526 */
527 if (lock_flags) {
528 if (!xfs_ilock_nowait(ip, lock_flags))
529 BUG();
530 }
531
532 /*
533 * These values must be set before inserting the inode into the radix
534 * tree as the moment it is inserted a concurrent lookup (allowed by the
535 * RCU locking mechanism) can find it and that lookup must see that this
536 * is an inode currently under construction (i.e. that XFS_INEW is set).
537 * The ip->i_flags_lock that protects the XFS_INEW flag forms the
538 * memory barrier that ensures this detection works correctly at lookup
539 * time.
540 */
541 iflags = XFS_INEW;
542 if (flags & XFS_IGET_DONTCACHE)
543 iflags |= XFS_IDONTCACHE;
544 ip->i_udquot = NULL;
545 ip->i_gdquot = NULL;
546 ip->i_pdquot = NULL;
547 xfs_iflags_set(ip, iflags);
548
549 /* insert the new inode */
550 spin_lock(&pag->pag_ici_lock);
551 error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
552 if (unlikely(error)) {
553 WARN_ON(error != -EEXIST);
554 XFS_STATS_INC(mp, xs_ig_dup);
555 error = -EAGAIN;
556 goto out_preload_end;
557 }
558 spin_unlock(&pag->pag_ici_lock);
559 radix_tree_preload_end();
560
561 *ipp = ip;
562 return 0;
563
564out_preload_end:
565 spin_unlock(&pag->pag_ici_lock);
566 radix_tree_preload_end();
567 if (lock_flags)
568 xfs_iunlock(ip, lock_flags);
569out_destroy:
570 __destroy_inode(VFS_I(ip));
571 xfs_inode_free(ip);
572 return error;
573}
574
575/*
576 * Look up an inode by number in the given file system.
577 * The inode is looked up in the cache held in each AG.
578 * If the inode is found in the cache, initialise the vfs inode
579 * if necessary.
580 *
581 * If it is not in core, read it in from the file system's device,
582 * add it to the cache and initialise the vfs inode.
583 *
584 * The inode is locked according to the value of the lock_flags parameter.
585 * This flag parameter indicates how and if the inode's IO lock and inode lock
586 * should be taken.
587 *
588 * mp -- the mount point structure for the current file system. It points
589 * to the inode hash table.
590 * tp -- a pointer to the current transaction if there is one. This is
591 * simply passed through to the xfs_iread() call.
592 * ino -- the number of the inode desired. This is the unique identifier
593 * within the file system for the inode being requested.
594 * lock_flags -- flags indicating how to lock the inode. See the comment
595 * for xfs_ilock() for a list of valid values.
596 */
597int
598xfs_iget(
599 xfs_mount_t *mp,
600 xfs_trans_t *tp,
601 xfs_ino_t ino,
602 uint flags,
603 uint lock_flags,
604 xfs_inode_t **ipp)
605{
606 xfs_inode_t *ip;
607 int error;
608 xfs_perag_t *pag;
609 xfs_agino_t agino;
610
611 /*
612 * xfs_reclaim_inode() uses the ILOCK to ensure an inode
613 * doesn't get freed while it's being referenced during a
614 * radix tree traversal here. It assumes this function
615 * acquires only the ILOCK (and therefore it has no need to
616 * involve the IOLOCK in this synchronization).
617 */
618 ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0);
619
620 /* reject inode numbers outside existing AGs */
621 if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
622 return -EINVAL;
623
624 XFS_STATS_INC(mp, xs_ig_attempts);
625
626 /* get the perag structure and ensure that it's inode capable */
627 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
628 agino = XFS_INO_TO_AGINO(mp, ino);
629
630again:
631 error = 0;
632 rcu_read_lock();
633 ip = radix_tree_lookup(&pag->pag_ici_root, agino);
634
635 if (ip) {
636 error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
637 if (error)
638 goto out_error_or_again;
639 } else {
640 rcu_read_unlock();
641 if (flags & XFS_IGET_INCORE) {
642 error = -ENODATA;
643 goto out_error_or_again;
644 }
645 XFS_STATS_INC(mp, xs_ig_missed);
646
647 error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
648 flags, lock_flags);
649 if (error)
650 goto out_error_or_again;
651 }
652 xfs_perag_put(pag);
653
654 *ipp = ip;
655
656 /*
657 * If we have a real type for an on-disk inode, we can set up the inode
658 * now. If it's a new inode being created, xfs_ialloc will handle it.
659 */
660 if (xfs_iflags_test(ip, XFS_INEW) && VFS_I(ip)->i_mode != 0)
661 xfs_setup_existing_inode(ip);
662 return 0;
663
664out_error_or_again:
665 if (!(flags & XFS_IGET_INCORE) && error == -EAGAIN) {
666 delay(1);
667 goto again;
668 }
669 xfs_perag_put(pag);
670 return error;
671}
672
673/*
674 * "Is this a cached inode that's also allocated?"
675 *
676 * Look up an inode by number in the given file system. If the inode is
677 * in cache and isn't in purgatory, return 1 if the inode is allocated
678 * and 0 if it is not. For all other cases (not in cache, being torn
679 * down, etc.), return a negative error code.
680 *
681 * The caller has to prevent inode allocation and freeing activity,
682 * presumably by locking the AGI buffer. This is to ensure that an
683 * inode cannot transition from allocated to freed until the caller is
684 * ready to allow that. If the inode is in an intermediate state (new,
685 * reclaimable, or being reclaimed), -EAGAIN will be returned; if the
686 * inode is not in the cache, -ENOENT will be returned. The caller must
687 * deal with these scenarios appropriately.
688 *
689 * This is a specialized use case for the online scrubber; if you're
690 * reading this, you probably want xfs_iget.
691 */
692int
693xfs_icache_inode_is_allocated(
694 struct xfs_mount *mp,
695 struct xfs_trans *tp,
696 xfs_ino_t ino,
697 bool *inuse)
698{
699 struct xfs_inode *ip;
700 int error;
701
702 error = xfs_iget(mp, tp, ino, XFS_IGET_INCORE, 0, &ip);
703 if (error)
704 return error;
705
706 *inuse = !!(VFS_I(ip)->i_mode);
707 IRELE(ip);
708 return 0;
709}
710
711/*
712 * The inode lookup is done in batches to keep the amount of lock traffic and
713 * radix tree lookups to a minimum. The batch size is a trade off between
714 * lookup reduction and stack usage. This is in the reclaim path, so we can't
715 * be too greedy.
716 */
717#define XFS_LOOKUP_BATCH 32
718
719STATIC int
720xfs_inode_ag_walk_grab(
721 struct xfs_inode *ip,
722 int flags)
723{
724 struct inode *inode = VFS_I(ip);
725 bool newinos = !!(flags & XFS_AGITER_INEW_WAIT);
726
727 ASSERT(rcu_read_lock_held());
728
729 /*
730 * check for stale RCU freed inode
731 *
732 * If the inode has been reallocated, it doesn't matter if it's not in
733 * the AG we are walking - we are walking for writeback, so if it
734 * passes all the "valid inode" checks and is dirty, then we'll write
735 * it back anyway. If it has been reallocated and is still being
736 * initialised, the XFS_INEW check below will catch it.
737 */
738 spin_lock(&ip->i_flags_lock);
739 if (!ip->i_ino)
740 goto out_unlock_noent;
741
742 /* avoid new or reclaimable inodes. Leave for reclaim code to flush */
743 if ((!newinos && __xfs_iflags_test(ip, XFS_INEW)) ||
744 __xfs_iflags_test(ip, XFS_IRECLAIMABLE | XFS_IRECLAIM))
745 goto out_unlock_noent;
746 spin_unlock(&ip->i_flags_lock);
747
748 /* nothing to sync during shutdown */
749 if (XFS_FORCED_SHUTDOWN(ip->i_mount))
750 return -EFSCORRUPTED;
751
752 /* If we can't grab the inode, it must be on its way to reclaim. */
753 if (!igrab(inode))
754 return -ENOENT;
755
756 /* inode is valid */
757 return 0;
758
759out_unlock_noent:
760 spin_unlock(&ip->i_flags_lock);
761 return -ENOENT;
762}
763
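/*
 * Walk the in-core inodes of one AG in batches of XFS_LOOKUP_BATCH, calling
 * @execute on every inode we manage to grab. A @tag of -1 walks all inodes;
 * otherwise only inodes carrying that radix tree tag are visited. Inodes that
 * return -EAGAIN are counted as skipped and the whole walk is restarted after
 * a short delay.
 */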
764STATIC int
765xfs_inode_ag_walk(
766 struct xfs_mount *mp,
767 struct xfs_perag *pag,
768 int (*execute)(struct xfs_inode *ip, int flags,
769 void *args),
770 int flags,
771 void *args,
772 int tag,
773 int iter_flags)
774{
775 uint32_t first_index;
776 int last_error = 0;
777 int skipped;
778 int done;
779 int nr_found;
780
781restart:
782 done = 0;
783 skipped = 0;
784 first_index = 0;
785 nr_found = 0;
786 do {
787 struct xfs_inode *batch[XFS_LOOKUP_BATCH];
788 int error = 0;
789 int i;
790
791 rcu_read_lock();
792
793 if (tag == -1)
794 nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
795 (void **)batch, first_index,
796 XFS_LOOKUP_BATCH);
797 else
798 nr_found = radix_tree_gang_lookup_tag(
799 &pag->pag_ici_root,
800 (void **) batch, first_index,
801 XFS_LOOKUP_BATCH, tag);
802
803 if (!nr_found) {
804 rcu_read_unlock();
805 break;
806 }
807
808 /*
809 * Grab the inodes before we drop the lock. If we found
810 * nothing, nr == 0 and the loop will be skipped.
811 */
812 for (i = 0; i < nr_found; i++) {
813 struct xfs_inode *ip = batch[i];
814
815 if (done || xfs_inode_ag_walk_grab(ip, iter_flags))
816 batch[i] = NULL;
817
818 /*
819 * Update the index for the next lookup. Catch
820 * overflows into the next AG range which can occur if
821 * we have inodes in the last block of the AG and we
822 * are currently pointing to the last inode.
823 *
824 * Because we may see inodes that are from the wrong AG
825 * due to RCU freeing and reallocation, only update the
826 * index if it lies in this AG. It was a race that led
827 * us to see this inode, so another lookup from the
828 * same index will not find it again.
829 */
830 if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
831 continue;
832 first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
833 if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
834 done = 1;
835 }
836
837 /* unlock now we've grabbed the inodes. */
838 rcu_read_unlock();
839
840 for (i = 0; i < nr_found; i++) {
841 if (!batch[i])
842 continue;
843 if ((iter_flags & XFS_AGITER_INEW_WAIT) &&
844 xfs_iflags_test(batch[i], XFS_INEW))
845 xfs_inew_wait(batch[i]);
846 error = execute(batch[i], flags, args);
847 IRELE(batch[i]);
848 if (error == -EAGAIN) {
849 skipped++;
850 continue;
851 }
852 if (error && last_error != -EFSCORRUPTED)
853 last_error = error;
854 }
855
856 /* bail out if the filesystem is corrupted. */
857 if (error == -EFSCORRUPTED)
858 break;
859
860 cond_resched();
861
862 } while (nr_found && !done);
863
864 if (skipped) {
865 delay(1);
866 goto restart;
867 }
868 return last_error;
869}
870
871/*
872 * Background scanning to trim post-EOF preallocated space. This is queued
873 * based on the 'speculative_prealloc_lifetime' tunable (5m by default).
874 */
875void
876xfs_queue_eofblocks(
877 struct xfs_mount *mp)
878{
879 rcu_read_lock();
880 if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_EOFBLOCKS_TAG))
881 queue_delayed_work(mp->m_eofblocks_workqueue,
882 &mp->m_eofblocks_work,
883 msecs_to_jiffies(xfs_eofb_secs * 1000));
884 rcu_read_unlock();
885}
886
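/*
 * Background worker that frees post-EOF speculative preallocations and then
 * requeues itself for the next pass.
 */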
887void
888xfs_eofblocks_worker(
889 struct work_struct *work)
890{
891 struct xfs_mount *mp = container_of(to_delayed_work(work),
892 struct xfs_mount, m_eofblocks_work);
893 xfs_icache_free_eofblocks(mp, NULL);
894 xfs_queue_eofblocks(mp);
895}
896
897/*
898 * Background scanning to trim preallocated CoW space. This is queued
899 * based on the 'speculative_cow_prealloc_lifetime' tunable (5m by default).
900 * (We'll just piggyback on the post-EOF prealloc space workqueue.)
901 */
902void
903xfs_queue_cowblocks(
904 struct xfs_mount *mp)
905{
906 rcu_read_lock();
907 if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_COWBLOCKS_TAG))
908 queue_delayed_work(mp->m_eofblocks_workqueue,
909 &mp->m_cowblocks_work,
910 msecs_to_jiffies(xfs_cowb_secs * 1000));
911 rcu_read_unlock();
912}
913
914void
915xfs_cowblocks_worker(
916 struct work_struct *work)
917{
918 struct xfs_mount *mp = container_of(to_delayed_work(work),
919 struct xfs_mount, m_cowblocks_work);
920 xfs_icache_free_cowblocks(mp, NULL);
921 xfs_queue_cowblocks(mp);
922}
923
924int
925xfs_inode_ag_iterator_flags(
926 struct xfs_mount *mp,
927 int (*execute)(struct xfs_inode *ip, int flags,
928 void *args),
929 int flags,
930 void *args,
931 int iter_flags)
932{
933 struct xfs_perag *pag;
934 int error = 0;
935 int last_error = 0;
936 xfs_agnumber_t ag;
937
938 ag = 0;
939 while ((pag = xfs_perag_get(mp, ag))) {
940 ag = pag->pag_agno + 1;
941 error = xfs_inode_ag_walk(mp, pag, execute, flags, args, -1,
942 iter_flags);
943 xfs_perag_put(pag);
944 if (error) {
945 last_error = error;
946 if (error == -EFSCORRUPTED)
947 break;
948 }
949 }
950 return last_error;
951}
952
953int
954xfs_inode_ag_iterator(
955 struct xfs_mount *mp,
956 int (*execute)(struct xfs_inode *ip, int flags,
957 void *args),
958 int flags,
959 void *args)
960{
961 return xfs_inode_ag_iterator_flags(mp, execute, flags, args, 0);
962}
963
964int
965xfs_inode_ag_iterator_tag(
966 struct xfs_mount *mp,
967 int (*execute)(struct xfs_inode *ip, int flags,
968 void *args),
969 int flags,
970 void *args,
971 int tag)
972{
973 struct xfs_perag *pag;
974 int error = 0;
975 int last_error = 0;
976 xfs_agnumber_t ag;
977
978 ag = 0;
979 while ((pag = xfs_perag_get_tag(mp, ag, tag))) {
980 ag = pag->pag_agno + 1;
981 error = xfs_inode_ag_walk(mp, pag, execute, flags, args, tag,
982 0);
983 xfs_perag_put(pag);
984 if (error) {
985 last_error = error;
986 if (error == -EFSCORRUPTED)
987 break;
988 }
989 }
990 return last_error;
991}
992
993/*
994 * Grab the inode for reclaim exclusively.
995 * Return 0 if we grabbed it, non-zero otherwise.
996 */
997STATIC int
998xfs_reclaim_inode_grab(
999 struct xfs_inode *ip,
1000 int flags)
1001{
1002 ASSERT(rcu_read_lock_held());
1003
1004 /* quick check for stale RCU freed inode */
1005 if (!ip->i_ino)
1006 return 1;
1007
1008 /*
1009 * If we are asked for non-blocking operation, do unlocked checks to
1010 * see if the inode already is being flushed or in reclaim to avoid
1011 * lock traffic.
1012 */
1013 if ((flags & SYNC_TRYLOCK) &&
1014 __xfs_iflags_test(ip, XFS_IFLOCK | XFS_IRECLAIM))
1015 return 1;
1016
1017 /*
1018 * The radix tree lock here protects a thread in xfs_iget from racing
1019 * with us starting reclaim on the inode. Once we have the
1020 * XFS_IRECLAIM flag set it will not touch us.
1021 *
1022 * Due to RCU lookup, we may find inodes that have been freed and only
1023 * have XFS_IRECLAIM set. Indeed, we may see reallocated inodes that
1024 * aren't candidates for reclaim at all, so we must check that
1025 * XFS_IRECLAIMABLE is set first before proceeding to reclaim.
1026 */
1027 spin_lock(&ip->i_flags_lock);
1028 if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
1029 __xfs_iflags_test(ip, XFS_IRECLAIM)) {
1030 /* not a reclaim candidate. */
1031 spin_unlock(&ip->i_flags_lock);
1032 return 1;
1033 }
1034 __xfs_iflags_set(ip, XFS_IRECLAIM);
1035 spin_unlock(&ip->i_flags_lock);
1036 return 0;
1037}
1038
1039/*
1040 * Inodes in different states need to be treated differently. The following
1041 * table lists the inode states and the reclaim actions necessary:
1042 *
1043 * inode state iflush ret required action
1044 * --------------- ---------- ---------------
1045 * bad - reclaim
1046 * shutdown EIO unpin and reclaim
1047 * clean, unpinned 0 reclaim
1048 * stale, unpinned 0 reclaim
1049 * clean, pinned(*) 0 requeue
1050 * stale, pinned EAGAIN requeue
1051 * dirty, async - requeue
1052 * dirty, sync 0 reclaim
1053 *
1054 * (*) dgc: I don't think the clean, pinned state is possible but it gets
1055 * handled anyway given the order of checks implemented.
1056 *
1057 * Also, because we get the flush lock first, we know that any inode that has
1058 * been flushed delwri has had the flush completed by the time we check that
1059 * the inode is clean.
1060 *
1061 * Note that because the inode is flushed delayed write by AIL pushing, the
1062 * flush lock may already be held here and waiting on it can result in very
1063 * long latencies. Hence for sync reclaims, where we wait on the flush lock,
1064 * the caller should push the AIL first before trying to reclaim inodes to
1065 * minimise the amount of time spent waiting. For background reclaim, we only
1066 * bother to reclaim clean inodes anyway.
1067 *
1068 * Hence the order of actions after gaining the locks should be:
1069 * bad => reclaim
1070 * shutdown => unpin and reclaim
1071 * pinned, async => requeue
1072 * pinned, sync => unpin
1073 * stale => reclaim
1074 * clean => reclaim
1075 * dirty, async => requeue
1076 * dirty, sync => flush, wait and reclaim
1077 */
1078STATIC int
1079xfs_reclaim_inode(
1080 struct xfs_inode *ip,
1081 struct xfs_perag *pag,
1082 int sync_mode)
1083{
1084 struct xfs_buf *bp = NULL;
1085 xfs_ino_t ino = ip->i_ino; /* for radix_tree_delete */
1086 int error;
1087
1088restart:
1089 error = 0;
1090 xfs_ilock(ip, XFS_ILOCK_EXCL);
1091 if (!xfs_iflock_nowait(ip)) {
1092 if (!(sync_mode & SYNC_WAIT))
1093 goto out;
1094 xfs_iflock(ip);
1095 }
1096
1097 if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
1098 xfs_iunpin_wait(ip);
1099 /* xfs_iflush_abort() drops the flush lock */
1100 xfs_iflush_abort(ip, false);
1101 goto reclaim;
1102 }
1103 if (xfs_ipincount(ip)) {
1104 if (!(sync_mode & SYNC_WAIT))
1105 goto out_ifunlock;
1106 xfs_iunpin_wait(ip);
1107 }
1108 if (xfs_iflags_test(ip, XFS_ISTALE) || xfs_inode_clean(ip)) {
1109 xfs_ifunlock(ip);
1110 goto reclaim;
1111 }
1112
1113 /*
1114 * Never flush out dirty data during non-blocking reclaim, as it would
1115 * just contend with AIL pushing trying to do the same job.
1116 */
1117 if (!(sync_mode & SYNC_WAIT))
1118 goto out_ifunlock;
1119
1120 /*
1121 * Now we have an inode that needs flushing.
1122 *
1123 * Note that xfs_iflush will never block on the inode buffer lock, as
1124 * xfs_ifree_cluster() can lock the inode buffer before it locks the
1125 * ip->i_lock, and we are doing the exact opposite here. As a result,
1126 * doing a blocking xfs_imap_to_bp() to get the cluster buffer would
1127 * result in an ABBA deadlock with xfs_ifree_cluster().
1128 *
1129 * As xfs_ifree_cluster() must gather all inodes that are active in the
1130 * cache to mark them stale, if we hit this case we don't actually want
1131 * to do IO here - we want the inode marked stale so we can simply
1132 * reclaim it. Hence if we get an EAGAIN error here, just unlock the
1133 * inode, back off and try again. Hopefully the next pass through will
1134 * see the stale flag set on the inode.
1135 */
1136 error = xfs_iflush(ip, &bp);
1137 if (error == -EAGAIN) {
1138 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1139 /* backoff longer than in xfs_ifree_cluster */
1140 delay(2);
1141 goto restart;
1142 }
1143
1144 if (!error) {
1145 error = xfs_bwrite(bp);
1146 xfs_buf_relse(bp);
1147 }
1148
1149reclaim:
1150 ASSERT(!xfs_isiflocked(ip));
1151
1152 /*
1153 * Because we use RCU freeing we need to ensure the inode always appears
1154 * to be reclaimed with an invalid inode number when in the free state.
1155 * We do this as early as possible under the ILOCK so that
1156 * xfs_iflush_cluster() and xfs_ifree_cluster() can be guaranteed to
1157 * detect races with us here. By doing this, we guarantee that once
1158 * xfs_iflush_cluster() or xfs_ifree_cluster() has locked XFS_ILOCK that
1159 * it will see either a valid inode that will serialise correctly, or it
1160 * will see an invalid inode that it can skip.
1161 */
1162 spin_lock(&ip->i_flags_lock);
1163 ip->i_flags = XFS_IRECLAIM;
1164 ip->i_ino = 0;
1165 spin_unlock(&ip->i_flags_lock);
1166
1167 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1168
1169 XFS_STATS_INC(ip->i_mount, xs_ig_reclaims);
1170 /*
1171 * Remove the inode from the per-AG radix tree.
1172 *
1173 * Because radix_tree_delete won't complain even if the item was never
1174 * added to the tree, assert that it's been there before to catch
1175 * problems with the inode life time early on.
1176 */
1177 spin_lock(&pag->pag_ici_lock);
1178 if (!radix_tree_delete(&pag->pag_ici_root,
1179 XFS_INO_TO_AGINO(ip->i_mount, ino)))
1180 ASSERT(0);
1181 xfs_perag_clear_reclaim_tag(pag);
1182 spin_unlock(&pag->pag_ici_lock);
1183
1184 /*
1185 * Here we do an (almost) spurious inode lock in order to coordinate
1186 * with inode cache radix tree lookups. This is because the lookup
1187 * can reference the inodes in the cache without taking references.
1188 *
1189 * We make that OK here by ensuring that we wait until the inode is
1190 * unlocked after the lookup before we go ahead and free it.
1191 */
1192 xfs_ilock(ip, XFS_ILOCK_EXCL);
1193 xfs_qm_dqdetach(ip);
1194 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1195
1196 __xfs_inode_free(ip);
1197 return error;
1198
1199out_ifunlock:
1200 xfs_ifunlock(ip);
1201out:
1202 xfs_iflags_clear(ip, XFS_IRECLAIM);
1203 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1204 /*
1205 * We could return -EAGAIN here to make reclaim rescan the inode tree in
1206 * a short while. However, this just burns CPU time scanning the tree
1207 * waiting for IO to complete and the reclaim work never goes back to
1208 * the idle state. Instead, return 0 to let the next scheduled
1209 * background reclaim attempt to reclaim the inode again.
1210 */
1211 return 0;
1212}
1213
1214/*
1215 * Walk the AGs and reclaim the inodes in them. Even if the filesystem is
1216 * corrupted, we still want to try to reclaim all the inodes. If we don't,
1217 * then a shut down during filesystem unmount reclaim walk will leak all the
1218 * unreclaimed inodes.
1219 */
1220STATIC int
1221xfs_reclaim_inodes_ag(
1222 struct xfs_mount *mp,
1223 int flags,
1224 int *nr_to_scan)
1225{
1226 struct xfs_perag *pag;
1227 int error = 0;
1228 int last_error = 0;
1229 xfs_agnumber_t ag;
1230 int trylock = flags & SYNC_TRYLOCK;
1231 int skipped;
1232
1233restart:
1234 ag = 0;
1235 skipped = 0;
1236 while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
1237 unsigned long first_index = 0;
1238 int done = 0;
1239 int nr_found = 0;
1240
1241 ag = pag->pag_agno + 1;
1242
1243 if (trylock) {
1244 if (!mutex_trylock(&pag->pag_ici_reclaim_lock)) {
1245 skipped++;
1246 xfs_perag_put(pag);
1247 continue;
1248 }
1249 first_index = pag->pag_ici_reclaim_cursor;
1250 } else
1251 mutex_lock(&pag->pag_ici_reclaim_lock);
1252
1253 do {
1254 struct xfs_inode *batch[XFS_LOOKUP_BATCH];
1255 int i;
1256
1257 rcu_read_lock();
1258 nr_found = radix_tree_gang_lookup_tag(
1259 &pag->pag_ici_root,
1260 (void **)batch, first_index,
1261 XFS_LOOKUP_BATCH,
1262 XFS_ICI_RECLAIM_TAG);
1263 if (!nr_found) {
1264 done = 1;
1265 rcu_read_unlock();
1266 break;
1267 }
1268
1269 /*
1270 * Grab the inodes before we drop the lock. If we found
1271 * nothing, nr == 0 and the loop will be skipped.
1272 */
1273 for (i = 0; i < nr_found; i++) {
1274 struct xfs_inode *ip = batch[i];
1275
1276 if (done || xfs_reclaim_inode_grab(ip, flags))
1277 batch[i] = NULL;
1278
1279 /*
1280 * Update the index for the next lookup. Catch
1281 * overflows into the next AG range which can
1282 * occur if we have inodes in the last block of
1283 * the AG and we are currently pointing to the
1284 * last inode.
1285 *
1286 * Because we may see inodes that are from the
1287 * wrong AG due to RCU freeing and
1288 * reallocation, only update the index if it
1289 * lies in this AG. It was a race that led us
1290 * to see this inode, so another lookup from
1291 * the same index will not find it again.
1292 */
1293 if (XFS_INO_TO_AGNO(mp, ip->i_ino) !=
1294 pag->pag_agno)
1295 continue;
1296 first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
1297 if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
1298 done = 1;
1299 }
1300
1301 /* unlock now we've grabbed the inodes. */
1302 rcu_read_unlock();
1303
1304 for (i = 0; i < nr_found; i++) {
1305 if (!batch[i])
1306 continue;
1307 error = xfs_reclaim_inode(batch[i], pag, flags);
1308 if (error && last_error != -EFSCORRUPTED)
1309 last_error = error;
1310 }
1311
1312 *nr_to_scan -= XFS_LOOKUP_BATCH;
1313
1314 cond_resched();
1315
1316 } while (nr_found && !done && *nr_to_scan > 0);
1317
1318 if (trylock && !done)
1319 pag->pag_ici_reclaim_cursor = first_index;
1320 else
1321 pag->pag_ici_reclaim_cursor = 0;
1322 mutex_unlock(&pag->pag_ici_reclaim_lock);
1323 xfs_perag_put(pag);
1324 }
1325
1326 /*
1327 * if we skipped any AG, and we still have scan count remaining, do
1328 * another pass this time using blocking reclaim semantics (i.e.
1329 * waiting on the reclaim locks and ignoring the reclaim cursors). This
1330 * ensures that when we get more reclaimers than AGs we block rather
1331 * than spin trying to execute reclaim.
1332 */
1333 if (skipped && (flags & SYNC_WAIT) && *nr_to_scan > 0) {
1334 trylock = 0;
1335 goto restart;
1336 }
1337 return last_error;
1338}
1339
1340int
1341xfs_reclaim_inodes(
1342 xfs_mount_t *mp,
1343 int mode)
1344{
1345 int nr_to_scan = INT_MAX;
1346
1347 return xfs_reclaim_inodes_ag(mp, mode, &nr_to_scan);
1348}
1349
1350/*
1351 * Scan a certain number of inodes for reclaim.
1352 *
1353 * When called we make sure that there is a background (fast) inode reclaim in
1354 * progress, while we throttle the speed of reclaim by doing synchronous
1355 * reclaim of inodes. That means if we come across dirty inodes, we wait for
1356 * them to be cleaned, which we hope will not be very long due to the
1357 * background walker having already kicked the IO off on those dirty inodes.
1358 */
1359long
1360xfs_reclaim_inodes_nr(
1361 struct xfs_mount *mp,
1362 int nr_to_scan)
1363{
1364 /* kick background reclaimer and push the AIL */
1365 xfs_reclaim_work_queue(mp);
1366 xfs_ail_push_all(mp->m_ail);
1367
1368 return xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK | SYNC_WAIT, &nr_to_scan);
1369}
1370
1371/*
1372 * Return the number of reclaimable inodes in the filesystem for
1373 * the shrinker to determine how much to reclaim.
1374 */
1375int
1376xfs_reclaim_inodes_count(
1377 struct xfs_mount *mp)
1378{
1379 struct xfs_perag *pag;
1380 xfs_agnumber_t ag = 0;
1381 int reclaimable = 0;
1382
1383 while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
1384 ag = pag->pag_agno + 1;
1385 reclaimable += pag->pag_ici_reclaimable;
1386 xfs_perag_put(pag);
1387 }
1388 return reclaimable;
1389}
1390
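/*
 * Match an inode against the uid/gid/prid filters in @eofb; every filter that
 * is set must match. This is the intersection counterpart of
 * xfs_inode_match_id_union() below.
 */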
1391STATIC int
1392xfs_inode_match_id(
1393 struct xfs_inode *ip,
1394 struct xfs_eofblocks *eofb)
1395{
1396 if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) &&
1397 !uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid))
1398 return 0;
1399
1400 if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) &&
1401 !gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid))
1402 return 0;
1403
1404 if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
1405 xfs_get_projid(ip) != eofb->eof_prid)
1406 return 0;
1407
1408 return 1;
1409}
1410
1411/*
1412 * A union-based inode filtering algorithm. Process the inode if any of the
1413 * criteria match. This is for global/internal scans only.
1414 */
1415STATIC int
1416xfs_inode_match_id_union(
1417 struct xfs_inode *ip,
1418 struct xfs_eofblocks *eofb)
1419{
1420 if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) &&
1421 uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid))
1422 return 1;
1423
1424 if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) &&
1425 gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid))
1426 return 1;
1427
1428 if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
1429 xfs_get_projid(ip) == eofb->eof_prid)
1430 return 1;
1431
1432 return 0;
1433}
1434
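/*
 * Per-inode callback for the eofblocks scan: free post-EOF preallocated
 * blocks on inodes that pass the filter in @args. For a waiting (SYNC_WAIT)
 * scan, -EAGAIN is returned if the IOLOCK cannot be taken so the inode is
 * revisited on a later pass.
 */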
1435STATIC int
1436xfs_inode_free_eofblocks(
1437 struct xfs_inode *ip,
1438 int flags,
1439 void *args)
1440{
1441 int ret = 0;
1442 struct xfs_eofblocks *eofb = args;
1443 int match;
1444
1445 if (!xfs_can_free_eofblocks(ip, false)) {
1446 /* inode could be preallocated or append-only */
1447 trace_xfs_inode_free_eofblocks_invalid(ip);
1448 xfs_inode_clear_eofblocks_tag(ip);
1449 return 0;
1450 }
1451
1452 /*
1453 * If the mapping is dirty the operation can block and wait for some
1454 * time. Unless we are waiting, skip it.
1455 */
1456 if (!(flags & SYNC_WAIT) &&
1457 mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY))
1458 return 0;
1459
1460 if (eofb) {
1461 if (eofb->eof_flags & XFS_EOF_FLAGS_UNION)
1462 match = xfs_inode_match_id_union(ip, eofb);
1463 else
1464 match = xfs_inode_match_id(ip, eofb);
1465 if (!match)
1466 return 0;
1467
1468 /* skip the inode if the file size is too small */
1469 if (eofb->eof_flags & XFS_EOF_FLAGS_MINFILESIZE &&
1470 XFS_ISIZE(ip) < eofb->eof_min_file_size)
1471 return 0;
1472 }
1473
1474 /*
1475 * If the caller is waiting, return -EAGAIN to keep the background
1476 * scanner moving and revisit the inode in a subsequent pass.
1477 */
1478 if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
1479 if (flags & SYNC_WAIT)
1480 ret = -EAGAIN;
1481 return ret;
1482 }
1483 ret = xfs_free_eofblocks(ip);
1484 xfs_iunlock(ip, XFS_IOLOCK_EXCL);
1485
1486 return ret;
1487}
1488
1489static int
1490__xfs_icache_free_eofblocks(
1491 struct xfs_mount *mp,
1492 struct xfs_eofblocks *eofb,
1493 int (*execute)(struct xfs_inode *ip, int flags,
1494 void *args),
1495 int tag)
1496{
1497 int flags = SYNC_TRYLOCK;
1498
1499 if (eofb && (eofb->eof_flags & XFS_EOF_FLAGS_SYNC))
1500 flags = SYNC_WAIT;
1501
1502 return xfs_inode_ag_iterator_tag(mp, execute, flags,
1503 eofb, tag);
1504}
1505
1506int
1507xfs_icache_free_eofblocks(
1508 struct xfs_mount *mp,
1509 struct xfs_eofblocks *eofb)
1510{
1511 return __xfs_icache_free_eofblocks(mp, eofb, xfs_inode_free_eofblocks,
1512 XFS_ICI_EOFBLOCKS_TAG);
1513}
1514
1515/*
1516 * Run eofblocks scans on the quotas applicable to the inode. For inodes with
1517 * multiple quotas, we don't know exactly which quota caused an allocation
1518 * failure. We make a best effort by including each quota under low free space
1519 * conditions (less than 1% free space) in the scan.
1520 */
1521static int
1522__xfs_inode_free_quota_eofblocks(
1523 struct xfs_inode *ip,
1524 int (*execute)(struct xfs_mount *mp,
1525 struct xfs_eofblocks *eofb))
1526{
1527 int scan = 0;
1528 struct xfs_eofblocks eofb = {0};
1529 struct xfs_dquot *dq;
1530
1531 /*
1532 * Run a sync scan to increase effectiveness and use the union filter to
1533 * cover all applicable quotas in a single scan.
1534 */
1535 eofb.eof_flags = XFS_EOF_FLAGS_UNION|XFS_EOF_FLAGS_SYNC;
1536
1537 if (XFS_IS_UQUOTA_ENFORCED(ip->i_mount)) {
1538 dq = xfs_inode_dquot(ip, XFS_DQ_USER);
1539 if (dq && xfs_dquot_lowsp(dq)) {
1540 eofb.eof_uid = VFS_I(ip)->i_uid;
1541 eofb.eof_flags |= XFS_EOF_FLAGS_UID;
1542 scan = 1;
1543 }
1544 }
1545
1546 if (XFS_IS_GQUOTA_ENFORCED(ip->i_mount)) {
1547 dq = xfs_inode_dquot(ip, XFS_DQ_GROUP);
1548 if (dq && xfs_dquot_lowsp(dq)) {
1549 eofb.eof_gid = VFS_I(ip)->i_gid;
1550 eofb.eof_flags |= XFS_EOF_FLAGS_GID;
1551 scan = 1;
1552 }
1553 }
1554
1555 if (scan)
1556 execute(ip->i_mount, &eofb);
1557
1558 return scan;
1559}
1560
1561int
1562xfs_inode_free_quota_eofblocks(
1563 struct xfs_inode *ip)
1564{
1565 return __xfs_inode_free_quota_eofblocks(ip, xfs_icache_free_eofblocks);
1566}
1567
1568static inline unsigned long
1569xfs_iflag_for_tag(
1570 int tag)
1571{
1572 switch (tag) {
1573 case XFS_ICI_EOFBLOCKS_TAG:
1574 return XFS_IEOFBLOCKS;
1575 case XFS_ICI_COWBLOCKS_TAG:
1576 return XFS_ICOWBLOCKS;
1577 default:
1578 ASSERT(0);
1579 return 0;
1580 }
1581}
1582
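/*
 * Tag an inode for post-EOF or CoW block trimming. On the first tagged inode
 * in the AG, the tag is propagated up into the per-mount perag radix tree and
 * the matching background worker is kicked off.
 */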
1583static void
1584__xfs_inode_set_blocks_tag(
1585 xfs_inode_t *ip,
1586 void (*execute)(struct xfs_mount *mp),
1587 void (*set_tp)(struct xfs_mount *mp, xfs_agnumber_t agno,
1588 int error, unsigned long caller_ip),
1589 int tag)
1590{
1591 struct xfs_mount *mp = ip->i_mount;
1592 struct xfs_perag *pag;
1593 int tagged;
1594
1595 /*
1596 * Don't bother locking the AG and looking up in the radix trees
1597 * if we already know that we have the tag set.
1598 */
1599 if (ip->i_flags & xfs_iflag_for_tag(tag))
1600 return;
1601 spin_lock(&ip->i_flags_lock);
1602 ip->i_flags |= xfs_iflag_for_tag(tag);
1603 spin_unlock(&ip->i_flags_lock);
1604
1605 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
1606 spin_lock(&pag->pag_ici_lock);
1607
1608 tagged = radix_tree_tagged(&pag->pag_ici_root, tag);
1609 radix_tree_tag_set(&pag->pag_ici_root,
1610 XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino), tag);
1611 if (!tagged) {
1612 /* propagate the eofblocks tag up into the perag radix tree */
1613 spin_lock(&ip->i_mount->m_perag_lock);
1614 radix_tree_tag_set(&ip->i_mount->m_perag_tree,
1615 XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
1616 tag);
1617 spin_unlock(&ip->i_mount->m_perag_lock);
1618
1619 /* kick off background trimming */
1620 execute(ip->i_mount);
1621
1622 set_tp(ip->i_mount, pag->pag_agno, -1, _RET_IP_);
1623 }
1624
1625 spin_unlock(&pag->pag_ici_lock);
1626 xfs_perag_put(pag);
1627}
1628
1629void
1630xfs_inode_set_eofblocks_tag(
1631 xfs_inode_t *ip)
1632{
1633 trace_xfs_inode_set_eofblocks_tag(ip);
1634 return __xfs_inode_set_blocks_tag(ip, xfs_queue_eofblocks,
1635 trace_xfs_perag_set_eofblocks,
1636 XFS_ICI_EOFBLOCKS_TAG);
1637}
1638
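/*
 * Clear the post-EOF or CoW trimming tag for an inode, dropping the perag
 * level tag as well once no tagged inodes remain in the AG.
 */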
1639static void
1640__xfs_inode_clear_blocks_tag(
1641 xfs_inode_t *ip,
1642 void (*clear_tp)(struct xfs_mount *mp, xfs_agnumber_t agno,
1643 int error, unsigned long caller_ip),
1644 int tag)
1645{
1646 struct xfs_mount *mp = ip->i_mount;
1647 struct xfs_perag *pag;
1648
1649 spin_lock(&ip->i_flags_lock);
1650 ip->i_flags &= ~xfs_iflag_for_tag(tag);
1651 spin_unlock(&ip->i_flags_lock);
1652
1653 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
1654 spin_lock(&pag->pag_ici_lock);
1655
1656 radix_tree_tag_clear(&pag->pag_ici_root,
1657 XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino), tag);
1658 if (!radix_tree_tagged(&pag->pag_ici_root, tag)) {
1659 /* clear the eofblocks tag from the perag radix tree */
1660 spin_lock(&ip->i_mount->m_perag_lock);
1661 radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
1662 XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
1663 tag);
1664 spin_unlock(&ip->i_mount->m_perag_lock);
1665 clear_tp(ip->i_mount, pag->pag_agno, -1, _RET_IP_);
1666 }
1667
1668 spin_unlock(&pag->pag_ici_lock);
1669 xfs_perag_put(pag);
1670}
1671
1672void
1673xfs_inode_clear_eofblocks_tag(
1674 xfs_inode_t *ip)
1675{
1676 trace_xfs_inode_clear_eofblocks_tag(ip);
1677 return __xfs_inode_clear_blocks_tag(ip,
1678 trace_xfs_perag_clear_eofblocks, XFS_ICI_EOFBLOCKS_TAG);
1679}
1680
1681/*
1682 * Set ourselves up to free CoW blocks from this file. If it's already clean
1683 * then we can bail out quickly, but otherwise we must back off if the file
1684 * is undergoing some kind of write.
1685 */
1686static bool
1687xfs_prep_free_cowblocks(
1688 struct xfs_inode *ip,
1689 struct xfs_ifork *ifp)
1690{
1691 /*
1692 * Just clear the tag if we have an empty cow fork or none at all. It's
1693 * possible the inode was fully unshared since it was originally tagged.
1694 */
1695 if (!xfs_is_reflink_inode(ip) || !ifp->if_bytes) {
1696 trace_xfs_inode_free_cowblocks_invalid(ip);
1697 xfs_inode_clear_cowblocks_tag(ip);
1698 return false;
1699 }
1700
1701 /*
1702 * If the mapping is dirty or under writeback we cannot touch the
1703 * CoW fork. Leave it alone if we're in the midst of a directio.
1704 */
1705 if ((VFS_I(ip)->i_state & I_DIRTY_PAGES) ||
1706 mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY) ||
1707 mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_WRITEBACK) ||
1708 atomic_read(&VFS_I(ip)->i_dio_count))
1709 return false;
1710
1711 return true;
1712}
1713
1714/*
1715 * Automatic CoW Reservation Freeing
1716 *
1717 * These functions automatically garbage collect leftover CoW reservations
1718 * that were made on behalf of a cowextsize hint when we start to run out
1719 * of quota or when the reservations sit around for too long. If the file
1720 * has dirty pages or is undergoing writeback, its CoW reservations will
1721 * be retained.
1722 *
1723 * The actual garbage collection piggybacks off the same code that runs
1724 * the speculative EOF preallocation garbage collector.
1725 */
1726STATIC int
1727xfs_inode_free_cowblocks(
1728 struct xfs_inode *ip,
1729 int flags,
1730 void *args)
1731{
1732 struct xfs_eofblocks *eofb = args;
1733 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
1734 int match;
1735 int ret = 0;
1736
1737 if (!xfs_prep_free_cowblocks(ip, ifp))
1738 return 0;
1739
1740 if (eofb) {
1741 if (eofb->eof_flags & XFS_EOF_FLAGS_UNION)
1742 match = xfs_inode_match_id_union(ip, eofb);
1743 else
1744 match = xfs_inode_match_id(ip, eofb);
1745 if (!match)
1746 return 0;
1747
1748 /* skip the inode if the file size is too small */
1749 if (eofb->eof_flags & XFS_EOF_FLAGS_MINFILESIZE &&
1750 XFS_ISIZE(ip) < eofb->eof_min_file_size)
1751 return 0;
1752 }
1753
1754 /* Free the CoW blocks */
1755 xfs_ilock(ip, XFS_IOLOCK_EXCL);
1756 xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
1757
1758 /*
1759 * Check again, nobody else should be able to dirty blocks or change
1760 * the reflink iflag now that we have the first two locks held.
1761 */
1762 if (xfs_prep_free_cowblocks(ip, ifp))
1763 ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, false);
1764
1765 xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
1766 xfs_iunlock(ip, XFS_IOLOCK_EXCL);
1767
1768 return ret;
1769}
1770
1771int
1772xfs_icache_free_cowblocks(
1773 struct xfs_mount *mp,
1774 struct xfs_eofblocks *eofb)
1775{
1776 return __xfs_icache_free_eofblocks(mp, eofb, xfs_inode_free_cowblocks,
1777 XFS_ICI_COWBLOCKS_TAG);
1778}
1779
1780int
1781xfs_inode_free_quota_cowblocks(
1782 struct xfs_inode *ip)
1783{
1784 return __xfs_inode_free_quota_eofblocks(ip, xfs_icache_free_cowblocks);
1785}
1786
1787void
1788xfs_inode_set_cowblocks_tag(
1789 xfs_inode_t *ip)
1790{
1791 trace_xfs_inode_set_cowblocks_tag(ip);
1792 return __xfs_inode_set_blocks_tag(ip, xfs_queue_cowblocks,
1793 trace_xfs_perag_set_cowblocks,
1794 XFS_ICI_COWBLOCKS_TAG);
1795}
1796
1797void
1798xfs_inode_clear_cowblocks_tag(
1799 xfs_inode_t *ip)
1800{
1801 trace_xfs_inode_clear_cowblocks_tag(ip);
1802 return __xfs_inode_clear_blocks_tag(ip,
1803 trace_xfs_perag_clear_cowblocks, XFS_ICI_COWBLOCKS_TAG);
1804}
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
4 * All Rights Reserved.
5 */
6#include "xfs.h"
7#include "xfs_fs.h"
8#include "xfs_shared.h"
9#include "xfs_format.h"
10#include "xfs_log_format.h"
11#include "xfs_trans_resv.h"
12#include "xfs_sb.h"
13#include "xfs_mount.h"
14#include "xfs_inode.h"
15#include "xfs_trans.h"
16#include "xfs_trans_priv.h"
17#include "xfs_inode_item.h"
18#include "xfs_quota.h"
19#include "xfs_trace.h"
20#include "xfs_icache.h"
21#include "xfs_bmap_util.h"
22#include "xfs_dquot_item.h"
23#include "xfs_dquot.h"
24#include "xfs_reflink.h"
25#include "xfs_ialloc.h"
26
27#include <linux/iversion.h>
28
29/*
30 * Allocate and initialise an xfs_inode.
31 */
32struct xfs_inode *
33xfs_inode_alloc(
34 struct xfs_mount *mp,
35 xfs_ino_t ino)
36{
37 struct xfs_inode *ip;
38
39 /*
40 * XXX: If this didn't occur in transactions, we could drop GFP_NOFAIL
41 * and return NULL here on ENOMEM.
42 */
43 ip = kmem_cache_alloc(xfs_inode_zone, GFP_KERNEL | __GFP_NOFAIL);
44
45 if (inode_init_always(mp->m_super, VFS_I(ip))) {
46 kmem_cache_free(xfs_inode_zone, ip);
47 return NULL;
48 }
49
50 /* VFS doesn't initialise i_mode! */
51 VFS_I(ip)->i_mode = 0;
52
53 XFS_STATS_INC(mp, vn_active);
54 ASSERT(atomic_read(&ip->i_pincount) == 0);
55 ASSERT(!xfs_isiflocked(ip));
56 ASSERT(ip->i_ino == 0);
57
58 /* initialise the xfs inode */
59 ip->i_ino = ino;
60 ip->i_mount = mp;
61 memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
62 ip->i_afp = NULL;
63 ip->i_cowfp = NULL;
64 memset(&ip->i_df, 0, sizeof(ip->i_df));
65 ip->i_flags = 0;
66 ip->i_delayed_blks = 0;
67 memset(&ip->i_d, 0, sizeof(ip->i_d));
68 ip->i_sick = 0;
69 ip->i_checked = 0;
70 INIT_WORK(&ip->i_ioend_work, xfs_end_io);
71 INIT_LIST_HEAD(&ip->i_ioend_list);
72 spin_lock_init(&ip->i_ioend_lock);
73
74 return ip;
75}
76
77STATIC void
78xfs_inode_free_callback(
79 struct rcu_head *head)
80{
81 struct inode *inode = container_of(head, struct inode, i_rcu);
82 struct xfs_inode *ip = XFS_I(inode);
83
84 switch (VFS_I(ip)->i_mode & S_IFMT) {
85 case S_IFREG:
86 case S_IFDIR:
87 case S_IFLNK:
88 xfs_idestroy_fork(&ip->i_df);
89 break;
90 }
91
92 if (ip->i_afp) {
93 xfs_idestroy_fork(ip->i_afp);
94 kmem_cache_free(xfs_ifork_zone, ip->i_afp);
95 }
96 if (ip->i_cowfp) {
97 xfs_idestroy_fork(ip->i_cowfp);
98 kmem_cache_free(xfs_ifork_zone, ip->i_cowfp);
99 }
100 if (ip->i_itemp) {
101 ASSERT(!test_bit(XFS_LI_IN_AIL,
102 &ip->i_itemp->ili_item.li_flags));
103 xfs_inode_item_destroy(ip);
104 ip->i_itemp = NULL;
105 }
106
107 kmem_cache_free(xfs_inode_zone, ip);
108}
109
110static void
111__xfs_inode_free(
112 struct xfs_inode *ip)
113{
114 /* asserts to verify all state is correct here */
115 ASSERT(atomic_read(&ip->i_pincount) == 0);
116 ASSERT(!ip->i_itemp || list_empty(&ip->i_itemp->ili_item.li_bio_list));
117 XFS_STATS_DEC(ip->i_mount, vn_active);
118
119 call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
120}
121
122void
123xfs_inode_free(
124 struct xfs_inode *ip)
125{
126 ASSERT(!xfs_isiflocked(ip));
127
128 /*
129 * Because we use RCU freeing we need to ensure the inode always
130 * appears to be reclaimed with an invalid inode number when in the
131 * free state. The ip->i_flags_lock provides the barrier against lookup
132 * races.
133 */
134 spin_lock(&ip->i_flags_lock);
135 ip->i_flags = XFS_IRECLAIM;
136 ip->i_ino = 0;
137 spin_unlock(&ip->i_flags_lock);
138
139 __xfs_inode_free(ip);
140}
141
142/*
143 * Queue background inode reclaim work if there are reclaimable inodes and there
144 * isn't reclaim work already scheduled or in progress.
145 */
146static void
147xfs_reclaim_work_queue(
148 struct xfs_mount *mp)
149{
150
151 rcu_read_lock();
152 if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
153 queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work,
154 msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
155 }
156 rcu_read_unlock();
157}
158
159static void
160xfs_perag_set_reclaim_tag(
161 struct xfs_perag *pag)
162{
163 struct xfs_mount *mp = pag->pag_mount;
164
165 lockdep_assert_held(&pag->pag_ici_lock);
166 if (pag->pag_ici_reclaimable++)
167 return;
168
169 /* propagate the reclaim tag up into the perag radix tree */
170 spin_lock(&mp->m_perag_lock);
171 radix_tree_tag_set(&mp->m_perag_tree, pag->pag_agno,
172 XFS_ICI_RECLAIM_TAG);
173 spin_unlock(&mp->m_perag_lock);
174
175 /* schedule periodic background inode reclaim */
176 xfs_reclaim_work_queue(mp);
177
178 trace_xfs_perag_set_reclaim(mp, pag->pag_agno, -1, _RET_IP_);
179}
180
181static void
182xfs_perag_clear_reclaim_tag(
183 struct xfs_perag *pag)
184{
185 struct xfs_mount *mp = pag->pag_mount;
186
187 lockdep_assert_held(&pag->pag_ici_lock);
188 if (--pag->pag_ici_reclaimable)
189 return;
190
191 /* clear the reclaim tag from the perag radix tree */
192 spin_lock(&mp->m_perag_lock);
193 radix_tree_tag_clear(&mp->m_perag_tree, pag->pag_agno,
194 XFS_ICI_RECLAIM_TAG);
195 spin_unlock(&mp->m_perag_lock);
196 trace_xfs_perag_clear_reclaim(mp, pag->pag_agno, -1, _RET_IP_);
197}
198
199
200/*
201 * We set the inode flag atomically with the radix tree tag.
202 * Once we get tag lookups on the radix tree, this inode flag
203 * can go away.
204 */
205void
206xfs_inode_set_reclaim_tag(
207 struct xfs_inode *ip)
208{
209 struct xfs_mount *mp = ip->i_mount;
210 struct xfs_perag *pag;
211
212 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
213 spin_lock(&pag->pag_ici_lock);
214 spin_lock(&ip->i_flags_lock);
215
216 radix_tree_tag_set(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ip->i_ino),
217 XFS_ICI_RECLAIM_TAG);
218 xfs_perag_set_reclaim_tag(pag);
219 __xfs_iflags_set(ip, XFS_IRECLAIMABLE);
220
221 spin_unlock(&ip->i_flags_lock);
222 spin_unlock(&pag->pag_ici_lock);
223 xfs_perag_put(pag);
224}
225
226STATIC void
227xfs_inode_clear_reclaim_tag(
228 struct xfs_perag *pag,
229 xfs_ino_t ino)
230{
231 radix_tree_tag_clear(&pag->pag_ici_root,
232 XFS_INO_TO_AGINO(pag->pag_mount, ino),
233 XFS_ICI_RECLAIM_TAG);
234 xfs_perag_clear_reclaim_tag(pag);
235}
236
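/*
 * Wait for another thread to clear XFS_INEW, i.e. to finish constructing an
 * inode that a lookup has found mid-instantiation.
 */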
237static void
238xfs_inew_wait(
239 struct xfs_inode *ip)
240{
241 wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_INEW_BIT);
242 DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_INEW_BIT);
243
244 do {
245 prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
246 if (!xfs_iflags_test(ip, XFS_INEW))
247 break;
248 schedule();
249 } while (true);
250 finish_wait(wq, &wait.wq_entry);
251}
252
253/*
254 * When we recycle a reclaimable inode, we need to re-initialise the VFS inode
255 * part of the structure. This is made more complex by the fact we store
256 * information about the on-disk values in the VFS inode and so we can't just
257 * overwrite the values unconditionally. Hence we save the parameters we
258 * need to retain across reinitialisation, and rewrite them into the VFS inode
259 * after reinitialisation even if it fails.
260 */
261static int
262xfs_reinit_inode(
263 struct xfs_mount *mp,
264 struct inode *inode)
265{
266 int error;
267 uint32_t nlink = inode->i_nlink;
268 uint32_t generation = inode->i_generation;
269 uint64_t version = inode_peek_iversion(inode);
270 umode_t mode = inode->i_mode;
271 dev_t dev = inode->i_rdev;
272 kuid_t uid = inode->i_uid;
273 kgid_t gid = inode->i_gid;
274
275 error = inode_init_always(mp->m_super, inode);
276
277 set_nlink(inode, nlink);
278 inode->i_generation = generation;
279 inode_set_iversion_queried(inode, version);
280 inode->i_mode = mode;
281 inode->i_rdev = dev;
282 inode->i_uid = uid;
283 inode->i_gid = gid;
284 return error;
285}
286
287/*
288 * If we are allocating a new inode, then check that what was returned is
289 * actually a free, empty inode. If we are not allocating an inode, then
290 * check that we didn't find a free inode.
291 *
292 * Returns:
293 * 0 if the inode free state matches the lookup context
294 * -ENOENT if the inode is free and we are not allocating
295 * -EFSCORRUPTED if there is any state mismatch at all
296 */
297static int
298xfs_iget_check_free_state(
299 struct xfs_inode *ip,
300 int flags)
301{
302 if (flags & XFS_IGET_CREATE) {
303 /* should be a free inode */
304 if (VFS_I(ip)->i_mode != 0) {
305 xfs_warn(ip->i_mount,
306"Corruption detected! Free inode 0x%llx not marked free! (mode 0x%x)",
307 ip->i_ino, VFS_I(ip)->i_mode);
308 return -EFSCORRUPTED;
309 }
310
311 if (ip->i_d.di_nblocks != 0) {
312 xfs_warn(ip->i_mount,
313"Corruption detected! Free inode 0x%llx has blocks allocated!",
314 ip->i_ino);
315 return -EFSCORRUPTED;
316 }
317 return 0;
318 }
319
320 /* should be an allocated inode */
321 if (VFS_I(ip)->i_mode == 0)
322 return -ENOENT;
323
324 return 0;
325}
326
327/*
328 * Check the validity of the inode we just found in the cache
329 */
330static int
331xfs_iget_cache_hit(
332 struct xfs_perag *pag,
333 struct xfs_inode *ip,
334 xfs_ino_t ino,
335 int flags,
336 int lock_flags) __releases(RCU)
337{
338 struct inode *inode = VFS_I(ip);
339 struct xfs_mount *mp = ip->i_mount;
340 int error;
341
342 /*
343 * check for re-use of an inode within an RCU grace period due to the
344 * radix tree nodes not being updated yet. We monitor for this by
345 * setting the inode number to zero before freeing the inode structure.
346 * If the inode has been reallocated and set up, then the inode number
347 * will not match, so check for that, too.
348 */
349 spin_lock(&ip->i_flags_lock);
350 if (ip->i_ino != ino) {
351 trace_xfs_iget_skip(ip);
352 XFS_STATS_INC(mp, xs_ig_frecycle);
353 error = -EAGAIN;
354 goto out_error;
355 }
356
357
358 /*
359 * If we are racing with another cache hit that is currently
360 * instantiating this inode or currently recycling it out of
361	 * reclaimable state, wait for the initialisation to complete
362 * before continuing.
363 *
364 * XXX(hch): eventually we should do something equivalent to
365 * wait_on_inode to wait for these flags to be cleared
366 * instead of polling for it.
367 */
368 if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) {
369 trace_xfs_iget_skip(ip);
370 XFS_STATS_INC(mp, xs_ig_frecycle);
371 error = -EAGAIN;
372 goto out_error;
373 }
374
375 /*
376 * Check the inode free state is valid. This also detects lookup
377 * racing with unlinks.
378 */
379 error = xfs_iget_check_free_state(ip, flags);
380 if (error)
381 goto out_error;
382
383 /*
384 * If IRECLAIMABLE is set, we've torn down the VFS inode already.
385	 * Need to carefully get it back into a usable state.
386 */
387 if (ip->i_flags & XFS_IRECLAIMABLE) {
388 trace_xfs_iget_reclaim(ip);
389
390 if (flags & XFS_IGET_INCORE) {
391 error = -EAGAIN;
392 goto out_error;
393 }
394
395 /*
396 * We need to set XFS_IRECLAIM to prevent xfs_reclaim_inode
397 * from stomping over us while we recycle the inode. We can't
398 * clear the radix tree reclaimable tag yet as it requires
399 * pag_ici_lock to be held exclusive.
400 */
401 ip->i_flags |= XFS_IRECLAIM;
402
403 spin_unlock(&ip->i_flags_lock);
404 rcu_read_unlock();
405
406 ASSERT(!rwsem_is_locked(&inode->i_rwsem));
407 error = xfs_reinit_inode(mp, inode);
408 if (error) {
409 bool wake;
410 /*
411 * Re-initializing the inode failed, and we are in deep
412 * trouble. Try to re-add it to the reclaim list.
413 */
414 rcu_read_lock();
415 spin_lock(&ip->i_flags_lock);
416 wake = !!__xfs_iflags_test(ip, XFS_INEW);
417 ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
418 if (wake)
419 wake_up_bit(&ip->i_flags, __XFS_INEW_BIT);
420 ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
421 trace_xfs_iget_reclaim_fail(ip);
422 goto out_error;
423 }
424
425 spin_lock(&pag->pag_ici_lock);
426 spin_lock(&ip->i_flags_lock);
427
428 /*
429 * Clear the per-lifetime state in the inode as we are now
430 * effectively a new inode and need to return to the initial
431 * state before reuse occurs.
432 */
433 ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
434 ip->i_flags |= XFS_INEW;
435 xfs_inode_clear_reclaim_tag(pag, ip->i_ino);
436 inode->i_state = I_NEW;
437 ip->i_sick = 0;
438 ip->i_checked = 0;
439
440 spin_unlock(&ip->i_flags_lock);
441 spin_unlock(&pag->pag_ici_lock);
442 } else {
443 /* If the VFS inode is being torn down, pause and try again. */
444 if (!igrab(inode)) {
445 trace_xfs_iget_skip(ip);
446 error = -EAGAIN;
447 goto out_error;
448 }
449
450 /* We've got a live one. */
451 spin_unlock(&ip->i_flags_lock);
452 rcu_read_unlock();
453 trace_xfs_iget_hit(ip);
454 }
455
456 if (lock_flags != 0)
457 xfs_ilock(ip, lock_flags);
458
459 if (!(flags & XFS_IGET_INCORE))
460 xfs_iflags_clear(ip, XFS_ISTALE);
461 XFS_STATS_INC(mp, xs_ig_found);
462
463 return 0;
464
465out_error:
466 spin_unlock(&ip->i_flags_lock);
467 rcu_read_unlock();
468 return error;
469}
470
471
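/*
 * Inode cache miss: allocate a new in-core inode, read the on-disk inode (or
 * build a fresh core for new v5 allocations), and insert it into the per-AG
 * radix tree with XFS_INEW set so that concurrent lookups can see it is
 * still under construction.
 */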
472static int
473xfs_iget_cache_miss(
474 struct xfs_mount *mp,
475 struct xfs_perag *pag,
476 xfs_trans_t *tp,
477 xfs_ino_t ino,
478 struct xfs_inode **ipp,
479 int flags,
480 int lock_flags)
481{
482 struct xfs_inode *ip;
483 int error;
484 xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ino);
485 int iflags;
486
487 ip = xfs_inode_alloc(mp, ino);
488 if (!ip)
489 return -ENOMEM;
490
491 error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, flags);
492 if (error)
493 goto out_destroy;
494
495 /*
496 * For version 5 superblocks, if we are initialising a new inode and we
497 * are not utilising the XFS_MOUNT_IKEEP inode cluster mode, we can
498 * simply build the new inode core with a random generation number.
499 *
500 * For version 4 (and older) superblocks, log recovery is dependent on
501 * the di_flushiter field being initialised from the current on-disk
502 * value and hence we must also read the inode off disk even when
503 * initializing new inodes.
504 */
505 if (xfs_sb_version_has_v3inode(&mp->m_sb) &&
506 (flags & XFS_IGET_CREATE) && !(mp->m_flags & XFS_MOUNT_IKEEP)) {
507 VFS_I(ip)->i_generation = prandom_u32();
508 } else {
509 struct xfs_dinode *dip;
510 struct xfs_buf *bp;
511
512 error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &bp, 0);
513 if (error)
514 goto out_destroy;
515
516 error = xfs_inode_from_disk(ip, dip);
517 if (!error)
518 xfs_buf_set_ref(bp, XFS_INO_REF);
519 xfs_trans_brelse(tp, bp);
520
521 if (error)
522 goto out_destroy;
523 }
524
525 trace_xfs_iget_miss(ip);
526
527 /*
528 * Check the inode free state is valid. This also detects lookup
529 * racing with unlinks.
530 */
531 error = xfs_iget_check_free_state(ip, flags);
532 if (error)
533 goto out_destroy;
534
535 /*
536 * Preload the radix tree so we can insert safely under the
537 * write spinlock. Note that we cannot sleep inside the preload
538 * region. Since we can be called from transaction context, don't
539 * recurse into the file system.
540 */
541 if (radix_tree_preload(GFP_NOFS)) {
542 error = -EAGAIN;
543 goto out_destroy;
544 }
545
546 /*
547 * Because the inode hasn't been added to the radix-tree yet it can't
548 * be found by another thread, so we can do the non-sleeping lock here.
549 */
550 if (lock_flags) {
551 if (!xfs_ilock_nowait(ip, lock_flags))
552 BUG();
553 }
554
555 /*
556	 * These values must be set before inserting the inode into the radix
557	 * tree: the moment it is inserted, a concurrent lookup (allowed by the
558	 * RCU locking mechanism) can find it, and that lookup must see that this
559	 * is an inode currently under construction (i.e. that XFS_INEW is set).
560 * The ip->i_flags_lock that protects the XFS_INEW flag forms the
561 * memory barrier that ensures this detection works correctly at lookup
562 * time.
563 */
564 iflags = XFS_INEW;
565 if (flags & XFS_IGET_DONTCACHE)
566 d_mark_dontcache(VFS_I(ip));
567 ip->i_udquot = NULL;
568 ip->i_gdquot = NULL;
569 ip->i_pdquot = NULL;
570 xfs_iflags_set(ip, iflags);
571
572 /* insert the new inode */
573 spin_lock(&pag->pag_ici_lock);
574 error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
575 if (unlikely(error)) {
576 WARN_ON(error != -EEXIST);
577 XFS_STATS_INC(mp, xs_ig_dup);
578 error = -EAGAIN;
579 goto out_preload_end;
580 }
581 spin_unlock(&pag->pag_ici_lock);
582 radix_tree_preload_end();
583
584 *ipp = ip;
585 return 0;
586
587out_preload_end:
588 spin_unlock(&pag->pag_ici_lock);
589 radix_tree_preload_end();
590 if (lock_flags)
591 xfs_iunlock(ip, lock_flags);
592out_destroy:
593 __destroy_inode(VFS_I(ip));
594 xfs_inode_free(ip);
595 return error;
596}
597
598/*
599 * Look up an inode by number in the given file system. The inode is looked up
600 * in the cache held in each AG. If the inode is found in the cache, initialise
601 * the vfs inode if necessary.
602 *
603 * If it is not in core, read it in from the file system's device, add it to the
604 * cache and initialise the vfs inode.
605 *
606 * The inode is locked according to the value of the lock_flags parameter.
607 * Inode lookup is only done during metadata operations and not as part of the
608 * data IO path. Hence we only allow locking of the XFS_ILOCK during lookup.
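 *
 * A typical metadata-path caller (an illustrative sketch, not code taken
 * from this file) looks something like:
 *
 *	error = xfs_iget(mp, tp, ino, 0, XFS_ILOCK_EXCL, &ip);
 *	if (error)
 *		return error;
 *	...operate on ip...
 *	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 *	xfs_irele(ip);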
609 */
610int
611xfs_iget(
612 struct xfs_mount *mp,
613 struct xfs_trans *tp,
614 xfs_ino_t ino,
615 uint flags,
616 uint lock_flags,
617 struct xfs_inode **ipp)
618{
619 struct xfs_inode *ip;
620 struct xfs_perag *pag;
621 xfs_agino_t agino;
622 int error;
623
624 ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0);
625
626 /* reject inode numbers outside existing AGs */
627 if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
628 return -EINVAL;
629
630 XFS_STATS_INC(mp, xs_ig_attempts);
631
632 /* get the perag structure and ensure that it's inode capable */
633 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
634 agino = XFS_INO_TO_AGINO(mp, ino);
635
636again:
637 error = 0;
638 rcu_read_lock();
639 ip = radix_tree_lookup(&pag->pag_ici_root, agino);
640
641 if (ip) {
642 error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
643 if (error)
644 goto out_error_or_again;
645 } else {
646 rcu_read_unlock();
647 if (flags & XFS_IGET_INCORE) {
648 error = -ENODATA;
649 goto out_error_or_again;
650 }
651 XFS_STATS_INC(mp, xs_ig_missed);
652
653 error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
654 flags, lock_flags);
655 if (error)
656 goto out_error_or_again;
657 }
658 xfs_perag_put(pag);
659
660 *ipp = ip;
661
662 /*
663 * If we have a real type for an on-disk inode, we can setup the inode
664 * now. If it's a new inode being created, xfs_ialloc will handle it.
665 */
666 if (xfs_iflags_test(ip, XFS_INEW) && VFS_I(ip)->i_mode != 0)
667 xfs_setup_existing_inode(ip);
668 return 0;
669
670out_error_or_again:
671 if (!(flags & XFS_IGET_INCORE) && error == -EAGAIN) {
672 delay(1);
673 goto again;
674 }
675 xfs_perag_put(pag);
676 return error;
677}
678
679/*
680 * "Is this a cached inode that's also allocated?"
681 *
682 * Look up an inode by number in the given file system. If the inode is
683 * in cache and isn't in purgatory, return 1 if the inode is allocated
684 * and 0 if it is not. For all other cases (not in cache, being torn
685 * down, etc.), return a negative error code.
686 *
687 * The caller has to prevent inode allocation and freeing activity,
688 * presumably by locking the AGI buffer. This is to ensure that an
689 * inode cannot transition from allocated to freed until the caller is
690 * ready to allow that. If the inode is in an intermediate state (new,
691 * reclaimable, or being reclaimed), -EAGAIN will be returned; if the
692 * inode is not in the cache, -ENOENT will be returned. The caller must
693 * deal with these scenarios appropriately.
694 *
695 * This is a specialized use case for the online scrubber; if you're
696 * reading this, you probably want xfs_iget.
697 */
698int
699xfs_icache_inode_is_allocated(
700 struct xfs_mount *mp,
701 struct xfs_trans *tp,
702 xfs_ino_t ino,
703 bool *inuse)
704{
705 struct xfs_inode *ip;
706 int error;
707
708 error = xfs_iget(mp, tp, ino, XFS_IGET_INCORE, 0, &ip);
709 if (error)
710 return error;
711
712 *inuse = !!(VFS_I(ip)->i_mode);
713 xfs_irele(ip);
714 return 0;
715}
716
717/*
718 * The inode lookup is done in batches to keep the amount of lock traffic and
719 * radix tree lookups to a minimum. The batch size is a trade off between
720 * lookup reduction and stack usage. This is in the reclaim path, so we can't
721 * be too greedy.
722 */
723#define XFS_LOOKUP_BATCH 32
724
725/*
726 * Decide if the given @ip is eligible to be a part of the inode walk, and
727 * grab it if so. Returns true if it's ready to go or false if we should just
728 * ignore it.
729 */
730STATIC bool
731xfs_inode_walk_ag_grab(
732 struct xfs_inode *ip,
733 int flags)
734{
735 struct inode *inode = VFS_I(ip);
736 bool newinos = !!(flags & XFS_INODE_WALK_INEW_WAIT);
737
738 ASSERT(rcu_read_lock_held());
739
740 /* Check for stale RCU freed inode */
741 spin_lock(&ip->i_flags_lock);
742 if (!ip->i_ino)
743 goto out_unlock_noent;
744
745 /* avoid new or reclaimable inodes. Leave for reclaim code to flush */
746 if ((!newinos && __xfs_iflags_test(ip, XFS_INEW)) ||
747 __xfs_iflags_test(ip, XFS_IRECLAIMABLE | XFS_IRECLAIM))
748 goto out_unlock_noent;
749 spin_unlock(&ip->i_flags_lock);
750
751 /* nothing to sync during shutdown */
752 if (XFS_FORCED_SHUTDOWN(ip->i_mount))
753 return false;
754
755	/* If we can't grab the inode, it must be on its way to reclaim. */
756 if (!igrab(inode))
757 return false;
758
759 /* inode is valid */
760 return true;
761
762out_unlock_noent:
763 spin_unlock(&ip->i_flags_lock);
764 return false;
765}
766
767/*
768 * For a given per-AG structure @pag, grab, @execute, and rele all incore
769 * inodes with the given radix tree @tag.
770 */
771STATIC int
772xfs_inode_walk_ag(
773 struct xfs_perag *pag,
774 int iter_flags,
775 int (*execute)(struct xfs_inode *ip, void *args),
776 void *args,
777 int tag)
778{
779 struct xfs_mount *mp = pag->pag_mount;
780 uint32_t first_index;
781 int last_error = 0;
782 int skipped;
783 bool done;
784 int nr_found;
785
786restart:
787 done = false;
788 skipped = 0;
789 first_index = 0;
790 nr_found = 0;
791 do {
792 struct xfs_inode *batch[XFS_LOOKUP_BATCH];
793 int error = 0;
794 int i;
795
796 rcu_read_lock();
797
798 if (tag == XFS_ICI_NO_TAG)
799 nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
800 (void **)batch, first_index,
801 XFS_LOOKUP_BATCH);
802 else
803 nr_found = radix_tree_gang_lookup_tag(
804 &pag->pag_ici_root,
805 (void **) batch, first_index,
806 XFS_LOOKUP_BATCH, tag);
807
808 if (!nr_found) {
809 rcu_read_unlock();
810 break;
811 }
812
813 /*
814		 * Grab the inodes before we drop the lock. If we found
815		 * nothing, nr_found == 0 and the loop will be skipped.
816 */
817 for (i = 0; i < nr_found; i++) {
818 struct xfs_inode *ip = batch[i];
819
820 if (done || !xfs_inode_walk_ag_grab(ip, iter_flags))
821 batch[i] = NULL;
822
823 /*
824 * Update the index for the next lookup. Catch
825 * overflows into the next AG range which can occur if
826 * we have inodes in the last block of the AG and we
827 * are currently pointing to the last inode.
828 *
829 * Because we may see inodes that are from the wrong AG
830 * due to RCU freeing and reallocation, only update the
831			 * index if it lies in this AG. It was a race that led
832 * us to see this inode, so another lookup from the
833 * same index will not find it again.
834 */
835 if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
836 continue;
837 first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
838 if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
839 done = true;
840 }
841
842 /* unlock now we've grabbed the inodes. */
843 rcu_read_unlock();
844
845 for (i = 0; i < nr_found; i++) {
846 if (!batch[i])
847 continue;
848 if ((iter_flags & XFS_INODE_WALK_INEW_WAIT) &&
849 xfs_iflags_test(batch[i], XFS_INEW))
850 xfs_inew_wait(batch[i]);
851 error = execute(batch[i], args);
852 xfs_irele(batch[i]);
853 if (error == -EAGAIN) {
854 skipped++;
855 continue;
856 }
857 if (error && last_error != -EFSCORRUPTED)
858 last_error = error;
859 }
860
861 /* bail out if the filesystem is corrupted. */
862 if (error == -EFSCORRUPTED)
863 break;
864
865 cond_resched();
866
867 } while (nr_found && !done);
868
869 if (skipped) {
870 delay(1);
871 goto restart;
872 }
873 return last_error;
874}
875
876/* Fetch the next (possibly tagged) per-AG structure. */
877static inline struct xfs_perag *
878xfs_inode_walk_get_perag(
879 struct xfs_mount *mp,
880 xfs_agnumber_t agno,
881 int tag)
882{
883 if (tag == XFS_ICI_NO_TAG)
884 return xfs_perag_get(mp, agno);
885 return xfs_perag_get_tag(mp, agno, tag);
886}
887
888/*
889 * Call the @execute function on all incore inodes matching the radix tree
890 * @tag.
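 *
 * For example, xfs_icache_free_eofblocks() below passes
 * xfs_inode_free_eofblocks() as @execute together with
 * XFS_ICI_EOFBLOCKS_TAG.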
891 */
892int
893xfs_inode_walk(
894 struct xfs_mount *mp,
895 int iter_flags,
896 int (*execute)(struct xfs_inode *ip, void *args),
897 void *args,
898 int tag)
899{
900 struct xfs_perag *pag;
901 int error = 0;
902 int last_error = 0;
903 xfs_agnumber_t ag;
904
905 ag = 0;
906 while ((pag = xfs_inode_walk_get_perag(mp, ag, tag))) {
907 ag = pag->pag_agno + 1;
908 error = xfs_inode_walk_ag(pag, iter_flags, execute, args, tag);
909 xfs_perag_put(pag);
910 if (error) {
911 last_error = error;
912 if (error == -EFSCORRUPTED)
913 break;
914 }
915 }
916 return last_error;
917}
918
919/*
920 * Background scanning to trim post-EOF preallocated space. This is queued
921 * based on the 'speculative_prealloc_lifetime' tunable (5m by default).
922 */
923void
924xfs_queue_eofblocks(
925 struct xfs_mount *mp)
926{
927 rcu_read_lock();
928 if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_EOFBLOCKS_TAG))
929 queue_delayed_work(mp->m_eofblocks_workqueue,
930 &mp->m_eofblocks_work,
931 msecs_to_jiffies(xfs_eofb_secs * 1000));
932 rcu_read_unlock();
933}
934
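/*
 * Background worker: trim post-EOF speculative preallocations across the
 * filesystem, then requeue itself if any inodes are still tagged.
 */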
935void
936xfs_eofblocks_worker(
937 struct work_struct *work)
938{
939 struct xfs_mount *mp = container_of(to_delayed_work(work),
940 struct xfs_mount, m_eofblocks_work);
941
942 if (!sb_start_write_trylock(mp->m_super))
943 return;
944 xfs_icache_free_eofblocks(mp, NULL);
945 sb_end_write(mp->m_super);
946
947 xfs_queue_eofblocks(mp);
948}
949
950/*
951 * Background scanning to trim preallocated CoW space. This is queued
952 * based on the 'speculative_cow_prealloc_lifetime' tunable (5m by default).
953 * (We'll just piggyback on the post-EOF prealloc space workqueue.)
954 */
955void
956xfs_queue_cowblocks(
957 struct xfs_mount *mp)
958{
959 rcu_read_lock();
960 if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_COWBLOCKS_TAG))
961 queue_delayed_work(mp->m_eofblocks_workqueue,
962 &mp->m_cowblocks_work,
963 msecs_to_jiffies(xfs_cowb_secs * 1000));
964 rcu_read_unlock();
965}
966
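/*
 * Background worker: reclaim leftover CoW reservations across the
 * filesystem, then requeue itself if any inodes are still tagged.
 */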
967void
968xfs_cowblocks_worker(
969 struct work_struct *work)
970{
971 struct xfs_mount *mp = container_of(to_delayed_work(work),
972 struct xfs_mount, m_cowblocks_work);
973
974 if (!sb_start_write_trylock(mp->m_super))
975 return;
976 xfs_icache_free_cowblocks(mp, NULL);
977 sb_end_write(mp->m_super);
978
979 xfs_queue_cowblocks(mp);
980}
981
982/*
983 * Grab the inode for reclaim exclusively.
984 *
985 * We have found this inode via a lookup under RCU, so the inode may have
986 * already been freed, or it may be in the process of being recycled by
987 * xfs_iget(). In both cases, the inode will have XFS_IRECLAIM set. If the inode
988 * has been fully recycled by the time we get the i_flags_lock, XFS_IRECLAIMABLE
989 * will not be set. Hence we need to check for both these flag conditions to
990 * avoid inodes that are no longer reclaim candidates.
991 *
992 * Note: checking for other state flags here, under the i_flags_lock or not, is
993 * racy and should be avoided. Those races should be resolved only after we have
994 * ensured that we are able to reclaim this inode and the world can see that we
995 * are going to reclaim it.
996 *
997 * Return true if we grabbed it, false otherwise.
998 */
999static bool
1000xfs_reclaim_inode_grab(
1001 struct xfs_inode *ip)
1002{
1003 ASSERT(rcu_read_lock_held());
1004
1005 spin_lock(&ip->i_flags_lock);
1006 if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
1007 __xfs_iflags_test(ip, XFS_IRECLAIM)) {
1008 /* not a reclaim candidate. */
1009 spin_unlock(&ip->i_flags_lock);
1010 return false;
1011 }
1012 __xfs_iflags_set(ip, XFS_IRECLAIM);
1013 spin_unlock(&ip->i_flags_lock);
1014 return true;
1015}
1016
1017/*
1018 * Inode reclaim is non-blocking, so the default action if progress cannot be
1019 * made is to "requeue" the inode for reclaim by unlocking it and clearing the
1020 * XFS_IRECLAIM flag. If we are in a shutdown state, we don't care about
1021 * blocking anymore and hence we can wait on the inode until we are able to
1022 * reclaim it.
1023 *
1024 * We do no IO here - if callers require inodes to be cleaned they must push the
1025 * AIL first to trigger writeback of dirty inodes. This enables writeback to be
1026 * done in the background in a non-blocking manner, and enables memory reclaim
1027 * to make progress without blocking.
1028 */
1029static void
1030xfs_reclaim_inode(
1031 struct xfs_inode *ip,
1032 struct xfs_perag *pag)
1033{
1034 xfs_ino_t ino = ip->i_ino; /* for radix_tree_delete */
1035
1036 if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
1037 goto out;
1038 if (!xfs_iflock_nowait(ip))
1039 goto out_iunlock;
1040
1041 if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
1042 xfs_iunpin_wait(ip);
1043 /* xfs_iflush_abort() drops the flush lock */
1044 xfs_iflush_abort(ip);
1045 goto reclaim;
1046 }
1047 if (xfs_ipincount(ip))
1048 goto out_ifunlock;
1049 if (!xfs_inode_clean(ip))
1050 goto out_ifunlock;
1051
1052 xfs_ifunlock(ip);
1053reclaim:
1054 ASSERT(!xfs_isiflocked(ip));
1055
1056 /*
1057 * Because we use RCU freeing we need to ensure the inode always appears
1058 * to be reclaimed with an invalid inode number when in the free state.
1059 * We do this as early as possible under the ILOCK so that
1060 * xfs_iflush_cluster() and xfs_ifree_cluster() can be guaranteed to
1061 * detect races with us here. By doing this, we guarantee that once
1062 * xfs_iflush_cluster() or xfs_ifree_cluster() has locked XFS_ILOCK that
1063 * it will see either a valid inode that will serialise correctly, or it
1064 * will see an invalid inode that it can skip.
1065 */
1066 spin_lock(&ip->i_flags_lock);
1067 ip->i_flags = XFS_IRECLAIM;
1068 ip->i_ino = 0;
1069 spin_unlock(&ip->i_flags_lock);
1070
1071 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1072
1073 XFS_STATS_INC(ip->i_mount, xs_ig_reclaims);
1074 /*
1075 * Remove the inode from the per-AG radix tree.
1076 *
1077 * Because radix_tree_delete won't complain even if the item was never
1078	 * added to the tree, assert that it's been there before to catch
1079	 * problems with the inode lifetime early on.
1080 */
1081 spin_lock(&pag->pag_ici_lock);
1082 if (!radix_tree_delete(&pag->pag_ici_root,
1083 XFS_INO_TO_AGINO(ip->i_mount, ino)))
1084 ASSERT(0);
1085 xfs_perag_clear_reclaim_tag(pag);
1086 spin_unlock(&pag->pag_ici_lock);
1087
1088 /*
1089 * Here we do an (almost) spurious inode lock in order to coordinate
1090 * with inode cache radix tree lookups. This is because the lookup
1091 * can reference the inodes in the cache without taking references.
1092 *
1093 * We make that OK here by ensuring that we wait until the inode is
1094 * unlocked after the lookup before we go ahead and free it.
1095 */
1096 xfs_ilock(ip, XFS_ILOCK_EXCL);
1097 xfs_qm_dqdetach(ip);
1098 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1099 ASSERT(xfs_inode_clean(ip));
1100
1101 __xfs_inode_free(ip);
1102 return;
1103
1104out_ifunlock:
1105 xfs_ifunlock(ip);
1106out_iunlock:
1107 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1108out:
1109 xfs_iflags_clear(ip, XFS_IRECLAIM);
1110}
1111
1112/*
1113 * Walk the AGs and reclaim the inodes in them. Even if the filesystem is
1114 * corrupted, we still want to try to reclaim all the inodes. If we don't,
1115 * then a shutdown during the filesystem unmount reclaim walk will leak all
1116 * the unreclaimed inodes.
1117 *
1118 * This function returns nothing. Callers that need to block until all dirty
1119 * inodes are written back and reclaimed loop on the per-AG reclaim tags
1120 * instead (see xfs_reclaim_inodes()).
1121 */
1122static void
1123xfs_reclaim_inodes_ag(
1124 struct xfs_mount *mp,
1125 int *nr_to_scan)
1126{
1127 struct xfs_perag *pag;
1128 xfs_agnumber_t ag = 0;
1129
1130 while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
1131 unsigned long first_index = 0;
1132 int done = 0;
1133 int nr_found = 0;
1134
1135 ag = pag->pag_agno + 1;
1136
1137 first_index = READ_ONCE(pag->pag_ici_reclaim_cursor);
1138 do {
1139 struct xfs_inode *batch[XFS_LOOKUP_BATCH];
1140 int i;
1141
1142 rcu_read_lock();
1143 nr_found = radix_tree_gang_lookup_tag(
1144 &pag->pag_ici_root,
1145 (void **)batch, first_index,
1146 XFS_LOOKUP_BATCH,
1147 XFS_ICI_RECLAIM_TAG);
1148 if (!nr_found) {
1149 done = 1;
1150 rcu_read_unlock();
1151 break;
1152 }
1153
1154 /*
1155			 * Grab the inodes before we drop the lock. If we found
1156			 * nothing, nr_found == 0 and the loop will be skipped.
1157 */
1158 for (i = 0; i < nr_found; i++) {
1159 struct xfs_inode *ip = batch[i];
1160
1161 if (done || !xfs_reclaim_inode_grab(ip))
1162 batch[i] = NULL;
1163
1164 /*
1165 * Update the index for the next lookup. Catch
1166 * overflows into the next AG range which can
1167 * occur if we have inodes in the last block of
1168 * the AG and we are currently pointing to the
1169 * last inode.
1170 *
1171 * Because we may see inodes that are from the
1172 * wrong AG due to RCU freeing and
1173 * reallocation, only update the index if it
1174				 * lies in this AG. It was a race that led us
1175 * to see this inode, so another lookup from
1176 * the same index will not find it again.
1177 */
1178 if (XFS_INO_TO_AGNO(mp, ip->i_ino) !=
1179 pag->pag_agno)
1180 continue;
1181 first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
1182 if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
1183 done = 1;
1184 }
1185
1186 /* unlock now we've grabbed the inodes. */
1187 rcu_read_unlock();
1188
1189 for (i = 0; i < nr_found; i++) {
1190 if (batch[i])
1191 xfs_reclaim_inode(batch[i], pag);
1192 }
1193
1194 *nr_to_scan -= XFS_LOOKUP_BATCH;
1195 cond_resched();
1196 } while (nr_found && !done && *nr_to_scan > 0);
1197
1198 if (done)
1199 first_index = 0;
1200 WRITE_ONCE(pag->pag_ici_reclaim_cursor, first_index);
1201 xfs_perag_put(pag);
1202 }
1203}
1204
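/*
 * Reclaim all reclaimable inodes, blocking until done: push the entire AIL
 * synchronously so dirty inodes are written back, walk the AGs, and repeat
 * until no AG carries the reclaim tag any more.
 */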
1205void
1206xfs_reclaim_inodes(
1207 struct xfs_mount *mp)
1208{
1209 int nr_to_scan = INT_MAX;
1210
1211 while (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
1212 xfs_ail_push_all_sync(mp->m_ail);
1213 xfs_reclaim_inodes_ag(mp, &nr_to_scan);
1214	}
1215}
1216
1217/*
1218 * The shrinker infrastructure determines how many inodes we should scan for
1219 * reclaim. We want as many clean inodes ready to reclaim as possible, so we
1220 * push the AIL here. We also want to proactively free up memory if we can to
1221 * minimise the amount of work memory reclaim has to do so we kick the
1222 * background reclaim if it isn't already scheduled.
1223 */
1224long
1225xfs_reclaim_inodes_nr(
1226 struct xfs_mount *mp,
1227 int nr_to_scan)
1228{
1229 /* kick background reclaimer and push the AIL */
1230 xfs_reclaim_work_queue(mp);
1231 xfs_ail_push_all(mp->m_ail);
1232
1233 xfs_reclaim_inodes_ag(mp, &nr_to_scan);
1234 return 0;
1235}
1236
1237/*
1238 * Return the number of reclaimable inodes in the filesystem for
1239 * the shrinker to determine how much to reclaim.
1240 */
1241int
1242xfs_reclaim_inodes_count(
1243 struct xfs_mount *mp)
1244{
1245 struct xfs_perag *pag;
1246 xfs_agnumber_t ag = 0;
1247 int reclaimable = 0;
1248
1249 while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
1250 ag = pag->pag_agno + 1;
1251 reclaimable += pag->pag_ici_reclaimable;
1252 xfs_perag_put(pag);
1253 }
1254 return reclaimable;
1255}
1256
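/*
 * Intersection-based inode filtering: the inode matches only if every
 * criterion set in @eofb (uid, gid, project id) matches. Compare with the
 * union variant below.
 */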
1257STATIC bool
1258xfs_inode_match_id(
1259 struct xfs_inode *ip,
1260 struct xfs_eofblocks *eofb)
1261{
1262 if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) &&
1263 !uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid))
1264 return false;
1265
1266 if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) &&
1267 !gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid))
1268 return false;
1269
1270 if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
1271 ip->i_d.di_projid != eofb->eof_prid)
1272 return false;
1273
1274 return true;
1275}
1276
1277/*
1278 * A union-based inode filtering algorithm. Process the inode if any of the
1279 * criteria match. This is for global/internal scans only.
1280 */
1281STATIC bool
1282xfs_inode_match_id_union(
1283 struct xfs_inode *ip,
1284 struct xfs_eofblocks *eofb)
1285{
1286 if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) &&
1287 uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid))
1288 return true;
1289
1290 if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) &&
1291 gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid))
1292 return true;
1293
1294 if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
1295 ip->i_d.di_projid == eofb->eof_prid)
1296 return true;
1297
1298 return false;
1299}
1300
1301/*
1302 * Is this inode @ip eligible for eof/cow block reclamation, given some
1303 * filtering parameters @eofb? The inode is eligible if @eofb is null or
1304 * if the predicate functions match.
1305 */
1306static bool
1307xfs_inode_matches_eofb(
1308 struct xfs_inode *ip,
1309 struct xfs_eofblocks *eofb)
1310{
1311 bool match;
1312
1313 if (!eofb)
1314 return true;
1315
1316 if (eofb->eof_flags & XFS_EOF_FLAGS_UNION)
1317 match = xfs_inode_match_id_union(ip, eofb);
1318 else
1319 match = xfs_inode_match_id(ip, eofb);
1320 if (!match)
1321 return false;
1322
1323 /* skip the inode if the file size is too small */
1324 if ((eofb->eof_flags & XFS_EOF_FLAGS_MINFILESIZE) &&
1325 XFS_ISIZE(ip) < eofb->eof_min_file_size)
1326 return false;
1327
1328 return true;
1329}
1330
1331/*
1332 * This is a fast pass over the inode cache to try to get reclaim moving on as
1333 * many inodes as possible in a short period of time. It kicks itself every few
1334 * seconds, as well as being kicked by the inode cache shrinker when memory
1335 * goes low.
1336 */
1337void
1338xfs_reclaim_worker(
1339 struct work_struct *work)
1340{
1341 struct xfs_mount *mp = container_of(to_delayed_work(work),
1342 struct xfs_mount, m_reclaim_work);
1343 int nr_to_scan = INT_MAX;
1344
1345 xfs_reclaim_inodes_ag(mp, &nr_to_scan);
1346 xfs_reclaim_work_queue(mp);
1347}
1348
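/*
 * Per-inode callback for the post-EOF block scan: free speculative
 * preallocations beyond EOF if the inode passes the @eofb filters. The scan
 * stays non-blocking unless the caller asked for a synchronous pass.
 */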
1349STATIC int
1350xfs_inode_free_eofblocks(
1351 struct xfs_inode *ip,
1352 void *args)
1353{
1354 struct xfs_eofblocks *eofb = args;
1355 bool wait;
1356 int ret;
1357
1358 wait = eofb && (eofb->eof_flags & XFS_EOF_FLAGS_SYNC);
1359
1360 if (!xfs_can_free_eofblocks(ip, false)) {
1361 /* inode could be preallocated or append-only */
1362 trace_xfs_inode_free_eofblocks_invalid(ip);
1363 xfs_inode_clear_eofblocks_tag(ip);
1364 return 0;
1365 }
1366
1367 /*
1368	 * If the mapping is dirty, the operation can block and wait for some
1369 * time. Unless we are waiting, skip it.
1370 */
1371 if (!wait && mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY))
1372 return 0;
1373
1374 if (!xfs_inode_matches_eofb(ip, eofb))
1375 return 0;
1376
1377 /*
1378 * If the caller is waiting, return -EAGAIN to keep the background
1379 * scanner moving and revisit the inode in a subsequent pass.
1380 */
1381 if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
1382 if (wait)
1383 return -EAGAIN;
1384 return 0;
1385 }
1386
1387 ret = xfs_free_eofblocks(ip);
1388 xfs_iunlock(ip, XFS_IOLOCK_EXCL);
1389
1390 return ret;
1391}
1392
1393int
1394xfs_icache_free_eofblocks(
1395 struct xfs_mount *mp,
1396 struct xfs_eofblocks *eofb)
1397{
1398 return xfs_inode_walk(mp, 0, xfs_inode_free_eofblocks, eofb,
1399 XFS_ICI_EOFBLOCKS_TAG);
1400}
1401
1402/*
1403 * Run eofblocks scans on the quotas applicable to the inode. For inodes with
1404 * multiple quotas, we don't know exactly which quota caused an allocation
1405 * failure. We make a best effort by including each quota under low free space
1406 * conditions (less than 1% free space) in the scan.
1407 */
1408static int
1409__xfs_inode_free_quota_eofblocks(
1410 struct xfs_inode *ip,
1411 int (*execute)(struct xfs_mount *mp,
1412 struct xfs_eofblocks *eofb))
1413{
1414 int scan = 0;
1415 struct xfs_eofblocks eofb = {0};
1416 struct xfs_dquot *dq;
1417
1418 /*
1419 * Run a sync scan to increase effectiveness and use the union filter to
1420 * cover all applicable quotas in a single scan.
1421 */
1422 eofb.eof_flags = XFS_EOF_FLAGS_UNION|XFS_EOF_FLAGS_SYNC;
1423
1424 if (XFS_IS_UQUOTA_ENFORCED(ip->i_mount)) {
1425 dq = xfs_inode_dquot(ip, XFS_DQTYPE_USER);
1426 if (dq && xfs_dquot_lowsp(dq)) {
1427 eofb.eof_uid = VFS_I(ip)->i_uid;
1428 eofb.eof_flags |= XFS_EOF_FLAGS_UID;
1429 scan = 1;
1430 }
1431 }
1432
1433 if (XFS_IS_GQUOTA_ENFORCED(ip->i_mount)) {
1434 dq = xfs_inode_dquot(ip, XFS_DQTYPE_GROUP);
1435 if (dq && xfs_dquot_lowsp(dq)) {
1436 eofb.eof_gid = VFS_I(ip)->i_gid;
1437 eofb.eof_flags |= XFS_EOF_FLAGS_GID;
1438 scan = 1;
1439 }
1440 }
1441
1442 if (scan)
1443 execute(ip->i_mount, &eofb);
1444
1445 return scan;
1446}
1447
1448int
1449xfs_inode_free_quota_eofblocks(
1450 struct xfs_inode *ip)
1451{
1452 return __xfs_inode_free_quota_eofblocks(ip, xfs_icache_free_eofblocks);
1453}
1454
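/* Convert a radix tree tag into the matching in-core inode flag. */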
1455static inline unsigned long
1456xfs_iflag_for_tag(
1457 int tag)
1458{
1459 switch (tag) {
1460 case XFS_ICI_EOFBLOCKS_TAG:
1461 return XFS_IEOFBLOCKS;
1462 case XFS_ICI_COWBLOCKS_TAG:
1463 return XFS_ICOWBLOCKS;
1464 default:
1465 ASSERT(0);
1466 return 0;
1467 }
1468}
1469
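/*
 * Tag an inode in the per-AG inode radix tree for post-EOF or CoW block
 * reclaim. The first tagged inode in an AG also propagates the tag up into
 * the per-mount perag tree and kicks off the background trimming worker via
 * @execute.
 */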
1470static void
1471__xfs_inode_set_blocks_tag(
1472 xfs_inode_t *ip,
1473 void (*execute)(struct xfs_mount *mp),
1474 void (*set_tp)(struct xfs_mount *mp, xfs_agnumber_t agno,
1475 int error, unsigned long caller_ip),
1476 int tag)
1477{
1478 struct xfs_mount *mp = ip->i_mount;
1479 struct xfs_perag *pag;
1480 int tagged;
1481
1482 /*
1483 * Don't bother locking the AG and looking up in the radix trees
1484 * if we already know that we have the tag set.
1485 */
1486 if (ip->i_flags & xfs_iflag_for_tag(tag))
1487 return;
1488 spin_lock(&ip->i_flags_lock);
1489 ip->i_flags |= xfs_iflag_for_tag(tag);
1490 spin_unlock(&ip->i_flags_lock);
1491
1492 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
1493 spin_lock(&pag->pag_ici_lock);
1494
1495 tagged = radix_tree_tagged(&pag->pag_ici_root, tag);
1496 radix_tree_tag_set(&pag->pag_ici_root,
1497 XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino), tag);
1498 if (!tagged) {
1499 /* propagate the eofblocks tag up into the perag radix tree */
1500 spin_lock(&ip->i_mount->m_perag_lock);
1501 radix_tree_tag_set(&ip->i_mount->m_perag_tree,
1502 XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
1503 tag);
1504 spin_unlock(&ip->i_mount->m_perag_lock);
1505
1506 /* kick off background trimming */
1507 execute(ip->i_mount);
1508
1509 set_tp(ip->i_mount, pag->pag_agno, -1, _RET_IP_);
1510 }
1511
1512 spin_unlock(&pag->pag_ici_lock);
1513 xfs_perag_put(pag);
1514}
1515
1516void
1517xfs_inode_set_eofblocks_tag(
1518 xfs_inode_t *ip)
1519{
1520 trace_xfs_inode_set_eofblocks_tag(ip);
1521 return __xfs_inode_set_blocks_tag(ip, xfs_queue_eofblocks,
1522 trace_xfs_perag_set_eofblocks,
1523 XFS_ICI_EOFBLOCKS_TAG);
1524}
1525
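/*
 * Clear a post-EOF or CoW block reclaim tag from an inode, dropping the
 * perag tree tag as well once no tagged inodes remain in the AG.
 */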
1526static void
1527__xfs_inode_clear_blocks_tag(
1528 xfs_inode_t *ip,
1529 void (*clear_tp)(struct xfs_mount *mp, xfs_agnumber_t agno,
1530 int error, unsigned long caller_ip),
1531 int tag)
1532{
1533 struct xfs_mount *mp = ip->i_mount;
1534 struct xfs_perag *pag;
1535
1536 spin_lock(&ip->i_flags_lock);
1537 ip->i_flags &= ~xfs_iflag_for_tag(tag);
1538 spin_unlock(&ip->i_flags_lock);
1539
1540 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
1541 spin_lock(&pag->pag_ici_lock);
1542
1543 radix_tree_tag_clear(&pag->pag_ici_root,
1544 XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino), tag);
1545 if (!radix_tree_tagged(&pag->pag_ici_root, tag)) {
1546 /* clear the eofblocks tag from the perag radix tree */
1547 spin_lock(&ip->i_mount->m_perag_lock);
1548 radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
1549 XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
1550 tag);
1551 spin_unlock(&ip->i_mount->m_perag_lock);
1552 clear_tp(ip->i_mount, pag->pag_agno, -1, _RET_IP_);
1553 }
1554
1555 spin_unlock(&pag->pag_ici_lock);
1556 xfs_perag_put(pag);
1557}
1558
1559void
1560xfs_inode_clear_eofblocks_tag(
1561 xfs_inode_t *ip)
1562{
1563 trace_xfs_inode_clear_eofblocks_tag(ip);
1564 return __xfs_inode_clear_blocks_tag(ip,
1565 trace_xfs_perag_clear_eofblocks, XFS_ICI_EOFBLOCKS_TAG);
1566}
1567
1568/*
1569 * Set ourselves up to free CoW blocks from this file. If it's already clean
1570 * then we can bail out quickly, but otherwise we must back off if the file
1571 * is undergoing some kind of write.
1572 */
1573static bool
1574xfs_prep_free_cowblocks(
1575 struct xfs_inode *ip)
1576{
1577 /*
1578 * Just clear the tag if we have an empty cow fork or none at all. It's
1579 * possible the inode was fully unshared since it was originally tagged.
1580 */
1581 if (!xfs_inode_has_cow_data(ip)) {
1582 trace_xfs_inode_free_cowblocks_invalid(ip);
1583 xfs_inode_clear_cowblocks_tag(ip);
1584 return false;
1585 }
1586
1587 /*
1588 * If the mapping is dirty or under writeback we cannot touch the
1589 * CoW fork. Leave it alone if we're in the midst of a directio.
1590 */
1591 if ((VFS_I(ip)->i_state & I_DIRTY_PAGES) ||
1592 mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY) ||
1593 mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_WRITEBACK) ||
1594 atomic_read(&VFS_I(ip)->i_dio_count))
1595 return false;
1596
1597 return true;
1598}
1599
1600/*
1601 * Automatic CoW Reservation Freeing
1602 *
1603 * These functions automatically garbage collect leftover CoW reservations
1604 * that were made on behalf of a cowextsize hint when we start to run out
1605 * of quota or when the reservations sit around for too long. If the file
1606 * has dirty pages or is undergoing writeback, its CoW reservations will
1607 * be retained.
1608 *
1609 * The actual garbage collection piggybacks off the same code that runs
1610 * the speculative EOF preallocation garbage collector.
1611 */
1612STATIC int
1613xfs_inode_free_cowblocks(
1614 struct xfs_inode *ip,
1615 void *args)
1616{
1617 struct xfs_eofblocks *eofb = args;
1618 int ret = 0;
1619
1620 if (!xfs_prep_free_cowblocks(ip))
1621 return 0;
1622
1623 if (!xfs_inode_matches_eofb(ip, eofb))
1624 return 0;
1625
1626 /* Free the CoW blocks */
1627 xfs_ilock(ip, XFS_IOLOCK_EXCL);
1628 xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
1629
1630 /*
1631 * Check again, nobody else should be able to dirty blocks or change
1632 * the reflink iflag now that we have the first two locks held.
1633 */
1634 if (xfs_prep_free_cowblocks(ip))
1635 ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, false);
1636
1637 xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
1638 xfs_iunlock(ip, XFS_IOLOCK_EXCL);
1639
1640 return ret;
1641}
1642
1643int
1644xfs_icache_free_cowblocks(
1645 struct xfs_mount *mp,
1646 struct xfs_eofblocks *eofb)
1647{
1648 return xfs_inode_walk(mp, 0, xfs_inode_free_cowblocks, eofb,
1649 XFS_ICI_COWBLOCKS_TAG);
1650}
1651
1652int
1653xfs_inode_free_quota_cowblocks(
1654 struct xfs_inode *ip)
1655{
1656 return __xfs_inode_free_quota_eofblocks(ip, xfs_icache_free_cowblocks);
1657}
1658
1659void
1660xfs_inode_set_cowblocks_tag(
1661 xfs_inode_t *ip)
1662{
1663 trace_xfs_inode_set_cowblocks_tag(ip);
1664 return __xfs_inode_set_blocks_tag(ip, xfs_queue_cowblocks,
1665 trace_xfs_perag_set_cowblocks,
1666 XFS_ICI_COWBLOCKS_TAG);
1667}
1668
1669void
1670xfs_inode_clear_cowblocks_tag(
1671 xfs_inode_t *ip)
1672{
1673 trace_xfs_inode_clear_cowblocks_tag(ip);
1674 return __xfs_inode_clear_blocks_tag(ip,
1675 trace_xfs_perag_clear_cowblocks, XFS_ICI_COWBLOCKS_TAG);
1676}
1677
1678/* Disable post-EOF and CoW block auto-reclamation. */
1679void
1680xfs_stop_block_reaping(
1681 struct xfs_mount *mp)
1682{
1683 cancel_delayed_work_sync(&mp->m_eofblocks_work);
1684 cancel_delayed_work_sync(&mp->m_cowblocks_work);
1685}
1686
1687/* Enable post-EOF and CoW block auto-reclamation. */
1688void
1689xfs_start_block_reaping(
1690 struct xfs_mount *mp)
1691{
1692 xfs_queue_eofblocks(mp);
1693 xfs_queue_cowblocks(mp);
1694}