1/*
2 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18#include "xfs.h"
19#include "xfs_fs.h"
20#include "xfs_format.h"
21#include "xfs_log_format.h"
22#include "xfs_trans_resv.h"
23#include "xfs_sb.h"
24#include "xfs_mount.h"
25#include "xfs_inode.h"
26#include "xfs_error.h"
27#include "xfs_trans.h"
28#include "xfs_trans_priv.h"
29#include "xfs_inode_item.h"
30#include "xfs_quota.h"
31#include "xfs_trace.h"
32#include "xfs_icache.h"
33#include "xfs_bmap_util.h"
34#include "xfs_dquot_item.h"
35#include "xfs_dquot.h"
36#include "xfs_reflink.h"
37
38#include <linux/kthread.h>
39#include <linux/freezer.h>
40#include <linux/iversion.h>
41
42/*
43 * Allocate and initialise an xfs_inode.
44 */
45struct xfs_inode *
46xfs_inode_alloc(
47 struct xfs_mount *mp,
48 xfs_ino_t ino)
49{
50 struct xfs_inode *ip;
51
52 /*
53 * if this didn't occur in transactions, we could use
54 * KM_MAYFAIL and return NULL here on ENOMEM. Set the
55 * code up to do this anyway.
56 */
57 ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP);
58 if (!ip)
59 return NULL;
60 if (inode_init_always(mp->m_super, VFS_I(ip))) {
61 kmem_zone_free(xfs_inode_zone, ip);
62 return NULL;
63 }
64
65 /* VFS doesn't initialise i_mode! */
66 VFS_I(ip)->i_mode = 0;
67
68 XFS_STATS_INC(mp, vn_active);
69 ASSERT(atomic_read(&ip->i_pincount) == 0);
70 ASSERT(!xfs_isiflocked(ip));
71 ASSERT(ip->i_ino == 0);
72
73 /* initialise the xfs inode */
74 ip->i_ino = ino;
75 ip->i_mount = mp;
76 memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
77 ip->i_afp = NULL;
78 ip->i_cowfp = NULL;
79 ip->i_cnextents = 0;
80 ip->i_cformat = XFS_DINODE_FMT_EXTENTS;
81 memset(&ip->i_df, 0, sizeof(xfs_ifork_t));
82 ip->i_flags = 0;
83 ip->i_delayed_blks = 0;
84 memset(&ip->i_d, 0, sizeof(ip->i_d));
85
86 return ip;
87}
88
89STATIC void
90xfs_inode_free_callback(
91 struct rcu_head *head)
92{
93 struct inode *inode = container_of(head, struct inode, i_rcu);
94 struct xfs_inode *ip = XFS_I(inode);
95
96 switch (VFS_I(ip)->i_mode & S_IFMT) {
97 case S_IFREG:
98 case S_IFDIR:
99 case S_IFLNK:
100 xfs_idestroy_fork(ip, XFS_DATA_FORK);
101 break;
102 }
103
104 if (ip->i_afp)
105 xfs_idestroy_fork(ip, XFS_ATTR_FORK);
106 if (ip->i_cowfp)
107 xfs_idestroy_fork(ip, XFS_COW_FORK);
108
109 if (ip->i_itemp) {
110 ASSERT(!(ip->i_itemp->ili_item.li_flags & XFS_LI_IN_AIL));
111 xfs_inode_item_destroy(ip);
112 ip->i_itemp = NULL;
113 }
114
115 kmem_zone_free(xfs_inode_zone, ip);
116}
117
118static void
119__xfs_inode_free(
120 struct xfs_inode *ip)
121{
122 /* asserts to verify all state is correct here */
123 ASSERT(atomic_read(&ip->i_pincount) == 0);
124 XFS_STATS_DEC(ip->i_mount, vn_active);
125
126 call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
127}
128
129void
130xfs_inode_free(
131 struct xfs_inode *ip)
132{
133 ASSERT(!xfs_isiflocked(ip));
134
135 /*
136 * Because we use RCU freeing we need to ensure the inode always
137 * appears to be reclaimed with an invalid inode number when in the
138 * free state. The ip->i_flags_lock provides the barrier against lookup
139 * races.
140 */
141 spin_lock(&ip->i_flags_lock);
142 ip->i_flags = XFS_IRECLAIM;
143 ip->i_ino = 0;
144 spin_unlock(&ip->i_flags_lock);
145
146 __xfs_inode_free(ip);
147}
148
149/*
150 * Queue a new inode reclaim pass if there are reclaimable inodes and there
151 * isn't a reclaim pass already in progress. By default it runs every 5s based
152 * on the xfs periodic sync default of 30s. Perhaps this should have its own
153 * tunable, but that can be done if this method proves to be ineffective or too
154 * aggressive.
155 */
156static void
157xfs_reclaim_work_queue(
158 struct xfs_mount *mp)
159{
160
161 rcu_read_lock();
162 if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
163 queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work,
164 msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
165 }
166 rcu_read_unlock();
167}
168
169/*
170 * This is a fast pass over the inode cache to try to get reclaim moving on as
171 * many inodes as possible in a short period of time. It kicks itself every few
172 * seconds, as well as being kicked by the inode cache shrinker when memory
173 * goes low. It scans as quickly as possible avoiding locked inodes or those
174 * already being flushed, and once done schedules a future pass.
175 */
176void
177xfs_reclaim_worker(
178 struct work_struct *work)
179{
180 struct xfs_mount *mp = container_of(to_delayed_work(work),
181 struct xfs_mount, m_reclaim_work);
182
183 xfs_reclaim_inodes(mp, SYNC_TRYLOCK);
184 xfs_reclaim_work_queue(mp);
185}
186
187static void
188xfs_perag_set_reclaim_tag(
189 struct xfs_perag *pag)
190{
191 struct xfs_mount *mp = pag->pag_mount;
192
193 lockdep_assert_held(&pag->pag_ici_lock);
194 if (pag->pag_ici_reclaimable++)
195 return;
196
197 /* propagate the reclaim tag up into the perag radix tree */
198 spin_lock(&mp->m_perag_lock);
199 radix_tree_tag_set(&mp->m_perag_tree, pag->pag_agno,
200 XFS_ICI_RECLAIM_TAG);
201 spin_unlock(&mp->m_perag_lock);
202
203 /* schedule periodic background inode reclaim */
204 xfs_reclaim_work_queue(mp);
205
206 trace_xfs_perag_set_reclaim(mp, pag->pag_agno, -1, _RET_IP_);
207}
208
209static void
210xfs_perag_clear_reclaim_tag(
211 struct xfs_perag *pag)
212{
213 struct xfs_mount *mp = pag->pag_mount;
214
215 lockdep_assert_held(&pag->pag_ici_lock);
216 if (--pag->pag_ici_reclaimable)
217 return;
218
219 /* clear the reclaim tag from the perag radix tree */
220 spin_lock(&mp->m_perag_lock);
221 radix_tree_tag_clear(&mp->m_perag_tree, pag->pag_agno,
222 XFS_ICI_RECLAIM_TAG);
223 spin_unlock(&mp->m_perag_lock);
224 trace_xfs_perag_clear_reclaim(mp, pag->pag_agno, -1, _RET_IP_);
225}
226
227
228/*
229 * We set the inode flag atomically with the radix tree tag.
230 * Once we get tag lookups on the radix tree, this inode flag
231 * can go away.
232 */
233void
234xfs_inode_set_reclaim_tag(
235 struct xfs_inode *ip)
236{
237 struct xfs_mount *mp = ip->i_mount;
238 struct xfs_perag *pag;
239
240 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
241 spin_lock(&pag->pag_ici_lock);
242 spin_lock(&ip->i_flags_lock);
243
244 radix_tree_tag_set(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ip->i_ino),
245 XFS_ICI_RECLAIM_TAG);
246 xfs_perag_set_reclaim_tag(pag);
247 __xfs_iflags_set(ip, XFS_IRECLAIMABLE);
248
249 spin_unlock(&ip->i_flags_lock);
250 spin_unlock(&pag->pag_ici_lock);
251 xfs_perag_put(pag);
252}
253
254STATIC void
255xfs_inode_clear_reclaim_tag(
256 struct xfs_perag *pag,
257 xfs_ino_t ino)
258{
259 radix_tree_tag_clear(&pag->pag_ici_root,
260 XFS_INO_TO_AGINO(pag->pag_mount, ino),
261 XFS_ICI_RECLAIM_TAG);
262 xfs_perag_clear_reclaim_tag(pag);
263}
264
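/*
 * Wait for the XFS_INEW flag to be cleared on an inode that another thread is
 * still initialising. Waiters sleep on the bit waitqueue for __XFS_INEW_BIT
 * and are woken by the wake_up_bit() call issued when the flag is cleared.
 */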
265static void
266xfs_inew_wait(
267 struct xfs_inode *ip)
268{
269 wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_INEW_BIT);
270 DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_INEW_BIT);
271
272 do {
273 prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
274 if (!xfs_iflags_test(ip, XFS_INEW))
275 break;
276 schedule();
277 } while (true);
278 finish_wait(wq, &wait.wq_entry);
279}
280
281/*
282 * When we recycle a reclaimable inode, we need to re-initialise the VFS inode
283 * part of the structure. This is made more complex by the fact we store
284 * information about the on-disk values in the VFS inode and so we can't just
285 * overwrite the values unconditionally. Hence we save the parameters we
286 * need to retain across reinitialisation, and rewrite them into the VFS inode
287 * after reinitialisation even if it fails.
288 */
289static int
290xfs_reinit_inode(
291 struct xfs_mount *mp,
292 struct inode *inode)
293{
294 int error;
295 uint32_t nlink = inode->i_nlink;
296 uint32_t generation = inode->i_generation;
297 uint64_t version = inode_peek_iversion(inode);
298 umode_t mode = inode->i_mode;
299 dev_t dev = inode->i_rdev;
300
301 error = inode_init_always(mp->m_super, inode);
302
303 set_nlink(inode, nlink);
304 inode->i_generation = generation;
305 inode_set_iversion_queried(inode, version);
306 inode->i_mode = mode;
307 inode->i_rdev = dev;
308 return error;
309}
310
311/*
312 * Check the validity of the inode we just found in the cache
313 */
314static int
315xfs_iget_cache_hit(
316 struct xfs_perag *pag,
317 struct xfs_inode *ip,
318 xfs_ino_t ino,
319 int flags,
320 int lock_flags) __releases(RCU)
321{
322 struct inode *inode = VFS_I(ip);
323 struct xfs_mount *mp = ip->i_mount;
324 int error;
325
326 /*
327 * check for re-use of an inode within an RCU grace period due to the
328 * radix tree nodes not being updated yet. We monitor for this by
329 * setting the inode number to zero before freeing the inode structure.
330 * If the inode has been reallocated and set up, then the inode number
331 * will not match, so check for that, too.
332 */
333 spin_lock(&ip->i_flags_lock);
334 if (ip->i_ino != ino) {
335 trace_xfs_iget_skip(ip);
336 XFS_STATS_INC(mp, xs_ig_frecycle);
337 error = -EAGAIN;
338 goto out_error;
339 }
340
341
342 /*
343 * If we are racing with another cache hit that is currently
344 * instantiating this inode or currently recycling it out of
345 * reclaimable state, wait for the initialisation to complete
346 * before continuing.
347 *
348 * XXX(hch): eventually we should do something equivalent to
349 * wait_on_inode to wait for these flags to be cleared
350 * instead of polling for it.
351 */
352 if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) {
353 trace_xfs_iget_skip(ip);
354 XFS_STATS_INC(mp, xs_ig_frecycle);
355 error = -EAGAIN;
356 goto out_error;
357 }
358
359 /*
360 * If lookup is racing with unlink return an error immediately.
361 */
362 if (VFS_I(ip)->i_mode == 0 && !(flags & XFS_IGET_CREATE)) {
363 error = -ENOENT;
364 goto out_error;
365 }
366
367 /*
368 * If IRECLAIMABLE is set, we've torn down the VFS inode already.
369 * We need to carefully get it back into a usable state.
370 */
371 if (ip->i_flags & XFS_IRECLAIMABLE) {
372 trace_xfs_iget_reclaim(ip);
373
374 if (flags & XFS_IGET_INCORE) {
375 error = -EAGAIN;
376 goto out_error;
377 }
378
379 /*
380 * We need to set XFS_IRECLAIM to prevent xfs_reclaim_inode
381 * from stomping over us while we recycle the inode. We can't
382 * clear the radix tree reclaimable tag yet as it requires
383 * pag_ici_lock to be held exclusive.
384 */
385 ip->i_flags |= XFS_IRECLAIM;
386
387 spin_unlock(&ip->i_flags_lock);
388 rcu_read_unlock();
389
390 error = xfs_reinit_inode(mp, inode);
391 if (error) {
392 bool wake;
393 /*
394 * Re-initializing the inode failed, and we are in deep
395 * trouble. Try to re-add it to the reclaim list.
396 */
397 rcu_read_lock();
398 spin_lock(&ip->i_flags_lock);
399 wake = !!__xfs_iflags_test(ip, XFS_INEW);
400 ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
401 if (wake)
402 wake_up_bit(&ip->i_flags, __XFS_INEW_BIT);
403 ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
404 trace_xfs_iget_reclaim_fail(ip);
405 goto out_error;
406 }
407
408 spin_lock(&pag->pag_ici_lock);
409 spin_lock(&ip->i_flags_lock);
410
411 /*
412 * Clear the per-lifetime state in the inode as we are now
413 * effectively a new inode and need to return to the initial
414 * state before reuse occurs.
415 */
416 ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
417 ip->i_flags |= XFS_INEW;
418 xfs_inode_clear_reclaim_tag(pag, ip->i_ino);
419 inode->i_state = I_NEW;
420
421 ASSERT(!rwsem_is_locked(&inode->i_rwsem));
422 init_rwsem(&inode->i_rwsem);
423
424 spin_unlock(&ip->i_flags_lock);
425 spin_unlock(&pag->pag_ici_lock);
426 } else {
427 /* If the VFS inode is being torn down, pause and try again. */
428 if (!igrab(inode)) {
429 trace_xfs_iget_skip(ip);
430 error = -EAGAIN;
431 goto out_error;
432 }
433
434 /* We've got a live one. */
435 spin_unlock(&ip->i_flags_lock);
436 rcu_read_unlock();
437 trace_xfs_iget_hit(ip);
438 }
439
440 if (lock_flags != 0)
441 xfs_ilock(ip, lock_flags);
442
443 if (!(flags & XFS_IGET_INCORE))
444 xfs_iflags_clear(ip, XFS_ISTALE | XFS_IDONTCACHE);
445 XFS_STATS_INC(mp, xs_ig_found);
446
447 return 0;
448
449out_error:
450 spin_unlock(&ip->i_flags_lock);
451 rcu_read_unlock();
452 return error;
453}
454
455
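/*
 * Cache miss path for xfs_iget(): allocate a new in-core inode, read the
 * on-disk inode, verify its forks, and insert it into the per-AG radix tree
 * with XFS_INEW set so concurrent lookups see it as under construction.
 * Returns -EAGAIN if another thread beat us to the radix tree insert.
 */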
456static int
457xfs_iget_cache_miss(
458 struct xfs_mount *mp,
459 struct xfs_perag *pag,
460 xfs_trans_t *tp,
461 xfs_ino_t ino,
462 struct xfs_inode **ipp,
463 int flags,
464 int lock_flags)
465{
466 struct xfs_inode *ip;
467 int error;
468 xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ino);
469 int iflags;
470
471 ip = xfs_inode_alloc(mp, ino);
472 if (!ip)
473 return -ENOMEM;
474
475 error = xfs_iread(mp, tp, ip, flags);
476 if (error)
477 goto out_destroy;
478
479 if (!xfs_inode_verify_forks(ip)) {
480 error = -EFSCORRUPTED;
481 goto out_destroy;
482 }
483
484 trace_xfs_iget_miss(ip);
485
486
487 /*
488 * If we are allocating a new inode, then check that what was returned is
489 * actually a free, empty inode. If we are not allocating an inode,
490 * then check that we didn't find a free inode.
491 */
492 if (flags & XFS_IGET_CREATE) {
493 if (VFS_I(ip)->i_mode != 0) {
494 xfs_warn(mp,
495"Corruption detected! Free inode 0x%llx not marked free on disk",
496 ino);
497 error = -EFSCORRUPTED;
498 goto out_destroy;
499 }
500 if (ip->i_d.di_nblocks != 0) {
501 xfs_warn(mp,
502"Corruption detected! Free inode 0x%llx has blocks allocated!",
503 ino);
504 error = -EFSCORRUPTED;
505 goto out_destroy;
506 }
507 } else if (VFS_I(ip)->i_mode == 0) {
508 error = -ENOENT;
509 goto out_destroy;
510 }
511
512 /*
513 * Preload the radix tree so we can insert safely under the
514 * write spinlock. Note that we cannot sleep inside the preload
515 * region. Since we can be called from transaction context, don't
516 * recurse into the file system.
517 */
518 if (radix_tree_preload(GFP_NOFS)) {
519 error = -EAGAIN;
520 goto out_destroy;
521 }
522
523 /*
524 * Because the inode hasn't been added to the radix-tree yet it can't
525 * be found by another thread, so we can do the non-sleeping lock here.
526 */
527 if (lock_flags) {
528 if (!xfs_ilock_nowait(ip, lock_flags))
529 BUG();
530 }
531
532 /*
533 * These values must be set before inserting the inode into the radix
534 * tree as the moment it is inserted a concurrent lookup (allowed by the
535 * RCU locking mechanism) can find it and that lookup must see that this
536 * is an inode currently under construction (i.e. that XFS_INEW is set).
537 * The ip->i_flags_lock that protects the XFS_INEW flag forms the
538 * memory barrier that ensures this detection works correctly at lookup
539 * time.
540 */
541 iflags = XFS_INEW;
542 if (flags & XFS_IGET_DONTCACHE)
543 iflags |= XFS_IDONTCACHE;
544 ip->i_udquot = NULL;
545 ip->i_gdquot = NULL;
546 ip->i_pdquot = NULL;
547 xfs_iflags_set(ip, iflags);
548
549 /* insert the new inode */
550 spin_lock(&pag->pag_ici_lock);
551 error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
552 if (unlikely(error)) {
553 WARN_ON(error != -EEXIST);
554 XFS_STATS_INC(mp, xs_ig_dup);
555 error = -EAGAIN;
556 goto out_preload_end;
557 }
558 spin_unlock(&pag->pag_ici_lock);
559 radix_tree_preload_end();
560
561 *ipp = ip;
562 return 0;
563
564out_preload_end:
565 spin_unlock(&pag->pag_ici_lock);
566 radix_tree_preload_end();
567 if (lock_flags)
568 xfs_iunlock(ip, lock_flags);
569out_destroy:
570 __destroy_inode(VFS_I(ip));
571 xfs_inode_free(ip);
572 return error;
573}
574
575/*
576 * Look up an inode by number in the given file system.
577 * The inode is looked up in the cache held in each AG.
578 * If the inode is found in the cache, initialise the vfs inode
579 * if necessary.
580 *
581 * If it is not in core, read it in from the file system's device,
582 * add it to the cache and initialise the vfs inode.
583 *
584 * The inode is locked according to the value of the lock_flags parameter.
585 * This flag parameter indicates how and if the inode's IO lock and inode lock
586 * should be taken.
587 *
588 * mp -- the mount point structure for the current file system. It points
589 * to the inode hash table.
590 * tp -- a pointer to the current transaction if there is one. This is
591 * simply passed through to the xfs_iread() call.
592 * ino -- the number of the inode desired. This is the unique identifier
593 * within the file system for the inode being requested.
594 * lock_flags -- flags indicating how to lock the inode. See the comment
595 * for xfs_ilock() for a list of valid values.
596 */
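/*
 * Illustrative caller pattern (hypothetical example only; the transaction,
 * flags and lock mode depend on the caller):
 *
 *	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip);
 *	if (error)
 *		return error;
 *	... use the inode ...
 *	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 *	IRELE(ip);
 */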
597int
598xfs_iget(
599 xfs_mount_t *mp,
600 xfs_trans_t *tp,
601 xfs_ino_t ino,
602 uint flags,
603 uint lock_flags,
604 xfs_inode_t **ipp)
605{
606 xfs_inode_t *ip;
607 int error;
608 xfs_perag_t *pag;
609 xfs_agino_t agino;
610
611 /*
612 * xfs_reclaim_inode() uses the ILOCK to ensure an inode
613 * doesn't get freed while it's being referenced during a
614 * radix tree traversal here. It assumes this function
615 * acquires only the ILOCK (and therefore it has no need to
616 * involve the IOLOCK in this synchronization).
617 */
618 ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0);
619
620 /* reject inode numbers outside existing AGs */
621 if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
622 return -EINVAL;
623
624 XFS_STATS_INC(mp, xs_ig_attempts);
625
626 /* get the perag structure and ensure that it's inode capable */
627 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
628 agino = XFS_INO_TO_AGINO(mp, ino);
629
630again:
631 error = 0;
632 rcu_read_lock();
633 ip = radix_tree_lookup(&pag->pag_ici_root, agino);
634
635 if (ip) {
636 error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
637 if (error)
638 goto out_error_or_again;
639 } else {
640 rcu_read_unlock();
641 if (flags & XFS_IGET_INCORE) {
642 error = -ENODATA;
643 goto out_error_or_again;
644 }
645 XFS_STATS_INC(mp, xs_ig_missed);
646
647 error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
648 flags, lock_flags);
649 if (error)
650 goto out_error_or_again;
651 }
652 xfs_perag_put(pag);
653
654 *ipp = ip;
655
656 /*
657 * If we have a real type for an on-disk inode, we can setup the inode
658 * now. If it's a new inode being created, xfs_ialloc will handle it.
659 */
660 if (xfs_iflags_test(ip, XFS_INEW) && VFS_I(ip)->i_mode != 0)
661 xfs_setup_existing_inode(ip);
662 return 0;
663
664out_error_or_again:
665 if (!(flags & XFS_IGET_INCORE) && error == -EAGAIN) {
666 delay(1);
667 goto again;
668 }
669 xfs_perag_put(pag);
670 return error;
671}
672
673/*
674 * "Is this a cached inode that's also allocated?"
675 *
676 * Look up an inode by number in the given file system. If the inode is
677 * in cache and isn't in purgatory, return 1 if the inode is allocated
678 * and 0 if it is not. For all other cases (not in cache, being torn
679 * down, etc.), return a negative error code.
680 *
681 * The caller has to prevent inode allocation and freeing activity,
682 * presumably by locking the AGI buffer. This is to ensure that an
683 * inode cannot transition from allocated to freed until the caller is
684 * ready to allow that. If the inode is in an intermediate state (new,
685 * reclaimable, or being reclaimed), -EAGAIN will be returned; if the
686 * inode is not in the cache, -ENOENT will be returned. The caller must
687 * deal with these scenarios appropriately.
688 *
689 * This is a specialized use case for the online scrubber; if you're
690 * reading this, you probably want xfs_iget.
691 */
692int
693xfs_icache_inode_is_allocated(
694 struct xfs_mount *mp,
695 struct xfs_trans *tp,
696 xfs_ino_t ino,
697 bool *inuse)
698{
699 struct xfs_inode *ip;
700 int error;
701
702 error = xfs_iget(mp, tp, ino, XFS_IGET_INCORE, 0, &ip);
703 if (error)
704 return error;
705
706 *inuse = !!(VFS_I(ip)->i_mode);
707 IRELE(ip);
708 return 0;
709}
710
711/*
712 * The inode lookup is done in batches to keep the amount of lock traffic and
713 * radix tree lookups to a minimum. The batch size is a trade off between
714 * lookup reduction and stack usage. This is in the reclaim path, so we can't
715 * be too greedy.
716 */
717#define XFS_LOOKUP_BATCH 32
718
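/*
 * Decide whether an inode found by the AG walk can be grabbed for the
 * callback. Returns 0 with an igrab() reference held on success, or a
 * negative error if the inode is stale, new, already heading into reclaim,
 * or the filesystem is shutting down.
 */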
719STATIC int
720xfs_inode_ag_walk_grab(
721 struct xfs_inode *ip,
722 int flags)
723{
724 struct inode *inode = VFS_I(ip);
725 bool newinos = !!(flags & XFS_AGITER_INEW_WAIT);
726
727 ASSERT(rcu_read_lock_held());
728
729 /*
730 * check for stale RCU freed inode
731 *
732 * If the inode has been reallocated, it doesn't matter if it's not in
733 * the AG we are walking - we are walking for writeback, so if it
734 * passes all the "valid inode" checks and is dirty, then we'll write
735 * it back anyway. If it has been reallocated and still being
736 * initialised, the XFS_INEW check below will catch it.
737 */
738 spin_lock(&ip->i_flags_lock);
739 if (!ip->i_ino)
740 goto out_unlock_noent;
741
742 /* avoid new or reclaimable inodes. Leave for reclaim code to flush */
743 if ((!newinos && __xfs_iflags_test(ip, XFS_INEW)) ||
744 __xfs_iflags_test(ip, XFS_IRECLAIMABLE | XFS_IRECLAIM))
745 goto out_unlock_noent;
746 spin_unlock(&ip->i_flags_lock);
747
748 /* nothing to sync during shutdown */
749 if (XFS_FORCED_SHUTDOWN(ip->i_mount))
750 return -EFSCORRUPTED;
751
752 /* If we can't grab the inode, it must be on its way to reclaim. */
753 if (!igrab(inode))
754 return -ENOENT;
755
756 /* inode is valid */
757 return 0;
758
759out_unlock_noent:
760 spin_unlock(&ip->i_flags_lock);
761 return -ENOENT;
762}
763
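/*
 * Walk all in-core inodes in one AG in XFS_LOOKUP_BATCH sized chunks and run
 * @execute against each inode we can grab. A @tag of -1 walks every inode;
 * otherwise only inodes carrying that radix tree tag are visited. Inodes that
 * return -EAGAIN are skipped and the whole walk restarts after a short delay.
 */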
764STATIC int
765xfs_inode_ag_walk(
766 struct xfs_mount *mp,
767 struct xfs_perag *pag,
768 int (*execute)(struct xfs_inode *ip, int flags,
769 void *args),
770 int flags,
771 void *args,
772 int tag,
773 int iter_flags)
774{
775 uint32_t first_index;
776 int last_error = 0;
777 int skipped;
778 int done;
779 int nr_found;
780
781restart:
782 done = 0;
783 skipped = 0;
784 first_index = 0;
785 nr_found = 0;
786 do {
787 struct xfs_inode *batch[XFS_LOOKUP_BATCH];
788 int error = 0;
789 int i;
790
791 rcu_read_lock();
792
793 if (tag == -1)
794 nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
795 (void **)batch, first_index,
796 XFS_LOOKUP_BATCH);
797 else
798 nr_found = radix_tree_gang_lookup_tag(
799 &pag->pag_ici_root,
800 (void **) batch, first_index,
801 XFS_LOOKUP_BATCH, tag);
802
803 if (!nr_found) {
804 rcu_read_unlock();
805 break;
806 }
807
808 /*
809 * Grab the inodes before we drop the lock. If we found
810 * nothing, nr == 0 and the loop will be skipped.
811 */
812 for (i = 0; i < nr_found; i++) {
813 struct xfs_inode *ip = batch[i];
814
815 if (done || xfs_inode_ag_walk_grab(ip, iter_flags))
816 batch[i] = NULL;
817
818 /*
819 * Update the index for the next lookup. Catch
820 * overflows into the next AG range which can occur if
821 * we have inodes in the last block of the AG and we
822 * are currently pointing to the last inode.
823 *
824 * Because we may see inodes that are from the wrong AG
825 * due to RCU freeing and reallocation, only update the
826 * index if it lies in this AG. It was a race that led
827 * us to see this inode, so another lookup from the
828 * same index will not find it again.
829 */
830 if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
831 continue;
832 first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
833 if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
834 done = 1;
835 }
836
837 /* unlock now we've grabbed the inodes. */
838 rcu_read_unlock();
839
840 for (i = 0; i < nr_found; i++) {
841 if (!batch[i])
842 continue;
843 if ((iter_flags & XFS_AGITER_INEW_WAIT) &&
844 xfs_iflags_test(batch[i], XFS_INEW))
845 xfs_inew_wait(batch[i]);
846 error = execute(batch[i], flags, args);
847 IRELE(batch[i]);
848 if (error == -EAGAIN) {
849 skipped++;
850 continue;
851 }
852 if (error && last_error != -EFSCORRUPTED)
853 last_error = error;
854 }
855
856 /* bail out if the filesystem is corrupted. */
857 if (error == -EFSCORRUPTED)
858 break;
859
860 cond_resched();
861
862 } while (nr_found && !done);
863
864 if (skipped) {
865 delay(1);
866 goto restart;
867 }
868 return last_error;
869}
870
871/*
872 * Background scanning to trim post-EOF preallocated space. This is queued
873 * based on the 'speculative_prealloc_lifetime' tunable (5m by default).
874 */
875void
876xfs_queue_eofblocks(
877 struct xfs_mount *mp)
878{
879 rcu_read_lock();
880 if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_EOFBLOCKS_TAG))
881 queue_delayed_work(mp->m_eofblocks_workqueue,
882 &mp->m_eofblocks_work,
883 msecs_to_jiffies(xfs_eofb_secs * 1000));
884 rcu_read_unlock();
885}
886
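/*
 * Background worker that trims post-EOF speculative preallocations and then
 * requeues itself if more tagged inodes remain.
 */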
887void
888xfs_eofblocks_worker(
889 struct work_struct *work)
890{
891 struct xfs_mount *mp = container_of(to_delayed_work(work),
892 struct xfs_mount, m_eofblocks_work);
893 xfs_icache_free_eofblocks(mp, NULL);
894 xfs_queue_eofblocks(mp);
895}
896
897/*
898 * Background scanning to trim preallocated CoW space. This is queued
899 * based on the 'speculative_cow_prealloc_lifetime' tunable (5m by default).
900 * (We'll just piggyback on the post-EOF prealloc space workqueue.)
901 */
902void
903xfs_queue_cowblocks(
904 struct xfs_mount *mp)
905{
906 rcu_read_lock();
907 if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_COWBLOCKS_TAG))
908 queue_delayed_work(mp->m_eofblocks_workqueue,
909 &mp->m_cowblocks_work,
910 msecs_to_jiffies(xfs_cowb_secs * 1000));
911 rcu_read_unlock();
912}
913
914void
915xfs_cowblocks_worker(
916 struct work_struct *work)
917{
918 struct xfs_mount *mp = container_of(to_delayed_work(work),
919 struct xfs_mount, m_cowblocks_work);
920 xfs_icache_free_cowblocks(mp, NULL);
921 xfs_queue_cowblocks(mp);
922}
923
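/*
 * Iterate over all AGs in the filesystem and run @execute against every
 * in-core inode we can grab, passing @flags and @args through to the
 * callback. The first -EFSCORRUPTED error aborts the walk.
 */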
924int
925xfs_inode_ag_iterator_flags(
926 struct xfs_mount *mp,
927 int (*execute)(struct xfs_inode *ip, int flags,
928 void *args),
929 int flags,
930 void *args,
931 int iter_flags)
932{
933 struct xfs_perag *pag;
934 int error = 0;
935 int last_error = 0;
936 xfs_agnumber_t ag;
937
938 ag = 0;
939 while ((pag = xfs_perag_get(mp, ag))) {
940 ag = pag->pag_agno + 1;
941 error = xfs_inode_ag_walk(mp, pag, execute, flags, args, -1,
942 iter_flags);
943 xfs_perag_put(pag);
944 if (error) {
945 last_error = error;
946 if (error == -EFSCORRUPTED)
947 break;
948 }
949 }
950 return last_error;
951}
952
953int
954xfs_inode_ag_iterator(
955 struct xfs_mount *mp,
956 int (*execute)(struct xfs_inode *ip, int flags,
957 void *args),
958 int flags,
959 void *args)
960{
961 return xfs_inode_ag_iterator_flags(mp, execute, flags, args, 0);
962}
963
964int
965xfs_inode_ag_iterator_tag(
966 struct xfs_mount *mp,
967 int (*execute)(struct xfs_inode *ip, int flags,
968 void *args),
969 int flags,
970 void *args,
971 int tag)
972{
973 struct xfs_perag *pag;
974 int error = 0;
975 int last_error = 0;
976 xfs_agnumber_t ag;
977
978 ag = 0;
979 while ((pag = xfs_perag_get_tag(mp, ag, tag))) {
980 ag = pag->pag_agno + 1;
981 error = xfs_inode_ag_walk(mp, pag, execute, flags, args, tag,
982 0);
983 xfs_perag_put(pag);
984 if (error) {
985 last_error = error;
986 if (error == -EFSCORRUPTED)
987 break;
988 }
989 }
990 return last_error;
991}
992
993/*
994 * Grab the inode for reclaim exclusively.
995 * Return 0 if we grabbed it, non-zero otherwise.
996 */
997STATIC int
998xfs_reclaim_inode_grab(
999 struct xfs_inode *ip,
1000 int flags)
1001{
1002 ASSERT(rcu_read_lock_held());
1003
1004 /* quick check for stale RCU freed inode */
1005 if (!ip->i_ino)
1006 return 1;
1007
1008 /*
1009 * If we are asked for non-blocking operation, do unlocked checks to
1010 * see if the inode already is being flushed or in reclaim to avoid
1011 * lock traffic.
1012 */
1013 if ((flags & SYNC_TRYLOCK) &&
1014 __xfs_iflags_test(ip, XFS_IFLOCK | XFS_IRECLAIM))
1015 return 1;
1016
1017 /*
1018 * The radix tree lock here protects a thread in xfs_iget from racing
1019 * with us starting reclaim on the inode. Once we have the
1020 * XFS_IRECLAIM flag set it will not touch us.
1021 *
1022 * Due to RCU lookup, we may find inodes that have been freed and only
1023 * have XFS_IRECLAIM set. Indeed, we may see reallocated inodes that
1024 * aren't candidates for reclaim at all, so we must check that
1025 * XFS_IRECLAIMABLE is set first before proceeding to reclaim.
1026 */
1027 spin_lock(&ip->i_flags_lock);
1028 if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
1029 __xfs_iflags_test(ip, XFS_IRECLAIM)) {
1030 /* not a reclaim candidate. */
1031 spin_unlock(&ip->i_flags_lock);
1032 return 1;
1033 }
1034 __xfs_iflags_set(ip, XFS_IRECLAIM);
1035 spin_unlock(&ip->i_flags_lock);
1036 return 0;
1037}
1038
1039/*
1040 * Inodes in different states need to be treated differently. The following
1041 * table lists the inode states and the reclaim actions necessary:
1042 *
1043 * inode state iflush ret required action
1044 * --------------- ---------- ---------------
1045 * bad - reclaim
1046 * shutdown EIO unpin and reclaim
1047 * clean, unpinned 0 reclaim
1048 * stale, unpinned 0 reclaim
1049 * clean, pinned(*) 0 requeue
1050 * stale, pinned EAGAIN requeue
1051 * dirty, async - requeue
1052 * dirty, sync 0 reclaim
1053 *
1054 * (*) dgc: I don't think the clean, pinned state is possible but it gets
1055 * handled anyway given the order of checks implemented.
1056 *
1057 * Also, because we get the flush lock first, we know that any inode that has
1058 * been flushed delwri has had the flush completed by the time we check that
1059 * the inode is clean.
1060 *
1061 * Note that because the inode is flushed delayed write by AIL pushing, the
1062 * flush lock may already be held here and waiting on it can result in very
1063 * long latencies. Hence for sync reclaims, where we wait on the flush lock,
1064 * the caller should push the AIL first before trying to reclaim inodes to
1065 * minimise the amount of time spent waiting. For background reclaim, we only
1066 * bother to reclaim clean inodes anyway.
1067 *
1068 * Hence the order of actions after gaining the locks should be:
1069 * bad => reclaim
1070 * shutdown => unpin and reclaim
1071 * pinned, async => requeue
1072 * pinned, sync => unpin
1073 * stale => reclaim
1074 * clean => reclaim
1075 * dirty, async => requeue
1076 * dirty, sync => flush, wait and reclaim
1077 */
1078STATIC int
1079xfs_reclaim_inode(
1080 struct xfs_inode *ip,
1081 struct xfs_perag *pag,
1082 int sync_mode)
1083{
1084 struct xfs_buf *bp = NULL;
1085 xfs_ino_t ino = ip->i_ino; /* for radix_tree_delete */
1086 int error;
1087
1088restart:
1089 error = 0;
1090 xfs_ilock(ip, XFS_ILOCK_EXCL);
1091 if (!xfs_iflock_nowait(ip)) {
1092 if (!(sync_mode & SYNC_WAIT))
1093 goto out;
1094 xfs_iflock(ip);
1095 }
1096
1097 if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
1098 xfs_iunpin_wait(ip);
1099 /* xfs_iflush_abort() drops the flush lock */
1100 xfs_iflush_abort(ip, false);
1101 goto reclaim;
1102 }
1103 if (xfs_ipincount(ip)) {
1104 if (!(sync_mode & SYNC_WAIT))
1105 goto out_ifunlock;
1106 xfs_iunpin_wait(ip);
1107 }
1108 if (xfs_iflags_test(ip, XFS_ISTALE) || xfs_inode_clean(ip)) {
1109 xfs_ifunlock(ip);
1110 goto reclaim;
1111 }
1112
1113 /*
1114 * Never flush out dirty data during non-blocking reclaim, as it would
1115 * just contend with AIL pushing trying to do the same job.
1116 */
1117 if (!(sync_mode & SYNC_WAIT))
1118 goto out_ifunlock;
1119
1120 /*
1121 * Now we have an inode that needs flushing.
1122 *
1123 * Note that xfs_iflush will never block on the inode buffer lock, as
1124 * xfs_ifree_cluster() can lock the inode buffer before it locks the
1125 * ip->i_lock, and we are doing the exact opposite here. As a result,
1126 * doing a blocking xfs_imap_to_bp() to get the cluster buffer would
1127 * result in an ABBA deadlock with xfs_ifree_cluster().
1128 *
1129 * As xfs_ifree_cluster() must gather all inodes that are active in the
1130 * cache to mark them stale, if we hit this case we don't actually want
1131 * to do IO here - we want the inode marked stale so we can simply
1132 * reclaim it. Hence if we get an EAGAIN error here, just unlock the
1133 * inode, back off and try again. Hopefully the next pass through will
1134 * see the stale flag set on the inode.
1135 */
1136 error = xfs_iflush(ip, &bp);
1137 if (error == -EAGAIN) {
1138 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1139 /* backoff longer than in xfs_ifree_cluster */
1140 delay(2);
1141 goto restart;
1142 }
1143
1144 if (!error) {
1145 error = xfs_bwrite(bp);
1146 xfs_buf_relse(bp);
1147 }
1148
1149reclaim:
1150 ASSERT(!xfs_isiflocked(ip));
1151
1152 /*
1153 * Because we use RCU freeing we need to ensure the inode always appears
1154 * to be reclaimed with an invalid inode number when in the free state.
1155 * We do this as early as possible under the ILOCK so that
1156 * xfs_iflush_cluster() and xfs_ifree_cluster() can be guaranteed to
1157 * detect races with us here. By doing this, we guarantee that once
1158 * xfs_iflush_cluster() or xfs_ifree_cluster() has locked XFS_ILOCK that
1159 * it will see either a valid inode that will serialise correctly, or it
1160 * will see an invalid inode that it can skip.
1161 */
1162 spin_lock(&ip->i_flags_lock);
1163 ip->i_flags = XFS_IRECLAIM;
1164 ip->i_ino = 0;
1165 spin_unlock(&ip->i_flags_lock);
1166
1167 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1168
1169 XFS_STATS_INC(ip->i_mount, xs_ig_reclaims);
1170 /*
1171 * Remove the inode from the per-AG radix tree.
1172 *
1173 * Because radix_tree_delete won't complain even if the item was never
1174 * added to the tree, assert that it was there before to catch
1175 * problems with the inode lifetime early on.
1176 */
1177 spin_lock(&pag->pag_ici_lock);
1178 if (!radix_tree_delete(&pag->pag_ici_root,
1179 XFS_INO_TO_AGINO(ip->i_mount, ino)))
1180 ASSERT(0);
1181 xfs_perag_clear_reclaim_tag(pag);
1182 spin_unlock(&pag->pag_ici_lock);
1183
1184 /*
1185 * Here we do an (almost) spurious inode lock in order to coordinate
1186 * with inode cache radix tree lookups. This is because the lookup
1187 * can reference the inodes in the cache without taking references.
1188 *
1189 * We make that OK here by ensuring that we wait until the inode is
1190 * unlocked after the lookup before we go ahead and free it.
1191 */
1192 xfs_ilock(ip, XFS_ILOCK_EXCL);
1193 xfs_qm_dqdetach(ip);
1194 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1195
1196 __xfs_inode_free(ip);
1197 return error;
1198
1199out_ifunlock:
1200 xfs_ifunlock(ip);
1201out:
1202 xfs_iflags_clear(ip, XFS_IRECLAIM);
1203 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1204 /*
1205 * We could return -EAGAIN here to make reclaim rescan the inode tree in
1206 * a short while. However, this just burns CPU time scanning the tree
1207 * waiting for IO to complete and the reclaim work never goes back to
1208 * the idle state. Instead, return 0 to let the next scheduled
1209 * background reclaim attempt to reclaim the inode again.
1210 */
1211 return 0;
1212}
1213
1214/*
1215 * Walk the AGs and reclaim the inodes in them. Even if the filesystem is
1216 * corrupted, we still want to try to reclaim all the inodes. If we don't,
1217 * then a shutdown during the filesystem unmount reclaim walk would leak all the
1218 * unreclaimed inodes.
1219 */
1220STATIC int
1221xfs_reclaim_inodes_ag(
1222 struct xfs_mount *mp,
1223 int flags,
1224 int *nr_to_scan)
1225{
1226 struct xfs_perag *pag;
1227 int error = 0;
1228 int last_error = 0;
1229 xfs_agnumber_t ag;
1230 int trylock = flags & SYNC_TRYLOCK;
1231 int skipped;
1232
1233restart:
1234 ag = 0;
1235 skipped = 0;
1236 while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
1237 unsigned long first_index = 0;
1238 int done = 0;
1239 int nr_found = 0;
1240
1241 ag = pag->pag_agno + 1;
1242
1243 if (trylock) {
1244 if (!mutex_trylock(&pag->pag_ici_reclaim_lock)) {
1245 skipped++;
1246 xfs_perag_put(pag);
1247 continue;
1248 }
1249 first_index = pag->pag_ici_reclaim_cursor;
1250 } else
1251 mutex_lock(&pag->pag_ici_reclaim_lock);
1252
1253 do {
1254 struct xfs_inode *batch[XFS_LOOKUP_BATCH];
1255 int i;
1256
1257 rcu_read_lock();
1258 nr_found = radix_tree_gang_lookup_tag(
1259 &pag->pag_ici_root,
1260 (void **)batch, first_index,
1261 XFS_LOOKUP_BATCH,
1262 XFS_ICI_RECLAIM_TAG);
1263 if (!nr_found) {
1264 done = 1;
1265 rcu_read_unlock();
1266 break;
1267 }
1268
1269 /*
1270 * Grab the inodes before we drop the lock. if we found
1271 * nothing, nr == 0 and the loop will be skipped.
1272 */
1273 for (i = 0; i < nr_found; i++) {
1274 struct xfs_inode *ip = batch[i];
1275
1276 if (done || xfs_reclaim_inode_grab(ip, flags))
1277 batch[i] = NULL;
1278
1279 /*
1280 * Update the index for the next lookup. Catch
1281 * overflows into the next AG range which can
1282 * occur if we have inodes in the last block of
1283 * the AG and we are currently pointing to the
1284 * last inode.
1285 *
1286 * Because we may see inodes that are from the
1287 * wrong AG due to RCU freeing and
1288 * reallocation, only update the index if it
1289 * lies in this AG. It was a race that led us
1290 * to see this inode, so another lookup from
1291 * the same index will not find it again.
1292 */
1293 if (XFS_INO_TO_AGNO(mp, ip->i_ino) !=
1294 pag->pag_agno)
1295 continue;
1296 first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
1297 if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
1298 done = 1;
1299 }
1300
1301 /* unlock now we've grabbed the inodes. */
1302 rcu_read_unlock();
1303
1304 for (i = 0; i < nr_found; i++) {
1305 if (!batch[i])
1306 continue;
1307 error = xfs_reclaim_inode(batch[i], pag, flags);
1308 if (error && last_error != -EFSCORRUPTED)
1309 last_error = error;
1310 }
1311
1312 *nr_to_scan -= XFS_LOOKUP_BATCH;
1313
1314 cond_resched();
1315
1316 } while (nr_found && !done && *nr_to_scan > 0);
1317
1318 if (trylock && !done)
1319 pag->pag_ici_reclaim_cursor = first_index;
1320 else
1321 pag->pag_ici_reclaim_cursor = 0;
1322 mutex_unlock(&pag->pag_ici_reclaim_lock);
1323 xfs_perag_put(pag);
1324 }
1325
1326 /*
1327 * If we skipped any AG, and we still have scan count remaining, do
1328 * another pass this time using blocking reclaim semantics (i.e.
1329 * waiting on the reclaim locks and ignoring the reclaim cursors). This
1330 * ensures that when we get more reclaimers than AGs we block rather
1331 * than spin trying to execute reclaim.
1332 */
1333 if (skipped && (flags & SYNC_WAIT) && *nr_to_scan > 0) {
1334 trylock = 0;
1335 goto restart;
1336 }
1337 return last_error;
1338}
1339
1340int
1341xfs_reclaim_inodes(
1342 xfs_mount_t *mp,
1343 int mode)
1344{
1345 int nr_to_scan = INT_MAX;
1346
1347 return xfs_reclaim_inodes_ag(mp, mode, &nr_to_scan);
1348}
1349
1350/*
1351 * Scan a certain number of inodes for reclaim.
1352 *
1353 * When called we make sure that there is a background (fast) inode reclaim in
1354 * progress, while we will throttle the speed of reclaim via doing synchronous
1355 * reclaim of inodes. That means if we come across dirty inodes, we wait for
1356 * them to be cleaned, which we hope will not be very long due to the
1357 * background walker having already kicked the IO off on those dirty inodes.
1358 */
1359long
1360xfs_reclaim_inodes_nr(
1361 struct xfs_mount *mp,
1362 int nr_to_scan)
1363{
1364 /* kick background reclaimer and push the AIL */
1365 xfs_reclaim_work_queue(mp);
1366 xfs_ail_push_all(mp->m_ail);
1367
1368 return xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK | SYNC_WAIT, &nr_to_scan);
1369}
1370
1371/*
1372 * Return the number of reclaimable inodes in the filesystem for
1373 * the shrinker to determine how much to reclaim.
1374 */
1375int
1376xfs_reclaim_inodes_count(
1377 struct xfs_mount *mp)
1378{
1379 struct xfs_perag *pag;
1380 xfs_agnumber_t ag = 0;
1381 int reclaimable = 0;
1382
1383 while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
1384 ag = pag->pag_agno + 1;
1385 reclaimable += pag->pag_ici_reclaimable;
1386 xfs_perag_put(pag);
1387 }
1388 return reclaimable;
1389}
1390
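/*
 * An intersection-based inode filter: the inode matches only if every
 * uid/gid/prid criterion set in the eofblocks control structure matches.
 * Returns 1 on a match, 0 otherwise.
 */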
1391STATIC int
1392xfs_inode_match_id(
1393 struct xfs_inode *ip,
1394 struct xfs_eofblocks *eofb)
1395{
1396 if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) &&
1397 !uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid))
1398 return 0;
1399
1400 if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) &&
1401 !gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid))
1402 return 0;
1403
1404 if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
1405 xfs_get_projid(ip) != eofb->eof_prid)
1406 return 0;
1407
1408 return 1;
1409}
1410
1411/*
1412 * A union-based inode filtering algorithm. Process the inode if any of the
1413 * criteria match. This is for global/internal scans only.
1414 */
1415STATIC int
1416xfs_inode_match_id_union(
1417 struct xfs_inode *ip,
1418 struct xfs_eofblocks *eofb)
1419{
1420 if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) &&
1421 uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid))
1422 return 1;
1423
1424 if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) &&
1425 gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid))
1426 return 1;
1427
1428 if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
1429 xfs_get_projid(ip) == eofb->eof_prid)
1430 return 1;
1431
1432 return 0;
1433}
1434
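/*
 * Per-inode callback for the EOF blocks scanner: trim post-EOF
 * preallocations from an inode if it passes the ID and size filters.
 * Returns -EAGAIN when a sync scan cannot get the IOLOCK, so the inode
 * is revisited on a later pass.
 */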
1435STATIC int
1436xfs_inode_free_eofblocks(
1437 struct xfs_inode *ip,
1438 int flags,
1439 void *args)
1440{
1441 int ret = 0;
1442 struct xfs_eofblocks *eofb = args;
1443 int match;
1444
1445 if (!xfs_can_free_eofblocks(ip, false)) {
1446 /* inode could be preallocated or append-only */
1447 trace_xfs_inode_free_eofblocks_invalid(ip);
1448 xfs_inode_clear_eofblocks_tag(ip);
1449 return 0;
1450 }
1451
1452 /*
1453 * If the mapping is dirty the operation can block and wait for some
1454 * time. Unless we are waiting, skip it.
1455 */
1456 if (!(flags & SYNC_WAIT) &&
1457 mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY))
1458 return 0;
1459
1460 if (eofb) {
1461 if (eofb->eof_flags & XFS_EOF_FLAGS_UNION)
1462 match = xfs_inode_match_id_union(ip, eofb);
1463 else
1464 match = xfs_inode_match_id(ip, eofb);
1465 if (!match)
1466 return 0;
1467
1468 /* skip the inode if the file size is too small */
1469 if (eofb->eof_flags & XFS_EOF_FLAGS_MINFILESIZE &&
1470 XFS_ISIZE(ip) < eofb->eof_min_file_size)
1471 return 0;
1472 }
1473
1474 /*
1475 * If the caller is waiting, return -EAGAIN to keep the background
1476 * scanner moving and revisit the inode in a subsequent pass.
1477 */
1478 if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
1479 if (flags & SYNC_WAIT)
1480 ret = -EAGAIN;
1481 return ret;
1482 }
1483 ret = xfs_free_eofblocks(ip);
1484 xfs_iunlock(ip, XFS_IOLOCK_EXCL);
1485
1486 return ret;
1487}
1488
1489static int
1490__xfs_icache_free_eofblocks(
1491 struct xfs_mount *mp,
1492 struct xfs_eofblocks *eofb,
1493 int (*execute)(struct xfs_inode *ip, int flags,
1494 void *args),
1495 int tag)
1496{
1497 int flags = SYNC_TRYLOCK;
1498
1499 if (eofb && (eofb->eof_flags & XFS_EOF_FLAGS_SYNC))
1500 flags = SYNC_WAIT;
1501
1502 return xfs_inode_ag_iterator_tag(mp, execute, flags,
1503 eofb, tag);
1504}
1505
1506int
1507xfs_icache_free_eofblocks(
1508 struct xfs_mount *mp,
1509 struct xfs_eofblocks *eofb)
1510{
1511 return __xfs_icache_free_eofblocks(mp, eofb, xfs_inode_free_eofblocks,
1512 XFS_ICI_EOFBLOCKS_TAG);
1513}
1514
1515/*
1516 * Run eofblocks scans on the quotas applicable to the inode. For inodes with
1517 * multiple quotas, we don't know exactly which quota caused an allocation
1518 * failure. We make a best effort by including each quota under low free space
1519 * conditions (less than 1% free space) in the scan.
1520 */
1521static int
1522__xfs_inode_free_quota_eofblocks(
1523 struct xfs_inode *ip,
1524 int (*execute)(struct xfs_mount *mp,
1525 struct xfs_eofblocks *eofb))
1526{
1527 int scan = 0;
1528 struct xfs_eofblocks eofb = {0};
1529 struct xfs_dquot *dq;
1530
1531 /*
1532 * Run a sync scan to increase effectiveness and use the union filter to
1533 * cover all applicable quotas in a single scan.
1534 */
1535 eofb.eof_flags = XFS_EOF_FLAGS_UNION|XFS_EOF_FLAGS_SYNC;
1536
1537 if (XFS_IS_UQUOTA_ENFORCED(ip->i_mount)) {
1538 dq = xfs_inode_dquot(ip, XFS_DQ_USER);
1539 if (dq && xfs_dquot_lowsp(dq)) {
1540 eofb.eof_uid = VFS_I(ip)->i_uid;
1541 eofb.eof_flags |= XFS_EOF_FLAGS_UID;
1542 scan = 1;
1543 }
1544 }
1545
1546 if (XFS_IS_GQUOTA_ENFORCED(ip->i_mount)) {
1547 dq = xfs_inode_dquot(ip, XFS_DQ_GROUP);
1548 if (dq && xfs_dquot_lowsp(dq)) {
1549 eofb.eof_gid = VFS_I(ip)->i_gid;
1550 eofb.eof_flags |= XFS_EOF_FLAGS_GID;
1551 scan = 1;
1552 }
1553 }
1554
1555 if (scan)
1556 execute(ip->i_mount, &eofb);
1557
1558 return scan;
1559}
1560
1561int
1562xfs_inode_free_quota_eofblocks(
1563 struct xfs_inode *ip)
1564{
1565 return __xfs_inode_free_quota_eofblocks(ip, xfs_icache_free_eofblocks);
1566}
1567
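/* Map an inode cache radix tree tag to the matching in-core inode flag. */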
1568static inline unsigned long
1569xfs_iflag_for_tag(
1570 int tag)
1571{
1572 switch (tag) {
1573 case XFS_ICI_EOFBLOCKS_TAG:
1574 return XFS_IEOFBLOCKS;
1575 case XFS_ICI_COWBLOCKS_TAG:
1576 return XFS_ICOWBLOCKS;
1577 default:
1578 ASSERT(0);
1579 return 0;
1580 }
1581}
1582
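/*
 * Tag an inode in the per-AG radix tree for EOF blocks or CoW blocks
 * trimming. Setting the first tag in an AG also tags the per-mount perag
 * tree and kicks off the relevant background worker via @execute.
 */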
1583static void
1584__xfs_inode_set_blocks_tag(
1585 xfs_inode_t *ip,
1586 void (*execute)(struct xfs_mount *mp),
1587 void (*set_tp)(struct xfs_mount *mp, xfs_agnumber_t agno,
1588 int error, unsigned long caller_ip),
1589 int tag)
1590{
1591 struct xfs_mount *mp = ip->i_mount;
1592 struct xfs_perag *pag;
1593 int tagged;
1594
1595 /*
1596 * Don't bother locking the AG and looking up in the radix trees
1597 * if we already know that we have the tag set.
1598 */
1599 if (ip->i_flags & xfs_iflag_for_tag(tag))
1600 return;
1601 spin_lock(&ip->i_flags_lock);
1602 ip->i_flags |= xfs_iflag_for_tag(tag);
1603 spin_unlock(&ip->i_flags_lock);
1604
1605 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
1606 spin_lock(&pag->pag_ici_lock);
1607
1608 tagged = radix_tree_tagged(&pag->pag_ici_root, tag);
1609 radix_tree_tag_set(&pag->pag_ici_root,
1610 XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino), tag);
1611 if (!tagged) {
1612 /* propagate the eofblocks tag up into the perag radix tree */
1613 spin_lock(&ip->i_mount->m_perag_lock);
1614 radix_tree_tag_set(&ip->i_mount->m_perag_tree,
1615 XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
1616 tag);
1617 spin_unlock(&ip->i_mount->m_perag_lock);
1618
1619 /* kick off background trimming */
1620 execute(ip->i_mount);
1621
1622 set_tp(ip->i_mount, pag->pag_agno, -1, _RET_IP_);
1623 }
1624
1625 spin_unlock(&pag->pag_ici_lock);
1626 xfs_perag_put(pag);
1627}
1628
1629void
1630xfs_inode_set_eofblocks_tag(
1631 xfs_inode_t *ip)
1632{
1633 trace_xfs_inode_set_eofblocks_tag(ip);
1634 return __xfs_inode_set_blocks_tag(ip, xfs_queue_eofblocks,
1635 trace_xfs_perag_set_eofblocks,
1636 XFS_ICI_EOFBLOCKS_TAG);
1637}
1638
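/*
 * Remove an inode's EOF blocks or CoW blocks tag. If this was the last
 * tagged inode in the AG, the corresponding perag tree tag is cleared too.
 */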
1639static void
1640__xfs_inode_clear_blocks_tag(
1641 xfs_inode_t *ip,
1642 void (*clear_tp)(struct xfs_mount *mp, xfs_agnumber_t agno,
1643 int error, unsigned long caller_ip),
1644 int tag)
1645{
1646 struct xfs_mount *mp = ip->i_mount;
1647 struct xfs_perag *pag;
1648
1649 spin_lock(&ip->i_flags_lock);
1650 ip->i_flags &= ~xfs_iflag_for_tag(tag);
1651 spin_unlock(&ip->i_flags_lock);
1652
1653 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
1654 spin_lock(&pag->pag_ici_lock);
1655
1656 radix_tree_tag_clear(&pag->pag_ici_root,
1657 XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino), tag);
1658 if (!radix_tree_tagged(&pag->pag_ici_root, tag)) {
1659 /* clear the eofblocks tag from the perag radix tree */
1660 spin_lock(&ip->i_mount->m_perag_lock);
1661 radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
1662 XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
1663 tag);
1664 spin_unlock(&ip->i_mount->m_perag_lock);
1665 clear_tp(ip->i_mount, pag->pag_agno, -1, _RET_IP_);
1666 }
1667
1668 spin_unlock(&pag->pag_ici_lock);
1669 xfs_perag_put(pag);
1670}
1671
1672void
1673xfs_inode_clear_eofblocks_tag(
1674 xfs_inode_t *ip)
1675{
1676 trace_xfs_inode_clear_eofblocks_tag(ip);
1677 return __xfs_inode_clear_blocks_tag(ip,
1678 trace_xfs_perag_clear_eofblocks, XFS_ICI_EOFBLOCKS_TAG);
1679}
1680
1681/*
1682 * Set ourselves up to free CoW blocks from this file. If it's already clean
1683 * then we can bail out quickly, but otherwise we must back off if the file
1684 * is undergoing some kind of write.
1685 */
1686static bool
1687xfs_prep_free_cowblocks(
1688 struct xfs_inode *ip,
1689 struct xfs_ifork *ifp)
1690{
1691 /*
1692 * Just clear the tag if we have an empty cow fork or none at all. It's
1693 * possible the inode was fully unshared since it was originally tagged.
1694 */
1695 if (!xfs_is_reflink_inode(ip) || !ifp->if_bytes) {
1696 trace_xfs_inode_free_cowblocks_invalid(ip);
1697 xfs_inode_clear_cowblocks_tag(ip);
1698 return false;
1699 }
1700
1701 /*
1702 * If the mapping is dirty or under writeback we cannot touch the
1703 * CoW fork. Leave it alone if we're in the midst of a directio.
1704 */
1705 if ((VFS_I(ip)->i_state & I_DIRTY_PAGES) ||
1706 mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY) ||
1707 mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_WRITEBACK) ||
1708 atomic_read(&VFS_I(ip)->i_dio_count))
1709 return false;
1710
1711 return true;
1712}
1713
1714/*
1715 * Automatic CoW Reservation Freeing
1716 *
1717 * These functions automatically garbage collect leftover CoW reservations
1718 * that were made on behalf of a cowextsize hint when we start to run out
1719 * of quota or when the reservations sit around for too long. If the file
1720 * has dirty pages or is undergoing writeback, its CoW reservations will
1721 * be retained.
1722 *
1723 * The actual garbage collection piggybacks off the same code that runs
1724 * the speculative EOF preallocation garbage collector.
1725 */
1726STATIC int
1727xfs_inode_free_cowblocks(
1728 struct xfs_inode *ip,
1729 int flags,
1730 void *args)
1731{
1732 struct xfs_eofblocks *eofb = args;
1733 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
1734 int match;
1735 int ret = 0;
1736
1737 if (!xfs_prep_free_cowblocks(ip, ifp))
1738 return 0;
1739
1740 if (eofb) {
1741 if (eofb->eof_flags & XFS_EOF_FLAGS_UNION)
1742 match = xfs_inode_match_id_union(ip, eofb);
1743 else
1744 match = xfs_inode_match_id(ip, eofb);
1745 if (!match)
1746 return 0;
1747
1748 /* skip the inode if the file size is too small */
1749 if (eofb->eof_flags & XFS_EOF_FLAGS_MINFILESIZE &&
1750 XFS_ISIZE(ip) < eofb->eof_min_file_size)
1751 return 0;
1752 }
1753
1754 /* Free the CoW blocks */
1755 xfs_ilock(ip, XFS_IOLOCK_EXCL);
1756 xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
1757
1758 /*
1759 * Check again, nobody else should be able to dirty blocks or change
1760 * the reflink iflag now that we have the first two locks held.
1761 */
1762 if (xfs_prep_free_cowblocks(ip, ifp))
1763 ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, false);
1764
1765 xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
1766 xfs_iunlock(ip, XFS_IOLOCK_EXCL);
1767
1768 return ret;
1769}
1770
1771int
1772xfs_icache_free_cowblocks(
1773 struct xfs_mount *mp,
1774 struct xfs_eofblocks *eofb)
1775{
1776 return __xfs_icache_free_eofblocks(mp, eofb, xfs_inode_free_cowblocks,
1777 XFS_ICI_COWBLOCKS_TAG);
1778}
1779
1780int
1781xfs_inode_free_quota_cowblocks(
1782 struct xfs_inode *ip)
1783{
1784 return __xfs_inode_free_quota_eofblocks(ip, xfs_icache_free_cowblocks);
1785}
1786
1787void
1788xfs_inode_set_cowblocks_tag(
1789 xfs_inode_t *ip)
1790{
1791 trace_xfs_inode_set_cowblocks_tag(ip);
1792 return __xfs_inode_set_blocks_tag(ip, xfs_queue_cowblocks,
1793 trace_xfs_perag_set_cowblocks,
1794 XFS_ICI_COWBLOCKS_TAG);
1795}
1796
1797void
1798xfs_inode_clear_cowblocks_tag(
1799 xfs_inode_t *ip)
1800{
1801 trace_xfs_inode_clear_cowblocks_tag(ip);
1802 return __xfs_inode_clear_blocks_tag(ip,
1803 trace_xfs_perag_clear_cowblocks, XFS_ICI_COWBLOCKS_TAG);
1804}
1/*
2 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18#include "xfs.h"
19#include "xfs_fs.h"
20#include "xfs_format.h"
21#include "xfs_log_format.h"
22#include "xfs_trans_resv.h"
23#include "xfs_sb.h"
24#include "xfs_mount.h"
25#include "xfs_inode.h"
26#include "xfs_error.h"
27#include "xfs_trans.h"
28#include "xfs_trans_priv.h"
29#include "xfs_inode_item.h"
30#include "xfs_quota.h"
31#include "xfs_trace.h"
32#include "xfs_icache.h"
33#include "xfs_bmap_util.h"
34#include "xfs_dquot_item.h"
35#include "xfs_dquot.h"
36#include "xfs_reflink.h"
37
38#include <linux/kthread.h>
39#include <linux/freezer.h>
40
41/*
42 * Allocate and initialise an xfs_inode.
43 */
44struct xfs_inode *
45xfs_inode_alloc(
46 struct xfs_mount *mp,
47 xfs_ino_t ino)
48{
49 struct xfs_inode *ip;
50
51 /*
52 * if this didn't occur in transactions, we could use
53 * KM_MAYFAIL and return NULL here on ENOMEM. Set the
54 * code up to do this anyway.
55 */
56 ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP);
57 if (!ip)
58 return NULL;
59 if (inode_init_always(mp->m_super, VFS_I(ip))) {
60 kmem_zone_free(xfs_inode_zone, ip);
61 return NULL;
62 }
63
64 /* VFS doesn't initialise i_mode! */
65 VFS_I(ip)->i_mode = 0;
66
67 XFS_STATS_INC(mp, vn_active);
68 ASSERT(atomic_read(&ip->i_pincount) == 0);
69 ASSERT(!spin_is_locked(&ip->i_flags_lock));
70 ASSERT(!xfs_isiflocked(ip));
71 ASSERT(ip->i_ino == 0);
72
73 /* initialise the xfs inode */
74 ip->i_ino = ino;
75 ip->i_mount = mp;
76 memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
77 ip->i_afp = NULL;
78 ip->i_cowfp = NULL;
79 ip->i_cnextents = 0;
80 ip->i_cformat = XFS_DINODE_FMT_EXTENTS;
81 memset(&ip->i_df, 0, sizeof(xfs_ifork_t));
82 ip->i_flags = 0;
83 ip->i_delayed_blks = 0;
84 memset(&ip->i_d, 0, sizeof(ip->i_d));
85
86 return ip;
87}
88
89STATIC void
90xfs_inode_free_callback(
91 struct rcu_head *head)
92{
93 struct inode *inode = container_of(head, struct inode, i_rcu);
94 struct xfs_inode *ip = XFS_I(inode);
95
96 switch (VFS_I(ip)->i_mode & S_IFMT) {
97 case S_IFREG:
98 case S_IFDIR:
99 case S_IFLNK:
100 xfs_idestroy_fork(ip, XFS_DATA_FORK);
101 break;
102 }
103
104 if (ip->i_afp)
105 xfs_idestroy_fork(ip, XFS_ATTR_FORK);
106 if (ip->i_cowfp)
107 xfs_idestroy_fork(ip, XFS_COW_FORK);
108
109 if (ip->i_itemp) {
110 ASSERT(!(ip->i_itemp->ili_item.li_flags & XFS_LI_IN_AIL));
111 xfs_inode_item_destroy(ip);
112 ip->i_itemp = NULL;
113 }
114
115 kmem_zone_free(xfs_inode_zone, ip);
116}
117
118static void
119__xfs_inode_free(
120 struct xfs_inode *ip)
121{
122 /* asserts to verify all state is correct here */
123 ASSERT(atomic_read(&ip->i_pincount) == 0);
124 XFS_STATS_DEC(ip->i_mount, vn_active);
125
126 call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
127}
128
129void
130xfs_inode_free(
131 struct xfs_inode *ip)
132{
133 ASSERT(!xfs_isiflocked(ip));
134
135 /*
136 * Because we use RCU freeing we need to ensure the inode always
137 * appears to be reclaimed with an invalid inode number when in the
138 * free state. The ip->i_flags_lock provides the barrier against lookup
139 * races.
140 */
141 spin_lock(&ip->i_flags_lock);
142 ip->i_flags = XFS_IRECLAIM;
143 ip->i_ino = 0;
144 spin_unlock(&ip->i_flags_lock);
145
146 __xfs_inode_free(ip);
147}
148
149/*
150 * Queue a new inode reclaim pass if there are reclaimable inodes and there
151 * isn't a reclaim pass already in progress. By default it runs every 5s based
152 * on the xfs periodic sync default of 30s. Perhaps this should have it's own
153 * tunable, but that can be done if this method proves to be ineffective or too
154 * aggressive.
155 */
156static void
157xfs_reclaim_work_queue(
158 struct xfs_mount *mp)
159{
160
161 rcu_read_lock();
162 if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
163 queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work,
164 msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
165 }
166 rcu_read_unlock();
167}
168
169/*
170 * This is a fast pass over the inode cache to try to get reclaim moving on as
171 * many inodes as possible in a short period of time. It kicks itself every few
172 * seconds, as well as being kicked by the inode cache shrinker when memory
173 * goes low. It scans as quickly as possible avoiding locked inodes or those
174 * already being flushed, and once done schedules a future pass.
175 */
176void
177xfs_reclaim_worker(
178 struct work_struct *work)
179{
180 struct xfs_mount *mp = container_of(to_delayed_work(work),
181 struct xfs_mount, m_reclaim_work);
182
183 xfs_reclaim_inodes(mp, SYNC_TRYLOCK);
184 xfs_reclaim_work_queue(mp);
185}
186
187static void
188xfs_perag_set_reclaim_tag(
189 struct xfs_perag *pag)
190{
191 struct xfs_mount *mp = pag->pag_mount;
192
193 ASSERT(spin_is_locked(&pag->pag_ici_lock));
194 if (pag->pag_ici_reclaimable++)
195 return;
196
197 /* propagate the reclaim tag up into the perag radix tree */
198 spin_lock(&mp->m_perag_lock);
199 radix_tree_tag_set(&mp->m_perag_tree, pag->pag_agno,
200 XFS_ICI_RECLAIM_TAG);
201 spin_unlock(&mp->m_perag_lock);
202
203 /* schedule periodic background inode reclaim */
204 xfs_reclaim_work_queue(mp);
205
206 trace_xfs_perag_set_reclaim(mp, pag->pag_agno, -1, _RET_IP_);
207}
208
209static void
210xfs_perag_clear_reclaim_tag(
211 struct xfs_perag *pag)
212{
213 struct xfs_mount *mp = pag->pag_mount;
214
215 ASSERT(spin_is_locked(&pag->pag_ici_lock));
216 if (--pag->pag_ici_reclaimable)
217 return;
218
219 /* clear the reclaim tag from the perag radix tree */
220 spin_lock(&mp->m_perag_lock);
221 radix_tree_tag_clear(&mp->m_perag_tree, pag->pag_agno,
222 XFS_ICI_RECLAIM_TAG);
223 spin_unlock(&mp->m_perag_lock);
224 trace_xfs_perag_clear_reclaim(mp, pag->pag_agno, -1, _RET_IP_);
225}
226
227
228/*
229 * We set the inode flag atomically with the radix tree tag.
230 * Once we get tag lookups on the radix tree, this inode flag
231 * can go away.
232 */
233void
234xfs_inode_set_reclaim_tag(
235 struct xfs_inode *ip)
236{
237 struct xfs_mount *mp = ip->i_mount;
238 struct xfs_perag *pag;
239
240 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
241 spin_lock(&pag->pag_ici_lock);
242 spin_lock(&ip->i_flags_lock);
243
244 radix_tree_tag_set(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ip->i_ino),
245 XFS_ICI_RECLAIM_TAG);
246 xfs_perag_set_reclaim_tag(pag);
247 __xfs_iflags_set(ip, XFS_IRECLAIMABLE);
248
249 spin_unlock(&ip->i_flags_lock);
250 spin_unlock(&pag->pag_ici_lock);
251 xfs_perag_put(pag);
252}
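/*
 * For reference, the spinlock nesting used by xfs_inode_set_reclaim_tag()
 * above is (outermost first):
 *
 *	pag->pag_ici_lock	AG inode cache radix tree
 *	ip->i_flags_lock	inode flags / lookup barrier
 *	mp->m_perag_lock	per-mount perag radix tree, taken inside
 *				xfs_perag_set_reclaim_tag()
 *
 * This is a reading of the code above, not a separately documented rule.
 */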
253
254STATIC void
255xfs_inode_clear_reclaim_tag(
256 struct xfs_perag *pag,
257 xfs_ino_t ino)
258{
259 radix_tree_tag_clear(&pag->pag_ici_root,
260 XFS_INO_TO_AGINO(pag->pag_mount, ino),
261 XFS_ICI_RECLAIM_TAG);
262 xfs_perag_clear_reclaim_tag(pag);
263}
264
265/*
266 * When we recycle a reclaimable inode, we need to re-initialise the VFS inode
267 * part of the structure. This is made more complex by the fact we store
268 * information about the on-disk values in the VFS inode and so we can't just
269 * overwrite the values unconditionally. Hence we save the parameters we
270 * need to retain across reinitialisation, and rewrite them into the VFS inode
271 * after reinitialisation even if it fails.
272 */
273static int
274xfs_reinit_inode(
275 struct xfs_mount *mp,
276 struct inode *inode)
277{
278 int error;
279 uint32_t nlink = inode->i_nlink;
280 uint32_t generation = inode->i_generation;
281 uint64_t version = inode_peek_iversion(inode);
282 umode_t mode = inode->i_mode;
283
284 error = inode_init_always(mp->m_super, inode);
285
286 set_nlink(inode, nlink);
287 inode->i_generation = generation;
288 inode_set_iversion_queried(inode, version);
289 inode->i_mode = mode;
290 return error;
291}
292
293/*
294 * Check the validity of the inode we just found in the cache
295 */
296static int
297xfs_iget_cache_hit(
298 struct xfs_perag *pag,
299 struct xfs_inode *ip,
300 xfs_ino_t ino,
301 int flags,
302 int lock_flags) __releases(RCU)
303{
304 struct inode *inode = VFS_I(ip);
305 struct xfs_mount *mp = ip->i_mount;
306 int error;
307
308 /*
309 * check for re-use of an inode within an RCU grace period due to the
310 * radix tree nodes not being updated yet. We monitor for this by
311 * setting the inode number to zero before freeing the inode structure.
312 * If the inode has been reallocated and set up, then the inode number
313 * will not match, so check for that, too.
314 */
315 spin_lock(&ip->i_flags_lock);
316 if (ip->i_ino != ino) {
317 trace_xfs_iget_skip(ip);
318 XFS_STATS_INC(mp, xs_ig_frecycle);
319 error = -EAGAIN;
320 goto out_error;
321 }
322
323
324 /*
325 * If we are racing with another cache hit that is currently
326 * instantiating this inode or currently recycling it out of
327 * reclaimable state, wait for the initialisation to complete
328 * before continuing.
329 *
330 * XXX(hch): eventually we should do something equivalent to
331 * wait_on_inode to wait for these flags to be cleared
332 * instead of polling for it.
333 */
334 if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) {
335 trace_xfs_iget_skip(ip);
336 XFS_STATS_INC(mp, xs_ig_frecycle);
337 error = -EAGAIN;
338 goto out_error;
339 }
340
341 /*
342 * If lookup is racing with unlink return an error immediately.
343 */
344 if (VFS_I(ip)->i_mode == 0 && !(flags & XFS_IGET_CREATE)) {
345 error = -ENOENT;
346 goto out_error;
347 }
348
349 /*
350 * If IRECLAIMABLE is set, we've torn down the VFS inode already.
351 * We need to carefully get it back into a usable state.
352 */
353 if (ip->i_flags & XFS_IRECLAIMABLE) {
354 trace_xfs_iget_reclaim(ip);
355
356 /*
357 * We need to set XFS_IRECLAIM to prevent xfs_reclaim_inode
358 * from stomping over us while we recycle the inode. We can't
359 * clear the radix tree reclaimable tag yet as it requires
360 * pag_ici_lock to be held exclusive.
361 */
362 ip->i_flags |= XFS_IRECLAIM;
363
364 spin_unlock(&ip->i_flags_lock);
365 rcu_read_unlock();
366
367 error = xfs_reinit_inode(mp, inode);
368 if (error) {
369 /*
370 * Re-initializing the inode failed, and we are in deep
371 * trouble. Try to re-add it to the reclaim list.
372 */
373 rcu_read_lock();
374 spin_lock(&ip->i_flags_lock);
375
376 ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
377 ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
378 trace_xfs_iget_reclaim_fail(ip);
379 goto out_error;
380 }
381
382 spin_lock(&pag->pag_ici_lock);
383 spin_lock(&ip->i_flags_lock);
384
385 /*
386 * Clear the per-lifetime state in the inode as we are now
387 * effectively a new inode and need to return to the initial
388 * state before reuse occurs.
389 */
390 ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
391 ip->i_flags |= XFS_INEW;
392 xfs_inode_clear_reclaim_tag(pag, ip->i_ino);
393 inode->i_state = I_NEW;
394
395 ASSERT(!rwsem_is_locked(&inode->i_rwsem));
396 init_rwsem(&inode->i_rwsem);
397
398 spin_unlock(&ip->i_flags_lock);
399 spin_unlock(&pag->pag_ici_lock);
400 } else {
401 /* If the VFS inode is being torn down, pause and try again. */
402 if (!igrab(inode)) {
403 trace_xfs_iget_skip(ip);
404 error = -EAGAIN;
405 goto out_error;
406 }
407
408 /* We've got a live one. */
409 spin_unlock(&ip->i_flags_lock);
410 rcu_read_unlock();
411 trace_xfs_iget_hit(ip);
412 }
413
414 if (lock_flags != 0)
415 xfs_ilock(ip, lock_flags);
416
417 xfs_iflags_clear(ip, XFS_ISTALE | XFS_IDONTCACHE);
418 XFS_STATS_INC(mp, xs_ig_found);
419
420 return 0;
421
422out_error:
423 spin_unlock(&ip->i_flags_lock);
424 rcu_read_unlock();
425 return error;
426}
427
428
429static int
430xfs_iget_cache_miss(
431 struct xfs_mount *mp,
432 struct xfs_perag *pag,
433 xfs_trans_t *tp,
434 xfs_ino_t ino,
435 struct xfs_inode **ipp,
436 int flags,
437 int lock_flags)
438{
439 struct xfs_inode *ip;
440 int error;
441 xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ino);
442 int iflags;
443
444 ip = xfs_inode_alloc(mp, ino);
445 if (!ip)
446 return -ENOMEM;
447
448 error = xfs_iread(mp, tp, ip, flags);
449 if (error)
450 goto out_destroy;
451
452 trace_xfs_iget_miss(ip);
453
454 if ((VFS_I(ip)->i_mode == 0) && !(flags & XFS_IGET_CREATE)) {
455 error = -ENOENT;
456 goto out_destroy;
457 }
458
459 /*
460 * Preload the radix tree so we can insert safely under the
461 * write spinlock. Note that we cannot sleep inside the preload
462 * region. Since we can be called from transaction context, don't
463 * recurse into the file system.
464 */
465 if (radix_tree_preload(GFP_NOFS)) {
466 error = -EAGAIN;
467 goto out_destroy;
468 }
469
470 /*
471 * Because the inode hasn't been added to the radix-tree yet it can't
472 * be found by another thread, so we can do the non-sleeping lock here.
473 */
474 if (lock_flags) {
475 if (!xfs_ilock_nowait(ip, lock_flags))
476 BUG();
477 }
478
479 /*
480 * These values must be set before inserting the inode into the radix
481 * tree, because the moment it is inserted a concurrent lookup (allowed by the
482 * RCU locking mechanism) can find it, and that lookup must see that this
483 * is an inode currently under construction (i.e. that XFS_INEW is set).
484 * The ip->i_flags_lock that protects the XFS_INEW flag forms the
485 * memory barrier that ensures this detection works correctly at lookup
486 * time.
487 */
488 iflags = XFS_INEW;
489 if (flags & XFS_IGET_DONTCACHE)
490 iflags |= XFS_IDONTCACHE;
491 ip->i_udquot = NULL;
492 ip->i_gdquot = NULL;
493 ip->i_pdquot = NULL;
494 xfs_iflags_set(ip, iflags);
495
496 /* insert the new inode */
497 spin_lock(&pag->pag_ici_lock);
498 error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
499 if (unlikely(error)) {
500 WARN_ON(error != -EEXIST);
501 XFS_STATS_INC(mp, xs_ig_dup);
502 error = -EAGAIN;
503 goto out_preload_end;
504 }
505 spin_unlock(&pag->pag_ici_lock);
506 radix_tree_preload_end();
507
508 *ipp = ip;
509 return 0;
510
511out_preload_end:
512 spin_unlock(&pag->pag_ici_lock);
513 radix_tree_preload_end();
514 if (lock_flags)
515 xfs_iunlock(ip, lock_flags);
516out_destroy:
517 __destroy_inode(VFS_I(ip));
518 xfs_inode_free(ip);
519 return error;
520}
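/*
 * Simplified sketch (illustration only) of how the XFS_INEW publication in
 * xfs_iget_cache_miss() pairs with the check in xfs_iget_cache_hit():
 *
 *	insert side (cache miss)		lookup side (cache hit)
 *	------------------------		-----------------------
 *	spin_lock(&ip->i_flags_lock)		rcu_read_lock()
 *	ip->i_flags |= XFS_INEW			ip = radix_tree_lookup(...)
 *	spin_unlock(&ip->i_flags_lock)		spin_lock(&ip->i_flags_lock)
 *	radix_tree_insert(...)			if (ip->i_flags & XFS_INEW)
 *							error = -EAGAIN, retry
 *
 * The i_flags_lock acquire/release is what guarantees the lookup sees
 * XFS_INEW on any inode it can find in the tree, as described above.
 */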
521
522/*
523 * Look up an inode by number in the given file system.
524 * The inode is looked up in the cache held in each AG.
525 * If the inode is found in the cache, initialise the vfs inode
526 * if necessary.
527 *
528 * If it is not in core, read it in from the file system's device,
529 * add it to the cache and initialise the vfs inode.
530 *
531 * The inode is locked according to the value of the lock_flags parameter.
532 * This flag parameter indicates how and if the inode's IO lock and inode lock
533 * should be taken.
534 *
535 * mp -- the mount point structure for the current file system. It points
536 * to the inode hash table.
537 * tp -- a pointer to the current transaction if there is one. This is
538 * simply passed through to the xfs_iread() call.
539 * ino -- the number of the inode desired. This is the unique identifier
540 * within the file system for the inode being requested.
541 * lock_flags -- flags indicating how to lock the inode. See the comment
542 * for xfs_ilock() for a list of valid values.
543 */
544int
545xfs_iget(
546 xfs_mount_t *mp,
547 xfs_trans_t *tp,
548 xfs_ino_t ino,
549 uint flags,
550 uint lock_flags,
551 xfs_inode_t **ipp)
552{
553 xfs_inode_t *ip;
554 int error;
555 xfs_perag_t *pag;
556 xfs_agino_t agino;
557
558 /*
559 * xfs_reclaim_inode() uses the ILOCK to ensure an inode
560 * doesn't get freed while it's being referenced during a
561 * radix tree traversal here. It assumes this function
562 * acquires only the ILOCK (and therefore it has no need to
563 * involve the IOLOCK in this synchronization).
564 */
565 ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0);
566
567 /* reject inode numbers outside existing AGs */
568 if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
569 return -EINVAL;
570
571 XFS_STATS_INC(mp, xs_ig_attempts);
572
573 /* get the perag structure and ensure that it's inode capable */
574 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
575 agino = XFS_INO_TO_AGINO(mp, ino);
576
577again:
578 error = 0;
579 rcu_read_lock();
580 ip = radix_tree_lookup(&pag->pag_ici_root, agino);
581
582 if (ip) {
583 error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
584 if (error)
585 goto out_error_or_again;
586 } else {
587 rcu_read_unlock();
588 XFS_STATS_INC(mp, xs_ig_missed);
589
590 error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
591 flags, lock_flags);
592 if (error)
593 goto out_error_or_again;
594 }
595 xfs_perag_put(pag);
596
597 *ipp = ip;
598
599 /*
600 * If we have a real type for an on-disk inode, we can set up the inode
601 * now. If it's a new inode being created, xfs_ialloc will handle it.
602 */
603 if (xfs_iflags_test(ip, XFS_INEW) && VFS_I(ip)->i_mode != 0)
604 xfs_setup_existing_inode(ip);
605 return 0;
606
607out_error_or_again:
608 if (error == -EAGAIN) {
609 delay(1);
610 goto again;
611 }
612 xfs_perag_put(pag);
613 return error;
614}
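/*
 * Minimal usage sketch, assuming a caller outside transaction context that
 * only needs read access (illustrative only, error handling trimmed):
 *
 *	struct xfs_inode	*ip;
 *	int			error;
 *
 *	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip);
 *	if (error)
 *		return error;
 *	... read the inode under the shared ILOCK ...
 *	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 *	IRELE(ip);
 */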
615
616/*
617 * The inode lookup is done in batches to keep the amount of lock traffic and
618 * radix tree lookups to a minimum. The batch size is a trade-off between
619 * lookup reduction and stack usage. This is in the reclaim path, so we can't
620 * be too greedy.
621 */
622#define XFS_LOOKUP_BATCH 32
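/*
 * For example, walking an AG holding 10,000 cached inodes costs on the
 * order of 10,000 / 32 = ~313 RCU-protected gang lookups instead of
 * 10,000 individual lookups, at the cost of a 32-pointer batch array on
 * the walker's stack.
 */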
623
624STATIC int
625xfs_inode_ag_walk_grab(
626 struct xfs_inode *ip)
627{
628 struct inode *inode = VFS_I(ip);
629
630 ASSERT(rcu_read_lock_held());
631
632 /*
633 * check for stale RCU freed inode
634 *
635 * If the inode has been reallocated, it doesn't matter if it's not in
636 * the AG we are walking - we are walking for writeback, so if it
637 * passes all the "valid inode" checks and is dirty, then we'll write
638 * it back anyway. If it has been reallocated and is still being
639 * initialised, the XFS_INEW check below will catch it.
640 */
641 spin_lock(&ip->i_flags_lock);
642 if (!ip->i_ino)
643 goto out_unlock_noent;
644
645 /* avoid new or reclaimable inodes. Leave for reclaim code to flush */
646 if (__xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIMABLE | XFS_IRECLAIM))
647 goto out_unlock_noent;
648 spin_unlock(&ip->i_flags_lock);
649
650 /* nothing to sync during shutdown */
651 if (XFS_FORCED_SHUTDOWN(ip->i_mount))
652 return -EFSCORRUPTED;
653
654 /* If we can't grab the inode, it must be on its way to reclaim. */
655 if (!igrab(inode))
656 return -ENOENT;
657
658 /* inode is valid */
659 return 0;
660
661out_unlock_noent:
662 spin_unlock(&ip->i_flags_lock);
663 return -ENOENT;
664}
665
666STATIC int
667xfs_inode_ag_walk(
668 struct xfs_mount *mp,
669 struct xfs_perag *pag,
670 int (*execute)(struct xfs_inode *ip, int flags,
671 void *args),
672 int flags,
673 void *args,
674 int tag)
675{
676 uint32_t first_index;
677 int last_error = 0;
678 int skipped;
679 int done;
680 int nr_found;
681
682restart:
683 done = 0;
684 skipped = 0;
685 first_index = 0;
686 nr_found = 0;
687 do {
688 struct xfs_inode *batch[XFS_LOOKUP_BATCH];
689 int error = 0;
690 int i;
691
692 rcu_read_lock();
693
694 if (tag == -1)
695 nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
696 (void **)batch, first_index,
697 XFS_LOOKUP_BATCH);
698 else
699 nr_found = radix_tree_gang_lookup_tag(
700 &pag->pag_ici_root,
701 (void **) batch, first_index,
702 XFS_LOOKUP_BATCH, tag);
703
704 if (!nr_found) {
705 rcu_read_unlock();
706 break;
707 }
708
709 /*
710 * Grab the inodes before we drop the lock. If we found
711 * nothing, nr == 0 and the loop will be skipped.
712 */
713 for (i = 0; i < nr_found; i++) {
714 struct xfs_inode *ip = batch[i];
715
716 if (done || xfs_inode_ag_walk_grab(ip))
717 batch[i] = NULL;
718
719 /*
720 * Update the index for the next lookup. Catch
721 * overflows into the next AG range which can occur if
722 * we have inodes in the last block of the AG and we
723 * are currently pointing to the last inode.
724 *
725 * Because we may see inodes that are from the wrong AG
726 * due to RCU freeing and reallocation, only update the
727 * index if it lies in this AG. It was a race that led
728 * us to see this inode, so another lookup from the
729 * same index will not find it again.
730 */
731 if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
732 continue;
733 first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
734 if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
735 done = 1;
736 }
737
738 /* unlock now we've grabbed the inodes. */
739 rcu_read_unlock();
740
741 for (i = 0; i < nr_found; i++) {
742 if (!batch[i])
743 continue;
744 error = execute(batch[i], flags, args);
745 IRELE(batch[i]);
746 if (error == -EAGAIN) {
747 skipped++;
748 continue;
749 }
750 if (error && last_error != -EFSCORRUPTED)
751 last_error = error;
752 }
753
754 /* bail out if the filesystem is corrupted. */
755 if (error == -EFSCORRUPTED)
756 break;
757
758 cond_resched();
759
760 } while (nr_found && !done);
761
762 if (skipped) {
763 delay(1);
764 goto restart;
765 }
766 return last_error;
767}
768
769/*
770 * Background scanning to trim post-EOF preallocated space. This is queued
771 * based on the 'speculative_prealloc_lifetime' tunable (5m by default).
772 */
773void
774xfs_queue_eofblocks(
775 struct xfs_mount *mp)
776{
777 rcu_read_lock();
778 if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_EOFBLOCKS_TAG))
779 queue_delayed_work(mp->m_eofblocks_workqueue,
780 &mp->m_eofblocks_work,
781 msecs_to_jiffies(xfs_eofb_secs * 1000));
782 rcu_read_unlock();
783}
784
785void
786xfs_eofblocks_worker(
787 struct work_struct *work)
788{
789 struct xfs_mount *mp = container_of(to_delayed_work(work),
790 struct xfs_mount, m_eofblocks_work);
791 xfs_icache_free_eofblocks(mp, NULL);
792 xfs_queue_eofblocks(mp);
793}
794
795/*
796 * Background scanning to trim preallocated CoW space. This is queued
797 * based on the 'speculative_cow_prealloc_lifetime' tunable (5m by default).
798 * (We'll just piggyback on the post-EOF prealloc space workqueue.)
799 */
800STATIC void
801xfs_queue_cowblocks(
802 struct xfs_mount *mp)
803{
804 rcu_read_lock();
805 if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_COWBLOCKS_TAG))
806 queue_delayed_work(mp->m_eofblocks_workqueue,
807 &mp->m_cowblocks_work,
808 msecs_to_jiffies(xfs_cowb_secs * 1000));
809 rcu_read_unlock();
810}
811
812void
813xfs_cowblocks_worker(
814 struct work_struct *work)
815{
816 struct xfs_mount *mp = container_of(to_delayed_work(work),
817 struct xfs_mount, m_cowblocks_work);
818 xfs_icache_free_cowblocks(mp, NULL);
819 xfs_queue_cowblocks(mp);
820}
821
822int
823xfs_inode_ag_iterator(
824 struct xfs_mount *mp,
825 int (*execute)(struct xfs_inode *ip, int flags,
826 void *args),
827 int flags,
828 void *args)
829{
830 struct xfs_perag *pag;
831 int error = 0;
832 int last_error = 0;
833 xfs_agnumber_t ag;
834
835 ag = 0;
836 while ((pag = xfs_perag_get(mp, ag))) {
837 ag = pag->pag_agno + 1;
838 error = xfs_inode_ag_walk(mp, pag, execute, flags, args, -1);
839 xfs_perag_put(pag);
840 if (error) {
841 last_error = error;
842 if (error == -EFSCORRUPTED)
843 break;
844 }
845 }
846 return last_error;
847}
848
849int
850xfs_inode_ag_iterator_tag(
851 struct xfs_mount *mp,
852 int (*execute)(struct xfs_inode *ip, int flags,
853 void *args),
854 int flags,
855 void *args,
856 int tag)
857{
858 struct xfs_perag *pag;
859 int error = 0;
860 int last_error = 0;
861 xfs_agnumber_t ag;
862
863 ag = 0;
864 while ((pag = xfs_perag_get_tag(mp, ag, tag))) {
865 ag = pag->pag_agno + 1;
866 error = xfs_inode_ag_walk(mp, pag, execute, flags, args, tag);
867 xfs_perag_put(pag);
868 if (error) {
869 last_error = error;
870 if (error == -EFSCORRUPTED)
871 break;
872 }
873 }
874 return last_error;
875}
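/*
 * Illustrative sketch of an iterator callback; the callback name and its
 * argument here are hypothetical and do not exist in this file:
 *
 *	STATIC int
 *	example_execute(
 *		struct xfs_inode	*ip,
 *		int			flags,
 *		void			*args)
 *	{
 *		atomic_t		*counter = args;
 *
 *		atomic_inc(counter);
 *		return 0;
 *	}
 *
 *	atomic_t count = ATOMIC_INIT(0);
 *	error = xfs_inode_ag_iterator(mp, example_execute, 0, &count);
 *
 * A callback returning -EAGAIN makes xfs_inode_ag_walk() restart the whole
 * walk after a short delay; other errors are accumulated into last_error.
 */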
876
877/*
878 * Grab the inode for reclaim exclusively.
879 * Return 0 if we grabbed it, non-zero otherwise.
880 */
881STATIC int
882xfs_reclaim_inode_grab(
883 struct xfs_inode *ip,
884 int flags)
885{
886 ASSERT(rcu_read_lock_held());
887
888 /* quick check for stale RCU freed inode */
889 if (!ip->i_ino)
890 return 1;
891
892 /*
893 * If we are asked for non-blocking operation, do unlocked checks to
894 * see if the inode is already being flushed or in reclaim to avoid
895 * lock traffic.
896 */
897 if ((flags & SYNC_TRYLOCK) &&
898 __xfs_iflags_test(ip, XFS_IFLOCK | XFS_IRECLAIM))
899 return 1;
900
901 /*
902 * The radix tree lock here protects a thread in xfs_iget from racing
903 * with us starting reclaim on the inode. Once we have the
904 * XFS_IRECLAIM flag set it will not touch us.
905 *
906 * Due to RCU lookup, we may find inodes that have been freed and only
907 * have XFS_IRECLAIM set. Indeed, we may see reallocated inodes that
908 * aren't candidates for reclaim at all, so we must check that
909 * XFS_IRECLAIMABLE is set first before proceeding to reclaim.
910 */
911 spin_lock(&ip->i_flags_lock);
912 if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
913 __xfs_iflags_test(ip, XFS_IRECLAIM)) {
914 /* not a reclaim candidate. */
915 spin_unlock(&ip->i_flags_lock);
916 return 1;
917 }
918 __xfs_iflags_set(ip, XFS_IRECLAIM);
919 spin_unlock(&ip->i_flags_lock);
920 return 0;
921}
922
923/*
924 * Inodes in different states need to be treated differently. The following
925 * table lists the inode states and the reclaim actions necessary:
926 *
927 * inode state iflush ret required action
928 * --------------- ---------- ---------------
929 * bad - reclaim
930 * shutdown EIO unpin and reclaim
931 * clean, unpinned 0 reclaim
932 * stale, unpinned 0 reclaim
933 * clean, pinned(*) 0 requeue
934 * stale, pinned EAGAIN requeue
935 * dirty, async - requeue
936 * dirty, sync 0 reclaim
937 *
938 * (*) dgc: I don't think the clean, pinned state is possible but it gets
939 * handled anyway given the order of checks implemented.
940 *
941 * Also, because we get the flush lock first, we know that any inode that has
942 * been flushed delwri has had the flush completed by the time we check that
943 * the inode is clean.
944 *
945 * Note that because the inode is flushed delayed write by AIL pushing, the
946 * flush lock may already be held here and waiting on it can result in very
947 * long latencies. Hence for sync reclaims, where we wait on the flush lock,
948 * the caller should push the AIL first before trying to reclaim inodes to
949 * minimise the amount of time spent waiting. For background reclaim, we only
950 * bother to reclaim clean inodes anyway.
951 *
952 * Hence the order of actions after gaining the locks should be:
953 * bad => reclaim
954 * shutdown => unpin and reclaim
955 * pinned, async => requeue
956 * pinned, sync => unpin
957 * stale => reclaim
958 * clean => reclaim
959 * dirty, async => requeue
960 * dirty, sync => flush, wait and reclaim
961 */
962STATIC int
963xfs_reclaim_inode(
964 struct xfs_inode *ip,
965 struct xfs_perag *pag,
966 int sync_mode)
967{
968 struct xfs_buf *bp = NULL;
969 xfs_ino_t ino = ip->i_ino; /* for radix_tree_delete */
970 int error;
971
972restart:
973 error = 0;
974 xfs_ilock(ip, XFS_ILOCK_EXCL);
975 if (!xfs_iflock_nowait(ip)) {
976 if (!(sync_mode & SYNC_WAIT))
977 goto out;
978 xfs_iflock(ip);
979 }
980
981 if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
982 xfs_iunpin_wait(ip);
983 /* xfs_iflush_abort() drops the flush lock */
984 xfs_iflush_abort(ip, false);
985 goto reclaim;
986 }
987 if (xfs_ipincount(ip)) {
988 if (!(sync_mode & SYNC_WAIT))
989 goto out_ifunlock;
990 xfs_iunpin_wait(ip);
991 }
992 if (xfs_iflags_test(ip, XFS_ISTALE) || xfs_inode_clean(ip)) {
993 xfs_ifunlock(ip);
994 goto reclaim;
995 }
996
997 /*
998 * Never flush out dirty data during non-blocking reclaim, as it would
999 * just contend with AIL pushing trying to do the same job.
1000 */
1001 if (!(sync_mode & SYNC_WAIT))
1002 goto out_ifunlock;
1003
1004 /*
1005 * Now we have an inode that needs flushing.
1006 *
1007 * Note that xfs_iflush will never block on the inode buffer lock, as
1008 * xfs_ifree_cluster() can lock the inode buffer before it locks the
1009 * ip->i_lock, and we are doing the exact opposite here. As a result,
1010 * doing a blocking xfs_imap_to_bp() to get the cluster buffer would
1011 * result in an ABBA deadlock with xfs_ifree_cluster().
1012 *
1013 * As xfs_ifree_cluster() must gather all inodes that are active in the
1014 * cache to mark them stale, if we hit this case we don't actually want
1015 * to do IO here - we want the inode marked stale so we can simply
1016 * reclaim it. Hence if we get an EAGAIN error here, just unlock the
1017 * inode, back off and try again. Hopefully the next pass through will
1018 * see the stale flag set on the inode.
1019 */
1020 error = xfs_iflush(ip, &bp);
1021 if (error == -EAGAIN) {
1022 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1023 /* backoff longer than in xfs_ifree_cluster */
1024 delay(2);
1025 goto restart;
1026 }
1027
1028 if (!error) {
1029 error = xfs_bwrite(bp);
1030 xfs_buf_relse(bp);
1031 }
1032
1033reclaim:
1034 ASSERT(!xfs_isiflocked(ip));
1035
1036 /*
1037 * Because we use RCU freeing we need to ensure the inode always appears
1038 * to be reclaimed with an invalid inode number when in the free state.
1039 * We do this as early as possible under the ILOCK so that
1040 * xfs_iflush_cluster() can be guaranteed to detect races with us here.
1041 * By doing this, we guarantee that once xfs_iflush_cluster has locked
1042 * XFS_ILOCK that it will see either a valid, flushable inode that will
1043 * serialise correctly, or it will see a clean (and invalid) inode that
1044 * it can skip.
1045 */
1046 spin_lock(&ip->i_flags_lock);
1047 ip->i_flags = XFS_IRECLAIM;
1048 ip->i_ino = 0;
1049 spin_unlock(&ip->i_flags_lock);
1050
1051 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1052
1053 XFS_STATS_INC(ip->i_mount, xs_ig_reclaims);
1054 /*
1055 * Remove the inode from the per-AG radix tree.
1056 *
1057 * Because radix_tree_delete won't complain even if the item was never
1058 * added to the tree, assert that it was there before to catch
1059 * problems with the inode lifetime early on.
1060 */
1061 spin_lock(&pag->pag_ici_lock);
1062 if (!radix_tree_delete(&pag->pag_ici_root,
1063 XFS_INO_TO_AGINO(ip->i_mount, ino)))
1064 ASSERT(0);
1065 xfs_perag_clear_reclaim_tag(pag);
1066 spin_unlock(&pag->pag_ici_lock);
1067
1068 /*
1069 * Here we do an (almost) spurious inode lock in order to coordinate
1070 * with inode cache radix tree lookups. This is because the lookup
1071 * can reference the inodes in the cache without taking references.
1072 *
1073 * We make that OK here by ensuring that we wait until the inode is
1074 * unlocked after the lookup before we go ahead and free it.
1075 */
1076 xfs_ilock(ip, XFS_ILOCK_EXCL);
1077 xfs_qm_dqdetach(ip);
1078 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1079
1080 __xfs_inode_free(ip);
1081 return error;
1082
1083out_ifunlock:
1084 xfs_ifunlock(ip);
1085out:
1086 xfs_iflags_clear(ip, XFS_IRECLAIM);
1087 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1088 /*
1089 * We could return -EAGAIN here to make reclaim rescan the inode tree in
1090 * a short while. However, this just burns CPU time scanning the tree
1091 * waiting for IO to complete and the reclaim work never goes back to
1092 * the idle state. Instead, return 0 to let the next scheduled
1093 * background reclaim attempt to reclaim the inode again.
1094 */
1095 return 0;
1096}
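/*
 * As noted above, a SYNC_WAIT caller should push the AIL before reclaiming
 * so dirty inodes are already under IO by the time we wait on their flush
 * locks. A minimal sketch of that calling pattern, which is what
 * xfs_reclaim_inodes_nr() below implements:
 *
 *	xfs_reclaim_work_queue(mp);
 *	xfs_ail_push_all(mp->m_ail);
 *	xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK | SYNC_WAIT, &nr_to_scan);
 */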
1097
1098/*
1099 * Walk the AGs and reclaim the inodes in them. Even if the filesystem is
1100 * corrupted, we still want to try to reclaim all the inodes. If we don't,
1101 * then a shut down during filesystem unmount reclaim walk leak all the
1102 * unreclaimed inodes.
1103 */
1104STATIC int
1105xfs_reclaim_inodes_ag(
1106 struct xfs_mount *mp,
1107 int flags,
1108 int *nr_to_scan)
1109{
1110 struct xfs_perag *pag;
1111 int error = 0;
1112 int last_error = 0;
1113 xfs_agnumber_t ag;
1114 int trylock = flags & SYNC_TRYLOCK;
1115 int skipped;
1116
1117restart:
1118 ag = 0;
1119 skipped = 0;
1120 while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
1121 unsigned long first_index = 0;
1122 int done = 0;
1123 int nr_found = 0;
1124
1125 ag = pag->pag_agno + 1;
1126
1127 if (trylock) {
1128 if (!mutex_trylock(&pag->pag_ici_reclaim_lock)) {
1129 skipped++;
1130 xfs_perag_put(pag);
1131 continue;
1132 }
1133 first_index = pag->pag_ici_reclaim_cursor;
1134 } else
1135 mutex_lock(&pag->pag_ici_reclaim_lock);
1136
1137 do {
1138 struct xfs_inode *batch[XFS_LOOKUP_BATCH];
1139 int i;
1140
1141 rcu_read_lock();
1142 nr_found = radix_tree_gang_lookup_tag(
1143 &pag->pag_ici_root,
1144 (void **)batch, first_index,
1145 XFS_LOOKUP_BATCH,
1146 XFS_ICI_RECLAIM_TAG);
1147 if (!nr_found) {
1148 done = 1;
1149 rcu_read_unlock();
1150 break;
1151 }
1152
1153 /*
1154 * Grab the inodes before we drop the lock. If we found
1155 * nothing, nr == 0 and the loop will be skipped.
1156 */
1157 for (i = 0; i < nr_found; i++) {
1158 struct xfs_inode *ip = batch[i];
1159
1160 if (done || xfs_reclaim_inode_grab(ip, flags))
1161 batch[i] = NULL;
1162
1163 /*
1164 * Update the index for the next lookup. Catch
1165 * overflows into the next AG range which can
1166 * occur if we have inodes in the last block of
1167 * the AG and we are currently pointing to the
1168 * last inode.
1169 *
1170 * Because we may see inodes that are from the
1171 * wrong AG due to RCU freeing and
1172 * reallocation, only update the index if it
1173 * lies in this AG. It was a race that led us
1174 * to see this inode, so another lookup from
1175 * the same index will not find it again.
1176 */
1177 if (XFS_INO_TO_AGNO(mp, ip->i_ino) !=
1178 pag->pag_agno)
1179 continue;
1180 first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
1181 if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
1182 done = 1;
1183 }
1184
1185 /* unlock now we've grabbed the inodes. */
1186 rcu_read_unlock();
1187
1188 for (i = 0; i < nr_found; i++) {
1189 if (!batch[i])
1190 continue;
1191 error = xfs_reclaim_inode(batch[i], pag, flags);
1192 if (error && last_error != -EFSCORRUPTED)
1193 last_error = error;
1194 }
1195
1196 *nr_to_scan -= XFS_LOOKUP_BATCH;
1197
1198 cond_resched();
1199
1200 } while (nr_found && !done && *nr_to_scan > 0);
1201
1202 if (trylock && !done)
1203 pag->pag_ici_reclaim_cursor = first_index;
1204 else
1205 pag->pag_ici_reclaim_cursor = 0;
1206 mutex_unlock(&pag->pag_ici_reclaim_lock);
1207 xfs_perag_put(pag);
1208 }
1209
1210 /*
1211 * If we skipped any AG, and we still have scan count remaining, do
1212 * another pass this time using blocking reclaim semantics (i.e.
1213 * waiting on the reclaim locks and ignoring the reclaim cursors). This
1214 * ensures that when we get more reclaimers than AGs we block rather
1215 * than spin trying to execute reclaim.
1216 */
1217 if (skipped && (flags & SYNC_WAIT) && *nr_to_scan > 0) {
1218 trylock = 0;
1219 goto restart;
1220 }
1221 return last_error;
1222}
1223
1224int
1225xfs_reclaim_inodes(
1226 xfs_mount_t *mp,
1227 int mode)
1228{
1229 int nr_to_scan = INT_MAX;
1230
1231 return xfs_reclaim_inodes_ag(mp, mode, &nr_to_scan);
1232}
1233
1234/*
1235 * Scan a certain number of inodes for reclaim.
1236 *
1237 * When called we make sure that there is a background (fast) inode reclaim in
1238 * progress, while we throttle the speed of reclaim by doing synchronous
1239 * reclaim of inodes. That means if we come across dirty inodes, we wait for
1240 * them to be cleaned, which we hope will not be very long due to the
1241 * background walker having already kicked the IO off on those dirty inodes.
1242 */
1243long
1244xfs_reclaim_inodes_nr(
1245 struct xfs_mount *mp,
1246 int nr_to_scan)
1247{
1248 /* kick background reclaimer and push the AIL */
1249 xfs_reclaim_work_queue(mp);
1250 xfs_ail_push_all(mp->m_ail);
1251
1252 return xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK | SYNC_WAIT, &nr_to_scan);
1253}
1254
1255/*
1256 * Return the number of reclaimable inodes in the filesystem for
1257 * the shrinker to determine how much to reclaim.
1258 */
1259int
1260xfs_reclaim_inodes_count(
1261 struct xfs_mount *mp)
1262{
1263 struct xfs_perag *pag;
1264 xfs_agnumber_t ag = 0;
1265 int reclaimable = 0;
1266
1267 while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
1268 ag = pag->pag_agno + 1;
1269 reclaimable += pag->pag_ici_reclaimable;
1270 xfs_perag_put(pag);
1271 }
1272 return reclaimable;
1273}
1274
1275STATIC int
1276xfs_inode_match_id(
1277 struct xfs_inode *ip,
1278 struct xfs_eofblocks *eofb)
1279{
1280 if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) &&
1281 !uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid))
1282 return 0;
1283
1284 if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) &&
1285 !gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid))
1286 return 0;
1287
1288 if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
1289 xfs_get_projid(ip) != eofb->eof_prid)
1290 return 0;
1291
1292 return 1;
1293}
1294
1295/*
1296 * A union-based inode filtering algorithm. Process the inode if any of the
1297 * criteria match. This is for global/internal scans only.
1298 */
1299STATIC int
1300xfs_inode_match_id_union(
1301 struct xfs_inode *ip,
1302 struct xfs_eofblocks *eofb)
1303{
1304 if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) &&
1305 uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid))
1306 return 1;
1307
1308 if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) &&
1309 gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid))
1310 return 1;
1311
1312 if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
1313 xfs_get_projid(ip) == eofb->eof_prid)
1314 return 1;
1315
1316 return 0;
1317}
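/*
 * Example of the difference between the two filters above: with both
 * XFS_EOF_FLAGS_UID and XFS_EOF_FLAGS_GID set in eofb->eof_flags,
 * xfs_inode_match_id() only processes inodes whose uid AND gid both match,
 * while xfs_inode_match_id_union() processes an inode if either the uid OR
 * the gid matches. The union form is what the low-quota-space scans below
 * rely on.
 */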
1318
1319STATIC int
1320xfs_inode_free_eofblocks(
1321 struct xfs_inode *ip,
1322 int flags,
1323 void *args)
1324{
1325 int ret = 0;
1326 struct xfs_eofblocks *eofb = args;
1327 int match;
1328
1329 if (!xfs_can_free_eofblocks(ip, false)) {
1330 /* inode could be preallocated or append-only */
1331 trace_xfs_inode_free_eofblocks_invalid(ip);
1332 xfs_inode_clear_eofblocks_tag(ip);
1333 return 0;
1334 }
1335
1336 /*
1337 * If the mapping is dirty the operation can block and wait for some
1338 * time. Unless we are waiting, skip it.
1339 */
1340 if (!(flags & SYNC_WAIT) &&
1341 mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY))
1342 return 0;
1343
1344 if (eofb) {
1345 if (eofb->eof_flags & XFS_EOF_FLAGS_UNION)
1346 match = xfs_inode_match_id_union(ip, eofb);
1347 else
1348 match = xfs_inode_match_id(ip, eofb);
1349 if (!match)
1350 return 0;
1351
1352 /* skip the inode if the file size is too small */
1353 if (eofb->eof_flags & XFS_EOF_FLAGS_MINFILESIZE &&
1354 XFS_ISIZE(ip) < eofb->eof_min_file_size)
1355 return 0;
1356 }
1357
1358 /*
1359 * If the caller is waiting, return -EAGAIN to keep the background
1360 * scanner moving and revisit the inode in a subsequent pass.
1361 */
1362 if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
1363 if (flags & SYNC_WAIT)
1364 ret = -EAGAIN;
1365 return ret;
1366 }
1367 ret = xfs_free_eofblocks(ip);
1368 xfs_iunlock(ip, XFS_IOLOCK_EXCL);
1369
1370 return ret;
1371}
1372
1373static int
1374__xfs_icache_free_eofblocks(
1375 struct xfs_mount *mp,
1376 struct xfs_eofblocks *eofb,
1377 int (*execute)(struct xfs_inode *ip, int flags,
1378 void *args),
1379 int tag)
1380{
1381 int flags = SYNC_TRYLOCK;
1382
1383 if (eofb && (eofb->eof_flags & XFS_EOF_FLAGS_SYNC))
1384 flags = SYNC_WAIT;
1385
1386 return xfs_inode_ag_iterator_tag(mp, execute, flags,
1387 eofb, tag);
1388}
1389
1390int
1391xfs_icache_free_eofblocks(
1392 struct xfs_mount *mp,
1393 struct xfs_eofblocks *eofb)
1394{
1395 return __xfs_icache_free_eofblocks(mp, eofb, xfs_inode_free_eofblocks,
1396 XFS_ICI_EOFBLOCKS_TAG);
1397}
1398
1399/*
1400 * Run eofblocks scans on the quotas applicable to the inode. For inodes with
1401 * multiple quotas, we don't know exactly which quota caused an allocation
1402 * failure. We make a best effort by including each quota under low free space
1403 * conditions (less than 1% free space) in the scan.
1404 */
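/*
 * Worked example of the 1% threshold mentioned above: a user whose block
 * hard limit corresponds to 100GB is included in the scan once less than
 * 1GB of that quota remains free, and likewise for the group quota checked
 * below.
 */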
1405static int
1406__xfs_inode_free_quota_eofblocks(
1407 struct xfs_inode *ip,
1408 int (*execute)(struct xfs_mount *mp,
1409 struct xfs_eofblocks *eofb))
1410{
1411 int scan = 0;
1412 struct xfs_eofblocks eofb = {0};
1413 struct xfs_dquot *dq;
1414
1415 /*
1416 * Run a sync scan to increase effectiveness and use the union filter to
1417 * cover all applicable quotas in a single scan.
1418 */
1419 eofb.eof_flags = XFS_EOF_FLAGS_UNION|XFS_EOF_FLAGS_SYNC;
1420
1421 if (XFS_IS_UQUOTA_ENFORCED(ip->i_mount)) {
1422 dq = xfs_inode_dquot(ip, XFS_DQ_USER);
1423 if (dq && xfs_dquot_lowsp(dq)) {
1424 eofb.eof_uid = VFS_I(ip)->i_uid;
1425 eofb.eof_flags |= XFS_EOF_FLAGS_UID;
1426 scan = 1;
1427 }
1428 }
1429
1430 if (XFS_IS_GQUOTA_ENFORCED(ip->i_mount)) {
1431 dq = xfs_inode_dquot(ip, XFS_DQ_GROUP);
1432 if (dq && xfs_dquot_lowsp(dq)) {
1433 eofb.eof_gid = VFS_I(ip)->i_gid;
1434 eofb.eof_flags |= XFS_EOF_FLAGS_GID;
1435 scan = 1;
1436 }
1437 }
1438
1439 if (scan)
1440 execute(ip->i_mount, &eofb);
1441
1442 return scan;
1443}
1444
1445int
1446xfs_inode_free_quota_eofblocks(
1447 struct xfs_inode *ip)
1448{
1449 return __xfs_inode_free_quota_eofblocks(ip, xfs_icache_free_eofblocks);
1450}
1451
1452static void
1453__xfs_inode_set_eofblocks_tag(
1454 xfs_inode_t *ip,
1455 void (*execute)(struct xfs_mount *mp),
1456 void (*set_tp)(struct xfs_mount *mp, xfs_agnumber_t agno,
1457 int error, unsigned long caller_ip),
1458 int tag)
1459{
1460 struct xfs_mount *mp = ip->i_mount;
1461 struct xfs_perag *pag;
1462 int tagged;
1463
1464 /*
1465 * Don't bother locking the AG and looking up in the radix trees
1466 * if we already know that we have the tag set.
1467 */
1468 if (ip->i_flags & XFS_IEOFBLOCKS)
1469 return;
1470 spin_lock(&ip->i_flags_lock);
1471 ip->i_flags |= XFS_IEOFBLOCKS;
1472 spin_unlock(&ip->i_flags_lock);
1473
1474 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
1475 spin_lock(&pag->pag_ici_lock);
1476
1477 tagged = radix_tree_tagged(&pag->pag_ici_root, tag);
1478 radix_tree_tag_set(&pag->pag_ici_root,
1479 XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino), tag);
1480 if (!tagged) {
1481 /* propagate the eofblocks tag up into the perag radix tree */
1482 spin_lock(&ip->i_mount->m_perag_lock);
1483 radix_tree_tag_set(&ip->i_mount->m_perag_tree,
1484 XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
1485 tag);
1486 spin_unlock(&ip->i_mount->m_perag_lock);
1487
1488 /* kick off background trimming */
1489 execute(ip->i_mount);
1490
1491 set_tp(ip->i_mount, pag->pag_agno, -1, _RET_IP_);
1492 }
1493
1494 spin_unlock(&pag->pag_ici_lock);
1495 xfs_perag_put(pag);
1496}
1497
1498void
1499xfs_inode_set_eofblocks_tag(
1500 xfs_inode_t *ip)
1501{
1502 trace_xfs_inode_set_eofblocks_tag(ip);
1503 return __xfs_inode_set_eofblocks_tag(ip, xfs_queue_eofblocks,
1504 trace_xfs_perag_set_eofblocks,
1505 XFS_ICI_EOFBLOCKS_TAG);
1506}
1507
1508static void
1509__xfs_inode_clear_eofblocks_tag(
1510 xfs_inode_t *ip,
1511 void (*clear_tp)(struct xfs_mount *mp, xfs_agnumber_t agno,
1512 int error, unsigned long caller_ip),
1513 int tag)
1514{
1515 struct xfs_mount *mp = ip->i_mount;
1516 struct xfs_perag *pag;
1517
1518 spin_lock(&ip->i_flags_lock);
1519 ip->i_flags &= ~XFS_IEOFBLOCKS;
1520 spin_unlock(&ip->i_flags_lock);
1521
1522 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
1523 spin_lock(&pag->pag_ici_lock);
1524
1525 radix_tree_tag_clear(&pag->pag_ici_root,
1526 XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino), tag);
1527 if (!radix_tree_tagged(&pag->pag_ici_root, tag)) {
1528 /* clear the eofblocks tag from the perag radix tree */
1529 spin_lock(&ip->i_mount->m_perag_lock);
1530 radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
1531 XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
1532 tag);
1533 spin_unlock(&ip->i_mount->m_perag_lock);
1534 clear_tp(ip->i_mount, pag->pag_agno, -1, _RET_IP_);
1535 }
1536
1537 spin_unlock(&pag->pag_ici_lock);
1538 xfs_perag_put(pag);
1539}
1540
1541void
1542xfs_inode_clear_eofblocks_tag(
1543 xfs_inode_t *ip)
1544{
1545 trace_xfs_inode_clear_eofblocks_tag(ip);
1546 return __xfs_inode_clear_eofblocks_tag(ip,
1547 trace_xfs_perag_clear_eofblocks, XFS_ICI_EOFBLOCKS_TAG);
1548}
1549
1550/*
1551 * Automatic CoW Reservation Freeing
1552 *
1553 * These functions automatically garbage collect leftover CoW reservations
1554 * that were made on behalf of a cowextsize hint when we start to run out
1555 * of quota or when the reservations sit around for too long. If the file
1556 * has dirty pages or is undergoing writeback, its CoW reservations will
1557 * be retained.
1558 *
1559 * The actual garbage collection piggybacks off the same code that runs
1560 * the speculative EOF preallocation garbage collector.
1561 */
1562STATIC int
1563xfs_inode_free_cowblocks(
1564 struct xfs_inode *ip,
1565 int flags,
1566 void *args)
1567{
1568 int ret;
1569 struct xfs_eofblocks *eofb = args;
1570 int match;
1571 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
1572
1573 /*
1574 * Just clear the tag if we have an empty cow fork or none at all. It's
1575 * possible the inode was fully unshared since it was originally tagged.
1576 */
1577 if (!xfs_is_reflink_inode(ip) || !ifp->if_bytes) {
1578 trace_xfs_inode_free_cowblocks_invalid(ip);
1579 xfs_inode_clear_cowblocks_tag(ip);
1580 return 0;
1581 }
1582
1583 /*
1584 * If the mapping is dirty or under writeback we cannot touch the
1585 * CoW fork. Leave it alone if we're in the midst of a directio.
1586 */
1587 if ((VFS_I(ip)->i_state & I_DIRTY_PAGES) ||
1588 mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY) ||
1589 mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_WRITEBACK) ||
1590 atomic_read(&VFS_I(ip)->i_dio_count))
1591 return 0;
1592
1593 if (eofb) {
1594 if (eofb->eof_flags & XFS_EOF_FLAGS_UNION)
1595 match = xfs_inode_match_id_union(ip, eofb);
1596 else
1597 match = xfs_inode_match_id(ip, eofb);
1598 if (!match)
1599 return 0;
1600
1601 /* skip the inode if the file size is too small */
1602 if (eofb->eof_flags & XFS_EOF_FLAGS_MINFILESIZE &&
1603 XFS_ISIZE(ip) < eofb->eof_min_file_size)
1604 return 0;
1605 }
1606
1607 /* Free the CoW blocks */
1608 xfs_ilock(ip, XFS_IOLOCK_EXCL);
1609 xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
1610
1611 ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, false);
1612
1613 xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
1614 xfs_iunlock(ip, XFS_IOLOCK_EXCL);
1615
1616 return ret;
1617}
1618
1619int
1620xfs_icache_free_cowblocks(
1621 struct xfs_mount *mp,
1622 struct xfs_eofblocks *eofb)
1623{
1624 return __xfs_icache_free_eofblocks(mp, eofb, xfs_inode_free_cowblocks,
1625 XFS_ICI_COWBLOCKS_TAG);
1626}
1627
1628int
1629xfs_inode_free_quota_cowblocks(
1630 struct xfs_inode *ip)
1631{
1632 return __xfs_inode_free_quota_eofblocks(ip, xfs_icache_free_cowblocks);
1633}
1634
1635void
1636xfs_inode_set_cowblocks_tag(
1637 xfs_inode_t *ip)
1638{
1639 trace_xfs_inode_set_cowblocks_tag(ip);
1640 return __xfs_inode_set_eofblocks_tag(ip, xfs_queue_cowblocks,
1641 trace_xfs_perag_set_cowblocks,
1642 XFS_ICI_COWBLOCKS_TAG);
1643}
1644
1645void
1646xfs_inode_clear_cowblocks_tag(
1647 xfs_inode_t *ip)
1648{
1649 trace_xfs_inode_clear_cowblocks_tag(ip);
1650 return __xfs_inode_clear_eofblocks_tag(ip,
1651 trace_xfs_perag_clear_cowblocks, XFS_ICI_COWBLOCKS_TAG);
1652}