/*
 * (C) 1997 Linus Torvalds
 * (C) 1999 Andrea Arcangeli <andrea@suse.de> (dynamic inode allocation)
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/dcache.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/writeback.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/wait.h>
#include <linux/rwsem.h>
#include <linux/hash.h>
#include <linux/swap.h>
#include <linux/security.h>
#include <linux/pagemap.h>
#include <linux/cdev.h>
#include <linux/bootmem.h>
#include <linux/fsnotify.h>
#include <linux/mount.h>
#include <linux/async.h>
#include <linux/posix_acl.h>
#include <linux/prefetch.h>
#include <linux/ima.h>
#include <linux/cred.h>
#include <linux/buffer_head.h> /* for inode_has_buffers */
#include "internal.h"

/*
 * Inode locking rules:
 *
 * inode->i_lock protects:
 *   inode->i_state, inode->i_hash, __iget()
 * inode->i_sb->s_inode_lru_lock protects:
 *   inode->i_sb->s_inode_lru, inode->i_lru
 * inode_sb_list_lock protects:
 *   sb->s_inodes, inode->i_sb_list
 * bdi->wb.list_lock protects:
 *   bdi->wb.b_{dirty,io,more_io}, inode->i_wb_list
 * inode_hash_lock protects:
 *   inode_hashtable, inode->i_hash
 *
 * Lock ordering:
 *
 * inode_sb_list_lock
 *   inode->i_lock
 *     inode->i_sb->s_inode_lru_lock
 *
 * bdi->wb.list_lock
 *   inode->i_lock
 *
 * inode_hash_lock
 *   inode_sb_list_lock
 *   inode->i_lock
 *
 * iunique_lock
 *   inode_hash_lock
 */

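/*
 * Illustrative sketch (not part of the original file): code that walks
 * the per-sb inode list and then takes a per-inode lock must follow the
 * ordering above, e.g.
 *
 *	spin_lock(&inode_sb_list_lock);
 *	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
 *		spin_lock(&inode->i_lock);	(nests inside the list lock)
 *		...
 *		spin_unlock(&inode->i_lock);
 *	}
 *	spin_unlock(&inode_sb_list_lock);
 *
 * evict_inodes() below is a real instance of this pattern.
 */
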
static unsigned int i_hash_mask __read_mostly;
static unsigned int i_hash_shift __read_mostly;
static struct hlist_head *inode_hashtable __read_mostly;
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock);

__cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_sb_list_lock);

/*
 * Empty aops. Can be used for the cases where the user does not
 * define any of the address_space operations.
 */
const struct address_space_operations empty_aops = {
};
EXPORT_SYMBOL(empty_aops);

/*
 * Statistics gathering..
 */
struct inodes_stat_t inodes_stat;

static DEFINE_PER_CPU(unsigned int, nr_inodes);
static DEFINE_PER_CPU(unsigned int, nr_unused);

static struct kmem_cache *inode_cachep __read_mostly;

static int get_nr_inodes(void)
{
	int i;
	int sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_inodes, i);
	return sum < 0 ? 0 : sum;
}

static inline int get_nr_inodes_unused(void)
{
	int i;
	int sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_unused, i);
	return sum < 0 ? 0 : sum;
}

int get_nr_dirty_inodes(void)
{
	/* not actually dirty inodes, but a wild approximation */
	int nr_dirty = get_nr_inodes() - get_nr_inodes_unused();
	return nr_dirty > 0 ? nr_dirty : 0;
}

/*
 * Handle nr_inode sysctl
 */
#ifdef CONFIG_SYSCTL
int proc_nr_inodes(ctl_table *table, int write,
		   void __user *buffer, size_t *lenp, loff_t *ppos)
{
	inodes_stat.nr_inodes = get_nr_inodes();
	inodes_stat.nr_unused = get_nr_inodes_unused();
	return proc_dointvec(table, write, buffer, lenp, ppos);
}
#endif

/**
 * inode_init_always - perform inode structure initialisation
 * @sb: superblock inode belongs to
 * @inode: inode to initialise
 *
 * These are initializations that need to be done on every inode
 * allocation as the fields are not initialised by slab allocation.
 */
int inode_init_always(struct super_block *sb, struct inode *inode)
{
	static const struct inode_operations empty_iops;
	static const struct file_operations empty_fops;
	struct address_space *const mapping = &inode->i_data;

	inode->i_sb = sb;
	inode->i_blkbits = sb->s_blocksize_bits;
	inode->i_flags = 0;
	atomic_set(&inode->i_count, 1);
	inode->i_op = &empty_iops;
	inode->i_fop = &empty_fops;
	inode->i_nlink = 1;
	inode->i_opflags = 0;
	inode->i_uid = 0;
	inode->i_gid = 0;
	atomic_set(&inode->i_writecount, 0);
	inode->i_size = 0;
	inode->i_blocks = 0;
	inode->i_bytes = 0;
	inode->i_generation = 0;
#ifdef CONFIG_QUOTA
	memset(&inode->i_dquot, 0, sizeof(inode->i_dquot));
#endif
	inode->i_pipe = NULL;
	inode->i_bdev = NULL;
	inode->i_cdev = NULL;
	inode->i_rdev = 0;
	inode->dirtied_when = 0;

	if (security_inode_alloc(inode))
		goto out;
	spin_lock_init(&inode->i_lock);
	lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key);

	mutex_init(&inode->i_mutex);
	lockdep_set_class(&inode->i_mutex, &sb->s_type->i_mutex_key);

	atomic_set(&inode->i_dio_count, 0);

	mapping->a_ops = &empty_aops;
	mapping->host = inode;
	mapping->flags = 0;
	mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE);
	mapping->assoc_mapping = NULL;
	mapping->backing_dev_info = &default_backing_dev_info;
	mapping->writeback_index = 0;

	/*
	 * If the block_device provides a backing_dev_info for client
	 * inodes then use that. Otherwise the inode shares the bdev's
	 * backing_dev_info.
	 */
	if (sb->s_bdev) {
		struct backing_dev_info *bdi;

		bdi = sb->s_bdev->bd_inode->i_mapping->backing_dev_info;
		mapping->backing_dev_info = bdi;
	}
	inode->i_private = NULL;
	inode->i_mapping = mapping;
#ifdef CONFIG_FS_POSIX_ACL
	inode->i_acl = inode->i_default_acl = ACL_NOT_CACHED;
#endif

#ifdef CONFIG_FSNOTIFY
	inode->i_fsnotify_mask = 0;
#endif

	this_cpu_inc(nr_inodes);

	return 0;
out:
	return -ENOMEM;
}
EXPORT_SYMBOL(inode_init_always);

static struct inode *alloc_inode(struct super_block *sb)
{
	struct inode *inode;

	if (sb->s_op->alloc_inode)
		inode = sb->s_op->alloc_inode(sb);
	else
		inode = kmem_cache_alloc(inode_cachep, GFP_KERNEL);

	if (!inode)
		return NULL;

	if (unlikely(inode_init_always(sb, inode))) {
		if (inode->i_sb->s_op->destroy_inode)
			inode->i_sb->s_op->destroy_inode(inode);
		else
			kmem_cache_free(inode_cachep, inode);
		return NULL;
	}

	return inode;
}

void free_inode_nonrcu(struct inode *inode)
{
	kmem_cache_free(inode_cachep, inode);
}
EXPORT_SYMBOL(free_inode_nonrcu);

void __destroy_inode(struct inode *inode)
{
	BUG_ON(inode_has_buffers(inode));
	security_inode_free(inode);
	fsnotify_inode_delete(inode);
#ifdef CONFIG_FS_POSIX_ACL
	if (inode->i_acl && inode->i_acl != ACL_NOT_CACHED)
		posix_acl_release(inode->i_acl);
	if (inode->i_default_acl && inode->i_default_acl != ACL_NOT_CACHED)
		posix_acl_release(inode->i_default_acl);
#endif
	this_cpu_dec(nr_inodes);
}
EXPORT_SYMBOL(__destroy_inode);

static void i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	INIT_LIST_HEAD(&inode->i_dentry);
	kmem_cache_free(inode_cachep, inode);
}

static void destroy_inode(struct inode *inode)
{
	BUG_ON(!list_empty(&inode->i_lru));
	__destroy_inode(inode);
	if (inode->i_sb->s_op->destroy_inode)
		inode->i_sb->s_op->destroy_inode(inode);
	else
		call_rcu(&inode->i_rcu, i_callback);
}

void address_space_init_once(struct address_space *mapping)
{
	memset(mapping, 0, sizeof(*mapping));
	INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);
	spin_lock_init(&mapping->tree_lock);
	mutex_init(&mapping->i_mmap_mutex);
	INIT_LIST_HEAD(&mapping->private_list);
	spin_lock_init(&mapping->private_lock);
	INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap);
	INIT_LIST_HEAD(&mapping->i_mmap_nonlinear);
}
EXPORT_SYMBOL(address_space_init_once);

/*
 * These are initializations that only need to be done
 * once, because the fields are idempotent across use
 * of the inode, so let the slab be aware of that.
 */
void inode_init_once(struct inode *inode)
{
	memset(inode, 0, sizeof(*inode));
	INIT_HLIST_NODE(&inode->i_hash);
	INIT_LIST_HEAD(&inode->i_dentry);
	INIT_LIST_HEAD(&inode->i_devices);
	INIT_LIST_HEAD(&inode->i_wb_list);
	INIT_LIST_HEAD(&inode->i_lru);
	address_space_init_once(&inode->i_data);
	i_size_ordered_init(inode);
#ifdef CONFIG_FSNOTIFY
	INIT_HLIST_HEAD(&inode->i_fsnotify_marks);
#endif
}
EXPORT_SYMBOL(inode_init_once);

static void init_once(void *foo)
{
	struct inode *inode = (struct inode *) foo;

	inode_init_once(inode);
}

/*
 * inode->i_lock must be held
 */
void __iget(struct inode *inode)
{
	atomic_inc(&inode->i_count);
}

/*
 * get additional reference to inode; caller must already hold one.
 */
void ihold(struct inode *inode)
{
	WARN_ON(atomic_inc_return(&inode->i_count) < 2);
}
EXPORT_SYMBOL(ihold);

static void inode_lru_list_add(struct inode *inode)
{
	spin_lock(&inode->i_sb->s_inode_lru_lock);
	if (list_empty(&inode->i_lru)) {
		list_add(&inode->i_lru, &inode->i_sb->s_inode_lru);
		inode->i_sb->s_nr_inodes_unused++;
		this_cpu_inc(nr_unused);
	}
	spin_unlock(&inode->i_sb->s_inode_lru_lock);
}

static void inode_lru_list_del(struct inode *inode)
{
	spin_lock(&inode->i_sb->s_inode_lru_lock);
	if (!list_empty(&inode->i_lru)) {
		list_del_init(&inode->i_lru);
		inode->i_sb->s_nr_inodes_unused--;
		this_cpu_dec(nr_unused);
	}
	spin_unlock(&inode->i_sb->s_inode_lru_lock);
}

/**
 * inode_sb_list_add - add inode to the superblock list of inodes
 * @inode: inode to add
 */
void inode_sb_list_add(struct inode *inode)
{
	spin_lock(&inode_sb_list_lock);
	list_add(&inode->i_sb_list, &inode->i_sb->s_inodes);
	spin_unlock(&inode_sb_list_lock);
}
EXPORT_SYMBOL_GPL(inode_sb_list_add);

static inline void inode_sb_list_del(struct inode *inode)
{
	if (!list_empty(&inode->i_sb_list)) {
		spin_lock(&inode_sb_list_lock);
		list_del_init(&inode->i_sb_list);
		spin_unlock(&inode_sb_list_lock);
	}
}

static unsigned long hash(struct super_block *sb, unsigned long hashval)
{
	unsigned long tmp;

	tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
			L1_CACHE_BYTES;
	tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> i_hash_shift);
	return tmp & i_hash_mask;
}

/**
 * __insert_inode_hash - hash an inode
 * @inode: unhashed inode
 * @hashval: unsigned long value used to locate this object in the
 *	inode_hashtable.
 *
 * Add an inode to the inode hash for this superblock.
 */
void __insert_inode_hash(struct inode *inode, unsigned long hashval)
{
	struct hlist_head *b = inode_hashtable + hash(inode->i_sb, hashval);

	spin_lock(&inode_hash_lock);
	spin_lock(&inode->i_lock);
	hlist_add_head(&inode->i_hash, b);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
}
EXPORT_SYMBOL(__insert_inode_hash);

/**
 * __remove_inode_hash - remove an inode from the hash
 * @inode: inode to unhash
 *
 * Remove an inode from the superblock.
 */
void __remove_inode_hash(struct inode *inode)
{
	spin_lock(&inode_hash_lock);
	spin_lock(&inode->i_lock);
	hlist_del_init(&inode->i_hash);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
}
EXPORT_SYMBOL(__remove_inode_hash);

void end_writeback(struct inode *inode)
{
	might_sleep();
	/*
	 * We have to cycle tree_lock here because reclaim can be still in the
	 * process of removing the last page (in __delete_from_page_cache())
	 * and we must not free mapping under it.
	 */
	spin_lock_irq(&inode->i_data.tree_lock);
	BUG_ON(inode->i_data.nrpages);
	spin_unlock_irq(&inode->i_data.tree_lock);
	BUG_ON(!list_empty(&inode->i_data.private_list));
	BUG_ON(!(inode->i_state & I_FREEING));
	BUG_ON(inode->i_state & I_CLEAR);
	inode_sync_wait(inode);
	/* don't need i_lock here, no concurrent mods to i_state */
	inode->i_state = I_FREEING | I_CLEAR;
}
EXPORT_SYMBOL(end_writeback);

/*
 * Free the inode passed in, removing it from the lists it is still connected
 * to. We remove any pages still attached to the inode and wait for any IO that
 * is still in progress before finally destroying the inode.
 *
 * An inode must already be marked I_FREEING so that we avoid the inode being
 * moved back onto lists if we race with other code that manipulates the lists
 * (e.g. writeback_single_inode). The caller is responsible for setting this.
 *
 * An inode must already be removed from the LRU list before being evicted from
 * the cache. This should occur atomically with setting the I_FREEING state
 * flag, so no inodes here should ever be on the LRU when being evicted.
 */
static void evict(struct inode *inode)
{
	const struct super_operations *op = inode->i_sb->s_op;

	BUG_ON(!(inode->i_state & I_FREEING));
	BUG_ON(!list_empty(&inode->i_lru));

	if (!list_empty(&inode->i_wb_list))
		inode_wb_list_del(inode);

	inode_sb_list_del(inode);

	if (op->evict_inode) {
		op->evict_inode(inode);
	} else {
		if (inode->i_data.nrpages)
			truncate_inode_pages(&inode->i_data, 0);
		end_writeback(inode);
	}
	if (S_ISBLK(inode->i_mode) && inode->i_bdev)
		bd_forget(inode);
	if (S_ISCHR(inode->i_mode) && inode->i_cdev)
		cd_forget(inode);

	remove_inode_hash(inode);

	spin_lock(&inode->i_lock);
	wake_up_bit(&inode->i_state, __I_NEW);
	BUG_ON(inode->i_state != (I_FREEING | I_CLEAR));
	spin_unlock(&inode->i_lock);

	destroy_inode(inode);
}

/*
 * dispose_list - dispose of the contents of a local list
 * @head: the head of the list to free
 *
 * Dispose-list gets a local list with local inodes in it, so it doesn't
 * need to worry about list corruption and SMP locks.
 */
static void dispose_list(struct list_head *head)
{
	while (!list_empty(head)) {
		struct inode *inode;

		inode = list_first_entry(head, struct inode, i_lru);
		list_del_init(&inode->i_lru);

		evict(inode);
	}
}

/**
 * evict_inodes - evict all evictable inodes for a superblock
 * @sb: superblock to operate on
 *
 * Make sure that no inodes with zero refcount are retained. This is
 * called by superblock shutdown after having MS_ACTIVE flag removed,
 * so any inode reaching zero refcount during or after that call will
 * be immediately evicted.
 */
void evict_inodes(struct super_block *sb)
{
	struct inode *inode, *next;
	LIST_HEAD(dispose);

	spin_lock(&inode_sb_list_lock);
	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
		if (atomic_read(&inode->i_count))
			continue;

		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			continue;
		}

		inode->i_state |= I_FREEING;
		inode_lru_list_del(inode);
		spin_unlock(&inode->i_lock);
		list_add(&inode->i_lru, &dispose);
	}
	spin_unlock(&inode_sb_list_lock);

	dispose_list(&dispose);
}

/**
 * invalidate_inodes - attempt to free all inodes on a superblock
 * @sb: superblock to operate on
 * @kill_dirty: flag to guide handling of dirty inodes
 *
 * Attempts to free all inodes for a given superblock. If there were any
 * busy inodes return a non-zero value, else zero.
 * If @kill_dirty is set, discard dirty inodes too, otherwise treat
 * them as busy.
 */
int invalidate_inodes(struct super_block *sb, bool kill_dirty)
{
	int busy = 0;
	struct inode *inode, *next;
	LIST_HEAD(dispose);

	spin_lock(&inode_sb_list_lock);
	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		if (inode->i_state & I_DIRTY && !kill_dirty) {
			spin_unlock(&inode->i_lock);
			busy = 1;
			continue;
		}
		if (atomic_read(&inode->i_count)) {
			spin_unlock(&inode->i_lock);
			busy = 1;
			continue;
		}

		inode->i_state |= I_FREEING;
		inode_lru_list_del(inode);
		spin_unlock(&inode->i_lock);
		list_add(&inode->i_lru, &dispose);
	}
	spin_unlock(&inode_sb_list_lock);

	dispose_list(&dispose);

	return busy;
}

static int can_unuse(struct inode *inode)
{
	if (inode->i_state & ~I_REFERENCED)
		return 0;
	if (inode_has_buffers(inode))
		return 0;
	if (atomic_read(&inode->i_count))
		return 0;
	if (inode->i_data.nrpages)
		return 0;
	return 1;
}

/*
 * Walk the superblock inode LRU for freeable inodes and attempt to free them.
 * This is called from the superblock shrinker function with a number of inodes
 * to trim from the LRU. Inodes to be freed are moved to a temporary list and
 * then are freed outside inode_lock by dispose_list().
 *
 * Any inodes which are pinned purely because of attached pagecache have their
 * pagecache removed. If the inode has metadata buffers attached to
 * mapping->private_list then try to remove them.
 *
 * If the inode has the I_REFERENCED flag set, then it means that it has been
 * used recently - the flag is set in iput_final(). When we encounter such an
 * inode, clear the flag and move it to the back of the LRU so it gets another
 * pass through the LRU before it gets reclaimed. This is necessary because of
 * the fact we are doing lazy LRU updates to minimise lock contention so the
 * LRU does not have strict ordering. Hence we don't want to reclaim inodes
 * with this flag set because they are the inodes that are out of order.
 */
void prune_icache_sb(struct super_block *sb, int nr_to_scan)
{
	LIST_HEAD(freeable);
	int nr_scanned;
	unsigned long reap = 0;

	spin_lock(&sb->s_inode_lru_lock);
	for (nr_scanned = nr_to_scan; nr_scanned >= 0; nr_scanned--) {
		struct inode *inode;

		if (list_empty(&sb->s_inode_lru))
			break;

		inode = list_entry(sb->s_inode_lru.prev, struct inode, i_lru);

		/*
		 * we are inverting the sb->s_inode_lru_lock/inode->i_lock here,
		 * so use a trylock. If we fail to get the lock, just move the
		 * inode to the back of the list so we don't spin on it.
		 */
		if (!spin_trylock(&inode->i_lock)) {
			list_move(&inode->i_lru, &sb->s_inode_lru);
			continue;
		}

		/*
		 * Referenced or dirty inodes are still in use. Give them
		 * another pass through the LRU as we cannot reclaim them now.
		 */
		if (atomic_read(&inode->i_count) ||
		    (inode->i_state & ~I_REFERENCED)) {
			list_del_init(&inode->i_lru);
			spin_unlock(&inode->i_lock);
			sb->s_nr_inodes_unused--;
			this_cpu_dec(nr_unused);
			continue;
		}

		/* recently referenced inodes get one more pass */
		if (inode->i_state & I_REFERENCED) {
			inode->i_state &= ~I_REFERENCED;
			list_move(&inode->i_lru, &sb->s_inode_lru);
			spin_unlock(&inode->i_lock);
			continue;
		}
		if (inode_has_buffers(inode) || inode->i_data.nrpages) {
			__iget(inode);
			spin_unlock(&inode->i_lock);
			spin_unlock(&sb->s_inode_lru_lock);
			if (remove_inode_buffers(inode))
				reap += invalidate_mapping_pages(&inode->i_data,
								0, -1);
			iput(inode);
			spin_lock(&sb->s_inode_lru_lock);

			if (inode != list_entry(sb->s_inode_lru.next,
						struct inode, i_lru))
				continue;	/* wrong inode or list_empty */
			/* avoid lock inversions with trylock */
			if (!spin_trylock(&inode->i_lock))
				continue;
			if (!can_unuse(inode)) {
				spin_unlock(&inode->i_lock);
				continue;
			}
		}
		WARN_ON(inode->i_state & I_NEW);
		inode->i_state |= I_FREEING;
		spin_unlock(&inode->i_lock);

		list_move(&inode->i_lru, &freeable);
		sb->s_nr_inodes_unused--;
		this_cpu_dec(nr_unused);
	}
	if (current_is_kswapd())
		__count_vm_events(KSWAPD_INODESTEAL, reap);
	else
		__count_vm_events(PGINODESTEAL, reap);
	spin_unlock(&sb->s_inode_lru_lock);

	dispose_list(&freeable);
}

static void __wait_on_freeing_inode(struct inode *inode);
/*
 * Called with the inode_hash_lock held.
 */
static struct inode *find_inode(struct super_block *sb,
				struct hlist_head *head,
				int (*test)(struct inode *, void *),
				void *data)
{
	struct hlist_node *node;
	struct inode *inode = NULL;

repeat:
	hlist_for_each_entry(inode, node, head, i_hash) {
		spin_lock(&inode->i_lock);
		if (inode->i_sb != sb) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		if (!test(inode, data)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
			__wait_on_freeing_inode(inode);
			goto repeat;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		return inode;
	}
	return NULL;
}

/*
 * find_inode_fast is the fast path version of find_inode, see the comment at
 * iget_locked for details.
 */
static struct inode *find_inode_fast(struct super_block *sb,
				struct hlist_head *head, unsigned long ino)
{
	struct hlist_node *node;
	struct inode *inode = NULL;

repeat:
	hlist_for_each_entry(inode, node, head, i_hash) {
		spin_lock(&inode->i_lock);
		if (inode->i_ino != ino) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		if (inode->i_sb != sb) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
			__wait_on_freeing_inode(inode);
			goto repeat;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		return inode;
	}
	return NULL;
}

/*
 * Each cpu owns a range of LAST_INO_BATCH numbers.
 * 'shared_last_ino' is dirtied only once out of LAST_INO_BATCH allocations,
 * to renew the exhausted range.
 *
 * This does not significantly increase overflow rate because every CPU can
 * consume at most LAST_INO_BATCH-1 unused inode numbers. So there is
 * NR_CPUS*(LAST_INO_BATCH-1) wastage. At 4096 and 1024, this is ~0.1% of the
 * 2^32 range, and is a worst-case. Even a 50% wastage would only increase
 * overflow rate by 2x, which does not seem too significant.
 *
 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
 * error if st_ino won't fit in target struct field. Use 32bit counter
 * here to attempt to avoid that.
 */
#define LAST_INO_BATCH 1024
static DEFINE_PER_CPU(unsigned int, last_ino);

unsigned int get_next_ino(void)
{
	unsigned int *p = &get_cpu_var(last_ino);
	unsigned int res = *p;

#ifdef CONFIG_SMP
	if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
		static atomic_t shared_last_ino;
		int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);

		res = next - LAST_INO_BATCH;
	}
#endif

	*p = ++res;
	put_cpu_var(last_ino);
	return res;
}
EXPORT_SYMBOL(get_next_ino);

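/*
 * Usage sketch (illustrative, not from this file): pseudo filesystems
 * typically assign the result directly when they mint a new in-memory
 * inode, e.g.
 *
 *	struct inode *inode = new_inode(sb);
 *	if (inode)
 *		inode->i_ino = get_next_ino();
 */
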
/**
 * new_inode_pseudo - obtain an inode
 * @sb: superblock
 *
 * Allocates a new inode for the given superblock.
 * The inode won't be chained into the superblock's s_inodes list.
 * This means:
 * - the fs can't be unmounted
 * - quotas, fsnotify and writeback can't work on it
 */
struct inode *new_inode_pseudo(struct super_block *sb)
{
	struct inode *inode = alloc_inode(sb);

	if (inode) {
		spin_lock(&inode->i_lock);
		inode->i_state = 0;
		spin_unlock(&inode->i_lock);
		INIT_LIST_HEAD(&inode->i_sb_list);
	}
	return inode;
}

/**
 * new_inode - obtain an inode
 * @sb: superblock
 *
 * Allocates a new inode for the given superblock. The default gfp_mask
 * for allocations related to inode->i_mapping is GFP_HIGHUSER_MOVABLE.
 * If HIGHMEM pages are unsuitable or it is known that pages allocated
 * for the page cache are not reclaimable or migratable,
 * mapping_set_gfp_mask() must be called with suitable flags on the
 * newly created inode's mapping.
 */
struct inode *new_inode(struct super_block *sb)
{
	struct inode *inode;

	spin_lock_prefetch(&inode_sb_list_lock);

	inode = new_inode_pseudo(sb);
	if (inode)
		inode_sb_list_add(inode);
	return inode;
}
EXPORT_SYMBOL(new_inode);

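/*
 * Usage sketch (hypothetical "foo" filesystem, for illustration only):
 * a simple in-memory filesystem fills in a new inode here, and
 * mapping_set_gfp_mask() shows the override mentioned above:
 *
 *	static struct inode *foo_get_inode(struct super_block *sb, int mode)
 *	{
 *		struct inode *inode = new_inode(sb);
 *
 *		if (inode) {
 *			inode->i_ino = get_next_ino();
 *			inode->i_mode = mode;
 *			mapping_set_gfp_mask(inode->i_mapping, GFP_HIGHUSER);
 *		}
 *		return inode;
 *	}
 */
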
#ifdef CONFIG_DEBUG_LOCK_ALLOC
void lockdep_annotate_inode_mutex_key(struct inode *inode)
{
	if (S_ISDIR(inode->i_mode)) {
		struct file_system_type *type = inode->i_sb->s_type;

		/* Set new key only if filesystem hasn't already changed it */
		if (!lockdep_match_class(&inode->i_mutex,
		    &type->i_mutex_key)) {
			/*
			 * ensure nobody is actually holding i_mutex
			 */
			mutex_destroy(&inode->i_mutex);
			mutex_init(&inode->i_mutex);
			lockdep_set_class(&inode->i_mutex,
					  &type->i_mutex_dir_key);
		}
	}
}
EXPORT_SYMBOL(lockdep_annotate_inode_mutex_key);
#endif

/**
 * unlock_new_inode - clear the I_NEW state and wake up any waiters
 * @inode: new inode to unlock
 *
 * Called when the inode is fully initialised to clear the new state of the
 * inode and wake up anyone waiting for the inode to finish initialisation.
 */
void unlock_new_inode(struct inode *inode)
{
	lockdep_annotate_inode_mutex_key(inode);
	spin_lock(&inode->i_lock);
	WARN_ON(!(inode->i_state & I_NEW));
	inode->i_state &= ~I_NEW;
	wake_up_bit(&inode->i_state, __I_NEW);
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(unlock_new_inode);

/**
 * iget5_locked - obtain an inode from a mounted file system
 * @sb: super block of file system
 * @hashval: hash value (usually inode number) to get
 * @test: callback used for comparisons between inodes
 * @set: callback used to initialize a new struct inode
 * @data: opaque data pointer to pass to @test and @set
 *
 * Search for the inode specified by @hashval and @data in the inode cache,
 * and if present return it with an increased reference count. This is a
 * generalized version of iget_locked() for file systems where the inode
 * number is not sufficient for unique identification of an inode.
 *
 * If the inode is not in cache, allocate a new inode and return it locked,
 * hashed, and with the I_NEW flag set. The file system gets to fill it in
 * before unlocking it via unlock_new_inode().
 *
 * Note both @test and @set are called with the inode_hash_lock held, so can't
 * sleep.
 */
struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *),
		int (*set)(struct inode *, void *), void *data)
{
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	inode = find_inode(sb, head, test, data);
	spin_unlock(&inode_hash_lock);

	if (inode) {
		wait_on_inode(inode);
		return inode;
	}

	inode = alloc_inode(sb);
	if (inode) {
		struct inode *old;

		spin_lock(&inode_hash_lock);
		/* We released the lock, so.. */
		old = find_inode(sb, head, test, data);
		if (!old) {
			if (set(inode, data))
				goto set_failed;

			spin_lock(&inode->i_lock);
			inode->i_state = I_NEW;
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			inode_sb_list_add(inode);
			spin_unlock(&inode_hash_lock);

			/* Return the locked inode with I_NEW set, the
			 * caller is responsible for filling in the contents
			 */
			return inode;
		}

		/*
		 * Uhhuh, somebody else created the same inode under
		 * us. Use the old inode instead of the one we just
		 * allocated.
		 */
		spin_unlock(&inode_hash_lock);
		destroy_inode(inode);
		inode = old;
		wait_on_inode(inode);
	}
	return inode;

set_failed:
	spin_unlock(&inode_hash_lock);
	destroy_inode(inode);
	return NULL;
}
EXPORT_SYMBOL(iget5_locked);

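/*
 * Usage sketch (hypothetical, for illustration only): a filesystem
 * whose on-disk identity does not fit in i_ino passes comparison and
 * init callbacks; FOO_I() and the u64 object id are made-up here:
 *
 *	static int foo_test(struct inode *inode, void *data)
 *	{
 *		return FOO_I(inode)->object_id == *(u64 *)data;
 *	}
 *
 *	static int foo_set(struct inode *inode, void *data)
 *	{
 *		FOO_I(inode)->object_id = *(u64 *)data;
 *		return 0;
 *	}
 *
 *	inode = iget5_locked(sb, hash_64(id, 32), foo_test, foo_set, &id);
 *	if (inode && (inode->i_state & I_NEW)) {
 *		... fill in from disk ...
 *		unlock_new_inode(inode);
 *	}
 */
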
/**
 * iget_locked - obtain an inode from a mounted file system
 * @sb: super block of file system
 * @ino: inode number to get
 *
 * Search for the inode specified by @ino in the inode cache and if present
 * return it with an increased reference count. This is for file systems
 * where the inode number is sufficient for unique identification of an inode.
 *
 * If the inode is not in cache, allocate a new inode and return it locked,
 * hashed, and with the I_NEW flag set. The file system gets to fill it in
 * before unlocking it via unlock_new_inode().
 */
struct inode *iget_locked(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *head = inode_hashtable + hash(sb, ino);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	inode = find_inode_fast(sb, head, ino);
	spin_unlock(&inode_hash_lock);
	if (inode) {
		wait_on_inode(inode);
		return inode;
	}

	inode = alloc_inode(sb);
	if (inode) {
		struct inode *old;

		spin_lock(&inode_hash_lock);
		/* We released the lock, so.. */
		old = find_inode_fast(sb, head, ino);
		if (!old) {
			inode->i_ino = ino;
			spin_lock(&inode->i_lock);
			inode->i_state = I_NEW;
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			inode_sb_list_add(inode);
			spin_unlock(&inode_hash_lock);

			/* Return the locked inode with I_NEW set, the
			 * caller is responsible for filling in the contents
			 */
			return inode;
		}

		/*
		 * Uhhuh, somebody else created the same inode under
		 * us. Use the old inode instead of the one we just
		 * allocated.
		 */
		spin_unlock(&inode_hash_lock);
		destroy_inode(inode);
		inode = old;
		wait_on_inode(inode);
	}
	return inode;
}
EXPORT_SYMBOL(iget_locked);

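/*
 * Usage sketch (hypothetical, for illustration only): the common lookup
 * pattern in a disk filesystem's iget helper; foo_read_inode_from_disk()
 * is a made-up name:
 *
 *	struct inode *inode = iget_locked(sb, ino);
 *
 *	if (!inode)
 *		return ERR_PTR(-ENOMEM);
 *	if (!(inode->i_state & I_NEW))
 *		return inode;		(cache hit, already initialised)
 *	err = foo_read_inode_from_disk(inode);
 *	if (err) {
 *		iget_failed(inode);
 *		return ERR_PTR(err);
 *	}
 *	unlock_new_inode(inode);
 *	return inode;
 */
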
/*
 * search the inode cache for a matching inode number.
 * If we find one, then the inode number we are trying to
 * allocate is not unique and so we should not use it.
 *
 * Returns 1 if the inode number is unique, 0 if it is not.
 */
static int test_inode_iunique(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *b = inode_hashtable + hash(sb, ino);
	struct hlist_node *node;
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	hlist_for_each_entry(inode, node, b, i_hash) {
		if (inode->i_ino == ino && inode->i_sb == sb) {
			spin_unlock(&inode_hash_lock);
			return 0;
		}
	}
	spin_unlock(&inode_hash_lock);

	return 1;
}

/**
 * iunique - get a unique inode number
 * @sb: superblock
 * @max_reserved: highest reserved inode number
 *
 * Obtain an inode number that is unique on the system for a given
 * superblock. This is used by file systems that have no natural
 * permanent inode numbering system. An inode number is returned that
 * is higher than the reserved limit but unique.
 *
 * BUGS:
 * With a large number of inodes live on the file system this function
 * currently becomes quite slow.
 */
ino_t iunique(struct super_block *sb, ino_t max_reserved)
{
	/*
	 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
	 * error if st_ino won't fit in target struct field. Use 32bit counter
	 * here to attempt to avoid that.
	 */
	static DEFINE_SPINLOCK(iunique_lock);
	static unsigned int counter;
	ino_t res;

	spin_lock(&iunique_lock);
	do {
		if (counter <= max_reserved)
			counter = max_reserved + 1;
		res = counter++;
	} while (!test_inode_iunique(sb, res));
	spin_unlock(&iunique_lock);

	return res;
}
EXPORT_SYMBOL(iunique);

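/*
 * Usage sketch (illustrative): filesystems without stable on-disk inode
 * numbers pick one above their reserved range when instantiating:
 *
 *	inode->i_ino = iunique(sb, FOO_FIRST_INO);
 *
 * where FOO_FIRST_INO is a hypothetical fs-specific constant.
 */
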
struct inode *igrab(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	if (!(inode->i_state & (I_FREEING|I_WILL_FREE))) {
		__iget(inode);
		spin_unlock(&inode->i_lock);
	} else {
		spin_unlock(&inode->i_lock);
		/*
		 * Handle the case where s_op->clear_inode has not been
		 * called yet, and somebody is calling igrab
		 * while the inode is getting freed.
		 */
		inode = NULL;
	}
	return inode;
}
EXPORT_SYMBOL(igrab);

/**
 * ilookup5_nowait - search for an inode in the inode cache
 * @sb: super block of file system to search
 * @hashval: hash value (usually inode number) to search for
 * @test: callback used for comparisons between inodes
 * @data: opaque data pointer to pass to @test
 *
 * Search for the inode specified by @hashval and @data in the inode cache.
 * If the inode is in the cache, the inode is returned with an incremented
 * reference count.
 *
 * Note: I_NEW is not waited upon so you have to be very careful what you do
 * with the returned inode. You probably should be using ilookup5() instead.
 *
 * Note2: @test is called with the inode_hash_lock held, so can't sleep.
 */
struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	inode = find_inode(sb, head, test, data);
	spin_unlock(&inode_hash_lock);

	return inode;
}
EXPORT_SYMBOL(ilookup5_nowait);

/**
 * ilookup5 - search for an inode in the inode cache
 * @sb: super block of file system to search
 * @hashval: hash value (usually inode number) to search for
 * @test: callback used for comparisons between inodes
 * @data: opaque data pointer to pass to @test
 *
 * Search for the inode specified by @hashval and @data in the inode cache,
 * and if the inode is in the cache, return the inode with an incremented
 * reference count. Waits on I_NEW before returning the inode.
 *
 * This is a generalized version of ilookup() for file systems where the
 * inode number is not sufficient for unique identification of an inode.
 *
 * Note: @test is called with the inode_hash_lock held, so can't sleep.
 */
struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	struct inode *inode = ilookup5_nowait(sb, hashval, test, data);

	if (inode)
		wait_on_inode(inode);
	return inode;
}
EXPORT_SYMBOL(ilookup5);

/**
 * ilookup - search for an inode in the inode cache
 * @sb: super block of file system to search
 * @ino: inode number to search for
 *
 * Search for the inode @ino in the inode cache, and if the inode is in the
 * cache, the inode is returned with an incremented reference count.
 */
struct inode *ilookup(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *head = inode_hashtable + hash(sb, ino);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	inode = find_inode_fast(sb, head, ino);
	spin_unlock(&inode_hash_lock);

	if (inode)
		wait_on_inode(inode);
	return inode;
}
EXPORT_SYMBOL(ilookup);

int insert_inode_locked(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	ino_t ino = inode->i_ino;
	struct hlist_head *head = inode_hashtable + hash(sb, ino);

	while (1) {
		struct hlist_node *node;
		struct inode *old = NULL;
		spin_lock(&inode_hash_lock);
		hlist_for_each_entry(old, node, head, i_hash) {
			if (old->i_ino != ino)
				continue;
			if (old->i_sb != sb)
				continue;
			spin_lock(&old->i_lock);
			if (old->i_state & (I_FREEING|I_WILL_FREE)) {
				spin_unlock(&old->i_lock);
				continue;
			}
			break;
		}
		if (likely(!node)) {
			spin_lock(&inode->i_lock);
			inode->i_state |= I_NEW;
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			spin_unlock(&inode_hash_lock);
			return 0;
		}
		__iget(old);
		spin_unlock(&old->i_lock);
		spin_unlock(&inode_hash_lock);
		wait_on_inode(old);
		if (unlikely(!inode_unhashed(old))) {
			iput(old);
			return -EBUSY;
		}
		iput(old);
	}
}
EXPORT_SYMBOL(insert_inode_locked);

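/*
 * Usage sketch (hypothetical, for illustration only): in a create path
 * the filesystem allocates the inode, picks i_ino, then publishes it:
 *
 *	inode->i_ino = ino;
 *	if (insert_inode_locked(inode) < 0) {
 *		... somebody else already owns this number ...
 *		make_bad_inode(inode);
 *		iput(inode);
 *		return ERR_PTR(-EIO);
 *	}
 *	... initialise, write to disk ...
 *	unlock_new_inode(inode);
 */
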
int insert_inode_locked4(struct inode *inode, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	struct super_block *sb = inode->i_sb;
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);

	while (1) {
		struct hlist_node *node;
		struct inode *old = NULL;

		spin_lock(&inode_hash_lock);
		hlist_for_each_entry(old, node, head, i_hash) {
			if (old->i_sb != sb)
				continue;
			if (!test(old, data))
				continue;
			spin_lock(&old->i_lock);
			if (old->i_state & (I_FREEING|I_WILL_FREE)) {
				spin_unlock(&old->i_lock);
				continue;
			}
			break;
		}
		if (likely(!node)) {
			spin_lock(&inode->i_lock);
			inode->i_state |= I_NEW;
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			spin_unlock(&inode_hash_lock);
			return 0;
		}
		__iget(old);
		spin_unlock(&old->i_lock);
		spin_unlock(&inode_hash_lock);
		wait_on_inode(old);
		if (unlikely(!inode_unhashed(old))) {
			iput(old);
			return -EBUSY;
		}
		iput(old);
	}
}
EXPORT_SYMBOL(insert_inode_locked4);


int generic_delete_inode(struct inode *inode)
{
	return 1;
}
EXPORT_SYMBOL(generic_delete_inode);

/*
 * Normal UNIX filesystem behaviour: delete the
 * inode when the usage count drops to zero, and
 * i_nlink is zero.
 */
int generic_drop_inode(struct inode *inode)
{
	return !inode->i_nlink || inode_unhashed(inode);
}
EXPORT_SYMBOL_GPL(generic_drop_inode);

/*
 * Called when we're dropping the last reference
 * to an inode.
 *
 * Call the FS "drop_inode()" function, defaulting to
 * the legacy UNIX filesystem behaviour. If it tells
 * us to evict inode, do so. Otherwise, retain inode
 * in cache if fs is alive, sync and evict if fs is
 * shutting down.
 */
static void iput_final(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	const struct super_operations *op = inode->i_sb->s_op;
	int drop;

	WARN_ON(inode->i_state & I_NEW);

	if (op->drop_inode)
		drop = op->drop_inode(inode);
	else
		drop = generic_drop_inode(inode);

	if (!drop && (sb->s_flags & MS_ACTIVE)) {
		inode->i_state |= I_REFERENCED;
		if (!(inode->i_state & (I_DIRTY|I_SYNC)))
			inode_lru_list_add(inode);
		spin_unlock(&inode->i_lock);
		return;
	}

	if (!drop) {
		inode->i_state |= I_WILL_FREE;
		spin_unlock(&inode->i_lock);
		write_inode_now(inode, 1);
		spin_lock(&inode->i_lock);
		WARN_ON(inode->i_state & I_NEW);
		inode->i_state &= ~I_WILL_FREE;
	}

	inode->i_state |= I_FREEING;
	if (!list_empty(&inode->i_lru))
		inode_lru_list_del(inode);
	spin_unlock(&inode->i_lock);

	evict(inode);
}

/**
 * iput - put an inode
 * @inode: inode to put
 *
 * Puts an inode, dropping its usage count. If the inode use count hits
 * zero, the inode is then freed and may also be destroyed.
 *
 * Consequently, iput() can sleep.
 */
void iput(struct inode *inode)
{
	if (inode) {
		BUG_ON(inode->i_state & I_CLEAR);

		if (atomic_dec_and_lock(&inode->i_count, &inode->i_lock))
			iput_final(inode);
	}
}
EXPORT_SYMBOL(iput);

/**
 * bmap - find a block number in a file
 * @inode: inode of file
 * @block: block to find
 *
 * Returns the block number on the device holding the inode that
 * is the disk block number for the block of the file requested.
 * That is, asked for block 4 of inode 1 the function will return the
 * disk block relative to the disk start that holds that block of the
 * file.
 */
sector_t bmap(struct inode *inode, sector_t block)
{
	sector_t res = 0;
	if (inode->i_mapping->a_ops->bmap)
		res = inode->i_mapping->a_ops->bmap(inode->i_mapping, block);
	return res;
}
EXPORT_SYMBOL(bmap);

/*
 * With relative atime, only update atime if the previous atime is
 * earlier than either the ctime or mtime or if at least a day has
 * passed since the last atime update.
 */
static int relatime_need_update(struct vfsmount *mnt, struct inode *inode,
			     struct timespec now)
{
	if (!(mnt->mnt_flags & MNT_RELATIME))
		return 1;
	/*
	 * Is mtime younger than atime? If yes, update atime:
	 */
	if (timespec_compare(&inode->i_mtime, &inode->i_atime) >= 0)
		return 1;
	/*
	 * Is ctime younger than atime? If yes, update atime:
	 */
	if (timespec_compare(&inode->i_ctime, &inode->i_atime) >= 0)
		return 1;

	/*
	 * Is the previous atime value older than a day? If yes,
	 * update atime:
	 */
	if ((long)(now.tv_sec - inode->i_atime.tv_sec) >= 24*60*60)
		return 1;
	/*
	 * Good, we can skip the atime update:
	 */
	return 0;
}

/**
 * touch_atime - update the access time
 * @mnt: mount the inode is accessed on
 * @dentry: dentry accessed
 *
 * Update the accessed time on an inode and mark it for writeback.
 * This function automatically handles read only file systems and media,
 * as well as the "noatime" flag and inode specific "noatime" markers.
 */
void touch_atime(struct vfsmount *mnt, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;
	struct timespec now;

	if (inode->i_flags & S_NOATIME)
		return;
	if (IS_NOATIME(inode))
		return;
	if ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode))
		return;

	if (mnt->mnt_flags & MNT_NOATIME)
		return;
	if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))
		return;

	now = current_fs_time(inode->i_sb);

	if (!relatime_need_update(mnt, inode, now))
		return;

	if (timespec_equal(&inode->i_atime, &now))
		return;

	if (mnt_want_write(mnt))
		return;

	inode->i_atime = now;
	mark_inode_dirty_sync(inode);
	mnt_drop_write(mnt);
}
EXPORT_SYMBOL(touch_atime);

/**
 * file_update_time - update mtime and ctime time
 * @file: file accessed
 *
 * Update the mtime and ctime members of an inode and mark the inode
 * for writeback. Note that this function is meant exclusively for
 * usage in the file write path of filesystems, and filesystems may
 * choose to explicitly ignore update via this function with the
 * S_NOCMTIME inode flag, e.g. for network filesystem where these
 * timestamps are handled by the server.
 */
void file_update_time(struct file *file)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct timespec now;
	enum { S_MTIME = 1, S_CTIME = 2, S_VERSION = 4 } sync_it = 0;

	/* First try to exhaust all avenues to not sync */
	if (IS_NOCMTIME(inode))
		return;

	now = current_fs_time(inode->i_sb);
	if (!timespec_equal(&inode->i_mtime, &now))
		sync_it = S_MTIME;

	if (!timespec_equal(&inode->i_ctime, &now))
		sync_it |= S_CTIME;

	if (IS_I_VERSION(inode))
		sync_it |= S_VERSION;

	if (!sync_it)
		return;

	/* Finally allowed to write? Takes lock. */
	if (mnt_want_write_file(file))
		return;

	/* Only change inode inside the lock region */
	if (sync_it & S_VERSION)
		inode_inc_iversion(inode);
	if (sync_it & S_CTIME)
		inode->i_ctime = now;
	if (sync_it & S_MTIME)
		inode->i_mtime = now;
	mark_inode_dirty_sync(inode);
	mnt_drop_write(file->f_path.mnt);
}
EXPORT_SYMBOL(file_update_time);

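/*
 * Usage sketch (illustrative): a filesystem's write path typically
 * stamps the timestamps once it is committed to writing; foo_file_write
 * is a made-up name:
 *
 *	static ssize_t foo_file_write(struct file *file,
 *				      const char __user *buf,
 *				      size_t len, loff_t *ppos)
 *	{
 *		file_update_time(file);
 *		... copy data, mark pages and inode dirty ...
 *	}
 */
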
int inode_needs_sync(struct inode *inode)
{
	if (IS_SYNC(inode))
		return 1;
	if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode))
		return 1;
	return 0;
}
EXPORT_SYMBOL(inode_needs_sync);

int inode_wait(void *word)
{
	schedule();
	return 0;
}
EXPORT_SYMBOL(inode_wait);

/*
 * If we try to find an inode in the inode hash while it is being
 * deleted, we have to wait until the filesystem completes its
 * deletion before reporting that it isn't found. This function waits
 * until the deletion _might_ have completed. Callers are responsible
 * to recheck inode state.
 *
 * It doesn't matter if I_NEW is not set initially, a call to
 * wake_up_bit(&inode->i_state, __I_NEW) after removing from the hash list
 * will DTRT.
 */
static void __wait_on_freeing_inode(struct inode *inode)
{
	wait_queue_head_t *wq;
	DEFINE_WAIT_BIT(wait, &inode->i_state, __I_NEW);
	wq = bit_waitqueue(&inode->i_state, __I_NEW);
	prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
	schedule();
	finish_wait(wq, &wait.wait);
	spin_lock(&inode_hash_lock);
}

static __initdata unsigned long ihash_entries;
static int __init set_ihash_entries(char *str)
{
	if (!str)
		return 0;
	ihash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("ihash_entries=", set_ihash_entries);

/*
 * Initialize the waitqueues and inode hash table.
 */
void __init inode_init_early(void)
{
	int loop;

	/* If hashes are distributed across NUMA nodes, defer
	 * hash allocation until vmalloc space is available.
	 */
	if (hashdist)
		return;

	inode_hashtable =
		alloc_large_system_hash("Inode-cache",
					sizeof(struct hlist_head),
					ihash_entries,
					14,
					HASH_EARLY,
					&i_hash_shift,
					&i_hash_mask,
					0);

	for (loop = 0; loop < (1 << i_hash_shift); loop++)
		INIT_HLIST_HEAD(&inode_hashtable[loop]);
}

void __init inode_init(void)
{
	int loop;

	/* inode slab cache */
	inode_cachep = kmem_cache_create("inode_cache",
					 sizeof(struct inode),
					 0,
					 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
					 SLAB_MEM_SPREAD),
					 init_once);

	/* Hash may have been set up in inode_init_early */
	if (!hashdist)
		return;

	inode_hashtable =
		alloc_large_system_hash("Inode-cache",
					sizeof(struct hlist_head),
					ihash_entries,
					14,
					0,
					&i_hash_shift,
					&i_hash_mask,
					0);

	for (loop = 0; loop < (1 << i_hash_shift); loop++)
		INIT_HLIST_HEAD(&inode_hashtable[loop]);
}

void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev)
{
	inode->i_mode = mode;
	if (S_ISCHR(mode)) {
		inode->i_fop = &def_chr_fops;
		inode->i_rdev = rdev;
	} else if (S_ISBLK(mode)) {
		inode->i_fop = &def_blk_fops;
		inode->i_rdev = rdev;
	} else if (S_ISFIFO(mode))
		inode->i_fop = &def_fifo_fops;
	else if (S_ISSOCK(mode))
		inode->i_fop = &bad_sock_fops;
	else
		printk(KERN_DEBUG "init_special_inode: bogus i_mode (%o) for"
				  " inode %s:%lu\n", mode, inode->i_sb->s_id,
				  inode->i_ino);
}
EXPORT_SYMBOL(init_special_inode);

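/*
 * Usage sketch (hypothetical): a ->mknod implementation hands the mode
 * and device number straight through once the inode exists:
 *
 *	init_special_inode(inode, mode, rdev);
 *	d_instantiate(dentry, inode);
 */
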
/**
 * inode_init_owner - Init uid,gid,mode for new inode according to posix standards
 * @inode: New inode
 * @dir: Directory inode
 * @mode: mode of the new inode
 */
void inode_init_owner(struct inode *inode, const struct inode *dir,
			mode_t mode)
{
	inode->i_uid = current_fsuid();
	if (dir && dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else
		inode->i_gid = current_fsgid();
	inode->i_mode = mode;
}
EXPORT_SYMBOL(inode_init_owner);

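/*
 * Usage sketch (hypothetical, for illustration only): called early in
 * any create-style operation so that setgid directories propagate their
 * group; foo_create is a made-up name:
 *
 *	static int foo_create(struct inode *dir, struct dentry *dentry,
 *			      int mode, struct nameidata *nd)
 *	{
 *		struct inode *inode = new_inode(dir->i_sb);
 *
 *		if (!inode)
 *			return -ENOMEM;
 *		inode_init_owner(inode, dir, mode);
 *		...
 *	}
 */
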
/**
 * inode_owner_or_capable - check current task permissions to inode
 * @inode: inode being checked
 *
 * Return true if current either has CAP_FOWNER to the inode, or
 * owns the file.
 */
bool inode_owner_or_capable(const struct inode *inode)
{
	struct user_namespace *ns = inode_userns(inode);

	if (current_user_ns() == ns && current_fsuid() == inode->i_uid)
		return true;
	if (ns_capable(ns, CAP_FOWNER))
		return true;
	return false;
}
EXPORT_SYMBOL(inode_owner_or_capable);

// SPDX-License-Identifier: GPL-2.0-only
/*
 * (C) 1997 Linus Torvalds
 * (C) 1999 Andrea Arcangeli <andrea@suse.de> (dynamic inode allocation)
 */
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/backing-dev.h>
#include <linux/hash.h>
#include <linux/swap.h>
#include <linux/security.h>
#include <linux/cdev.h>
#include <linux/memblock.h>
#include <linux/fsnotify.h>
#include <linux/mount.h>
#include <linux/posix_acl.h>
#include <linux/prefetch.h>
#include <linux/buffer_head.h> /* for inode_has_buffers */
#include <linux/ratelimit.h>
#include <linux/list_lru.h>
#include <linux/iversion.h>
#include <trace/events/writeback.h>
#include "internal.h"

/*
 * Inode locking rules:
 *
 * inode->i_lock protects:
 *   inode->i_state, inode->i_hash, __iget()
 * Inode LRU list locks protect:
 *   inode->i_sb->s_inode_lru, inode->i_lru
 * inode->i_sb->s_inode_list_lock protects:
 *   inode->i_sb->s_inodes, inode->i_sb_list
 * bdi->wb.list_lock protects:
 *   bdi->wb.b_{dirty,io,more_io,dirty_time}, inode->i_io_list
 * inode_hash_lock protects:
 *   inode_hashtable, inode->i_hash
 *
 * Lock ordering:
 *
 * inode->i_sb->s_inode_list_lock
 *   inode->i_lock
 *     Inode LRU list locks
 *
 * bdi->wb.list_lock
 *   inode->i_lock
 *
 * inode_hash_lock
 *   inode->i_sb->s_inode_list_lock
 *   inode->i_lock
 *
 * iunique_lock
 *   inode_hash_lock
 */

static unsigned int i_hash_mask __read_mostly;
static unsigned int i_hash_shift __read_mostly;
static struct hlist_head *inode_hashtable __read_mostly;
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock);

/*
 * Empty aops. Can be used for the cases where the user does not
 * define any of the address_space operations.
 */
const struct address_space_operations empty_aops = {
};
EXPORT_SYMBOL(empty_aops);

/*
 * Statistics gathering..
 */
struct inodes_stat_t inodes_stat;

static DEFINE_PER_CPU(unsigned long, nr_inodes);
static DEFINE_PER_CPU(unsigned long, nr_unused);

static struct kmem_cache *inode_cachep __read_mostly;

static long get_nr_inodes(void)
{
	int i;
	long sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_inodes, i);
	return sum < 0 ? 0 : sum;
}

static inline long get_nr_inodes_unused(void)
{
	int i;
	long sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_unused, i);
	return sum < 0 ? 0 : sum;
}

long get_nr_dirty_inodes(void)
{
	/* not actually dirty inodes, but a wild approximation */
	long nr_dirty = get_nr_inodes() - get_nr_inodes_unused();
	return nr_dirty > 0 ? nr_dirty : 0;
}

/*
 * Handle nr_inode sysctl
 */
#ifdef CONFIG_SYSCTL
int proc_nr_inodes(struct ctl_table *table, int write,
		   void *buffer, size_t *lenp, loff_t *ppos)
{
	inodes_stat.nr_inodes = get_nr_inodes();
	inodes_stat.nr_unused = get_nr_inodes_unused();
	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
#endif

static int no_open(struct inode *inode, struct file *file)
{
	return -ENXIO;
}

/**
 * inode_init_always - perform inode structure initialisation
 * @sb: superblock inode belongs to
 * @inode: inode to initialise
 *
 * These are initializations that need to be done on every inode
 * allocation as the fields are not initialised by slab allocation.
 */
int inode_init_always(struct super_block *sb, struct inode *inode)
{
	static const struct inode_operations empty_iops;
	static const struct file_operations no_open_fops = {.open = no_open};
	struct address_space *const mapping = &inode->i_data;

	inode->i_sb = sb;
	inode->i_blkbits = sb->s_blocksize_bits;
	inode->i_flags = 0;
	atomic64_set(&inode->i_sequence, 0);
	atomic_set(&inode->i_count, 1);
	inode->i_op = &empty_iops;
	inode->i_fop = &no_open_fops;
	inode->i_ino = 0;
	inode->__i_nlink = 1;
	inode->i_opflags = 0;
	if (sb->s_xattr)
		inode->i_opflags |= IOP_XATTR;
	i_uid_write(inode, 0);
	i_gid_write(inode, 0);
	atomic_set(&inode->i_writecount, 0);
	inode->i_size = 0;
	inode->i_write_hint = WRITE_LIFE_NOT_SET;
	inode->i_blocks = 0;
	inode->i_bytes = 0;
	inode->i_generation = 0;
	inode->i_pipe = NULL;
	inode->i_cdev = NULL;
	inode->i_link = NULL;
	inode->i_dir_seq = 0;
	inode->i_rdev = 0;
	inode->dirtied_when = 0;

#ifdef CONFIG_CGROUP_WRITEBACK
	inode->i_wb_frn_winner = 0;
	inode->i_wb_frn_avg_time = 0;
	inode->i_wb_frn_history = 0;
#endif

	if (security_inode_alloc(inode))
		goto out;
	spin_lock_init(&inode->i_lock);
	lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key);

	init_rwsem(&inode->i_rwsem);
	lockdep_set_class(&inode->i_rwsem, &sb->s_type->i_mutex_key);

	atomic_set(&inode->i_dio_count, 0);

	mapping->a_ops = &empty_aops;
	mapping->host = inode;
	mapping->flags = 0;
	if (sb->s_type->fs_flags & FS_THP_SUPPORT)
		__set_bit(AS_THP_SUPPORT, &mapping->flags);
	mapping->wb_err = 0;
	atomic_set(&mapping->i_mmap_writable, 0);
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
	atomic_set(&mapping->nr_thps, 0);
#endif
	mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE);
	mapping->private_data = NULL;
	mapping->writeback_index = 0;
	inode->i_private = NULL;
	inode->i_mapping = mapping;
	INIT_HLIST_HEAD(&inode->i_dentry);	/* buggered by rcu freeing */
#ifdef CONFIG_FS_POSIX_ACL
	inode->i_acl = inode->i_default_acl = ACL_NOT_CACHED;
#endif

#ifdef CONFIG_FSNOTIFY
	inode->i_fsnotify_mask = 0;
#endif
	inode->i_flctx = NULL;
	this_cpu_inc(nr_inodes);

	return 0;
out:
	return -ENOMEM;
}
EXPORT_SYMBOL(inode_init_always);

void free_inode_nonrcu(struct inode *inode)
{
	kmem_cache_free(inode_cachep, inode);
}
EXPORT_SYMBOL(free_inode_nonrcu);

static void i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	if (inode->free_inode)
		inode->free_inode(inode);
	else
		free_inode_nonrcu(inode);
}

static struct inode *alloc_inode(struct super_block *sb)
{
	const struct super_operations *ops = sb->s_op;
	struct inode *inode;

	if (ops->alloc_inode)
		inode = ops->alloc_inode(sb);
	else
		inode = kmem_cache_alloc(inode_cachep, GFP_KERNEL);

	if (!inode)
		return NULL;

	if (unlikely(inode_init_always(sb, inode))) {
		if (ops->destroy_inode) {
			ops->destroy_inode(inode);
			if (!ops->free_inode)
				return NULL;
		}
		inode->free_inode = ops->free_inode;
		i_callback(&inode->i_rcu);
		return NULL;
	}

	return inode;
}

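/*
 * Usage sketch (hypothetical "foo" filesystem, for illustration only):
 * the usual pattern behind ->alloc_inode/->free_inode is an fs-private
 * structure that embeds the VFS inode; foo_inode_cachep is a made-up
 * slab cache:
 *
 *	struct foo_inode_info {
 *		u32 private_state;
 *		struct inode vfs_inode;
 *	};
 *
 *	static struct inode *foo_alloc_inode(struct super_block *sb)
 *	{
 *		struct foo_inode_info *fi;
 *
 *		fi = kmem_cache_alloc(foo_inode_cachep, GFP_KERNEL);
 *		if (!fi)
 *			return NULL;
 *		return &fi->vfs_inode;
 *	}
 *
 *	static void foo_free_inode(struct inode *inode)
 *	{
 *		kmem_cache_free(foo_inode_cachep,
 *				container_of(inode, struct foo_inode_info,
 *					     vfs_inode));
 *	}
 */
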
void __destroy_inode(struct inode *inode)
{
	BUG_ON(inode_has_buffers(inode));
	inode_detach_wb(inode);
	security_inode_free(inode);
	fsnotify_inode_delete(inode);
	locks_free_lock_context(inode);
	if (!inode->i_nlink) {
		WARN_ON(atomic_long_read(&inode->i_sb->s_remove_count) == 0);
		atomic_long_dec(&inode->i_sb->s_remove_count);
	}

#ifdef CONFIG_FS_POSIX_ACL
	if (inode->i_acl && !is_uncached_acl(inode->i_acl))
		posix_acl_release(inode->i_acl);
	if (inode->i_default_acl && !is_uncached_acl(inode->i_default_acl))
		posix_acl_release(inode->i_default_acl);
#endif
	this_cpu_dec(nr_inodes);
}
EXPORT_SYMBOL(__destroy_inode);

static void destroy_inode(struct inode *inode)
{
	const struct super_operations *ops = inode->i_sb->s_op;

	BUG_ON(!list_empty(&inode->i_lru));
	__destroy_inode(inode);
	if (ops->destroy_inode) {
		ops->destroy_inode(inode);
		if (!ops->free_inode)
			return;
	}
	inode->free_inode = ops->free_inode;
	call_rcu(&inode->i_rcu, i_callback);
}

/**
 * drop_nlink - directly drop an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink. In cases
 * where we are attempting to track writes to the
 * filesystem, a decrement to zero means an imminent
 * write when the file is truncated and actually unlinked
 * on the filesystem.
 */
void drop_nlink(struct inode *inode)
{
	WARN_ON(inode->i_nlink == 0);
	inode->__i_nlink--;
	if (!inode->i_nlink)
		atomic_long_inc(&inode->i_sb->s_remove_count);
}
EXPORT_SYMBOL(drop_nlink);

/**
 * clear_nlink - directly zero an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink. See
 * drop_nlink() for why we care about i_nlink hitting zero.
 */
void clear_nlink(struct inode *inode)
{
	if (inode->i_nlink) {
		inode->__i_nlink = 0;
		atomic_long_inc(&inode->i_sb->s_remove_count);
	}
}
EXPORT_SYMBOL(clear_nlink);

/**
 * set_nlink - directly set an inode's link count
 * @inode: inode
 * @nlink: new nlink (should be non-zero)
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink.
 */
void set_nlink(struct inode *inode, unsigned int nlink)
{
	if (!nlink) {
		clear_nlink(inode);
	} else {
		/* Yes, some filesystems do change nlink from zero to one */
		if (inode->i_nlink == 0)
			atomic_long_dec(&inode->i_sb->s_remove_count);

		inode->__i_nlink = nlink;
	}
}
EXPORT_SYMBOL(set_nlink);

350/**
351 * inc_nlink - directly increment an inode's link count
352 * @inode: inode
353 *
354 * This is a low-level filesystem helper to replace any
355 * direct filesystem manipulation of i_nlink. Currently,
356 * it is only here for parity with dec_nlink().
357 */
358void inc_nlink(struct inode *inode)
359{
360 if (unlikely(inode->i_nlink == 0)) {
361 WARN_ON(!(inode->i_state & I_LINKABLE));
362 atomic_long_dec(&inode->i_sb->s_remove_count);
363 }
364
365 inode->__i_nlink++;
366}
367EXPORT_SYMBOL(inc_nlink);
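
/*
 * Illustrative sketch: a filesystem's unlink path, for a hypothetical
 * "foofs", would normally pair these helpers with its on-disk update:
 *
 *	static int foofs_unlink(struct inode *dir, struct dentry *dentry)
 *	{
 *		struct inode *inode = d_inode(dentry);
 *
 *		...remove the directory entry on disk...
 *		drop_nlink(inode);	// may take i_nlink to zero
 *		mark_inode_dirty(inode);
 *		return 0;
 *	}
 *
 * The point is that i_nlink is only ever changed through set_nlink(),
 * drop_nlink(), inc_nlink() and clear_nlink(), so that s_remove_count
 * stays balanced.
 */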
368
369static void __address_space_init_once(struct address_space *mapping)
370{
371 xa_init_flags(&mapping->i_pages, XA_FLAGS_LOCK_IRQ | XA_FLAGS_ACCOUNT);
372 init_rwsem(&mapping->i_mmap_rwsem);
373 INIT_LIST_HEAD(&mapping->private_list);
374 spin_lock_init(&mapping->private_lock);
375 mapping->i_mmap = RB_ROOT_CACHED;
376}
377
378void address_space_init_once(struct address_space *mapping)
379{
380 memset(mapping, 0, sizeof(*mapping));
381 __address_space_init_once(mapping);
382}
383EXPORT_SYMBOL(address_space_init_once);
384
385/*
386 * These are initializations that only need to be done
387 * once, because the fields are idempotent across use
388 * of the inode, so let the slab be aware of that.
389 */
390void inode_init_once(struct inode *inode)
391{
392 memset(inode, 0, sizeof(*inode));
393 INIT_HLIST_NODE(&inode->i_hash);
394 INIT_LIST_HEAD(&inode->i_devices);
395 INIT_LIST_HEAD(&inode->i_io_list);
396 INIT_LIST_HEAD(&inode->i_wb_list);
397 INIT_LIST_HEAD(&inode->i_lru);
398 __address_space_init_once(&inode->i_data);
399 i_size_ordered_init(inode);
400}
401EXPORT_SYMBOL(inode_init_once);
402
403static void init_once(void *foo)
404{
405 struct inode *inode = (struct inode *) foo;
406
407 inode_init_once(inode);
408}
409
410/*
411 * inode->i_lock must be held
412 */
413void __iget(struct inode *inode)
414{
415 atomic_inc(&inode->i_count);
416}
417
418/*
419 * get additional reference to inode; caller must already hold one.
420 */
421void ihold(struct inode *inode)
422{
423 WARN_ON(atomic_inc_return(&inode->i_count) < 2);
424}
425EXPORT_SYMBOL(ihold);
426
427static void inode_lru_list_add(struct inode *inode)
428{
429 if (list_lru_add(&inode->i_sb->s_inode_lru, &inode->i_lru))
430 this_cpu_inc(nr_unused);
431 else
432 inode->i_state |= I_REFERENCED;
433}
434
435/*
436 * Add inode to LRU if needed (inode is unused and clean).
437 *
438 * Needs inode->i_lock held.
439 */
440void inode_add_lru(struct inode *inode)
441{
442 if (!(inode->i_state & (I_DIRTY_ALL | I_SYNC |
443 I_FREEING | I_WILL_FREE)) &&
444 !atomic_read(&inode->i_count) && inode->i_sb->s_flags & SB_ACTIVE)
445 inode_lru_list_add(inode);
446}
447
449static void inode_lru_list_del(struct inode *inode)
450{
452 if (list_lru_del(&inode->i_sb->s_inode_lru, &inode->i_lru))
453 this_cpu_dec(nr_unused);
454}
455
456/**
457 * inode_sb_list_add - add inode to the superblock list of inodes
458 * @inode: inode to add
459 */
460void inode_sb_list_add(struct inode *inode)
461{
462 spin_lock(&inode->i_sb->s_inode_list_lock);
463 list_add(&inode->i_sb_list, &inode->i_sb->s_inodes);
464 spin_unlock(&inode->i_sb->s_inode_list_lock);
465}
466EXPORT_SYMBOL_GPL(inode_sb_list_add);
467
468static inline void inode_sb_list_del(struct inode *inode)
469{
470 if (!list_empty(&inode->i_sb_list)) {
471 spin_lock(&inode->i_sb->s_inode_list_lock);
472 list_del_init(&inode->i_sb_list);
473 spin_unlock(&inode->i_sb->s_inode_list_lock);
474 }
475}
476
477static unsigned long hash(struct super_block *sb, unsigned long hashval)
478{
479 unsigned long tmp;
480
481 tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
482 L1_CACHE_BYTES;
483 tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> i_hash_shift);
484 return tmp & i_hash_mask;
485}
486
487/**
488 * __insert_inode_hash - hash an inode
489 * @inode: unhashed inode
490 * @hashval: unsigned long value used to locate this object in the
491 * inode_hashtable.
492 *
493 * Add an inode to the inode hash for this superblock.
494 */
495void __insert_inode_hash(struct inode *inode, unsigned long hashval)
496{
497 struct hlist_head *b = inode_hashtable + hash(inode->i_sb, hashval);
498
499 spin_lock(&inode_hash_lock);
500 spin_lock(&inode->i_lock);
501 hlist_add_head_rcu(&inode->i_hash, b);
502 spin_unlock(&inode->i_lock);
503 spin_unlock(&inode_hash_lock);
504}
505EXPORT_SYMBOL(__insert_inode_hash);
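
/*
 * Illustrative sketch: most filesystems hash on the inode number via the
 * insert_inode_hash() wrapper in <linux/fs.h>:
 *
 *	inode->i_ino = ino;
 *	insert_inode_hash(inode);	// __insert_inode_hash(inode, i_ino)
 *
 * Filesystems whose identity is wider than an inode number pass their own
 * hash value to __insert_inode_hash() directly.
 */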
506
507/**
508 * __remove_inode_hash - remove an inode from the hash
509 * @inode: inode to unhash
510 *
511 * Remove an inode from the inode hash for its superblock.
512 */
513void __remove_inode_hash(struct inode *inode)
514{
515 spin_lock(&inode_hash_lock);
516 spin_lock(&inode->i_lock);
517 hlist_del_init_rcu(&inode->i_hash);
518 spin_unlock(&inode->i_lock);
519 spin_unlock(&inode_hash_lock);
520}
521EXPORT_SYMBOL(__remove_inode_hash);
522
523void clear_inode(struct inode *inode)
524{
525 /*
526 * We have to cycle the i_pages lock here because reclaim can be in the
527 * process of removing the last page (in __delete_from_page_cache())
528 * and we must not free the mapping under it.
529 */
530 xa_lock_irq(&inode->i_data.i_pages);
531 BUG_ON(inode->i_data.nrpages);
532 /*
533 * Almost always, mapping_empty(&inode->i_data) here; but there are
534 * two known and long-standing ways in which nodes may get left behind
535 * (when deep radix-tree node allocation failed partway; or when THP
536 * collapse_file() failed). Until those two known cases are cleaned up,
537 * or a cleanup function is called here, do not BUG_ON(!mapping_empty),
538 * nor even WARN_ON(!mapping_empty).
539 */
540 xa_unlock_irq(&inode->i_data.i_pages);
541 BUG_ON(!list_empty(&inode->i_data.private_list));
542 BUG_ON(!(inode->i_state & I_FREEING));
543 BUG_ON(inode->i_state & I_CLEAR);
544 BUG_ON(!list_empty(&inode->i_wb_list));
545 /* don't need i_lock here, no concurrent mods to i_state */
546 inode->i_state = I_FREEING | I_CLEAR;
547}
548EXPORT_SYMBOL(clear_inode);
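
/*
 * Illustrative sketch: a typical ->evict_inode() implementation (for a
 * hypothetical "foofs") ends with this pair:
 *
 *	static void foofs_evict_inode(struct inode *inode)
 *	{
 *		truncate_inode_pages_final(&inode->i_data);
 *		clear_inode(inode);
 *		...release filesystem-private resources...
 *	}
 */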
549
550/*
551 * Free the inode passed in, removing it from the lists it is still connected
552 * to. We remove any pages still attached to the inode and wait for any IO that
553 * is still in progress before finally destroying the inode.
554 *
555 * An inode must already be marked I_FREEING so that we avoid the inode being
556 * moved back onto lists if we race with other code that manipulates the lists
557 * (e.g. writeback_single_inode). The caller is responsible for setting this.
558 *
559 * An inode must already be removed from the LRU list before being evicted from
560 * the cache. This should occur atomically with setting the I_FREEING state
561 * flag, so no inodes here should ever be on the LRU when being evicted.
562 */
563static void evict(struct inode *inode)
564{
565 const struct super_operations *op = inode->i_sb->s_op;
566
567 BUG_ON(!(inode->i_state & I_FREEING));
568 BUG_ON(!list_empty(&inode->i_lru));
569
570 if (!list_empty(&inode->i_io_list))
571 inode_io_list_del(inode);
572
573 inode_sb_list_del(inode);
574
575 /*
576 * Wait for flusher thread to be done with the inode so that filesystem
577 * does not start destroying it while writeback is still running. Since
578 * the inode has I_FREEING set, flusher thread won't start new work on
579 * the inode. We just have to wait for running writeback to finish.
580 */
581 inode_wait_for_writeback(inode);
582
583 if (op->evict_inode) {
584 op->evict_inode(inode);
585 } else {
586 truncate_inode_pages_final(&inode->i_data);
587 clear_inode(inode);
588 }
589 if (S_ISCHR(inode->i_mode) && inode->i_cdev)
590 cd_forget(inode);
591
592 remove_inode_hash(inode);
593
594 spin_lock(&inode->i_lock);
595 wake_up_bit(&inode->i_state, __I_NEW);
596 BUG_ON(inode->i_state != (I_FREEING | I_CLEAR));
597 spin_unlock(&inode->i_lock);
598
599 destroy_inode(inode);
600}
601
602/*
603 * dispose_list - dispose of the contents of a local list
604 * @head: the head of the list to free
605 *
606 * Dispose-list gets a local list with local inodes in it, so it doesn't
607 * need to worry about list corruption and SMP locks.
608 */
609static void dispose_list(struct list_head *head)
610{
611 while (!list_empty(head)) {
612 struct inode *inode;
613
614 inode = list_first_entry(head, struct inode, i_lru);
615 list_del_init(&inode->i_lru);
616
617 evict(inode);
618 cond_resched();
619 }
620}
621
622/**
623 * evict_inodes - evict all evictable inodes for a superblock
624 * @sb: superblock to operate on
625 *
626 * Make sure that no inodes with zero refcount are retained. This is
627 * called by superblock shutdown after having SB_ACTIVE flag removed,
628 * so any inode reaching zero refcount during or after that call will
629 * be immediately evicted.
630 */
631void evict_inodes(struct super_block *sb)
632{
633 struct inode *inode, *next;
634 LIST_HEAD(dispose);
635
636again:
637 spin_lock(&sb->s_inode_list_lock);
638 list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
639 if (atomic_read(&inode->i_count))
640 continue;
641
642 spin_lock(&inode->i_lock);
643 if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
644 spin_unlock(&inode->i_lock);
645 continue;
646 }
647
648 inode->i_state |= I_FREEING;
649 inode_lru_list_del(inode);
650 spin_unlock(&inode->i_lock);
651 list_add(&inode->i_lru, &dispose);
652
653 /*
654 * We can have a ton of inodes to evict at unmount time given
655 * enough memory, check to see if we need to go to sleep for a
656 * bit so we don't livelock.
657 */
658 if (need_resched()) {
659 spin_unlock(&sb->s_inode_list_lock);
660 cond_resched();
661 dispose_list(&dispose);
662 goto again;
663 }
664 }
665 spin_unlock(&sb->s_inode_list_lock);
666
667 dispose_list(&dispose);
668}
669EXPORT_SYMBOL_GPL(evict_inodes);
670
671/**
672 * invalidate_inodes - attempt to free all inodes on a superblock
673 * @sb: superblock to operate on
674 * @kill_dirty: flag to guide handling of dirty inodes
675 *
676 * Attempts to free all inodes for a given superblock. If there were any
677 * busy inodes return a non-zero value, else zero.
678 * If @kill_dirty is set, discard dirty inodes too, otherwise treat
679 * them as busy.
680 */
681int invalidate_inodes(struct super_block *sb, bool kill_dirty)
682{
683 int busy = 0;
684 struct inode *inode, *next;
685 LIST_HEAD(dispose);
686
687again:
688 spin_lock(&sb->s_inode_list_lock);
689 list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
690 spin_lock(&inode->i_lock);
691 if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
692 spin_unlock(&inode->i_lock);
693 continue;
694 }
695 if (inode->i_state & I_DIRTY_ALL && !kill_dirty) {
696 spin_unlock(&inode->i_lock);
697 busy = 1;
698 continue;
699 }
700 if (atomic_read(&inode->i_count)) {
701 spin_unlock(&inode->i_lock);
702 busy = 1;
703 continue;
704 }
705
706 inode->i_state |= I_FREEING;
707 inode_lru_list_del(inode);
708 spin_unlock(&inode->i_lock);
709 list_add(&inode->i_lru, &dispose);
710 if (need_resched()) {
711 spin_unlock(&sb->s_inode_list_lock);
712 cond_resched();
713 dispose_list(&dispose);
714 goto again;
715 }
716 }
717 spin_unlock(&sb->s_inode_list_lock);
718
719 dispose_list(&dispose);
720
721 return busy;
722}
723
724/*
725 * Isolate the inode from the LRU in preparation for freeing it.
726 *
727 * Any inodes which are pinned purely because of attached pagecache have their
728 * pagecache removed. If the inode has metadata buffers attached to
729 * mapping->private_list then try to remove them.
730 *
731 * If the inode has the I_REFERENCED flag set, then it means that it has been
732 * used recently - the flag is set in iput_final(). When we encounter such an
733 * inode, clear the flag and move it to the back of the LRU so it gets another
734 * pass through the LRU before it gets reclaimed. This is necessary because of
735 * the fact we are doing lazy LRU updates to minimise lock contention so the
736 * LRU does not have strict ordering. Hence we don't want to reclaim inodes
737 * with this flag set because they are the inodes that are out of order.
738 */
739static enum lru_status inode_lru_isolate(struct list_head *item,
740 struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
741{
742 struct list_head *freeable = arg;
743 struct inode *inode = container_of(item, struct inode, i_lru);
744
745 /*
746 * we are inverting the lru lock/inode->i_lock here, so use a trylock.
747 * If we fail to get the lock, just skip it.
748 */
749 if (!spin_trylock(&inode->i_lock))
750 return LRU_SKIP;
751
752 /*
753 * Referenced or dirty inodes are still in use. Give them another pass
754	 * through the LRU as we cannot reclaim them now.
755 */
756 if (atomic_read(&inode->i_count) ||
757 (inode->i_state & ~I_REFERENCED)) {
758 list_lru_isolate(lru, &inode->i_lru);
759 spin_unlock(&inode->i_lock);
760 this_cpu_dec(nr_unused);
761 return LRU_REMOVED;
762 }
763
764 /* recently referenced inodes get one more pass */
765 if (inode->i_state & I_REFERENCED) {
766 inode->i_state &= ~I_REFERENCED;
767 spin_unlock(&inode->i_lock);
768 return LRU_ROTATE;
769 }
770
771 if (inode_has_buffers(inode) || inode->i_data.nrpages) {
772 __iget(inode);
773 spin_unlock(&inode->i_lock);
774 spin_unlock(lru_lock);
775 if (remove_inode_buffers(inode)) {
776 unsigned long reap;
777 reap = invalidate_mapping_pages(&inode->i_data, 0, -1);
778 if (current_is_kswapd())
779 __count_vm_events(KSWAPD_INODESTEAL, reap);
780 else
781 __count_vm_events(PGINODESTEAL, reap);
782 if (current->reclaim_state)
783 current->reclaim_state->reclaimed_slab += reap;
784 }
785 iput(inode);
786 spin_lock(lru_lock);
787 return LRU_RETRY;
788 }
789
790 WARN_ON(inode->i_state & I_NEW);
791 inode->i_state |= I_FREEING;
792 list_lru_isolate_move(lru, &inode->i_lru, freeable);
793 spin_unlock(&inode->i_lock);
794
795 this_cpu_dec(nr_unused);
796 return LRU_REMOVED;
797}
798
799/*
800 * Walk the superblock inode LRU for freeable inodes and attempt to free them.
801 * This is called from the superblock shrinker function with a number of inodes
802 * to trim from the LRU. Inodes to be freed are moved to a temporary list and
803 * then are freed outside inode_lock by dispose_list().
804 */
805long prune_icache_sb(struct super_block *sb, struct shrink_control *sc)
806{
807 LIST_HEAD(freeable);
808 long freed;
809
810 freed = list_lru_shrink_walk(&sb->s_inode_lru, sc,
811 inode_lru_isolate, &freeable);
812 dispose_list(&freeable);
813 return freed;
814}
815
816static void __wait_on_freeing_inode(struct inode *inode);
817/*
818 * Called with the inode_hash_lock held.
819 */
820static struct inode *find_inode(struct super_block *sb,
821 struct hlist_head *head,
822 int (*test)(struct inode *, void *),
823 void *data)
824{
825 struct inode *inode = NULL;
826
827repeat:
828 hlist_for_each_entry(inode, head, i_hash) {
829 if (inode->i_sb != sb)
830 continue;
831 if (!test(inode, data))
832 continue;
833 spin_lock(&inode->i_lock);
834 if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
835 __wait_on_freeing_inode(inode);
836 goto repeat;
837 }
838 if (unlikely(inode->i_state & I_CREATING)) {
839 spin_unlock(&inode->i_lock);
840 return ERR_PTR(-ESTALE);
841 }
842 __iget(inode);
843 spin_unlock(&inode->i_lock);
844 return inode;
845 }
846 return NULL;
847}
848
849/*
850 * find_inode_fast is the fast path version of find_inode, see the comment at
851 * iget_locked for details.
852 */
853static struct inode *find_inode_fast(struct super_block *sb,
854 struct hlist_head *head, unsigned long ino)
855{
856 struct inode *inode = NULL;
857
858repeat:
859 hlist_for_each_entry(inode, head, i_hash) {
860 if (inode->i_ino != ino)
861 continue;
862 if (inode->i_sb != sb)
863 continue;
864 spin_lock(&inode->i_lock);
865 if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
866 __wait_on_freeing_inode(inode);
867 goto repeat;
868 }
869 if (unlikely(inode->i_state & I_CREATING)) {
870 spin_unlock(&inode->i_lock);
871 return ERR_PTR(-ESTALE);
872 }
873 __iget(inode);
874 spin_unlock(&inode->i_lock);
875 return inode;
876 }
877 return NULL;
878}
879
880/*
881 * Each cpu owns a range of LAST_INO_BATCH numbers.
882 * 'shared_last_ino' is dirtied only once out of LAST_INO_BATCH allocations,
883 * to renew the exhausted range.
884 *
885 * This does not significantly increase overflow rate because every CPU can
886 * consume at most LAST_INO_BATCH-1 unused inode numbers. So there is
887 * NR_CPUS*(LAST_INO_BATCH-1) wastage. With NR_CPUS=4096 and LAST_INO_BATCH=1024
888 * this is ~0.1% of the 2^32 range, and is a worst case. Even 50% wastage would
889 * only increase the overflow rate by 2x, which does not seem too significant.
890 *
891 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
892 * error if st_ino won't fit in target struct field. Use 32bit counter
893 * here to attempt to avoid that.
894 */
895#define LAST_INO_BATCH 1024
896static DEFINE_PER_CPU(unsigned int, last_ino);
897
898unsigned int get_next_ino(void)
899{
900 unsigned int *p = &get_cpu_var(last_ino);
901 unsigned int res = *p;
902
903#ifdef CONFIG_SMP
904 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
905 static atomic_t shared_last_ino;
906 int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
907
908 res = next - LAST_INO_BATCH;
909 }
910#endif
911
912 res++;
913 /* get_next_ino should not provide a 0 inode number */
914 if (unlikely(!res))
915 res++;
916 *p = res;
917 put_cpu_var(last_ino);
918 return res;
919}
920EXPORT_SYMBOL(get_next_ino);
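
/*
 * Illustrative sketch: in-memory filesystems such as ramfs stamp freshly
 * allocated inodes like so:
 *
 *	inode = new_inode(sb);
 *	if (inode)
 *		inode->i_ino = get_next_ino();
 *
 * The result is only 32 bits wide and may eventually wrap, so this is
 * unsuitable for filesystems that need permanently unique numbers.
 */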
921
922/**
923 * new_inode_pseudo - obtain an inode
924 * @sb: superblock
925 *
926 * Allocates a new inode for given superblock.
927 * The inode won't be chained into the superblock's s_inodes list.
928 * This means:
929 * - the fs can't be unmounted
930 * - quotas, fsnotify and writeback can't work on it
931 */
932struct inode *new_inode_pseudo(struct super_block *sb)
933{
934 struct inode *inode = alloc_inode(sb);
935
936 if (inode) {
937 spin_lock(&inode->i_lock);
938 inode->i_state = 0;
939 spin_unlock(&inode->i_lock);
940 INIT_LIST_HEAD(&inode->i_sb_list);
941 }
942 return inode;
943}
944
945/**
946 * new_inode - obtain an inode
947 * @sb: superblock
948 *
949 * Allocates a new inode for given superblock. The default gfp_mask
950 * for allocations related to inode->i_mapping is GFP_HIGHUSER_MOVABLE.
951 * If HIGHMEM pages are unsuitable or it is known that pages allocated
952 * for the page cache are not reclaimable or migratable,
953 * mapping_set_gfp_mask() must be called with suitable flags on the
954 * newly created inode's mapping.
956 */
957struct inode *new_inode(struct super_block *sb)
958{
959 struct inode *inode;
960
961 spin_lock_prefetch(&sb->s_inode_list_lock);
962
963 inode = new_inode_pseudo(sb);
964 if (inode)
965 inode_sb_list_add(inode);
966 return inode;
967}
968EXPORT_SYMBOL(new_inode);
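
/*
 * Illustrative sketch: a filesystem that cannot use highmem or movable
 * pages for its page cache would override the default mask right after
 * allocation; GFP_USER below is only an example value:
 *
 *	struct inode *inode = new_inode(sb);
 *
 *	if (inode)
 *		mapping_set_gfp_mask(inode->i_mapping, GFP_USER);
 */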
969
970#ifdef CONFIG_DEBUG_LOCK_ALLOC
971void lockdep_annotate_inode_mutex_key(struct inode *inode)
972{
973 if (S_ISDIR(inode->i_mode)) {
974 struct file_system_type *type = inode->i_sb->s_type;
975
976 /* Set new key only if filesystem hasn't already changed it */
977 if (lockdep_match_class(&inode->i_rwsem, &type->i_mutex_key)) {
978 /*
979 * ensure nobody is actually holding i_mutex
980 */
981 // mutex_destroy(&inode->i_mutex);
982 init_rwsem(&inode->i_rwsem);
983 lockdep_set_class(&inode->i_rwsem,
984 &type->i_mutex_dir_key);
985 }
986 }
987}
988EXPORT_SYMBOL(lockdep_annotate_inode_mutex_key);
989#endif
990
991/**
992 * unlock_new_inode - clear the I_NEW state and wake up any waiters
993 * @inode: new inode to unlock
994 *
995 * Called when the inode is fully initialised to clear the new state of the
996 * inode and wake up anyone waiting for the inode to finish initialisation.
997 */
998void unlock_new_inode(struct inode *inode)
999{
1000 lockdep_annotate_inode_mutex_key(inode);
1001 spin_lock(&inode->i_lock);
1002 WARN_ON(!(inode->i_state & I_NEW));
1003 inode->i_state &= ~I_NEW & ~I_CREATING;
1004 smp_mb();
1005 wake_up_bit(&inode->i_state, __I_NEW);
1006 spin_unlock(&inode->i_lock);
1007}
1008EXPORT_SYMBOL(unlock_new_inode);
1009
1010void discard_new_inode(struct inode *inode)
1011{
1012 lockdep_annotate_inode_mutex_key(inode);
1013 spin_lock(&inode->i_lock);
1014 WARN_ON(!(inode->i_state & I_NEW));
1015 inode->i_state &= ~I_NEW;
1016 smp_mb();
1017 wake_up_bit(&inode->i_state, __I_NEW);
1018 spin_unlock(&inode->i_lock);
1019 iput(inode);
1020}
1021EXPORT_SYMBOL(discard_new_inode);
1022
1023/**
1024 * lock_two_nondirectories - take two i_mutexes on non-directory objects
1025 *
1026 * Lock any non-NULL argument that is not a directory.
1027 * Zero, one or two objects may be locked by this function.
1028 *
1029 * @inode1: first inode to lock
1030 * @inode2: second inode to lock
1031 */
1032void lock_two_nondirectories(struct inode *inode1, struct inode *inode2)
1033{
1034 if (inode1 > inode2)
1035 swap(inode1, inode2);
1036
1037 if (inode1 && !S_ISDIR(inode1->i_mode))
1038 inode_lock(inode1);
1039 if (inode2 && !S_ISDIR(inode2->i_mode) && inode2 != inode1)
1040 inode_lock_nested(inode2, I_MUTEX_NONDIR2);
1041}
1042EXPORT_SYMBOL(lock_two_nondirectories);
1043
1044/**
1045 * unlock_two_nondirectories - release locks from lock_two_nondirectories()
1046 * @inode1: first inode to unlock
1047 * @inode2: second inode to unlock
1048 */
1049void unlock_two_nondirectories(struct inode *inode1, struct inode *inode2)
1050{
1051 if (inode1 && !S_ISDIR(inode1->i_mode))
1052 inode_unlock(inode1);
1053 if (inode2 && !S_ISDIR(inode2->i_mode) && inode2 != inode1)
1054 inode_unlock(inode2);
1055}
1056EXPORT_SYMBOL(unlock_two_nondirectories);
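
/*
 * Illustrative sketch: callers such as cross-file clone or dedupe helpers
 * bracket their work with this pair:
 *
 *	lock_two_nondirectories(src, dst);
 *	...operate on both inodes...
 *	unlock_two_nondirectories(src, dst);
 *
 * The inodes may be passed in either order; the lock side sorts them by
 * address so the lock ordering stays stable.
 */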
1057
1058/**
1059 * inode_insert5 - obtain an inode from a mounted file system
1060 * @inode: pre-allocated inode to use for insert to cache
1061 * @hashval: hash value (usually inode number) to get
1062 * @test: callback used for comparisons between inodes
1063 * @set: callback used to initialize a new struct inode
1064 * @data: opaque data pointer to pass to @test and @set
1065 *
1066 * Search for the inode specified by @hashval and @data in the inode cache,
1067 * and if present return it with an increased reference count. This is a
1068 * variant of iget5_locked() for callers that don't want to fail on memory
1069 * allocation of the inode.
1070 *
1071 * If the inode is not in cache, insert the pre-allocated inode to cache and
1072 * return it locked, hashed, and with the I_NEW flag set. The file system gets
1073 * to fill it in before unlocking it via unlock_new_inode().
1074 *
1075 * Note both @test and @set are called with the inode_hash_lock held, so they
1076 * can't sleep.
1077 */
1078struct inode *inode_insert5(struct inode *inode, unsigned long hashval,
1079 int (*test)(struct inode *, void *),
1080 int (*set)(struct inode *, void *), void *data)
1081{
1082 struct hlist_head *head = inode_hashtable + hash(inode->i_sb, hashval);
1083 struct inode *old;
1084 bool creating = inode->i_state & I_CREATING;
1085
1086again:
1087 spin_lock(&inode_hash_lock);
1088 old = find_inode(inode->i_sb, head, test, data);
1089 if (unlikely(old)) {
1090 /*
1091 * Uhhuh, somebody else created the same inode under us.
1092 * Use the old inode instead of the preallocated one.
1093 */
1094 spin_unlock(&inode_hash_lock);
1095 if (IS_ERR(old))
1096 return NULL;
1097 wait_on_inode(old);
1098 if (unlikely(inode_unhashed(old))) {
1099 iput(old);
1100 goto again;
1101 }
1102 return old;
1103 }
1104
1105 if (set && unlikely(set(inode, data))) {
1106 inode = NULL;
1107 goto unlock;
1108 }
1109
1110 /*
1111 * Return the locked inode with I_NEW set, the
1112 * caller is responsible for filling in the contents
1113 */
1114 spin_lock(&inode->i_lock);
1115 inode->i_state |= I_NEW;
1116 hlist_add_head_rcu(&inode->i_hash, head);
1117 spin_unlock(&inode->i_lock);
1118 if (!creating)
1119 inode_sb_list_add(inode);
1120unlock:
1121 spin_unlock(&inode_hash_lock);
1122
1123 return inode;
1124}
1125EXPORT_SYMBOL(inode_insert5);
1126
1127/**
1128 * iget5_locked - obtain an inode from a mounted file system
1129 * @sb: super block of file system
1130 * @hashval: hash value (usually inode number) to get
1131 * @test: callback used for comparisons between inodes
1132 * @set: callback used to initialize a new struct inode
1133 * @data: opaque data pointer to pass to @test and @set
1134 *
1135 * Search for the inode specified by @hashval and @data in the inode cache,
1136 * and if present return it with an increased reference count. This is
1137 * a generalized version of iget_locked() for file systems where the inode
1138 * number is not sufficient for unique identification of an inode.
1139 *
1140 * If the inode is not in cache, allocate a new inode and return it locked,
1141 * hashed, and with the I_NEW flag set. The file system gets to fill it in
1142 * before unlocking it via unlock_new_inode().
1143 *
1144 * Note both @test and @set are called with the inode_hash_lock held, so they
1145 * can't sleep.
1146 */
1147struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
1148 int (*test)(struct inode *, void *),
1149 int (*set)(struct inode *, void *), void *data)
1150{
1151 struct inode *inode = ilookup5(sb, hashval, test, data);
1152
1153 if (!inode) {
1154 struct inode *new = alloc_inode(sb);
1155
1156 if (new) {
1157 new->i_state = 0;
1158 inode = inode_insert5(new, hashval, test, set, data);
1159 if (unlikely(inode != new))
1160 destroy_inode(new);
1161 }
1162 }
1163 return inode;
1164}
1165EXPORT_SYMBOL(iget5_locked);
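
/*
 * Illustrative sketch of the @test/@set contract, for a hypothetical
 * filesystem that identifies inodes by a 64-bit object id (FOOFS_I()
 * and objid are made up for the example):
 *
 *	static int foofs_itest(struct inode *inode, void *data)
 *	{
 *		return FOOFS_I(inode)->objid == *(u64 *)data;
 *	}
 *
 *	static int foofs_iset(struct inode *inode, void *data)
 *	{
 *		FOOFS_I(inode)->objid = *(u64 *)data;
 *		return 0;
 *	}
 *
 *	inode = iget5_locked(sb, (unsigned long)objid,
 *			     foofs_itest, foofs_iset, &objid);
 *	if (inode && (inode->i_state & I_NEW)) {
 *		...read the on-disk inode...
 *		unlock_new_inode(inode);
 *	}
 *
 * Both callbacks run under inode_hash_lock, so neither may sleep.
 */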
1166
1167/**
1168 * iget_locked - obtain an inode from a mounted file system
1169 * @sb: super block of file system
1170 * @ino: inode number to get
1171 *
1172 * Search for the inode specified by @ino in the inode cache and if present
1173 * return it with an increased reference count. This is for file systems
1174 * where the inode number is sufficient for unique identification of an inode.
1175 *
1176 * If the inode is not in cache, allocate a new inode and return it locked,
1177 * hashed, and with the I_NEW flag set. The file system gets to fill it in
1178 * before unlocking it via unlock_new_inode().
1179 */
1180struct inode *iget_locked(struct super_block *sb, unsigned long ino)
1181{
1182 struct hlist_head *head = inode_hashtable + hash(sb, ino);
1183 struct inode *inode;
1184again:
1185 spin_lock(&inode_hash_lock);
1186 inode = find_inode_fast(sb, head, ino);
1187 spin_unlock(&inode_hash_lock);
1188 if (inode) {
1189 if (IS_ERR(inode))
1190 return NULL;
1191 wait_on_inode(inode);
1192 if (unlikely(inode_unhashed(inode))) {
1193 iput(inode);
1194 goto again;
1195 }
1196 return inode;
1197 }
1198
1199 inode = alloc_inode(sb);
1200 if (inode) {
1201 struct inode *old;
1202
1203 spin_lock(&inode_hash_lock);
1204 /* We released the lock, so.. */
1205 old = find_inode_fast(sb, head, ino);
1206 if (!old) {
1207 inode->i_ino = ino;
1208 spin_lock(&inode->i_lock);
1209 inode->i_state = I_NEW;
1210 hlist_add_head_rcu(&inode->i_hash, head);
1211 spin_unlock(&inode->i_lock);
1212 inode_sb_list_add(inode);
1213 spin_unlock(&inode_hash_lock);
1214
1215 /* Return the locked inode with I_NEW set, the
1216 * caller is responsible for filling in the contents
1217 */
1218 return inode;
1219 }
1220
1221 /*
1222 * Uhhuh, somebody else created the same inode under
1223 * us. Use the old inode instead of the one we just
1224 * allocated.
1225 */
1226 spin_unlock(&inode_hash_lock);
1227 destroy_inode(inode);
1228 if (IS_ERR(old))
1229 return NULL;
1230 inode = old;
1231 wait_on_inode(inode);
1232 if (unlikely(inode_unhashed(inode))) {
1233 iput(inode);
1234 goto again;
1235 }
1236 }
1237 return inode;
1238}
1239EXPORT_SYMBOL(iget_locked);
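
/*
 * Illustrative sketch of the usual caller pattern:
 *
 *	inode = iget_locked(sb, ino);
 *	if (!inode)
 *		return ERR_PTR(-ENOMEM);
 *	if (!(inode->i_state & I_NEW))
 *		return inode;		// cache hit, fully set up
 *
 *	...read the inode from disk; on failure call iget_failed(inode)...
 *	unlock_new_inode(inode);
 *	return inode;
 *
 * iget_failed() (declared in <linux/fs.h>) unhashes and drops a
 * half-built inode so that concurrent lookups do not see a stale I_NEW.
 */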
1240
1241/*
1242 * search the inode cache for a matching inode number.
1243 * If we find one, then the inode number we are trying to
1244 * allocate is not unique and so we should not use it.
1245 *
1246 * Returns 1 if the inode number is unique, 0 if it is not.
1247 */
1248static int test_inode_iunique(struct super_block *sb, unsigned long ino)
1249{
1250 struct hlist_head *b = inode_hashtable + hash(sb, ino);
1251 struct inode *inode;
1252
1253 hlist_for_each_entry_rcu(inode, b, i_hash) {
1254 if (inode->i_ino == ino && inode->i_sb == sb)
1255 return 0;
1256 }
1257 return 1;
1258}
1259
1260/**
1261 * iunique - get a unique inode number
1262 * @sb: superblock
1263 * @max_reserved: highest reserved inode number
1264 *
1265 * Obtain an inode number that is unique on the system for a given
1266 * superblock. This is used by file systems that have no natural
1267 * permanent inode numbering system. An inode number is returned that
1268 * is higher than the reserved limit but unique.
1269 *
1270 * BUGS:
1271 * With a large number of inodes live on the file system this function
1272 * currently becomes quite slow.
1273 */
1274ino_t iunique(struct super_block *sb, ino_t max_reserved)
1275{
1276 /*
1277 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
1278 * error if st_ino won't fit in target struct field. Use 32bit counter
1279 * here to attempt to avoid that.
1280 */
1281 static DEFINE_SPINLOCK(iunique_lock);
1282 static unsigned int counter;
1283 ino_t res;
1284
1285 rcu_read_lock();
1286 spin_lock(&iunique_lock);
1287 do {
1288 if (counter <= max_reserved)
1289 counter = max_reserved + 1;
1290 res = counter++;
1291 } while (!test_inode_iunique(sb, res));
1292 spin_unlock(&iunique_lock);
1293 rcu_read_unlock();
1294
1295 return res;
1296}
1297EXPORT_SYMBOL(iunique);
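
/*
 * Illustrative sketch: a synthetic filesystem that fabricates inodes at
 * lookup time might reserve a few fixed numbers and take the rest from
 * iunique(); FOOFS_ROOT_INO is hypothetical:
 *
 *	inode->i_ino = iunique(sb, FOOFS_ROOT_INO);
 *
 * Anything at or below max_reserved is never handed out.
 */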
1298
1299struct inode *igrab(struct inode *inode)
1300{
1301 spin_lock(&inode->i_lock);
1302 if (!(inode->i_state & (I_FREEING|I_WILL_FREE))) {
1303 __iget(inode);
1304 spin_unlock(&inode->i_lock);
1305 } else {
1306 spin_unlock(&inode->i_lock);
1307 /*
1308		 * Handle the case where s_op->clear_inode has not been
1309 * called yet, and somebody is calling igrab
1310 * while the inode is getting freed.
1311 */
1312 inode = NULL;
1313 }
1314 return inode;
1315}
1316EXPORT_SYMBOL(igrab);
1317
1318/**
1319 * ilookup5_nowait - search for an inode in the inode cache
1320 * @sb: super block of file system to search
1321 * @hashval: hash value (usually inode number) to search for
1322 * @test: callback used for comparisons between inodes
1323 * @data: opaque data pointer to pass to @test
1324 *
1325 * Search for the inode specified by @hashval and @data in the inode cache.
1326 * If the inode is in the cache, the inode is returned with an incremented
1327 * reference count.
1328 *
1329 * Note: I_NEW is not waited upon so you have to be very careful what you do
1330 * with the returned inode. You probably should be using ilookup5() instead.
1331 *
1332 * Note2: @test is called with the inode_hash_lock held, so can't sleep.
1333 */
1334struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval,
1335 int (*test)(struct inode *, void *), void *data)
1336{
1337 struct hlist_head *head = inode_hashtable + hash(sb, hashval);
1338 struct inode *inode;
1339
1340 spin_lock(&inode_hash_lock);
1341 inode = find_inode(sb, head, test, data);
1342 spin_unlock(&inode_hash_lock);
1343
1344 return IS_ERR(inode) ? NULL : inode;
1345}
1346EXPORT_SYMBOL(ilookup5_nowait);
1347
1348/**
1349 * ilookup5 - search for an inode in the inode cache
1350 * @sb: super block of file system to search
1351 * @hashval: hash value (usually inode number) to search for
1352 * @test: callback used for comparisons between inodes
1353 * @data: opaque data pointer to pass to @test
1354 *
1355 * Search for the inode specified by @hashval and @data in the inode cache,
1356 * and if the inode is in the cache, return the inode with an incremented
1357 * reference count.  Waits on I_NEW before returning the inode.
1359 *
1360 * This is a generalized version of ilookup() for file systems where the
1361 * inode number is not sufficient for unique identification of an inode.
1362 *
1363 * Note: @test is called with the inode_hash_lock held, so can't sleep.
1364 */
1365struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
1366 int (*test)(struct inode *, void *), void *data)
1367{
1368 struct inode *inode;
1369again:
1370 inode = ilookup5_nowait(sb, hashval, test, data);
1371 if (inode) {
1372 wait_on_inode(inode);
1373 if (unlikely(inode_unhashed(inode))) {
1374 iput(inode);
1375 goto again;
1376 }
1377 }
1378 return inode;
1379}
1380EXPORT_SYMBOL(ilookup5);
1381
1382/**
1383 * ilookup - search for an inode in the inode cache
1384 * @sb: super block of file system to search
1385 * @ino: inode number to search for
1386 *
1387 * Search for the inode @ino in the inode cache, and if the inode is in the
1388 * cache, the inode is returned with an incremented reference count.
1389 */
1390struct inode *ilookup(struct super_block *sb, unsigned long ino)
1391{
1392 struct hlist_head *head = inode_hashtable + hash(sb, ino);
1393 struct inode *inode;
1394again:
1395 spin_lock(&inode_hash_lock);
1396 inode = find_inode_fast(sb, head, ino);
1397 spin_unlock(&inode_hash_lock);
1398
1399 if (inode) {
1400 if (IS_ERR(inode))
1401 return NULL;
1402 wait_on_inode(inode);
1403 if (unlikely(inode_unhashed(inode))) {
1404 iput(inode);
1405 goto again;
1406 }
1407 }
1408 return inode;
1409}
1410EXPORT_SYMBOL(ilookup);
1411
1412/**
1413 * find_inode_nowait - find an inode in the inode cache
1414 * @sb: super block of file system to search
1415 * @hashval: hash value (usually inode number) to search for
1416 * @match: callback used for comparisons between inodes
1417 * @data: opaque data pointer to pass to @match
1418 *
1419 * Search for the inode specified by @hashval and @data in the inode
1420 * cache, where the helper function @match will return 0 if the inode
1421 * does not match, 1 if the inode does match, and -1 if the search
1422 * should be stopped.  The @match function is responsible for taking
1423 * the i_lock spin_lock, checking i_state for an inode being freed or
1424 * being initialized, and incrementing the reference count before
1425 * returning 1.  It also must not sleep, since it is called with the
1426 * inode_hash_lock spinlock held.
1427 *
1428 * This is an even more generalized version of ilookup5() for when the
1429 * function must never block --- find_inode() can block in
1430 * __wait_on_freeing_inode() --- or when the caller cannot increment
1431 * the reference count because the resulting iput() might cause an
1432 * inode eviction.  The tradeoff is that the @match function must be
1433 * implemented very carefully.
1434 */
1435struct inode *find_inode_nowait(struct super_block *sb,
1436 unsigned long hashval,
1437 int (*match)(struct inode *, unsigned long,
1438 void *),
1439 void *data)
1440{
1441 struct hlist_head *head = inode_hashtable + hash(sb, hashval);
1442 struct inode *inode, *ret_inode = NULL;
1443 int mval;
1444
1445 spin_lock(&inode_hash_lock);
1446 hlist_for_each_entry(inode, head, i_hash) {
1447 if (inode->i_sb != sb)
1448 continue;
1449 mval = match(inode, hashval, data);
1450 if (mval == 0)
1451 continue;
1452 if (mval == 1)
1453 ret_inode = inode;
1454 goto out;
1455 }
1456out:
1457 spin_unlock(&inode_hash_lock);
1458 return ret_inode;
1459}
1460EXPORT_SYMBOL(find_inode_nowait);
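
/*
 * Illustrative sketch of a @match callback honouring the contract above
 * (foofs names are hypothetical):
 *
 *	static int foofs_match(struct inode *inode, unsigned long hashval,
 *			       void *data)
 *	{
 *		if (FOOFS_I(inode)->objid != *(u64 *)data)
 *			return 0;	// keep scanning
 *		spin_lock(&inode->i_lock);
 *		if (inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW)) {
 *			spin_unlock(&inode->i_lock);
 *			return -1;	// stop; caller sees NULL
 *		}
 *		__iget(inode);
 *		spin_unlock(&inode->i_lock);
 *		return 1;		// take this inode
 *	}
 */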
1461
1462/**
1463 * find_inode_rcu - find an inode in the inode cache
1464 * @sb: Super block of file system to search
1465 * @hashval: Key to hash
1466 * @test: Function to test match on an inode
1467 * @data: Data for test function
1468 *
1469 * Search for the inode specified by @hashval and @data in the inode cache,
1470 * where the helper function @test will return 0 if the inode does not match
1471 * and 1 if it does. The @test function is responsible for taking the
1472 * i_lock spin_lock and checking i_state for an inode being freed or being
1473 * initialized.
1474 *
1475 * If successful, this will return the inode for which the @test function
1476 * returned 1 and NULL otherwise.
1477 *
1478 * The @test function is not permitted to take a ref on any inode presented.
1479 * It is also not permitted to sleep.
1480 *
1481 * The caller must hold the RCU read lock.
1482 */
1483struct inode *find_inode_rcu(struct super_block *sb, unsigned long hashval,
1484 int (*test)(struct inode *, void *), void *data)
1485{
1486 struct hlist_head *head = inode_hashtable + hash(sb, hashval);
1487 struct inode *inode;
1488
1489 RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
1490 "suspicious find_inode_rcu() usage");
1491
1492 hlist_for_each_entry_rcu(inode, head, i_hash) {
1493 if (inode->i_sb == sb &&
1494 !(READ_ONCE(inode->i_state) & (I_FREEING | I_WILL_FREE)) &&
1495 test(inode, data))
1496 return inode;
1497 }
1498 return NULL;
1499}
1500EXPORT_SYMBOL(find_inode_rcu);
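
/*
 * Illustrative sketch: peeking at the cache without taking a reference
 * (foofs_itest as sketched above for iget5_locked()):
 *
 *	rcu_read_lock();
 *	inode = find_inode_rcu(sb, hashval, foofs_itest, &objid);
 *	if (inode)
 *		...inspect only fields that are valid under RCU...
 *	rcu_read_unlock();
 *
 * The inode may be freed as soon as the read lock is dropped, so the
 * pointer must not be dereferenced afterwards.
 */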
1501
1502/**
1503 * find_inode_by_ino_rcu - Find an inode in the inode cache
1504 * @sb: Super block of file system to search
1505 * @ino: The inode number to match
1506 *
1507 * Search for the inode specified by @ino in the inode cache, skipping any
1508 * inode that is being freed or is still being initialized.
1509 *
1510 * If successful, this will return the matching inode and NULL otherwise.
1511 *
1519 * The caller must hold the RCU read lock.
1520 */
1521struct inode *find_inode_by_ino_rcu(struct super_block *sb,
1522 unsigned long ino)
1523{
1524 struct hlist_head *head = inode_hashtable + hash(sb, ino);
1525 struct inode *inode;
1526
1527 RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
1528 "suspicious find_inode_by_ino_rcu() usage");
1529
1530 hlist_for_each_entry_rcu(inode, head, i_hash) {
1531 if (inode->i_ino == ino &&
1532 inode->i_sb == sb &&
1533 !(READ_ONCE(inode->i_state) & (I_FREEING | I_WILL_FREE)))
1534 return inode;
1535 }
1536 return NULL;
1537}
1538EXPORT_SYMBOL(find_inode_by_ino_rcu);
1539
1540int insert_inode_locked(struct inode *inode)
1541{
1542 struct super_block *sb = inode->i_sb;
1543 ino_t ino = inode->i_ino;
1544 struct hlist_head *head = inode_hashtable + hash(sb, ino);
1545
1546 while (1) {
1547 struct inode *old = NULL;
1548 spin_lock(&inode_hash_lock);
1549 hlist_for_each_entry(old, head, i_hash) {
1550 if (old->i_ino != ino)
1551 continue;
1552 if (old->i_sb != sb)
1553 continue;
1554 spin_lock(&old->i_lock);
1555 if (old->i_state & (I_FREEING|I_WILL_FREE)) {
1556 spin_unlock(&old->i_lock);
1557 continue;
1558 }
1559 break;
1560 }
1561 if (likely(!old)) {
1562 spin_lock(&inode->i_lock);
1563 inode->i_state |= I_NEW | I_CREATING;
1564 hlist_add_head_rcu(&inode->i_hash, head);
1565 spin_unlock(&inode->i_lock);
1566 spin_unlock(&inode_hash_lock);
1567 return 0;
1568 }
1569 if (unlikely(old->i_state & I_CREATING)) {
1570 spin_unlock(&old->i_lock);
1571 spin_unlock(&inode_hash_lock);
1572 return -EBUSY;
1573 }
1574 __iget(old);
1575 spin_unlock(&old->i_lock);
1576 spin_unlock(&inode_hash_lock);
1577 wait_on_inode(old);
1578 if (unlikely(!inode_unhashed(old))) {
1579 iput(old);
1580 return -EBUSY;
1581 }
1582 iput(old);
1583 }
1584}
1585EXPORT_SYMBOL(insert_inode_locked);
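
/*
 * Illustrative sketch: a create path that allocates the inode number
 * first and then publishes the inode (the error policy is a
 * per-filesystem choice):
 *
 *	inode->i_ino = ino;
 *	if (insert_inode_locked(inode) < 0) {
 *		...an inode with this number is already live; back out...
 *		iput(inode);
 *		return -EBUSY;
 *	}
 *	...finish setting up, create the directory entry...
 *	unlock_new_inode(inode);
 */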
1586
1587int insert_inode_locked4(struct inode *inode, unsigned long hashval,
1588 int (*test)(struct inode *, void *), void *data)
1589{
1590 struct inode *old;
1591
1592 inode->i_state |= I_CREATING;
1593 old = inode_insert5(inode, hashval, test, NULL, data);
1594
1595 if (old != inode) {
1596 iput(old);
1597 return -EBUSY;
1598 }
1599 return 0;
1600}
1601EXPORT_SYMBOL(insert_inode_locked4);
1602
1603
1604int generic_delete_inode(struct inode *inode)
1605{
1606 return 1;
1607}
1608EXPORT_SYMBOL(generic_delete_inode);
1609
1610/*
1611 * Called when we're dropping the last reference
1612 * to an inode.
1613 *
1614 * Call the FS "drop_inode()" function, defaulting to
1615 * the legacy UNIX filesystem behaviour. If it tells
1616 * us to evict the inode, do so. Otherwise, retain the inode
1617 * in cache if the fs is alive, and sync and evict it if the
1618 * fs is shutting down.
1619 */
1620static void iput_final(struct inode *inode)
1621{
1622 struct super_block *sb = inode->i_sb;
1623 const struct super_operations *op = inode->i_sb->s_op;
1624 unsigned long state;
1625 int drop;
1626
1627 WARN_ON(inode->i_state & I_NEW);
1628
1629 if (op->drop_inode)
1630 drop = op->drop_inode(inode);
1631 else
1632 drop = generic_drop_inode(inode);
1633
1634 if (!drop &&
1635 !(inode->i_state & I_DONTCACHE) &&
1636 (sb->s_flags & SB_ACTIVE)) {
1637 inode_add_lru(inode);
1638 spin_unlock(&inode->i_lock);
1639 return;
1640 }
1641
1642 state = inode->i_state;
1643 if (!drop) {
1644 WRITE_ONCE(inode->i_state, state | I_WILL_FREE);
1645 spin_unlock(&inode->i_lock);
1646
1647 write_inode_now(inode, 1);
1648
1649 spin_lock(&inode->i_lock);
1650 state = inode->i_state;
1651 WARN_ON(state & I_NEW);
1652 state &= ~I_WILL_FREE;
1653 }
1654
1655 WRITE_ONCE(inode->i_state, state | I_FREEING);
1656 if (!list_empty(&inode->i_lru))
1657 inode_lru_list_del(inode);
1658 spin_unlock(&inode->i_lock);
1659
1660 evict(inode);
1661}
1662
1663/**
1664 * iput - put an inode
1665 * @inode: inode to put
1666 *
1667 * Puts an inode, dropping its usage count. If the inode use count hits
1668 * zero, the inode is then freed and may also be destroyed.
1669 *
1670 * Consequently, iput() can sleep.
1671 */
1672void iput(struct inode *inode)
1673{
1674 if (!inode)
1675 return;
1676 BUG_ON(inode->i_state & I_CLEAR);
1677retry:
1678 if (atomic_dec_and_lock(&inode->i_count, &inode->i_lock)) {
1679 if (inode->i_nlink && (inode->i_state & I_DIRTY_TIME)) {
1680 atomic_inc(&inode->i_count);
1681 spin_unlock(&inode->i_lock);
1682 trace_writeback_lazytime_iput(inode);
1683 mark_inode_dirty_sync(inode);
1684 goto retry;
1685 }
1686 iput_final(inode);
1687 }
1688}
1689EXPORT_SYMBOL(iput);
1690
1691#ifdef CONFIG_BLOCK
1692/**
1693 * bmap - find a block number in a file
1694 * @inode: inode owning the block number being requested
1695 * @block: pointer containing the block to find
1696 *
1697 * Replaces the value in ``*block`` with the number of the block on the device
1698 * that holds the requested block of the file.
1699 * That is, asked for block 4 of inode 1, the function will replace the
1700 * 4 in ``*block`` with the disk block, relative to the start of the disk,
1701 * that holds that block of the file.
1702 *
1703 * Returns -EINVAL in case of error, 0 otherwise. If mapping falls into a
1704 * hole, returns 0 and ``*block`` is also set to 0.
1705 */
1706int bmap(struct inode *inode, sector_t *block)
1707{
1708 if (!inode->i_mapping->a_ops->bmap)
1709 return -EINVAL;
1710
1711 *block = inode->i_mapping->a_ops->bmap(inode->i_mapping, *block);
1712 return 0;
1713}
1714EXPORT_SYMBOL(bmap);
1715#endif
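
/*
 * Illustrative sketch: mapping logical block 4 of a file to a disk block:
 *
 *	sector_t blk = 4;
 *	int err = bmap(inode, &blk);
 *
 *	if (!err && blk != 0)
 *		...blk now holds the on-disk block number...
 *
 * err == 0 with blk == 0 means block 4 falls in a hole.
 */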
1716
1717/*
1718 * With relative atime, only update atime if the previous atime is
1719 * earlier than either the ctime or mtime or if at least a day has
1720 * passed since the last atime update.
1721 */
1722static int relatime_need_update(struct vfsmount *mnt, struct inode *inode,
1723 struct timespec64 now)
1724{
1725
1726 if (!(mnt->mnt_flags & MNT_RELATIME))
1727 return 1;
1728 /*
1729 * Is mtime younger than atime? If yes, update atime:
1730 */
1731 if (timespec64_compare(&inode->i_mtime, &inode->i_atime) >= 0)
1732 return 1;
1733 /*
1734 * Is ctime younger than atime? If yes, update atime:
1735 */
1736 if (timespec64_compare(&inode->i_ctime, &inode->i_atime) >= 0)
1737 return 1;
1738
1739 /*
1740 * Is the previous atime value older than a day? If yes,
1741 * update atime:
1742 */
1743 if ((long)(now.tv_sec - inode->i_atime.tv_sec) >= 24*60*60)
1744 return 1;
1745 /*
1746 * Good, we can skip the atime update:
1747 */
1748 return 0;
1749}
1750
1751int generic_update_time(struct inode *inode, struct timespec64 *time, int flags)
1752{
1753 int dirty_flags = 0;
1754
1755 if (flags & (S_ATIME | S_CTIME | S_MTIME)) {
1756 if (flags & S_ATIME)
1757 inode->i_atime = *time;
1758 if (flags & S_CTIME)
1759 inode->i_ctime = *time;
1760 if (flags & S_MTIME)
1761 inode->i_mtime = *time;
1762
1763 if (inode->i_sb->s_flags & SB_LAZYTIME)
1764 dirty_flags |= I_DIRTY_TIME;
1765 else
1766 dirty_flags |= I_DIRTY_SYNC;
1767 }
1768
1769 if ((flags & S_VERSION) && inode_maybe_inc_iversion(inode, false))
1770 dirty_flags |= I_DIRTY_SYNC;
1771
1772 __mark_inode_dirty(inode, dirty_flags);
1773 return 0;
1774}
1775EXPORT_SYMBOL(generic_update_time);
1776
1777/*
1778 * This does the actual work of updating an inode's time or version. The
1779 * caller must have called mnt_want_write() before calling this.
1780 */
1781static int update_time(struct inode *inode, struct timespec64 *time, int flags)
1782{
1783 if (inode->i_op->update_time)
1784 return inode->i_op->update_time(inode, time, flags);
1785 return generic_update_time(inode, time, flags);
1786}
1787
1788/**
1789 * atime_needs_update - check whether the access time needs updating
1790 * @path: the &struct path being accessed
1791 * @inode: inode of the path
1792 *
1793 * Determine whether the access time on an inode should be updated.
1794 * This function automatically handles read-only file systems and media,
1795 * as well as the "noatime" flag and inode-specific "noatime" markers.
1796 */
1797bool atime_needs_update(const struct path *path, struct inode *inode)
1798{
1799 struct vfsmount *mnt = path->mnt;
1800 struct timespec64 now;
1801
1802 if (inode->i_flags & S_NOATIME)
1803 return false;
1804
1805 /* Atime updates will likely cause i_uid and i_gid to be written
1806	 * back improperly if their true value is unknown to the vfs.
1807 */
1808 if (HAS_UNMAPPED_ID(mnt_user_ns(mnt), inode))
1809 return false;
1810
1811 if (IS_NOATIME(inode))
1812 return false;
1813 if ((inode->i_sb->s_flags & SB_NODIRATIME) && S_ISDIR(inode->i_mode))
1814 return false;
1815
1816 if (mnt->mnt_flags & MNT_NOATIME)
1817 return false;
1818 if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))
1819 return false;
1820
1821 now = current_time(inode);
1822
1823 if (!relatime_need_update(mnt, inode, now))
1824 return false;
1825
1826 if (timespec64_equal(&inode->i_atime, &now))
1827 return false;
1828
1829 return true;
1830}
1831
1832void touch_atime(const struct path *path)
1833{
1834 struct vfsmount *mnt = path->mnt;
1835 struct inode *inode = d_inode(path->dentry);
1836 struct timespec64 now;
1837
1838 if (!atime_needs_update(path, inode))
1839 return;
1840
1841 if (!sb_start_write_trylock(inode->i_sb))
1842 return;
1843
1844 if (__mnt_want_write(mnt) != 0)
1845 goto skip_update;
1846 /*
1847 * File systems can error out when updating inodes if they need to
1848 * allocate new space to modify an inode (such is the case for
1849 * Btrfs), but since we touch atime while walking down the path we
1850 * really don't care if we failed to update the atime of the file,
1851 * so just ignore the return value.
1852 * We may also fail on filesystems that have the ability to make parts
1853 * of the fs read only, e.g. subvolumes in Btrfs.
1854 */
1855 now = current_time(inode);
1856 update_time(inode, &now, S_ATIME);
1857 __mnt_drop_write(mnt);
1858skip_update:
1859 sb_end_write(inode->i_sb);
1860}
1861EXPORT_SYMBOL(touch_atime);
1862
1863/*
1864 * The logic we want is
1865 *
1866 * if suid or (sgid and xgrp)
1867 * remove privs
1868 */
1869int should_remove_suid(struct dentry *dentry)
1870{
1871 umode_t mode = d_inode(dentry)->i_mode;
1872 int kill = 0;
1873
1874 /* suid always must be killed */
1875 if (unlikely(mode & S_ISUID))
1876 kill = ATTR_KILL_SUID;
1877
1878 /*
1879 * sgid without any exec bits is just a mandatory locking mark; leave
1880 * it alone. If some exec bits are set, it's a real sgid; kill it.
1881 */
1882 if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
1883 kill |= ATTR_KILL_SGID;
1884
1885 if (unlikely(kill && !capable(CAP_FSETID) && S_ISREG(mode)))
1886 return kill;
1887
1888 return 0;
1889}
1890EXPORT_SYMBOL(should_remove_suid);
1891
1892/*
1893 * Return mask of changes for notify_change() that need to be done as a
1894 * response to write or truncate. Return 0 if nothing has to be changed.
1895 * Negative value on error (change should be denied).
1896 */
1897int dentry_needs_remove_privs(struct dentry *dentry)
1898{
1899 struct inode *inode = d_inode(dentry);
1900 int mask = 0;
1901 int ret;
1902
1903 if (IS_NOSEC(inode))
1904 return 0;
1905
1906 mask = should_remove_suid(dentry);
1907 ret = security_inode_need_killpriv(dentry);
1908 if (ret < 0)
1909 return ret;
1910 if (ret)
1911 mask |= ATTR_KILL_PRIV;
1912 return mask;
1913}
1914
1915static int __remove_privs(struct user_namespace *mnt_userns,
1916 struct dentry *dentry, int kill)
1917{
1918 struct iattr newattrs;
1919
1920 newattrs.ia_valid = ATTR_FORCE | kill;
1921 /*
1922 * Note we call this on write, so notify_change will not
1923 * encounter any conflicting delegations:
1924 */
1925 return notify_change(mnt_userns, dentry, &newattrs, NULL);
1926}
1927
1928/*
1929 * Remove special file privileges (suid, capabilities) when file is written
1930 * to or truncated.
1931 */
1932int file_remove_privs(struct file *file)
1933{
1934 struct dentry *dentry = file_dentry(file);
1935 struct inode *inode = file_inode(file);
1936 int kill;
1937 int error = 0;
1938
1939 /*
1940 * Fast path for nothing security related.
1941 * As well for non-regular files, e.g. blkdev inodes.
1942 * For example, blkdev_write_iter() might get here
1943 * trying to remove privs which it is not allowed to.
1944 */
1945 if (IS_NOSEC(inode) || !S_ISREG(inode->i_mode))
1946 return 0;
1947
1948 kill = dentry_needs_remove_privs(dentry);
1949 if (kill < 0)
1950 return kill;
1951 if (kill)
1952 error = __remove_privs(file_mnt_user_ns(file), dentry, kill);
1953 if (!error)
1954 inode_has_no_xattr(inode);
1955
1956 return error;
1957}
1958EXPORT_SYMBOL(file_remove_privs);
1959
1960/**
1961 * file_update_time - update mtime and ctime time
1962 * @file: file accessed
1963 *
1964 * Update the mtime and ctime members of an inode and mark the inode
1965 * for writeback. Note that this function is meant exclusively for
1966 * usage in the file write path of filesystems, and filesystems may
1967 * choose to explicitly ignore updates via this function with the
1968 * S_NOCMTIME inode flag, e.g. for network filesystems where these
1969 * timestamps are handled by the server. This can return an error for
1970 * file systems that need to allocate space in order to update an inode.
1971 */
1972
1973int file_update_time(struct file *file)
1974{
1975 struct inode *inode = file_inode(file);
1976 struct timespec64 now;
1977 int sync_it = 0;
1978 int ret;
1979
1980 /* First try to exhaust all avenues to not sync */
1981 if (IS_NOCMTIME(inode))
1982 return 0;
1983
1984 now = current_time(inode);
1985 if (!timespec64_equal(&inode->i_mtime, &now))
1986 sync_it = S_MTIME;
1987
1988 if (!timespec64_equal(&inode->i_ctime, &now))
1989 sync_it |= S_CTIME;
1990
1991 if (IS_I_VERSION(inode) && inode_iversion_need_inc(inode))
1992 sync_it |= S_VERSION;
1993
1994 if (!sync_it)
1995 return 0;
1996
1997 /* Finally allowed to write? Takes lock. */
1998 if (__mnt_want_write_file(file))
1999 return 0;
2000
2001 ret = update_time(inode, &now, sync_it);
2002 __mnt_drop_write_file(file);
2003
2004 return ret;
2005}
2006EXPORT_SYMBOL(file_update_time);
2007
2008/* Caller must hold the file's inode lock */
2009int file_modified(struct file *file)
2010{
2011 int err;
2012
2013 /*
2014 * Clear the security bits if the process is not being run by root.
2015 * This keeps people from modifying setuid and setgid binaries.
2016 */
2017 err = file_remove_privs(file);
2018 if (err)
2019 return err;
2020
2021 if (unlikely(file->f_mode & FMODE_NOCMTIME))
2022 return 0;
2023
2024 return file_update_time(file);
2025}
2026EXPORT_SYMBOL(file_modified);
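
/*
 * Illustrative sketch: a write path wiring this in (the inode lock
 * requirement comes from the comment above):
 *
 *	inode_lock(inode);
 *	ret = file_modified(file);	// strip privs, update c/mtime
 *	if (!ret)
 *		ret = ...actually write the data...;
 *	inode_unlock(inode);
 */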
2027
2028int inode_needs_sync(struct inode *inode)
2029{
2030 if (IS_SYNC(inode))
2031 return 1;
2032 if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode))
2033 return 1;
2034 return 0;
2035}
2036EXPORT_SYMBOL(inode_needs_sync);
2037
2038/*
2039 * If we try to find an inode in the inode hash while it is being
2040 * deleted, we have to wait until the filesystem completes its
2041 * deletion before reporting that it isn't found. This function waits
2042 * until the deletion _might_ have completed. Callers are responsible
2043 * for rechecking the inode state.
2044 *
2045 * It doesn't matter if I_NEW is not set initially, a call to
2046 * wake_up_bit(&inode->i_state, __I_NEW) after removing from the hash list
2047 * will DTRT.
2048 */
2049static void __wait_on_freeing_inode(struct inode *inode)
2050{
2051 wait_queue_head_t *wq;
2052 DEFINE_WAIT_BIT(wait, &inode->i_state, __I_NEW);
2053 wq = bit_waitqueue(&inode->i_state, __I_NEW);
2054 prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
2055 spin_unlock(&inode->i_lock);
2056 spin_unlock(&inode_hash_lock);
2057 schedule();
2058 finish_wait(wq, &wait.wq_entry);
2059 spin_lock(&inode_hash_lock);
2060}
2061
2062static __initdata unsigned long ihash_entries;
2063static int __init set_ihash_entries(char *str)
2064{
2065 if (!str)
2066 return 0;
2067 ihash_entries = simple_strtoul(str, &str, 0);
2068 return 1;
2069}
2070__setup("ihash_entries=", set_ihash_entries);
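
/*
 * Illustrative note: the hash table size can be overridden at boot, e.g.
 * with "ihash_entries=131072" on the kernel command line; the value is
 * rounded to a power of two by alloc_large_system_hash().
 */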
2071
2072/*
2073 * Initialize the waitqueues and inode hash table.
2074 */
2075void __init inode_init_early(void)
2076{
2077 /* If hashes are distributed across NUMA nodes, defer
2078 * hash allocation until vmalloc space is available.
2079 */
2080 if (hashdist)
2081 return;
2082
2083 inode_hashtable =
2084 alloc_large_system_hash("Inode-cache",
2085 sizeof(struct hlist_head),
2086 ihash_entries,
2087 14,
2088 HASH_EARLY | HASH_ZERO,
2089 &i_hash_shift,
2090 &i_hash_mask,
2091 0,
2092 0);
2093}
2094
2095void __init inode_init(void)
2096{
2097 /* inode slab cache */
2098 inode_cachep = kmem_cache_create("inode_cache",
2099 sizeof(struct inode),
2100 0,
2101 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
2102 SLAB_MEM_SPREAD|SLAB_ACCOUNT),
2103 init_once);
2104
2105 /* Hash may have been set up in inode_init_early */
2106 if (!hashdist)
2107 return;
2108
2109 inode_hashtable =
2110 alloc_large_system_hash("Inode-cache",
2111 sizeof(struct hlist_head),
2112 ihash_entries,
2113 14,
2114 HASH_ZERO,
2115 &i_hash_shift,
2116 &i_hash_mask,
2117 0,
2118 0);
2119}
2120
2121void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev)
2122{
2123 inode->i_mode = mode;
2124 if (S_ISCHR(mode)) {
2125 inode->i_fop = &def_chr_fops;
2126 inode->i_rdev = rdev;
2127 } else if (S_ISBLK(mode)) {
2128 inode->i_fop = &def_blk_fops;
2129 inode->i_rdev = rdev;
2130 } else if (S_ISFIFO(mode))
2131 inode->i_fop = &pipefifo_fops;
2132 else if (S_ISSOCK(mode))
2133 ; /* leave it no_open_fops */
2134 else
2135 printk(KERN_DEBUG "init_special_inode: bogus i_mode (%o) for"
2136 " inode %s:%lu\n", mode, inode->i_sb->s_id,
2137 inode->i_ino);
2138}
2139EXPORT_SYMBOL(init_special_inode);
2140
2141/**
2142 * inode_init_owner - Init uid,gid,mode for new inode according to POSIX standards
2143 * @mnt_userns: User namespace of the mount the inode was created from
2144 * @inode: New inode
2145 * @dir: Directory inode
2146 * @mode: mode of the new inode
2147 *
2148 * If the inode has been created through an idmapped mount the user namespace of
2149 * the vfsmount must be passed through @mnt_userns. This function will then take
2150 * care to map the inode according to @mnt_userns before checking permissions
2151 * and initializing i_uid and i_gid. On non-idmapped mounts or if permission
2152 * checking is to be performed on the raw inode, simply pass init_user_ns.
2153 */
2154void inode_init_owner(struct user_namespace *mnt_userns, struct inode *inode,
2155 const struct inode *dir, umode_t mode)
2156{
2157 inode_fsuid_set(inode, mnt_userns);
2158 if (dir && dir->i_mode & S_ISGID) {
2159 inode->i_gid = dir->i_gid;
2160
2161 /* Directories are special, and always inherit S_ISGID */
2162 if (S_ISDIR(mode))
2163 mode |= S_ISGID;
2164 else if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP) &&
2165 !in_group_p(i_gid_into_mnt(mnt_userns, dir)) &&
2166 !capable_wrt_inode_uidgid(mnt_userns, dir, CAP_FSETID))
2167 mode &= ~S_ISGID;
2168 } else
2169 inode_fsgid_set(inode, mnt_userns);
2170 inode->i_mode = mode;
2171}
2172EXPORT_SYMBOL(inode_init_owner);
2173
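/*
 * Worked example: creating a file with mode 02770 (S_ISGID | 0770)
 * in a setgid directory owned by group "staff".  The file inherits
 * gid "staff"; if the creating task is not in "staff" and lacks
 * CAP_FSETID, the S_ISGID bit is stripped and the mode becomes 0770.
 * A new subdirectory, by contrast, always keeps S_ISGID.
 */
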
/**
 * inode_owner_or_capable - check current task permissions to inode
 * @mnt_userns: user namespace of the mount the inode was found from
 * @inode: inode being checked
 *
 * Return true if current either has CAP_FOWNER in a namespace with the
 * inode owner uid mapped, or owns the file.
 *
 * If the inode has been found through an idmapped mount the user namespace of
 * the vfsmount must be passed through @mnt_userns. This function will then take
 * care to map the inode according to @mnt_userns before checking permissions.
 * On non-idmapped mounts or if permission checking is to be performed on the
 * raw inode simply pass init_user_ns.
 */
bool inode_owner_or_capable(struct user_namespace *mnt_userns,
			    const struct inode *inode)
{
	kuid_t i_uid;
	struct user_namespace *ns;

	i_uid = i_uid_into_mnt(mnt_userns, inode);
	if (uid_eq(current_fsuid(), i_uid))
		return true;

	ns = current_user_ns();
	if (kuid_has_mapping(ns, i_uid) && ns_capable(ns, CAP_FOWNER))
		return true;
	return false;
}
EXPORT_SYMBOL(inode_owner_or_capable);

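/*
 * Example (illustrative sketch): callers that change administrative
 * file state, such as the file-flag ioctls, gate on this check:
 *
 *	if (!inode_owner_or_capable(mnt_userns, inode))
 *		return -EPERM;
 */
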
/*
 * Direct I/O helper functions
 */
static void __inode_dio_wait(struct inode *inode)
{
	wait_queue_head_t *wq = bit_waitqueue(&inode->i_state, __I_DIO_WAKEUP);
	DEFINE_WAIT_BIT(q, &inode->i_state, __I_DIO_WAKEUP);

	do {
		prepare_to_wait(wq, &q.wq_entry, TASK_UNINTERRUPTIBLE);
		if (atomic_read(&inode->i_dio_count))
			schedule();
	} while (atomic_read(&inode->i_dio_count));
	finish_wait(wq, &q.wq_entry);
}

/**
 * inode_dio_wait - wait for outstanding DIO requests to finish
 * @inode: inode to wait for
 *
 * Waits for all pending direct I/O requests to finish so that we can
 * proceed with a truncate or equivalent operation.
 *
 * Must be called under a lock that serializes taking new references
 * to i_dio_count, usually inode->i_rwsem (historically i_mutex).
 */
void inode_dio_wait(struct inode *inode)
{
	if (atomic_read(&inode->i_dio_count))
		__inode_dio_wait(inode);
}
EXPORT_SYMBOL(inode_dio_wait);

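/*
 * Example (illustrative sketch): a ->setattr() implementation that
 * shrinks a file drains in-flight direct I/O first; inode_lock()
 * keeps new i_dio_count references from being taken while we wait:
 *
 *	inode_lock(inode);
 *	inode_dio_wait(inode);
 *	truncate_setsize(inode, attr->ia_size);
 *	inode_unlock(inode);
 */
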
/*
 * inode_set_flags - atomically set some inode flags
 *
 * Note: the caller should be holding i_mutex, or else be sure that
 * they have exclusive access to the inode structure (i.e., while the
 * inode is being instantiated).  The cmpxchg() loop inside
 * set_mask_bits() would not be necessary if every code path that
 * modifies i_flags followed this rule, but at least one code path
 * today does not, so we use cmpxchg() out of an abundance of caution.
 *
 * In the long run, i_mutex is overkill, and we should probably look
 * at using the i_lock spinlock to protect i_flags, and then make sure
 * it is so documented in include/linux/fs.h and that all code follows
 * the locking convention!!
 */
void inode_set_flags(struct inode *inode, unsigned int flags,
		     unsigned int mask)
{
	WARN_ON_ONCE(flags & ~mask);
	set_mask_bits(&inode->i_flags, mask, flags);
}
EXPORT_SYMBOL(inode_set_flags);

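/*
 * Example (illustrative sketch; FS_SYNC_FL and FS_APPEND_FL are the
 * generic on-disk flag bits from include/uapi/linux/fs.h): mapping
 * on-disk flags into i_flags, clearing any masked bit that is not
 * set in the new value:
 *
 *	unsigned int new_fl = 0;
 *
 *	if (disk_flags & FS_SYNC_FL)
 *		new_fl |= S_SYNC;
 *	if (disk_flags & FS_APPEND_FL)
 *		new_fl |= S_APPEND;
 *	inode_set_flags(inode, new_fl, S_SYNC | S_APPEND);
 */
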
/*
 * Keep the inode's page cache out of highmem: GFP_USER lacks
 * __GFP_HIGHMEM, so the pages can be dereferenced without kmap().
 */
void inode_nohighmem(struct inode *inode)
{
	mapping_set_gfp_mask(inode->i_mapping, GFP_USER);
}
EXPORT_SYMBOL(inode_nohighmem);

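/*
 * Example (illustrative sketch): filesystems that keep symlink bodies
 * in the page cache call this when creating the symlink, so that
 * ->get_link() can hand back a direct pointer into the page:
 *
 *	inode->i_op = &page_symlink_inode_operations;
 *	inode_nohighmem(inode);
 *	err = page_symlink(inode, symname, strlen(symname) + 1);
 */
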
/**
 * timestamp_truncate - Truncate timespec to a granularity
 * @t: Timespec
 * @inode: inode being updated
 *
 * Truncate a timespec to the granularity supported by the fs
 * containing the inode.  Always rounds down.  The granularity,
 * inode->i_sb->s_time_gran, must not be 0 or greater than a second
 * (NSEC_PER_SEC, i.e. 10^9 ns).
 */
struct timespec64 timestamp_truncate(struct timespec64 t, struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	unsigned int gran = sb->s_time_gran;

	t.tv_sec = clamp(t.tv_sec, sb->s_time_min, sb->s_time_max);
	if (unlikely(t.tv_sec == sb->s_time_max || t.tv_sec == sb->s_time_min))
		t.tv_nsec = 0;

	/* Avoid division in the common cases 1 ns and 1 s. */
	if (gran == 1)
		; /* nothing */
	else if (gran == NSEC_PER_SEC)
		t.tv_nsec = 0;
	else if (gran > 1 && gran < NSEC_PER_SEC)
		t.tv_nsec -= t.tv_nsec % gran;
	else
		WARN(1, "invalid file time granularity: %u", gran);
	return t;
}
EXPORT_SYMBOL(timestamp_truncate);

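/*
 * Worked example: on a filesystem with microsecond granularity
 * (s_time_gran == 1000), tv_nsec == 123456789 rounds down to
 * 123456789 - (123456789 % 1000) == 123456000.
 */
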
/**
 * current_time - Return FS time
 * @inode: inode.
 *
 * Return the current time truncated to the time granularity supported by
 * the fs.
 *
 * Note that @inode must not be NULL.  If inode->i_sb has not been set
 * up, the function warns and returns the time without truncation.
 */
struct timespec64 current_time(struct inode *inode)
{
	struct timespec64 now;

	ktime_get_coarse_real_ts64(&now);

	if (unlikely(!inode->i_sb)) {
		WARN(1, "current_time() called with uninitialized super_block in the inode");
		return now;
	}

	return timestamp_truncate(now, inode);
}
EXPORT_SYMBOL(current_time);
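
/*
 * Example (illustrative sketch): the usual pattern for updating
 * timestamps after modifying a directory:
 *
 *	dir->i_mtime = dir->i_ctime = current_time(dir);
 *	mark_inode_dirty(dir);
 */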