// SPDX-License-Identifier: GPL-2.0-only
/*
 * (C) 1997 Linus Torvalds
 * (C) 1999 Andrea Arcangeli <andrea@suse.de> (dynamic inode allocation)
 */
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/filelock.h>
#include <linux/mm.h>
#include <linux/backing-dev.h>
#include <linux/hash.h>
#include <linux/swap.h>
#include <linux/security.h>
#include <linux/cdev.h>
#include <linux/memblock.h>
#include <linux/fsnotify.h>
#include <linux/mount.h>
#include <linux/posix_acl.h>
#include <linux/buffer_head.h> /* for inode_has_buffers */
#include <linux/ratelimit.h>
#include <linux/list_lru.h>
#include <linux/iversion.h>
#include <linux/rw_hint.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <trace/events/writeback.h>
#define CREATE_TRACE_POINTS
#include <trace/events/timestamp.h>

#include "internal.h"

/*
 * Inode locking rules:
 *
 * inode->i_lock protects:
 *   inode->i_state, inode->i_hash, __iget(), inode->i_io_list
 * Inode LRU list locks protect:
 *   inode->i_sb->s_inode_lru, inode->i_lru
 * inode->i_sb->s_inode_list_lock protects:
 *   inode->i_sb->s_inodes, inode->i_sb_list
 * bdi->wb.list_lock protects:
 *   bdi->wb.b_{dirty,io,more_io,dirty_time}, inode->i_io_list
 * inode_hash_lock protects:
 *   inode_hashtable, inode->i_hash
 *
 * Lock ordering:
 *
 * inode->i_sb->s_inode_list_lock
 *   inode->i_lock
 *     Inode LRU list locks
 *
 * bdi->wb.list_lock
 *   inode->i_lock
 *
 * inode_hash_lock
 *   inode->i_sb->s_inode_list_lock
 *   inode->i_lock
 *
 * iunique_lock
 *   inode_hash_lock
 */
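
/*
 * A minimal illustrative sketch (not part of the original file) of the
 * first ordering above: s_inode_list_lock is taken before each inode's
 * i_lock, exactly as evict_inodes() and invalidate_inodes() below do.
 */
#if 0
static void example_walk_sb_inodes(struct super_block *sb)
{
	struct inode *inode;

	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		spin_lock(&inode->i_lock);	/* nests inside s_inode_list_lock */
		/* inode->i_state may be inspected here */
		spin_unlock(&inode->i_lock);
	}
	spin_unlock(&sb->s_inode_list_lock);
}
#endif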

static unsigned int i_hash_mask __ro_after_init;
static unsigned int i_hash_shift __ro_after_init;
static struct hlist_head *inode_hashtable __ro_after_init;
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock);

/*
 * Empty aops. Can be used for the cases where the user does not
 * define any of the address_space operations.
 */
const struct address_space_operations empty_aops = {
};
EXPORT_SYMBOL(empty_aops);

static DEFINE_PER_CPU(unsigned long, nr_inodes);
static DEFINE_PER_CPU(unsigned long, nr_unused);

static struct kmem_cache *inode_cachep __ro_after_init;

static long get_nr_inodes(void)
{
	int i;
	long sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_inodes, i);
	return sum < 0 ? 0 : sum;
}

static inline long get_nr_inodes_unused(void)
{
	int i;
	long sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_unused, i);
	return sum < 0 ? 0 : sum;
}

long get_nr_dirty_inodes(void)
{
	/* not actually dirty inodes, but a wild approximation */
	long nr_dirty = get_nr_inodes() - get_nr_inodes_unused();
	return nr_dirty > 0 ? nr_dirty : 0;
}

#ifdef CONFIG_DEBUG_FS
static DEFINE_PER_CPU(long, mg_ctime_updates);
static DEFINE_PER_CPU(long, mg_fine_stamps);
static DEFINE_PER_CPU(long, mg_ctime_swaps);

static unsigned long get_mg_ctime_updates(void)
{
	unsigned long sum = 0;
	int i;

	for_each_possible_cpu(i)
		sum += data_race(per_cpu(mg_ctime_updates, i));
	return sum;
}

static unsigned long get_mg_fine_stamps(void)
{
	unsigned long sum = 0;
	int i;

	for_each_possible_cpu(i)
		sum += data_race(per_cpu(mg_fine_stamps, i));
	return sum;
}

static unsigned long get_mg_ctime_swaps(void)
{
	unsigned long sum = 0;
	int i;

	for_each_possible_cpu(i)
		sum += data_race(per_cpu(mg_ctime_swaps, i));
	return sum;
}

#define mgtime_counter_inc(__var)	this_cpu_inc(__var)

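/*
 * Emits the counters as four space-separated values: ctime updates,
 * ctime swaps, fine-grained stamps handed out, and timekeeping floor swaps.
 */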
static int mgts_show(struct seq_file *s, void *p)
{
	unsigned long ctime_updates = get_mg_ctime_updates();
	unsigned long ctime_swaps = get_mg_ctime_swaps();
	unsigned long fine_stamps = get_mg_fine_stamps();
	unsigned long floor_swaps = timekeeping_get_mg_floor_swaps();

	seq_printf(s, "%lu %lu %lu %lu\n",
		   ctime_updates, ctime_swaps, fine_stamps, floor_swaps);
	return 0;
}

DEFINE_SHOW_ATTRIBUTE(mgts);

static int __init mg_debugfs_init(void)
{
	debugfs_create_file("multigrain_timestamps", S_IFREG | S_IRUGO, NULL, NULL, &mgts_fops);
	return 0;
}
late_initcall(mg_debugfs_init);

#else /* ! CONFIG_DEBUG_FS */

#define mgtime_counter_inc(__var)	do { } while (0)

#endif /* CONFIG_DEBUG_FS */

/*
 * Handle nr_inode sysctl
 */
#ifdef CONFIG_SYSCTL
/*
 * Statistics gathering..
 */
static struct inodes_stat_t inodes_stat;

static int proc_nr_inodes(const struct ctl_table *table, int write, void *buffer,
			  size_t *lenp, loff_t *ppos)
{
	inodes_stat.nr_inodes = get_nr_inodes();
	inodes_stat.nr_unused = get_nr_inodes_unused();
	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
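
/*
 * Note: "inode-nr" exposes only {nr_inodes, nr_unused}; "inode-state" also
 * dumps the remaining dummy fields of struct inodes_stat_t, kept for
 * compatibility with the old /proc/sys/fs/inode-state layout.
 */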
static struct ctl_table inodes_sysctls[] = {
	{
		.procname	= "inode-nr",
		.data		= &inodes_stat,
		.maxlen		= 2*sizeof(long),
		.mode		= 0444,
		.proc_handler	= proc_nr_inodes,
	},
	{
		.procname	= "inode-state",
		.data		= &inodes_stat,
		.maxlen		= 7*sizeof(long),
		.mode		= 0444,
		.proc_handler	= proc_nr_inodes,
	},
};

static int __init init_fs_inode_sysctls(void)
{
	register_sysctl_init("fs", inodes_sysctls);
	return 0;
}
early_initcall(init_fs_inode_sysctls);
#endif

static int no_open(struct inode *inode, struct file *file)
{
	return -ENXIO;
}

/**
 * inode_init_always_gfp - perform inode structure initialisation
 * @sb: superblock inode belongs to
 * @inode: inode to initialise
 * @gfp: allocation flags
 *
 * These are initializations that need to be done on every inode
 * allocation as the fields are not initialised by slab allocation.
 * If additional allocations are required, @gfp is used.
 */
int inode_init_always_gfp(struct super_block *sb, struct inode *inode, gfp_t gfp)
{
	static const struct inode_operations empty_iops;
	static const struct file_operations no_open_fops = {.open = no_open};
	struct address_space *const mapping = &inode->i_data;

	inode->i_sb = sb;
	inode->i_blkbits = sb->s_blocksize_bits;
	inode->i_flags = 0;
	inode->i_state = 0;
	atomic64_set(&inode->i_sequence, 0);
	atomic_set(&inode->i_count, 1);
	inode->i_op = &empty_iops;
	inode->i_fop = &no_open_fops;
	inode->i_ino = 0;
	inode->__i_nlink = 1;
	inode->i_opflags = 0;
	if (sb->s_xattr)
		inode->i_opflags |= IOP_XATTR;
	if (sb->s_type->fs_flags & FS_MGTIME)
		inode->i_opflags |= IOP_MGTIME;
	i_uid_write(inode, 0);
	i_gid_write(inode, 0);
	atomic_set(&inode->i_writecount, 0);
	inode->i_size = 0;
	inode->i_write_hint = WRITE_LIFE_NOT_SET;
	inode->i_blocks = 0;
	inode->i_bytes = 0;
	inode->i_generation = 0;
	inode->i_pipe = NULL;
	inode->i_cdev = NULL;
	inode->i_link = NULL;
	inode->i_dir_seq = 0;
	inode->i_rdev = 0;
	inode->dirtied_when = 0;

#ifdef CONFIG_CGROUP_WRITEBACK
	inode->i_wb_frn_winner = 0;
	inode->i_wb_frn_avg_time = 0;
	inode->i_wb_frn_history = 0;
#endif

	spin_lock_init(&inode->i_lock);
	lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key);

	init_rwsem(&inode->i_rwsem);
	lockdep_set_class(&inode->i_rwsem, &sb->s_type->i_mutex_key);

	atomic_set(&inode->i_dio_count, 0);

	mapping->a_ops = &empty_aops;
	mapping->host = inode;
	mapping->flags = 0;
	mapping->wb_err = 0;
	atomic_set(&mapping->i_mmap_writable, 0);
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
	atomic_set(&mapping->nr_thps, 0);
#endif
	mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE);
	mapping->i_private_data = NULL;
	mapping->writeback_index = 0;
	init_rwsem(&mapping->invalidate_lock);
	lockdep_set_class_and_name(&mapping->invalidate_lock,
				   &sb->s_type->invalidate_lock_key,
				   "mapping.invalidate_lock");
	if (sb->s_iflags & SB_I_STABLE_WRITES)
		mapping_set_stable_writes(mapping);
	inode->i_private = NULL;
	inode->i_mapping = mapping;
	INIT_HLIST_HEAD(&inode->i_dentry);	/* buggered by rcu freeing */
#ifdef CONFIG_FS_POSIX_ACL
	inode->i_acl = inode->i_default_acl = ACL_NOT_CACHED;
#endif

#ifdef CONFIG_FSNOTIFY
	inode->i_fsnotify_mask = 0;
#endif
	inode->i_flctx = NULL;

	if (unlikely(security_inode_alloc(inode, gfp)))
		return -ENOMEM;

	this_cpu_inc(nr_inodes);

	return 0;
}
EXPORT_SYMBOL(inode_init_always_gfp);
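
/*
 * Illustrative sketch (hypothetical "foo" filesystem; foo_inode_cachep is
 * assumed to be its own kmem_cache): a typical ->alloc_inode() only carves
 * out the containing object; alloc_inode() below then runs
 * inode_init_always() on the embedded struct inode for every allocation.
 */
#if 0
struct foo_inode {
	unsigned long	foo_flags;	/* fs-private state */
	struct inode	vfs_inode;
};

static struct inode *foo_alloc_inode(struct super_block *sb)
{
	struct foo_inode *fi;

	fi = alloc_inode_sb(sb, foo_inode_cachep, GFP_KERNEL);
	if (!fi)
		return NULL;
	fi->foo_flags = 0;
	return &fi->vfs_inode;
}
#endif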

void free_inode_nonrcu(struct inode *inode)
{
	kmem_cache_free(inode_cachep, inode);
}
EXPORT_SYMBOL(free_inode_nonrcu);

static void i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	if (inode->free_inode)
		inode->free_inode(inode);
	else
		free_inode_nonrcu(inode);
}

static struct inode *alloc_inode(struct super_block *sb)
{
	const struct super_operations *ops = sb->s_op;
	struct inode *inode;

	if (ops->alloc_inode)
		inode = ops->alloc_inode(sb);
	else
		inode = alloc_inode_sb(sb, inode_cachep, GFP_KERNEL);

	if (!inode)
		return NULL;

	if (unlikely(inode_init_always(sb, inode))) {
		if (ops->destroy_inode) {
			ops->destroy_inode(inode);
			if (!ops->free_inode)
				return NULL;
		}
		inode->free_inode = ops->free_inode;
		i_callback(&inode->i_rcu);
		return NULL;
	}

	return inode;
}

void __destroy_inode(struct inode *inode)
{
	BUG_ON(inode_has_buffers(inode));
	inode_detach_wb(inode);
	security_inode_free(inode);
	fsnotify_inode_delete(inode);
	locks_free_lock_context(inode);
	if (!inode->i_nlink) {
		WARN_ON(atomic_long_read(&inode->i_sb->s_remove_count) == 0);
		atomic_long_dec(&inode->i_sb->s_remove_count);
	}

#ifdef CONFIG_FS_POSIX_ACL
	if (inode->i_acl && !is_uncached_acl(inode->i_acl))
		posix_acl_release(inode->i_acl);
	if (inode->i_default_acl && !is_uncached_acl(inode->i_default_acl))
		posix_acl_release(inode->i_default_acl);
#endif
	this_cpu_dec(nr_inodes);
}
EXPORT_SYMBOL(__destroy_inode);

static void destroy_inode(struct inode *inode)
{
	const struct super_operations *ops = inode->i_sb->s_op;

	BUG_ON(!list_empty(&inode->i_lru));
	__destroy_inode(inode);
	if (ops->destroy_inode) {
		ops->destroy_inode(inode);
		if (!ops->free_inode)
			return;
	}
	inode->free_inode = ops->free_inode;
	call_rcu(&inode->i_rcu, i_callback);
}

/**
 * drop_nlink - directly drop an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink. In cases
 * where we are attempting to track writes to the
 * filesystem, a decrement to zero means an imminent
 * write when the file is truncated and actually unlinked
 * on the filesystem.
 */
void drop_nlink(struct inode *inode)
{
	WARN_ON(inode->i_nlink == 0);
	inode->__i_nlink--;
	if (!inode->i_nlink)
		atomic_long_inc(&inode->i_sb->s_remove_count);
}
EXPORT_SYMBOL(drop_nlink);

/**
 * clear_nlink - directly zero an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink. See
 * drop_nlink() for why we care about i_nlink hitting zero.
 */
void clear_nlink(struct inode *inode)
{
	if (inode->i_nlink) {
		inode->__i_nlink = 0;
		atomic_long_inc(&inode->i_sb->s_remove_count);
	}
}
EXPORT_SYMBOL(clear_nlink);

/**
 * set_nlink - directly set an inode's link count
 * @inode: inode
 * @nlink: new nlink (should be non-zero)
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink.
 */
void set_nlink(struct inode *inode, unsigned int nlink)
{
	if (!nlink) {
		clear_nlink(inode);
	} else {
		/* Yes, some filesystems do change nlink from zero to one */
		if (inode->i_nlink == 0)
			atomic_long_dec(&inode->i_sb->s_remove_count);

		inode->__i_nlink = nlink;
	}
}
EXPORT_SYMBOL(set_nlink);

/**
 * inc_nlink - directly increment an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink. Currently,
 * it is only here for parity with dec_nlink().
 */
void inc_nlink(struct inode *inode)
{
	if (unlikely(inode->i_nlink == 0)) {
		WARN_ON(!(inode->i_state & I_LINKABLE));
		atomic_long_dec(&inode->i_sb->s_remove_count);
	}

	inode->__i_nlink++;
}
EXPORT_SYMBOL(inc_nlink);
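
/*
 * Illustrative sketch (hypothetical helper, not part of this file):
 * filesystems use the helpers above instead of touching i_nlink directly,
 * e.g. when removing a directory entry.
 */
#if 0
static void foo_unlink_update_counts(struct inode *dir, struct inode *inode)
{
	/* instead of open-coding inode->i_nlink-- */
	drop_nlink(inode);
	/* a removed subdirectory also takes the parent's ".." back-link with it */
	if (S_ISDIR(inode->i_mode))
		drop_nlink(dir);
}
#endif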

static void __address_space_init_once(struct address_space *mapping)
{
	xa_init_flags(&mapping->i_pages, XA_FLAGS_LOCK_IRQ | XA_FLAGS_ACCOUNT);
	init_rwsem(&mapping->i_mmap_rwsem);
	INIT_LIST_HEAD(&mapping->i_private_list);
	spin_lock_init(&mapping->i_private_lock);
	mapping->i_mmap = RB_ROOT_CACHED;
}

void address_space_init_once(struct address_space *mapping)
{
	memset(mapping, 0, sizeof(*mapping));
	__address_space_init_once(mapping);
}
EXPORT_SYMBOL(address_space_init_once);

/*
 * These are initializations that only need to be done
 * once, because the fields are idempotent across use
 * of the inode, so the slab constructor can take care
 * of them.
 */
void inode_init_once(struct inode *inode)
{
	memset(inode, 0, sizeof(*inode));
	INIT_HLIST_NODE(&inode->i_hash);
	INIT_LIST_HEAD(&inode->i_devices);
	INIT_LIST_HEAD(&inode->i_io_list);
	INIT_LIST_HEAD(&inode->i_wb_list);
	INIT_LIST_HEAD(&inode->i_lru);
	INIT_LIST_HEAD(&inode->i_sb_list);
	__address_space_init_once(&inode->i_data);
	i_size_ordered_init(inode);
}
EXPORT_SYMBOL(inode_init_once);

static void init_once(void *foo)
{
	struct inode *inode = (struct inode *) foo;

	inode_init_once(inode);
}

/*
 * Get an additional reference to an inode; the caller must already hold one.
 */
void ihold(struct inode *inode)
{
	WARN_ON(atomic_inc_return(&inode->i_count) < 2);
}
EXPORT_SYMBOL(ihold);

static void __inode_add_lru(struct inode *inode, bool rotate)
{
	if (inode->i_state & (I_DIRTY_ALL | I_SYNC | I_FREEING | I_WILL_FREE))
		return;
	if (atomic_read(&inode->i_count))
		return;
	if (!(inode->i_sb->s_flags & SB_ACTIVE))
		return;
	if (!mapping_shrinkable(&inode->i_data))
		return;

	if (list_lru_add_obj(&inode->i_sb->s_inode_lru, &inode->i_lru))
		this_cpu_inc(nr_unused);
	else if (rotate)
		inode->i_state |= I_REFERENCED;
}

struct wait_queue_head *inode_bit_waitqueue(struct wait_bit_queue_entry *wqe,
					    struct inode *inode, u32 bit)
{
	void *bit_address;

	bit_address = inode_state_wait_address(inode, bit);
	init_wait_var_entry(wqe, bit_address, 0);
	return __var_waitqueue(bit_address);
}
EXPORT_SYMBOL(inode_bit_waitqueue);

/*
 * Add inode to LRU if needed (inode is unused and clean).
 *
 * Needs inode->i_lock held.
 */
void inode_add_lru(struct inode *inode)
{
	__inode_add_lru(inode, false);
}

static void inode_lru_list_del(struct inode *inode)
{
	if (list_lru_del_obj(&inode->i_sb->s_inode_lru, &inode->i_lru))
		this_cpu_dec(nr_unused);
}

static void inode_pin_lru_isolating(struct inode *inode)
{
	lockdep_assert_held(&inode->i_lock);
	WARN_ON(inode->i_state & (I_LRU_ISOLATING | I_FREEING | I_WILL_FREE));
	inode->i_state |= I_LRU_ISOLATING;
}

static void inode_unpin_lru_isolating(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	WARN_ON(!(inode->i_state & I_LRU_ISOLATING));
	inode->i_state &= ~I_LRU_ISOLATING;
	/* Called with inode->i_lock which ensures memory ordering. */
	inode_wake_up_bit(inode, __I_LRU_ISOLATING);
	spin_unlock(&inode->i_lock);
}

static void inode_wait_for_lru_isolating(struct inode *inode)
{
	struct wait_bit_queue_entry wqe;
	struct wait_queue_head *wq_head;

	lockdep_assert_held(&inode->i_lock);
	if (!(inode->i_state & I_LRU_ISOLATING))
		return;

	wq_head = inode_bit_waitqueue(&wqe, inode, __I_LRU_ISOLATING);
	for (;;) {
		prepare_to_wait_event(wq_head, &wqe.wq_entry, TASK_UNINTERRUPTIBLE);
		/*
		 * Checking I_LRU_ISOLATING with inode->i_lock guarantees
		 * memory ordering.
		 */
		if (!(inode->i_state & I_LRU_ISOLATING))
			break;
		spin_unlock(&inode->i_lock);
		schedule();
		spin_lock(&inode->i_lock);
	}
	finish_wait(wq_head, &wqe.wq_entry);
	WARN_ON(inode->i_state & I_LRU_ISOLATING);
}

/**
 * inode_sb_list_add - add inode to the superblock list of inodes
 * @inode: inode to add
 */
void inode_sb_list_add(struct inode *inode)
{
	spin_lock(&inode->i_sb->s_inode_list_lock);
	list_add(&inode->i_sb_list, &inode->i_sb->s_inodes);
	spin_unlock(&inode->i_sb->s_inode_list_lock);
}
EXPORT_SYMBOL_GPL(inode_sb_list_add);

static inline void inode_sb_list_del(struct inode *inode)
{
	if (!list_empty(&inode->i_sb_list)) {
		spin_lock(&inode->i_sb->s_inode_list_lock);
		list_del_init(&inode->i_sb_list);
		spin_unlock(&inode->i_sb->s_inode_list_lock);
	}
}

static unsigned long hash(struct super_block *sb, unsigned long hashval)
{
	unsigned long tmp;

	tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
			L1_CACHE_BYTES;
	tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> i_hash_shift);
	return tmp & i_hash_mask;
}

/**
 * __insert_inode_hash - hash an inode
 * @inode: unhashed inode
 * @hashval: unsigned long value used to locate this object in the
 *		inode_hashtable.
 *
 * Add an inode to the inode hash for this superblock.
 */
void __insert_inode_hash(struct inode *inode, unsigned long hashval)
{
	struct hlist_head *b = inode_hashtable + hash(inode->i_sb, hashval);

	spin_lock(&inode_hash_lock);
	spin_lock(&inode->i_lock);
	hlist_add_head_rcu(&inode->i_hash, b);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
}
EXPORT_SYMBOL(__insert_inode_hash);

/**
 * __remove_inode_hash - remove an inode from the hash
 * @inode: inode to unhash
 *
 * Remove an inode from its superblock's inode hash.
 */
void __remove_inode_hash(struct inode *inode)
{
	spin_lock(&inode_hash_lock);
	spin_lock(&inode->i_lock);
	hlist_del_init_rcu(&inode->i_hash);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
}
EXPORT_SYMBOL(__remove_inode_hash);

void dump_mapping(const struct address_space *mapping)
{
	struct inode *host;
	const struct address_space_operations *a_ops;
	struct hlist_node *dentry_first;
	struct dentry *dentry_ptr;
	struct dentry dentry;
	char fname[64] = {};
	unsigned long ino;

	/*
	 * If mapping is an invalid pointer, we don't want to crash
	 * accessing it, so probe everything depending on it carefully.
	 */
	if (get_kernel_nofault(host, &mapping->host) ||
	    get_kernel_nofault(a_ops, &mapping->a_ops)) {
		pr_warn("invalid mapping:%px\n", mapping);
		return;
	}

	if (!host) {
		pr_warn("aops:%ps\n", a_ops);
		return;
	}

	if (get_kernel_nofault(dentry_first, &host->i_dentry.first) ||
	    get_kernel_nofault(ino, &host->i_ino)) {
		pr_warn("aops:%ps invalid inode:%px\n", a_ops, host);
		return;
	}

	if (!dentry_first) {
		pr_warn("aops:%ps ino:%lx\n", a_ops, ino);
		return;
	}

	dentry_ptr = container_of(dentry_first, struct dentry, d_u.d_alias);
	if (get_kernel_nofault(dentry, dentry_ptr) ||
	    !dentry.d_parent || !dentry.d_name.name) {
		pr_warn("aops:%ps ino:%lx invalid dentry:%px\n",
				a_ops, ino, dentry_ptr);
		return;
	}

	if (strncpy_from_kernel_nofault(fname, dentry.d_name.name, 63) < 0)
		strscpy(fname, "<invalid>");
	/*
	 * Even if strncpy_from_kernel_nofault() succeeded,
	 * the fname could be unreliable
	 */
	pr_warn("aops:%ps ino:%lx dentry name(?):\"%s\"\n",
			a_ops, ino, fname);
}

void clear_inode(struct inode *inode)
{
	/*
	 * We have to cycle the i_pages lock here because reclaim can be in the
	 * process of removing the last page (in __filemap_remove_folio())
	 * and we must not free the mapping under it.
	 */
	xa_lock_irq(&inode->i_data.i_pages);
	BUG_ON(inode->i_data.nrpages);
	/*
	 * Almost always, mapping_empty(&inode->i_data) here; but there are
	 * two known and long-standing ways in which nodes may get left behind
	 * (when deep radix-tree node allocation failed partway; or when THP
	 * collapse_file() failed). Until those two known cases are cleaned up,
	 * or a cleanup function is called here, do not BUG_ON(!mapping_empty),
	 * nor even WARN_ON(!mapping_empty).
	 */
	xa_unlock_irq(&inode->i_data.i_pages);
	BUG_ON(!list_empty(&inode->i_data.i_private_list));
	BUG_ON(!(inode->i_state & I_FREEING));
	BUG_ON(inode->i_state & I_CLEAR);
	BUG_ON(!list_empty(&inode->i_wb_list));
	/* don't need i_lock here, no concurrent mods to i_state */
	inode->i_state = I_FREEING | I_CLEAR;
}
EXPORT_SYMBOL(clear_inode);

/*
 * Free the inode passed in, removing it from the lists it is still connected
 * to. We remove any pages still attached to the inode and wait for any IO that
 * is still in progress before finally destroying the inode.
 *
 * An inode must already be marked I_FREEING so that we avoid the inode being
 * moved back onto lists if we race with other code that manipulates the lists
 * (e.g. writeback_single_inode). The caller is responsible for setting this.
 *
 * An inode must already be removed from the LRU list before being evicted from
 * the cache. This should occur atomically with setting the I_FREEING state
 * flag, so no inodes here should ever be on the LRU when being evicted.
 */
static void evict(struct inode *inode)
{
	const struct super_operations *op = inode->i_sb->s_op;

	BUG_ON(!(inode->i_state & I_FREEING));
	BUG_ON(!list_empty(&inode->i_lru));

	if (!list_empty(&inode->i_io_list))
		inode_io_list_del(inode);

	inode_sb_list_del(inode);

	spin_lock(&inode->i_lock);
	inode_wait_for_lru_isolating(inode);

	/*
	 * Wait for flusher thread to be done with the inode so that filesystem
	 * does not start destroying it while writeback is still running. Since
	 * the inode has I_FREEING set, flusher thread won't start new work on
	 * the inode. We just have to wait for running writeback to finish.
	 */
	inode_wait_for_writeback(inode);
	spin_unlock(&inode->i_lock);

	if (op->evict_inode) {
		op->evict_inode(inode);
	} else {
		truncate_inode_pages_final(&inode->i_data);
		clear_inode(inode);
	}
	if (S_ISCHR(inode->i_mode) && inode->i_cdev)
		cd_forget(inode);

	remove_inode_hash(inode);

	/*
	 * Wake up waiters in __wait_on_freeing_inode().
	 *
	 * Lockless hash lookup may end up finding the inode before we removed
	 * it above, but only lock it *after* we are done with the wakeup below.
	 * In this case the potential waiter cannot safely block.
	 *
	 * The inode being unhashed after the call to remove_inode_hash() is
	 * used as an indicator whether blocking on it is safe.
	 */
	spin_lock(&inode->i_lock);
	/*
	 * Pairs with the barrier in prepare_to_wait_event() to make sure
	 * ___wait_var_event() either sees the bit cleared or
	 * waitqueue_active() check in wake_up_var() sees the waiter.
	 */
	smp_mb__after_spinlock();
	inode_wake_up_bit(inode, __I_NEW);
	BUG_ON(inode->i_state != (I_FREEING | I_CLEAR));
	spin_unlock(&inode->i_lock);

	destroy_inode(inode);
}

/*
 * dispose_list - dispose of the contents of a local list
 * @head: the head of the list to free
 *
 * Dispose-list gets a local list with local inodes in it, so it doesn't
 * need to worry about list corruption and SMP locks.
 */
static void dispose_list(struct list_head *head)
{
	while (!list_empty(head)) {
		struct inode *inode;

		inode = list_first_entry(head, struct inode, i_lru);
		list_del_init(&inode->i_lru);

		evict(inode);
		cond_resched();
	}
}

/**
 * evict_inodes - evict all evictable inodes for a superblock
 * @sb: superblock to operate on
 *
 * Make sure that no inodes with zero refcount are retained. This is
 * called by superblock shutdown after having SB_ACTIVE flag removed,
 * so any inode reaching zero refcount during or after that call will
 * be immediately evicted.
 */
void evict_inodes(struct super_block *sb)
{
	struct inode *inode, *next;
	LIST_HEAD(dispose);

again:
	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
		if (atomic_read(&inode->i_count))
			continue;

		spin_lock(&inode->i_lock);
		if (atomic_read(&inode->i_count)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			continue;
		}

		inode->i_state |= I_FREEING;
		inode_lru_list_del(inode);
		spin_unlock(&inode->i_lock);
		list_add(&inode->i_lru, &dispose);

		/*
		 * We can have a ton of inodes to evict at unmount time given
		 * enough memory; check whether we need to go to sleep for a
		 * bit so we don't livelock.
		 */
		if (need_resched()) {
			spin_unlock(&sb->s_inode_list_lock);
			cond_resched();
			dispose_list(&dispose);
			goto again;
		}
	}
	spin_unlock(&sb->s_inode_list_lock);

	dispose_list(&dispose);
}
EXPORT_SYMBOL_GPL(evict_inodes);

/**
 * invalidate_inodes - attempt to free all inodes on a superblock
 * @sb: superblock to operate on
 *
 * Attempts to free all inodes (including dirty inodes) for a given superblock.
 */
void invalidate_inodes(struct super_block *sb)
{
	struct inode *inode, *next;
	LIST_HEAD(dispose);

again:
	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		if (atomic_read(&inode->i_count)) {
			spin_unlock(&inode->i_lock);
			continue;
		}

		inode->i_state |= I_FREEING;
		inode_lru_list_del(inode);
		spin_unlock(&inode->i_lock);
		list_add(&inode->i_lru, &dispose);
		if (need_resched()) {
			spin_unlock(&sb->s_inode_list_lock);
			cond_resched();
			dispose_list(&dispose);
			goto again;
		}
	}
	spin_unlock(&sb->s_inode_list_lock);

	dispose_list(&dispose);
}

/*
 * Isolate the inode from the LRU in preparation for freeing it.
 *
 * If the inode has the I_REFERENCED flag set, then it means that it has been
 * used recently - the flag is set in iput_final(). When we encounter such an
 * inode, clear the flag and move it to the back of the LRU so it gets another
 * pass through the LRU before it gets reclaimed. This is necessary because of
 * the fact we are doing lazy LRU updates to minimise lock contention so the
 * LRU does not have strict ordering. Hence we don't want to reclaim inodes
 * with this flag set because they are the inodes that are out of order.
 */
static enum lru_status inode_lru_isolate(struct list_head *item,
		struct list_lru_one *lru, void *arg)
{
	struct list_head *freeable = arg;
	struct inode *inode = container_of(item, struct inode, i_lru);

	/*
	 * We are inverting the lru lock/inode->i_lock here, so use a
	 * trylock. If we fail to get the lock, just skip it.
	 */
	if (!spin_trylock(&inode->i_lock))
		return LRU_SKIP;

	/*
	 * Inodes can get referenced, redirtied, or repopulated while
	 * they're already on the LRU, and this can make them
	 * unreclaimable for a while. Remove them lazily here; iput,
	 * sync, or the last page cache deletion will requeue them.
	 */
	if (atomic_read(&inode->i_count) ||
	    (inode->i_state & ~I_REFERENCED) ||
	    !mapping_shrinkable(&inode->i_data)) {
		list_lru_isolate(lru, &inode->i_lru);
		spin_unlock(&inode->i_lock);
		this_cpu_dec(nr_unused);
		return LRU_REMOVED;
	}

	/* Recently referenced inodes get one more pass */
	if (inode->i_state & I_REFERENCED) {
		inode->i_state &= ~I_REFERENCED;
		spin_unlock(&inode->i_lock);
		return LRU_ROTATE;
	}

	/*
	 * On highmem systems, mapping_shrinkable() permits dropping
	 * page cache in order to free up struct inodes: lowmem might
	 * be under pressure before the cache inside the highmem zone.
	 */
	if (inode_has_buffers(inode) || !mapping_empty(&inode->i_data)) {
		inode_pin_lru_isolating(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(&lru->lock);
		if (remove_inode_buffers(inode)) {
			unsigned long reap;
			reap = invalidate_mapping_pages(&inode->i_data, 0, -1);
			if (current_is_kswapd())
				__count_vm_events(KSWAPD_INODESTEAL, reap);
			else
				__count_vm_events(PGINODESTEAL, reap);
			mm_account_reclaimed_pages(reap);
		}
		inode_unpin_lru_isolating(inode);
		return LRU_RETRY;
	}

	WARN_ON(inode->i_state & I_NEW);
	inode->i_state |= I_FREEING;
	list_lru_isolate_move(lru, &inode->i_lru, freeable);
	spin_unlock(&inode->i_lock);

	this_cpu_dec(nr_unused);
	return LRU_REMOVED;
}

/*
 * Walk the superblock inode LRU for freeable inodes and attempt to free them.
 * This is called from the superblock shrinker function with a number of inodes
 * to trim from the LRU. Inodes to be freed are moved to a temporary list and
 * then are freed outside inode_lock by dispose_list().
 */
long prune_icache_sb(struct super_block *sb, struct shrink_control *sc)
{
	LIST_HEAD(freeable);
	long freed;

	freed = list_lru_shrink_walk(&sb->s_inode_lru, sc,
				     inode_lru_isolate, &freeable);
	dispose_list(&freeable);
	return freed;
}

static void __wait_on_freeing_inode(struct inode *inode, bool is_inode_hash_locked);
/*
 * Called with either the inode hash lock held or under RCU, as indicated by
 * @is_inode_hash_locked.
 */
static struct inode *find_inode(struct super_block *sb,
				struct hlist_head *head,
				int (*test)(struct inode *, void *),
				void *data, bool is_inode_hash_locked)
{
	struct inode *inode = NULL;

	if (is_inode_hash_locked)
		lockdep_assert_held(&inode_hash_lock);
	else
		lockdep_assert_not_held(&inode_hash_lock);

	rcu_read_lock();
repeat:
	hlist_for_each_entry_rcu(inode, head, i_hash) {
		if (inode->i_sb != sb)
			continue;
		if (!test(inode, data))
			continue;
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
			__wait_on_freeing_inode(inode, is_inode_hash_locked);
			goto repeat;
		}
		if (unlikely(inode->i_state & I_CREATING)) {
			spin_unlock(&inode->i_lock);
			rcu_read_unlock();
			return ERR_PTR(-ESTALE);
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		rcu_read_unlock();
		return inode;
	}
	rcu_read_unlock();
	return NULL;
}

/*
 * find_inode_fast is the fast path version of find_inode, see the comment at
 * iget_locked for details.
 */
static struct inode *find_inode_fast(struct super_block *sb,
				struct hlist_head *head, unsigned long ino,
				bool is_inode_hash_locked)
{
	struct inode *inode = NULL;

	if (is_inode_hash_locked)
		lockdep_assert_held(&inode_hash_lock);
	else
		lockdep_assert_not_held(&inode_hash_lock);

	rcu_read_lock();
repeat:
	hlist_for_each_entry_rcu(inode, head, i_hash) {
		if (inode->i_ino != ino)
			continue;
		if (inode->i_sb != sb)
			continue;
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
			__wait_on_freeing_inode(inode, is_inode_hash_locked);
			goto repeat;
		}
		if (unlikely(inode->i_state & I_CREATING)) {
			spin_unlock(&inode->i_lock);
			rcu_read_unlock();
			return ERR_PTR(-ESTALE);
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		rcu_read_unlock();
		return inode;
	}
	rcu_read_unlock();
	return NULL;
}

/*
 * Each cpu owns a range of LAST_INO_BATCH numbers.
 * 'shared_last_ino' is dirtied only once out of LAST_INO_BATCH allocations,
 * to renew the exhausted range.
 *
 * This does not significantly increase overflow rate because every CPU can
 * consume at most LAST_INO_BATCH-1 unused inode numbers. So there is
 * NR_CPUS*(LAST_INO_BATCH-1) wastage. At 4096 and 1024, this is ~0.1% of the
 * 2^32 range, and is a worst-case. Even a 50% wastage would only increase
 * overflow rate by 2x, which does not seem too significant.
 *
 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
 * error if st_ino won't fit in target struct field. Use 32bit counter
 * here to attempt to avoid that.
 */
#define LAST_INO_BATCH 1024
static DEFINE_PER_CPU(unsigned int, last_ino);

unsigned int get_next_ino(void)
{
	unsigned int *p = &get_cpu_var(last_ino);
	unsigned int res = *p;

#ifdef CONFIG_SMP
	if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
		static atomic_t shared_last_ino;
		int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);

		res = next - LAST_INO_BATCH;
	}
#endif

	res++;
	/* get_next_ino should not provide a 0 inode number */
	if (unlikely(!res))
		res++;
	*p = res;
	put_cpu_var(last_ino);
	return res;
}
EXPORT_SYMBOL(get_next_ino);
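
/*
 * Illustrative sketch: in-memory filesystems typically stamp freshly made
 * inodes with get_next_ino(), which is cheap but, per the comment above,
 * not guaranteed collision-free. "foo_make_pseudo_inode" is hypothetical.
 */
#if 0
static struct inode *foo_make_pseudo_inode(struct super_block *sb, umode_t mode)
{
	struct inode *inode = new_inode(sb);

	if (inode) {
		inode->i_ino = get_next_ino();
		inode->i_mode = mode;
		simple_inode_init_ts(inode);
	}
	return inode;
}
#endif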

/**
 * new_inode_pseudo - obtain an inode
 * @sb: superblock
 *
 * Allocates a new inode for given superblock.
 * Inode won't be chained in superblock s_inodes list.
 * This means:
 * - fs can't be unmounted
 * - quotas, fsnotify, writeback can't work
 */
struct inode *new_inode_pseudo(struct super_block *sb)
{
	return alloc_inode(sb);
}

/**
 * new_inode - obtain an inode
 * @sb: superblock
 *
 * Allocates a new inode for given superblock. The default gfp_mask
 * for allocations related to inode->i_mapping is GFP_HIGHUSER_MOVABLE.
 * If HIGHMEM pages are unsuitable or it is known that pages allocated
 * for the page cache are not reclaimable or migratable,
 * mapping_set_gfp_mask() must be called with suitable flags on the
 * newly created inode's mapping.
 */
struct inode *new_inode(struct super_block *sb)
{
	struct inode *inode;

	inode = new_inode_pseudo(sb);
	if (inode)
		inode_sb_list_add(inode);
	return inode;
}
EXPORT_SYMBOL(new_inode);
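
/*
 * A sketch of the mapping_set_gfp_mask() requirement mentioned above:
 * a filesystem whose page cache pages must stay in lowmem and unmovable
 * can override the default right after new_inode(). Hypothetical helper.
 */
#if 0
static struct inode *foo_new_inode_lowmem(struct super_block *sb)
{
	struct inode *inode = new_inode(sb);

	if (inode)
		mapping_set_gfp_mask(inode->i_mapping, GFP_USER);
	return inode;
}
#endif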

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void lockdep_annotate_inode_mutex_key(struct inode *inode)
{
	if (S_ISDIR(inode->i_mode)) {
		struct file_system_type *type = inode->i_sb->s_type;

		/* Set new key only if filesystem hasn't already changed it */
		if (lockdep_match_class(&inode->i_rwsem, &type->i_mutex_key)) {
			/*
			 * ensure nobody is actually holding i_mutex
			 */
			// mutex_destroy(&inode->i_mutex);
			init_rwsem(&inode->i_rwsem);
			lockdep_set_class(&inode->i_rwsem,
					  &type->i_mutex_dir_key);
		}
	}
}
EXPORT_SYMBOL(lockdep_annotate_inode_mutex_key);
#endif

/**
 * unlock_new_inode - clear the I_NEW state and wake up any waiters
 * @inode: new inode to unlock
 *
 * Called when the inode is fully initialised to clear the new state of the
 * inode and wake up anyone waiting for the inode to finish initialisation.
 */
void unlock_new_inode(struct inode *inode)
{
	lockdep_annotate_inode_mutex_key(inode);
	spin_lock(&inode->i_lock);
	WARN_ON(!(inode->i_state & I_NEW));
	inode->i_state &= ~I_NEW & ~I_CREATING;
	/*
	 * Pairs with the barrier in prepare_to_wait_event() to make sure
	 * ___wait_var_event() either sees the bit cleared or
	 * waitqueue_active() check in wake_up_var() sees the waiter.
	 */
	smp_mb();
	inode_wake_up_bit(inode, __I_NEW);
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(unlock_new_inode);

void discard_new_inode(struct inode *inode)
{
	lockdep_annotate_inode_mutex_key(inode);
	spin_lock(&inode->i_lock);
	WARN_ON(!(inode->i_state & I_NEW));
	inode->i_state &= ~I_NEW;
	/*
	 * Pairs with the barrier in prepare_to_wait_event() to make sure
	 * ___wait_var_event() either sees the bit cleared or
	 * waitqueue_active() check in wake_up_var() sees the waiter.
	 */
	smp_mb();
	inode_wake_up_bit(inode, __I_NEW);
	spin_unlock(&inode->i_lock);
	iput(inode);
}
EXPORT_SYMBOL(discard_new_inode);

/**
 * lock_two_nondirectories - take two i_mutexes on non-directory objects
 *
 * Lock any non-NULL argument. Passed objects must not be directories.
 * Zero, one or two objects may be locked by this function.
 *
 * @inode1: first inode to lock
 * @inode2: second inode to lock
 */
void lock_two_nondirectories(struct inode *inode1, struct inode *inode2)
{
	if (inode1)
		WARN_ON_ONCE(S_ISDIR(inode1->i_mode));
	if (inode2)
		WARN_ON_ONCE(S_ISDIR(inode2->i_mode));
	if (inode1 > inode2)
		swap(inode1, inode2);
	if (inode1)
		inode_lock(inode1);
	if (inode2 && inode2 != inode1)
		inode_lock_nested(inode2, I_MUTEX_NONDIR2);
}
EXPORT_SYMBOL(lock_two_nondirectories);

/**
 * unlock_two_nondirectories - release locks from lock_two_nondirectories()
 * @inode1: first inode to unlock
 * @inode2: second inode to unlock
 */
void unlock_two_nondirectories(struct inode *inode1, struct inode *inode2)
{
	if (inode1) {
		WARN_ON_ONCE(S_ISDIR(inode1->i_mode));
		inode_unlock(inode1);
	}
	if (inode2 && inode2 != inode1) {
		WARN_ON_ONCE(S_ISDIR(inode2->i_mode));
		inode_unlock(inode2);
	}
}
EXPORT_SYMBOL(unlock_two_nondirectories);
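
/*
 * Illustrative sketch (foo_do_copy() is hypothetical): callers that operate
 * on two regular files at once, e.g. dedupe or copy helpers, use the pair
 * above so both inodes are locked in a consistent, deadlock-free order
 * (ordered by object address).
 */
#if 0
static int foo_copy_between(struct inode *src, struct inode *dst)
{
	int ret;

	lock_two_nondirectories(src, dst);
	ret = foo_do_copy(src, dst);
	unlock_two_nondirectories(src, dst);
	return ret;
}
#endif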

/**
 * inode_insert5 - obtain an inode from a mounted file system
 * @inode: pre-allocated inode to use for insert to cache
 * @hashval: hash value (usually inode number) to get
 * @test: callback used for comparisons between inodes
 * @set: callback used to initialize a new struct inode
 * @data: opaque data pointer to pass to @test and @set
 *
 * Search for the inode specified by @hashval and @data in the inode cache,
 * and if present return it with an increased reference count. This is a
 * variant of iget5_locked() that doesn't allocate an inode.
 *
 * If the inode is not present in the cache, insert the pre-allocated inode and
 * return it locked, hashed, and with the I_NEW flag set. The file system gets
 * to fill it in before unlocking it via unlock_new_inode().
 *
 * Note that both @test and @set are called with the inode_hash_lock held, so
 * they can't sleep.
 */
struct inode *inode_insert5(struct inode *inode, unsigned long hashval,
			    int (*test)(struct inode *, void *),
			    int (*set)(struct inode *, void *), void *data)
{
	struct hlist_head *head = inode_hashtable + hash(inode->i_sb, hashval);
	struct inode *old;

again:
	spin_lock(&inode_hash_lock);
	old = find_inode(inode->i_sb, head, test, data, true);
	if (unlikely(old)) {
		/*
		 * Uhhuh, somebody else created the same inode under us.
		 * Use the old inode instead of the preallocated one.
		 */
		spin_unlock(&inode_hash_lock);
		if (IS_ERR(old))
			return NULL;
		wait_on_inode(old);
		if (unlikely(inode_unhashed(old))) {
			iput(old);
			goto again;
		}
		return old;
	}

	if (set && unlikely(set(inode, data))) {
		inode = NULL;
		goto unlock;
	}

	/*
	 * Return the locked inode with I_NEW set, the
	 * caller is responsible for filling in the contents
	 */
	spin_lock(&inode->i_lock);
	inode->i_state |= I_NEW;
	hlist_add_head_rcu(&inode->i_hash, head);
	spin_unlock(&inode->i_lock);

	/*
	 * Add inode to the sb list if it's not already. It has I_NEW at this
	 * point, so it should be safe to test i_sb_list locklessly.
	 */
	if (list_empty(&inode->i_sb_list))
		inode_sb_list_add(inode);
unlock:
	spin_unlock(&inode_hash_lock);

	return inode;
}
EXPORT_SYMBOL(inode_insert5);

/**
 * iget5_locked - obtain an inode from a mounted file system
 * @sb: super block of file system
 * @hashval: hash value (usually inode number) to get
 * @test: callback used for comparisons between inodes
 * @set: callback used to initialize a new struct inode
 * @data: opaque data pointer to pass to @test and @set
 *
 * Search for the inode specified by @hashval and @data in the inode cache,
 * and if present return it with an increased reference count. This is a
 * generalized version of iget_locked() for file systems where the inode
 * number is not sufficient for unique identification of an inode.
 *
 * If the inode is not present in the cache, allocate and insert a new inode
 * and return it locked, hashed, and with the I_NEW flag set. The file system
 * gets to fill it in before unlocking it via unlock_new_inode().
 *
 * Note that both @test and @set are called with the inode_hash_lock held, so
 * they can't sleep.
 */
struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *),
		int (*set)(struct inode *, void *), void *data)
{
	struct inode *inode = ilookup5(sb, hashval, test, data);

	if (!inode) {
		struct inode *new = alloc_inode(sb);

		if (new) {
			inode = inode_insert5(new, hashval, test, set, data);
			if (unlikely(inode != new))
				destroy_inode(new);
		}
	}
	return inode;
}
EXPORT_SYMBOL(iget5_locked);
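
/*
 * Illustrative sketch (hypothetical "foo" filesystem; FOO_I() maps a VFS
 * inode to its containing fs-private inode): the usual iget5_locked()
 * pattern with @test/@set keyed on a 64-bit object id.
 */
#if 0
struct foo_ikey {
	u64 objectid;
};

static int foo_itest(struct inode *inode, void *opaque)
{
	struct foo_ikey *key = opaque;

	return FOO_I(inode)->objectid == key->objectid;
}

static int foo_iset(struct inode *inode, void *opaque)
{
	struct foo_ikey *key = opaque;

	FOO_I(inode)->objectid = key->objectid;
	return 0;
}

static struct inode *foo_iget(struct super_block *sb, u64 objectid)
{
	struct foo_ikey key = { .objectid = objectid };
	struct inode *inode;

	inode = iget5_locked(sb, objectid, foo_itest, foo_iset, &key);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (inode->i_state & I_NEW) {
		/* fill in from backing store, then publish: */
		unlock_new_inode(inode);
	}
	return inode;
}
#endif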

/**
 * iget5_locked_rcu - obtain an inode from a mounted file system
 * @sb: super block of file system
 * @hashval: hash value (usually inode number) to get
 * @test: callback used for comparisons between inodes
 * @set: callback used to initialize a new struct inode
 * @data: opaque data pointer to pass to @test and @set
 *
 * This is equivalent to iget5_locked, except the @test callback must
 * tolerate the inode not being stable, including being mid-teardown.
 */
struct inode *iget5_locked_rcu(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *),
		int (*set)(struct inode *, void *), void *data)
{
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
	struct inode *inode, *new;

again:
	inode = find_inode(sb, head, test, data, false);
	if (inode) {
		if (IS_ERR(inode))
			return NULL;
		wait_on_inode(inode);
		if (unlikely(inode_unhashed(inode))) {
			iput(inode);
			goto again;
		}
		return inode;
	}

	new = alloc_inode(sb);
	if (new) {
		inode = inode_insert5(new, hashval, test, set, data);
		if (unlikely(inode != new))
			destroy_inode(new);
	}
	return inode;
}
EXPORT_SYMBOL_GPL(iget5_locked_rcu);

/**
 * iget_locked - obtain an inode from a mounted file system
 * @sb: super block of file system
 * @ino: inode number to get
 *
 * Search for the inode specified by @ino in the inode cache and if present
 * return it with an increased reference count. This is for file systems
 * where the inode number is sufficient for unique identification of an inode.
 *
 * If the inode is not in cache, allocate a new inode and return it locked,
 * hashed, and with the I_NEW flag set. The file system gets to fill it in
 * before unlocking it via unlock_new_inode().
 */
struct inode *iget_locked(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *head = inode_hashtable + hash(sb, ino);
	struct inode *inode;
again:
	inode = find_inode_fast(sb, head, ino, false);
	if (inode) {
		if (IS_ERR(inode))
			return NULL;
		wait_on_inode(inode);
		if (unlikely(inode_unhashed(inode))) {
			iput(inode);
			goto again;
		}
		return inode;
	}

	inode = alloc_inode(sb);
	if (inode) {
		struct inode *old;

		spin_lock(&inode_hash_lock);
		/* We released the lock, so.. */
		old = find_inode_fast(sb, head, ino, true);
		if (!old) {
			inode->i_ino = ino;
			spin_lock(&inode->i_lock);
			inode->i_state = I_NEW;
			hlist_add_head_rcu(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			inode_sb_list_add(inode);
			spin_unlock(&inode_hash_lock);

			/* Return the locked inode with I_NEW set, the
			 * caller is responsible for filling in the contents
			 */
			return inode;
		}

		/*
		 * Uhhuh, somebody else created the same inode under
		 * us. Use the old inode instead of the one we just
		 * allocated.
		 */
		spin_unlock(&inode_hash_lock);
		destroy_inode(inode);
		if (IS_ERR(old))
			return NULL;
		inode = old;
		wait_on_inode(inode);
		if (unlikely(inode_unhashed(inode))) {
			iput(inode);
			goto again;
		}
	}
	return inode;
}
EXPORT_SYMBOL(iget_locked);
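
/*
 * Illustrative sketch (foo_read_inode() is hypothetical): the canonical
 * iget_locked() pattern, including the failure path via iget_failed().
 */
#if 0
static struct inode *foo_iget_simple(struct super_block *sb, unsigned long ino)
{
	struct inode *inode = iget_locked(sb, ino);

	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;		/* cache hit, already initialised */

	if (foo_read_inode(inode)) {
		iget_failed(inode);	/* marks bad, unlocks and drops the inode */
		return ERR_PTR(-EIO);
	}
	unlock_new_inode(inode);
	return inode;
}
#endif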

/*
 * search the inode cache for a matching inode number.
 * If we find one, then the inode number we are trying to
 * allocate is not unique and so we should not use it.
 *
 * Returns 1 if the inode number is unique, 0 if it is not.
 */
static int test_inode_iunique(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *b = inode_hashtable + hash(sb, ino);
	struct inode *inode;

	hlist_for_each_entry_rcu(inode, b, i_hash) {
		if (inode->i_ino == ino && inode->i_sb == sb)
			return 0;
	}
	return 1;
}

/**
 * iunique - get a unique inode number
 * @sb: superblock
 * @max_reserved: highest reserved inode number
 *
 * Obtain an inode number that is unique on the system for a given
 * superblock. This is used by file systems that have no natural
 * permanent inode numbering system. An inode number is returned that
 * is higher than the reserved limit but unique.
 *
 * BUGS:
 * With a large number of inodes live on the file system this function
 * currently becomes quite slow.
 */
ino_t iunique(struct super_block *sb, ino_t max_reserved)
{
	/*
	 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
	 * error if st_ino won't fit in target struct field. Use 32bit counter
	 * here to attempt to avoid that.
	 */
	static DEFINE_SPINLOCK(iunique_lock);
	static unsigned int counter;
	ino_t res;

	rcu_read_lock();
	spin_lock(&iunique_lock);
	do {
		if (counter <= max_reserved)
			counter = max_reserved + 1;
		res = counter++;
	} while (!test_inode_iunique(sb, res));
	spin_unlock(&iunique_lock);
	rcu_read_unlock();

	return res;
}
EXPORT_SYMBOL(iunique);
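
/*
 * Illustrative sketch: a filesystem with no natural inode numbering can
 * combine new_inode() with iunique(); FOO_MAX_RESERVED_INO is hypothetical.
 */
#if 0
static struct inode *foo_new_numbered_inode(struct super_block *sb)
{
	struct inode *inode = new_inode(sb);

	if (inode)
		inode->i_ino = iunique(sb, FOO_MAX_RESERVED_INO);
	return inode;
}
#endif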

struct inode *igrab(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	if (!(inode->i_state & (I_FREEING|I_WILL_FREE))) {
		__iget(inode);
		spin_unlock(&inode->i_lock);
	} else {
		spin_unlock(&inode->i_lock);
		/*
		 * Handle the case where s_op->clear_inode has not been
		 * called yet, and somebody is calling igrab
		 * while the inode is getting freed.
		 */
		inode = NULL;
	}
	return inode;
}
EXPORT_SYMBOL(igrab);

/**
 * ilookup5_nowait - search for an inode in the inode cache
 * @sb: super block of file system to search
 * @hashval: hash value (usually inode number) to search for
 * @test: callback used for comparisons between inodes
 * @data: opaque data pointer to pass to @test
 *
 * Search for the inode specified by @hashval and @data in the inode cache.
 * If the inode is in the cache, the inode is returned with an incremented
 * reference count.
 *
 * Note: I_NEW is not waited upon so you have to be very careful what you do
 * with the returned inode. You probably should be using ilookup5() instead.
 *
 * Note2: @test is called with the inode_hash_lock held, so can't sleep.
 */
struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	inode = find_inode(sb, head, test, data, true);
	spin_unlock(&inode_hash_lock);

	return IS_ERR(inode) ? NULL : inode;
}
EXPORT_SYMBOL(ilookup5_nowait);

/**
 * ilookup5 - search for an inode in the inode cache
 * @sb: super block of file system to search
 * @hashval: hash value (usually inode number) to search for
 * @test: callback used for comparisons between inodes
 * @data: opaque data pointer to pass to @test
 *
 * Search for the inode specified by @hashval and @data in the inode cache,
 * and if the inode is in the cache, return the inode with an incremented
 * reference count. Waits on I_NEW before returning the inode.
 *
 * This is a generalized version of ilookup() for file systems where the
 * inode number is not sufficient for unique identification of an inode.
 *
 * Note: @test is called with the inode_hash_lock held, so can't sleep.
 */
struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	struct inode *inode;
again:
	inode = ilookup5_nowait(sb, hashval, test, data);
	if (inode) {
		wait_on_inode(inode);
		if (unlikely(inode_unhashed(inode))) {
			iput(inode);
			goto again;
		}
	}
	return inode;
}
EXPORT_SYMBOL(ilookup5);

/**
 * ilookup - search for an inode in the inode cache
 * @sb: super block of file system to search
 * @ino: inode number to search for
 *
 * Search for the inode @ino in the inode cache, and if the inode is in the
 * cache, the inode is returned with an incremented reference count.
 */
struct inode *ilookup(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *head = inode_hashtable + hash(sb, ino);
	struct inode *inode;
again:
	inode = find_inode_fast(sb, head, ino, false);

	if (inode) {
		if (IS_ERR(inode))
			return NULL;
		wait_on_inode(inode);
		if (unlikely(inode_unhashed(inode))) {
			iput(inode);
			goto again;
		}
	}
	return inode;
}
EXPORT_SYMBOL(ilookup);

/**
 * find_inode_nowait - find an inode in the inode cache
 * @sb: super block of file system to search
 * @hashval: hash value (usually inode number) to search for
 * @match: callback used for comparisons between inodes
 * @data: opaque data pointer to pass to @match
 *
 * Search for the inode specified by @hashval and @data in the inode
 * cache, where the helper function @match will return 0 if the inode
 * does not match, 1 if the inode does match, and -1 if the search
 * should be stopped. The @match function must be responsible for
 * taking the i_lock spin_lock and checking i_state for an inode being
 * freed or being initialized, and incrementing the reference count
 * before returning 1. It also must not sleep, since it is called with
 * the inode_hash_lock spinlock held.
 *
 * This is an even more generalized version of ilookup5() when the
 * function must never block --- find_inode() can block in
 * __wait_on_freeing_inode() --- or when the caller can not increment
 * the reference count because the resulting iput() might cause an
 * inode eviction. The tradeoff is that the @match function must be
 * very carefully implemented.
 */
struct inode *find_inode_nowait(struct super_block *sb,
				unsigned long hashval,
				int (*match)(struct inode *, unsigned long,
					     void *),
				void *data)
{
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
	struct inode *inode, *ret_inode = NULL;
	int mval;

	spin_lock(&inode_hash_lock);
	hlist_for_each_entry(inode, head, i_hash) {
		if (inode->i_sb != sb)
			continue;
		mval = match(inode, hashval, data);
		if (mval == 0)
			continue;
		if (mval == 1)
			ret_inode = inode;
		goto out;
	}
out:
	spin_unlock(&inode_hash_lock);
	return ret_inode;
}
EXPORT_SYMBOL(find_inode_nowait);
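
/*
 * Illustrative sketch (FOO_I() is hypothetical) of a @match callback that
 * honours the contract above: it takes i_lock itself, rejects inodes that
 * are being freed or initialized, and grabs the reference before returning 1.
 */
#if 0
static int foo_match(struct inode *inode, unsigned long hashval, void *data)
{
	u64 *objectid = data;
	int ret = 0;

	spin_lock(&inode->i_lock);
	if (!(inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW)) &&
	    FOO_I(inode)->objectid == *objectid) {
		__iget(inode);	/* take the reference while holding i_lock */
		ret = 1;
	}
	spin_unlock(&inode->i_lock);
	return ret;
}
#endif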

/**
 * find_inode_rcu - find an inode in the inode cache
 * @sb: Super block of file system to search
 * @hashval: Key to hash
 * @test: Function to test match on an inode
 * @data: Data for test function
 *
 * Search for the inode specified by @hashval and @data in the inode cache,
 * where the helper function @test will return 0 if the inode does not match
 * and 1 if it does. The @test function must be responsible for taking the
 * i_lock spin_lock and checking i_state for an inode being freed or being
 * initialized.
 *
 * If successful, this will return the inode for which the @test function
 * returned 1 and NULL otherwise.
 *
 * The @test function is not permitted to take a ref on any inode presented.
 * It is also not permitted to sleep.
 *
 * The caller must hold the RCU read lock.
 */
struct inode *find_inode_rcu(struct super_block *sb, unsigned long hashval,
			     int (*test)(struct inode *, void *), void *data)
{
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
	struct inode *inode;

	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
			 "suspicious find_inode_rcu() usage");

	hlist_for_each_entry_rcu(inode, head, i_hash) {
		if (inode->i_sb == sb &&
		    !(READ_ONCE(inode->i_state) & (I_FREEING | I_WILL_FREE)) &&
		    test(inode, data))
			return inode;
	}
	return NULL;
}
EXPORT_SYMBOL(find_inode_rcu);

/**
 * find_inode_by_ino_rcu - Find an inode in the inode cache
 * @sb: Super block of file system to search
 * @ino: The inode number to match
 *
 * Search the inode cache for an inode on @sb whose inode number matches
 * @ino, skipping inodes that are being freed.
 *
 * If successful, this will return the matching inode and NULL otherwise.
 *
 * The caller must hold the RCU read lock and is not permitted to sleep.
 */
struct inode *find_inode_by_ino_rcu(struct super_block *sb,
				    unsigned long ino)
{
	struct hlist_head *head = inode_hashtable + hash(sb, ino);
	struct inode *inode;

	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
			 "suspicious find_inode_by_ino_rcu() usage");

	hlist_for_each_entry_rcu(inode, head, i_hash) {
		if (inode->i_ino == ino &&
		    inode->i_sb == sb &&
		    !(READ_ONCE(inode->i_state) & (I_FREEING | I_WILL_FREE)))
			return inode;
	}
	return NULL;
}
EXPORT_SYMBOL(find_inode_by_ino_rcu);
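
/*
 * Illustrative sketch of the RCU contract shared by find_inode_rcu() and
 * find_inode_by_ino_rcu(): the caller brackets the lookup with
 * rcu_read_lock() and must not use the inode pointer past the unlock
 * without having taken a reference.
 */
#if 0
static bool foo_ino_is_cached(struct super_block *sb, unsigned long ino)
{
	bool cached;

	rcu_read_lock();
	cached = find_inode_by_ino_rcu(sb, ino) != NULL;
	rcu_read_unlock();
	return cached;
}
#endif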

int insert_inode_locked(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	ino_t ino = inode->i_ino;
	struct hlist_head *head = inode_hashtable + hash(sb, ino);

	while (1) {
		struct inode *old = NULL;
		spin_lock(&inode_hash_lock);
		hlist_for_each_entry(old, head, i_hash) {
			if (old->i_ino != ino)
				continue;
			if (old->i_sb != sb)
				continue;
			spin_lock(&old->i_lock);
			if (old->i_state & (I_FREEING|I_WILL_FREE)) {
				spin_unlock(&old->i_lock);
				continue;
			}
			break;
		}
		if (likely(!old)) {
			spin_lock(&inode->i_lock);
			inode->i_state |= I_NEW | I_CREATING;
			hlist_add_head_rcu(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			spin_unlock(&inode_hash_lock);
			return 0;
		}
		if (unlikely(old->i_state & I_CREATING)) {
			spin_unlock(&old->i_lock);
			spin_unlock(&inode_hash_lock);
			return -EBUSY;
		}
		__iget(old);
		spin_unlock(&old->i_lock);
		spin_unlock(&inode_hash_lock);
		wait_on_inode(old);
		if (unlikely(!inode_unhashed(old))) {
			iput(old);
			return -EBUSY;
		}
		iput(old);
	}
}
EXPORT_SYMBOL(insert_inode_locked);
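
/*
 * Example (illustrative sketch): a typical on-disk filesystem uses
 * insert_inode_locked() in its inode-creation path after picking an inode
 * number, and unlocks the new inode once it is fully set up. The
 * myfs_new_ino() helper is hypothetical.
 *
 *	inode->i_ino = myfs_new_ino(dir);
 *	err = insert_inode_locked(inode);
 *	if (err) {
 *		iput(inode);
 *		return ERR_PTR(err);
 *	}
 *	... initialize and persist the inode ...
 *	unlock_new_inode(inode);
 */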

int insert_inode_locked4(struct inode *inode, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	struct inode *old;

	inode->i_state |= I_CREATING;
	old = inode_insert5(inode, hashval, test, NULL, data);

	if (old != inode) {
		iput(old);
		return -EBUSY;
	}
	return 0;
}
EXPORT_SYMBOL(insert_inode_locked4);


int generic_delete_inode(struct inode *inode)
{
	return 1;
}
EXPORT_SYMBOL(generic_delete_inode);

/*
 * Called when we're dropping the last reference
 * to an inode.
 *
 * Call the FS "drop_inode()" function, defaulting to
 * the legacy UNIX filesystem behaviour. If it tells
 * us to evict the inode, do so. Otherwise, retain the
 * inode in the cache if the fs is alive, or sync and
 * evict it if the fs is shutting down.
 */
static void iput_final(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	const struct super_operations *op = inode->i_sb->s_op;
	unsigned long state;
	int drop;

	WARN_ON(inode->i_state & I_NEW);

	if (op->drop_inode)
		drop = op->drop_inode(inode);
	else
		drop = generic_drop_inode(inode);

	if (!drop &&
	    !(inode->i_state & I_DONTCACHE) &&
	    (sb->s_flags & SB_ACTIVE)) {
		__inode_add_lru(inode, true);
		spin_unlock(&inode->i_lock);
		return;
	}

	state = inode->i_state;
	if (!drop) {
		WRITE_ONCE(inode->i_state, state | I_WILL_FREE);
		spin_unlock(&inode->i_lock);

		write_inode_now(inode, 1);

		spin_lock(&inode->i_lock);
		state = inode->i_state;
		WARN_ON(state & I_NEW);
		state &= ~I_WILL_FREE;
	}

	WRITE_ONCE(inode->i_state, state | I_FREEING);
	if (!list_empty(&inode->i_lru))
		inode_lru_list_del(inode);
	spin_unlock(&inode->i_lock);

	evict(inode);
}

/**
 * iput - put an inode
 * @inode: inode to put
 *
 * Puts an inode, dropping its usage count. If the inode use count hits
 * zero, the inode is then freed and may also be destroyed.
 *
 * Consequently, iput() can sleep.
 */
void iput(struct inode *inode)
{
	if (!inode)
		return;
	BUG_ON(inode->i_state & I_CLEAR);
retry:
	if (atomic_dec_and_lock(&inode->i_count, &inode->i_lock)) {
		if (inode->i_nlink && (inode->i_state & I_DIRTY_TIME)) {
			atomic_inc(&inode->i_count);
			spin_unlock(&inode->i_lock);
			trace_writeback_lazytime_iput(inode);
			mark_inode_dirty_sync(inode);
			goto retry;
		}
		iput_final(inode);
	}
}
EXPORT_SYMBOL(iput);

#ifdef CONFIG_BLOCK
/**
 * bmap - find a block number in a file
 * @inode: inode owning the block number being requested
 * @block: pointer containing the block to find
 *
 * Replaces the value in ``*block`` with the number of the block on the
 * backing device that holds the corresponding block of the file. That is,
 * asked for block 4 of inode 1, the function replaces the 4 in ``*block``
 * with the block number, relative to the start of the disk, that holds
 * that block of the file.
 *
 * Returns -EINVAL in case of error, 0 otherwise. If mapping falls into a
 * hole, returns 0 and ``*block`` is also set to 0.
 */
int bmap(struct inode *inode, sector_t *block)
{
	if (!inode->i_mapping->a_ops->bmap)
		return -EINVAL;

	*block = inode->i_mapping->a_ops->bmap(inode->i_mapping, *block);
	return 0;
}
EXPORT_SYMBOL(bmap);
#endif
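
/*
 * Example (illustrative sketch): this is essentially what the FIBMAP
 * ioctl path does with bmap(); the variable names are illustrative.
 *
 *	sector_t blk = 4;
 *	int err = bmap(inode, &blk);
 *
 * On return, err == -EINVAL means the filesystem has no ->bmap method,
 * blk == 0 means file block 4 falls into a hole, and any other value of
 * blk is the on-disk block number that backs block 4 of the file.
 */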

/*
 * With relative atime, only update atime if the previous atime is
 * earlier than or equal to either the ctime or mtime,
 * or if at least a day has passed since the last atime update.
 */
static bool relatime_need_update(struct vfsmount *mnt, struct inode *inode,
			     struct timespec64 now)
{
	struct timespec64 atime, mtime, ctime;

	if (!(mnt->mnt_flags & MNT_RELATIME))
		return true;
	/*
	 * Is mtime younger than or equal to atime? If yes, update atime:
	 */
	atime = inode_get_atime(inode);
	mtime = inode_get_mtime(inode);
	if (timespec64_compare(&mtime, &atime) >= 0)
		return true;
	/*
	 * Is ctime younger than or equal to atime? If yes, update atime:
	 */
	ctime = inode_get_ctime(inode);
	if (timespec64_compare(&ctime, &atime) >= 0)
		return true;

	/*
	 * Is the previous atime value older than a day? If yes,
	 * update atime:
	 */
	if ((long)(now.tv_sec - atime.tv_sec) >= 24*60*60)
		return true;
	/*
	 * Good, we can skip the atime update:
	 */
	return false;
}
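
/*
 * Worked example (illustrative): on a MNT_RELATIME mount, consider a file
 * read at noon whose atime is 11:00 and whose mtime and ctime are 09:00 on
 * the same day. The atime is strictly newer than both mtime and ctime and
 * less than 24 hours old, so relatime_need_update() returns false and the
 * read does not dirty the inode. If the file is then written (mtime becomes
 * 12:05), the next read sees mtime >= atime and updates atime again.
 */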

/**
 * inode_update_timestamps - update the timestamps on the inode
 * @inode: inode to be updated
 * @flags: S_* flags that need to be updated
 *
 * The update_time function is called when an inode's timestamps need to be
 * updated for a read or write operation. This function handles updating the
 * actual timestamps. It's up to the caller to ensure that the inode is marked
 * dirty appropriately.
 *
 * In the case where any of S_MTIME, S_CTIME, or S_VERSION need to be updated,
 * attempt to update all three of them. S_ATIME updates can be handled
 * independently of the rest.
 *
 * Returns a set of S_* flags indicating which values changed.
 */
int inode_update_timestamps(struct inode *inode, int flags)
{
	int updated = 0;
	struct timespec64 now;

	if (flags & (S_MTIME|S_CTIME|S_VERSION)) {
		struct timespec64 ctime = inode_get_ctime(inode);
		struct timespec64 mtime = inode_get_mtime(inode);

		now = inode_set_ctime_current(inode);
		if (!timespec64_equal(&now, &ctime))
			updated |= S_CTIME;
		if (!timespec64_equal(&now, &mtime)) {
			inode_set_mtime_to_ts(inode, now);
			updated |= S_MTIME;
		}
		if (IS_I_VERSION(inode) && inode_maybe_inc_iversion(inode, updated))
			updated |= S_VERSION;
	} else {
		now = current_time(inode);
	}

	if (flags & S_ATIME) {
		struct timespec64 atime = inode_get_atime(inode);

		if (!timespec64_equal(&now, &atime)) {
			inode_set_atime_to_ts(inode, now);
			updated |= S_ATIME;
		}
	}
	return updated;
}
EXPORT_SYMBOL(inode_update_timestamps);

/**
 * generic_update_time - update the timestamps on the inode
 * @inode: inode to be updated
 * @flags: S_* flags that need to be updated
 *
 * The update_time function is called when an inode's timestamps need to be
 * updated for a read or write operation. In the case where any of S_MTIME,
 * S_CTIME, or S_VERSION need to be updated we attempt to update all three of
 * them. S_ATIME updates can be handled independently of the rest.
 *
 * Returns a S_* mask indicating which fields were updated.
 */
int generic_update_time(struct inode *inode, int flags)
{
	int updated = inode_update_timestamps(inode, flags);
	int dirty_flags = 0;

	if (updated & (S_ATIME|S_MTIME|S_CTIME))
		dirty_flags = inode->i_sb->s_flags & SB_LAZYTIME ? I_DIRTY_TIME : I_DIRTY_SYNC;
	if (updated & S_VERSION)
		dirty_flags |= I_DIRTY_SYNC;
	__mark_inode_dirty(inode, dirty_flags);
	return updated;
}
EXPORT_SYMBOL(generic_update_time);

/*
 * This does the actual work of updating an inode's time or version. The
 * caller must have called mnt_want_write() beforehand.
 */
int inode_update_time(struct inode *inode, int flags)
{
	if (inode->i_op->update_time)
		return inode->i_op->update_time(inode, flags);
	generic_update_time(inode, flags);
	return 0;
}
EXPORT_SYMBOL(inode_update_time);
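
/*
 * Example (illustrative sketch): a filesystem that needs to hook timestamp
 * updates can supply ->update_time and still defer to the generic helper.
 * The "myfs" names below are hypothetical.
 *
 *	static int myfs_update_time(struct inode *inode, int flags)
 *	{
 *		if (myfs_is_readonly_snapshot(inode))
 *			return -EROFS;
 *		generic_update_time(inode, flags);
 *		return 0;
 *	}
 *
 * Filesystems with no special needs simply leave ->update_time NULL, and
 * inode_update_time() falls back to generic_update_time() as above.
 */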

/**
 * atime_needs_update - check whether the access time needs updating
 * @path: the &struct path being accessed
 * @inode: inode to check
 *
 * Decide whether the accessed time on an inode should be updated and the
 * inode marked for writeback. This automatically handles the "noatime"
 * flags on the superblock, mount and inode, as well as relative atime.
 */
bool atime_needs_update(const struct path *path, struct inode *inode)
{
	struct vfsmount *mnt = path->mnt;
	struct timespec64 now, atime;

	if (inode->i_flags & S_NOATIME)
		return false;

	/* Atime updates will likely cause i_uid and i_gid to be written
	 * back improperly if their true value is unknown to the vfs.
	 */
	if (HAS_UNMAPPED_ID(mnt_idmap(mnt), inode))
		return false;

	if (IS_NOATIME(inode))
		return false;
	if ((inode->i_sb->s_flags & SB_NODIRATIME) && S_ISDIR(inode->i_mode))
		return false;

	if (mnt->mnt_flags & MNT_NOATIME)
		return false;
	if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))
		return false;

	now = current_time(inode);

	if (!relatime_need_update(mnt, inode, now))
		return false;

	atime = inode_get_atime(inode);
	if (timespec64_equal(&atime, &now))
		return false;

	return true;
}

void touch_atime(const struct path *path)
{
	struct vfsmount *mnt = path->mnt;
	struct inode *inode = d_inode(path->dentry);

	if (!atime_needs_update(path, inode))
		return;

	if (!sb_start_write_trylock(inode->i_sb))
		return;

	if (mnt_get_write_access(mnt) != 0)
		goto skip_update;
	/*
	 * File systems can error out when updating inodes if they need to
	 * allocate new space to modify an inode (such is the case for
	 * Btrfs), but since we touch atime while walking down the path we
	 * really don't care if we failed to update the atime of the file,
	 * so just ignore the return value.
	 * We may also fail on filesystems that have the ability to make parts
	 * of the fs read only, e.g. subvolumes in Btrfs.
	 */
	inode_update_time(inode, S_ATIME);
	mnt_put_write_access(mnt);
skip_update:
	sb_end_write(inode->i_sb);
}
EXPORT_SYMBOL(touch_atime);

/*
 * Return mask of changes for notify_change() that need to be done as a
 * response to write or truncate. Return 0 if nothing has to be changed.
 * Negative value on error (change should be denied).
 */
int dentry_needs_remove_privs(struct mnt_idmap *idmap,
			      struct dentry *dentry)
{
	struct inode *inode = d_inode(dentry);
	int mask = 0;
	int ret;

	if (IS_NOSEC(inode))
		return 0;

	mask = setattr_should_drop_suidgid(idmap, inode);
	ret = security_inode_need_killpriv(dentry);
	if (ret < 0)
		return ret;
	if (ret)
		mask |= ATTR_KILL_PRIV;
	return mask;
}

static int __remove_privs(struct mnt_idmap *idmap,
			  struct dentry *dentry, int kill)
{
	struct iattr newattrs;

	newattrs.ia_valid = ATTR_FORCE | kill;
	/*
	 * Note we call this on write, so notify_change will not
	 * encounter any conflicting delegations:
	 */
	return notify_change(idmap, dentry, &newattrs, NULL);
}

int file_remove_privs_flags(struct file *file, unsigned int flags)
{
	struct dentry *dentry = file_dentry(file);
	struct inode *inode = file_inode(file);
	int error = 0;
	int kill;

	if (IS_NOSEC(inode) || !S_ISREG(inode->i_mode))
		return 0;

	kill = dentry_needs_remove_privs(file_mnt_idmap(file), dentry);
	if (kill < 0)
		return kill;

	if (kill) {
		if (flags & IOCB_NOWAIT)
			return -EAGAIN;

		error = __remove_privs(file_mnt_idmap(file), dentry, kill);
	}

	if (!error)
		inode_has_no_xattr(inode);
	return error;
}
EXPORT_SYMBOL_GPL(file_remove_privs_flags);

/**
 * file_remove_privs - remove special file privileges (suid, capabilities)
 * @file: file to remove privileges from
 *
 * When a file is modified by a write or truncation, ensure that special
 * file privileges are removed.
 *
 * Return: 0 on success, negative errno on failure.
 */
int file_remove_privs(struct file *file)
{
	return file_remove_privs_flags(file, 0);
}
EXPORT_SYMBOL(file_remove_privs);

/**
 * current_time - Return FS time (possibly fine-grained)
 * @inode: inode.
 *
 * Return the current time truncated to the time granularity supported by
 * the fs, as suitable for a ctime/mtime change. If the ctime is flagged
 * as having been QUERIED, get a fine-grained timestamp, but don't update
 * the floor.
 *
 * For a multigrain inode, this is effectively an estimate of the timestamp
 * that a file would receive. An actual update must go through
 * inode_set_ctime_current().
 */
struct timespec64 current_time(struct inode *inode)
{
	struct timespec64 now;
	u32 cns;

	ktime_get_coarse_real_ts64_mg(&now);

	if (!is_mgtime(inode))
		goto out;

	/* If nothing has queried it, then coarse time is fine */
	cns = smp_load_acquire(&inode->i_ctime_nsec);
	if (cns & I_CTIME_QUERIED) {
		/*
		 * If there is no apparent change, then get a fine-grained
		 * timestamp.
		 */
		if (now.tv_nsec == (cns & ~I_CTIME_QUERIED))
			ktime_get_real_ts64(&now);
	}
out:
	return timestamp_truncate(now, inode);
}
EXPORT_SYMBOL(current_time);

static int inode_needs_update_time(struct inode *inode)
{
	struct timespec64 now, ts;
	int sync_it = 0;

	/* First try to exhaust all avenues to not sync */
	if (IS_NOCMTIME(inode))
		return 0;

	now = current_time(inode);

	ts = inode_get_mtime(inode);
	if (!timespec64_equal(&ts, &now))
		sync_it |= S_MTIME;

	ts = inode_get_ctime(inode);
	if (!timespec64_equal(&ts, &now))
		sync_it |= S_CTIME;

	if (IS_I_VERSION(inode) && inode_iversion_need_inc(inode))
		sync_it |= S_VERSION;

	return sync_it;
}

static int __file_update_time(struct file *file, int sync_mode)
{
	int ret = 0;
	struct inode *inode = file_inode(file);

	/* try to update time settings */
	if (!mnt_get_write_access_file(file)) {
		ret = inode_update_time(inode, sync_mode);
		mnt_put_write_access_file(file);
	}

	return ret;
}

/**
 * file_update_time - update mtime and ctime
 * @file: file accessed
 *
 * Update the mtime and ctime members of an inode and mark the inode for
 * writeback. Note that this function is meant exclusively for usage in
 * the file write path of filesystems, and filesystems may choose to
 * explicitly ignore updates via this function with the S_NOCMTIME inode
 * flag, e.g. for network filesystems where these timestamps are handled
 * by the server. This can return an error for file systems that need to
 * allocate space in order to update an inode.
 *
 * Return: 0 on success, negative errno on failure.
 */
int file_update_time(struct file *file)
{
	int ret;
	struct inode *inode = file_inode(file);

	ret = inode_needs_update_time(inode);
	if (ret <= 0)
		return ret;

	return __file_update_time(file, ret);
}
EXPORT_SYMBOL(file_update_time);
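
/*
 * Example (illustrative sketch): fault and write paths typically stamp the
 * inode before dirtying data, e.g. in a ->page_mkwrite handler:
 *
 *	file_update_time(vmf->vma->vm_file);
 *	... lock the folio, mark it dirty, return VM_FAULT_LOCKED ...
 *
 * In the write path proper, the combined file_modified() helper below is
 * usually preferred, since it also drops suid/sgid bits.
 */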

/**
 * file_modified_flags - handle mandated vfs changes when modifying a file
 * @file: file that was modified
 * @flags: kiocb flags
 *
 * When a file has been modified, ensure that special file privileges are
 * removed and time settings are updated.
 *
 * If IOCB_NOWAIT is set, special file privileges will not be removed and
 * time settings will not be updated. It will return -EAGAIN.
 *
 * Context: Caller must hold the file's inode lock.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int file_modified_flags(struct file *file, int flags)
{
	int ret;
	struct inode *inode = file_inode(file);

	/*
	 * Clear the security bits if the process is not being run by root.
	 * This keeps people from modifying setuid and setgid binaries.
	 */
	ret = file_remove_privs_flags(file, flags);
	if (ret)
		return ret;

	if (unlikely(file->f_mode & FMODE_NOCMTIME))
		return 0;

	ret = inode_needs_update_time(inode);
	if (ret <= 0)
		return ret;
	if (flags & IOCB_NOWAIT)
		return -EAGAIN;

	return __file_update_time(file, ret);
}

/**
 * file_modified - handle mandated vfs changes when modifying a file
 * @file: file that was modified
 *
 * When a file has been modified, ensure that special file privileges are
 * removed and time settings are updated.
 *
 * Context: Caller must hold the file's inode lock.
 *
 * Return: 0 on success, negative errno on failure.
 */
int file_modified(struct file *file)
{
	return file_modified_flags(file, 0);
}
EXPORT_SYMBOL(file_modified);
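
/*
 * Example (illustrative sketch): a typical ->write_iter implementation
 * calls file_modified() with the inode lock held before writing any data.
 * The "myfs" names are hypothetical.
 *
 *	static ssize_t myfs_write_iter(struct kiocb *iocb, struct iov_iter *from)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock(inode);
 *		ret = file_modified(iocb->ki_filp);
 *		if (!ret)
 *			ret = generic_perform_write(iocb, from);
 *		inode_unlock(inode);
 *		return ret;
 *	}
 */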

/**
 * kiocb_modified - handle mandated vfs changes when modifying a file
 * @iocb: iocb that was modified
 *
 * When a file has been modified, ensure that special file privileges are
 * removed and time settings are updated.
 *
 * Context: Caller must hold the file's inode lock.
 *
 * Return: 0 on success, negative errno on failure.
 */
int kiocb_modified(struct kiocb *iocb)
{
	return file_modified_flags(iocb->ki_filp, iocb->ki_flags);
}
EXPORT_SYMBOL_GPL(kiocb_modified);

int inode_needs_sync(struct inode *inode)
{
	if (IS_SYNC(inode))
		return 1;
	if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode))
		return 1;
	return 0;
}
EXPORT_SYMBOL(inode_needs_sync);

/*
 * If we try to find an inode in the inode hash while it is being
 * deleted, we have to wait until the filesystem completes its
 * deletion before reporting that it isn't found. This function waits
 * until the deletion _might_ have completed. Callers are responsible
 * for rechecking the inode state.
 *
 * It doesn't matter if I_NEW is not set initially, a call to
 * wake_up_bit(&inode->i_state, __I_NEW) after removing from the hash list
 * will DTRT.
 */
static void __wait_on_freeing_inode(struct inode *inode, bool is_inode_hash_locked)
{
	struct wait_bit_queue_entry wqe;
	struct wait_queue_head *wq_head;

	/*
	 * Handle racing against evict(), see that routine for more details.
	 */
	if (unlikely(inode_unhashed(inode))) {
		WARN_ON(is_inode_hash_locked);
		spin_unlock(&inode->i_lock);
		return;
	}

	wq_head = inode_bit_waitqueue(&wqe, inode, __I_NEW);
	prepare_to_wait_event(wq_head, &wqe.wq_entry, TASK_UNINTERRUPTIBLE);
	spin_unlock(&inode->i_lock);
	rcu_read_unlock();
	if (is_inode_hash_locked)
		spin_unlock(&inode_hash_lock);
	schedule();
	finish_wait(wq_head, &wqe.wq_entry);
	if (is_inode_hash_locked)
		spin_lock(&inode_hash_lock);
	rcu_read_lock();
}

static __initdata unsigned long ihash_entries;
static int __init set_ihash_entries(char *str)
{
	if (!str)
		return 0;
	ihash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("ihash_entries=", set_ihash_entries);

/*
 * Initialize the waitqueues and inode hash table.
 */
void __init inode_init_early(void)
{
	/* If hashes are distributed across NUMA nodes, defer
	 * hash allocation until vmalloc space is available.
	 */
	if (hashdist)
		return;

	inode_hashtable =
		alloc_large_system_hash("Inode-cache",
					sizeof(struct hlist_head),
					ihash_entries,
					14,
					HASH_EARLY | HASH_ZERO,
					&i_hash_shift,
					&i_hash_mask,
					0,
					0);
}

void __init inode_init(void)
{
	/* inode slab cache */
	inode_cachep = kmem_cache_create("inode_cache",
					 sizeof(struct inode),
					 0,
					 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
					 SLAB_ACCOUNT),
					 init_once);

	/* Hash may have been set up in inode_init_early */
	if (!hashdist)
		return;

	inode_hashtable =
		alloc_large_system_hash("Inode-cache",
					sizeof(struct hlist_head),
					ihash_entries,
					14,
					HASH_ZERO,
					&i_hash_shift,
					&i_hash_mask,
					0,
					0);
}

void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev)
{
	inode->i_mode = mode;
	if (S_ISCHR(mode)) {
		inode->i_fop = &def_chr_fops;
		inode->i_rdev = rdev;
	} else if (S_ISBLK(mode)) {
		if (IS_ENABLED(CONFIG_BLOCK))
			inode->i_fop = &def_blk_fops;
		inode->i_rdev = rdev;
	} else if (S_ISFIFO(mode))
		inode->i_fop = &pipefifo_fops;
	else if (S_ISSOCK(mode))
		;	/* leave it no_open_fops */
	else
		printk(KERN_DEBUG "init_special_inode: bogus i_mode (%o) for"
				  " inode %s:%lu\n", mode, inode->i_sb->s_id,
				  inode->i_ino);
}
EXPORT_SYMBOL(init_special_inode);

/**
 * inode_init_owner - Init uid,gid,mode for new inode according to posix standards
 * @idmap: idmap of the mount the inode was created from
 * @inode: New inode
 * @dir: Directory inode
 * @mode: mode of the new inode
 *
 * If the inode has been created through an idmapped mount the idmap of
 * the vfsmount must be passed through @idmap. This function will then take
 * care to map the inode according to @idmap before checking permissions
 * and initializing i_uid and i_gid. On non-idmapped mounts or if permission
 * checking is to be performed on the raw inode simply pass @nop_mnt_idmap.
 */
void inode_init_owner(struct mnt_idmap *idmap, struct inode *inode,
		      const struct inode *dir, umode_t mode)
{
	inode_fsuid_set(inode, idmap);
	if (dir && dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;

		/* Directories are special, and always inherit S_ISGID */
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else
		inode_fsgid_set(inode, idmap);
	inode->i_mode = mode;
}
EXPORT_SYMBOL(inode_init_owner);
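
/*
 * Example (illustrative sketch): a minimal ->mknod implementation ties
 * inode_init_owner() and init_special_inode() together; the "myfs" names
 * are hypothetical and error handling is elided.
 *
 *	static int myfs_mknod(struct mnt_idmap *idmap, struct inode *dir,
 *			      struct dentry *dentry, umode_t mode, dev_t rdev)
 *	{
 *		struct inode *inode = new_inode(dir->i_sb);
 *
 *		if (!inode)
 *			return -ENOMEM;
 *		inode_init_owner(idmap, inode, dir, mode);
 *		init_special_inode(inode, inode->i_mode, rdev);
 *		d_instantiate(dentry, inode);
 *		return 0;
 *	}
 */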

/**
 * inode_owner_or_capable - check current task permissions to inode
 * @idmap: idmap of the mount the inode was found from
 * @inode: inode being checked
 *
 * Return true if current either has CAP_FOWNER in a namespace with the
 * inode owner uid mapped, or owns the file.
 *
 * If the inode has been found through an idmapped mount the idmap of
 * the vfsmount must be passed through @idmap. This function will then take
 * care to map the inode according to @idmap before checking permissions.
 * On non-idmapped mounts or if permission checking is to be performed on the
 * raw inode simply pass @nop_mnt_idmap.
 */
bool inode_owner_or_capable(struct mnt_idmap *idmap,
			    const struct inode *inode)
{
	vfsuid_t vfsuid;
	struct user_namespace *ns;

	vfsuid = i_uid_into_vfsuid(idmap, inode);
	if (vfsuid_eq_kuid(vfsuid, current_fsuid()))
		return true;

	ns = current_user_ns();
	if (vfsuid_has_mapping(ns, vfsuid) && ns_capable(ns, CAP_FOWNER))
		return true;
	return false;
}
EXPORT_SYMBOL(inode_owner_or_capable);

/*
 * Direct i/o helper functions
 */
bool inode_dio_finished(const struct inode *inode)
{
	return atomic_read(&inode->i_dio_count) == 0;
}
EXPORT_SYMBOL(inode_dio_finished);

/**
 * inode_dio_wait - wait for outstanding DIO requests to finish
 * @inode: inode to wait for
 *
 * Waits for all pending direct I/O requests to finish so that we can
 * proceed with a truncate or equivalent operation.
 *
 * Must be called under a lock that serializes taking new references
 * to i_dio_count, usually inode->i_rwsem.
 */
void inode_dio_wait(struct inode *inode)
{
	wait_var_event(&inode->i_dio_count, inode_dio_finished(inode));
}
EXPORT_SYMBOL(inode_dio_wait);
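
/*
 * Example (illustrative sketch): a setattr path that shrinks a file
 * typically drains direct I/O under the inode lock before truncating:
 *
 *	inode_lock(inode);
 *	inode_dio_wait(inode);
 *	truncate_setsize(inode, newsize);
 *	... filesystem-specific block freeing ...
 *	inode_unlock(inode);
 */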

void inode_dio_wait_interruptible(struct inode *inode)
{
	wait_var_event_interruptible(&inode->i_dio_count,
				     inode_dio_finished(inode));
}
EXPORT_SYMBOL(inode_dio_wait_interruptible);

/*
 * inode_set_flags - atomically set some inode flags
 *
 * Note: the caller should be holding i_rwsem exclusively, or else be sure
 * that they have exclusive access to the inode structure (i.e., while the
 * inode is being instantiated). The reason for the cmpxchg() loop, which
 * wouldn't be necessary if all code paths which modify i_flags actually
 * followed this rule, is that there is at least one code path which
 * doesn't today, so we use cmpxchg() out of an abundance of caution.
 *
 * In the long run, i_rwsem is overkill, and we should probably look
 * at using the i_lock spinlock to protect i_flags, and then make sure
 * it is so documented in include/linux/fs.h and that all code follows
 * the locking convention!!
 */
void inode_set_flags(struct inode *inode, unsigned int flags,
		     unsigned int mask)
{
	WARN_ON_ONCE(flags & ~mask);
	set_mask_bits(&inode->i_flags, mask, flags);
}
EXPORT_SYMBOL(inode_set_flags);

void inode_nohighmem(struct inode *inode)
{
	mapping_set_gfp_mask(inode->i_mapping, GFP_USER);
}
EXPORT_SYMBOL(inode_nohighmem);

struct timespec64 inode_set_ctime_to_ts(struct inode *inode, struct timespec64 ts)
{
	trace_inode_set_ctime_to_ts(inode, &ts);
	set_normalized_timespec64(&ts, ts.tv_sec, ts.tv_nsec);
	inode->i_ctime_sec = ts.tv_sec;
	inode->i_ctime_nsec = ts.tv_nsec;
	return ts;
}
EXPORT_SYMBOL(inode_set_ctime_to_ts);

/**
 * timestamp_truncate - Truncate timespec to a granularity
 * @t: Timespec
 * @inode: inode being updated
 *
 * Truncate a timespec to the granularity supported by the fs
 * containing the inode. Always rounds down. The granularity
 * (sb->s_time_gran) must not be 0 nor greater than a second
 * (NSEC_PER_SEC, or 10^9 ns).
 */
struct timespec64 timestamp_truncate(struct timespec64 t, struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	unsigned int gran = sb->s_time_gran;

	t.tv_sec = clamp(t.tv_sec, sb->s_time_min, sb->s_time_max);
	if (unlikely(t.tv_sec == sb->s_time_max || t.tv_sec == sb->s_time_min))
		t.tv_nsec = 0;

	/* Avoid division in the common cases 1 ns and 1 s. */
	if (gran == 1)
		; /* nothing */
	else if (gran == NSEC_PER_SEC)
		t.tv_nsec = 0;
	else if (gran > 1 && gran < NSEC_PER_SEC)
		t.tv_nsec -= t.tv_nsec % gran;
	else
		WARN(1, "invalid file time granularity: %u", gran);
	return t;
}
EXPORT_SYMBOL(timestamp_truncate);
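
/*
 * Worked example (illustrative): on a filesystem with microsecond
 * granularity (s_time_gran == 1000), a timespec of
 * { .tv_sec = 5, .tv_nsec = 123456789 } truncates to
 * { .tv_sec = 5, .tv_nsec = 123456000 }, since 123456789 % 1000 == 789
 * is subtracted. With s_time_gran == NSEC_PER_SEC (e.g. ext2-style
 * one-second timestamps), the nanoseconds are dropped entirely.
 */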

/**
 * inode_set_ctime_current - set the ctime to current_time
 * @inode: inode
 *
 * Set the inode's ctime to the current value for the inode. Returns the
 * current value that was assigned. If this is not a multigrain inode, then we
 * set it to the later of the coarse time and floor value.
 *
 * If it is multigrain, then we first see if the coarse-grained timestamp is
 * distinct from what is already there. If so, then use that. Otherwise, get a
 * fine-grained timestamp.
 *
 * After that, try to swap the new value into i_ctime_nsec. Accept the
 * resulting ctime, regardless of the outcome of the swap. If it has
 * already been replaced, then that timestamp is later than the earlier
 * unacceptable one, and is thus acceptable.
 */
struct timespec64 inode_set_ctime_current(struct inode *inode)
{
	struct timespec64 now;
	u32 cns, cur;

	ktime_get_coarse_real_ts64_mg(&now);
	now = timestamp_truncate(now, inode);

	/* Just return that if this is not a multigrain fs */
	if (!is_mgtime(inode)) {
		inode_set_ctime_to_ts(inode, now);
		goto out;
	}

	/*
	 * A fine-grained time is only needed if someone has queried
	 * for timestamps, and the current coarse grained time isn't
	 * later than what's already there.
	 */
	cns = smp_load_acquire(&inode->i_ctime_nsec);
	if (cns & I_CTIME_QUERIED) {
		struct timespec64 ctime = { .tv_sec = inode->i_ctime_sec,
					    .tv_nsec = cns & ~I_CTIME_QUERIED };

		if (timespec64_compare(&now, &ctime) <= 0) {
			ktime_get_real_ts64_mg(&now);
			now = timestamp_truncate(now, inode);
			mgtime_counter_inc(mg_fine_stamps);
		}
	}
	mgtime_counter_inc(mg_ctime_updates);

	/* No need to cmpxchg if it's exactly the same */
	if (cns == now.tv_nsec && inode->i_ctime_sec == now.tv_sec) {
		trace_ctime_xchg_skip(inode, &now);
		goto out;
	}
	cur = cns;
retry:
	/* Try to swap the nsec value into place. */
	if (try_cmpxchg(&inode->i_ctime_nsec, &cur, now.tv_nsec)) {
		/* If swap occurred, then we're (mostly) done */
		inode->i_ctime_sec = now.tv_sec;
		trace_ctime_ns_xchg(inode, cns, now.tv_nsec, cur);
		mgtime_counter_inc(mg_ctime_swaps);
	} else {
		/*
		 * Was the change due to someone marking the old ctime QUERIED?
		 * If so then retry the swap. This can only happen once since
		 * the only way to clear I_CTIME_QUERIED is to stamp the inode
		 * with a new ctime.
		 */
		if (!(cns & I_CTIME_QUERIED) && (cns | I_CTIME_QUERIED) == cur) {
			cns = cur;
			goto retry;
		}
		/* Otherwise, keep the existing ctime */
		now.tv_sec = inode->i_ctime_sec;
		now.tv_nsec = cur & ~I_CTIME_QUERIED;
	}
out:
	return now;
}
EXPORT_SYMBOL(inode_set_ctime_current);

/**
 * inode_set_ctime_deleg - try to update the ctime on a delegated inode
 * @inode: inode to update
 * @update: timespec64 to set the ctime
 *
 * Attempt to atomically update the ctime on behalf of a delegation holder.
 *
 * The nfs server can call back the holder of a delegation to get updated
 * inode attributes, including the mtime. When updating the mtime, update
 * the ctime to a value at least equal to that.
 *
 * This can race with concurrent updates to the inode, in which
 * case the update is skipped.
 *
 * Note that this works even when multigrain timestamps are not enabled,
 * so it is used in either case.
 */
struct timespec64 inode_set_ctime_deleg(struct inode *inode, struct timespec64 update)
{
	struct timespec64 now, cur_ts;
	u32 cur, old;

	/* pairs with try_cmpxchg below */
	cur = smp_load_acquire(&inode->i_ctime_nsec);
	cur_ts.tv_nsec = cur & ~I_CTIME_QUERIED;
	cur_ts.tv_sec = inode->i_ctime_sec;

	/* If the update is older than the existing value, skip it. */
	if (timespec64_compare(&update, &cur_ts) <= 0)
		return cur_ts;

	ktime_get_coarse_real_ts64_mg(&now);

	/* Clamp the update to "now" if it's in the future */
	if (timespec64_compare(&update, &now) > 0)
		update = now;

	update = timestamp_truncate(update, inode);

	/* No need to update if the values are already the same */
	if (timespec64_equal(&update, &cur_ts))
		return cur_ts;

	/*
	 * Try to swap the nsec value into place. If it fails, that means
	 * it raced with an update due to a write or similar activity. That
	 * stamp takes precedence, so just skip the update.
	 */
retry:
	old = cur;
	if (try_cmpxchg(&inode->i_ctime_nsec, &cur, update.tv_nsec)) {
		inode->i_ctime_sec = update.tv_sec;
		mgtime_counter_inc(mg_ctime_swaps);
		return update;
	}

	/*
	 * Was the change due to another task marking the old ctime QUERIED?
	 *
	 * If so, then retry the swap. This can only happen once since
	 * the only way to clear I_CTIME_QUERIED is to stamp the inode
	 * with a new ctime.
	 */
	if (!(old & I_CTIME_QUERIED) && (cur == (old | I_CTIME_QUERIED)))
		goto retry;

	/* Otherwise, it was a new timestamp. */
	cur_ts.tv_sec = inode->i_ctime_sec;
	cur_ts.tv_nsec = cur & ~I_CTIME_QUERIED;
	return cur_ts;
}
EXPORT_SYMBOL(inode_set_ctime_deleg);

/**
 * in_group_or_capable - check whether caller is CAP_FSETID privileged
 * @idmap: idmap of the mount @inode was found from
 * @inode: inode to check
 * @vfsgid: the new/current vfsgid of @inode
 *
 * Check whether @vfsgid is in the caller's group list or if the caller is
 * privileged with CAP_FSETID over @inode. This can be used to determine
 * whether the setgid bit can be kept or must be dropped.
 *
 * Return: true if the caller is sufficiently privileged, false if not.
 */
bool in_group_or_capable(struct mnt_idmap *idmap,
			 const struct inode *inode, vfsgid_t vfsgid)
{
	if (vfsgid_in_group_p(vfsgid))
		return true;
	if (capable_wrt_inode_uidgid(idmap, inode, CAP_FSETID))
		return true;
	return false;
}
EXPORT_SYMBOL(in_group_or_capable);

/**
 * mode_strip_sgid - handle the sgid bit for non-directories
 * @idmap: idmap of the mount the inode was created from
 * @dir: parent directory inode
 * @mode: mode of the file to be created in @dir
 *
 * If the @mode of the new file has both the S_ISGID and S_IXGRP bit
 * raised and @dir has the S_ISGID bit raised ensure that the caller is
 * either in the group of the parent directory or they have CAP_FSETID
 * in their user namespace and are privileged over the parent directory.
 * In all other cases, strip the S_ISGID bit from @mode.
 *
 * Return: the new mode to use for the file
 */
umode_t mode_strip_sgid(struct mnt_idmap *idmap,
			const struct inode *dir, umode_t mode)
{
	if ((mode & (S_ISGID | S_IXGRP)) != (S_ISGID | S_IXGRP))
		return mode;
	if (S_ISDIR(mode) || !dir || !(dir->i_mode & S_ISGID))
		return mode;
	if (in_group_or_capable(idmap, dir, i_gid_into_vfsgid(idmap, dir)))
		return mode;
	return mode & ~S_ISGID;
}
EXPORT_SYMBOL(mode_strip_sgid);
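
/*
 * Worked example (illustrative): creating a file with mode 02770
 * (S_ISGID | S_IXGRP set) in a setgid directory owned by a group the
 * caller is not a member of, without CAP_FSETID, yields 0770: the sgid
 * bit is stripped. The same request from a group member, or from a task
 * with CAP_FSETID over the directory, keeps mode 02770 intact.
 */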
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * (C) 1997 Linus Torvalds
4 * (C) 1999 Andrea Arcangeli <andrea@suse.de> (dynamic inode allocation)
5 */
6#include <linux/export.h>
7#include <linux/fs.h>
8#include <linux/mm.h>
9#include <linux/backing-dev.h>
10#include <linux/hash.h>
11#include <linux/swap.h>
12#include <linux/security.h>
13#include <linux/cdev.h>
14#include <linux/memblock.h>
15#include <linux/fscrypt.h>
16#include <linux/fsnotify.h>
17#include <linux/mount.h>
18#include <linux/posix_acl.h>
19#include <linux/prefetch.h>
20#include <linux/buffer_head.h> /* for inode_has_buffers */
21#include <linux/ratelimit.h>
22#include <linux/list_lru.h>
23#include <linux/iversion.h>
24#include <trace/events/writeback.h>
25#include "internal.h"
26
27/*
28 * Inode locking rules:
29 *
30 * inode->i_lock protects:
31 * inode->i_state, inode->i_hash, __iget()
32 * Inode LRU list locks protect:
33 * inode->i_sb->s_inode_lru, inode->i_lru
34 * inode->i_sb->s_inode_list_lock protects:
35 * inode->i_sb->s_inodes, inode->i_sb_list
36 * bdi->wb.list_lock protects:
37 * bdi->wb.b_{dirty,io,more_io,dirty_time}, inode->i_io_list
38 * inode_hash_lock protects:
39 * inode_hashtable, inode->i_hash
40 *
41 * Lock ordering:
42 *
43 * inode->i_sb->s_inode_list_lock
44 * inode->i_lock
45 * Inode LRU list locks
46 *
47 * bdi->wb.list_lock
48 * inode->i_lock
49 *
50 * inode_hash_lock
51 * inode->i_sb->s_inode_list_lock
52 * inode->i_lock
53 *
54 * iunique_lock
55 * inode_hash_lock
56 */
57
58static unsigned int i_hash_mask __read_mostly;
59static unsigned int i_hash_shift __read_mostly;
60static struct hlist_head *inode_hashtable __read_mostly;
61static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock);
62
63/*
64 * Empty aops. Can be used for the cases where the user does not
65 * define any of the address_space operations.
66 */
67const struct address_space_operations empty_aops = {
68};
69EXPORT_SYMBOL(empty_aops);
70
71/*
72 * Statistics gathering..
73 */
74struct inodes_stat_t inodes_stat;
75
76static DEFINE_PER_CPU(unsigned long, nr_inodes);
77static DEFINE_PER_CPU(unsigned long, nr_unused);
78
79static struct kmem_cache *inode_cachep __read_mostly;
80
81static long get_nr_inodes(void)
82{
83 int i;
84 long sum = 0;
85 for_each_possible_cpu(i)
86 sum += per_cpu(nr_inodes, i);
87 return sum < 0 ? 0 : sum;
88}
89
90static inline long get_nr_inodes_unused(void)
91{
92 int i;
93 long sum = 0;
94 for_each_possible_cpu(i)
95 sum += per_cpu(nr_unused, i);
96 return sum < 0 ? 0 : sum;
97}
98
99long get_nr_dirty_inodes(void)
100{
101 /* not actually dirty inodes, but a wild approximation */
102 long nr_dirty = get_nr_inodes() - get_nr_inodes_unused();
103 return nr_dirty > 0 ? nr_dirty : 0;
104}
105
106/*
107 * Handle nr_inode sysctl
108 */
109#ifdef CONFIG_SYSCTL
110int proc_nr_inodes(struct ctl_table *table, int write,
111 void *buffer, size_t *lenp, loff_t *ppos)
112{
113 inodes_stat.nr_inodes = get_nr_inodes();
114 inodes_stat.nr_unused = get_nr_inodes_unused();
115 return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
116}
117#endif
118
119static int no_open(struct inode *inode, struct file *file)
120{
121 return -ENXIO;
122}
123
124/**
125 * inode_init_always - perform inode structure initialisation
126 * @sb: superblock inode belongs to
127 * @inode: inode to initialise
128 *
129 * These are initializations that need to be done on every inode
130 * allocation as the fields are not initialised by slab allocation.
131 */
132int inode_init_always(struct super_block *sb, struct inode *inode)
133{
134 static const struct inode_operations empty_iops;
135 static const struct file_operations no_open_fops = {.open = no_open};
136 struct address_space *const mapping = &inode->i_data;
137
138 inode->i_sb = sb;
139 inode->i_blkbits = sb->s_blocksize_bits;
140 inode->i_flags = 0;
141 atomic64_set(&inode->i_sequence, 0);
142 atomic_set(&inode->i_count, 1);
143 inode->i_op = &empty_iops;
144 inode->i_fop = &no_open_fops;
145 inode->__i_nlink = 1;
146 inode->i_opflags = 0;
147 if (sb->s_xattr)
148 inode->i_opflags |= IOP_XATTR;
149 i_uid_write(inode, 0);
150 i_gid_write(inode, 0);
151 atomic_set(&inode->i_writecount, 0);
152 inode->i_size = 0;
153 inode->i_write_hint = WRITE_LIFE_NOT_SET;
154 inode->i_blocks = 0;
155 inode->i_bytes = 0;
156 inode->i_generation = 0;
157 inode->i_pipe = NULL;
158 inode->i_bdev = NULL;
159 inode->i_cdev = NULL;
160 inode->i_link = NULL;
161 inode->i_dir_seq = 0;
162 inode->i_rdev = 0;
163 inode->dirtied_when = 0;
164
165#ifdef CONFIG_CGROUP_WRITEBACK
166 inode->i_wb_frn_winner = 0;
167 inode->i_wb_frn_avg_time = 0;
168 inode->i_wb_frn_history = 0;
169#endif
170
171 if (security_inode_alloc(inode))
172 goto out;
173 spin_lock_init(&inode->i_lock);
174 lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key);
175
176 init_rwsem(&inode->i_rwsem);
177 lockdep_set_class(&inode->i_rwsem, &sb->s_type->i_mutex_key);
178
179 atomic_set(&inode->i_dio_count, 0);
180
181 mapping->a_ops = &empty_aops;
182 mapping->host = inode;
183 mapping->flags = 0;
184 mapping->wb_err = 0;
185 atomic_set(&mapping->i_mmap_writable, 0);
186#ifdef CONFIG_READ_ONLY_THP_FOR_FS
187 atomic_set(&mapping->nr_thps, 0);
188#endif
189 mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE);
190 mapping->private_data = NULL;
191 mapping->writeback_index = 0;
192 inode->i_private = NULL;
193 inode->i_mapping = mapping;
194 INIT_HLIST_HEAD(&inode->i_dentry); /* buggered by rcu freeing */
195#ifdef CONFIG_FS_POSIX_ACL
196 inode->i_acl = inode->i_default_acl = ACL_NOT_CACHED;
197#endif
198
199#ifdef CONFIG_FSNOTIFY
200 inode->i_fsnotify_mask = 0;
201#endif
202 inode->i_flctx = NULL;
203 this_cpu_inc(nr_inodes);
204
205 return 0;
206out:
207 return -ENOMEM;
208}
209EXPORT_SYMBOL(inode_init_always);
210
211void free_inode_nonrcu(struct inode *inode)
212{
213 kmem_cache_free(inode_cachep, inode);
214}
215EXPORT_SYMBOL(free_inode_nonrcu);
216
217static void i_callback(struct rcu_head *head)
218{
219 struct inode *inode = container_of(head, struct inode, i_rcu);
220 if (inode->free_inode)
221 inode->free_inode(inode);
222 else
223 free_inode_nonrcu(inode);
224}
225
226static struct inode *alloc_inode(struct super_block *sb)
227{
228 const struct super_operations *ops = sb->s_op;
229 struct inode *inode;
230
231 if (ops->alloc_inode)
232 inode = ops->alloc_inode(sb);
233 else
234 inode = kmem_cache_alloc(inode_cachep, GFP_KERNEL);
235
236 if (!inode)
237 return NULL;
238
239 if (unlikely(inode_init_always(sb, inode))) {
240 if (ops->destroy_inode) {
241 ops->destroy_inode(inode);
242 if (!ops->free_inode)
243 return NULL;
244 }
245 inode->free_inode = ops->free_inode;
246 i_callback(&inode->i_rcu);
247 return NULL;
248 }
249
250 return inode;
251}
252
253void __destroy_inode(struct inode *inode)
254{
255 BUG_ON(inode_has_buffers(inode));
256 inode_detach_wb(inode);
257 security_inode_free(inode);
258 fsnotify_inode_delete(inode);
259 locks_free_lock_context(inode);
260 if (!inode->i_nlink) {
261 WARN_ON(atomic_long_read(&inode->i_sb->s_remove_count) == 0);
262 atomic_long_dec(&inode->i_sb->s_remove_count);
263 }
264
265#ifdef CONFIG_FS_POSIX_ACL
266 if (inode->i_acl && !is_uncached_acl(inode->i_acl))
267 posix_acl_release(inode->i_acl);
268 if (inode->i_default_acl && !is_uncached_acl(inode->i_default_acl))
269 posix_acl_release(inode->i_default_acl);
270#endif
271 this_cpu_dec(nr_inodes);
272}
273EXPORT_SYMBOL(__destroy_inode);
274
275static void destroy_inode(struct inode *inode)
276{
277 const struct super_operations *ops = inode->i_sb->s_op;
278
279 BUG_ON(!list_empty(&inode->i_lru));
280 __destroy_inode(inode);
281 if (ops->destroy_inode) {
282 ops->destroy_inode(inode);
283 if (!ops->free_inode)
284 return;
285 }
286 inode->free_inode = ops->free_inode;
287 call_rcu(&inode->i_rcu, i_callback);
288}
289
290/**
291 * drop_nlink - directly drop an inode's link count
292 * @inode: inode
293 *
294 * This is a low-level filesystem helper to replace any
295 * direct filesystem manipulation of i_nlink. In cases
296 * where we are attempting to track writes to the
297 * filesystem, a decrement to zero means an imminent
298 * write when the file is truncated and actually unlinked
299 * on the filesystem.
300 */
301void drop_nlink(struct inode *inode)
302{
303 WARN_ON(inode->i_nlink == 0);
304 inode->__i_nlink--;
305 if (!inode->i_nlink)
306 atomic_long_inc(&inode->i_sb->s_remove_count);
307}
308EXPORT_SYMBOL(drop_nlink);
309
310/**
311 * clear_nlink - directly zero an inode's link count
312 * @inode: inode
313 *
314 * This is a low-level filesystem helper to replace any
315 * direct filesystem manipulation of i_nlink. See
316 * drop_nlink() for why we care about i_nlink hitting zero.
317 */
318void clear_nlink(struct inode *inode)
319{
320 if (inode->i_nlink) {
321 inode->__i_nlink = 0;
322 atomic_long_inc(&inode->i_sb->s_remove_count);
323 }
324}
325EXPORT_SYMBOL(clear_nlink);
326
327/**
328 * set_nlink - directly set an inode's link count
329 * @inode: inode
330 * @nlink: new nlink (should be non-zero)
331 *
332 * This is a low-level filesystem helper to replace any
333 * direct filesystem manipulation of i_nlink.
334 */
335void set_nlink(struct inode *inode, unsigned int nlink)
336{
337 if (!nlink) {
338 clear_nlink(inode);
339 } else {
340 /* Yes, some filesystems do change nlink from zero to one */
341 if (inode->i_nlink == 0)
342 atomic_long_dec(&inode->i_sb->s_remove_count);
343
344 inode->__i_nlink = nlink;
345 }
346}
347EXPORT_SYMBOL(set_nlink);
348
349/**
350 * inc_nlink - directly increment an inode's link count
351 * @inode: inode
352 *
353 * This is a low-level filesystem helper to replace any
354 * direct filesystem manipulation of i_nlink. Currently,
355 * it is only here for parity with dec_nlink().
356 */
357void inc_nlink(struct inode *inode)
358{
359 if (unlikely(inode->i_nlink == 0)) {
360 WARN_ON(!(inode->i_state & I_LINKABLE));
361 atomic_long_dec(&inode->i_sb->s_remove_count);
362 }
363
364 inode->__i_nlink++;
365}
366EXPORT_SYMBOL(inc_nlink);
367
368static void __address_space_init_once(struct address_space *mapping)
369{
370 xa_init_flags(&mapping->i_pages, XA_FLAGS_LOCK_IRQ | XA_FLAGS_ACCOUNT);
371 init_rwsem(&mapping->i_mmap_rwsem);
372 INIT_LIST_HEAD(&mapping->private_list);
373 spin_lock_init(&mapping->private_lock);
374 mapping->i_mmap = RB_ROOT_CACHED;
375}
376
377void address_space_init_once(struct address_space *mapping)
378{
379 memset(mapping, 0, sizeof(*mapping));
380 __address_space_init_once(mapping);
381}
382EXPORT_SYMBOL(address_space_init_once);
383
384/*
385 * These are initializations that only need to be done
386 * once, because the fields are idempotent across use
387 * of the inode, so let the slab aware of that.
388 */
389void inode_init_once(struct inode *inode)
390{
391 memset(inode, 0, sizeof(*inode));
392 INIT_HLIST_NODE(&inode->i_hash);
393 INIT_LIST_HEAD(&inode->i_devices);
394 INIT_LIST_HEAD(&inode->i_io_list);
395 INIT_LIST_HEAD(&inode->i_wb_list);
396 INIT_LIST_HEAD(&inode->i_lru);
397 __address_space_init_once(&inode->i_data);
398 i_size_ordered_init(inode);
399}
400EXPORT_SYMBOL(inode_init_once);
401
402static void init_once(void *foo)
403{
404 struct inode *inode = (struct inode *) foo;
405
406 inode_init_once(inode);
407}
408
409/*
410 * inode->i_lock must be held
411 */
412void __iget(struct inode *inode)
413{
414 atomic_inc(&inode->i_count);
415}
416
417/*
418 * get additional reference to inode; caller must already hold one.
419 */
420void ihold(struct inode *inode)
421{
422 WARN_ON(atomic_inc_return(&inode->i_count) < 2);
423}
424EXPORT_SYMBOL(ihold);
425
426static void inode_lru_list_add(struct inode *inode)
427{
428 if (list_lru_add(&inode->i_sb->s_inode_lru, &inode->i_lru))
429 this_cpu_inc(nr_unused);
430 else
431 inode->i_state |= I_REFERENCED;
432}
433
434/*
435 * Add inode to LRU if needed (inode is unused and clean).
436 *
437 * Needs inode->i_lock held.
438 */
439void inode_add_lru(struct inode *inode)
440{
441 if (!(inode->i_state & (I_DIRTY_ALL | I_SYNC |
442 I_FREEING | I_WILL_FREE)) &&
443 !atomic_read(&inode->i_count) && inode->i_sb->s_flags & SB_ACTIVE)
444 inode_lru_list_add(inode);
445}
446
447
448static void inode_lru_list_del(struct inode *inode)
449{
450
451 if (list_lru_del(&inode->i_sb->s_inode_lru, &inode->i_lru))
452 this_cpu_dec(nr_unused);
453}
454
455/**
456 * inode_sb_list_add - add inode to the superblock list of inodes
457 * @inode: inode to add
458 */
459void inode_sb_list_add(struct inode *inode)
460{
461 spin_lock(&inode->i_sb->s_inode_list_lock);
462 list_add(&inode->i_sb_list, &inode->i_sb->s_inodes);
463 spin_unlock(&inode->i_sb->s_inode_list_lock);
464}
465EXPORT_SYMBOL_GPL(inode_sb_list_add);
466
467static inline void inode_sb_list_del(struct inode *inode)
468{
469 if (!list_empty(&inode->i_sb_list)) {
470 spin_lock(&inode->i_sb->s_inode_list_lock);
471 list_del_init(&inode->i_sb_list);
472 spin_unlock(&inode->i_sb->s_inode_list_lock);
473 }
474}
475
476static unsigned long hash(struct super_block *sb, unsigned long hashval)
477{
478 unsigned long tmp;
479
480 tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
481 L1_CACHE_BYTES;
482 tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> i_hash_shift);
483 return tmp & i_hash_mask;
484}
485
486/**
487 * __insert_inode_hash - hash an inode
488 * @inode: unhashed inode
489 * @hashval: unsigned long value used to locate this object in the
490 * inode_hashtable.
491 *
492 * Add an inode to the inode hash for this superblock.
493 */
494void __insert_inode_hash(struct inode *inode, unsigned long hashval)
495{
496 struct hlist_head *b = inode_hashtable + hash(inode->i_sb, hashval);
497
498 spin_lock(&inode_hash_lock);
499 spin_lock(&inode->i_lock);
500 hlist_add_head_rcu(&inode->i_hash, b);
501 spin_unlock(&inode->i_lock);
502 spin_unlock(&inode_hash_lock);
503}
504EXPORT_SYMBOL(__insert_inode_hash);
505
506/**
507 * __remove_inode_hash - remove an inode from the hash
508 * @inode: inode to unhash
509 *
510 * Remove an inode from the superblock.
511 */
512void __remove_inode_hash(struct inode *inode)
513{
514 spin_lock(&inode_hash_lock);
515 spin_lock(&inode->i_lock);
516 hlist_del_init_rcu(&inode->i_hash);
517 spin_unlock(&inode->i_lock);
518 spin_unlock(&inode_hash_lock);
519}
520EXPORT_SYMBOL(__remove_inode_hash);
521
522void clear_inode(struct inode *inode)
523{
524 /*
525 * We have to cycle the i_pages lock here because reclaim can be in the
526 * process of removing the last page (in __delete_from_page_cache())
527 * and we must not free the mapping under it.
528 */
529 xa_lock_irq(&inode->i_data.i_pages);
530 BUG_ON(inode->i_data.nrpages);
531 BUG_ON(inode->i_data.nrexceptional);
532 xa_unlock_irq(&inode->i_data.i_pages);
533 BUG_ON(!list_empty(&inode->i_data.private_list));
534 BUG_ON(!(inode->i_state & I_FREEING));
535 BUG_ON(inode->i_state & I_CLEAR);
536 BUG_ON(!list_empty(&inode->i_wb_list));
537 /* don't need i_lock here, no concurrent mods to i_state */
538 inode->i_state = I_FREEING | I_CLEAR;
539}
540EXPORT_SYMBOL(clear_inode);
541
542/*
543 * Free the inode passed in, removing it from the lists it is still connected
544 * to. We remove any pages still attached to the inode and wait for any IO that
545 * is still in progress before finally destroying the inode.
546 *
547 * An inode must already be marked I_FREEING so that we avoid the inode being
548 * moved back onto lists if we race with other code that manipulates the lists
549 * (e.g. writeback_single_inode). The caller is responsible for setting this.
550 *
551 * An inode must already be removed from the LRU list before being evicted from
552 * the cache. This should occur atomically with setting the I_FREEING state
553 * flag, so no inodes here should ever be on the LRU when being evicted.
554 */
555static void evict(struct inode *inode)
556{
557 const struct super_operations *op = inode->i_sb->s_op;
558
559 BUG_ON(!(inode->i_state & I_FREEING));
560 BUG_ON(!list_empty(&inode->i_lru));
561
562 if (!list_empty(&inode->i_io_list))
563 inode_io_list_del(inode);
564
565 inode_sb_list_del(inode);
566
567 /*
568 * Wait for flusher thread to be done with the inode so that filesystem
569 * does not start destroying it while writeback is still running. Since
570 * the inode has I_FREEING set, flusher thread won't start new work on
571 * the inode. We just have to wait for running writeback to finish.
572 */
573 inode_wait_for_writeback(inode);
574
575 if (op->evict_inode) {
576 op->evict_inode(inode);
577 } else {
578 truncate_inode_pages_final(&inode->i_data);
579 clear_inode(inode);
580 }
581 if (S_ISBLK(inode->i_mode) && inode->i_bdev)
582 bd_forget(inode);
583 if (S_ISCHR(inode->i_mode) && inode->i_cdev)
584 cd_forget(inode);
585
586 remove_inode_hash(inode);
587
588 spin_lock(&inode->i_lock);
589 wake_up_bit(&inode->i_state, __I_NEW);
590 BUG_ON(inode->i_state != (I_FREEING | I_CLEAR));
591 spin_unlock(&inode->i_lock);
592
593 destroy_inode(inode);
594}
595
596/*
597 * dispose_list - dispose of the contents of a local list
598 * @head: the head of the list to free
599 *
600 * Dispose-list gets a local list with local inodes in it, so it doesn't
601 * need to worry about list corruption and SMP locks.
602 */
603static void dispose_list(struct list_head *head)
604{
605 while (!list_empty(head)) {
606 struct inode *inode;
607
608 inode = list_first_entry(head, struct inode, i_lru);
609 list_del_init(&inode->i_lru);
610
611 evict(inode);
612 cond_resched();
613 }
614}
615
616/**
617 * evict_inodes - evict all evictable inodes for a superblock
618 * @sb: superblock to operate on
619 *
620 * Make sure that no inodes with zero refcount are retained. This is
621 * called by superblock shutdown after having SB_ACTIVE flag removed,
622 * so any inode reaching zero refcount during or after that call will
623 * be immediately evicted.
624 */
625void evict_inodes(struct super_block *sb)
626{
627 struct inode *inode, *next;
628 LIST_HEAD(dispose);
629
630again:
631 spin_lock(&sb->s_inode_list_lock);
632 list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
633 if (atomic_read(&inode->i_count))
634 continue;
635
636 spin_lock(&inode->i_lock);
637 if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
638 spin_unlock(&inode->i_lock);
639 continue;
640 }
641
642 inode->i_state |= I_FREEING;
643 inode_lru_list_del(inode);
644 spin_unlock(&inode->i_lock);
645 list_add(&inode->i_lru, &dispose);
646
647 /*
648 * We can have a ton of inodes to evict at unmount time given
649 * enough memory, check to see if we need to go to sleep for a
650 * bit so we don't livelock.
651 */
652 if (need_resched()) {
653 spin_unlock(&sb->s_inode_list_lock);
654 cond_resched();
655 dispose_list(&dispose);
656 goto again;
657 }
658 }
659 spin_unlock(&sb->s_inode_list_lock);
660
661 dispose_list(&dispose);
662}
663EXPORT_SYMBOL_GPL(evict_inodes);
664
665/**
666 * invalidate_inodes - attempt to free all inodes on a superblock
667 * @sb: superblock to operate on
668 * @kill_dirty: flag to guide handling of dirty inodes
669 *
670 * Attempts to free all inodes for a given superblock. If there were any
671 * busy inodes return a non-zero value, else zero.
672 * If @kill_dirty is set, discard dirty inodes too, otherwise treat
673 * them as busy.
674 */
675int invalidate_inodes(struct super_block *sb, bool kill_dirty)
676{
677 int busy = 0;
678 struct inode *inode, *next;
679 LIST_HEAD(dispose);
680
681again:
682 spin_lock(&sb->s_inode_list_lock);
683 list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
684 spin_lock(&inode->i_lock);
685 if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
686 spin_unlock(&inode->i_lock);
687 continue;
688 }
689 if (inode->i_state & I_DIRTY_ALL && !kill_dirty) {
690 spin_unlock(&inode->i_lock);
691 busy = 1;
692 continue;
693 }
694 if (atomic_read(&inode->i_count)) {
695 spin_unlock(&inode->i_lock);
696 busy = 1;
697 continue;
698 }
699
700 inode->i_state |= I_FREEING;
701 inode_lru_list_del(inode);
702 spin_unlock(&inode->i_lock);
703 list_add(&inode->i_lru, &dispose);
704 if (need_resched()) {
705 spin_unlock(&sb->s_inode_list_lock);
706 cond_resched();
707 dispose_list(&dispose);
708 goto again;
709 }
710 }
711 spin_unlock(&sb->s_inode_list_lock);
712
713 dispose_list(&dispose);
714
715 return busy;
716}
717
718/*
719 * Isolate the inode from the LRU in preparation for freeing it.
720 *
721 * Any inodes which are pinned purely because of attached pagecache have their
722 * pagecache removed. If the inode has metadata buffers attached to
723 * mapping->private_list then try to remove them.
724 *
725 * If the inode has the I_REFERENCED flag set, then it means that it has been
726 * used recently - the flag is set in iput_final(). When we encounter such an
727 * inode, clear the flag and move it to the back of the LRU so it gets another
728 * pass through the LRU before it gets reclaimed. This is necessary because of
729 * the fact we are doing lazy LRU updates to minimise lock contention so the
730 * LRU does not have strict ordering. Hence we don't want to reclaim inodes
731 * with this flag set because they are the inodes that are out of order.
732 */
733static enum lru_status inode_lru_isolate(struct list_head *item,
734 struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
735{
736 struct list_head *freeable = arg;
737 struct inode *inode = container_of(item, struct inode, i_lru);
738
739 /*
740 * we are inverting the lru lock/inode->i_lock here, so use a trylock.
741 * If we fail to get the lock, just skip it.
742 */
743 if (!spin_trylock(&inode->i_lock))
744 return LRU_SKIP;
745
746 /*
747 * Referenced or dirty inodes are still in use. Give them another pass
748 * through the LRU as we canot reclaim them now.
749 */
750 if (atomic_read(&inode->i_count) ||
751 (inode->i_state & ~I_REFERENCED)) {
752 list_lru_isolate(lru, &inode->i_lru);
753 spin_unlock(&inode->i_lock);
754 this_cpu_dec(nr_unused);
755 return LRU_REMOVED;
756 }
757
758 /* recently referenced inodes get one more pass */
759 if (inode->i_state & I_REFERENCED) {
760 inode->i_state &= ~I_REFERENCED;
761 spin_unlock(&inode->i_lock);
762 return LRU_ROTATE;
763 }
764
765 if (inode_has_buffers(inode) || inode->i_data.nrpages) {
766 __iget(inode);
767 spin_unlock(&inode->i_lock);
768 spin_unlock(lru_lock);
769 if (remove_inode_buffers(inode)) {
770 unsigned long reap;
771 reap = invalidate_mapping_pages(&inode->i_data, 0, -1);
772 if (current_is_kswapd())
773 __count_vm_events(KSWAPD_INODESTEAL, reap);
774 else
775 __count_vm_events(PGINODESTEAL, reap);
776 if (current->reclaim_state)
777 current->reclaim_state->reclaimed_slab += reap;
778 }
779 iput(inode);
780 spin_lock(lru_lock);
781 return LRU_RETRY;
782 }
783
784 WARN_ON(inode->i_state & I_NEW);
785 inode->i_state |= I_FREEING;
786 list_lru_isolate_move(lru, &inode->i_lru, freeable);
787 spin_unlock(&inode->i_lock);
788
789 this_cpu_dec(nr_unused);
790 return LRU_REMOVED;
791}
792
793/*
794 * Walk the superblock inode LRU for freeable inodes and attempt to free them.
795 * This is called from the superblock shrinker function with a number of inodes
796 * to trim from the LRU. Inodes to be freed are moved to a temporary list and
797 * then are freed outside inode_lock by dispose_list().
798 */
799long prune_icache_sb(struct super_block *sb, struct shrink_control *sc)
800{
801 LIST_HEAD(freeable);
802 long freed;
803
804 freed = list_lru_shrink_walk(&sb->s_inode_lru, sc,
805 inode_lru_isolate, &freeable);
806 dispose_list(&freeable);
807 return freed;
808}
809
810static void __wait_on_freeing_inode(struct inode *inode);
811/*
812 * Called with the inode_hash_lock held.
813 */
814static struct inode *find_inode(struct super_block *sb,
815 struct hlist_head *head,
816 int (*test)(struct inode *, void *),
817 void *data)
818{
819 struct inode *inode = NULL;
820
821repeat:
822 hlist_for_each_entry(inode, head, i_hash) {
823 if (inode->i_sb != sb)
824 continue;
825 if (!test(inode, data))
826 continue;
827 spin_lock(&inode->i_lock);
828 if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
829 __wait_on_freeing_inode(inode);
830 goto repeat;
831 }
832 if (unlikely(inode->i_state & I_CREATING)) {
833 spin_unlock(&inode->i_lock);
834 return ERR_PTR(-ESTALE);
835 }
836 __iget(inode);
837 spin_unlock(&inode->i_lock);
838 return inode;
839 }
840 return NULL;
841}
842
843/*
844 * find_inode_fast is the fast path version of find_inode, see the comment at
845 * iget_locked for details.
846 */
847static struct inode *find_inode_fast(struct super_block *sb,
848 struct hlist_head *head, unsigned long ino)
849{
850 struct inode *inode = NULL;
851
852repeat:
853 hlist_for_each_entry(inode, head, i_hash) {
854 if (inode->i_ino != ino)
855 continue;
856 if (inode->i_sb != sb)
857 continue;
858 spin_lock(&inode->i_lock);
859 if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
860 __wait_on_freeing_inode(inode);
861 goto repeat;
862 }
863 if (unlikely(inode->i_state & I_CREATING)) {
864 spin_unlock(&inode->i_lock);
865 return ERR_PTR(-ESTALE);
866 }
867 __iget(inode);
868 spin_unlock(&inode->i_lock);
869 return inode;
870 }
871 return NULL;
872}
873
874/*
875 * Each cpu owns a range of LAST_INO_BATCH numbers.
876 * 'shared_last_ino' is dirtied only once out of LAST_INO_BATCH allocations,
877 * to renew the exhausted range.
878 *
879 * This does not significantly increase the overflow rate because every CPU
880 * can consume at most LAST_INO_BATCH-1 unused inode numbers, so the wastage
881 * is bounded by NR_CPUS*(LAST_INO_BATCH-1). With NR_CPUS = 4096 and
882 * LAST_INO_BATCH = 1024 that is ~0.1% of the 2^32 range, and a worst case.
883 * Even 50% wastage would only double the overflow rate, which does not seem
 * too significant.
884 *
885 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
886 * error if st_ino won't fit in target struct field. Use 32bit counter
887 * here to attempt to avoid that.
888 */
889#define LAST_INO_BATCH 1024
890static DEFINE_PER_CPU(unsigned int, last_ino);
891
892unsigned int get_next_ino(void)
893{
894 unsigned int *p = &get_cpu_var(last_ino);
895 unsigned int res = *p;
896
897#ifdef CONFIG_SMP
898 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
899 static atomic_t shared_last_ino;
900 int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
901
902 res = next - LAST_INO_BATCH;
903 }
904#endif
905
906 res++;
907 /* get_next_ino should not provide a 0 inode number */
908 if (unlikely(!res))
909 res++;
910 *p = res;
911 put_cpu_var(last_ino);
912 return res;
913}
914EXPORT_SYMBOL(get_next_ino);
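
/*
 * Example usage (editor's sketch, not part of the source proper): pseudo
 * filesystems with no stable on-disk inode numbers typically assign i_ino
 * straight from this counter when instantiating an inode:
 *
 *	inode->i_ino = get_next_ino();
 *
 * The 32-bit counter may wrap, so duplicates are possible in theory;
 * callers that need guaranteed uniqueness should use iunique() instead.
 */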
915
916/**
917 * new_inode_pseudo - obtain an inode
918 * @sb: superblock
919 *
920 * Allocates a new inode for the given superblock.
921 * The inode won't be chained into the superblock's s_inodes list.
922 * This means:
923 * - the fs can't be unmounted while such an inode is in use
924 * - quotas, fsnotify and writeback can't see or act on the inode
925 */
926struct inode *new_inode_pseudo(struct super_block *sb)
927{
928 struct inode *inode = alloc_inode(sb);
929
930 if (inode) {
931 spin_lock(&inode->i_lock);
932 inode->i_state = 0;
933 spin_unlock(&inode->i_lock);
934 INIT_LIST_HEAD(&inode->i_sb_list);
935 }
936 return inode;
937}
938
939/**
940 * new_inode - obtain an inode
941 * @sb: superblock
942 *
943 * Allocates a new inode for the given superblock. The default gfp_mask
944 * for allocations related to inode->i_mapping is GFP_HIGHUSER_MOVABLE.
945 * If HIGHMEM pages are unsuitable or it is known that pages allocated
946 * for the page cache are not reclaimable or migratable,
947 * mapping_set_gfp_mask() must be called with suitable flags on the
948 * newly created inode's mapping.
950 */
951struct inode *new_inode(struct super_block *sb)
952{
953 struct inode *inode;
954
955 spin_lock_prefetch(&sb->s_inode_list_lock);
956
957 inode = new_inode_pseudo(sb);
958 if (inode)
959 inode_sb_list_add(inode);
960 return inode;
961}
962EXPORT_SYMBOL(new_inode);
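
/*
 * Example (editor's sketch): a minimal in-memory create path in the style
 * of ramfs; "dir" and "mode" come from the caller and error handling for
 * the rest of the operation is elided:
 *
 *	struct inode *inode = new_inode(sb);
 *
 *	if (!inode)
 *		return -ENOMEM;
 *	inode->i_ino = get_next_ino();
 *	inode_init_owner(inode, dir, mode);
 *	inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
 */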
963
964#ifdef CONFIG_DEBUG_LOCK_ALLOC
965void lockdep_annotate_inode_mutex_key(struct inode *inode)
966{
967 if (S_ISDIR(inode->i_mode)) {
968 struct file_system_type *type = inode->i_sb->s_type;
969
970 /* Set new key only if filesystem hasn't already changed it */
971 if (lockdep_match_class(&inode->i_rwsem, &type->i_mutex_key)) {
972 /*
973 * ensure nobody is actually holding i_mutex
974 */
975 // mutex_destroy(&inode->i_mutex);
976 init_rwsem(&inode->i_rwsem);
977 lockdep_set_class(&inode->i_rwsem,
978 &type->i_mutex_dir_key);
979 }
980 }
981}
982EXPORT_SYMBOL(lockdep_annotate_inode_mutex_key);
983#endif
984
985/**
986 * unlock_new_inode - clear the I_NEW state and wake up any waiters
987 * @inode: new inode to unlock
988 *
989 * Called when the inode is fully initialised to clear the new state of the
990 * inode and wake up anyone waiting for the inode to finish initialisation.
991 */
992void unlock_new_inode(struct inode *inode)
993{
994 lockdep_annotate_inode_mutex_key(inode);
995 spin_lock(&inode->i_lock);
996 WARN_ON(!(inode->i_state & I_NEW));
997 inode->i_state &= ~I_NEW & ~I_CREATING;
998 smp_mb();
999 wake_up_bit(&inode->i_state, __I_NEW);
1000 spin_unlock(&inode->i_lock);
1001}
1002EXPORT_SYMBOL(unlock_new_inode);
1003
1004void discard_new_inode(struct inode *inode)
1005{
1006 lockdep_annotate_inode_mutex_key(inode);
1007 spin_lock(&inode->i_lock);
1008 WARN_ON(!(inode->i_state & I_NEW));
1009 inode->i_state &= ~I_NEW;
1010 smp_mb();
1011 wake_up_bit(&inode->i_state, __I_NEW);
1012 spin_unlock(&inode->i_lock);
1013 iput(inode);
1014}
1015EXPORT_SYMBOL(discard_new_inode);
1016
1017/**
1018 * lock_two_nondirectories - take two i_mutexes on non-directory objects
1019 *
1020 * Lock any non-NULL argument that is not a directory.
1021 * Zero, one or two objects may be locked by this function.
1022 *
1023 * @inode1: first inode to lock
1024 * @inode2: second inode to lock
1025 */
1026void lock_two_nondirectories(struct inode *inode1, struct inode *inode2)
1027{
1028 if (inode1 > inode2)
1029 swap(inode1, inode2);
1030
1031 if (inode1 && !S_ISDIR(inode1->i_mode))
1032 inode_lock(inode1);
1033 if (inode2 && !S_ISDIR(inode2->i_mode) && inode2 != inode1)
1034 inode_lock_nested(inode2, I_MUTEX_NONDIR2);
1035}
1036EXPORT_SYMBOL(lock_two_nondirectories);
1037
1038/**
1039 * unlock_two_nondirectories - release locks from lock_two_nondirectories()
1040 * @inode1: first inode to unlock
1041 * @inode2: second inode to unlock
1042 */
1043void unlock_two_nondirectories(struct inode *inode1, struct inode *inode2)
1044{
1045 if (inode1 && !S_ISDIR(inode1->i_mode))
1046 inode_unlock(inode1);
1047 if (inode2 && !S_ISDIR(inode2->i_mode) && inode2 != inode1)
1048 inode_unlock(inode2);
1049}
1050EXPORT_SYMBOL(unlock_two_nondirectories);
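
/*
 * Example (editor's sketch): helpers that operate on two possibly
 * identical non-directory inodes, such as a clone/dedupe implementation,
 * bracket their work with this pair ("do_both" is hypothetical):
 *
 *	lock_two_nondirectories(inode_in, inode_out);
 *	ret = do_both(inode_in, inode_out);
 *	unlock_two_nondirectories(inode_in, inode_out);
 *
 * Passing the same inode twice, or a NULL, is safe: each inode is locked
 * at most once, and only non-directories are locked at all.
 */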
1051
1052/**
1053 * inode_insert5 - obtain an inode from a mounted file system
1054 * @inode: pre-allocated inode to use for insert to cache
1055 * @hashval: hash value (usually inode number) to get
1056 * @test: callback used for comparisons between inodes
1057 * @set: callback used to initialize a new struct inode
1058 * @data: opaque data pointer to pass to @test and @set
1059 *
1060 * Search for the inode specified by @hashval and @data in the inode cache,
1061 * and if present return it with an increased reference count. This is
1062 * a variant of iget5_locked() for callers that don't want to fail on memory
1063 * allocation of a new inode.
1064 *
1065 * If the inode is not in cache, insert the pre-allocated inode to cache and
1066 * return it locked, hashed, and with the I_NEW flag set. The file system gets
1067 * to fill it in before unlocking it via unlock_new_inode().
1068 *
1069 * Note both @test and @set are called with the inode_hash_lock held, so can't
1070 * sleep.
1071 */
1072struct inode *inode_insert5(struct inode *inode, unsigned long hashval,
1073 int (*test)(struct inode *, void *),
1074 int (*set)(struct inode *, void *), void *data)
1075{
1076 struct hlist_head *head = inode_hashtable + hash(inode->i_sb, hashval);
1077 struct inode *old;
1078 bool creating = inode->i_state & I_CREATING;
1079
1080again:
1081 spin_lock(&inode_hash_lock);
1082 old = find_inode(inode->i_sb, head, test, data);
1083 if (unlikely(old)) {
1084 /*
1085 * Uhhuh, somebody else created the same inode under us.
1086 * Use the old inode instead of the preallocated one.
1087 */
1088 spin_unlock(&inode_hash_lock);
1089 if (IS_ERR(old))
1090 return NULL;
1091 wait_on_inode(old);
1092 if (unlikely(inode_unhashed(old))) {
1093 iput(old);
1094 goto again;
1095 }
1096 return old;
1097 }
1098
1099 if (set && unlikely(set(inode, data))) {
1100 inode = NULL;
1101 goto unlock;
1102 }
1103
1104 /*
1105	 * Return the locked inode with I_NEW set; the
1106	 * caller is responsible for filling in the contents.
1107 */
1108 spin_lock(&inode->i_lock);
1109 inode->i_state |= I_NEW;
1110 hlist_add_head_rcu(&inode->i_hash, head);
1111 spin_unlock(&inode->i_lock);
1112 if (!creating)
1113 inode_sb_list_add(inode);
1114unlock:
1115 spin_unlock(&inode_hash_lock);
1116
1117 return inode;
1118}
1119EXPORT_SYMBOL(inode_insert5);
1120
1121/**
1122 * iget5_locked - obtain an inode from a mounted file system
1123 * @sb: super block of file system
1124 * @hashval: hash value (usually inode number) to get
1125 * @test: callback used for comparisons between inodes
1126 * @set: callback used to initialize a new struct inode
1127 * @data: opaque data pointer to pass to @test and @set
1128 *
1129 * Search for the inode specified by @hashval and @data in the inode cache,
1130 * and if present return it with an increased reference count. This is
1131 * a generalized version of iget_locked() for file systems where the inode
1132 * number is not sufficient for unique identification of an inode.
1133 *
1134 * If the inode is not in cache, allocate a new inode and return it locked,
1135 * hashed, and with the I_NEW flag set. The file system gets to fill it in
1136 * before unlocking it via unlock_new_inode().
1137 *
1138 * Note both @test and @set are called with the inode_hash_lock held, so can't
1139 * sleep.
1140 */
1141struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
1142 int (*test)(struct inode *, void *),
1143 int (*set)(struct inode *, void *), void *data)
1144{
1145 struct inode *inode = ilookup5(sb, hashval, test, data);
1146
1147 if (!inode) {
1148 struct inode *new = alloc_inode(sb);
1149
1150 if (new) {
1151 new->i_state = 0;
1152 inode = inode_insert5(new, hashval, test, set, data);
1153 if (unlikely(inode != new))
1154 destroy_inode(new);
1155 }
1156 }
1157 return inode;
1158}
1159EXPORT_SYMBOL(iget5_locked);
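
/*
 * Example (editor's sketch): a filesystem whose inodes are keyed by a
 * 64-bit object id rather than i_ino alone would supply @test and @set
 * callbacks along these lines (all myfs_* names are hypothetical):
 *
 *	static int myfs_test(struct inode *inode, void *data)
 *	{
 *		return MYFS_I(inode)->object_id == *(u64 *)data;
 *	}
 *
 *	static int myfs_set(struct inode *inode, void *data)
 *	{
 *		MYFS_I(inode)->object_id = *(u64 *)data;
 *		return 0;
 *	}
 *
 *	inode = iget5_locked(sb, hash_64(id, 32), myfs_test, myfs_set, &id);
 *	if (!inode)
 *		return ERR_PTR(-ENOMEM);
 *	if (inode->i_state & I_NEW) {
 *		myfs_fill_inode(inode);
 *		unlock_new_inode(inode);
 *	}
 */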
1160
1161/**
1162 * iget_locked - obtain an inode from a mounted file system
1163 * @sb: super block of file system
1164 * @ino: inode number to get
1165 *
1166 * Search for the inode specified by @ino in the inode cache and if present
1167 * return it with an increased reference count. This is for file systems
1168 * where the inode number is sufficient for unique identification of an inode.
1169 *
1170 * If the inode is not in cache, allocate a new inode and return it locked,
1171 * hashed, and with the I_NEW flag set. The file system gets to fill it in
1172 * before unlocking it via unlock_new_inode().
1173 */
1174struct inode *iget_locked(struct super_block *sb, unsigned long ino)
1175{
1176 struct hlist_head *head = inode_hashtable + hash(sb, ino);
1177 struct inode *inode;
1178again:
1179 spin_lock(&inode_hash_lock);
1180 inode = find_inode_fast(sb, head, ino);
1181 spin_unlock(&inode_hash_lock);
1182 if (inode) {
1183 if (IS_ERR(inode))
1184 return NULL;
1185 wait_on_inode(inode);
1186 if (unlikely(inode_unhashed(inode))) {
1187 iput(inode);
1188 goto again;
1189 }
1190 return inode;
1191 }
1192
1193 inode = alloc_inode(sb);
1194 if (inode) {
1195 struct inode *old;
1196
1197 spin_lock(&inode_hash_lock);
1198		/* We released the lock, so recheck for a racing insertion. */
1199 old = find_inode_fast(sb, head, ino);
1200 if (!old) {
1201 inode->i_ino = ino;
1202 spin_lock(&inode->i_lock);
1203 inode->i_state = I_NEW;
1204 hlist_add_head_rcu(&inode->i_hash, head);
1205 spin_unlock(&inode->i_lock);
1206 inode_sb_list_add(inode);
1207 spin_unlock(&inode_hash_lock);
1208
1209			/* Return the locked inode with I_NEW set; the
1210			 * caller is responsible for filling in the contents.
1211 */
1212 return inode;
1213 }
1214
1215 /*
1216 * Uhhuh, somebody else created the same inode under
1217 * us. Use the old inode instead of the one we just
1218 * allocated.
1219 */
1220 spin_unlock(&inode_hash_lock);
1221 destroy_inode(inode);
1222 if (IS_ERR(old))
1223 return NULL;
1224 inode = old;
1225 wait_on_inode(inode);
1226 if (unlikely(inode_unhashed(inode))) {
1227 iput(inode);
1228 goto again;
1229 }
1230 }
1231 return inode;
1232}
1233EXPORT_SYMBOL(iget_locked);
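
/*
 * Example (editor's sketch): the canonical lookup-or-read pattern built
 * on iget_locked(); myfs_read_inode() is a hypothetical helper that fills
 * the inode in from disk:
 *
 *	struct inode *inode = iget_locked(sb, ino);
 *
 *	if (!inode)
 *		return ERR_PTR(-ENOMEM);
 *	if (!(inode->i_state & I_NEW))
 *		return inode;		// cache hit, fully initialised
 *	err = myfs_read_inode(inode);
 *	if (err) {
 *		iget_failed(inode);
 *		return ERR_PTR(err);
 *	}
 *	unlock_new_inode(inode);
 *	return inode;
 */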
1234
1235/*
1236 * search the inode cache for a matching inode number.
1237 * If we find one, then the inode number we are trying to
1238 * allocate is not unique and so we should not use it.
1239 *
1240 * Returns 1 if the inode number is unique, 0 if it is not.
1241 */
1242static int test_inode_iunique(struct super_block *sb, unsigned long ino)
1243{
1244 struct hlist_head *b = inode_hashtable + hash(sb, ino);
1245 struct inode *inode;
1246
1247 hlist_for_each_entry_rcu(inode, b, i_hash) {
1248 if (inode->i_ino == ino && inode->i_sb == sb)
1249 return 0;
1250 }
1251 return 1;
1252}
1253
1254/**
1255 * iunique - get a unique inode number
1256 * @sb: superblock
1257 * @max_reserved: highest reserved inode number
1258 *
1259 * Obtain an inode number that is unique on the system for a given
1260 * superblock. This is used by file systems that have no natural
1261 * permanent inode numbering system. An inode number is returned that
1262 * is higher than the reserved limit but unique.
1263 *
1264 * BUGS:
1265 * With a large number of inodes live on the file system this function
1266 * currently becomes quite slow.
1267 */
1268ino_t iunique(struct super_block *sb, ino_t max_reserved)
1269{
1270 /*
1271 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
1272 * error if st_ino won't fit in target struct field. Use 32bit counter
1273 * here to attempt to avoid that.
1274 */
1275 static DEFINE_SPINLOCK(iunique_lock);
1276 static unsigned int counter;
1277 ino_t res;
1278
1279 rcu_read_lock();
1280 spin_lock(&iunique_lock);
1281 do {
1282 if (counter <= max_reserved)
1283 counter = max_reserved + 1;
1284 res = counter++;
1285 } while (!test_inode_iunique(sb, res));
1286 spin_unlock(&iunique_lock);
1287 rcu_read_unlock();
1288
1289 return res;
1290}
1291EXPORT_SYMBOL(iunique);
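
/*
 * Example (editor's sketch): reserving the low numbers for fixed inodes
 * (MYFS_MAX_RESERVED is hypothetical) and drawing the rest on demand:
 *
 *	inode->i_ino = iunique(sb, MYFS_MAX_RESERVED);
 *
 * Unlike get_next_ino(), the returned number is guaranteed not to clash
 * with any inode currently hashed for @sb.
 */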
1292
1293struct inode *igrab(struct inode *inode)
1294{
1295 spin_lock(&inode->i_lock);
1296 if (!(inode->i_state & (I_FREEING|I_WILL_FREE))) {
1297 __iget(inode);
1298 spin_unlock(&inode->i_lock);
1299 } else {
1300 spin_unlock(&inode->i_lock);
1301 /*
1302		 * Handle the case where s_op->clear_inode has not been
1303 * called yet, and somebody is calling igrab
1304 * while the inode is getting freed.
1305 */
1306 inode = NULL;
1307 }
1308 return inode;
1309}
1310EXPORT_SYMBOL(igrab);
1311
1312/**
1313 * ilookup5_nowait - search for an inode in the inode cache
1314 * @sb: super block of file system to search
1315 * @hashval: hash value (usually inode number) to search for
1316 * @test: callback used for comparisons between inodes
1317 * @data: opaque data pointer to pass to @test
1318 *
1319 * Search for the inode specified by @hashval and @data in the inode cache.
1320 * If the inode is in the cache, the inode is returned with an incremented
1321 * reference count.
1322 *
1323 * Note: I_NEW is not waited upon so you have to be very careful what you do
1324 * with the returned inode. You probably should be using ilookup5() instead.
1325 *
1326 * Note2: @test is called with the inode_hash_lock held, so can't sleep.
1327 */
1328struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval,
1329 int (*test)(struct inode *, void *), void *data)
1330{
1331 struct hlist_head *head = inode_hashtable + hash(sb, hashval);
1332 struct inode *inode;
1333
1334 spin_lock(&inode_hash_lock);
1335 inode = find_inode(sb, head, test, data);
1336 spin_unlock(&inode_hash_lock);
1337
1338 return IS_ERR(inode) ? NULL : inode;
1339}
1340EXPORT_SYMBOL(ilookup5_nowait);
1341
1342/**
1343 * ilookup5 - search for an inode in the inode cache
1344 * @sb: super block of file system to search
1345 * @hashval: hash value (usually inode number) to search for
1346 * @test: callback used for comparisons between inodes
1347 * @data: opaque data pointer to pass to @test
1348 *
1349 * Search for the inode specified by @hashval and @data in the inode cache,
1350 * and if the inode is in the cache, return the inode with an incremented
1351 * reference count. Waits on I_NEW before returning the inode.
1353 *
1354 * This is a generalized version of ilookup() for file systems where the
1355 * inode number is not sufficient for unique identification of an inode.
1356 *
1357 * Note: @test is called with the inode_hash_lock held, so can't sleep.
1358 */
1359struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
1360 int (*test)(struct inode *, void *), void *data)
1361{
1362 struct inode *inode;
1363again:
1364 inode = ilookup5_nowait(sb, hashval, test, data);
1365 if (inode) {
1366 wait_on_inode(inode);
1367 if (unlikely(inode_unhashed(inode))) {
1368 iput(inode);
1369 goto again;
1370 }
1371 }
1372 return inode;
1373}
1374EXPORT_SYMBOL(ilookup5);
1375
1376/**
1377 * ilookup - search for an inode in the inode cache
1378 * @sb: super block of file system to search
1379 * @ino: inode number to search for
1380 *
1381 * Search for the inode @ino in the inode cache, and if the inode is in the
1382 * cache, the inode is returned with an incremented reference count.
1383 */
1384struct inode *ilookup(struct super_block *sb, unsigned long ino)
1385{
1386 struct hlist_head *head = inode_hashtable + hash(sb, ino);
1387 struct inode *inode;
1388again:
1389 spin_lock(&inode_hash_lock);
1390 inode = find_inode_fast(sb, head, ino);
1391 spin_unlock(&inode_hash_lock);
1392
1393 if (inode) {
1394 if (IS_ERR(inode))
1395 return NULL;
1396 wait_on_inode(inode);
1397 if (unlikely(inode_unhashed(inode))) {
1398 iput(inode);
1399 goto again;
1400 }
1401 }
1402 return inode;
1403}
1404EXPORT_SYMBOL(ilookup);
1405
1406/**
1407 * find_inode_nowait - find an inode in the inode cache
1408 * @sb: super block of file system to search
1409 * @hashval: hash value (usually inode number) to search for
1410 * @match: callback used for comparisons between inodes
1411 * @data: opaque data pointer to pass to @match
1412 *
1413 * Search for the inode specified by @hashval and @data in the inode
1414 * cache, where the helper function @match will return 0 if the inode
1415 * does not match, 1 if the inode does match, and -1 if the search
1416 * should be stopped. The @match function must be responsible for
1417 * taking the i_lock spin_lock and checking i_state for an inode being
1418 * freed or being initialized, and incrementing the reference count
1419 * before returning 1. It also must not sleep, since it is called with
1420 * the inode_hash_lock spinlock held.
1421 *
1422 * This is an even more generalized version of ilookup5() for when the
1423 * lookup must never block --- find_inode() can block in
1424 * __wait_on_freeing_inode() --- or when the caller cannot increment
1425 * the reference count because the resulting iput() might cause an
1426 * inode eviction. The tradeoff is that the @match function must be
1427 * very carefully implemented.
1428 */
1429struct inode *find_inode_nowait(struct super_block *sb,
1430 unsigned long hashval,
1431 int (*match)(struct inode *, unsigned long,
1432 void *),
1433 void *data)
1434{
1435 struct hlist_head *head = inode_hashtable + hash(sb, hashval);
1436 struct inode *inode, *ret_inode = NULL;
1437 int mval;
1438
1439 spin_lock(&inode_hash_lock);
1440 hlist_for_each_entry(inode, head, i_hash) {
1441 if (inode->i_sb != sb)
1442 continue;
1443 mval = match(inode, hashval, data);
1444 if (mval == 0)
1445 continue;
1446 if (mval == 1)
1447 ret_inode = inode;
1448 goto out;
1449 }
1450out:
1451 spin_unlock(&inode_hash_lock);
1452 return ret_inode;
1453}
1454EXPORT_SYMBOL(find_inode_nowait);
1455
1456/**
1457 * find_inode_rcu - find an inode in the inode cache
1458 * @sb: Super block of file system to search
1459 * @hashval: Key to hash
1460 * @test: Function to test match on an inode
1461 * @data: Data for test function
1462 *
1463 * Search for the inode specified by @hashval and @data in the inode cache,
1464 * where the helper function @test will return 0 if the inode does not match
1465 * and 1 if it does. The @test function must be responsible for taking the
1466 * i_lock spin_lock and checking i_state for an inode being freed or being
1467 * initialized.
1468 *
1469 * If successful, this will return the inode for which the @test function
1470 * returned 1 and NULL otherwise.
1471 *
1472 * The @test function is not permitted to take a ref on any inode presented.
1473 * It is also not permitted to sleep.
1474 *
1475 * The caller must hold the RCU read lock.
1476 */
1477struct inode *find_inode_rcu(struct super_block *sb, unsigned long hashval,
1478 int (*test)(struct inode *, void *), void *data)
1479{
1480 struct hlist_head *head = inode_hashtable + hash(sb, hashval);
1481 struct inode *inode;
1482
1483 RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
1484 "suspicious find_inode_rcu() usage");
1485
1486 hlist_for_each_entry_rcu(inode, head, i_hash) {
1487 if (inode->i_sb == sb &&
1488 !(READ_ONCE(inode->i_state) & (I_FREEING | I_WILL_FREE)) &&
1489 test(inode, data))
1490 return inode;
1491 }
1492 return NULL;
1493}
1494EXPORT_SYMBOL(find_inode_rcu);
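
/*
 * Example (editor's sketch): an RCU-protected peek at the cache, useful
 * on lookup fast paths that must not sleep (myfs_test is hypothetical):
 *
 *	rcu_read_lock();
 *	inode = find_inode_rcu(sb, hashval, myfs_test, &key);
 *	if (inode) {
 *		// no reference held: only touch fields that are safe
 *		// under RCU, or use igrab() to pin the inode properly
 *	}
 *	rcu_read_unlock();
 */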
1495
1496/**
1497 * find_inode_by_ino_rcu - Find an inode in the inode cache
1498 * @sb: Super block of file system to search
1499 * @ino: The inode number to match
1500 *
1501 * Search for the inode specified by @ino in the inode cache, matching
1502 * on the inode number and the superblock, and skipping any inode that
1503 * is currently being freed.
1504 *
1505 * If successful, this will return the matching inode, and NULL
1506 * otherwise.
1507 *
1508 * No reference is taken on the returned inode, so it is only
1509 * guaranteed to remain valid for as long as the RCU read lock is
1510 * held.
1511 *
1512 * The caller must hold the RCU read lock.
1514 */
1515struct inode *find_inode_by_ino_rcu(struct super_block *sb,
1516 unsigned long ino)
1517{
1518 struct hlist_head *head = inode_hashtable + hash(sb, ino);
1519 struct inode *inode;
1520
1521 RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
1522 "suspicious find_inode_by_ino_rcu() usage");
1523
1524 hlist_for_each_entry_rcu(inode, head, i_hash) {
1525 if (inode->i_ino == ino &&
1526 inode->i_sb == sb &&
1527 !(READ_ONCE(inode->i_state) & (I_FREEING | I_WILL_FREE)))
1528 return inode;
1529 }
1530 return NULL;
1531}
1532EXPORT_SYMBOL(find_inode_by_ino_rcu);
1533
1534int insert_inode_locked(struct inode *inode)
1535{
1536 struct super_block *sb = inode->i_sb;
1537 ino_t ino = inode->i_ino;
1538 struct hlist_head *head = inode_hashtable + hash(sb, ino);
1539
1540 while (1) {
1541 struct inode *old = NULL;
1542 spin_lock(&inode_hash_lock);
1543 hlist_for_each_entry(old, head, i_hash) {
1544 if (old->i_ino != ino)
1545 continue;
1546 if (old->i_sb != sb)
1547 continue;
1548 spin_lock(&old->i_lock);
1549 if (old->i_state & (I_FREEING|I_WILL_FREE)) {
1550 spin_unlock(&old->i_lock);
1551 continue;
1552 }
1553 break;
1554 }
1555 if (likely(!old)) {
1556 spin_lock(&inode->i_lock);
1557 inode->i_state |= I_NEW | I_CREATING;
1558 hlist_add_head_rcu(&inode->i_hash, head);
1559 spin_unlock(&inode->i_lock);
1560 spin_unlock(&inode_hash_lock);
1561 return 0;
1562 }
1563 if (unlikely(old->i_state & I_CREATING)) {
1564 spin_unlock(&old->i_lock);
1565 spin_unlock(&inode_hash_lock);
1566 return -EBUSY;
1567 }
1568 __iget(old);
1569 spin_unlock(&old->i_lock);
1570 spin_unlock(&inode_hash_lock);
1571 wait_on_inode(old);
1572 if (unlikely(!inode_unhashed(old))) {
1573 iput(old);
1574 return -EBUSY;
1575 }
1576 iput(old);
1577 }
1578}
1579EXPORT_SYMBOL(insert_inode_locked);
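
/*
 * Example (editor's sketch): a create path that allocates the in-core
 * inode first and then claims its inode number, roughly in the style of
 * ext2's inode allocator:
 *
 *	inode->i_ino = ino;
 *	if (insert_inode_locked(inode) < 0) {
 *		// number already live or being created: back out
 *		make_bad_inode(inode);
 *		iput(inode);
 *		return ERR_PTR(-EIO);
 *	}
 *	// ... initialise on-disk state, then ...
 *	unlock_new_inode(inode);
 */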
1580
1581int insert_inode_locked4(struct inode *inode, unsigned long hashval,
1582 int (*test)(struct inode *, void *), void *data)
1583{
1584 struct inode *old;
1585
1586 inode->i_state |= I_CREATING;
1587 old = inode_insert5(inode, hashval, test, NULL, data);
1588
1589 if (old != inode) {
1590 iput(old);
1591 return -EBUSY;
1592 }
1593 return 0;
1594}
1595EXPORT_SYMBOL(insert_inode_locked4);
1596
1597
1598int generic_delete_inode(struct inode *inode)
1599{
1600 return 1;
1601}
1602EXPORT_SYMBOL(generic_delete_inode);
1603
1604/*
1605 * Called when we're dropping the last reference
1606 * to an inode.
1607 *
1608 * Call the FS "drop_inode()" function, defaulting to
1609 * the legacy UNIX filesystem behaviour. If it tells
1610 * us to evict the inode, do so. Otherwise, retain the inode
1611 * in the cache if the fs is alive, or sync and evict it if
1612 * the fs is shutting down.
1613 */
1614static void iput_final(struct inode *inode)
1615{
1616 struct super_block *sb = inode->i_sb;
1617 const struct super_operations *op = inode->i_sb->s_op;
1618 unsigned long state;
1619 int drop;
1620
1621 WARN_ON(inode->i_state & I_NEW);
1622
1623 if (op->drop_inode)
1624 drop = op->drop_inode(inode);
1625 else
1626 drop = generic_drop_inode(inode);
1627
1628 if (!drop && (sb->s_flags & SB_ACTIVE)) {
1629 inode_add_lru(inode);
1630 spin_unlock(&inode->i_lock);
1631 return;
1632 }
1633
1634 state = inode->i_state;
1635 if (!drop) {
1636 WRITE_ONCE(inode->i_state, state | I_WILL_FREE);
1637 spin_unlock(&inode->i_lock);
1638
1639 write_inode_now(inode, 1);
1640
1641 spin_lock(&inode->i_lock);
1642 state = inode->i_state;
1643 WARN_ON(state & I_NEW);
1644 state &= ~I_WILL_FREE;
1645 }
1646
1647 WRITE_ONCE(inode->i_state, state | I_FREEING);
1648 if (!list_empty(&inode->i_lru))
1649 inode_lru_list_del(inode);
1650 spin_unlock(&inode->i_lock);
1651
1652 evict(inode);
1653}
1654
1655/**
1656 * iput - put an inode
1657 * @inode: inode to put
1658 *
1659 * Puts an inode, dropping its usage count. If the inode use count hits
1660 * zero, the inode is then freed and may also be destroyed.
1661 *
1662 * Consequently, iput() can sleep.
1663 */
1664void iput(struct inode *inode)
1665{
1666 if (!inode)
1667 return;
1668 BUG_ON(inode->i_state & I_CLEAR);
1669retry:
1670 if (atomic_dec_and_lock(&inode->i_count, &inode->i_lock)) {
1671 if (inode->i_nlink && (inode->i_state & I_DIRTY_TIME)) {
1672 atomic_inc(&inode->i_count);
1673 spin_unlock(&inode->i_lock);
1674 trace_writeback_lazytime_iput(inode);
1675 mark_inode_dirty_sync(inode);
1676 goto retry;
1677 }
1678 iput_final(inode);
1679 }
1680}
1681EXPORT_SYMBOL(iput);
1682
1683#ifdef CONFIG_BLOCK
1684/**
1685 * bmap - find a block number in a file
1686 * @inode: inode owning the block number being requested
1687 * @block: pointer containing the block to find
1688 *
1689 * Replaces the value in ``*block`` with the number of the block on the
1690 * device that corresponds to the requested block number in the file.
1691 * That is, asked for block 4 of inode 1, the function will replace the
1692 * 4 in ``*block`` with the disk block, relative to the start of the disk,
1693 * that holds that block of the file.
1694 *
1695 * Returns -EINVAL in case of error, 0 otherwise. If mapping falls into a
1696 * hole, returns 0 and ``*block`` is also set to 0.
1697 */
1698int bmap(struct inode *inode, sector_t *block)
1699{
1700 if (!inode->i_mapping->a_ops->bmap)
1701 return -EINVAL;
1702
1703 *block = inode->i_mapping->a_ops->bmap(inode->i_mapping, *block);
1704 return 0;
1705}
1706EXPORT_SYMBOL(bmap);
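
/*
 * Example (editor's sketch): resolving the on-disk location of the first
 * block of a file, as a FIBMAP-style ioctl would:
 *
 *	sector_t block = 0;
 *	int error = bmap(inode, &block);
 *
 *	if (error)
 *		return error;	// filesystem has no ->bmap
 *	// "block" now holds the disk block number, or 0 if the file
 *	// starts with a hole
 */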
1707#endif
1708
1709/*
1710 * With relative atime, only update atime if the previous atime is
1711 * earlier than either the ctime or mtime or if at least a day has
1712 * passed since the last atime update.
1713 */
1714static int relatime_need_update(struct vfsmount *mnt, struct inode *inode,
1715 struct timespec64 now)
1716{
1717
1718 if (!(mnt->mnt_flags & MNT_RELATIME))
1719 return 1;
1720 /*
1721 * Is mtime younger than atime? If yes, update atime:
1722 */
1723 if (timespec64_compare(&inode->i_mtime, &inode->i_atime) >= 0)
1724 return 1;
1725 /*
1726 * Is ctime younger than atime? If yes, update atime:
1727 */
1728 if (timespec64_compare(&inode->i_ctime, &inode->i_atime) >= 0)
1729 return 1;
1730
1731 /*
1732 * Is the previous atime value older than a day? If yes,
1733 * update atime:
1734 */
1735 if ((long)(now.tv_sec - inode->i_atime.tv_sec) >= 24*60*60)
1736 return 1;
1737 /*
1738 * Good, we can skip the atime update:
1739 */
1740 return 0;
1741}
1742
1743int generic_update_time(struct inode *inode, struct timespec64 *time, int flags)
1744{
1745 int iflags = I_DIRTY_TIME;
1746 bool dirty = false;
1747
1748 if (flags & S_ATIME)
1749 inode->i_atime = *time;
1750 if (flags & S_VERSION)
1751 dirty = inode_maybe_inc_iversion(inode, false);
1752 if (flags & S_CTIME)
1753 inode->i_ctime = *time;
1754 if (flags & S_MTIME)
1755 inode->i_mtime = *time;
1756 if ((flags & (S_ATIME | S_CTIME | S_MTIME)) &&
1757 !(inode->i_sb->s_flags & SB_LAZYTIME))
1758 dirty = true;
1759
1760 if (dirty)
1761 iflags |= I_DIRTY_SYNC;
1762 __mark_inode_dirty(inode, iflags);
1763 return 0;
1764}
1765EXPORT_SYMBOL(generic_update_time);
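
/*
 * Example (editor's sketch): filesystems that want the default dirtying
 * behaviour can simply point ->update_time here; update_time() below also
 * falls back to this helper when the method is NULL:
 *
 *	static const struct inode_operations myfs_inode_ops = {
 *		.update_time	= generic_update_time,
 *	};
 */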
1766
1767/*
1768 * This does the actual work of updating an inode's time or version. The
1769 * caller must have called mnt_want_write() before calling this.
1770 */
1771static int update_time(struct inode *inode, struct timespec64 *time, int flags)
1772{
1773 if (inode->i_op->update_time)
1774 return inode->i_op->update_time(inode, time, flags);
1775 return generic_update_time(inode, time, flags);
1776}
1777
1778/**
1779 * atime_needs_update - check whether the access time needs updating
1780 * @path: the &struct path being accessed
1781 * @inode: inode to check
1782 *
1783 * Check whether the access time on an inode should be updated, taking
1784 * read-only file systems and media, the "noatime" mount options and
1785 * inode-specific "noatime" markers into account.
1786 */
1787bool atime_needs_update(const struct path *path, struct inode *inode)
1788{
1789 struct vfsmount *mnt = path->mnt;
1790 struct timespec64 now;
1791
1792 if (inode->i_flags & S_NOATIME)
1793 return false;
1794
1795 /* Atime updates will likely cause i_uid and i_gid to be written
1796	 * back improperly if their true value is unknown to the vfs.
1797 */
1798 if (HAS_UNMAPPED_ID(inode))
1799 return false;
1800
1801 if (IS_NOATIME(inode))
1802 return false;
1803 if ((inode->i_sb->s_flags & SB_NODIRATIME) && S_ISDIR(inode->i_mode))
1804 return false;
1805
1806 if (mnt->mnt_flags & MNT_NOATIME)
1807 return false;
1808 if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))
1809 return false;
1810
1811 now = current_time(inode);
1812
1813 if (!relatime_need_update(mnt, inode, now))
1814 return false;
1815
1816 if (timespec64_equal(&inode->i_atime, &now))
1817 return false;
1818
1819 return true;
1820}
1821
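/**
 * touch_atime - update the access time
 * @path: the &struct path to update
 *
 * Update the accessed time on an inode and mark it for writeback.
 * This function automatically handles read-only file systems and media,
 * as well as the "noatime" flag and inode-specific "noatime" markers.
 */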
1822void touch_atime(const struct path *path)
1823{
1824 struct vfsmount *mnt = path->mnt;
1825 struct inode *inode = d_inode(path->dentry);
1826 struct timespec64 now;
1827
1828 if (!atime_needs_update(path, inode))
1829 return;
1830
1831 if (!sb_start_write_trylock(inode->i_sb))
1832 return;
1833
1834 if (__mnt_want_write(mnt) != 0)
1835 goto skip_update;
1836 /*
1837 * File systems can error out when updating inodes if they need to
1838 * allocate new space to modify an inode (such is the case for
1839 * Btrfs), but since we touch atime while walking down the path we
1840 * really don't care if we failed to update the atime of the file,
1841 * so just ignore the return value.
1842 * We may also fail on filesystems that have the ability to make parts
1843 * of the fs read only, e.g. subvolumes in Btrfs.
1844 */
1845 now = current_time(inode);
1846 update_time(inode, &now, S_ATIME);
1847 __mnt_drop_write(mnt);
1848skip_update:
1849 sb_end_write(inode->i_sb);
1850}
1851EXPORT_SYMBOL(touch_atime);
1852
1853/*
1854 * The logic we want is
1855 *
1856 * if suid or (sgid and xgrp)
1857 * remove privs
1858 */
1859int should_remove_suid(struct dentry *dentry)
1860{
1861 umode_t mode = d_inode(dentry)->i_mode;
1862 int kill = 0;
1863
1864 /* suid always must be killed */
1865 if (unlikely(mode & S_ISUID))
1866 kill = ATTR_KILL_SUID;
1867
1868 /*
1869 * sgid without any exec bits is just a mandatory locking mark; leave
1870 * it alone. If some exec bits are set, it's a real sgid; kill it.
1871 */
1872 if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
1873 kill |= ATTR_KILL_SGID;
1874
1875 if (unlikely(kill && !capable(CAP_FSETID) && S_ISREG(mode)))
1876 return kill;
1877
1878 return 0;
1879}
1880EXPORT_SYMBOL(should_remove_suid);
1881
1882/*
1883 * Return mask of changes for notify_change() that need to be done as a
1884 * response to write or truncate. Return 0 if nothing has to be changed.
1885 * Negative value on error (change should be denied).
1886 */
1887int dentry_needs_remove_privs(struct dentry *dentry)
1888{
1889 struct inode *inode = d_inode(dentry);
1890 int mask = 0;
1891 int ret;
1892
1893 if (IS_NOSEC(inode))
1894 return 0;
1895
1896 mask = should_remove_suid(dentry);
1897 ret = security_inode_need_killpriv(dentry);
1898 if (ret < 0)
1899 return ret;
1900 if (ret)
1901 mask |= ATTR_KILL_PRIV;
1902 return mask;
1903}
1904
1905static int __remove_privs(struct dentry *dentry, int kill)
1906{
1907 struct iattr newattrs;
1908
1909 newattrs.ia_valid = ATTR_FORCE | kill;
1910 /*
1911 * Note we call this on write, so notify_change will not
1912 * encounter any conflicting delegations:
1913 */
1914 return notify_change(dentry, &newattrs, NULL);
1915}
1916
1917/*
1918 * Remove special file privileges (suid, capabilities) when file is written
1919 * to or truncated.
1920 */
1921int file_remove_privs(struct file *file)
1922{
1923 struct dentry *dentry = file_dentry(file);
1924 struct inode *inode = file_inode(file);
1925 int kill;
1926 int error = 0;
1927
1928 /*
1929	 * Fast path when nothing security-related needs doing, and likewise
1930	 * for non-regular files, e.g. blkdev inodes.
1931 * For example, blkdev_write_iter() might get here
1932 * trying to remove privs which it is not allowed to.
1933 */
1934 if (IS_NOSEC(inode) || !S_ISREG(inode->i_mode))
1935 return 0;
1936
1937 kill = dentry_needs_remove_privs(dentry);
1938 if (kill < 0)
1939 return kill;
1940 if (kill)
1941 error = __remove_privs(dentry, kill);
1942 if (!error)
1943 inode_has_no_xattr(inode);
1944
1945 return error;
1946}
1947EXPORT_SYMBOL(file_remove_privs);
1948
1949/**
1950 * file_update_time - update mtime and ctime time
1951 * @file: file accessed
1952 *
1953 * Update the mtime and ctime members of an inode and mark the inode
1954 * for writeback. Note that this function is meant exclusively for
1955 * usage in the file write path of filesystems, and filesystems may
1956 * choose to explicitly ignore update via this function with the
1957 * S_NOCMTIME inode flag, e.g. for network filesystems where these
1958 * timestamps are handled by the server. This can return an error for
1959 * file systems that need to allocate space in order to update an inode.
1960 */
1961
1962int file_update_time(struct file *file)
1963{
1964 struct inode *inode = file_inode(file);
1965 struct timespec64 now;
1966 int sync_it = 0;
1967 int ret;
1968
1969 /* First try to exhaust all avenues to not sync */
1970 if (IS_NOCMTIME(inode))
1971 return 0;
1972
1973 now = current_time(inode);
1974 if (!timespec64_equal(&inode->i_mtime, &now))
1975 sync_it = S_MTIME;
1976
1977 if (!timespec64_equal(&inode->i_ctime, &now))
1978 sync_it |= S_CTIME;
1979
1980 if (IS_I_VERSION(inode) && inode_iversion_need_inc(inode))
1981 sync_it |= S_VERSION;
1982
1983 if (!sync_it)
1984 return 0;
1985
1986 /* Finally allowed to write? Takes lock. */
1987 if (__mnt_want_write_file(file))
1988 return 0;
1989
1990 ret = update_time(inode, &now, sync_it);
1991 __mnt_drop_write_file(file);
1992
1993 return ret;
1994}
1995EXPORT_SYMBOL(file_update_time);
1996
1997/* Caller must hold the file's inode lock */
1998int file_modified(struct file *file)
1999{
2000 int err;
2001
2002 /*
2003 * Clear the security bits if the process is not being run by root.
2004 * This keeps people from modifying setuid and setgid binaries.
2005 */
2006 err = file_remove_privs(file);
2007 if (err)
2008 return err;
2009
2010 if (unlikely(file->f_mode & FMODE_NOCMTIME))
2011 return 0;
2012
2013 return file_update_time(file);
2014}
2015EXPORT_SYMBOL(file_modified);
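
/*
 * Example (editor's sketch): a ->write_iter() implementation calls this
 * under the inode lock before copying any data, so suid/sgid bits are
 * stripped and mtime/ctime updated before the write is visible:
 *
 *	inode_lock(inode);
 *	ret = file_modified(file);
 *	if (ret) {
 *		inode_unlock(inode);
 *		return ret;
 *	}
 *	// ... perform the write ...
 *	inode_unlock(inode);
 */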
2016
2017int inode_needs_sync(struct inode *inode)
2018{
2019 if (IS_SYNC(inode))
2020 return 1;
2021 if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode))
2022 return 1;
2023 return 0;
2024}
2025EXPORT_SYMBOL(inode_needs_sync);
2026
2027/*
2028 * If we try to find an inode in the inode hash while it is being
2029 * deleted, we have to wait until the filesystem completes its
2030 * deletion before reporting that it isn't found. This function waits
2031 * until the deletion _might_ have completed. Callers are responsible
2032 * for rechecking the inode state.
2033 *
2034 * It doesn't matter if I_NEW is not set initially; a call to
2035 * wake_up_bit(&inode->i_state, __I_NEW) after removing from the hash list
2036 * will do the right thing.
2037 */
2038static void __wait_on_freeing_inode(struct inode *inode)
2039{
2040 wait_queue_head_t *wq;
2041 DEFINE_WAIT_BIT(wait, &inode->i_state, __I_NEW);
2042 wq = bit_waitqueue(&inode->i_state, __I_NEW);
2043 prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
2044 spin_unlock(&inode->i_lock);
2045 spin_unlock(&inode_hash_lock);
2046 schedule();
2047 finish_wait(wq, &wait.wq_entry);
2048 spin_lock(&inode_hash_lock);
2049}
2050
2051static __initdata unsigned long ihash_entries;
2052static int __init set_ihash_entries(char *str)
2053{
2054 if (!str)
2055 return 0;
2056 ihash_entries = simple_strtoul(str, &str, 0);
2057 return 1;
2058}
2059__setup("ihash_entries=", set_ihash_entries);
2060
2061/*
2062 * Initialize the waitqueues and inode hash table.
2063 */
2064void __init inode_init_early(void)
2065{
2066 /* If hashes are distributed across NUMA nodes, defer
2067 * hash allocation until vmalloc space is available.
2068 */
2069 if (hashdist)
2070 return;
2071
2072 inode_hashtable =
2073 alloc_large_system_hash("Inode-cache",
2074 sizeof(struct hlist_head),
2075 ihash_entries,
2076 14,
2077 HASH_EARLY | HASH_ZERO,
2078 &i_hash_shift,
2079 &i_hash_mask,
2080 0,
2081 0);
2082}
2083
2084void __init inode_init(void)
2085{
2086 /* inode slab cache */
2087 inode_cachep = kmem_cache_create("inode_cache",
2088 sizeof(struct inode),
2089 0,
2090 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
2091 SLAB_MEM_SPREAD|SLAB_ACCOUNT),
2092 init_once);
2093
2094 /* Hash may have been set up in inode_init_early */
2095 if (!hashdist)
2096 return;
2097
2098 inode_hashtable =
2099 alloc_large_system_hash("Inode-cache",
2100 sizeof(struct hlist_head),
2101 ihash_entries,
2102 14,
2103 HASH_ZERO,
2104 &i_hash_shift,
2105 &i_hash_mask,
2106 0,
2107 0);
2108}
2109
2110void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev)
2111{
2112 inode->i_mode = mode;
2113 if (S_ISCHR(mode)) {
2114 inode->i_fop = &def_chr_fops;
2115 inode->i_rdev = rdev;
2116 } else if (S_ISBLK(mode)) {
2117 inode->i_fop = &def_blk_fops;
2118 inode->i_rdev = rdev;
2119 } else if (S_ISFIFO(mode))
2120 inode->i_fop = &pipefifo_fops;
2121 else if (S_ISSOCK(mode))
2122 ; /* leave it no_open_fops */
2123 else
2124 printk(KERN_DEBUG "init_special_inode: bogus i_mode (%o) for"
2125 " inode %s:%lu\n", mode, inode->i_sb->s_id,
2126 inode->i_ino);
2127}
2128EXPORT_SYMBOL(init_special_inode);
2129
2130/**
2131 * inode_init_owner - Init uid,gid,mode for new inode according to POSIX standards
2132 * @inode: New inode
2133 * @dir: Directory inode
2134 * @mode: mode of the new inode
2135 */
2136void inode_init_owner(struct inode *inode, const struct inode *dir,
2137 umode_t mode)
2138{
2139 inode->i_uid = current_fsuid();
2140 if (dir && dir->i_mode & S_ISGID) {
2141 inode->i_gid = dir->i_gid;
2142
2143 /* Directories are special, and always inherit S_ISGID */
2144 if (S_ISDIR(mode))
2145 mode |= S_ISGID;
2146 else if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP) &&
2147 !in_group_p(inode->i_gid) &&
2148 !capable_wrt_inode_uidgid(dir, CAP_FSETID))
2149 mode &= ~S_ISGID;
2150 } else
2151 inode->i_gid = current_fsgid();
2152 inode->i_mode = mode;
2153}
2154EXPORT_SYMBOL(inode_init_owner);
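
/*
 * Example (editor's sketch): given a setgid directory owned by group 100,
 * entries created in it inherit the group, and new directories also
 * inherit the setgid bit itself:
 *
 *	inode_init_owner(inode, dir, S_IFREG | 0644);
 *	// inode->i_gid == dir->i_gid (group 100)
 *
 *	inode_init_owner(inode, dir, S_IFDIR | 0755);
 *	// inode->i_gid == dir->i_gid and inode->i_mode has S_ISGID set
 */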
2155
2156/**
2157 * inode_owner_or_capable - check current task permissions to inode
2158 * @inode: inode being checked
2159 *
2160 * Return true if current either has CAP_FOWNER in a namespace with the
2161 * inode owner uid mapped, or owns the file.
2162 */
2163bool inode_owner_or_capable(const struct inode *inode)
2164{
2165 struct user_namespace *ns;
2166
2167 if (uid_eq(current_fsuid(), inode->i_uid))
2168 return true;
2169
2170 ns = current_user_ns();
2171 if (kuid_has_mapping(ns, inode->i_uid) && ns_capable(ns, CAP_FOWNER))
2172 return true;
2173 return false;
2174}
2175EXPORT_SYMBOL(inode_owner_or_capable);
2176
2177/*
2178 * Direct i/o helper functions
2179 */
2180static void __inode_dio_wait(struct inode *inode)
2181{
2182 wait_queue_head_t *wq = bit_waitqueue(&inode->i_state, __I_DIO_WAKEUP);
2183 DEFINE_WAIT_BIT(q, &inode->i_state, __I_DIO_WAKEUP);
2184
2185 do {
2186 prepare_to_wait(wq, &q.wq_entry, TASK_UNINTERRUPTIBLE);
2187 if (atomic_read(&inode->i_dio_count))
2188 schedule();
2189 } while (atomic_read(&inode->i_dio_count));
2190 finish_wait(wq, &q.wq_entry);
2191}
2192
2193/**
2194 * inode_dio_wait - wait for outstanding DIO requests to finish
2195 * @inode: inode to wait for
2196 *
2197 * Waits for all pending direct I/O requests to finish so that we can
2198 * proceed with a truncate or equivalent operation.
2199 *
2200 * Must be called under a lock that serializes taking new references
2201 * to i_dio_count, usually by holding the inode lock.
2202 */
2203void inode_dio_wait(struct inode *inode)
2204{
2205 if (atomic_read(&inode->i_dio_count))
2206 __inode_dio_wait(inode);
2207}
2208EXPORT_SYMBOL(inode_dio_wait);
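
/*
 * Example (editor's sketch): a truncate path drains in-flight direct I/O
 * after taking the inode lock, so no DIO can span the size change:
 *
 *	inode_lock(inode);
 *	inode_dio_wait(inode);
 *	// ... update i_size and truncate the page cache ...
 *	inode_unlock(inode);
 */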
2209
2210/*
2211 * inode_set_flags - atomically set some inode flags
2212 *
2213 * Note: the caller should be holding i_mutex, or else be sure that
2214 * they have exclusive access to the inode structure (i.e., while the
2215 * inode is being instantiated). The reason for the cmpxchg() loop
2216 * --- which wouldn't be necessary if all code paths which modify
2217 * i_flags actually followed this rule --- is that there is at least one
2218 * code path which doesn't today, so we use cmpxchg() out of an abundance
2219 * of caution.
2220 *
2221 * In the long run, i_mutex is overkill, and we should probably look
2222 * at using the i_lock spinlock to protect i_flags, and then make sure
2223 * it is so documented in include/linux/fs.h and that all code follows
2224 * the locking convention!!
2225 */
2226void inode_set_flags(struct inode *inode, unsigned int flags,
2227 unsigned int mask)
2228{
2229 WARN_ON_ONCE(flags & ~mask);
2230 set_mask_bits(&inode->i_flags, mask, flags);
2231}
2232EXPORT_SYMBOL(inode_set_flags);
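
/*
 * Example (editor's sketch): propagating on-disk attribute bits into
 * i_flags; only the bits named in the mask may change, everything else in
 * i_flags is preserved (new_fl holds the S_* bits computed by the caller):
 *
 *	inode_set_flags(inode, new_fl,
 *			S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME);
 */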
2233
2234void inode_nohighmem(struct inode *inode)
2235{
2236 mapping_set_gfp_mask(inode->i_mapping, GFP_USER);
2237}
2238EXPORT_SYMBOL(inode_nohighmem);
2239
2240/**
2241 * timestamp_truncate - Truncate timespec to a granularity
2242 * @t: Timespec
2243 * @inode: inode being updated
2244 *
2245 * Truncate a timespec to the granularity supported by the fs
2246 * containing the inode. Always rounds down. The granularity, sb->s_time_gran,
2247 * must not be 0 nor greater than a second (NSEC_PER_SEC, or 10^9 ns).
2248 */
2249struct timespec64 timestamp_truncate(struct timespec64 t, struct inode *inode)
2250{
2251 struct super_block *sb = inode->i_sb;
2252 unsigned int gran = sb->s_time_gran;
2253
2254 t.tv_sec = clamp(t.tv_sec, sb->s_time_min, sb->s_time_max);
2255 if (unlikely(t.tv_sec == sb->s_time_max || t.tv_sec == sb->s_time_min))
2256 t.tv_nsec = 0;
2257
2258 /* Avoid division in the common cases 1 ns and 1 s. */
2259 if (gran == 1)
2260 ; /* nothing */
2261 else if (gran == NSEC_PER_SEC)
2262 t.tv_nsec = 0;
2263 else if (gran > 1 && gran < NSEC_PER_SEC)
2264 t.tv_nsec -= t.tv_nsec % gran;
2265 else
2266 WARN(1, "invalid file time granularity: %u", gran);
2267 return t;
2268}
2269EXPORT_SYMBOL(timestamp_truncate);
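
/*
 * Example (editor's sketch): on a filesystem with one-second granularity
 * (s_time_gran == NSEC_PER_SEC), the sub-second part is dropped:
 *
 *	// t == { .tv_sec = 1000, .tv_nsec = 999999999 }
 *	t = timestamp_truncate(t, inode);
 *	// t == { .tv_sec = 1000, .tv_nsec = 0 }
 */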
2270
2271/**
2272 * current_time - Return FS time
2273 * @inode: inode.
2274 *
2275 * Return the current time truncated to the time granularity supported by
2276 * the fs.
2277 *
2278 * Note that @inode must not be NULL. If inode->i_sb is NULL, the
2279 * function warns and returns the time without truncation.
2280 */
2281struct timespec64 current_time(struct inode *inode)
2282{
2283 struct timespec64 now;
2284
2285 ktime_get_coarse_real_ts64(&now);
2286
2287 if (unlikely(!inode->i_sb)) {
2288 WARN(1, "current_time() called with uninitialized super_block in the inode");
2289 return now;
2290 }
2291
2292 return timestamp_truncate(now, inode);
2293}
2294EXPORT_SYMBOL(current_time);
2295
2296/*
2297 * Generic function to check FS_IOC_SETFLAGS values and reject any invalid
2298 * configurations.
2299 *
2300 * Note: the caller should be holding i_mutex, or else be sure that they have
2301 * exclusive access to the inode structure.
2302 */
2303int vfs_ioc_setflags_prepare(struct inode *inode, unsigned int oldflags,
2304 unsigned int flags)
2305{
2306 /*
2307 * The IMMUTABLE and APPEND_ONLY flags can only be changed by
2308 * the relevant capability.
2309 *
2310 * This test looks nicer. Thanks to Pauline Middelink
2311 */
2312 if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL) &&
2313 !capable(CAP_LINUX_IMMUTABLE))
2314 return -EPERM;
2315
2316 return fscrypt_prepare_setflags(inode, oldflags, flags);
2317}
2318EXPORT_SYMBOL(vfs_ioc_setflags_prepare);
2319
2320/*
2321 * Generic function to check FS_IOC_FSSETXATTR values and reject any invalid
2322 * configurations.
2323 *
2324 * Note: the caller should be holding i_mutex, or else be sure that they have
2325 * exclusive access to the inode structure.
2326 */
2327int vfs_ioc_fssetxattr_check(struct inode *inode, const struct fsxattr *old_fa,
2328 struct fsxattr *fa)
2329{
2330 /*
2331 * Can't modify an immutable/append-only file unless we have
2332 * appropriate permission.
2333 */
2334 if ((old_fa->fsx_xflags ^ fa->fsx_xflags) &
2335 (FS_XFLAG_IMMUTABLE | FS_XFLAG_APPEND) &&
2336 !capable(CAP_LINUX_IMMUTABLE))
2337 return -EPERM;
2338
2339 /*
2340 * Project Quota ID state is only allowed to change from within the init
2341 * namespace. Enforce that restriction only if we are trying to change
2342 * the quota ID state. Everything else is allowed in user namespaces.
2343 */
2344 if (current_user_ns() != &init_user_ns) {
2345 if (old_fa->fsx_projid != fa->fsx_projid)
2346 return -EINVAL;
2347 if ((old_fa->fsx_xflags ^ fa->fsx_xflags) &
2348 FS_XFLAG_PROJINHERIT)
2349 return -EINVAL;
2350 }
2351
2352 /* Check extent size hints. */
2353 if ((fa->fsx_xflags & FS_XFLAG_EXTSIZE) && !S_ISREG(inode->i_mode))
2354 return -EINVAL;
2355
2356 if ((fa->fsx_xflags & FS_XFLAG_EXTSZINHERIT) &&
2357 !S_ISDIR(inode->i_mode))
2358 return -EINVAL;
2359
2360 if ((fa->fsx_xflags & FS_XFLAG_COWEXTSIZE) &&
2361 !S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode))
2362 return -EINVAL;
2363
2364 /*
2365 * It is only valid to set the DAX flag on regular files and
2366	 * directories.
2367 */
2368 if ((fa->fsx_xflags & FS_XFLAG_DAX) &&
2369 !(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)))
2370 return -EINVAL;
2371
2372 /* Extent size hints of zero turn off the flags. */
2373 if (fa->fsx_extsize == 0)
2374 fa->fsx_xflags &= ~(FS_XFLAG_EXTSIZE | FS_XFLAG_EXTSZINHERIT);
2375 if (fa->fsx_cowextsize == 0)
2376 fa->fsx_xflags &= ~FS_XFLAG_COWEXTSIZE;
2377
2378 return 0;
2379}
2380EXPORT_SYMBOL(vfs_ioc_fssetxattr_check);