// SPDX-License-Identifier: GPL-2.0-only
/*
 * (C) 1997 Linus Torvalds
 * (C) 1999 Andrea Arcangeli <andrea@suse.de> (dynamic inode allocation)
 */
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/filelock.h>
#include <linux/mm.h>
#include <linux/backing-dev.h>
#include <linux/hash.h>
#include <linux/swap.h>
#include <linux/security.h>
#include <linux/cdev.h>
#include <linux/memblock.h>
#include <linux/fsnotify.h>
#include <linux/mount.h>
#include <linux/posix_acl.h>
#include <linux/buffer_head.h> /* for inode_has_buffers */
#include <linux/ratelimit.h>
#include <linux/list_lru.h>
#include <linux/iversion.h>
#include <trace/events/writeback.h>
#include "internal.h"

/*
 * Inode locking rules:
 *
 * inode->i_lock protects:
 *   inode->i_state, inode->i_hash, __iget(), inode->i_io_list
 * Inode LRU list locks protect:
 *   inode->i_sb->s_inode_lru, inode->i_lru
 * inode->i_sb->s_inode_list_lock protects:
 *   inode->i_sb->s_inodes, inode->i_sb_list
 * bdi->wb.list_lock protects:
 *   bdi->wb.b_{dirty,io,more_io,dirty_time}, inode->i_io_list
 * inode_hash_lock protects:
 *   inode_hashtable, inode->i_hash
 *
 * Lock ordering:
 *
 * inode->i_sb->s_inode_list_lock
 *   inode->i_lock
 *     Inode LRU list locks
 *
 * bdi->wb.list_lock
 *   inode->i_lock
 *
 * inode_hash_lock
 *   inode->i_sb->s_inode_list_lock
 *   inode->i_lock
 *
 * iunique_lock
 *   inode_hash_lock
 */
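
/*
 * Illustrative sketch (not part of the original file): walking a
 * superblock's inode list while honouring the lock ordering documented
 * above. s_inode_list_lock is always taken before any inode's i_lock,
 * never the other way around.
 *
 *	spin_lock(&sb->s_inode_list_lock);
 *	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
 *		spin_lock(&inode->i_lock);
 *		... inspect inode->i_state under i_lock ...
 *		spin_unlock(&inode->i_lock);
 *	}
 *	spin_unlock(&sb->s_inode_list_lock);
 */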

static unsigned int i_hash_mask __ro_after_init;
static unsigned int i_hash_shift __ro_after_init;
static struct hlist_head *inode_hashtable __ro_after_init;
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock);

/*
 * Empty aops. Can be used for the cases where the user does not
 * define any of the address_space operations.
 */
const struct address_space_operations empty_aops = {
};
EXPORT_SYMBOL(empty_aops);

static DEFINE_PER_CPU(unsigned long, nr_inodes);
static DEFINE_PER_CPU(unsigned long, nr_unused);

static struct kmem_cache *inode_cachep __ro_after_init;

static long get_nr_inodes(void)
{
	int i;
	long sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_inodes, i);
	return sum < 0 ? 0 : sum;
}

static inline long get_nr_inodes_unused(void)
{
	int i;
	long sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_unused, i);
	return sum < 0 ? 0 : sum;
}

long get_nr_dirty_inodes(void)
{
	/* not actually dirty inodes, but a wild approximation */
	long nr_dirty = get_nr_inodes() - get_nr_inodes_unused();
	return nr_dirty > 0 ? nr_dirty : 0;
}

/*
 * Handle nr_inode sysctl
 */
#ifdef CONFIG_SYSCTL
/*
 * Statistics gathering..
 */
static struct inodes_stat_t inodes_stat;

static int proc_nr_inodes(struct ctl_table *table, int write, void *buffer,
			  size_t *lenp, loff_t *ppos)
{
	inodes_stat.nr_inodes = get_nr_inodes();
	inodes_stat.nr_unused = get_nr_inodes_unused();
	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}

static struct ctl_table inodes_sysctls[] = {
	{
		.procname	= "inode-nr",
		.data		= &inodes_stat,
		.maxlen		= 2*sizeof(long),
		.mode		= 0444,
		.proc_handler	= proc_nr_inodes,
	},
	{
		.procname	= "inode-state",
		.data		= &inodes_stat,
		.maxlen		= 7*sizeof(long),
		.mode		= 0444,
		.proc_handler	= proc_nr_inodes,
	},
};

static int __init init_fs_inode_sysctls(void)
{
	register_sysctl_init("fs", inodes_sysctls);
	return 0;
}
early_initcall(init_fs_inode_sysctls);
#endif

static int no_open(struct inode *inode, struct file *file)
{
	return -ENXIO;
}

/**
 * inode_init_always - perform inode structure initialisation
 * @sb: superblock inode belongs to
 * @inode: inode to initialise
 *
 * These are initializations that need to be done on every inode
 * allocation as the fields are not initialised by slab allocation.
 */
int inode_init_always(struct super_block *sb, struct inode *inode)
{
	static const struct inode_operations empty_iops;
	static const struct file_operations no_open_fops = {.open = no_open};
	struct address_space *const mapping = &inode->i_data;

	inode->i_sb = sb;
	inode->i_blkbits = sb->s_blocksize_bits;
	inode->i_flags = 0;
	atomic64_set(&inode->i_sequence, 0);
	atomic_set(&inode->i_count, 1);
	inode->i_op = &empty_iops;
	inode->i_fop = &no_open_fops;
	inode->i_ino = 0;
	inode->__i_nlink = 1;
	inode->i_opflags = 0;
	if (sb->s_xattr)
		inode->i_opflags |= IOP_XATTR;
	i_uid_write(inode, 0);
	i_gid_write(inode, 0);
	atomic_set(&inode->i_writecount, 0);
	inode->i_size = 0;
	inode->i_write_hint = WRITE_LIFE_NOT_SET;
	inode->i_blocks = 0;
	inode->i_bytes = 0;
	inode->i_generation = 0;
	inode->i_pipe = NULL;
	inode->i_cdev = NULL;
	inode->i_link = NULL;
	inode->i_dir_seq = 0;
	inode->i_rdev = 0;
	inode->dirtied_when = 0;

#ifdef CONFIG_CGROUP_WRITEBACK
	inode->i_wb_frn_winner = 0;
	inode->i_wb_frn_avg_time = 0;
	inode->i_wb_frn_history = 0;
#endif

	spin_lock_init(&inode->i_lock);
	lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key);

	init_rwsem(&inode->i_rwsem);
	lockdep_set_class(&inode->i_rwsem, &sb->s_type->i_mutex_key);

	atomic_set(&inode->i_dio_count, 0);

	mapping->a_ops = &empty_aops;
	mapping->host = inode;
	mapping->flags = 0;
	mapping->wb_err = 0;
	atomic_set(&mapping->i_mmap_writable, 0);
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
	atomic_set(&mapping->nr_thps, 0);
#endif
	mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE);
	mapping->i_private_data = NULL;
	mapping->writeback_index = 0;
	init_rwsem(&mapping->invalidate_lock);
	lockdep_set_class_and_name(&mapping->invalidate_lock,
				   &sb->s_type->invalidate_lock_key,
				   "mapping.invalidate_lock");
	if (sb->s_iflags & SB_I_STABLE_WRITES)
		mapping_set_stable_writes(mapping);
	inode->i_private = NULL;
	inode->i_mapping = mapping;
	INIT_HLIST_HEAD(&inode->i_dentry);	/* buggered by rcu freeing */
#ifdef CONFIG_FS_POSIX_ACL
	inode->i_acl = inode->i_default_acl = ACL_NOT_CACHED;
#endif

#ifdef CONFIG_FSNOTIFY
	inode->i_fsnotify_mask = 0;
#endif
	inode->i_flctx = NULL;

	if (unlikely(security_inode_alloc(inode)))
		return -ENOMEM;
	this_cpu_inc(nr_inodes);

	return 0;
}
EXPORT_SYMBOL(inode_init_always);
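
/*
 * Example (hypothetical sketch, not kernel API beyond the call shown):
 * a filesystem that recycles its own inode objects, much like XFS does
 * when reusing reclaimed inodes, can rerun the per-allocation setup with
 * inode_init_always(). The "myfs_inode" container and "vfs_inode" member
 * are illustrative names.
 *
 *	static int myfs_reinit_inode(struct super_block *sb,
 *				     struct myfs_inode *mi)
 *	{
 *		struct inode *inode = &mi->vfs_inode;
 *
 *		if (inode_init_always(sb, inode))
 *			return -ENOMEM;
 *		... filesystem-specific reinitialisation goes here ...
 *		return 0;
 *	}
 */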

void free_inode_nonrcu(struct inode *inode)
{
	kmem_cache_free(inode_cachep, inode);
}
EXPORT_SYMBOL(free_inode_nonrcu);

static void i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	if (inode->free_inode)
		inode->free_inode(inode);
	else
		free_inode_nonrcu(inode);
}

static struct inode *alloc_inode(struct super_block *sb)
{
	const struct super_operations *ops = sb->s_op;
	struct inode *inode;

	if (ops->alloc_inode)
		inode = ops->alloc_inode(sb);
	else
		inode = alloc_inode_sb(sb, inode_cachep, GFP_KERNEL);

	if (!inode)
		return NULL;

	if (unlikely(inode_init_always(sb, inode))) {
		if (ops->destroy_inode) {
			ops->destroy_inode(inode);
			if (!ops->free_inode)
				return NULL;
		}
		inode->free_inode = ops->free_inode;
		i_callback(&inode->i_rcu);
		return NULL;
	}

	return inode;
}

void __destroy_inode(struct inode *inode)
{
	BUG_ON(inode_has_buffers(inode));
	inode_detach_wb(inode);
	security_inode_free(inode);
	fsnotify_inode_delete(inode);
	locks_free_lock_context(inode);
	if (!inode->i_nlink) {
		WARN_ON(atomic_long_read(&inode->i_sb->s_remove_count) == 0);
		atomic_long_dec(&inode->i_sb->s_remove_count);
	}

#ifdef CONFIG_FS_POSIX_ACL
	if (inode->i_acl && !is_uncached_acl(inode->i_acl))
		posix_acl_release(inode->i_acl);
	if (inode->i_default_acl && !is_uncached_acl(inode->i_default_acl))
		posix_acl_release(inode->i_default_acl);
#endif
	this_cpu_dec(nr_inodes);
}
EXPORT_SYMBOL(__destroy_inode);

static void destroy_inode(struct inode *inode)
{
	const struct super_operations *ops = inode->i_sb->s_op;

	BUG_ON(!list_empty(&inode->i_lru));
	__destroy_inode(inode);
	if (ops->destroy_inode) {
		ops->destroy_inode(inode);
		if (!ops->free_inode)
			return;
	}
	inode->free_inode = ops->free_inode;
	call_rcu(&inode->i_rcu, i_callback);
}

/**
 * drop_nlink - directly drop an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink. In cases
 * where we are attempting to track writes to the
 * filesystem, a decrement to zero means an imminent
 * write when the file is truncated and actually unlinked
 * on the filesystem.
 */
void drop_nlink(struct inode *inode)
{
	WARN_ON(inode->i_nlink == 0);
	inode->__i_nlink--;
	if (!inode->i_nlink)
		atomic_long_inc(&inode->i_sb->s_remove_count);
}
EXPORT_SYMBOL(drop_nlink);

/**
 * clear_nlink - directly zero an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink. See
 * drop_nlink() for why we care about i_nlink hitting zero.
 */
void clear_nlink(struct inode *inode)
{
	if (inode->i_nlink) {
		inode->__i_nlink = 0;
		atomic_long_inc(&inode->i_sb->s_remove_count);
	}
}
EXPORT_SYMBOL(clear_nlink);

/**
 * set_nlink - directly set an inode's link count
 * @inode: inode
 * @nlink: new nlink (should be non-zero)
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink.
 */
void set_nlink(struct inode *inode, unsigned int nlink)
{
	if (!nlink) {
		clear_nlink(inode);
	} else {
		/* Yes, some filesystems do change nlink from zero to one */
		if (inode->i_nlink == 0)
			atomic_long_dec(&inode->i_sb->s_remove_count);

		inode->__i_nlink = nlink;
	}
}
EXPORT_SYMBOL(set_nlink);

/**
 * inc_nlink - directly increment an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink. Currently,
 * it is only here for parity with dec_nlink().
 */
void inc_nlink(struct inode *inode)
{
	if (unlikely(inode->i_nlink == 0)) {
		WARN_ON(!(inode->i_state & I_LINKABLE));
		atomic_long_dec(&inode->i_sb->s_remove_count);
	}

	inode->__i_nlink++;
}
EXPORT_SYMBOL(inc_nlink);
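
/*
 * Example (illustrative sketch): how a simple filesystem would use the
 * nlink helpers above instead of poking i_nlink directly. "raw_nlink"
 * stands in for a value read from the on-disk inode.
 *
 *	set_nlink(inode, raw_nlink);	when reading an inode from disk
 *	inc_nlink(dir);			when a subdirectory gains ".."
 *	drop_nlink(inode);		in the unlink path
 *	clear_nlink(inode);		when the last link is known gone
 */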

static void __address_space_init_once(struct address_space *mapping)
{
	xa_init_flags(&mapping->i_pages, XA_FLAGS_LOCK_IRQ | XA_FLAGS_ACCOUNT);
	init_rwsem(&mapping->i_mmap_rwsem);
	INIT_LIST_HEAD(&mapping->i_private_list);
	spin_lock_init(&mapping->i_private_lock);
	mapping->i_mmap = RB_ROOT_CACHED;
}

void address_space_init_once(struct address_space *mapping)
{
	memset(mapping, 0, sizeof(*mapping));
	__address_space_init_once(mapping);
}
EXPORT_SYMBOL(address_space_init_once);

/*
 * These are initializations that only need to be done
 * once, because the fields are idempotent across use
 * of the inode, so let the slab be aware of that.
 */
void inode_init_once(struct inode *inode)
{
	memset(inode, 0, sizeof(*inode));
	INIT_HLIST_NODE(&inode->i_hash);
	INIT_LIST_HEAD(&inode->i_devices);
	INIT_LIST_HEAD(&inode->i_io_list);
	INIT_LIST_HEAD(&inode->i_wb_list);
	INIT_LIST_HEAD(&inode->i_lru);
	INIT_LIST_HEAD(&inode->i_sb_list);
	__address_space_init_once(&inode->i_data);
	i_size_ordered_init(inode);
}
EXPORT_SYMBOL(inode_init_once);

static void init_once(void *foo)
{
	struct inode *inode = (struct inode *) foo;

	inode_init_once(inode);
}

/*
 * inode->i_lock must be held
 */
void __iget(struct inode *inode)
{
	atomic_inc(&inode->i_count);
}

/*
 * get additional reference to inode; caller must already hold one.
 */
void ihold(struct inode *inode)
{
	WARN_ON(atomic_inc_return(&inode->i_count) < 2);
}
EXPORT_SYMBOL(ihold);

static void __inode_add_lru(struct inode *inode, bool rotate)
{
	if (inode->i_state & (I_DIRTY_ALL | I_SYNC | I_FREEING | I_WILL_FREE))
		return;
	if (atomic_read(&inode->i_count))
		return;
	if (!(inode->i_sb->s_flags & SB_ACTIVE))
		return;
	if (!mapping_shrinkable(&inode->i_data))
		return;

	if (list_lru_add_obj(&inode->i_sb->s_inode_lru, &inode->i_lru))
		this_cpu_inc(nr_unused);
	else if (rotate)
		inode->i_state |= I_REFERENCED;
}

/*
 * Add inode to LRU if needed (inode is unused and clean).
 *
 * Needs inode->i_lock held.
 */
void inode_add_lru(struct inode *inode)
{
	__inode_add_lru(inode, false);
}

static void inode_lru_list_del(struct inode *inode)
{
	if (list_lru_del_obj(&inode->i_sb->s_inode_lru, &inode->i_lru))
		this_cpu_dec(nr_unused);
}

/**
 * inode_sb_list_add - add inode to the superblock list of inodes
 * @inode: inode to add
 */
void inode_sb_list_add(struct inode *inode)
{
	spin_lock(&inode->i_sb->s_inode_list_lock);
	list_add(&inode->i_sb_list, &inode->i_sb->s_inodes);
	spin_unlock(&inode->i_sb->s_inode_list_lock);
}
EXPORT_SYMBOL_GPL(inode_sb_list_add);

static inline void inode_sb_list_del(struct inode *inode)
{
	if (!list_empty(&inode->i_sb_list)) {
		spin_lock(&inode->i_sb->s_inode_list_lock);
		list_del_init(&inode->i_sb_list);
		spin_unlock(&inode->i_sb->s_inode_list_lock);
	}
}

static unsigned long hash(struct super_block *sb, unsigned long hashval)
{
	unsigned long tmp;

	tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
			L1_CACHE_BYTES;
	tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> i_hash_shift);
	return tmp & i_hash_mask;
}

/**
 * __insert_inode_hash - hash an inode
 * @inode: unhashed inode
 * @hashval: unsigned long value used to locate this object in the
 *		inode_hashtable.
 *
 * Add an inode to the inode hash for this superblock.
 */
void __insert_inode_hash(struct inode *inode, unsigned long hashval)
{
	struct hlist_head *b = inode_hashtable + hash(inode->i_sb, hashval);

	spin_lock(&inode_hash_lock);
	spin_lock(&inode->i_lock);
	hlist_add_head_rcu(&inode->i_hash, b);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
}
EXPORT_SYMBOL(__insert_inode_hash);

/**
 * __remove_inode_hash - remove an inode from the hash
 * @inode: inode to unhash
 *
 * Remove an inode from the superblock.
 */
void __remove_inode_hash(struct inode *inode)
{
	spin_lock(&inode_hash_lock);
	spin_lock(&inode->i_lock);
	hlist_del_init_rcu(&inode->i_hash);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
}
EXPORT_SYMBOL(__remove_inode_hash);

void dump_mapping(const struct address_space *mapping)
{
	struct inode *host;
	const struct address_space_operations *a_ops;
	struct hlist_node *dentry_first;
	struct dentry *dentry_ptr;
	struct dentry dentry;
	unsigned long ino;

	/*
	 * If mapping is an invalid pointer, we don't want to crash
	 * accessing it, so probe everything depending on it carefully.
	 */
	if (get_kernel_nofault(host, &mapping->host) ||
	    get_kernel_nofault(a_ops, &mapping->a_ops)) {
		pr_warn("invalid mapping:%px\n", mapping);
		return;
	}

	if (!host) {
		pr_warn("aops:%ps\n", a_ops);
		return;
	}

	if (get_kernel_nofault(dentry_first, &host->i_dentry.first) ||
	    get_kernel_nofault(ino, &host->i_ino)) {
		pr_warn("aops:%ps invalid inode:%px\n", a_ops, host);
		return;
	}

	if (!dentry_first) {
		pr_warn("aops:%ps ino:%lx\n", a_ops, ino);
		return;
	}

	dentry_ptr = container_of(dentry_first, struct dentry, d_u.d_alias);
	if (get_kernel_nofault(dentry, dentry_ptr)) {
		pr_warn("aops:%ps ino:%lx invalid dentry:%px\n",
				a_ops, ino, dentry_ptr);
		return;
	}

	/*
	 * if dentry is corrupted, the %pd handler may still crash,
	 * but it's unlikely that we reach here with a corrupt mapping
	 */
	pr_warn("aops:%ps ino:%lx dentry name:\"%pd\"\n", a_ops, ino, &dentry);
}

void clear_inode(struct inode *inode)
{
	/*
	 * We have to cycle the i_pages lock here because reclaim can be in the
	 * process of removing the last page (in __filemap_remove_folio())
	 * and we must not free the mapping under it.
	 */
	xa_lock_irq(&inode->i_data.i_pages);
	BUG_ON(inode->i_data.nrpages);
	/*
	 * Almost always, mapping_empty(&inode->i_data) here; but there are
	 * two known and long-standing ways in which nodes may get left behind
	 * (when deep radix-tree node allocation failed partway; or when THP
	 * collapse_file() failed). Until those two known cases are cleaned up,
	 * or a cleanup function is called here, do not BUG_ON(!mapping_empty),
	 * nor even WARN_ON(!mapping_empty).
	 */
	xa_unlock_irq(&inode->i_data.i_pages);
	BUG_ON(!list_empty(&inode->i_data.i_private_list));
	BUG_ON(!(inode->i_state & I_FREEING));
	BUG_ON(inode->i_state & I_CLEAR);
	BUG_ON(!list_empty(&inode->i_wb_list));
	/* don't need i_lock here, no concurrent mods to i_state */
	inode->i_state = I_FREEING | I_CLEAR;
}
EXPORT_SYMBOL(clear_inode);

/*
 * Free the inode passed in, removing it from the lists it is still connected
 * to. We remove any pages still attached to the inode and wait for any IO that
 * is still in progress before finally destroying the inode.
 *
 * An inode must already be marked I_FREEING so that we avoid the inode being
 * moved back onto lists if we race with other code that manipulates the lists
 * (e.g. writeback_single_inode). The caller is responsible for setting this.
 *
 * An inode must already be removed from the LRU list before being evicted from
 * the cache. This should occur atomically with setting the I_FREEING state
 * flag, so no inodes here should ever be on the LRU when being evicted.
 */
static void evict(struct inode *inode)
{
	const struct super_operations *op = inode->i_sb->s_op;

	BUG_ON(!(inode->i_state & I_FREEING));
	BUG_ON(!list_empty(&inode->i_lru));

	if (!list_empty(&inode->i_io_list))
		inode_io_list_del(inode);

	inode_sb_list_del(inode);

	/*
	 * Wait for flusher thread to be done with the inode so that filesystem
	 * does not start destroying it while writeback is still running. Since
	 * the inode has I_FREEING set, flusher thread won't start new work on
	 * the inode. We just have to wait for running writeback to finish.
	 */
	inode_wait_for_writeback(inode);

	if (op->evict_inode) {
		op->evict_inode(inode);
	} else {
		truncate_inode_pages_final(&inode->i_data);
		clear_inode(inode);
	}
	if (S_ISCHR(inode->i_mode) && inode->i_cdev)
		cd_forget(inode);

	remove_inode_hash(inode);

	spin_lock(&inode->i_lock);
	wake_up_bit(&inode->i_state, __I_NEW);
	BUG_ON(inode->i_state != (I_FREEING | I_CLEAR));
	spin_unlock(&inode->i_lock);

	destroy_inode(inode);
}

/*
 * dispose_list - dispose of the contents of a local list
 * @head: the head of the list to free
 *
 * Dispose-list gets a local list with local inodes in it, so it doesn't
 * need to worry about list corruption and SMP locks.
 */
static void dispose_list(struct list_head *head)
{
	while (!list_empty(head)) {
		struct inode *inode;

		inode = list_first_entry(head, struct inode, i_lru);
		list_del_init(&inode->i_lru);

		evict(inode);
		cond_resched();
	}
}

/**
 * evict_inodes - evict all evictable inodes for a superblock
 * @sb: superblock to operate on
 *
 * Make sure that no inodes with zero refcount are retained. This is
 * called by superblock shutdown after having SB_ACTIVE flag removed,
 * so any inode reaching zero refcount during or after that call will
 * be immediately evicted.
 */
void evict_inodes(struct super_block *sb)
{
	struct inode *inode, *next;
	LIST_HEAD(dispose);

again:
	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
		if (atomic_read(&inode->i_count))
			continue;

		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			continue;
		}

		inode->i_state |= I_FREEING;
		inode_lru_list_del(inode);
		spin_unlock(&inode->i_lock);
		list_add(&inode->i_lru, &dispose);

		/*
		 * We can have a ton of inodes to evict at unmount time given
		 * enough memory, check to see if we need to go to sleep for a
		 * bit so we don't livelock.
		 */
		if (need_resched()) {
			spin_unlock(&sb->s_inode_list_lock);
			cond_resched();
			dispose_list(&dispose);
			goto again;
		}
	}
	spin_unlock(&sb->s_inode_list_lock);

	dispose_list(&dispose);
}
EXPORT_SYMBOL_GPL(evict_inodes);

/**
 * invalidate_inodes - attempt to free all inodes on a superblock
 * @sb: superblock to operate on
 *
 * Attempts to free all inodes (including dirty inodes) for a given superblock.
 */
void invalidate_inodes(struct super_block *sb)
{
	struct inode *inode, *next;
	LIST_HEAD(dispose);

again:
	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		if (atomic_read(&inode->i_count)) {
			spin_unlock(&inode->i_lock);
			continue;
		}

		inode->i_state |= I_FREEING;
		inode_lru_list_del(inode);
		spin_unlock(&inode->i_lock);
		list_add(&inode->i_lru, &dispose);
		if (need_resched()) {
			spin_unlock(&sb->s_inode_list_lock);
			cond_resched();
			dispose_list(&dispose);
			goto again;
		}
	}
	spin_unlock(&sb->s_inode_list_lock);

	dispose_list(&dispose);
}
/*
 * Isolate the inode from the LRU in preparation for freeing it.
 *
 * If the inode has the I_REFERENCED flag set, then it means that it has been
 * used recently - the flag is set in iput_final(). When we encounter such an
 * inode, clear the flag and move it to the back of the LRU so it gets another
 * pass through the LRU before it gets reclaimed. This is necessary because
 * we are doing lazy LRU updates to minimise lock contention, so the LRU does
 * not have strict ordering. Hence we don't want to reclaim inodes with this
 * flag set because they are the inodes that are out of order.
 */
static enum lru_status inode_lru_isolate(struct list_head *item,
		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
	struct list_head *freeable = arg;
	struct inode	*inode = container_of(item, struct inode, i_lru);

	/*
	 * We are inverting the lru lock/inode->i_lock here, so use a
	 * trylock. If we fail to get the lock, just skip it.
	 */
	if (!spin_trylock(&inode->i_lock))
		return LRU_SKIP;

	/*
	 * Inodes can get referenced, redirtied, or repopulated while
	 * they're already on the LRU, and this can make them
	 * unreclaimable for a while. Remove them lazily here; iput,
	 * sync, or the last page cache deletion will requeue them.
	 */
	if (atomic_read(&inode->i_count) ||
	    (inode->i_state & ~I_REFERENCED) ||
	    !mapping_shrinkable(&inode->i_data)) {
		list_lru_isolate(lru, &inode->i_lru);
		spin_unlock(&inode->i_lock);
		this_cpu_dec(nr_unused);
		return LRU_REMOVED;
	}

	/* Recently referenced inodes get one more pass */
	if (inode->i_state & I_REFERENCED) {
		inode->i_state &= ~I_REFERENCED;
		spin_unlock(&inode->i_lock);
		return LRU_ROTATE;
	}

	/*
	 * On highmem systems, mapping_shrinkable() permits dropping
	 * page cache in order to free up struct inodes: lowmem might
	 * be under pressure before the cache inside the highmem zone.
	 */
	if (inode_has_buffers(inode) || !mapping_empty(&inode->i_data)) {
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(lru_lock);
		if (remove_inode_buffers(inode)) {
			unsigned long reap;
			reap = invalidate_mapping_pages(&inode->i_data, 0, -1);
			if (current_is_kswapd())
				__count_vm_events(KSWAPD_INODESTEAL, reap);
			else
				__count_vm_events(PGINODESTEAL, reap);
			mm_account_reclaimed_pages(reap);
		}
		iput(inode);
		spin_lock(lru_lock);
		return LRU_RETRY;
	}

	WARN_ON(inode->i_state & I_NEW);
	inode->i_state |= I_FREEING;
	list_lru_isolate_move(lru, &inode->i_lru, freeable);
	spin_unlock(&inode->i_lock);

	this_cpu_dec(nr_unused);
	return LRU_REMOVED;
}

/*
 * Walk the superblock inode LRU for freeable inodes and attempt to free them.
 * This is called from the superblock shrinker function with a number of inodes
 * to trim from the LRU. Inodes to be freed are moved to a temporary list and
 * then are freed outside inode_lock by dispose_list().
 */
long prune_icache_sb(struct super_block *sb, struct shrink_control *sc)
{
	LIST_HEAD(freeable);
	long freed;

	freed = list_lru_shrink_walk(&sb->s_inode_lru, sc,
				     inode_lru_isolate, &freeable);
	dispose_list(&freeable);
	return freed;
}

static void __wait_on_freeing_inode(struct inode *inode);
/*
 * Called with the inode_hash_lock held.
 */
static struct inode *find_inode(struct super_block *sb,
				struct hlist_head *head,
				int (*test)(struct inode *, void *),
				void *data)
{
	struct inode *inode = NULL;

repeat:
	hlist_for_each_entry(inode, head, i_hash) {
		if (inode->i_sb != sb)
			continue;
		if (!test(inode, data))
			continue;
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
			__wait_on_freeing_inode(inode);
			goto repeat;
		}
		if (unlikely(inode->i_state & I_CREATING)) {
			spin_unlock(&inode->i_lock);
			return ERR_PTR(-ESTALE);
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		return inode;
	}
	return NULL;
}

/*
 * find_inode_fast is the fast path version of find_inode, see the comment at
 * iget_locked for details.
 */
static struct inode *find_inode_fast(struct super_block *sb,
				struct hlist_head *head, unsigned long ino)
{
	struct inode *inode = NULL;

repeat:
	hlist_for_each_entry(inode, head, i_hash) {
		if (inode->i_ino != ino)
			continue;
		if (inode->i_sb != sb)
			continue;
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
			__wait_on_freeing_inode(inode);
			goto repeat;
		}
		if (unlikely(inode->i_state & I_CREATING)) {
			spin_unlock(&inode->i_lock);
			return ERR_PTR(-ESTALE);
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		return inode;
	}
	return NULL;
}

/*
 * Each cpu owns a range of LAST_INO_BATCH numbers.
 * 'shared_last_ino' is dirtied only once out of LAST_INO_BATCH allocations,
 * to renew the exhausted range.
 *
 * This does not significantly increase overflow rate because every CPU can
 * consume at most LAST_INO_BATCH-1 unused inode numbers. So there is
 * NR_CPUS*(LAST_INO_BATCH-1) wastage. With NR_CPUS=4096 and
 * LAST_INO_BATCH=1024, this is ~0.1% of the 2^32 range, and is a worst-case.
 * Even a 50% wastage would only increase overflow rate by 2x, which does not
 * seem too significant.
 *
 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
 * error if st_ino won't fit in target struct field. Use 32bit counter
 * here to attempt to avoid that.
 */
#define LAST_INO_BATCH 1024
static DEFINE_PER_CPU(unsigned int, last_ino);

unsigned int get_next_ino(void)
{
	unsigned int *p = &get_cpu_var(last_ino);
	unsigned int res = *p;

#ifdef CONFIG_SMP
	if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
		static atomic_t shared_last_ino;
		int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);

		res = next - LAST_INO_BATCH;
	}
#endif

	res++;
	/* get_next_ino should not provide a 0 inode number */
	if (unlikely(!res))
		res++;
	*p = res;
	put_cpu_var(last_ino);
	return res;
}
EXPORT_SYMBOL(get_next_ino);

/**
 * new_inode_pseudo - obtain an inode
 * @sb: superblock
 *
 * Allocates a new inode for the given superblock.
 * The inode won't be chained into the superblock's s_inodes list.
 * This means:
 * - the fs can't be unmounted
 * - quotas, fsnotify, writeback can't work
 */
struct inode *new_inode_pseudo(struct super_block *sb)
{
	struct inode *inode = alloc_inode(sb);

	if (inode) {
		spin_lock(&inode->i_lock);
		inode->i_state = 0;
		spin_unlock(&inode->i_lock);
	}
	return inode;
}

/**
 * new_inode - obtain an inode
 * @sb: superblock
 *
 * Allocates a new inode for the given superblock. The default gfp_mask
 * for allocations related to inode->i_mapping is GFP_HIGHUSER_MOVABLE.
 * If HIGHMEM pages are unsuitable or it is known that pages allocated
 * for the page cache are not reclaimable or migratable,
 * mapping_set_gfp_mask() must be called with suitable flags on the
 * newly created inode's mapping.
 */
struct inode *new_inode(struct super_block *sb)
{
	struct inode *inode;

	inode = new_inode_pseudo(sb);
	if (inode)
		inode_sb_list_add(inode);
	return inode;
}
EXPORT_SYMBOL(new_inode);
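
/*
 * Example (hypothetical sketch): a ramfs-style filesystem creating an
 * in-memory inode. Such filesystems have no stable on-disk numbering,
 * so get_next_ino() supplies the inode number. "myfs_get_inode" is an
 * illustrative name and inode_init_owner() is declared elsewhere.
 *
 *	struct inode *myfs_get_inode(struct super_block *sb,
 *				     const struct inode *dir, umode_t mode)
 *	{
 *		struct inode *inode = new_inode(sb);
 *
 *		if (inode) {
 *			inode->i_ino = get_next_ino();
 *			inode_init_owner(&nop_mnt_idmap, inode, dir, mode);
 *		}
 *		return inode;
 *	}
 */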

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void lockdep_annotate_inode_mutex_key(struct inode *inode)
{
	if (S_ISDIR(inode->i_mode)) {
		struct file_system_type *type = inode->i_sb->s_type;

		/* Set new key only if filesystem hasn't already changed it */
		if (lockdep_match_class(&inode->i_rwsem, &type->i_mutex_key)) {
			/*
			 * ensure nobody is actually holding i_mutex
			 */
			// mutex_destroy(&inode->i_mutex);
			init_rwsem(&inode->i_rwsem);
			lockdep_set_class(&inode->i_rwsem,
					  &type->i_mutex_dir_key);
		}
	}
}
EXPORT_SYMBOL(lockdep_annotate_inode_mutex_key);
#endif

/**
 * unlock_new_inode - clear the I_NEW state and wake up any waiters
 * @inode: new inode to unlock
 *
 * Called when the inode is fully initialised to clear the new state of the
 * inode and wake up anyone waiting for the inode to finish initialisation.
 */
void unlock_new_inode(struct inode *inode)
{
	lockdep_annotate_inode_mutex_key(inode);
	spin_lock(&inode->i_lock);
	WARN_ON(!(inode->i_state & I_NEW));
	inode->i_state &= ~I_NEW & ~I_CREATING;
	smp_mb();
	wake_up_bit(&inode->i_state, __I_NEW);
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(unlock_new_inode);

void discard_new_inode(struct inode *inode)
{
	lockdep_annotate_inode_mutex_key(inode);
	spin_lock(&inode->i_lock);
	WARN_ON(!(inode->i_state & I_NEW));
	inode->i_state &= ~I_NEW;
	smp_mb();
	wake_up_bit(&inode->i_state, __I_NEW);
	spin_unlock(&inode->i_lock);
	iput(inode);
}
EXPORT_SYMBOL(discard_new_inode);

/**
 * lock_two_nondirectories - take two i_mutexes on non-directory objects
 *
 * Lock any non-NULL argument. Passed objects must not be directories.
 * Zero, one or two objects may be locked by this function.
 *
 * @inode1: first inode to lock
 * @inode2: second inode to lock
 */
void lock_two_nondirectories(struct inode *inode1, struct inode *inode2)
{
	if (inode1)
		WARN_ON_ONCE(S_ISDIR(inode1->i_mode));
	if (inode2)
		WARN_ON_ONCE(S_ISDIR(inode2->i_mode));
	if (inode1 > inode2)
		swap(inode1, inode2);
	if (inode1)
		inode_lock(inode1);
	if (inode2 && inode2 != inode1)
		inode_lock_nested(inode2, I_MUTEX_NONDIR2);
}
EXPORT_SYMBOL(lock_two_nondirectories);

/**
 * unlock_two_nondirectories - release locks from lock_two_nondirectories()
 * @inode1: first inode to unlock
 * @inode2: second inode to unlock
 */
void unlock_two_nondirectories(struct inode *inode1, struct inode *inode2)
{
	if (inode1) {
		WARN_ON_ONCE(S_ISDIR(inode1->i_mode));
		inode_unlock(inode1);
	}
	if (inode2 && inode2 != inode1) {
		WARN_ON_ONCE(S_ISDIR(inode2->i_mode));
		inode_unlock(inode2);
	}
}
EXPORT_SYMBOL(unlock_two_nondirectories);
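
/*
 * Example (illustrative sketch): an operation on two regular files, such
 * as an exchange-style rename, locking both inodes without deadlocking.
 * The helpers order the locks by address internally, so callers need not.
 *
 *	lock_two_nondirectories(src, dst);
 *	... move or swap data between the two inodes ...
 *	unlock_two_nondirectories(src, dst);
 */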

/**
 * inode_insert5 - obtain an inode from a mounted file system
 * @inode: pre-allocated inode to use for insert to cache
 * @hashval: hash value (usually inode number) to get
 * @test: callback used for comparisons between inodes
 * @set: callback used to initialize a new struct inode
 * @data: opaque data pointer to pass to @test and @set
 *
 * Search for the inode specified by @hashval and @data in the inode cache,
 * and if present return it with an increased reference count. This is a
 * variant of iget5_locked() for callers that don't want to fail on memory
 * allocation of the inode.
 *
 * If the inode is not in cache, insert the pre-allocated inode to cache and
 * return it locked, hashed, and with the I_NEW flag set. The file system gets
 * to fill it in before unlocking it via unlock_new_inode().
 *
 * Note both @test and @set are called with the inode_hash_lock held, so can't
 * sleep.
 */
struct inode *inode_insert5(struct inode *inode, unsigned long hashval,
			    int (*test)(struct inode *, void *),
			    int (*set)(struct inode *, void *), void *data)
{
	struct hlist_head *head = inode_hashtable + hash(inode->i_sb, hashval);
	struct inode *old;

again:
	spin_lock(&inode_hash_lock);
	old = find_inode(inode->i_sb, head, test, data);
	if (unlikely(old)) {
		/*
		 * Uhhuh, somebody else created the same inode under us.
		 * Use the old inode instead of the preallocated one.
		 */
		spin_unlock(&inode_hash_lock);
		if (IS_ERR(old))
			return NULL;
		wait_on_inode(old);
		if (unlikely(inode_unhashed(old))) {
			iput(old);
			goto again;
		}
		return old;
	}

	if (set && unlikely(set(inode, data))) {
		inode = NULL;
		goto unlock;
	}

	/*
	 * Return the locked inode with I_NEW set, the
	 * caller is responsible for filling in the contents
	 */
	spin_lock(&inode->i_lock);
	inode->i_state |= I_NEW;
	hlist_add_head_rcu(&inode->i_hash, head);
	spin_unlock(&inode->i_lock);

	/*
	 * Add inode to the sb list if it's not already. It has I_NEW at this
	 * point, so it should be safe to test i_sb_list locklessly.
	 */
	if (list_empty(&inode->i_sb_list))
		inode_sb_list_add(inode);
unlock:
	spin_unlock(&inode_hash_lock);

	return inode;
}
EXPORT_SYMBOL(inode_insert5);

/**
 * iget5_locked - obtain an inode from a mounted file system
 * @sb: super block of file system
 * @hashval: hash value (usually inode number) to get
 * @test: callback used for comparisons between inodes
 * @set: callback used to initialize a new struct inode
 * @data: opaque data pointer to pass to @test and @set
 *
 * Search for the inode specified by @hashval and @data in the inode cache,
 * and if present return it with an increased reference count. This is
 * a generalized version of iget_locked() for file systems where the inode
 * number is not sufficient for unique identification of an inode.
 *
 * If the inode is not in cache, allocate a new inode and return it locked,
 * hashed, and with the I_NEW flag set. The file system gets to fill it in
 * before unlocking it via unlock_new_inode().
 *
 * Note both @test and @set are called with the inode_hash_lock held, so can't
 * sleep.
 */
struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *),
		int (*set)(struct inode *, void *), void *data)
{
	struct inode *inode = ilookup5(sb, hashval, test, data);

	if (!inode) {
		struct inode *new = alloc_inode(sb);

		if (new) {
			new->i_state = 0;
			inode = inode_insert5(new, hashval, test, set, data);
			if (unlikely(inode != new))
				destroy_inode(new);
		}
	}
	return inode;
}
EXPORT_SYMBOL(iget5_locked);
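
/*
 * Example (hypothetical sketch): using iget5_locked() where the inode
 * number alone is not unique, keying on an object id instead. All
 * "myfs_*" names, MYFS_I() and the args structure are illustrative.
 * Note that neither callback may sleep.
 *
 *	static int myfs_test(struct inode *inode, void *data)
 *	{
 *		struct myfs_args *args = data;
 *
 *		return MYFS_I(inode)->object_id == args->object_id;
 *	}
 *
 *	static int myfs_set(struct inode *inode, void *data)
 *	{
 *		struct myfs_args *args = data;
 *
 *		MYFS_I(inode)->object_id = args->object_id;
 *		return 0;
 *	}
 *
 *	inode = iget5_locked(sb, hashval, myfs_test, myfs_set, &args);
 *	if (inode && (inode->i_state & I_NEW)) {
 *		... read the object in ...
 *		unlock_new_inode(inode);
 *	}
 */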

/**
 * iget_locked - obtain an inode from a mounted file system
 * @sb: super block of file system
 * @ino: inode number to get
 *
 * Search for the inode specified by @ino in the inode cache and if present
 * return it with an increased reference count. This is for file systems
 * where the inode number is sufficient for unique identification of an inode.
 *
 * If the inode is not in cache, allocate a new inode and return it locked,
 * hashed, and with the I_NEW flag set. The file system gets to fill it in
 * before unlocking it via unlock_new_inode().
 */
struct inode *iget_locked(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *head = inode_hashtable + hash(sb, ino);
	struct inode *inode;
again:
	spin_lock(&inode_hash_lock);
	inode = find_inode_fast(sb, head, ino);
	spin_unlock(&inode_hash_lock);
	if (inode) {
		if (IS_ERR(inode))
			return NULL;
		wait_on_inode(inode);
		if (unlikely(inode_unhashed(inode))) {
			iput(inode);
			goto again;
		}
		return inode;
	}

	inode = alloc_inode(sb);
	if (inode) {
		struct inode *old;

		spin_lock(&inode_hash_lock);
		/* We released the lock, so.. */
		old = find_inode_fast(sb, head, ino);
		if (!old) {
			inode->i_ino = ino;
			spin_lock(&inode->i_lock);
			inode->i_state = I_NEW;
			hlist_add_head_rcu(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			inode_sb_list_add(inode);
			spin_unlock(&inode_hash_lock);

			/* Return the locked inode with I_NEW set, the
			 * caller is responsible for filling in the contents
			 */
			return inode;
		}

		/*
		 * Uhhuh, somebody else created the same inode under
		 * us. Use the old inode instead of the one we just
		 * allocated.
		 */
		spin_unlock(&inode_hash_lock);
		destroy_inode(inode);
		if (IS_ERR(old))
			return NULL;
		inode = old;
		wait_on_inode(inode);
		if (unlikely(inode_unhashed(inode))) {
			iput(inode);
			goto again;
		}
	}
	return inode;
}
EXPORT_SYMBOL(iget_locked);
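
/*
 * Example (hypothetical sketch): the classic caller pattern for
 * iget_locked(). Only an inode returned with I_NEW set needs to be
 * filled in; iget_failed() (defined elsewhere) unhashes and drops a
 * half-constructed inode. "myfs_read_inode" is an illustrative name.
 *
 *	struct inode *myfs_iget(struct super_block *sb, unsigned long ino)
 *	{
 *		struct inode *inode = iget_locked(sb, ino);
 *
 *		if (!inode)
 *			return ERR_PTR(-ENOMEM);
 *		if (!(inode->i_state & I_NEW))
 *			return inode;	(already cached and initialised)
 *		if (myfs_read_inode(inode)) {
 *			iget_failed(inode);
 *			return ERR_PTR(-EIO);
 *		}
 *		unlock_new_inode(inode);
 *		return inode;
 *	}
 */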

/*
 * search the inode cache for a matching inode number.
 * If we find one, then the inode number we are trying to
 * allocate is not unique and so we should not use it.
 *
 * Returns 1 if the inode number is unique, 0 if it is not.
 */
static int test_inode_iunique(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *b = inode_hashtable + hash(sb, ino);
	struct inode *inode;

	hlist_for_each_entry_rcu(inode, b, i_hash) {
		if (inode->i_ino == ino && inode->i_sb == sb)
			return 0;
	}
	return 1;
}

/**
 * iunique - get a unique inode number
 * @sb: superblock
 * @max_reserved: highest reserved inode number
 *
 * Obtain an inode number that is unique on the system for a given
 * superblock. This is used by file systems that have no natural
 * permanent inode numbering system. An inode number is returned that
 * is higher than the reserved limit but unique.
 *
 * BUGS:
 * With a large number of inodes live on the file system this function
 * currently becomes quite slow.
 */
ino_t iunique(struct super_block *sb, ino_t max_reserved)
{
	/*
	 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
	 * error if st_ino won't fit in target struct field. Use 32bit counter
	 * here to attempt to avoid that.
	 */
	static DEFINE_SPINLOCK(iunique_lock);
	static unsigned int counter;
	ino_t res;

	rcu_read_lock();
	spin_lock(&iunique_lock);
	do {
		if (counter <= max_reserved)
			counter = max_reserved + 1;
		res = counter++;
	} while (!test_inode_iunique(sb, res));
	spin_unlock(&iunique_lock);
	rcu_read_unlock();

	return res;
}
EXPORT_SYMBOL(iunique);
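
/*
 * Example (illustrative sketch): a filesystem with no natural inode
 * numbering asking for a unique number above its reserved range, where
 * MYFS_MAX_RESERVED_INO is a hypothetical constant.
 *
 *	inode->i_ino = iunique(sb, MYFS_MAX_RESERVED_INO);
 */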

struct inode *igrab(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	if (!(inode->i_state & (I_FREEING|I_WILL_FREE))) {
		__iget(inode);
		spin_unlock(&inode->i_lock);
	} else {
		spin_unlock(&inode->i_lock);
		/*
		 * Handle the case where s_op->clear_inode has not been
		 * called yet, and somebody is calling igrab
		 * while the inode is getting freed.
		 */
		inode = NULL;
	}
	return inode;
}
EXPORT_SYMBOL(igrab);

/**
 * ilookup5_nowait - search for an inode in the inode cache
 * @sb: super block of file system to search
 * @hashval: hash value (usually inode number) to search for
 * @test: callback used for comparisons between inodes
 * @data: opaque data pointer to pass to @test
 *
 * Search for the inode specified by @hashval and @data in the inode cache.
 * If the inode is in the cache, the inode is returned with an incremented
 * reference count.
 *
 * Note: I_NEW is not waited upon so you have to be very careful what you do
 * with the returned inode. You probably should be using ilookup5() instead.
 *
 * Note2: @test is called with the inode_hash_lock held, so can't sleep.
 */
struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	inode = find_inode(sb, head, test, data);
	spin_unlock(&inode_hash_lock);

	return IS_ERR(inode) ? NULL : inode;
}
EXPORT_SYMBOL(ilookup5_nowait);

/**
 * ilookup5 - search for an inode in the inode cache
 * @sb: super block of file system to search
 * @hashval: hash value (usually inode number) to search for
 * @test: callback used for comparisons between inodes
 * @data: opaque data pointer to pass to @test
 *
 * Search for the inode specified by @hashval and @data in the inode cache,
 * and if the inode is in the cache, return it with an incremented
 * reference count. This waits on I_NEW before returning the inode.
 *
 * This is a generalized version of ilookup() for file systems where the
 * inode number is not sufficient for unique identification of an inode.
 *
 * Note: @test is called with the inode_hash_lock held, so can't sleep.
 */
struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	struct inode *inode;
again:
	inode = ilookup5_nowait(sb, hashval, test, data);
	if (inode) {
		wait_on_inode(inode);
		if (unlikely(inode_unhashed(inode))) {
			iput(inode);
			goto again;
		}
	}
	return inode;
}
EXPORT_SYMBOL(ilookup5);

/**
 * ilookup - search for an inode in the inode cache
 * @sb: super block of file system to search
 * @ino: inode number to search for
 *
 * Search for the inode @ino in the inode cache, and if the inode is in the
 * cache, the inode is returned with an incremented reference count.
 */
struct inode *ilookup(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *head = inode_hashtable + hash(sb, ino);
	struct inode *inode;
again:
	spin_lock(&inode_hash_lock);
	inode = find_inode_fast(sb, head, ino);
	spin_unlock(&inode_hash_lock);

	if (inode) {
		if (IS_ERR(inode))
			return NULL;
		wait_on_inode(inode);
		if (unlikely(inode_unhashed(inode))) {
			iput(inode);
			goto again;
		}
	}
	return inode;
}
EXPORT_SYMBOL(ilookup);

/**
 * find_inode_nowait - find an inode in the inode cache
 * @sb: super block of file system to search
 * @hashval: hash value (usually inode number) to search for
 * @match: callback used for comparisons between inodes
 * @data: opaque data pointer to pass to @match
 *
 * Search for the inode specified by @hashval and @data in the inode
 * cache, where the helper function @match will return 0 if the inode
 * does not match, 1 if the inode does match, and -1 if the search
 * should be stopped. The @match function must be responsible for
 * taking the i_lock spin_lock and checking i_state for an inode being
 * freed or being initialized, and incrementing the reference count
 * before returning 1. It also must not sleep, since it is called with
 * the inode_hash_lock spinlock held.
 *
 * This is an even more generalized version of ilookup5() for when the
 * function must never block --- find_inode() can block in
 * __wait_on_freeing_inode() --- or when the caller can not increment
 * the reference count because the resulting iput() might cause an
 * inode eviction. The tradeoff is that the @match function must be
 * very carefully implemented.
 */
struct inode *find_inode_nowait(struct super_block *sb,
				unsigned long hashval,
				int (*match)(struct inode *, unsigned long,
					     void *),
				void *data)
{
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
	struct inode *inode, *ret_inode = NULL;
	int mval;

	spin_lock(&inode_hash_lock);
	hlist_for_each_entry(inode, head, i_hash) {
		if (inode->i_sb != sb)
			continue;
		mval = match(inode, hashval, data);
		if (mval == 0)
			continue;
		if (mval == 1)
			ret_inode = inode;
		goto out;
	}
out:
	spin_unlock(&inode_hash_lock);
	return ret_inode;
}
EXPORT_SYMBOL(find_inode_nowait);
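
/*
 * Example (hypothetical sketch): a @match callback obeying the rules
 * above. It takes i_lock itself, skips inodes that are being freed or
 * are still being set up, and takes a reference before returning 1.
 *
 *	static int myfs_match(struct inode *inode, unsigned long hashval,
 *			      void *data)
 *	{
 *		if (inode->i_ino != hashval)
 *			return 0;
 *		spin_lock(&inode->i_lock);
 *		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
 *			spin_unlock(&inode->i_lock);
 *			return 0;
 *		}
 *		__iget(inode);
 *		spin_unlock(&inode->i_lock);
 *		return 1;
 *	}
 */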

/**
 * find_inode_rcu - find an inode in the inode cache
 * @sb: Super block of file system to search
 * @hashval: Key to hash
 * @test: Function to test match on an inode
 * @data: Data for test function
 *
 * Search for the inode specified by @hashval and @data in the inode cache,
 * where the helper function @test will return 0 if the inode does not match
 * and 1 if it does. The @test function must be responsible for taking the
 * i_lock spin_lock and checking i_state for an inode being freed or being
 * initialized.
 *
 * If successful, this will return the inode for which the @test function
 * returned 1 and NULL otherwise.
 *
 * The @test function is not permitted to take a ref on any inode presented.
 * It is also not permitted to sleep.
 *
 * The caller must hold the RCU read lock.
 */
struct inode *find_inode_rcu(struct super_block *sb, unsigned long hashval,
			     int (*test)(struct inode *, void *), void *data)
{
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
	struct inode *inode;

	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
			 "suspicious find_inode_rcu() usage");

	hlist_for_each_entry_rcu(inode, head, i_hash) {
		if (inode->i_sb == sb &&
		    !(READ_ONCE(inode->i_state) & (I_FREEING | I_WILL_FREE)) &&
		    test(inode, data))
			return inode;
	}
	return NULL;
}
EXPORT_SYMBOL(find_inode_rcu);

/**
 * find_inode_by_ino_rcu - Find an inode in the inode cache
 * @sb: Super block of file system to search
 * @ino: The inode number to match
 *
 * Search for the inode specified by @ino in the inode cache.
 *
 * If successful, this will return the matching inode and NULL otherwise.
 *
 * No reference is taken on the inode returned.
 *
 * The caller must hold the RCU read lock.
 */
struct inode *find_inode_by_ino_rcu(struct super_block *sb,
				    unsigned long ino)
{
	struct hlist_head *head = inode_hashtable + hash(sb, ino);
	struct inode *inode;

	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
			 "suspicious find_inode_by_ino_rcu() usage");

	hlist_for_each_entry_rcu(inode, head, i_hash) {
		if (inode->i_ino == ino &&
		    inode->i_sb == sb &&
		    !(READ_ONCE(inode->i_state) & (I_FREEING | I_WILL_FREE)))
			return inode;
	}
	return NULL;
}
EXPORT_SYMBOL(find_inode_by_ino_rcu);

int insert_inode_locked(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	ino_t ino = inode->i_ino;
	struct hlist_head *head = inode_hashtable + hash(sb, ino);

	while (1) {
		struct inode *old = NULL;
		spin_lock(&inode_hash_lock);
		hlist_for_each_entry(old, head, i_hash) {
			if (old->i_ino != ino)
				continue;
			if (old->i_sb != sb)
				continue;
			spin_lock(&old->i_lock);
			if (old->i_state & (I_FREEING|I_WILL_FREE)) {
				spin_unlock(&old->i_lock);
				continue;
			}
			break;
		}
		if (likely(!old)) {
			spin_lock(&inode->i_lock);
			inode->i_state |= I_NEW | I_CREATING;
			hlist_add_head_rcu(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			spin_unlock(&inode_hash_lock);
			return 0;
		}
		if (unlikely(old->i_state & I_CREATING)) {
			spin_unlock(&old->i_lock);
			spin_unlock(&inode_hash_lock);
			return -EBUSY;
		}
		__iget(old);
		spin_unlock(&old->i_lock);
		spin_unlock(&inode_hash_lock);
		wait_on_inode(old);
		if (unlikely(!inode_unhashed(old))) {
			iput(old);
			return -EBUSY;
		}
		iput(old);
	}
}
EXPORT_SYMBOL(insert_inode_locked);
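
/*
 * Example (hypothetical sketch): a create path claiming a freshly chosen
 * inode number. -EBUSY from insert_inode_locked() means another live
 * inode already owns that number.
 *
 *	inode->i_ino = ino;
 *	if (insert_inode_locked(inode) < 0) {
 *		... the number is taken; back out ...
 *		iput(inode);
 *		return ERR_PTR(-EBUSY);
 *	}
 *	... write the new inode out, then ...
 *	unlock_new_inode(inode);
 */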

int insert_inode_locked4(struct inode *inode, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	struct inode *old;

	inode->i_state |= I_CREATING;
	old = inode_insert5(inode, hashval, test, NULL, data);

	if (old != inode) {
		iput(old);
		return -EBUSY;
	}
	return 0;
}
EXPORT_SYMBOL(insert_inode_locked4);


int generic_delete_inode(struct inode *inode)
{
	return 1;
}
EXPORT_SYMBOL(generic_delete_inode);

/*
 * Called when we're dropping the last reference
 * to an inode.
 *
 * Call the FS "drop_inode()" function, defaulting to
 * the legacy UNIX filesystem behaviour. If it tells
 * us to evict the inode, do so. Otherwise, retain the
 * inode in cache if the fs is alive, or sync and evict
 * it if the fs is shutting down.
 */
static void iput_final(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	const struct super_operations *op = inode->i_sb->s_op;
	unsigned long state;
	int drop;

	WARN_ON(inode->i_state & I_NEW);

	if (op->drop_inode)
		drop = op->drop_inode(inode);
	else
		drop = generic_drop_inode(inode);

	if (!drop &&
	    !(inode->i_state & I_DONTCACHE) &&
	    (sb->s_flags & SB_ACTIVE)) {
		__inode_add_lru(inode, true);
		spin_unlock(&inode->i_lock);
		return;
	}

	state = inode->i_state;
	if (!drop) {
		WRITE_ONCE(inode->i_state, state | I_WILL_FREE);
		spin_unlock(&inode->i_lock);

		write_inode_now(inode, 1);

		spin_lock(&inode->i_lock);
		state = inode->i_state;
		WARN_ON(state & I_NEW);
		state &= ~I_WILL_FREE;
	}

	WRITE_ONCE(inode->i_state, state | I_FREEING);
	if (!list_empty(&inode->i_lru))
		inode_lru_list_del(inode);
	spin_unlock(&inode->i_lock);

	evict(inode);
}

/**
 * iput - put an inode
 * @inode: inode to put
 *
 * Puts an inode, dropping its usage count. If the inode use count hits
 * zero, the inode is then freed and may also be destroyed.
 *
 * Consequently, iput() can sleep.
 */
void iput(struct inode *inode)
{
	if (!inode)
		return;
	BUG_ON(inode->i_state & I_CLEAR);
retry:
	if (atomic_dec_and_lock(&inode->i_count, &inode->i_lock)) {
		if (inode->i_nlink && (inode->i_state & I_DIRTY_TIME)) {
			atomic_inc(&inode->i_count);
			spin_unlock(&inode->i_lock);
			trace_writeback_lazytime_iput(inode);
			mark_inode_dirty_sync(inode);
			goto retry;
		}
		iput_final(inode);
	}
}
EXPORT_SYMBOL(iput);

#ifdef CONFIG_BLOCK
/**
 * bmap - find a block number in a file
 * @inode: inode owning the block number being requested
 * @block: pointer containing the block to find
 *
 * Replaces the value in ``*block`` with the block number on the device holding
 * the data corresponding to the requested block number in the file.
 * That is, asked for block 4 of inode 1 the function will replace the
 * 4 in ``*block`` with the disk block, relative to the start of the disk,
 * that holds that block of the file.
 *
 * Returns -EINVAL in case of error, 0 otherwise. If mapping falls into a
 * hole, returns 0 and ``*block`` is also set to 0.
 */
int bmap(struct inode *inode, sector_t *block)
{
	if (!inode->i_mapping->a_ops->bmap)
		return -EINVAL;

	*block = inode->i_mapping->a_ops->bmap(inode->i_mapping, *block);
	return 0;
}
EXPORT_SYMBOL(bmap);
#endif
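
/*
 * Example (illustrative sketch): mapping logical block 4 of a file to a
 * block on the underlying device, as the FIBMAP ioctl does internally.
 *
 *	sector_t block = 4;
 *
 *	if (bmap(inode, &block) == 0 && block != 0)
 *		... "block" now holds the on-disk block number ...
 */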

/*
 * With relative atime, only update atime if the previous atime is
 * earlier than or equal to either the ctime or mtime,
 * or if at least a day has passed since the last atime update.
 */
static bool relatime_need_update(struct vfsmount *mnt, struct inode *inode,
			     struct timespec64 now)
{
	struct timespec64 atime, mtime, ctime;

	if (!(mnt->mnt_flags & MNT_RELATIME))
		return true;
	/*
	 * Is mtime younger than or equal to atime? If yes, update atime:
	 */
	atime = inode_get_atime(inode);
	mtime = inode_get_mtime(inode);
	if (timespec64_compare(&mtime, &atime) >= 0)
		return true;
	/*
	 * Is ctime younger than or equal to atime? If yes, update atime:
	 */
	ctime = inode_get_ctime(inode);
	if (timespec64_compare(&ctime, &atime) >= 0)
		return true;

	/*
	 * Is the previous atime value older than a day? If yes,
	 * update atime:
	 */
	if ((long)(now.tv_sec - atime.tv_sec) >= 24*60*60)
		return true;
	/*
	 * Good, we can skip the atime update:
	 */
	return false;
}

/**
 * inode_update_timestamps - update the timestamps on the inode
 * @inode: inode to be updated
 * @flags: S_* flags that need to be updated
1838 *
1839 * The update_time function is called when an inode's timestamps need to be
1840 * updated for a read or write operation. This function handles updating the
1841 * actual timestamps. It's up to the caller to ensure that the inode is marked
1842 * dirty appropriately.
1843 *
1844 * In the case where any of S_MTIME, S_CTIME, or S_VERSION need to be updated,
1845 * attempt to update all three of them. S_ATIME updates can be handled
1846 * independently of the rest.
1847 *
1848 * Returns a set of S_* flags indicating which values changed.
1849 */
1850int inode_update_timestamps(struct inode *inode, int flags)
1851{
1852 int updated = 0;
1853 struct timespec64 now;
1854
1855 if (flags & (S_MTIME|S_CTIME|S_VERSION)) {
1856 struct timespec64 ctime = inode_get_ctime(inode);
1857 struct timespec64 mtime = inode_get_mtime(inode);
1858
1859 now = inode_set_ctime_current(inode);
1860 if (!timespec64_equal(&now, &ctime))
1861 updated |= S_CTIME;
1862 if (!timespec64_equal(&now, &mtime)) {
1863 inode_set_mtime_to_ts(inode, now);
1864 updated |= S_MTIME;
1865 }
1866 if (IS_I_VERSION(inode) && inode_maybe_inc_iversion(inode, updated))
1867 updated |= S_VERSION;
1868 } else {
1869 now = current_time(inode);
1870 }
1871
1872 if (flags & S_ATIME) {
1873 struct timespec64 atime = inode_get_atime(inode);
1874
1875 if (!timespec64_equal(&now, &atime)) {
1876 inode_set_atime_to_ts(inode, now);
1877 updated |= S_ATIME;
1878 }
1879 }
1880 return updated;
1881}
1882EXPORT_SYMBOL(inode_update_timestamps);
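
/*
 * Editor's sketch (hypothetical ->update_time() method, not part of the
 * original file): a filesystem that wants to control how timestamp
 * updates are persisted can apply them with inode_update_timestamps()
 * and dirty the inode itself.
 */
static inline int example_update_time(struct inode *inode, int flags)
{
	int updated = inode_update_timestamps(inode, flags);

	if (updated)
		__mark_inode_dirty(inode, I_DIRTY_SYNC);
	return 0;
}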
1883
1884/**
1885 * generic_update_time - update the timestamps on the inode
1886 * @inode: inode to be updated
1887 * @flags: S_* flags that needed to be updated
1888 *
1889 * The update_time function is called when an inode's timestamps need to be
1890 * updated for a read or write operation. In the case where any of S_MTIME,
1891 * S_CTIME, or S_VERSION need to be updated, we attempt to update all three
1892 * of them. S_ATIME updates can be handled independently of the rest.
1893 *
1894 * Returns a S_* mask indicating which fields were updated.
1895 */
1896int generic_update_time(struct inode *inode, int flags)
1897{
1898 int updated = inode_update_timestamps(inode, flags);
1899 int dirty_flags = 0;
1900
1901 if (updated & (S_ATIME|S_MTIME|S_CTIME))
1902 dirty_flags = inode->i_sb->s_flags & SB_LAZYTIME ? I_DIRTY_TIME : I_DIRTY_SYNC;
1903 if (updated & S_VERSION)
1904 dirty_flags |= I_DIRTY_SYNC;
1905 __mark_inode_dirty(inode, dirty_flags);
1906 return updated;
1907}
1908EXPORT_SYMBOL(generic_update_time);
1909
1910/*
1911 * This does the actual work of updating an inode's time or version. The
1912 * caller must have called mnt_want_write() before calling this.
1913 */
1914int inode_update_time(struct inode *inode, int flags)
1915{
1916 if (inode->i_op->update_time)
1917 return inode->i_op->update_time(inode, flags);
1918 generic_update_time(inode, flags);
1919 return 0;
1920}
1921EXPORT_SYMBOL(inode_update_time);
1922
1923/**
1924 * atime_needs_update - check whether the access time needs updating
1925 * @path: the &struct path that was accessed
1926 * @inode: inode that was accessed
1927 *
1928 * Check whether the inode's access time should be updated, returning
1929 * true if so. This handles the "noatime" and "nodiratime" mount flags,
1930 * inode-specific "noatime" markers, and the "relatime" update policy.
1931 */
1932bool atime_needs_update(const struct path *path, struct inode *inode)
1933{
1934 struct vfsmount *mnt = path->mnt;
1935 struct timespec64 now, atime;
1936
1937 if (inode->i_flags & S_NOATIME)
1938 return false;
1939
1940 /* Atime updates will likely cause i_uid and i_gid to be written
1941	 * back improperly if their true value is unknown to the vfs.
1942 */
1943 if (HAS_UNMAPPED_ID(mnt_idmap(mnt), inode))
1944 return false;
1945
1946 if (IS_NOATIME(inode))
1947 return false;
1948 if ((inode->i_sb->s_flags & SB_NODIRATIME) && S_ISDIR(inode->i_mode))
1949 return false;
1950
1951 if (mnt->mnt_flags & MNT_NOATIME)
1952 return false;
1953 if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))
1954 return false;
1955
1956 now = current_time(inode);
1957
1958 if (!relatime_need_update(mnt, inode, now))
1959 return false;
1960
1961 atime = inode_get_atime(inode);
1962 if (timespec64_equal(&atime, &now))
1963 return false;
1964
1965 return true;
1966}
1967
1968void touch_atime(const struct path *path)
1969{
1970 struct vfsmount *mnt = path->mnt;
1971 struct inode *inode = d_inode(path->dentry);
1972
1973 if (!atime_needs_update(path, inode))
1974 return;
1975
1976 if (!sb_start_write_trylock(inode->i_sb))
1977 return;
1978
1979 if (mnt_get_write_access(mnt) != 0)
1980 goto skip_update;
1981 /*
1982 * File systems can error out when updating inodes if they need to
1983 * allocate new space to modify an inode (such is the case for
1984 * Btrfs), but since we touch atime while walking down the path we
1985 * really don't care if we failed to update the atime of the file,
1986 * so just ignore the return value.
1987 * We may also fail on filesystems that have the ability to make parts
1988 * of the fs read only, e.g. subvolumes in Btrfs.
1989 */
1990 inode_update_time(inode, S_ATIME);
1991 mnt_put_write_access(mnt);
1992skip_update:
1993 sb_end_write(inode->i_sb);
1994}
1995EXPORT_SYMBOL(touch_atime);
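
/*
 * Editor's note: the snippet below is roughly what the file_accessed()
 * helper in <linux/fs.h> does on read paths; shown here as a usage
 * sketch for touch_atime().
 */
static inline void example_file_accessed(struct file *file)
{
	if (!(file->f_flags & O_NOATIME))
		touch_atime(&file->f_path);
}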
1996
1997/*
1998 * Return mask of changes for notify_change() that need to be done as a
1999 * response to write or truncate. Return 0 if nothing has to be changed.
2000 * Negative value on error (change should be denied).
2001 */
2002int dentry_needs_remove_privs(struct mnt_idmap *idmap,
2003 struct dentry *dentry)
2004{
2005 struct inode *inode = d_inode(dentry);
2006 int mask = 0;
2007 int ret;
2008
2009 if (IS_NOSEC(inode))
2010 return 0;
2011
2012 mask = setattr_should_drop_suidgid(idmap, inode);
2013 ret = security_inode_need_killpriv(dentry);
2014 if (ret < 0)
2015 return ret;
2016 if (ret)
2017 mask |= ATTR_KILL_PRIV;
2018 return mask;
2019}
2020
2021static int __remove_privs(struct mnt_idmap *idmap,
2022 struct dentry *dentry, int kill)
2023{
2024 struct iattr newattrs;
2025
2026 newattrs.ia_valid = ATTR_FORCE | kill;
2027 /*
2028 * Note we call this on write, so notify_change will not
2029 * encounter any conflicting delegations:
2030 */
2031 return notify_change(idmap, dentry, &newattrs, NULL);
2032}
2033
2034static int __file_remove_privs(struct file *file, unsigned int flags)
2035{
2036 struct dentry *dentry = file_dentry(file);
2037 struct inode *inode = file_inode(file);
2038 int error = 0;
2039 int kill;
2040
2041 if (IS_NOSEC(inode) || !S_ISREG(inode->i_mode))
2042 return 0;
2043
2044 kill = dentry_needs_remove_privs(file_mnt_idmap(file), dentry);
2045 if (kill < 0)
2046 return kill;
2047
2048 if (kill) {
2049 if (flags & IOCB_NOWAIT)
2050 return -EAGAIN;
2051
2052 error = __remove_privs(file_mnt_idmap(file), dentry, kill);
2053 }
2054
2055 if (!error)
2056 inode_has_no_xattr(inode);
2057 return error;
2058}
2059
2060/**
2061 * file_remove_privs - remove special file privileges (suid, capabilities)
2062 * @file: file to remove privileges from
2063 *
2064 * When a file is modified by a write or truncation, ensure that special
2065 * file privileges are removed.
2066 *
2067 * Return: 0 on success, negative errno on failure.
2068 */
2069int file_remove_privs(struct file *file)
2070{
2071 return __file_remove_privs(file, 0);
2072}
2073EXPORT_SYMBOL(file_remove_privs);
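
/*
 * Editor's sketch (hypothetical write prologue, not part of the original
 * file): strip setuid/setgid bits and capabilities before letting data
 * be written to the file.
 */
static inline int example_write_prologue(struct file *file)
{
	int err = file_remove_privs(file);

	if (err)
		return err;
	/* ... safe to copy in the new data now ... */
	return 0;
}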
2074
2075static int inode_needs_update_time(struct inode *inode)
2076{
2077 int sync_it = 0;
2078 struct timespec64 now = current_time(inode);
2079 struct timespec64 ts;
2080
2081 /* First try to exhaust all avenues to not sync */
2082 if (IS_NOCMTIME(inode))
2083 return 0;
2084
2085 ts = inode_get_mtime(inode);
2086 if (!timespec64_equal(&ts, &now))
2087 sync_it = S_MTIME;
2088
2089 ts = inode_get_ctime(inode);
2090 if (!timespec64_equal(&ts, &now))
2091 sync_it |= S_CTIME;
2092
2093 if (IS_I_VERSION(inode) && inode_iversion_need_inc(inode))
2094 sync_it |= S_VERSION;
2095
2096 return sync_it;
2097}
2098
2099static int __file_update_time(struct file *file, int sync_mode)
2100{
2101 int ret = 0;
2102 struct inode *inode = file_inode(file);
2103
2104 /* try to update time settings */
2105 if (!mnt_get_write_access_file(file)) {
2106 ret = inode_update_time(inode, sync_mode);
2107 mnt_put_write_access_file(file);
2108 }
2109
2110 return ret;
2111}
2112
2113/**
2114 * file_update_time - update mtime and ctime
2115 * @file: file accessed
2116 *
2117 * Update the mtime and ctime members of an inode and mark the inode for
2118 * writeback. Note that this function is meant exclusively for usage in
2119 * the file write path of filesystems, and filesystems may choose to
2120 * explicitly ignore updates via this function with the _NOCMTIME inode
2121 * flag, e.g. for network filesystems where these timestamps are handled
2122 * by the server. This can return an error for file systems that need to
2123 * allocate space in order to update an inode.
2124 *
2125 * Return: 0 on success, negative errno on failure.
2126 */
2127int file_update_time(struct file *file)
2128{
2129 int ret;
2130 struct inode *inode = file_inode(file);
2131
2132 ret = inode_needs_update_time(inode);
2133 if (ret <= 0)
2134 return ret;
2135
2136 return __file_update_time(file, ret);
2137}
2138EXPORT_SYMBOL(file_update_time);
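
/*
 * Editor's sketch (hypothetical ->page_mkwrite() handler): bump
 * mtime/ctime before a shared-writable page is dirtied, as the generic
 * fault path does.
 */
static inline vm_fault_t example_page_mkwrite(struct vm_fault *vmf)
{
	struct file *file = vmf->vma->vm_file;
	struct inode *inode = file_inode(file);

	sb_start_pagefault(inode->i_sb);
	file_update_time(file);
	/* ... lock the page and make it writable ... */
	sb_end_pagefault(inode->i_sb);
	return VM_FAULT_LOCKED;
}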
2139
2140/**
2141 * file_modified_flags - handle mandated vfs changes when modifying a file
2142 * @file: file that was modified
2143 * @flags: kiocb flags
2144 *
2145 * When a file has been modified, ensure that special
2146 * file privileges are removed and time settings are updated.
2147 *
2148 * If IOCB_NOWAIT is set, special file privileges will not be removed and
2149 * time settings will not be updated. It will return -EAGAIN.
2150 *
2151 * Context: Caller must hold the file's inode lock.
2152 *
2153 * Return: 0 on success, negative errno on failure.
2154 */
2155static int file_modified_flags(struct file *file, int flags)
2156{
2157 int ret;
2158 struct inode *inode = file_inode(file);
2159
2160 /*
2161 * Clear the security bits if the process is not being run by root.
2162 * This keeps people from modifying setuid and setgid binaries.
2163 */
2164 ret = __file_remove_privs(file, flags);
2165 if (ret)
2166 return ret;
2167
2168 if (unlikely(file->f_mode & FMODE_NOCMTIME))
2169 return 0;
2170
2171 ret = inode_needs_update_time(inode);
2172 if (ret <= 0)
2173 return ret;
2174 if (flags & IOCB_NOWAIT)
2175 return -EAGAIN;
2176
2177 return __file_update_time(file, ret);
2178}
2179
2180/**
2181 * file_modified - handle mandated vfs changes when modifying a file
2182 * @file: file that was modified
2183 *
2184 * When a file has been modified, ensure that special
2185 * file privileges are removed and time settings are updated.
2186 *
2187 * Context: Caller must hold the file's inode lock.
2188 *
2189 * Return: 0 on success, negative errno on failure.
2190 */
2191int file_modified(struct file *file)
2192{
2193 return file_modified_flags(file, 0);
2194}
2195EXPORT_SYMBOL(file_modified);
2196
2197/**
2198 * kiocb_modified - handle mandated vfs changes when modifying a file
2199 * @iocb: iocb that was modified
2200 *
2201 * When a file has been modified, ensure that special
2202 * file privileges are removed and time settings are updated.
2203 *
2204 * Context: Caller must hold the file's inode lock.
2205 *
2206 * Return: 0 on success, negative errno on failure.
2207 */
2208int kiocb_modified(struct kiocb *iocb)
2209{
2210 return file_modified_flags(iocb->ki_filp, iocb->ki_flags);
2211}
2212EXPORT_SYMBOL_GPL(kiocb_modified);
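
/*
 * Editor's sketch (hypothetical ->write_iter()): kiocb_modified() is
 * called with the inode lock held, so IOCB_NOWAIT writers see -EAGAIN
 * instead of blocking on privilege/timestamp updates.
 */
static inline ssize_t example_write_iter(struct kiocb *iocb,
					 struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	inode_lock(inode);
	ret = kiocb_modified(iocb);
	if (!ret) {
		/* ... perform the actual write of @from ... */
	}
	inode_unlock(inode);
	return ret;
}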
2213
2214int inode_needs_sync(struct inode *inode)
2215{
2216 if (IS_SYNC(inode))
2217 return 1;
2218 if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode))
2219 return 1;
2220 return 0;
2221}
2222EXPORT_SYMBOL(inode_needs_sync);
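
/*
 * Editor's sketch (hypothetical metadata-commit helper): honour O_SYNC
 * files and "dirsync" directories by writing the inode out immediately.
 */
static inline int example_commit_metadata(struct inode *inode)
{
	mark_inode_dirty(inode);
	if (inode_needs_sync(inode))
		return write_inode_now(inode, 1);	/* wait for it */
	return 0;
}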
2223
2224/*
2225 * If we try to find an inode in the inode hash while it is being
2226 * deleted, we have to wait until the filesystem completes its
2227 * deletion before reporting that it isn't found. This function waits
2228 * until the deletion _might_ have completed. Callers are responsible
2229 * to recheck inode state.
2230 *
2231 * It doesn't matter if I_NEW is not set initially, a call to
2232 * wake_up_bit(&inode->i_state, __I_NEW) after removing from the hash list
2233 * will DTRT.
2234 */
2235static void __wait_on_freeing_inode(struct inode *inode)
2236{
2237 wait_queue_head_t *wq;
2238 DEFINE_WAIT_BIT(wait, &inode->i_state, __I_NEW);
2239 wq = bit_waitqueue(&inode->i_state, __I_NEW);
2240 prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
2241 spin_unlock(&inode->i_lock);
2242 spin_unlock(&inode_hash_lock);
2243 schedule();
2244 finish_wait(wq, &wait.wq_entry);
2245 spin_lock(&inode_hash_lock);
2246}
2247
2248static __initdata unsigned long ihash_entries;
2249static int __init set_ihash_entries(char *str)
2250{
2251 if (!str)
2252 return 0;
2253 ihash_entries = simple_strtoul(str, &str, 0);
2254 return 1;
2255}
2256__setup("ihash_entries=", set_ihash_entries);
2257
2258/*
2259 * Initialize the waitqueues and inode hash table.
2260 */
2261void __init inode_init_early(void)
2262{
2263 /* If hashes are distributed across NUMA nodes, defer
2264 * hash allocation until vmalloc space is available.
2265 */
2266 if (hashdist)
2267 return;
2268
2269 inode_hashtable =
2270 alloc_large_system_hash("Inode-cache",
2271 sizeof(struct hlist_head),
2272 ihash_entries,
2273 14,
2274 HASH_EARLY | HASH_ZERO,
2275 &i_hash_shift,
2276 &i_hash_mask,
2277 0,
2278 0);
2279}
2280
2281void __init inode_init(void)
2282{
2283 /* inode slab cache */
2284 inode_cachep = kmem_cache_create("inode_cache",
2285 sizeof(struct inode),
2286 0,
2287 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
2288 SLAB_MEM_SPREAD|SLAB_ACCOUNT),
2289 init_once);
2290
2291 /* Hash may have been set up in inode_init_early */
2292 if (!hashdist)
2293 return;
2294
2295 inode_hashtable =
2296 alloc_large_system_hash("Inode-cache",
2297 sizeof(struct hlist_head),
2298 ihash_entries,
2299 14,
2300 HASH_ZERO,
2301 &i_hash_shift,
2302 &i_hash_mask,
2303 0,
2304 0);
2305}
2306
2307void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev)
2308{
2309 inode->i_mode = mode;
2310 if (S_ISCHR(mode)) {
2311 inode->i_fop = &def_chr_fops;
2312 inode->i_rdev = rdev;
2313 } else if (S_ISBLK(mode)) {
2314 if (IS_ENABLED(CONFIG_BLOCK))
2315 inode->i_fop = &def_blk_fops;
2316 inode->i_rdev = rdev;
2317 } else if (S_ISFIFO(mode))
2318 inode->i_fop = &pipefifo_fops;
2319 else if (S_ISSOCK(mode))
2320 ; /* leave it no_open_fops */
2321 else
2322 printk(KERN_DEBUG "init_special_inode: bogus i_mode (%o) for"
2323 " inode %s:%lu\n", mode, inode->i_sb->s_id,
2324 inode->i_ino);
2325}
2326EXPORT_SYMBOL(init_special_inode);
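
/*
 * Editor's sketch (hypothetical mknod path): anything that is not a
 * regular file, directory or symlink is handed to init_special_inode().
 */
static inline void example_fill_inode(struct inode *inode, umode_t mode,
				      dev_t rdev)
{
	if (!S_ISREG(mode) && !S_ISDIR(mode) && !S_ISLNK(mode))
		init_special_inode(inode, mode, rdev);
}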
2327
2328/**
2329 * inode_init_owner - init uid, gid and mode for a new inode per POSIX rules
2330 * @idmap: idmap of the mount the inode was created from
2331 * @inode: New inode
2332 * @dir: Directory inode
2333 * @mode: mode of the new inode
2334 *
2335 * If the inode has been created through an idmapped mount the idmap of
2336 * the vfsmount must be passed through @idmap. This function will then take
2337 * care to map the inode according to @idmap before checking permissions
2338 * and initializing i_uid and i_gid. On non-idmapped mounts or if permission
2339 * checking is to be performed on the raw inode simply pass @nop_mnt_idmap.
2340 */
2341void inode_init_owner(struct mnt_idmap *idmap, struct inode *inode,
2342 const struct inode *dir, umode_t mode)
2343{
2344 inode_fsuid_set(inode, idmap);
2345 if (dir && dir->i_mode & S_ISGID) {
2346 inode->i_gid = dir->i_gid;
2347
2348 /* Directories are special, and always inherit S_ISGID */
2349 if (S_ISDIR(mode))
2350 mode |= S_ISGID;
2351 } else
2352 inode_fsgid_set(inode, idmap);
2353 inode->i_mode = mode;
2354}
2355EXPORT_SYMBOL(inode_init_owner);
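
/*
 * Editor's sketch (hypothetical ->create() fragment): allocate a child
 * inode and let inode_init_owner() handle idmapped-mount translation and
 * setgid-directory inheritance in one place.
 */
static inline struct inode *example_new_child(struct mnt_idmap *idmap,
					      struct inode *dir, umode_t mode)
{
	struct inode *inode = new_inode(dir->i_sb);

	if (inode)
		inode_init_owner(idmap, inode, dir, mode);
	return inode;
}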
2356
2357/**
2358 * inode_owner_or_capable - check current task permissions to inode
2359 * @idmap: idmap of the mount the inode was found from
2360 * @inode: inode being checked
2361 *
2362 * Return true if current either has CAP_FOWNER in a namespace with the
2363 * inode owner uid mapped, or owns the file.
2364 *
2365 * If the inode has been found through an idmapped mount the idmap of
2366 * the vfsmount must be passed through @idmap. This function will then take
2367 * care to map the inode according to @idmap before checking permissions.
2368 * On non-idmapped mounts or if permission checking is to be performed on the
2369 * raw inode simply pass @nop_mnt_idmap.
2370 */
2371bool inode_owner_or_capable(struct mnt_idmap *idmap,
2372 const struct inode *inode)
2373{
2374 vfsuid_t vfsuid;
2375 struct user_namespace *ns;
2376
2377 vfsuid = i_uid_into_vfsuid(idmap, inode);
2378 if (vfsuid_eq_kuid(vfsuid, current_fsuid()))
2379 return true;
2380
2381 ns = current_user_ns();
2382 if (vfsuid_has_mapping(ns, vfsuid) && ns_capable(ns, CAP_FOWNER))
2383 return true;
2384 return false;
2385}
2386EXPORT_SYMBOL(inode_owner_or_capable);
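
/*
 * Editor's sketch: chattr-style ioctls typically gate attribute changes
 * on this check (hypothetical helper, not part of the original file).
 */
static inline int example_ioctl_setflags_check(struct mnt_idmap *idmap,
					       struct inode *inode)
{
	if (!inode_owner_or_capable(idmap, inode))
		return -EPERM;
	return 0;
}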
2387
2388/*
2389 * Direct i/o helper functions
2390 */
2391static void __inode_dio_wait(struct inode *inode)
2392{
2393 wait_queue_head_t *wq = bit_waitqueue(&inode->i_state, __I_DIO_WAKEUP);
2394 DEFINE_WAIT_BIT(q, &inode->i_state, __I_DIO_WAKEUP);
2395
2396 do {
2397 prepare_to_wait(wq, &q.wq_entry, TASK_UNINTERRUPTIBLE);
2398 if (atomic_read(&inode->i_dio_count))
2399 schedule();
2400 } while (atomic_read(&inode->i_dio_count));
2401 finish_wait(wq, &q.wq_entry);
2402}
2403
2404/**
2405 * inode_dio_wait - wait for outstanding DIO requests to finish
2406 * @inode: inode to wait for
2407 *
2408 * Waits for all pending direct I/O requests to finish so that we can
2409 * proceed with a truncate or equivalent operation.
2410 *
2411 * Must be called under a lock that serializes taking new references
2412 * to i_dio_count, usually by inode->i_mutex.
2413 */
2414void inode_dio_wait(struct inode *inode)
2415{
2416 if (atomic_read(&inode->i_dio_count))
2417 __inode_dio_wait(inode);
2418}
2419EXPORT_SYMBOL(inode_dio_wait);
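
/*
 * Editor's sketch (hypothetical truncate fragment, inode lock assumed
 * held by the caller): drain direct I/O before shrinking the file.
 */
static inline void example_shrink(struct inode *inode, loff_t newsize)
{
	inode_dio_wait(inode);		/* wait out in-flight DIO */
	truncate_setsize(inode, newsize);
}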
2420
2421/*
2422 * inode_set_flags - atomically set some inode flags
2423 *
2424 * Note: the caller should be holding i_mutex, or else be sure that
2425 * they have exclusive access to the inode structure (i.e., while the
2426 * inode is being instantiated). The reason for the cmpxchg() loop
2427 * --- which wouldn't be necessary if all code paths which modify
2428 * i_flags actually followed this rule, is that there is at least one
2429 * code path which doesn't today so we use cmpxchg() out of an abundance
2430 * of caution.
2431 *
2432 * In the long run, i_mutex is overkill, and we should probably look
2433 * at using the i_lock spinlock to protect i_flags, and then make sure
2434 * it is so documented in include/linux/fs.h and that all code follows
2435 * the locking convention!!
2436 */
2437void inode_set_flags(struct inode *inode, unsigned int flags,
2438 unsigned int mask)
2439{
2440 WARN_ON_ONCE(flags & ~mask);
2441 set_mask_bits(&inode->i_flags, mask, flags);
2442}
2443EXPORT_SYMBOL(inode_set_flags);
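
/*
 * Editor's sketch: propagating an on-disk "immutable" attribute into
 * i_flags, in the style of ext4_set_inode_flags() (names hypothetical).
 */
static inline void example_sync_vfs_flags(struct inode *inode, bool immutable)
{
	inode_set_flags(inode, immutable ? S_IMMUTABLE : 0, S_IMMUTABLE);
}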
2444
2445void inode_nohighmem(struct inode *inode)
2446{
2447 mapping_set_gfp_mask(inode->i_mapping, GFP_USER);
2448}
2449EXPORT_SYMBOL(inode_nohighmem);
2450
2451/**
2452 * timestamp_truncate - Truncate timespec to a granularity
2453 * @t: Timespec
2454 * @inode: inode being updated
2455 *
2456 * Truncate a timespec to the granularity supported by the fs
2457 * containing the inode. Always rounds down. The granularity must not
2458 * be 0 nor greater than a second (NSEC_PER_SEC, i.e. 10^9 ns).
2459 */
2460struct timespec64 timestamp_truncate(struct timespec64 t, struct inode *inode)
2461{
2462 struct super_block *sb = inode->i_sb;
2463 unsigned int gran = sb->s_time_gran;
2464
2465 t.tv_sec = clamp(t.tv_sec, sb->s_time_min, sb->s_time_max);
2466 if (unlikely(t.tv_sec == sb->s_time_max || t.tv_sec == sb->s_time_min))
2467 t.tv_nsec = 0;
2468
2469 /* Avoid division in the common cases 1 ns and 1 s. */
2470 if (gran == 1)
2471 ; /* nothing */
2472 else if (gran == NSEC_PER_SEC)
2473 t.tv_nsec = 0;
2474 else if (gran > 1 && gran < NSEC_PER_SEC)
2475 t.tv_nsec -= t.tv_nsec % gran;
2476 else
2477 WARN(1, "invalid file time granularity: %u", gran);
2478 return t;
2479}
2480EXPORT_SYMBOL(timestamp_truncate);
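
/*
 * Editor's sketch (hypothetical utimes-style setter): truncate a
 * caller-supplied time to what the filesystem can represent before
 * storing it.
 */
static inline void example_set_mtime(struct inode *inode,
				     struct timespec64 ts)
{
	inode_set_mtime_to_ts(inode, timestamp_truncate(ts, inode));
}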
2481
2482/**
2483 * current_time - Return FS time
2484 * @inode: inode.
2485 *
2486 * Return the current time truncated to the time granularity supported by
2487 * the fs.
2488 *
2489 * Note that inode and inode->sb cannot be NULL.
2490 * Otherwise, the function warns and returns time without truncation.
2491 */
2492struct timespec64 current_time(struct inode *inode)
2493{
2494 struct timespec64 now;
2495
2496 ktime_get_coarse_real_ts64(&now);
2497 return timestamp_truncate(now, inode);
2498}
2499EXPORT_SYMBOL(current_time);
2500
2501/**
2502 * inode_set_ctime_current - set the ctime to current_time
2503 * @inode: inode
2504 *
2505 * Set the inode->i_ctime to the current value for the inode. Returns
2506 * the current value that was assigned to i_ctime.
2507 */
2508struct timespec64 inode_set_ctime_current(struct inode *inode)
2509{
2510 struct timespec64 now = current_time(inode);
2511
2512 inode_set_ctime(inode, now.tv_sec, now.tv_nsec);
2513 return now;
2514}
2515EXPORT_SYMBOL(inode_set_ctime_current);
2516
2517/**
2518 * in_group_or_capable - check whether caller is CAP_FSETID privileged
2519 * @idmap: idmap of the mount @inode was found from
2520 * @inode: inode to check
2521 * @vfsgid: the new/current vfsgid of @inode
2522 *
2523 * Check whether @vfsgid is in the caller's group list or if the caller is
2524 * privileged with CAP_FSETID over @inode. This can be used to determine
2525 * whether the setgid bit can be kept or must be dropped.
2526 *
2527 * Return: true if the caller is sufficiently privileged, false if not.
2528 */
2529bool in_group_or_capable(struct mnt_idmap *idmap,
2530 const struct inode *inode, vfsgid_t vfsgid)
2531{
2532 if (vfsgid_in_group_p(vfsgid))
2533 return true;
2534 if (capable_wrt_inode_uidgid(idmap, inode, CAP_FSETID))
2535 return true;
2536 return false;
2537}
2538
2539/**
2540 * mode_strip_sgid - handle the sgid bit for non-directories
2541 * @idmap: idmap of the mount the inode was created from
2542 * @dir: parent directory inode
2543 * @mode: mode of the file to be created in @dir
2544 *
2545 * If the @mode of the new file has both the S_ISGID and S_IXGRP bit
2546 * raised and @dir has the S_ISGID bit raised ensure that the caller is
2547 * either in the group of the parent directory or they have CAP_FSETID
2548 * in their user namespace and are privileged over the parent directory.
2549 * In all other cases, strip the S_ISGID bit from @mode.
2550 *
2551 * Return: the new mode to use for the file
2552 */
2553umode_t mode_strip_sgid(struct mnt_idmap *idmap,
2554 const struct inode *dir, umode_t mode)
2555{
2556 if ((mode & (S_ISGID | S_IXGRP)) != (S_ISGID | S_IXGRP))
2557 return mode;
2558 if (S_ISDIR(mode) || !dir || !(dir->i_mode & S_ISGID))
2559 return mode;
2560 if (in_group_or_capable(idmap, dir, i_gid_into_vfsgid(idmap, dir)))
2561 return mode;
2562 return mode & ~S_ISGID;
2563}
2564EXPORT_SYMBOL(mode_strip_sgid);
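
/*
 * Editor's sketch (hypothetical create-path fragment, roughly mirroring
 * the VFS's internal mode preparation): apply the umask, then strip an
 * unearned setgid bit before the new mode reaches the filesystem.
 */
static inline umode_t example_prepare_mode(struct mnt_idmap *idmap,
					   const struct inode *dir,
					   umode_t mode)
{
	mode &= ~current_umask();
	return mode_strip_sgid(idmap, dir, mode);
}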
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * (C) 1997 Linus Torvalds
4 * (C) 1999 Andrea Arcangeli <andrea@suse.de> (dynamic inode allocation)
5 */
6#include <linux/export.h>
7#include <linux/fs.h>
8#include <linux/mm.h>
9#include <linux/backing-dev.h>
10#include <linux/hash.h>
11#include <linux/swap.h>
12#include <linux/security.h>
13#include <linux/cdev.h>
14#include <linux/memblock.h>
15#include <linux/fsnotify.h>
16#include <linux/mount.h>
17#include <linux/posix_acl.h>
18#include <linux/prefetch.h>
19#include <linux/buffer_head.h> /* for inode_has_buffers */
20#include <linux/ratelimit.h>
21#include <linux/list_lru.h>
22#include <linux/iversion.h>
23#include <trace/events/writeback.h>
24#include "internal.h"
25
26/*
27 * Inode locking rules:
28 *
29 * inode->i_lock protects:
30 * inode->i_state, inode->i_hash, __iget()
31 * Inode LRU list locks protect:
32 * inode->i_sb->s_inode_lru, inode->i_lru
33 * inode->i_sb->s_inode_list_lock protects:
34 * inode->i_sb->s_inodes, inode->i_sb_list
35 * bdi->wb.list_lock protects:
36 * bdi->wb.b_{dirty,io,more_io,dirty_time}, inode->i_io_list
37 * inode_hash_lock protects:
38 * inode_hashtable, inode->i_hash
39 *
40 * Lock ordering:
41 *
42 * inode->i_sb->s_inode_list_lock
43 * inode->i_lock
44 * Inode LRU list locks
45 *
46 * bdi->wb.list_lock
47 * inode->i_lock
48 *
49 * inode_hash_lock
50 * inode->i_sb->s_inode_list_lock
51 * inode->i_lock
52 *
53 * iunique_lock
54 * inode_hash_lock
55 */
56
57static unsigned int i_hash_mask __read_mostly;
58static unsigned int i_hash_shift __read_mostly;
59static struct hlist_head *inode_hashtable __read_mostly;
60static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock);
61
62/*
63 * Empty aops. Can be used for the cases where the user does not
64 * define any of the address_space operations.
65 */
66const struct address_space_operations empty_aops = {
67};
68EXPORT_SYMBOL(empty_aops);
69
70/*
71 * Statistics gathering..
72 */
73struct inodes_stat_t inodes_stat;
74
75static DEFINE_PER_CPU(unsigned long, nr_inodes);
76static DEFINE_PER_CPU(unsigned long, nr_unused);
77
78static struct kmem_cache *inode_cachep __read_mostly;
79
80static long get_nr_inodes(void)
81{
82 int i;
83 long sum = 0;
84 for_each_possible_cpu(i)
85 sum += per_cpu(nr_inodes, i);
86 return sum < 0 ? 0 : sum;
87}
88
89static inline long get_nr_inodes_unused(void)
90{
91 int i;
92 long sum = 0;
93 for_each_possible_cpu(i)
94 sum += per_cpu(nr_unused, i);
95 return sum < 0 ? 0 : sum;
96}
97
98long get_nr_dirty_inodes(void)
99{
100 /* not actually dirty inodes, but a wild approximation */
101 long nr_dirty = get_nr_inodes() - get_nr_inodes_unused();
102 return nr_dirty > 0 ? nr_dirty : 0;
103}
104
105/*
106 * Handle nr_inode sysctl
107 */
108#ifdef CONFIG_SYSCTL
109int proc_nr_inodes(struct ctl_table *table, int write,
110 void __user *buffer, size_t *lenp, loff_t *ppos)
111{
112 inodes_stat.nr_inodes = get_nr_inodes();
113 inodes_stat.nr_unused = get_nr_inodes_unused();
114 return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
115}
116#endif
117
118static int no_open(struct inode *inode, struct file *file)
119{
120 return -ENXIO;
121}
122
123/**
124 * inode_init_always - perform inode structure initialisation
125 * @sb: superblock inode belongs to
126 * @inode: inode to initialise
127 *
128 * These are initializations that need to be done on every inode
129 * allocation as the fields are not initialised by slab allocation.
130 */
131int inode_init_always(struct super_block *sb, struct inode *inode)
132{
133 static const struct inode_operations empty_iops;
134 static const struct file_operations no_open_fops = {.open = no_open};
135 struct address_space *const mapping = &inode->i_data;
136
137 inode->i_sb = sb;
138 inode->i_blkbits = sb->s_blocksize_bits;
139 inode->i_flags = 0;
140 atomic_set(&inode->i_count, 1);
141 inode->i_op = &empty_iops;
142 inode->i_fop = &no_open_fops;
143 inode->__i_nlink = 1;
144 inode->i_opflags = 0;
145 if (sb->s_xattr)
146 inode->i_opflags |= IOP_XATTR;
147 i_uid_write(inode, 0);
148 i_gid_write(inode, 0);
149 atomic_set(&inode->i_writecount, 0);
150 inode->i_size = 0;
151 inode->i_write_hint = WRITE_LIFE_NOT_SET;
152 inode->i_blocks = 0;
153 inode->i_bytes = 0;
154 inode->i_generation = 0;
155 inode->i_pipe = NULL;
156 inode->i_bdev = NULL;
157 inode->i_cdev = NULL;
158 inode->i_link = NULL;
159 inode->i_dir_seq = 0;
160 inode->i_rdev = 0;
161 inode->dirtied_when = 0;
162
163#ifdef CONFIG_CGROUP_WRITEBACK
164 inode->i_wb_frn_winner = 0;
165 inode->i_wb_frn_avg_time = 0;
166 inode->i_wb_frn_history = 0;
167#endif
168
169 if (security_inode_alloc(inode))
170 goto out;
171 spin_lock_init(&inode->i_lock);
172 lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key);
173
174 init_rwsem(&inode->i_rwsem);
175 lockdep_set_class(&inode->i_rwsem, &sb->s_type->i_mutex_key);
176
177 atomic_set(&inode->i_dio_count, 0);
178
179 mapping->a_ops = &empty_aops;
180 mapping->host = inode;
181 mapping->flags = 0;
182 mapping->wb_err = 0;
183 atomic_set(&mapping->i_mmap_writable, 0);
184#ifdef CONFIG_READ_ONLY_THP_FOR_FS
185 atomic_set(&mapping->nr_thps, 0);
186#endif
187 mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE);
188 mapping->private_data = NULL;
189 mapping->writeback_index = 0;
190 inode->i_private = NULL;
191 inode->i_mapping = mapping;
192 INIT_HLIST_HEAD(&inode->i_dentry); /* buggered by rcu freeing */
193#ifdef CONFIG_FS_POSIX_ACL
194 inode->i_acl = inode->i_default_acl = ACL_NOT_CACHED;
195#endif
196
197#ifdef CONFIG_FSNOTIFY
198 inode->i_fsnotify_mask = 0;
199#endif
200 inode->i_flctx = NULL;
201 this_cpu_inc(nr_inodes);
202
203 return 0;
204out:
205 return -ENOMEM;
206}
207EXPORT_SYMBOL(inode_init_always);
208
209void free_inode_nonrcu(struct inode *inode)
210{
211 kmem_cache_free(inode_cachep, inode);
212}
213EXPORT_SYMBOL(free_inode_nonrcu);
214
215static void i_callback(struct rcu_head *head)
216{
217 struct inode *inode = container_of(head, struct inode, i_rcu);
218 if (inode->free_inode)
219 inode->free_inode(inode);
220 else
221 free_inode_nonrcu(inode);
222}
223
224static struct inode *alloc_inode(struct super_block *sb)
225{
226 const struct super_operations *ops = sb->s_op;
227 struct inode *inode;
228
229 if (ops->alloc_inode)
230 inode = ops->alloc_inode(sb);
231 else
232 inode = kmem_cache_alloc(inode_cachep, GFP_KERNEL);
233
234 if (!inode)
235 return NULL;
236
237 if (unlikely(inode_init_always(sb, inode))) {
238 if (ops->destroy_inode) {
239 ops->destroy_inode(inode);
240 if (!ops->free_inode)
241 return NULL;
242 }
243 inode->free_inode = ops->free_inode;
244 i_callback(&inode->i_rcu);
245 return NULL;
246 }
247
248 return inode;
249}
250
251void __destroy_inode(struct inode *inode)
252{
253 BUG_ON(inode_has_buffers(inode));
254 inode_detach_wb(inode);
255 security_inode_free(inode);
256 fsnotify_inode_delete(inode);
257 locks_free_lock_context(inode);
258 if (!inode->i_nlink) {
259 WARN_ON(atomic_long_read(&inode->i_sb->s_remove_count) == 0);
260 atomic_long_dec(&inode->i_sb->s_remove_count);
261 }
262
263#ifdef CONFIG_FS_POSIX_ACL
264 if (inode->i_acl && !is_uncached_acl(inode->i_acl))
265 posix_acl_release(inode->i_acl);
266 if (inode->i_default_acl && !is_uncached_acl(inode->i_default_acl))
267 posix_acl_release(inode->i_default_acl);
268#endif
269 this_cpu_dec(nr_inodes);
270}
271EXPORT_SYMBOL(__destroy_inode);
272
273static void destroy_inode(struct inode *inode)
274{
275 const struct super_operations *ops = inode->i_sb->s_op;
276
277 BUG_ON(!list_empty(&inode->i_lru));
278 __destroy_inode(inode);
279 if (ops->destroy_inode) {
280 ops->destroy_inode(inode);
281 if (!ops->free_inode)
282 return;
283 }
284 inode->free_inode = ops->free_inode;
285 call_rcu(&inode->i_rcu, i_callback);
286}
287
288/**
289 * drop_nlink - directly drop an inode's link count
290 * @inode: inode
291 *
292 * This is a low-level filesystem helper to replace any
293 * direct filesystem manipulation of i_nlink. In cases
294 * where we are attempting to track writes to the
295 * filesystem, a decrement to zero means an imminent
296 * write when the file is truncated and actually unlinked
297 * on the filesystem.
298 */
299void drop_nlink(struct inode *inode)
300{
301 WARN_ON(inode->i_nlink == 0);
302 inode->__i_nlink--;
303 if (!inode->i_nlink)
304 atomic_long_inc(&inode->i_sb->s_remove_count);
305}
306EXPORT_SYMBOL(drop_nlink);
307
308/**
309 * clear_nlink - directly zero an inode's link count
310 * @inode: inode
311 *
312 * This is a low-level filesystem helper to replace any
313 * direct filesystem manipulation of i_nlink. See
314 * drop_nlink() for why we care about i_nlink hitting zero.
315 */
316void clear_nlink(struct inode *inode)
317{
318 if (inode->i_nlink) {
319 inode->__i_nlink = 0;
320 atomic_long_inc(&inode->i_sb->s_remove_count);
321 }
322}
323EXPORT_SYMBOL(clear_nlink);
324
325/**
326 * set_nlink - directly set an inode's link count
327 * @inode: inode
328 * @nlink: new nlink (should be non-zero)
329 *
330 * This is a low-level filesystem helper to replace any
331 * direct filesystem manipulation of i_nlink.
332 */
333void set_nlink(struct inode *inode, unsigned int nlink)
334{
335 if (!nlink) {
336 clear_nlink(inode);
337 } else {
338 /* Yes, some filesystems do change nlink from zero to one */
339 if (inode->i_nlink == 0)
340 atomic_long_dec(&inode->i_sb->s_remove_count);
341
342 inode->__i_nlink = nlink;
343 }
344}
345EXPORT_SYMBOL(set_nlink);
346
347/**
348 * inc_nlink - directly increment an inode's link count
349 * @inode: inode
350 *
351 * This is a low-level filesystem helper to replace any
352 * direct filesystem manipulation of i_nlink. Currently,
353 * it is only here for parity with dec_nlink().
354 */
355void inc_nlink(struct inode *inode)
356{
357 if (unlikely(inode->i_nlink == 0)) {
358 WARN_ON(!(inode->i_state & I_LINKABLE));
359 atomic_long_dec(&inode->i_sb->s_remove_count);
360 }
361
362 inode->__i_nlink++;
363}
364EXPORT_SYMBOL(inc_nlink);
365
366static void __address_space_init_once(struct address_space *mapping)
367{
368 xa_init_flags(&mapping->i_pages, XA_FLAGS_LOCK_IRQ | XA_FLAGS_ACCOUNT);
369 init_rwsem(&mapping->i_mmap_rwsem);
370 INIT_LIST_HEAD(&mapping->private_list);
371 spin_lock_init(&mapping->private_lock);
372 mapping->i_mmap = RB_ROOT_CACHED;
373}
374
375void address_space_init_once(struct address_space *mapping)
376{
377 memset(mapping, 0, sizeof(*mapping));
378 __address_space_init_once(mapping);
379}
380EXPORT_SYMBOL(address_space_init_once);
381
382/*
383 * These are initializations that only need to be done
384 * once, because the fields are idempotent across use
385 * of the inode, so let the slab aware of that.
386 */
387void inode_init_once(struct inode *inode)
388{
389 memset(inode, 0, sizeof(*inode));
390 INIT_HLIST_NODE(&inode->i_hash);
391 INIT_LIST_HEAD(&inode->i_devices);
392 INIT_LIST_HEAD(&inode->i_io_list);
393 INIT_LIST_HEAD(&inode->i_wb_list);
394 INIT_LIST_HEAD(&inode->i_lru);
395 __address_space_init_once(&inode->i_data);
396 i_size_ordered_init(inode);
397}
398EXPORT_SYMBOL(inode_init_once);
399
400static void init_once(void *foo)
401{
402 struct inode *inode = (struct inode *) foo;
403
404 inode_init_once(inode);
405}
406
407/*
408 * inode->i_lock must be held
409 */
410void __iget(struct inode *inode)
411{
412 atomic_inc(&inode->i_count);
413}
414
415/*
416 * get additional reference to inode; caller must already hold one.
417 */
418void ihold(struct inode *inode)
419{
420 WARN_ON(atomic_inc_return(&inode->i_count) < 2);
421}
422EXPORT_SYMBOL(ihold);
423
424static void inode_lru_list_add(struct inode *inode)
425{
426 if (list_lru_add(&inode->i_sb->s_inode_lru, &inode->i_lru))
427 this_cpu_inc(nr_unused);
428 else
429 inode->i_state |= I_REFERENCED;
430}
431
432/*
433 * Add inode to LRU if needed (inode is unused and clean).
434 *
435 * Needs inode->i_lock held.
436 */
437void inode_add_lru(struct inode *inode)
438{
439 if (!(inode->i_state & (I_DIRTY_ALL | I_SYNC |
440 I_FREEING | I_WILL_FREE)) &&
441 !atomic_read(&inode->i_count) && inode->i_sb->s_flags & SB_ACTIVE)
442 inode_lru_list_add(inode);
443}
444
445
446static void inode_lru_list_del(struct inode *inode)
447{
448
449 if (list_lru_del(&inode->i_sb->s_inode_lru, &inode->i_lru))
450 this_cpu_dec(nr_unused);
451}
452
453/**
454 * inode_sb_list_add - add inode to the superblock list of inodes
455 * @inode: inode to add
456 */
457void inode_sb_list_add(struct inode *inode)
458{
459 spin_lock(&inode->i_sb->s_inode_list_lock);
460 list_add(&inode->i_sb_list, &inode->i_sb->s_inodes);
461 spin_unlock(&inode->i_sb->s_inode_list_lock);
462}
463EXPORT_SYMBOL_GPL(inode_sb_list_add);
464
465static inline void inode_sb_list_del(struct inode *inode)
466{
467 if (!list_empty(&inode->i_sb_list)) {
468 spin_lock(&inode->i_sb->s_inode_list_lock);
469 list_del_init(&inode->i_sb_list);
470 spin_unlock(&inode->i_sb->s_inode_list_lock);
471 }
472}
473
474static unsigned long hash(struct super_block *sb, unsigned long hashval)
475{
476 unsigned long tmp;
477
478 tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
479 L1_CACHE_BYTES;
480 tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> i_hash_shift);
481 return tmp & i_hash_mask;
482}
483
484/**
485 * __insert_inode_hash - hash an inode
486 * @inode: unhashed inode
487 * @hashval: unsigned long value used to locate this object in the
488 * inode_hashtable.
489 *
490 * Add an inode to the inode hash for this superblock.
491 */
492void __insert_inode_hash(struct inode *inode, unsigned long hashval)
493{
494 struct hlist_head *b = inode_hashtable + hash(inode->i_sb, hashval);
495
496 spin_lock(&inode_hash_lock);
497 spin_lock(&inode->i_lock);
498 hlist_add_head(&inode->i_hash, b);
499 spin_unlock(&inode->i_lock);
500 spin_unlock(&inode_hash_lock);
501}
502EXPORT_SYMBOL(__insert_inode_hash);
503
504/**
505 * __remove_inode_hash - remove an inode from the hash
506 * @inode: inode to unhash
507 *
508 * Remove an inode from the superblock.
509 */
510void __remove_inode_hash(struct inode *inode)
511{
512 spin_lock(&inode_hash_lock);
513 spin_lock(&inode->i_lock);
514 hlist_del_init(&inode->i_hash);
515 spin_unlock(&inode->i_lock);
516 spin_unlock(&inode_hash_lock);
517}
518EXPORT_SYMBOL(__remove_inode_hash);
519
520void clear_inode(struct inode *inode)
521{
522 /*
523 * We have to cycle the i_pages lock here because reclaim can be in the
524 * process of removing the last page (in __delete_from_page_cache())
525 * and we must not free the mapping under it.
526 */
527 xa_lock_irq(&inode->i_data.i_pages);
528 BUG_ON(inode->i_data.nrpages);
529 BUG_ON(inode->i_data.nrexceptional);
530 xa_unlock_irq(&inode->i_data.i_pages);
531 BUG_ON(!list_empty(&inode->i_data.private_list));
532 BUG_ON(!(inode->i_state & I_FREEING));
533 BUG_ON(inode->i_state & I_CLEAR);
534 BUG_ON(!list_empty(&inode->i_wb_list));
535 /* don't need i_lock here, no concurrent mods to i_state */
536 inode->i_state = I_FREEING | I_CLEAR;
537}
538EXPORT_SYMBOL(clear_inode);
539
540/*
541 * Free the inode passed in, removing it from the lists it is still connected
542 * to. We remove any pages still attached to the inode and wait for any IO that
543 * is still in progress before finally destroying the inode.
544 *
545 * An inode must already be marked I_FREEING so that we avoid the inode being
546 * moved back onto lists if we race with other code that manipulates the lists
547 * (e.g. writeback_single_inode). The caller is responsible for setting this.
548 *
549 * An inode must already be removed from the LRU list before being evicted from
550 * the cache. This should occur atomically with setting the I_FREEING state
551 * flag, so no inodes here should ever be on the LRU when being evicted.
552 */
553static void evict(struct inode *inode)
554{
555 const struct super_operations *op = inode->i_sb->s_op;
556
557 BUG_ON(!(inode->i_state & I_FREEING));
558 BUG_ON(!list_empty(&inode->i_lru));
559
560 if (!list_empty(&inode->i_io_list))
561 inode_io_list_del(inode);
562
563 inode_sb_list_del(inode);
564
565 /*
566 * Wait for flusher thread to be done with the inode so that filesystem
567 * does not start destroying it while writeback is still running. Since
568 * the inode has I_FREEING set, flusher thread won't start new work on
569 * the inode. We just have to wait for running writeback to finish.
570 */
571 inode_wait_for_writeback(inode);
572
573 if (op->evict_inode) {
574 op->evict_inode(inode);
575 } else {
576 truncate_inode_pages_final(&inode->i_data);
577 clear_inode(inode);
578 }
579 if (S_ISBLK(inode->i_mode) && inode->i_bdev)
580 bd_forget(inode);
581 if (S_ISCHR(inode->i_mode) && inode->i_cdev)
582 cd_forget(inode);
583
584 remove_inode_hash(inode);
585
586 spin_lock(&inode->i_lock);
587 wake_up_bit(&inode->i_state, __I_NEW);
588 BUG_ON(inode->i_state != (I_FREEING | I_CLEAR));
589 spin_unlock(&inode->i_lock);
590
591 destroy_inode(inode);
592}
593
594/*
595 * dispose_list - dispose of the contents of a local list
596 * @head: the head of the list to free
597 *
598 * Dispose-list gets a local list with local inodes in it, so it doesn't
599 * need to worry about list corruption and SMP locks.
600 */
601static void dispose_list(struct list_head *head)
602{
603 while (!list_empty(head)) {
604 struct inode *inode;
605
606 inode = list_first_entry(head, struct inode, i_lru);
607 list_del_init(&inode->i_lru);
608
609 evict(inode);
610 cond_resched();
611 }
612}
613
614/**
615 * evict_inodes - evict all evictable inodes for a superblock
616 * @sb: superblock to operate on
617 *
618 * Make sure that no inodes with zero refcount are retained. This is
619 * called by superblock shutdown after having SB_ACTIVE flag removed,
620 * so any inode reaching zero refcount during or after that call will
621 * be immediately evicted.
622 */
623void evict_inodes(struct super_block *sb)
624{
625 struct inode *inode, *next;
626 LIST_HEAD(dispose);
627
628again:
629 spin_lock(&sb->s_inode_list_lock);
630 list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
631 if (atomic_read(&inode->i_count))
632 continue;
633
634 spin_lock(&inode->i_lock);
635 if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
636 spin_unlock(&inode->i_lock);
637 continue;
638 }
639
640 inode->i_state |= I_FREEING;
641 inode_lru_list_del(inode);
642 spin_unlock(&inode->i_lock);
643 list_add(&inode->i_lru, &dispose);
644
645 /*
646 * We can have a ton of inodes to evict at unmount time given
647 * enough memory, check to see if we need to go to sleep for a
648 * bit so we don't livelock.
649 */
650 if (need_resched()) {
651 spin_unlock(&sb->s_inode_list_lock);
652 cond_resched();
653 dispose_list(&dispose);
654 goto again;
655 }
656 }
657 spin_unlock(&sb->s_inode_list_lock);
658
659 dispose_list(&dispose);
660}
661EXPORT_SYMBOL_GPL(evict_inodes);
662
663/**
664 * invalidate_inodes - attempt to free all inodes on a superblock
665 * @sb: superblock to operate on
666 * @kill_dirty: flag to guide handling of dirty inodes
667 *
668 * Attempts to free all inodes for a given superblock. If there were any
669 * busy inodes return a non-zero value, else zero.
670 * If @kill_dirty is set, discard dirty inodes too, otherwise treat
671 * them as busy.
672 */
673int invalidate_inodes(struct super_block *sb, bool kill_dirty)
674{
675 int busy = 0;
676 struct inode *inode, *next;
677 LIST_HEAD(dispose);
678
679 spin_lock(&sb->s_inode_list_lock);
680 list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
681 spin_lock(&inode->i_lock);
682 if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
683 spin_unlock(&inode->i_lock);
684 continue;
685 }
686 if (inode->i_state & I_DIRTY_ALL && !kill_dirty) {
687 spin_unlock(&inode->i_lock);
688 busy = 1;
689 continue;
690 }
691 if (atomic_read(&inode->i_count)) {
692 spin_unlock(&inode->i_lock);
693 busy = 1;
694 continue;
695 }
696
697 inode->i_state |= I_FREEING;
698 inode_lru_list_del(inode);
699 spin_unlock(&inode->i_lock);
700 list_add(&inode->i_lru, &dispose);
701 }
702 spin_unlock(&sb->s_inode_list_lock);
703
704 dispose_list(&dispose);
705
706 return busy;
707}
708
709/*
710 * Isolate the inode from the LRU in preparation for freeing it.
711 *
712 * Any inodes which are pinned purely because of attached pagecache have their
713 * pagecache removed. If the inode has metadata buffers attached to
714 * mapping->private_list then try to remove them.
715 *
716 * If the inode has the I_REFERENCED flag set, then it means that it has been
717 * used recently - the flag is set in iput_final(). When we encounter such an
718 * inode, clear the flag and move it to the back of the LRU so it gets another
719 * pass through the LRU before it gets reclaimed. This is necessary because of
720 * the fact we are doing lazy LRU updates to minimise lock contention so the
721 * LRU does not have strict ordering. Hence we don't want to reclaim inodes
722 * with this flag set because they are the inodes that are out of order.
723 */
724static enum lru_status inode_lru_isolate(struct list_head *item,
725 struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
726{
727 struct list_head *freeable = arg;
728 struct inode *inode = container_of(item, struct inode, i_lru);
729
730 /*
731 * we are inverting the lru lock/inode->i_lock here, so use a trylock.
732 * If we fail to get the lock, just skip it.
733 */
734 if (!spin_trylock(&inode->i_lock))
735 return LRU_SKIP;
736
737 /*
738 * Referenced or dirty inodes are still in use. Give them another pass
739 * through the LRU as we canot reclaim them now.
740 */
741 if (atomic_read(&inode->i_count) ||
742 (inode->i_state & ~I_REFERENCED)) {
743 list_lru_isolate(lru, &inode->i_lru);
744 spin_unlock(&inode->i_lock);
745 this_cpu_dec(nr_unused);
746 return LRU_REMOVED;
747 }
748
749 /* recently referenced inodes get one more pass */
750 if (inode->i_state & I_REFERENCED) {
751 inode->i_state &= ~I_REFERENCED;
752 spin_unlock(&inode->i_lock);
753 return LRU_ROTATE;
754 }
755
756 if (inode_has_buffers(inode) || inode->i_data.nrpages) {
757 __iget(inode);
758 spin_unlock(&inode->i_lock);
759 spin_unlock(lru_lock);
760 if (remove_inode_buffers(inode)) {
761 unsigned long reap;
762 reap = invalidate_mapping_pages(&inode->i_data, 0, -1);
763 if (current_is_kswapd())
764 __count_vm_events(KSWAPD_INODESTEAL, reap);
765 else
766 __count_vm_events(PGINODESTEAL, reap);
767 if (current->reclaim_state)
768 current->reclaim_state->reclaimed_slab += reap;
769 }
770 iput(inode);
771 spin_lock(lru_lock);
772 return LRU_RETRY;
773 }
774
775 WARN_ON(inode->i_state & I_NEW);
776 inode->i_state |= I_FREEING;
777 list_lru_isolate_move(lru, &inode->i_lru, freeable);
778 spin_unlock(&inode->i_lock);
779
780 this_cpu_dec(nr_unused);
781 return LRU_REMOVED;
782}
783
784/*
785 * Walk the superblock inode LRU for freeable inodes and attempt to free them.
786 * This is called from the superblock shrinker function with a number of inodes
787 * to trim from the LRU. Inodes to be freed are moved to a temporary list and
788 * then are freed outside inode_lock by dispose_list().
789 */
790long prune_icache_sb(struct super_block *sb, struct shrink_control *sc)
791{
792 LIST_HEAD(freeable);
793 long freed;
794
795 freed = list_lru_shrink_walk(&sb->s_inode_lru, sc,
796 inode_lru_isolate, &freeable);
797 dispose_list(&freeable);
798 return freed;
799}
800
801static void __wait_on_freeing_inode(struct inode *inode);
802/*
803 * Called with the inode lock held.
804 */
805static struct inode *find_inode(struct super_block *sb,
806 struct hlist_head *head,
807 int (*test)(struct inode *, void *),
808 void *data)
809{
810 struct inode *inode = NULL;
811
812repeat:
813 hlist_for_each_entry(inode, head, i_hash) {
814 if (inode->i_sb != sb)
815 continue;
816 if (!test(inode, data))
817 continue;
818 spin_lock(&inode->i_lock);
819 if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
820 __wait_on_freeing_inode(inode);
821 goto repeat;
822 }
823 if (unlikely(inode->i_state & I_CREATING)) {
824 spin_unlock(&inode->i_lock);
825 return ERR_PTR(-ESTALE);
826 }
827 __iget(inode);
828 spin_unlock(&inode->i_lock);
829 return inode;
830 }
831 return NULL;
832}
833
834/*
835 * find_inode_fast is the fast path version of find_inode, see the comment at
836 * iget_locked for details.
837 */
838static struct inode *find_inode_fast(struct super_block *sb,
839 struct hlist_head *head, unsigned long ino)
840{
841 struct inode *inode = NULL;
842
843repeat:
844 hlist_for_each_entry(inode, head, i_hash) {
845 if (inode->i_ino != ino)
846 continue;
847 if (inode->i_sb != sb)
848 continue;
849 spin_lock(&inode->i_lock);
850 if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
851 __wait_on_freeing_inode(inode);
852 goto repeat;
853 }
854 if (unlikely(inode->i_state & I_CREATING)) {
855 spin_unlock(&inode->i_lock);
856 return ERR_PTR(-ESTALE);
857 }
858 __iget(inode);
859 spin_unlock(&inode->i_lock);
860 return inode;
861 }
862 return NULL;
863}
864
865/*
866 * Each cpu owns a range of LAST_INO_BATCH numbers.
867 * 'shared_last_ino' is dirtied only once out of LAST_INO_BATCH allocations,
868 * to renew the exhausted range.
869 *
870 * This does not significantly increase overflow rate because every CPU can
871 * consume at most LAST_INO_BATCH-1 unused inode numbers. So there is
872 * NR_CPUS*(LAST_INO_BATCH-1) wastage. At 4096 and 1024, this is ~0.1% of the
873 * 2^32 range, and is a worst-case. Even a 50% wastage would only increase
874 * overflow rate by 2x, which does not seem too significant.
875 *
876 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
877 * error if st_ino won't fit in target struct field. Use 32bit counter
878 * here to attempt to avoid that.
879 */
880#define LAST_INO_BATCH 1024
881static DEFINE_PER_CPU(unsigned int, last_ino);
882
883unsigned int get_next_ino(void)
884{
885 unsigned int *p = &get_cpu_var(last_ino);
886 unsigned int res = *p;
887
888#ifdef CONFIG_SMP
889 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
890 static atomic_t shared_last_ino;
891 int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
892
893 res = next - LAST_INO_BATCH;
894 }
895#endif
896
897 res++;
898 /* get_next_ino should not provide a 0 inode number */
899 if (unlikely(!res))
900 res++;
901 *p = res;
902 put_cpu_var(last_ino);
903 return res;
904}
905EXPORT_SYMBOL(get_next_ino);
906
907/**
908 * new_inode_pseudo - obtain an inode
909 * @sb: superblock
910 *
911 * Allocates a new inode for given superblock.
912 * Inode wont be chained in superblock s_inodes list
913 * This means :
914 * - fs can't be unmount
915 * - quotas, fsnotify, writeback can't work
916 */
917struct inode *new_inode_pseudo(struct super_block *sb)
918{
919 struct inode *inode = alloc_inode(sb);
920
921 if (inode) {
922 spin_lock(&inode->i_lock);
923 inode->i_state = 0;
924 spin_unlock(&inode->i_lock);
925 INIT_LIST_HEAD(&inode->i_sb_list);
926 }
927 return inode;
928}
929
930/**
931 * new_inode - obtain an inode
932 * @sb: superblock
933 *
934 * Allocates a new inode for given superblock. The default gfp_mask
935 * for allocations related to inode->i_mapping is GFP_HIGHUSER_MOVABLE.
936 * If HIGHMEM pages are unsuitable or it is known that pages allocated
937 * for the page cache are not reclaimable or migratable,
938 * mapping_set_gfp_mask() must be called with suitable flags on the
939 * newly created inode's mapping
940 *
941 */
942struct inode *new_inode(struct super_block *sb)
943{
944 struct inode *inode;
945
946 spin_lock_prefetch(&sb->s_inode_list_lock);
947
948 inode = new_inode_pseudo(sb);
949 if (inode)
950 inode_sb_list_add(inode);
951 return inode;
952}
953EXPORT_SYMBOL(new_inode);
954
955#ifdef CONFIG_DEBUG_LOCK_ALLOC
956void lockdep_annotate_inode_mutex_key(struct inode *inode)
957{
958 if (S_ISDIR(inode->i_mode)) {
959 struct file_system_type *type = inode->i_sb->s_type;
960
961 /* Set new key only if filesystem hasn't already changed it */
962 if (lockdep_match_class(&inode->i_rwsem, &type->i_mutex_key)) {
963 /*
964 * ensure nobody is actually holding i_mutex
965 */
966 // mutex_destroy(&inode->i_mutex);
967 init_rwsem(&inode->i_rwsem);
968 lockdep_set_class(&inode->i_rwsem,
969 &type->i_mutex_dir_key);
970 }
971 }
972}
973EXPORT_SYMBOL(lockdep_annotate_inode_mutex_key);
974#endif
975
976/**
977 * unlock_new_inode - clear the I_NEW state and wake up any waiters
978 * @inode: new inode to unlock
979 *
980 * Called when the inode is fully initialised to clear the new state of the
981 * inode and wake up anyone waiting for the inode to finish initialisation.
982 */
983void unlock_new_inode(struct inode *inode)
984{
985 lockdep_annotate_inode_mutex_key(inode);
986 spin_lock(&inode->i_lock);
987 WARN_ON(!(inode->i_state & I_NEW));
988 inode->i_state &= ~I_NEW & ~I_CREATING;
989 smp_mb();
990 wake_up_bit(&inode->i_state, __I_NEW);
991 spin_unlock(&inode->i_lock);
992}
993EXPORT_SYMBOL(unlock_new_inode);
994
995void discard_new_inode(struct inode *inode)
996{
997 lockdep_annotate_inode_mutex_key(inode);
998 spin_lock(&inode->i_lock);
999 WARN_ON(!(inode->i_state & I_NEW));
1000 inode->i_state &= ~I_NEW;
1001 smp_mb();
1002 wake_up_bit(&inode->i_state, __I_NEW);
1003 spin_unlock(&inode->i_lock);
1004 iput(inode);
1005}
1006EXPORT_SYMBOL(discard_new_inode);
1007
1008/**
1009 * lock_two_nondirectories - take two i_mutexes on non-directory objects
1010 *
1011 * Lock any non-NULL argument that is not a directory.
1012 * Zero, one or two objects may be locked by this function.
1013 *
1014 * @inode1: first inode to lock
1015 * @inode2: second inode to lock
1016 */
1017void lock_two_nondirectories(struct inode *inode1, struct inode *inode2)
1018{
1019 if (inode1 > inode2)
1020 swap(inode1, inode2);
1021
1022 if (inode1 && !S_ISDIR(inode1->i_mode))
1023 inode_lock(inode1);
1024 if (inode2 && !S_ISDIR(inode2->i_mode) && inode2 != inode1)
1025 inode_lock_nested(inode2, I_MUTEX_NONDIR2);
1026}
1027EXPORT_SYMBOL(lock_two_nondirectories);
1028
1029/**
1030 * unlock_two_nondirectories - release locks from lock_two_nondirectories()
1031 * @inode1: first inode to unlock
1032 * @inode2: second inode to unlock
1033 */
1034void unlock_two_nondirectories(struct inode *inode1, struct inode *inode2)
1035{
1036 if (inode1 && !S_ISDIR(inode1->i_mode))
1037 inode_unlock(inode1);
1038 if (inode2 && !S_ISDIR(inode2->i_mode) && inode2 != inode1)
1039 inode_unlock(inode2);
1040}
1041EXPORT_SYMBOL(unlock_two_nondirectories);
1042
1043/**
1044 * inode_insert5 - obtain an inode from a mounted file system
1045 * @inode: pre-allocated inode to use for insert to cache
1046 * @hashval: hash value (usually inode number) to get
1047 * @test: callback used for comparisons between inodes
1048 * @set: callback used to initialize a new struct inode
1049 * @data: opaque data pointer to pass to @test and @set
1050 *
1051 * Search for the inode specified by @hashval and @data in the inode cache,
1052 * and if present it is return it with an increased reference count. This is
1053 * a variant of iget5_locked() for callers that don't want to fail on memory
1054 * allocation of inode.
1055 *
1056 * If the inode is not in cache, insert the pre-allocated inode to cache and
1057 * return it locked, hashed, and with the I_NEW flag set. The file system gets
1058 * to fill it in before unlocking it via unlock_new_inode().
1059 *
1060 * Note both @test and @set are called with the inode_hash_lock held, so can't
1061 * sleep.
1062 */
1063struct inode *inode_insert5(struct inode *inode, unsigned long hashval,
1064 int (*test)(struct inode *, void *),
1065 int (*set)(struct inode *, void *), void *data)
1066{
1067 struct hlist_head *head = inode_hashtable + hash(inode->i_sb, hashval);
1068 struct inode *old;
1069 bool creating = inode->i_state & I_CREATING;
1070
1071again:
1072 spin_lock(&inode_hash_lock);
1073 old = find_inode(inode->i_sb, head, test, data);
1074 if (unlikely(old)) {
1075 /*
1076 * Uhhuh, somebody else created the same inode under us.
1077 * Use the old inode instead of the preallocated one.
1078 */
1079 spin_unlock(&inode_hash_lock);
1080 if (IS_ERR(old))
1081 return NULL;
1082 wait_on_inode(old);
1083 if (unlikely(inode_unhashed(old))) {
1084 iput(old);
1085 goto again;
1086 }
1087 return old;
1088 }
1089
1090 if (set && unlikely(set(inode, data))) {
1091 inode = NULL;
1092 goto unlock;
1093 }
1094
1095 /*
1096 * Return the locked inode with I_NEW set, the
1097 * caller is responsible for filling in the contents
1098 */
1099 spin_lock(&inode->i_lock);
1100 inode->i_state |= I_NEW;
1101 hlist_add_head(&inode->i_hash, head);
1102 spin_unlock(&inode->i_lock);
1103 if (!creating)
1104 inode_sb_list_add(inode);
1105unlock:
1106 spin_unlock(&inode_hash_lock);
1107
1108 return inode;
1109}
1110EXPORT_SYMBOL(inode_insert5);
1111
1112/**
1113 * iget5_locked - obtain an inode from a mounted file system
1114 * @sb: super block of file system
1115 * @hashval: hash value (usually inode number) to get
1116 * @test: callback used for comparisons between inodes
1117 * @set: callback used to initialize a new struct inode
1118 * @data: opaque data pointer to pass to @test and @set
1119 *
1120 * Search for the inode specified by @hashval and @data in the inode cache,
1121 * and if present it is return it with an increased reference count. This is
1122 * a generalized version of iget_locked() for file systems where the inode
1123 * number is not sufficient for unique identification of an inode.
1124 *
1125 * If the inode is not in cache, allocate a new inode and return it locked,
1126 * hashed, and with the I_NEW flag set. The file system gets to fill it in
1127 * before unlocking it via unlock_new_inode().
1128 *
1129 * Note both @test and @set are called with the inode_hash_lock held, so can't
1130 * sleep.
1131 */
1132struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
1133 int (*test)(struct inode *, void *),
1134 int (*set)(struct inode *, void *), void *data)
1135{
1136 struct inode *inode = ilookup5(sb, hashval, test, data);
1137
1138 if (!inode) {
1139 struct inode *new = alloc_inode(sb);
1140
1141 if (new) {
1142 new->i_state = 0;
1143 inode = inode_insert5(new, hashval, test, set, data);
1144 if (unlikely(inode != new))
1145 destroy_inode(new);
1146 }
1147 }
1148 return inode;
1149}
1150EXPORT_SYMBOL(iget5_locked);
1151
1152/**
1153 * iget_locked - obtain an inode from a mounted file system
1154 * @sb: super block of file system
1155 * @ino: inode number to get
1156 *
1157 * Search for the inode specified by @ino in the inode cache and if present
1158 * return it with an increased reference count. This is for file systems
1159 * where the inode number is sufficient for unique identification of an inode.
1160 *
1161 * If the inode is not in cache, allocate a new inode and return it locked,
1162 * hashed, and with the I_NEW flag set. The file system gets to fill it in
1163 * before unlocking it via unlock_new_inode().
1164 */
1165struct inode *iget_locked(struct super_block *sb, unsigned long ino)
1166{
1167 struct hlist_head *head = inode_hashtable + hash(sb, ino);
1168 struct inode *inode;
1169again:
1170 spin_lock(&inode_hash_lock);
1171 inode = find_inode_fast(sb, head, ino);
1172 spin_unlock(&inode_hash_lock);
1173 if (inode) {
1174 if (IS_ERR(inode))
1175 return NULL;
1176 wait_on_inode(inode);
1177 if (unlikely(inode_unhashed(inode))) {
1178 iput(inode);
1179 goto again;
1180 }
1181 return inode;
1182 }
1183
1184 inode = alloc_inode(sb);
1185 if (inode) {
1186 struct inode *old;
1187
1188 spin_lock(&inode_hash_lock);
1189 /* We released the lock, so.. */
1190 old = find_inode_fast(sb, head, ino);
1191 if (!old) {
1192 inode->i_ino = ino;
1193 spin_lock(&inode->i_lock);
1194 inode->i_state = I_NEW;
1195 hlist_add_head(&inode->i_hash, head);
1196 spin_unlock(&inode->i_lock);
1197 inode_sb_list_add(inode);
1198 spin_unlock(&inode_hash_lock);
1199
			/*
			 * Return the locked inode with I_NEW set; the
			 * caller is responsible for filling in the contents.
			 */
1203 return inode;
1204 }
1205
1206 /*
1207 * Uhhuh, somebody else created the same inode under
1208 * us. Use the old inode instead of the one we just
1209 * allocated.
1210 */
1211 spin_unlock(&inode_hash_lock);
1212 destroy_inode(inode);
1213 if (IS_ERR(old))
1214 return NULL;
1215 inode = old;
1216 wait_on_inode(inode);
1217 if (unlikely(inode_unhashed(inode))) {
1218 iput(inode);
1219 goto again;
1220 }
1221 }
1222 return inode;
1223}
1224EXPORT_SYMBOL(iget_locked);
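
/*
 * Usage sketch (illustrative; myfs_read_inode() is hypothetical): the
 * classic lookup pattern for a filesystem keyed by inode number:
 *
 *	struct inode *inode = iget_locked(sb, ino);
 *
 *	if (!inode)
 *		return ERR_PTR(-ENOMEM);
 *	if (!(inode->i_state & I_NEW))
 *		return inode;
 *	err = myfs_read_inode(inode);
 *	if (err) {
 *		iget_failed(inode);
 *		return ERR_PTR(err);
 *	}
 *	unlock_new_inode(inode);
 *	return inode;
 */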
1225
1226/*
1227 * search the inode cache for a matching inode number.
1228 * If we find one, then the inode number we are trying to
1229 * allocate is not unique and so we should not use it.
1230 *
1231 * Returns 1 if the inode number is unique, 0 if it is not.
1232 */
1233static int test_inode_iunique(struct super_block *sb, unsigned long ino)
1234{
1235 struct hlist_head *b = inode_hashtable + hash(sb, ino);
1236 struct inode *inode;
1237
1238 spin_lock(&inode_hash_lock);
1239 hlist_for_each_entry(inode, b, i_hash) {
1240 if (inode->i_ino == ino && inode->i_sb == sb) {
1241 spin_unlock(&inode_hash_lock);
1242 return 0;
1243 }
1244 }
1245 spin_unlock(&inode_hash_lock);
1246
1247 return 1;
1248}
1249
1250/**
1251 * iunique - get a unique inode number
1252 * @sb: superblock
1253 * @max_reserved: highest reserved inode number
1254 *
1255 * Obtain an inode number that is unique on the system for a given
1256 * superblock. This is used by file systems that have no natural
1257 * permanent inode numbering system. An inode number is returned that
1258 * is higher than the reserved limit but unique.
1259 *
1260 * BUGS:
1261 * With a large number of inodes live on the file system this function
1262 * currently becomes quite slow.
1263 */
1264ino_t iunique(struct super_block *sb, ino_t max_reserved)
1265{
1266 /*
1267 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
1268 * error if st_ino won't fit in target struct field. Use 32bit counter
1269 * here to attempt to avoid that.
1270 */
1271 static DEFINE_SPINLOCK(iunique_lock);
1272 static unsigned int counter;
1273 ino_t res;
1274
1275 spin_lock(&iunique_lock);
1276 do {
1277 if (counter <= max_reserved)
1278 counter = max_reserved + 1;
1279 res = counter++;
1280 } while (!test_inode_iunique(sb, res));
1281 spin_unlock(&iunique_lock);
1282
1283 return res;
1284}
1285EXPORT_SYMBOL(iunique);
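
/*
 * Usage sketch (illustrative; MYFS_MAX_RESERVED is hypothetical): a
 * pseudo filesystem with no on-disk inode numbers can pick one at
 * inode creation time, keeping the low numbers for special inodes:
 *
 *	inode->i_ino = iunique(sb, MYFS_MAX_RESERVED);
 */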
1286
1287struct inode *igrab(struct inode *inode)
1288{
1289 spin_lock(&inode->i_lock);
1290 if (!(inode->i_state & (I_FREEING|I_WILL_FREE))) {
1291 __iget(inode);
1292 spin_unlock(&inode->i_lock);
1293 } else {
1294 spin_unlock(&inode->i_lock);
1295 /*
		 * Handle the case where s_op->clear_inode has not been
1297 * called yet, and somebody is calling igrab
1298 * while the inode is getting freed.
1299 */
1300 inode = NULL;
1301 }
1302 return inode;
1303}
1304EXPORT_SYMBOL(igrab);
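
/*
 * Usage sketch (illustrative): take a reference on an inode reached
 * through a structure that does not itself pin it, skipping inodes
 * that are already on their way to being freed:
 *
 *	struct inode *inode = igrab(candidate);
 *
 *	if (!inode)
 *		return;
 *	... use the inode ...
 *	iput(inode);
 */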
1305
1306/**
1307 * ilookup5_nowait - search for an inode in the inode cache
1308 * @sb: super block of file system to search
1309 * @hashval: hash value (usually inode number) to search for
1310 * @test: callback used for comparisons between inodes
1311 * @data: opaque data pointer to pass to @test
1312 *
1313 * Search for the inode specified by @hashval and @data in the inode cache.
1314 * If the inode is in the cache, the inode is returned with an incremented
1315 * reference count.
1316 *
1317 * Note: I_NEW is not waited upon so you have to be very careful what you do
1318 * with the returned inode. You probably should be using ilookup5() instead.
1319 *
1320 * Note2: @test is called with the inode_hash_lock held, so can't sleep.
1321 */
1322struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval,
1323 int (*test)(struct inode *, void *), void *data)
1324{
1325 struct hlist_head *head = inode_hashtable + hash(sb, hashval);
1326 struct inode *inode;
1327
1328 spin_lock(&inode_hash_lock);
1329 inode = find_inode(sb, head, test, data);
1330 spin_unlock(&inode_hash_lock);
1331
1332 return IS_ERR(inode) ? NULL : inode;
1333}
1334EXPORT_SYMBOL(ilookup5_nowait);
1335
1336/**
1337 * ilookup5 - search for an inode in the inode cache
1338 * @sb: super block of file system to search
1339 * @hashval: hash value (usually inode number) to search for
1340 * @test: callback used for comparisons between inodes
1341 * @data: opaque data pointer to pass to @test
1342 *
1343 * Search for the inode specified by @hashval and @data in the inode cache,
1344 * and if the inode is in the cache, return the inode with an incremented
 * reference count. Waits on I_NEW before returning the inode.
1347 *
1348 * This is a generalized version of ilookup() for file systems where the
1349 * inode number is not sufficient for unique identification of an inode.
1350 *
1351 * Note: @test is called with the inode_hash_lock held, so can't sleep.
1352 */
1353struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
1354 int (*test)(struct inode *, void *), void *data)
1355{
1356 struct inode *inode;
1357again:
1358 inode = ilookup5_nowait(sb, hashval, test, data);
1359 if (inode) {
1360 wait_on_inode(inode);
1361 if (unlikely(inode_unhashed(inode))) {
1362 iput(inode);
1363 goto again;
1364 }
1365 }
1366 return inode;
1367}
1368EXPORT_SYMBOL(ilookup5);
1369
1370/**
1371 * ilookup - search for an inode in the inode cache
1372 * @sb: super block of file system to search
1373 * @ino: inode number to search for
1374 *
1375 * Search for the inode @ino in the inode cache, and if the inode is in the
1376 * cache, the inode is returned with an incremented reference count.
1377 */
1378struct inode *ilookup(struct super_block *sb, unsigned long ino)
1379{
1380 struct hlist_head *head = inode_hashtable + hash(sb, ino);
1381 struct inode *inode;
1382again:
1383 spin_lock(&inode_hash_lock);
1384 inode = find_inode_fast(sb, head, ino);
1385 spin_unlock(&inode_hash_lock);
1386
1387 if (inode) {
1388 if (IS_ERR(inode))
1389 return NULL;
1390 wait_on_inode(inode);
1391 if (unlikely(inode_unhashed(inode))) {
1392 iput(inode);
1393 goto again;
1394 }
1395 }
1396 return inode;
1397}
1398EXPORT_SYMBOL(ilookup);
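
/*
 * Usage sketch (illustrative): peek at the cache before doing expensive
 * work to reconstruct an inode:
 *
 *	struct inode *inode = ilookup(sb, ino);
 *
 *	if (inode) {
 *		... the inode was cached, use it ...
 *		iput(inode);
 *	}
 */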
1399
1400/**
1401 * find_inode_nowait - find an inode in the inode cache
1402 * @sb: super block of file system to search
1403 * @hashval: hash value (usually inode number) to search for
1404 * @match: callback used for comparisons between inodes
1405 * @data: opaque data pointer to pass to @match
1406 *
1407 * Search for the inode specified by @hashval and @data in the inode
1408 * cache, where the helper function @match will return 0 if the inode
1409 * does not match, 1 if the inode does match, and -1 if the search
1410 * should be stopped. The @match function must be responsible for
1411 * taking the i_lock spin_lock and checking i_state for an inode being
1412 * freed or being initialized, and incrementing the reference count
1413 * before returning 1. It also must not sleep, since it is called with
1414 * the inode_hash_lock spinlock held.
1415 *
 * This is an even more generalized version of ilookup5() when the
 * function must never block --- find_inode() can block in
 * __wait_on_freeing_inode() --- or when the caller cannot increment
 * the reference count because the resulting iput() might cause an
 * inode eviction. The tradeoff is that the @match function must be
1421 * very carefully implemented.
1422 */
1423struct inode *find_inode_nowait(struct super_block *sb,
1424 unsigned long hashval,
1425 int (*match)(struct inode *, unsigned long,
1426 void *),
1427 void *data)
1428{
1429 struct hlist_head *head = inode_hashtable + hash(sb, hashval);
1430 struct inode *inode, *ret_inode = NULL;
1431 int mval;
1432
1433 spin_lock(&inode_hash_lock);
1434 hlist_for_each_entry(inode, head, i_hash) {
1435 if (inode->i_sb != sb)
1436 continue;
1437 mval = match(inode, hashval, data);
1438 if (mval == 0)
1439 continue;
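		/* mval is -1 or 1 here, so stop the search either way */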
1440 if (mval == 1)
1441 ret_inode = inode;
1442 goto out;
1443 }
1444out:
1445 spin_unlock(&inode_hash_lock);
1446 return ret_inode;
1447}
1448EXPORT_SYMBOL(find_inode_nowait);
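
/*
 * Usage sketch (illustrative; MYFS_I() is hypothetical): a @match
 * helper that follows the rules above, taking i_lock, skipping inodes
 * that are being freed or initialized, and grabbing a reference before
 * returning 1:
 *
 *	static int myfs_match(struct inode *inode, unsigned long hashval,
 *			      void *data)
 *	{
 *		if (MYFS_I(inode)->objid != *(u64 *)data)
 *			return 0;
 *		spin_lock(&inode->i_lock);
 *		if (inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW)) {
 *			spin_unlock(&inode->i_lock);
 *			return -1;
 *		}
 *		__iget(inode);
 *		spin_unlock(&inode->i_lock);
 *		return 1;
 *	}
 */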
1449
1450int insert_inode_locked(struct inode *inode)
1451{
1452 struct super_block *sb = inode->i_sb;
1453 ino_t ino = inode->i_ino;
1454 struct hlist_head *head = inode_hashtable + hash(sb, ino);
1455
1456 while (1) {
1457 struct inode *old = NULL;
1458 spin_lock(&inode_hash_lock);
1459 hlist_for_each_entry(old, head, i_hash) {
1460 if (old->i_ino != ino)
1461 continue;
1462 if (old->i_sb != sb)
1463 continue;
1464 spin_lock(&old->i_lock);
1465 if (old->i_state & (I_FREEING|I_WILL_FREE)) {
1466 spin_unlock(&old->i_lock);
1467 continue;
1468 }
1469 break;
1470 }
1471 if (likely(!old)) {
1472 spin_lock(&inode->i_lock);
1473 inode->i_state |= I_NEW | I_CREATING;
1474 hlist_add_head(&inode->i_hash, head);
1475 spin_unlock(&inode->i_lock);
1476 spin_unlock(&inode_hash_lock);
1477 return 0;
1478 }
1479 if (unlikely(old->i_state & I_CREATING)) {
1480 spin_unlock(&old->i_lock);
1481 spin_unlock(&inode_hash_lock);
1482 return -EBUSY;
1483 }
1484 __iget(old);
1485 spin_unlock(&old->i_lock);
1486 spin_unlock(&inode_hash_lock);
1487 wait_on_inode(old);
1488 if (unlikely(!inode_unhashed(old))) {
1489 iput(old);
1490 return -EBUSY;
1491 }
1492 iput(old);
1493 }
1494}
1495EXPORT_SYMBOL(insert_inode_locked);
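
/*
 * Usage sketch (illustrative): inode-creation paths hash the new inode
 * under its number before it is fully set up, so that concurrent
 * lookups wait on I_NEW instead of seeing a half-built inode:
 *
 *	inode->i_ino = ino;
 *	err = insert_inode_locked(inode);
 *	if (err)
 *		... -EBUSY, the number is already in use ...
 *	... fill in the inode, then unlock_new_inode(inode) ...
 */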
1496
1497int insert_inode_locked4(struct inode *inode, unsigned long hashval,
1498 int (*test)(struct inode *, void *), void *data)
1499{
1500 struct inode *old;
1501
1502 inode->i_state |= I_CREATING;
1503 old = inode_insert5(inode, hashval, test, NULL, data);
1504
1505 if (old != inode) {
1506 iput(old);
1507 return -EBUSY;
1508 }
1509 return 0;
1510}
1511EXPORT_SYMBOL(insert_inode_locked4);
1512
1513
1514int generic_delete_inode(struct inode *inode)
1515{
1516 return 1;
1517}
1518EXPORT_SYMBOL(generic_delete_inode);
1519
1520/*
1521 * Called when we're dropping the last reference
1522 * to an inode.
1523 *
1524 * Call the FS "drop_inode()" function, defaulting to
1525 * the legacy UNIX filesystem behaviour. If it tells
 * us to evict the inode, do so. Otherwise, retain the
 * inode in cache if the fs is alive, or sync and evict
 * it if the fs is shutting down.
1529 */
1530static void iput_final(struct inode *inode)
1531{
1532 struct super_block *sb = inode->i_sb;
1533 const struct super_operations *op = inode->i_sb->s_op;
1534 int drop;
1535
1536 WARN_ON(inode->i_state & I_NEW);
1537
1538 if (op->drop_inode)
1539 drop = op->drop_inode(inode);
1540 else
1541 drop = generic_drop_inode(inode);
1542
1543 if (!drop && (sb->s_flags & SB_ACTIVE)) {
1544 inode_add_lru(inode);
1545 spin_unlock(&inode->i_lock);
1546 return;
1547 }
1548
1549 if (!drop) {
1550 inode->i_state |= I_WILL_FREE;
1551 spin_unlock(&inode->i_lock);
1552 write_inode_now(inode, 1);
1553 spin_lock(&inode->i_lock);
1554 WARN_ON(inode->i_state & I_NEW);
1555 inode->i_state &= ~I_WILL_FREE;
1556 }
1557
1558 inode->i_state |= I_FREEING;
1559 if (!list_empty(&inode->i_lru))
1560 inode_lru_list_del(inode);
1561 spin_unlock(&inode->i_lock);
1562
1563 evict(inode);
1564}
1565
1566/**
1567 * iput - put an inode
1568 * @inode: inode to put
1569 *
1570 * Puts an inode, dropping its usage count. If the inode use count hits
1571 * zero, the inode is then freed and may also be destroyed.
1572 *
1573 * Consequently, iput() can sleep.
1574 */
1575void iput(struct inode *inode)
1576{
1577 if (!inode)
1578 return;
1579 BUG_ON(inode->i_state & I_CLEAR);
1580retry:
1581 if (atomic_dec_and_lock(&inode->i_count, &inode->i_lock)) {
1582 if (inode->i_nlink && (inode->i_state & I_DIRTY_TIME)) {
1583 atomic_inc(&inode->i_count);
1584 spin_unlock(&inode->i_lock);
1585 trace_writeback_lazytime_iput(inode);
1586 mark_inode_dirty_sync(inode);
1587 goto retry;
1588 }
1589 iput_final(inode);
1590 }
1591}
1592EXPORT_SYMBOL(iput);
1593
1594/**
1595 * bmap - find a block number in a file
1596 * @inode: inode of file
1597 * @block: block to find
1598 *
1599 * Returns the block number on the device holding the inode that
1600 * is the disk block number for the block of the file requested.
 * That is, asked for block 4 of inode 1, the function will return the
 * disk block, relative to the start of the disk, that holds that block
 * of the file. A return value of 0 means the block is unmapped or the
 * filesystem does not implement ->bmap.
1604 */
1605sector_t bmap(struct inode *inode, sector_t block)
1606{
1607 sector_t res = 0;
1608 if (inode->i_mapping->a_ops->bmap)
1609 res = inode->i_mapping->a_ops->bmap(inode->i_mapping, block);
1610 return res;
1611}
1612EXPORT_SYMBOL(bmap);
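
/*
 * Usage sketch (illustrative): ask where logical block 4 of a file
 * lives on the underlying device:
 *
 *	sector_t phys = bmap(inode, 4);
 *
 *	if (!phys)
 *		... hole, or bmap unsupported ...
 */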
1613
1614/*
1615 * With relative atime, only update atime if the previous atime is
1616 * earlier than either the ctime or mtime or if at least a day has
1617 * passed since the last atime update.
1618 */
1619static int relatime_need_update(struct vfsmount *mnt, struct inode *inode,
1620 struct timespec64 now)
1621{
1622
1623 if (!(mnt->mnt_flags & MNT_RELATIME))
1624 return 1;
1625 /*
1626 * Is mtime younger than atime? If yes, update atime:
1627 */
1628 if (timespec64_compare(&inode->i_mtime, &inode->i_atime) >= 0)
1629 return 1;
1630 /*
1631 * Is ctime younger than atime? If yes, update atime:
1632 */
1633 if (timespec64_compare(&inode->i_ctime, &inode->i_atime) >= 0)
1634 return 1;
1635
1636 /*
1637 * Is the previous atime value older than a day? If yes,
1638 * update atime:
1639 */
1640 if ((long)(now.tv_sec - inode->i_atime.tv_sec) >= 24*60*60)
1641 return 1;
1642 /*
1643 * Good, we can skip the atime update:
1644 */
1645 return 0;
1646}
1647
1648int generic_update_time(struct inode *inode, struct timespec64 *time, int flags)
1649{
1650 int iflags = I_DIRTY_TIME;
1651 bool dirty = false;
1652
1653 if (flags & S_ATIME)
1654 inode->i_atime = *time;
1655 if (flags & S_VERSION)
1656 dirty = inode_maybe_inc_iversion(inode, false);
1657 if (flags & S_CTIME)
1658 inode->i_ctime = *time;
1659 if (flags & S_MTIME)
1660 inode->i_mtime = *time;
1661 if ((flags & (S_ATIME | S_CTIME | S_MTIME)) &&
1662 !(inode->i_sb->s_flags & SB_LAZYTIME))
1663 dirty = true;
1664
1665 if (dirty)
1666 iflags |= I_DIRTY_SYNC;
1667 __mark_inode_dirty(inode, iflags);
1668 return 0;
1669}
1670EXPORT_SYMBOL(generic_update_time);
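
/*
 * Usage sketch (illustrative; myfs_is_readonly() is hypothetical): a
 * filesystem that needs a veto before timestamps change can supply its
 * own ->update_time and fall back to the generic helper:
 *
 *	static int myfs_update_time(struct inode *inode,
 *				    struct timespec64 *time, int flags)
 *	{
 *		if (myfs_is_readonly(inode))
 *			return -EROFS;
 *		return generic_update_time(inode, time, flags);
 *	}
 */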
1671
1672/*
 * This does the actual work of updating an inode's time or version. The
 * caller must have called mnt_want_write() before calling this.
1675 */
1676static int update_time(struct inode *inode, struct timespec64 *time, int flags)
1677{
1678 int (*update_time)(struct inode *, struct timespec64 *, int);
1679
1680 update_time = inode->i_op->update_time ? inode->i_op->update_time :
1681 generic_update_time;
1682
1683 return update_time(inode, time, flags);
1684}
1685
1686/**
1687 * touch_atime - update the access time
1688 * @path: the &struct path to update
1689 * @inode: inode to update
1690 *
1691 * Update the accessed time on an inode and mark it for writeback.
1692 * This function automatically handles read only file systems and media,
1693 * as well as the "noatime" flag and inode specific "noatime" markers.
1694 */
1695bool atime_needs_update(const struct path *path, struct inode *inode)
1696{
1697 struct vfsmount *mnt = path->mnt;
1698 struct timespec64 now;
1699
1700 if (inode->i_flags & S_NOATIME)
1701 return false;
1702
	/*
	 * Atime updates will likely cause i_uid and i_gid to be written
	 * back improperly if their true value is unknown to the vfs.
	 */
1706 if (HAS_UNMAPPED_ID(inode))
1707 return false;
1708
1709 if (IS_NOATIME(inode))
1710 return false;
1711 if ((inode->i_sb->s_flags & SB_NODIRATIME) && S_ISDIR(inode->i_mode))
1712 return false;
1713
1714 if (mnt->mnt_flags & MNT_NOATIME)
1715 return false;
1716 if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))
1717 return false;
1718
1719 now = current_time(inode);
1720
1721 if (!relatime_need_update(mnt, inode, now))
1722 return false;
1723
1724 if (timespec64_equal(&inode->i_atime, &now))
1725 return false;
1726
1727 return true;
1728}
1729
1730void touch_atime(const struct path *path)
1731{
1732 struct vfsmount *mnt = path->mnt;
1733 struct inode *inode = d_inode(path->dentry);
1734 struct timespec64 now;
1735
1736 if (!atime_needs_update(path, inode))
1737 return;
1738
1739 if (!sb_start_write_trylock(inode->i_sb))
1740 return;
1741
1742 if (__mnt_want_write(mnt) != 0)
1743 goto skip_update;
1744 /*
1745 * File systems can error out when updating inodes if they need to
1746 * allocate new space to modify an inode (such is the case for
1747 * Btrfs), but since we touch atime while walking down the path we
1748 * really don't care if we failed to update the atime of the file,
1749 * so just ignore the return value.
1750 * We may also fail on filesystems that have the ability to make parts
1751 * of the fs read only, e.g. subvolumes in Btrfs.
1752 */
1753 now = current_time(inode);
1754 update_time(inode, &now, S_ATIME);
1755 __mnt_drop_write(mnt);
1756skip_update:
1757 sb_end_write(inode->i_sb);
1758}
1759EXPORT_SYMBOL(touch_atime);
1760
1761/*
1762 * The logic we want is
1763 *
1764 * if suid or (sgid and xgrp)
1765 * remove privs
1766 */
1767int should_remove_suid(struct dentry *dentry)
1768{
1769 umode_t mode = d_inode(dentry)->i_mode;
1770 int kill = 0;
1771
1772 /* suid always must be killed */
1773 if (unlikely(mode & S_ISUID))
1774 kill = ATTR_KILL_SUID;
1775
1776 /*
1777 * sgid without any exec bits is just a mandatory locking mark; leave
1778 * it alone. If some exec bits are set, it's a real sgid; kill it.
1779 */
1780 if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
1781 kill |= ATTR_KILL_SGID;
1782
1783 if (unlikely(kill && !capable(CAP_FSETID) && S_ISREG(mode)))
1784 return kill;
1785
1786 return 0;
1787}
1788EXPORT_SYMBOL(should_remove_suid);
1789
1790/*
1791 * Return mask of changes for notify_change() that need to be done as a
1792 * response to write or truncate. Return 0 if nothing has to be changed.
1793 * Negative value on error (change should be denied).
1794 */
1795int dentry_needs_remove_privs(struct dentry *dentry)
1796{
1797 struct inode *inode = d_inode(dentry);
1798 int mask = 0;
1799 int ret;
1800
1801 if (IS_NOSEC(inode))
1802 return 0;
1803
1804 mask = should_remove_suid(dentry);
1805 ret = security_inode_need_killpriv(dentry);
1806 if (ret < 0)
1807 return ret;
1808 if (ret)
1809 mask |= ATTR_KILL_PRIV;
1810 return mask;
1811}
1812
1813static int __remove_privs(struct dentry *dentry, int kill)
1814{
1815 struct iattr newattrs;
1816
1817 newattrs.ia_valid = ATTR_FORCE | kill;
1818 /*
1819 * Note we call this on write, so notify_change will not
1820 * encounter any conflicting delegations:
1821 */
1822 return notify_change(dentry, &newattrs, NULL);
1823}
1824
1825/*
 * Remove special file privileges (suid, capabilities) when file is written
1827 * to or truncated.
1828 */
1829int file_remove_privs(struct file *file)
1830{
1831 struct dentry *dentry = file_dentry(file);
1832 struct inode *inode = file_inode(file);
1833 int kill;
1834 int error = 0;
1835
1836 /*
	 * Fast path for the case where nothing security related needs
	 * doing, as well as for non-regular files, e.g. blkdev inodes.
1839 * For example, blkdev_write_iter() might get here
1840 * trying to remove privs which it is not allowed to.
1841 */
1842 if (IS_NOSEC(inode) || !S_ISREG(inode->i_mode))
1843 return 0;
1844
1845 kill = dentry_needs_remove_privs(dentry);
1846 if (kill < 0)
1847 return kill;
1848 if (kill)
1849 error = __remove_privs(dentry, kill);
1850 if (!error)
1851 inode_has_no_xattr(inode);
1852
1853 return error;
1854}
1855EXPORT_SYMBOL(file_remove_privs);
1856
1857/**
1858 * file_update_time - update mtime and ctime time
1859 * @file: file accessed
1860 *
1861 * Update the mtime and ctime members of an inode and mark the inode
1862 * for writeback. Note that this function is meant exclusively for
 * usage in the file write path of filesystems, and filesystems may
 * choose to explicitly ignore updates via this function with the
 * S_NOCMTIME inode flag, e.g. for network filesystems where these
 * timestamps are handled by the server. This can return an error for
 * file systems that need to allocate space in order to update an inode.
1868 */
1869
1870int file_update_time(struct file *file)
1871{
1872 struct inode *inode = file_inode(file);
1873 struct timespec64 now;
1874 int sync_it = 0;
1875 int ret;
1876
1877 /* First try to exhaust all avenues to not sync */
1878 if (IS_NOCMTIME(inode))
1879 return 0;
1880
1881 now = current_time(inode);
1882 if (!timespec64_equal(&inode->i_mtime, &now))
1883 sync_it = S_MTIME;
1884
1885 if (!timespec64_equal(&inode->i_ctime, &now))
1886 sync_it |= S_CTIME;
1887
1888 if (IS_I_VERSION(inode) && inode_iversion_need_inc(inode))
1889 sync_it |= S_VERSION;
1890
1891 if (!sync_it)
1892 return 0;
1893
1894 /* Finally allowed to write? Takes lock. */
1895 if (__mnt_want_write_file(file))
1896 return 0;
1897
1898 ret = update_time(inode, &now, sync_it);
1899 __mnt_drop_write_file(file);
1900
1901 return ret;
1902}
1903EXPORT_SYMBOL(file_update_time);
1904
1905/* Caller must hold the file's inode lock */
1906int file_modified(struct file *file)
1907{
1908 int err;
1909
1910 /*
1911 * Clear the security bits if the process is not being run by root.
1912 * This keeps people from modifying setuid and setgid binaries.
1913 */
1914 err = file_remove_privs(file);
1915 if (err)
1916 return err;
1917
1918 if (unlikely(file->f_mode & FMODE_NOCMTIME))
1919 return 0;
1920
1921 return file_update_time(file);
1922}
1923EXPORT_SYMBOL(file_modified);
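
/*
 * Usage sketch (illustrative): a typical ->write_iter() calls this with
 * the inode lock held, after its own checks and before touching data:
 *
 *	inode_lock(inode);
 *	ret = file_modified(file);
 *	if (ret) {
 *		inode_unlock(inode);
 *		return ret;
 *	}
 *	... perform the write ...
 *	inode_unlock(inode);
 */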
1924
1925int inode_needs_sync(struct inode *inode)
1926{
1927 if (IS_SYNC(inode))
1928 return 1;
1929 if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode))
1930 return 1;
1931 return 0;
1932}
1933EXPORT_SYMBOL(inode_needs_sync);
1934
1935/*
1936 * If we try to find an inode in the inode hash while it is being
1937 * deleted, we have to wait until the filesystem completes its
1938 * deletion before reporting that it isn't found. This function waits
1939 * until the deletion _might_ have completed. Callers are responsible
1940 * to recheck inode state.
1941 *
1942 * It doesn't matter if I_NEW is not set initially, a call to
1943 * wake_up_bit(&inode->i_state, __I_NEW) after removing from the hash list
1944 * will DTRT.
1945 */
1946static void __wait_on_freeing_inode(struct inode *inode)
1947{
1948 wait_queue_head_t *wq;
1949 DEFINE_WAIT_BIT(wait, &inode->i_state, __I_NEW);
1950 wq = bit_waitqueue(&inode->i_state, __I_NEW);
1951 prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
1952 spin_unlock(&inode->i_lock);
1953 spin_unlock(&inode_hash_lock);
1954 schedule();
1955 finish_wait(wq, &wait.wq_entry);
1956 spin_lock(&inode_hash_lock);
1957}
1958
1959static __initdata unsigned long ihash_entries;
1960static int __init set_ihash_entries(char *str)
1961{
1962 if (!str)
1963 return 0;
1964 ihash_entries = simple_strtoul(str, &str, 0);
1965 return 1;
1966}
1967__setup("ihash_entries=", set_ihash_entries);
1968
1969/*
1970 * Initialize the waitqueues and inode hash table.
1971 */
1972void __init inode_init_early(void)
1973{
	/*
	 * If hashes are distributed across NUMA nodes, defer
	 * hash allocation until vmalloc space is available.
	 */
1977 if (hashdist)
1978 return;
1979
1980 inode_hashtable =
1981 alloc_large_system_hash("Inode-cache",
1982 sizeof(struct hlist_head),
1983 ihash_entries,
1984 14,
1985 HASH_EARLY | HASH_ZERO,
1986 &i_hash_shift,
1987 &i_hash_mask,
1988 0,
1989 0);
1990}
1991
1992void __init inode_init(void)
1993{
1994 /* inode slab cache */
1995 inode_cachep = kmem_cache_create("inode_cache",
1996 sizeof(struct inode),
1997 0,
1998 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
1999 SLAB_MEM_SPREAD|SLAB_ACCOUNT),
2000 init_once);
2001
2002 /* Hash may have been set up in inode_init_early */
2003 if (!hashdist)
2004 return;
2005
2006 inode_hashtable =
2007 alloc_large_system_hash("Inode-cache",
2008 sizeof(struct hlist_head),
2009 ihash_entries,
2010 14,
2011 HASH_ZERO,
2012 &i_hash_shift,
2013 &i_hash_mask,
2014 0,
2015 0);
2016}
2017
2018void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev)
2019{
2020 inode->i_mode = mode;
2021 if (S_ISCHR(mode)) {
2022 inode->i_fop = &def_chr_fops;
2023 inode->i_rdev = rdev;
2024 } else if (S_ISBLK(mode)) {
2025 inode->i_fop = &def_blk_fops;
2026 inode->i_rdev = rdev;
2027 } else if (S_ISFIFO(mode))
2028 inode->i_fop = &pipefifo_fops;
2029 else if (S_ISSOCK(mode))
2030 ; /* leave it no_open_fops */
2031 else
2032 printk(KERN_DEBUG "init_special_inode: bogus i_mode (%o) for"
2033 " inode %s:%lu\n", mode, inode->i_sb->s_id,
2034 inode->i_ino);
2035}
2036EXPORT_SYMBOL(init_special_inode);
2037
2038/**
 * inode_init_owner - Init uid, gid and mode for a new inode according to POSIX standards
2040 * @inode: New inode
2041 * @dir: Directory inode
2042 * @mode: mode of the new inode
2043 */
2044void inode_init_owner(struct inode *inode, const struct inode *dir,
2045 umode_t mode)
2046{
2047 inode->i_uid = current_fsuid();
2048 if (dir && dir->i_mode & S_ISGID) {
2049 inode->i_gid = dir->i_gid;
2050
2051 /* Directories are special, and always inherit S_ISGID */
2052 if (S_ISDIR(mode))
2053 mode |= S_ISGID;
2054 else if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP) &&
2055 !in_group_p(inode->i_gid) &&
2056 !capable_wrt_inode_uidgid(dir, CAP_FSETID))
2057 mode &= ~S_ISGID;
2058 } else
2059 inode->i_gid = current_fsgid();
2060 inode->i_mode = mode;
2061}
2062EXPORT_SYMBOL(inode_init_owner);
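
/*
 * Usage sketch (illustrative, modelled on a simple in-memory filesystem;
 * myfs_mknod() is hypothetical): inode_init_owner() and
 * init_special_inode() are typically paired when creating device nodes:
 *
 *	static int myfs_mknod(struct inode *dir, struct dentry *dentry,
 *			      umode_t mode, dev_t dev)
 *	{
 *		struct inode *inode = new_inode(dir->i_sb);
 *
 *		if (!inode)
 *			return -ENOMEM;
 *		inode_init_owner(inode, dir, mode);
 *		init_special_inode(inode, inode->i_mode, dev);
 *		d_instantiate(dentry, inode);
 *		dget(dentry);
 *		return 0;
 *	}
 */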
2063
2064/**
2065 * inode_owner_or_capable - check current task permissions to inode
2066 * @inode: inode being checked
2067 *
2068 * Return true if current either has CAP_FOWNER in a namespace with the
2069 * inode owner uid mapped, or owns the file.
2070 */
2071bool inode_owner_or_capable(const struct inode *inode)
2072{
2073 struct user_namespace *ns;
2074
2075 if (uid_eq(current_fsuid(), inode->i_uid))
2076 return true;
2077
2078 ns = current_user_ns();
2079 if (kuid_has_mapping(ns, inode->i_uid) && ns_capable(ns, CAP_FOWNER))
2080 return true;
2081 return false;
2082}
2083EXPORT_SYMBOL(inode_owner_or_capable);
2084
2085/*
2086 * Direct i/o helper functions
2087 */
2088static void __inode_dio_wait(struct inode *inode)
2089{
2090 wait_queue_head_t *wq = bit_waitqueue(&inode->i_state, __I_DIO_WAKEUP);
2091 DEFINE_WAIT_BIT(q, &inode->i_state, __I_DIO_WAKEUP);
2092
2093 do {
2094 prepare_to_wait(wq, &q.wq_entry, TASK_UNINTERRUPTIBLE);
2095 if (atomic_read(&inode->i_dio_count))
2096 schedule();
2097 } while (atomic_read(&inode->i_dio_count));
2098 finish_wait(wq, &q.wq_entry);
2099}
2100
2101/**
2102 * inode_dio_wait - wait for outstanding DIO requests to finish
2103 * @inode: inode to wait for
2104 *
2105 * Waits for all pending direct I/O requests to finish so that we can
2106 * proceed with a truncate or equivalent operation.
2107 *
2108 * Must be called under a lock that serializes taking new references
2109 * to i_dio_count, usually by inode->i_mutex.
2110 */
2111void inode_dio_wait(struct inode *inode)
2112{
2113 if (atomic_read(&inode->i_dio_count))
2114 __inode_dio_wait(inode);
2115}
2116EXPORT_SYMBOL(inode_dio_wait);
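
/*
 * Usage sketch (illustrative): a truncate path drains direct I/O after
 * taking the inode lock, so no new i_dio_count references can appear:
 *
 *	inode_lock(inode);
 *	inode_dio_wait(inode);
 *	... shrink the file ...
 *	inode_unlock(inode);
 */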
2117
2118/*
2119 * inode_set_flags - atomically set some inode flags
2120 *
2121 * Note: the caller should be holding i_mutex, or else be sure that
2122 * they have exclusive access to the inode structure (i.e., while the
2123 * inode is being instantiated). The reason for the cmpxchg() loop
2124 * --- which wouldn't be necessary if all code paths which modify
 * i_flags actually followed this rule --- is that there is at least one
 * code path which doesn't today, so we use cmpxchg() out of an abundance
2127 * of caution.
2128 *
2129 * In the long run, i_mutex is overkill, and we should probably look
2130 * at using the i_lock spinlock to protect i_flags, and then make sure
2131 * it is so documented in include/linux/fs.h and that all code follows
2132 * the locking convention!!
2133 */
2134void inode_set_flags(struct inode *inode, unsigned int flags,
2135 unsigned int mask)
2136{
2137 WARN_ON_ONCE(flags & ~mask);
2138 set_mask_bits(&inode->i_flags, mask, flags);
2139}
2140EXPORT_SYMBOL(inode_set_flags);
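
/*
 * Usage sketch (illustrative; myfs_flags() and MYFS_NOATIME_FL are
 * hypothetical): translate an on-disk "no atime" attribute into the
 * in-core flag without disturbing bits outside the mask:
 *
 *	unsigned int new_fl = 0;
 *
 *	if (myfs_flags(inode) & MYFS_NOATIME_FL)
 *		new_fl |= S_NOATIME;
 *	inode_set_flags(inode, new_fl, S_NOATIME);
 */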
2141
2142void inode_nohighmem(struct inode *inode)
2143{
2144 mapping_set_gfp_mask(inode->i_mapping, GFP_USER);
2145}
2146EXPORT_SYMBOL(inode_nohighmem);
2147
2148/**
2149 * timespec64_trunc - Truncate timespec64 to a granularity
2150 * @t: Timespec64
2151 * @gran: Granularity in ns.
2152 *
2153 * Truncate a timespec64 to a granularity. Always rounds down. gran must
2154 * not be 0 nor greater than a second (NSEC_PER_SEC, or 10^9 ns).
2155 */
2156struct timespec64 timespec64_trunc(struct timespec64 t, unsigned gran)
2157{
2158 /* Avoid division in the common cases 1 ns and 1 s. */
2159 if (gran == 1) {
2160 /* nothing */
2161 } else if (gran == NSEC_PER_SEC) {
2162 t.tv_nsec = 0;
2163 } else if (gran > 1 && gran < NSEC_PER_SEC) {
2164 t.tv_nsec -= t.tv_nsec % gran;
2165 } else {
2166 WARN(1, "illegal file time granularity: %u", gran);
2167 }
2168 return t;
2169}
2170EXPORT_SYMBOL(timespec64_trunc);
2171
2172/**
2173 * timestamp_truncate - Truncate timespec to a granularity
2174 * @t: Timespec
2175 * @inode: inode being updated
2176 *
2177 * Truncate a timespec to the granularity supported by the fs
 * containing the inode. Always rounds down. The granularity must
 * not be 0 nor greater than a second (NSEC_PER_SEC, or 10^9 ns).
2180 */
2181struct timespec64 timestamp_truncate(struct timespec64 t, struct inode *inode)
2182{
2183 struct super_block *sb = inode->i_sb;
2184 unsigned int gran = sb->s_time_gran;
2185
2186 t.tv_sec = clamp(t.tv_sec, sb->s_time_min, sb->s_time_max);
2187 if (unlikely(t.tv_sec == sb->s_time_max || t.tv_sec == sb->s_time_min))
2188 t.tv_nsec = 0;
2189
2190 /* Avoid division in the common cases 1 ns and 1 s. */
2191 if (gran == 1)
2192 ; /* nothing */
2193 else if (gran == NSEC_PER_SEC)
2194 t.tv_nsec = 0;
2195 else if (gran > 1 && gran < NSEC_PER_SEC)
2196 t.tv_nsec -= t.tv_nsec % gran;
2197 else
2198 WARN(1, "invalid file time granularity: %u", gran);
2199 return t;
2200}
2201EXPORT_SYMBOL(timestamp_truncate);
2202
2203/**
2204 * current_time - Return FS time
2205 * @inode: inode.
2206 *
2207 * Return the current time truncated to the time granularity supported by
2208 * the fs.
2209 *
 * Note that inode must not be NULL. If inode->i_sb is NULL, the
 * function warns and returns the time without truncation.
2212 */
2213struct timespec64 current_time(struct inode *inode)
2214{
2215 struct timespec64 now;
2216
2217 ktime_get_coarse_real_ts64(&now);
2218
2219 if (unlikely(!inode->i_sb)) {
2220 WARN(1, "current_time() called with uninitialized super_block in the inode");
2221 return now;
2222 }
2223
2224 return timestamp_truncate(now, inode);
2225}
2226EXPORT_SYMBOL(current_time);
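
/*
 * Usage sketch (illustrative): directory operations commonly stamp
 * mtime and ctime from a single clock reading:
 *
 *	dir->i_mtime = dir->i_ctime = current_time(dir);
 */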
2227
2228/*
2229 * Generic function to check FS_IOC_SETFLAGS values and reject any invalid
2230 * configurations.
2231 *
2232 * Note: the caller should be holding i_mutex, or else be sure that they have
2233 * exclusive access to the inode structure.
2234 */
2235int vfs_ioc_setflags_prepare(struct inode *inode, unsigned int oldflags,
2236 unsigned int flags)
2237{
2238 /*
2239 * The IMMUTABLE and APPEND_ONLY flags can only be changed by
2240 * the relevant capability.
2241 *
2242 * This test looks nicer. Thanks to Pauline Middelink
2243 */
2244 if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL) &&
2245 !capable(CAP_LINUX_IMMUTABLE))
2246 return -EPERM;
2247
2248 return 0;
2249}
2250EXPORT_SYMBOL(vfs_ioc_setflags_prepare);
2251
2252/*
2253 * Generic function to check FS_IOC_FSSETXATTR values and reject any invalid
2254 * configurations.
2255 *
2256 * Note: the caller should be holding i_mutex, or else be sure that they have
2257 * exclusive access to the inode structure.
2258 */
2259int vfs_ioc_fssetxattr_check(struct inode *inode, const struct fsxattr *old_fa,
2260 struct fsxattr *fa)
2261{
2262 /*
2263 * Can't modify an immutable/append-only file unless we have
2264 * appropriate permission.
2265 */
2266 if ((old_fa->fsx_xflags ^ fa->fsx_xflags) &
2267 (FS_XFLAG_IMMUTABLE | FS_XFLAG_APPEND) &&
2268 !capable(CAP_LINUX_IMMUTABLE))
2269 return -EPERM;
2270
2271 /*
2272 * Project Quota ID state is only allowed to change from within the init
2273 * namespace. Enforce that restriction only if we are trying to change
2274 * the quota ID state. Everything else is allowed in user namespaces.
2275 */
2276 if (current_user_ns() != &init_user_ns) {
2277 if (old_fa->fsx_projid != fa->fsx_projid)
2278 return -EINVAL;
2279 if ((old_fa->fsx_xflags ^ fa->fsx_xflags) &
2280 FS_XFLAG_PROJINHERIT)
2281 return -EINVAL;
2282 }
2283
2284 /* Check extent size hints. */
2285 if ((fa->fsx_xflags & FS_XFLAG_EXTSIZE) && !S_ISREG(inode->i_mode))
2286 return -EINVAL;
2287
2288 if ((fa->fsx_xflags & FS_XFLAG_EXTSZINHERIT) &&
2289 !S_ISDIR(inode->i_mode))
2290 return -EINVAL;
2291
2292 if ((fa->fsx_xflags & FS_XFLAG_COWEXTSIZE) &&
2293 !S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode))
2294 return -EINVAL;
2295
2296 /*
2297 * It is only valid to set the DAX flag on regular files and
	 * directories.
2299 */
2300 if ((fa->fsx_xflags & FS_XFLAG_DAX) &&
2301 !(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)))
2302 return -EINVAL;
2303
2304 /* Extent size hints of zero turn off the flags. */
2305 if (fa->fsx_extsize == 0)
2306 fa->fsx_xflags &= ~(FS_XFLAG_EXTSIZE | FS_XFLAG_EXTSZINHERIT);
2307 if (fa->fsx_cowextsize == 0)
2308 fa->fsx_xflags &= ~FS_XFLAG_COWEXTSIZE;
2309
2310 return 0;
2311}
2312EXPORT_SYMBOL(vfs_ioc_fssetxattr_check);
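
/*
 * Usage sketch (illustrative; the myfs_*() helpers are hypothetical):
 * an FS_IOC_FSSETXATTR handler fills in the current state and lets this
 * helper vet the transition before applying it:
 *
 *	myfs_fill_fsxattr(inode, &old_fa);
 *	ret = vfs_ioc_fssetxattr_check(inode, &old_fa, &fa);
 *	if (ret)
 *		return ret;
 *	... commit fa to the inode ...
 */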