   1/*
   2 * (C) 1997 Linus Torvalds
   3 * (C) 1999 Andrea Arcangeli <andrea@suse.de> (dynamic inode allocation)
   4 */
   5#include <linux/export.h>
   6#include <linux/fs.h>
   7#include <linux/mm.h>
   8#include <linux/backing-dev.h>
   9#include <linux/hash.h>
  10#include <linux/swap.h>
  11#include <linux/security.h>
  12#include <linux/cdev.h>
  13#include <linux/bootmem.h>
  14#include <linux/fsnotify.h>
  15#include <linux/mount.h>
  16#include <linux/posix_acl.h>
  17#include <linux/prefetch.h>
  18#include <linux/buffer_head.h> /* for inode_has_buffers */
  19#include <linux/ratelimit.h>
  20#include <linux/list_lru.h>
 
  21#include <trace/events/writeback.h>
  22#include "internal.h"
  23
  24/*
  25 * Inode locking rules:
  26 *
  27 * inode->i_lock protects:
  28 *   inode->i_state, inode->i_hash, __iget()
  29 * Inode LRU list locks protect:
  30 *   inode->i_sb->s_inode_lru, inode->i_lru
  31 * inode->i_sb->s_inode_list_lock protects:
  32 *   inode->i_sb->s_inodes, inode->i_sb_list
  33 * bdi->wb.list_lock protects:
  34 *   bdi->wb.b_{dirty,io,more_io,dirty_time}, inode->i_io_list
  35 * inode_hash_lock protects:
  36 *   inode_hashtable, inode->i_hash
  37 *
  38 * Lock ordering:
  39 *
  40 * inode->i_sb->s_inode_list_lock
  41 *   inode->i_lock
  42 *     Inode LRU list locks
  43 *
  44 * bdi->wb.list_lock
  45 *   inode->i_lock
  46 *
  47 * inode_hash_lock
  48 *   inode->i_sb->s_inode_list_lock
  49 *   inode->i_lock
  50 *
  51 * iunique_lock
  52 *   inode_hash_lock
  53 */
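/*
 * Illustrative sketch added in editing (not part of fs/inode.c): the lock
 * ordering above in practice.  A walker takes s_inode_list_lock first and
 * nests each inode's i_lock inside it, never the reverse; compare
 * evict_inodes() below.  The function name is hypothetical.
 */
static void example_walk_sb_inodes(struct super_block *sb)
{
	struct inode *inode;

	spin_lock(&sb->s_inode_list_lock);		/* outer lock first */
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		spin_lock(&inode->i_lock);		/* nests inside */
		if (inode->i_state & (I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		/* ... inspect inode->i_state here ... */
		spin_unlock(&inode->i_lock);
	}
	spin_unlock(&sb->s_inode_list_lock);
}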
  54
  55static unsigned int i_hash_mask __read_mostly;
  56static unsigned int i_hash_shift __read_mostly;
  57static struct hlist_head *inode_hashtable __read_mostly;
  58static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock);
  59
  60/*
  61 * Empty aops. Can be used for the cases where the user does not
  62 * define any of the address_space operations.
  63 */
  64const struct address_space_operations empty_aops = {
  65};
  66EXPORT_SYMBOL(empty_aops);
  67
  68/*
  69 * Statistics gathering..
  70 */
  71struct inodes_stat_t inodes_stat;
  72
  73static DEFINE_PER_CPU(unsigned long, nr_inodes);
  74static DEFINE_PER_CPU(unsigned long, nr_unused);
  75
  76static struct kmem_cache *inode_cachep __read_mostly;
  77
  78static long get_nr_inodes(void)
  79{
  80	int i;
  81	long sum = 0;
  82	for_each_possible_cpu(i)
  83		sum += per_cpu(nr_inodes, i);
  84	return sum < 0 ? 0 : sum;
  85}
  86
  87static inline long get_nr_inodes_unused(void)
  88{
  89	int i;
  90	long sum = 0;
  91	for_each_possible_cpu(i)
  92		sum += per_cpu(nr_unused, i);
  93	return sum < 0 ? 0 : sum;
  94}
  95
  96long get_nr_dirty_inodes(void)
  97{
  98	/* not actually dirty inodes, but a wild approximation */
  99	long nr_dirty = get_nr_inodes() - get_nr_inodes_unused();
 100	return nr_dirty > 0 ? nr_dirty : 0;
 101}
 102
 103/*
 104 * Handle nr_inode sysctl
 105 */
 106#ifdef CONFIG_SYSCTL
 107int proc_nr_inodes(struct ctl_table *table, int write,
 108		   void __user *buffer, size_t *lenp, loff_t *ppos)
 109{
 110	inodes_stat.nr_inodes = get_nr_inodes();
 111	inodes_stat.nr_unused = get_nr_inodes_unused();
 112	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
 113}
 114#endif
 115
 116static int no_open(struct inode *inode, struct file *file)
 117{
 118	return -ENXIO;
 119}
 120
 121/**
  122 * inode_init_always - perform inode structure initialisation
 123 * @sb: superblock inode belongs to
 124 * @inode: inode to initialise
 125 *
 126 * These are initializations that need to be done on every inode
 127 * allocation as the fields are not initialised by slab allocation.
 128 */
 129int inode_init_always(struct super_block *sb, struct inode *inode)
 130{
 131	static const struct inode_operations empty_iops;
 132	static const struct file_operations no_open_fops = {.open = no_open};
 133	struct address_space *const mapping = &inode->i_data;
 134
 135	inode->i_sb = sb;
 136	inode->i_blkbits = sb->s_blocksize_bits;
 137	inode->i_flags = 0;
 
 138	atomic_set(&inode->i_count, 1);
 139	inode->i_op = &empty_iops;
 140	inode->i_fop = &no_open_fops;
 
 141	inode->__i_nlink = 1;
 142	inode->i_opflags = 0;
 143	if (sb->s_xattr)
 144		inode->i_opflags |= IOP_XATTR;
 145	i_uid_write(inode, 0);
 146	i_gid_write(inode, 0);
 147	atomic_set(&inode->i_writecount, 0);
 148	inode->i_size = 0;
 
 149	inode->i_blocks = 0;
 150	inode->i_bytes = 0;
 151	inode->i_generation = 0;
 152	inode->i_pipe = NULL;
 153	inode->i_bdev = NULL;
 154	inode->i_cdev = NULL;
 155	inode->i_link = NULL;
 156	inode->i_dir_seq = 0;
 157	inode->i_rdev = 0;
 158	inode->dirtied_when = 0;
 159
 160#ifdef CONFIG_CGROUP_WRITEBACK
 161	inode->i_wb_frn_winner = 0;
 162	inode->i_wb_frn_avg_time = 0;
 163	inode->i_wb_frn_history = 0;
 164#endif
 165
 166	if (security_inode_alloc(inode))
 167		goto out;
 168	spin_lock_init(&inode->i_lock);
 169	lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key);
 170
 171	init_rwsem(&inode->i_rwsem);
 172	lockdep_set_class(&inode->i_rwsem, &sb->s_type->i_mutex_key);
 173
 174	atomic_set(&inode->i_dio_count, 0);
 175
 176	mapping->a_ops = &empty_aops;
 177	mapping->host = inode;
  178	mapping->flags = 0;
  179	atomic_set(&mapping->i_mmap_writable, 0);
  180	mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE);
 181	mapping->private_data = NULL;
 182	mapping->writeback_index = 0;
 183	inode->i_private = NULL;
 184	inode->i_mapping = mapping;
 185	INIT_HLIST_HEAD(&inode->i_dentry);	/* buggered by rcu freeing */
 186#ifdef CONFIG_FS_POSIX_ACL
 187	inode->i_acl = inode->i_default_acl = ACL_NOT_CACHED;
 188#endif
 189
 190#ifdef CONFIG_FSNOTIFY
 191	inode->i_fsnotify_mask = 0;
 192#endif
 193	inode->i_flctx = NULL;
 194	this_cpu_inc(nr_inodes);
 195
 196	return 0;
 197out:
 198	return -ENOMEM;
 199}
 200EXPORT_SYMBOL(inode_init_always);
  201
 202static struct inode *alloc_inode(struct super_block *sb)
 203{
 
 204	struct inode *inode;
 205
 206	if (sb->s_op->alloc_inode)
 207		inode = sb->s_op->alloc_inode(sb);
 208	else
 209		inode = kmem_cache_alloc(inode_cachep, GFP_KERNEL);
 210
 211	if (!inode)
 212		return NULL;
 213
 214	if (unlikely(inode_init_always(sb, inode))) {
 215		if (inode->i_sb->s_op->destroy_inode)
 216			inode->i_sb->s_op->destroy_inode(inode);
 217		else
  218			kmem_cache_free(inode_cachep, inode);
  219		return NULL;
 220	}
 221
 222	return inode;
 223}
 224
 225void free_inode_nonrcu(struct inode *inode)
 226{
 227	kmem_cache_free(inode_cachep, inode);
 228}
 229EXPORT_SYMBOL(free_inode_nonrcu);
 230
 231void __destroy_inode(struct inode *inode)
 232{
 233	BUG_ON(inode_has_buffers(inode));
 234	inode_detach_wb(inode);
 235	security_inode_free(inode);
 236	fsnotify_inode_delete(inode);
 237	locks_free_lock_context(inode);
 238	if (!inode->i_nlink) {
 239		WARN_ON(atomic_long_read(&inode->i_sb->s_remove_count) == 0);
 240		atomic_long_dec(&inode->i_sb->s_remove_count);
 241	}
 242
 243#ifdef CONFIG_FS_POSIX_ACL
 244	if (inode->i_acl && !is_uncached_acl(inode->i_acl))
 245		posix_acl_release(inode->i_acl);
 246	if (inode->i_default_acl && !is_uncached_acl(inode->i_default_acl))
 247		posix_acl_release(inode->i_default_acl);
 248#endif
 249	this_cpu_dec(nr_inodes);
 250}
 251EXPORT_SYMBOL(__destroy_inode);
 252
 253static void i_callback(struct rcu_head *head)
 254{
 255	struct inode *inode = container_of(head, struct inode, i_rcu);
 256	kmem_cache_free(inode_cachep, inode);
 257}
 258
 259static void destroy_inode(struct inode *inode)
  260{
  261	BUG_ON(!list_empty(&inode->i_lru));
 262	__destroy_inode(inode);
 263	if (inode->i_sb->s_op->destroy_inode)
 264		inode->i_sb->s_op->destroy_inode(inode);
 265	else
  266			call_rcu(&inode->i_rcu, i_callback);
  267}
 268
 269/**
 270 * drop_nlink - directly drop an inode's link count
 271 * @inode: inode
 272 *
 273 * This is a low-level filesystem helper to replace any
 274 * direct filesystem manipulation of i_nlink.  In cases
 275 * where we are attempting to track writes to the
 276 * filesystem, a decrement to zero means an imminent
 277 * write when the file is truncated and actually unlinked
 278 * on the filesystem.
 279 */
 280void drop_nlink(struct inode *inode)
 281{
 282	WARN_ON(inode->i_nlink == 0);
 283	inode->__i_nlink--;
 284	if (!inode->i_nlink)
 285		atomic_long_inc(&inode->i_sb->s_remove_count);
 286}
 287EXPORT_SYMBOL(drop_nlink);
 288
 289/**
 290 * clear_nlink - directly zero an inode's link count
 291 * @inode: inode
 292 *
 293 * This is a low-level filesystem helper to replace any
 294 * direct filesystem manipulation of i_nlink.  See
 295 * drop_nlink() for why we care about i_nlink hitting zero.
 296 */
 297void clear_nlink(struct inode *inode)
 298{
 299	if (inode->i_nlink) {
 300		inode->__i_nlink = 0;
 301		atomic_long_inc(&inode->i_sb->s_remove_count);
 302	}
 303}
 304EXPORT_SYMBOL(clear_nlink);
 305
 306/**
 307 * set_nlink - directly set an inode's link count
 308 * @inode: inode
 309 * @nlink: new nlink (should be non-zero)
 310 *
 311 * This is a low-level filesystem helper to replace any
 312 * direct filesystem manipulation of i_nlink.
 313 */
 314void set_nlink(struct inode *inode, unsigned int nlink)
 315{
 316	if (!nlink) {
 317		clear_nlink(inode);
 318	} else {
 319		/* Yes, some filesystems do change nlink from zero to one */
 320		if (inode->i_nlink == 0)
 321			atomic_long_dec(&inode->i_sb->s_remove_count);
 322
 323		inode->__i_nlink = nlink;
 324	}
 325}
 326EXPORT_SYMBOL(set_nlink);
 327
 328/**
 329 * inc_nlink - directly increment an inode's link count
 330 * @inode: inode
 331 *
 332 * This is a low-level filesystem helper to replace any
 333 * direct filesystem manipulation of i_nlink.  Currently,
 334 * it is only here for parity with dec_nlink().
 335 */
 336void inc_nlink(struct inode *inode)
 337{
 338	if (unlikely(inode->i_nlink == 0)) {
 339		WARN_ON(!(inode->i_state & I_LINKABLE));
 340		atomic_long_dec(&inode->i_sb->s_remove_count);
 341	}
 342
 343	inode->__i_nlink++;
 344}
 345EXPORT_SYMBOL(inc_nlink);
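/*
 * Illustrative sketch added in editing (not part of fs/inode.c): filesystems
 * are expected to go through set_nlink()/inc_nlink()/drop_nlink() rather
 * than writing i_nlink directly, so s_remove_count stays balanced.  The
 * on-disk structure below is hypothetical.
 */
struct example_disk_inode {
	__le16 i_links_count;	/* hypothetical on-disk link count */
};

static void example_read_link_count(struct inode *inode,
				    const struct example_disk_inode *raw)
{
	set_nlink(inode, le16_to_cpu(raw->i_links_count));
}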
 346
 347void address_space_init_once(struct address_space *mapping)
 348{
 349	memset(mapping, 0, sizeof(*mapping));
 350	INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC | __GFP_ACCOUNT);
 351	spin_lock_init(&mapping->tree_lock);
 352	init_rwsem(&mapping->i_mmap_rwsem);
 353	INIT_LIST_HEAD(&mapping->private_list);
 354	spin_lock_init(&mapping->private_lock);
  355	mapping->i_mmap = RB_ROOT;
  356}
 357EXPORT_SYMBOL(address_space_init_once);
 358
 359/*
 360 * These are initializations that only need to be done
 361 * once, because the fields are idempotent across use
  362 * of the inode, so let the slab allocator be aware of that.
 363 */
 364void inode_init_once(struct inode *inode)
 365{
 366	memset(inode, 0, sizeof(*inode));
 367	INIT_HLIST_NODE(&inode->i_hash);
 368	INIT_LIST_HEAD(&inode->i_devices);
 369	INIT_LIST_HEAD(&inode->i_io_list);
 370	INIT_LIST_HEAD(&inode->i_wb_list);
 371	INIT_LIST_HEAD(&inode->i_lru);
 372	address_space_init_once(&inode->i_data);
 373	i_size_ordered_init(inode);
 374#ifdef CONFIG_FSNOTIFY
 375	INIT_HLIST_HEAD(&inode->i_fsnotify_marks);
 376#endif
 377}
 378EXPORT_SYMBOL(inode_init_once);
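/*
 * Illustrative sketch added in editing (not part of fs/inode.c): the usual
 * way a filesystem exploits the once-only guarantee described above, with
 * its own inode type wrapping struct inode and a slab constructor that
 * calls inode_init_once().  All example_* names are hypothetical; compare
 * ext2_init_inodecache() for a real instance.
 */
struct example_inode_info {
	unsigned long	i_flags;	/* fs-private state */
	struct inode	vfs_inode;
};

static struct kmem_cache *example_inode_cachep;

static void example_init_once(void *obj)
{
	struct example_inode_info *ei = obj;

	inode_init_once(&ei->vfs_inode);
}

static int __init example_init_inodecache(void)
{
	example_inode_cachep = kmem_cache_create("example_inode_cache",
				sizeof(struct example_inode_info), 0,
				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
				example_init_once);
	return example_inode_cachep ? 0 : -ENOMEM;
}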
 379
 380static void init_once(void *foo)
 381{
 382	struct inode *inode = (struct inode *) foo;
 383
 384	inode_init_once(inode);
 385}
 386
 387/*
 388 * inode->i_lock must be held
 389 */
 390void __iget(struct inode *inode)
 391{
 392	atomic_inc(&inode->i_count);
 393}
 394
 395/*
 396 * get additional reference to inode; caller must already hold one.
 397 */
 398void ihold(struct inode *inode)
 399{
 400	WARN_ON(atomic_inc_return(&inode->i_count) < 2);
 401}
 402EXPORT_SYMBOL(ihold);
 403
 404static void inode_lru_list_add(struct inode *inode)
 405{
 406	if (list_lru_add(&inode->i_sb->s_inode_lru, &inode->i_lru))
  407		this_cpu_inc(nr_unused);
  408}
 409
 410/*
 411 * Add inode to LRU if needed (inode is unused and clean).
 412 *
 413 * Needs inode->i_lock held.
 414 */
 415void inode_add_lru(struct inode *inode)
 416{
 417	if (!(inode->i_state & (I_DIRTY_ALL | I_SYNC |
 418				I_FREEING | I_WILL_FREE)) &&
 419	    !atomic_read(&inode->i_count) && inode->i_sb->s_flags & MS_ACTIVE)
 420		inode_lru_list_add(inode);
 421}
 422
 423
 424static void inode_lru_list_del(struct inode *inode)
 425{
 426
 427	if (list_lru_del(&inode->i_sb->s_inode_lru, &inode->i_lru))
 428		this_cpu_dec(nr_unused);
 429}
 430
 431/**
 432 * inode_sb_list_add - add inode to the superblock list of inodes
 433 * @inode: inode to add
 434 */
 435void inode_sb_list_add(struct inode *inode)
 436{
 437	spin_lock(&inode->i_sb->s_inode_list_lock);
 438	list_add(&inode->i_sb_list, &inode->i_sb->s_inodes);
 439	spin_unlock(&inode->i_sb->s_inode_list_lock);
 440}
 441EXPORT_SYMBOL_GPL(inode_sb_list_add);
 442
 443static inline void inode_sb_list_del(struct inode *inode)
 444{
 445	if (!list_empty(&inode->i_sb_list)) {
 446		spin_lock(&inode->i_sb->s_inode_list_lock);
 447		list_del_init(&inode->i_sb_list);
 448		spin_unlock(&inode->i_sb->s_inode_list_lock);
 449	}
 450}
 451
 452static unsigned long hash(struct super_block *sb, unsigned long hashval)
 453{
 454	unsigned long tmp;
 455
 456	tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
 457			L1_CACHE_BYTES;
 458	tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> i_hash_shift);
 459	return tmp & i_hash_mask;
 460}
 461
 462/**
 463 *	__insert_inode_hash - hash an inode
 464 *	@inode: unhashed inode
 465 *	@hashval: unsigned long value used to locate this object in the
 466 *		inode_hashtable.
 467 *
 468 *	Add an inode to the inode hash for this superblock.
 469 */
 470void __insert_inode_hash(struct inode *inode, unsigned long hashval)
 471{
 472	struct hlist_head *b = inode_hashtable + hash(inode->i_sb, hashval);
 473
 474	spin_lock(&inode_hash_lock);
 475	spin_lock(&inode->i_lock);
 476	hlist_add_head(&inode->i_hash, b);
 477	spin_unlock(&inode->i_lock);
 478	spin_unlock(&inode_hash_lock);
 479}
 480EXPORT_SYMBOL(__insert_inode_hash);
 481
 482/**
 483 *	__remove_inode_hash - remove an inode from the hash
 484 *	@inode: inode to unhash
 485 *
 486 *	Remove an inode from the superblock.
 487 */
 488void __remove_inode_hash(struct inode *inode)
 489{
 490	spin_lock(&inode_hash_lock);
 491	spin_lock(&inode->i_lock);
 492	hlist_del_init(&inode->i_hash);
 493	spin_unlock(&inode->i_lock);
 494	spin_unlock(&inode_hash_lock);
 495}
 496EXPORT_SYMBOL(__remove_inode_hash);
 497
 498void clear_inode(struct inode *inode)
 499{
 500	might_sleep();
 501	/*
  502	 * We have to cycle tree_lock here because reclaim can still be in the
 503	 * process of removing the last page (in __delete_from_page_cache())
 504	 * and we must not free mapping under it.
 505	 */
 506	spin_lock_irq(&inode->i_data.tree_lock);
 507	BUG_ON(inode->i_data.nrpages);
 508	BUG_ON(inode->i_data.nrexceptional);
  509	spin_unlock_irq(&inode->i_data.tree_lock);
  510	BUG_ON(!list_empty(&inode->i_data.private_list));
 511	BUG_ON(!(inode->i_state & I_FREEING));
 512	BUG_ON(inode->i_state & I_CLEAR);
 513	BUG_ON(!list_empty(&inode->i_wb_list));
 514	/* don't need i_lock here, no concurrent mods to i_state */
 515	inode->i_state = I_FREEING | I_CLEAR;
 516}
 517EXPORT_SYMBOL(clear_inode);
 518
 519/*
 520 * Free the inode passed in, removing it from the lists it is still connected
 521 * to. We remove any pages still attached to the inode and wait for any IO that
 522 * is still in progress before finally destroying the inode.
 523 *
 524 * An inode must already be marked I_FREEING so that we avoid the inode being
 525 * moved back onto lists if we race with other code that manipulates the lists
 526 * (e.g. writeback_single_inode). The caller is responsible for setting this.
 527 *
 528 * An inode must already be removed from the LRU list before being evicted from
 529 * the cache. This should occur atomically with setting the I_FREEING state
 530 * flag, so no inodes here should ever be on the LRU when being evicted.
 531 */
 532static void evict(struct inode *inode)
 533{
 534	const struct super_operations *op = inode->i_sb->s_op;
 535
 536	BUG_ON(!(inode->i_state & I_FREEING));
 537	BUG_ON(!list_empty(&inode->i_lru));
 538
 539	if (!list_empty(&inode->i_io_list))
 540		inode_io_list_del(inode);
 541
 542	inode_sb_list_del(inode);
 543
 544	/*
 545	 * Wait for flusher thread to be done with the inode so that filesystem
 546	 * does not start destroying it while writeback is still running. Since
 547	 * the inode has I_FREEING set, flusher thread won't start new work on
 548	 * the inode.  We just have to wait for running writeback to finish.
 549	 */
 550	inode_wait_for_writeback(inode);
 551
 552	if (op->evict_inode) {
 553		op->evict_inode(inode);
 554	} else {
 555		truncate_inode_pages_final(&inode->i_data);
 556		clear_inode(inode);
 557	}
 558	if (S_ISBLK(inode->i_mode) && inode->i_bdev)
 559		bd_forget(inode);
 560	if (S_ISCHR(inode->i_mode) && inode->i_cdev)
 561		cd_forget(inode);
 562
 563	remove_inode_hash(inode);
 564
 565	spin_lock(&inode->i_lock);
 566	wake_up_bit(&inode->i_state, __I_NEW);
 567	BUG_ON(inode->i_state != (I_FREEING | I_CLEAR));
 568	spin_unlock(&inode->i_lock);
 569
 570	destroy_inode(inode);
 571}
 572
 573/*
 574 * dispose_list - dispose of the contents of a local list
 575 * @head: the head of the list to free
 576 *
 577 * Dispose-list gets a local list with local inodes in it, so it doesn't
 578 * need to worry about list corruption and SMP locks.
 579 */
 580static void dispose_list(struct list_head *head)
 581{
 582	while (!list_empty(head)) {
 583		struct inode *inode;
 584
 585		inode = list_first_entry(head, struct inode, i_lru);
 586		list_del_init(&inode->i_lru);
 587
 588		evict(inode);
 589		cond_resched();
 590	}
 591}
 592
 593/**
 594 * evict_inodes	- evict all evictable inodes for a superblock
 595 * @sb:		superblock to operate on
 596 *
 597 * Make sure that no inodes with zero refcount are retained.  This is
 598 * called by superblock shutdown after having MS_ACTIVE flag removed,
 599 * so any inode reaching zero refcount during or after that call will
 600 * be immediately evicted.
 601 */
 602void evict_inodes(struct super_block *sb)
 603{
 604	struct inode *inode, *next;
 605	LIST_HEAD(dispose);
 606
 607again:
 608	spin_lock(&sb->s_inode_list_lock);
 609	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
 610		if (atomic_read(&inode->i_count))
 611			continue;
 612
 613		spin_lock(&inode->i_lock);
 614		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
 615			spin_unlock(&inode->i_lock);
 616			continue;
 617		}
 618
 619		inode->i_state |= I_FREEING;
 620		inode_lru_list_del(inode);
 621		spin_unlock(&inode->i_lock);
 622		list_add(&inode->i_lru, &dispose);
 623
 624		/*
 625		 * We can have a ton of inodes to evict at unmount time given
 626		 * enough memory, check to see if we need to go to sleep for a
 627		 * bit so we don't livelock.
 628		 */
 629		if (need_resched()) {
 630			spin_unlock(&sb->s_inode_list_lock);
 631			cond_resched();
 632			dispose_list(&dispose);
 633			goto again;
 634		}
 635	}
 636	spin_unlock(&sb->s_inode_list_lock);
 637
 638	dispose_list(&dispose);
 639}
 
 640
 641/**
 642 * invalidate_inodes	- attempt to free all inodes on a superblock
 643 * @sb:		superblock to operate on
 644 * @kill_dirty: flag to guide handling of dirty inodes
 645 *
 646 * Attempts to free all inodes for a given superblock.  If there were any
 647 * busy inodes return a non-zero value, else zero.
 648 * If @kill_dirty is set, discard dirty inodes too, otherwise treat
 649 * them as busy.
 650 */
 651int invalidate_inodes(struct super_block *sb, bool kill_dirty)
 652{
 653	int busy = 0;
 654	struct inode *inode, *next;
 655	LIST_HEAD(dispose);
 656
 
 657	spin_lock(&sb->s_inode_list_lock);
 658	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
 659		spin_lock(&inode->i_lock);
 660		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
 661			spin_unlock(&inode->i_lock);
 662			continue;
 663		}
 664		if (inode->i_state & I_DIRTY_ALL && !kill_dirty) {
 665			spin_unlock(&inode->i_lock);
 666			busy = 1;
 667			continue;
 668		}
 669		if (atomic_read(&inode->i_count)) {
 670			spin_unlock(&inode->i_lock);
 671			busy = 1;
 672			continue;
 673		}
 674
 675		inode->i_state |= I_FREEING;
 676		inode_lru_list_del(inode);
 677		spin_unlock(&inode->i_lock);
  678		list_add(&inode->i_lru, &dispose);
  679	}
 680	spin_unlock(&sb->s_inode_list_lock);
 681
 682	dispose_list(&dispose);
 683
 684	return busy;
 685}
 686
 687/*
 688 * Isolate the inode from the LRU in preparation for freeing it.
 689 *
 690 * Any inodes which are pinned purely because of attached pagecache have their
 691 * pagecache removed.  If the inode has metadata buffers attached to
 692 * mapping->private_list then try to remove them.
 693 *
 694 * If the inode has the I_REFERENCED flag set, then it means that it has been
 695 * used recently - the flag is set in iput_final(). When we encounter such an
 696 * inode, clear the flag and move it to the back of the LRU so it gets another
 697 * pass through the LRU before it gets reclaimed. This is necessary because of
 698 * the fact we are doing lazy LRU updates to minimise lock contention so the
 699 * LRU does not have strict ordering. Hence we don't want to reclaim inodes
 700 * with this flag set because they are the inodes that are out of order.
 701 */
 702static enum lru_status inode_lru_isolate(struct list_head *item,
 703		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
 704{
 705	struct list_head *freeable = arg;
 706	struct inode	*inode = container_of(item, struct inode, i_lru);
 707
 708	/*
 709	 * we are inverting the lru lock/inode->i_lock here, so use a trylock.
 710	 * If we fail to get the lock, just skip it.
 711	 */
 712	if (!spin_trylock(&inode->i_lock))
 713		return LRU_SKIP;
 714
 715	/*
 716	 * Referenced or dirty inodes are still in use. Give them another pass
  717	 * through the LRU as we cannot reclaim them now.
 718	 */
 719	if (atomic_read(&inode->i_count) ||
 720	    (inode->i_state & ~I_REFERENCED)) {
 721		list_lru_isolate(lru, &inode->i_lru);
 722		spin_unlock(&inode->i_lock);
 723		this_cpu_dec(nr_unused);
 724		return LRU_REMOVED;
 725	}
 726
 727	/* recently referenced inodes get one more pass */
 728	if (inode->i_state & I_REFERENCED) {
 729		inode->i_state &= ~I_REFERENCED;
 730		spin_unlock(&inode->i_lock);
 731		return LRU_ROTATE;
 732	}
 733
 734	if (inode_has_buffers(inode) || inode->i_data.nrpages) {
 735		__iget(inode);
 736		spin_unlock(&inode->i_lock);
 737		spin_unlock(lru_lock);
 738		if (remove_inode_buffers(inode)) {
 739			unsigned long reap;
 740			reap = invalidate_mapping_pages(&inode->i_data, 0, -1);
 741			if (current_is_kswapd())
 742				__count_vm_events(KSWAPD_INODESTEAL, reap);
 743			else
 744				__count_vm_events(PGINODESTEAL, reap);
 745			if (current->reclaim_state)
 746				current->reclaim_state->reclaimed_slab += reap;
 747		}
 748		iput(inode);
 749		spin_lock(lru_lock);
 750		return LRU_RETRY;
 751	}
 752
 753	WARN_ON(inode->i_state & I_NEW);
 754	inode->i_state |= I_FREEING;
 755	list_lru_isolate_move(lru, &inode->i_lru, freeable);
 756	spin_unlock(&inode->i_lock);
 757
 758	this_cpu_dec(nr_unused);
 759	return LRU_REMOVED;
 760}
 761
 762/*
 763 * Walk the superblock inode LRU for freeable inodes and attempt to free them.
 764 * This is called from the superblock shrinker function with a number of inodes
 765 * to trim from the LRU. Inodes to be freed are moved to a temporary list and
 766 * then are freed outside inode_lock by dispose_list().
 767 */
 768long prune_icache_sb(struct super_block *sb, struct shrink_control *sc)
 769{
 770	LIST_HEAD(freeable);
 771	long freed;
 772
 773	freed = list_lru_shrink_walk(&sb->s_inode_lru, sc,
 774				     inode_lru_isolate, &freeable);
 775	dispose_list(&freeable);
 776	return freed;
 777}
 778
 779static void __wait_on_freeing_inode(struct inode *inode);
 780/*
  781 * Called with the inode_hash_lock held.
 782 */
 783static struct inode *find_inode(struct super_block *sb,
 784				struct hlist_head *head,
 785				int (*test)(struct inode *, void *),
 786				void *data)
 787{
 788	struct inode *inode = NULL;
 789
 790repeat:
 791	hlist_for_each_entry(inode, head, i_hash) {
 792		if (inode->i_sb != sb)
 793			continue;
 794		if (!test(inode, data))
 795			continue;
 796		spin_lock(&inode->i_lock);
 797		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
 798			__wait_on_freeing_inode(inode);
 799			goto repeat;
  800		}
  801		__iget(inode);
 802		spin_unlock(&inode->i_lock);
 803		return inode;
 804	}
 805	return NULL;
 806}
 807
 808/*
 809 * find_inode_fast is the fast path version of find_inode, see the comment at
 810 * iget_locked for details.
 811 */
 812static struct inode *find_inode_fast(struct super_block *sb,
 813				struct hlist_head *head, unsigned long ino)
 814{
 815	struct inode *inode = NULL;
 816
 817repeat:
 818	hlist_for_each_entry(inode, head, i_hash) {
 819		if (inode->i_ino != ino)
 820			continue;
 821		if (inode->i_sb != sb)
 822			continue;
 823		spin_lock(&inode->i_lock);
 824		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
 825			__wait_on_freeing_inode(inode);
 826			goto repeat;
  827		}
  828		__iget(inode);
 829		spin_unlock(&inode->i_lock);
 830		return inode;
 831	}
 832	return NULL;
 833}
 834
 835/*
 836 * Each cpu owns a range of LAST_INO_BATCH numbers.
 837 * 'shared_last_ino' is dirtied only once out of LAST_INO_BATCH allocations,
 838 * to renew the exhausted range.
 839 *
 840 * This does not significantly increase overflow rate because every CPU can
 841 * consume at most LAST_INO_BATCH-1 unused inode numbers. So there is
 842 * NR_CPUS*(LAST_INO_BATCH-1) wastage. At 4096 and 1024, this is ~0.1% of the
 843 * 2^32 range, and is a worst-case. Even a 50% wastage would only increase
 844 * overflow rate by 2x, which does not seem too significant.
 845 *
 846 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
 847 * error if st_ino won't fit in target struct field. Use 32bit counter
 848 * here to attempt to avoid that.
 849 */
 850#define LAST_INO_BATCH 1024
 851static DEFINE_PER_CPU(unsigned int, last_ino);
 852
 853unsigned int get_next_ino(void)
 854{
 855	unsigned int *p = &get_cpu_var(last_ino);
 856	unsigned int res = *p;
 857
 858#ifdef CONFIG_SMP
 859	if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
 860		static atomic_t shared_last_ino;
 861		int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
 862
 863		res = next - LAST_INO_BATCH;
 864	}
 865#endif
 866
 867	res++;
 868	/* get_next_ino should not provide a 0 inode number */
 869	if (unlikely(!res))
 870		res++;
 871	*p = res;
 872	put_cpu_var(last_ino);
 873	return res;
 874}
 875EXPORT_SYMBOL(get_next_ino);
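/*
 * Illustrative sketch added in editing (not part of fs/inode.c): pseudo
 * filesystems with no stable on-disk inode numbers (ramfs, shmem and
 * friends) mint i_ino from get_next_ino() when creating an inode.  The
 * helper name is hypothetical.
 */
static struct inode *example_new_pseudo_inode(struct super_block *sb)
{
	struct inode *inode = new_inode(sb);

	if (inode) {
		inode->i_ino = get_next_ino();	/* per-cpu batch, may wrap */
		inode->i_atime = inode->i_mtime = inode->i_ctime =
							current_time(inode);
	}
	return inode;
}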
 876
 877/**
 878 *	new_inode_pseudo 	- obtain an inode
 879 *	@sb: superblock
 880 *
 881 *	Allocates a new inode for given superblock.
  882 *	Inode won't be chained in the superblock's s_inodes list.
  883 *	This means:
  884 *	- the fs can't be unmounted
 885 *	- quotas, fsnotify, writeback can't work
 886 */
 887struct inode *new_inode_pseudo(struct super_block *sb)
 888{
 889	struct inode *inode = alloc_inode(sb);
 890
 891	if (inode) {
 892		spin_lock(&inode->i_lock);
 893		inode->i_state = 0;
 894		spin_unlock(&inode->i_lock);
 895		INIT_LIST_HEAD(&inode->i_sb_list);
 896	}
 897	return inode;
 898}
 899
 900/**
 901 *	new_inode 	- obtain an inode
 902 *	@sb: superblock
 903 *
 904 *	Allocates a new inode for given superblock. The default gfp_mask
 905 *	for allocations related to inode->i_mapping is GFP_HIGHUSER_MOVABLE.
 906 *	If HIGHMEM pages are unsuitable or it is known that pages allocated
 907 *	for the page cache are not reclaimable or migratable,
 908 *	mapping_set_gfp_mask() must be called with suitable flags on the
 909 *	newly created inode's mapping
 910 *
 911 */
 912struct inode *new_inode(struct super_block *sb)
 913{
 914	struct inode *inode;
 915
 916	spin_lock_prefetch(&sb->s_inode_list_lock);
 917
 918	inode = new_inode_pseudo(sb);
 919	if (inode)
 920		inode_sb_list_add(inode);
 921	return inode;
 922}
 923EXPORT_SYMBOL(new_inode);
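/*
 * Illustrative sketch added in editing (not part of fs/inode.c): a minimal
 * ->create() built on new_inode(), in the style of ramfs.  inode_init_owner()
 * derives i_uid/i_gid from the current task and the parent directory; the
 * function itself and its error choice are hypothetical.
 */
static int example_create(struct inode *dir, struct dentry *dentry,
			  umode_t mode, bool excl)
{
	struct inode *inode = new_inode(dir->i_sb);

	if (!inode)
		return -ENOSPC;

	inode->i_ino = get_next_ino();
	inode_init_owner(inode, dir, mode);
	inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);

	d_instantiate(dentry, inode);
	dget(dentry);		/* extra pin on the dentry, ramfs-style */
	dir->i_mtime = dir->i_ctime = current_time(dir);
	return 0;
}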
 924
 925#ifdef CONFIG_DEBUG_LOCK_ALLOC
 926void lockdep_annotate_inode_mutex_key(struct inode *inode)
 927{
 928	if (S_ISDIR(inode->i_mode)) {
 929		struct file_system_type *type = inode->i_sb->s_type;
 930
 931		/* Set new key only if filesystem hasn't already changed it */
 932		if (lockdep_match_class(&inode->i_rwsem, &type->i_mutex_key)) {
 933			/*
 934			 * ensure nobody is actually holding i_mutex
 935			 */
 936			// mutex_destroy(&inode->i_mutex);
 937			init_rwsem(&inode->i_rwsem);
 938			lockdep_set_class(&inode->i_rwsem,
 939					  &type->i_mutex_dir_key);
 940		}
 941	}
 942}
 943EXPORT_SYMBOL(lockdep_annotate_inode_mutex_key);
 944#endif
 945
 946/**
 947 * unlock_new_inode - clear the I_NEW state and wake up any waiters
 948 * @inode:	new inode to unlock
 949 *
 950 * Called when the inode is fully initialised to clear the new state of the
 951 * inode and wake up anyone waiting for the inode to finish initialisation.
 952 */
 953void unlock_new_inode(struct inode *inode)
 954{
 955	lockdep_annotate_inode_mutex_key(inode);
 956	spin_lock(&inode->i_lock);
 957	WARN_ON(!(inode->i_state & I_NEW));
 958	inode->i_state &= ~I_NEW;
 959	smp_mb();
 960	wake_up_bit(&inode->i_state, __I_NEW);
 961	spin_unlock(&inode->i_lock);
 962}
 963EXPORT_SYMBOL(unlock_new_inode);
  964
 965/**
 966 * lock_two_nondirectories - take two i_mutexes on non-directory objects
 967 *
 968 * Lock any non-NULL argument that is not a directory.
 969 * Zero, one or two objects may be locked by this function.
 970 *
 971 * @inode1: first inode to lock
 972 * @inode2: second inode to lock
 973 */
 974void lock_two_nondirectories(struct inode *inode1, struct inode *inode2)
 975{
 976	if (inode1 > inode2)
 977		swap(inode1, inode2);
 978
 979	if (inode1 && !S_ISDIR(inode1->i_mode))
 980		inode_lock(inode1);
 981	if (inode2 && !S_ISDIR(inode2->i_mode) && inode2 != inode1)
 982		inode_lock_nested(inode2, I_MUTEX_NONDIR2);
 983}
 984EXPORT_SYMBOL(lock_two_nondirectories);
 985
 986/**
 987 * unlock_two_nondirectories - release locks from lock_two_nondirectories()
 988 * @inode1: first inode to unlock
 989 * @inode2: second inode to unlock
 990 */
 991void unlock_two_nondirectories(struct inode *inode1, struct inode *inode2)
 992{
 993	if (inode1 && !S_ISDIR(inode1->i_mode))
 994		inode_unlock(inode1);
 995	if (inode2 && !S_ISDIR(inode2->i_mode) && inode2 != inode1)
 996		inode_unlock(inode2);
 997}
 998EXPORT_SYMBOL(unlock_two_nondirectories);
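/*
 * Illustrative sketch added in editing (not part of fs/inode.c): the
 * intended pairing of the two helpers above around an operation that needs
 * both non-directory inodes locked, e.g. a clone- or swap-style call.  The
 * address ordering inside lock_two_nondirectories() prevents ABBA deadlock.
 */
static int example_op_on_two_files(struct inode *src, struct inode *dst)
{
	lock_two_nondirectories(src, dst);
	/* ... both i_rwsems are held here; move data between the files ... */
	unlock_two_nondirectories(src, dst);
	return 0;
}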
 999
 1000/**
1001 * iget5_locked - obtain an inode from a mounted file system
1002 * @sb:		super block of file system
1003 * @hashval:	hash value (usually inode number) to get
1004 * @test:	callback used for comparisons between inodes
1005 * @set:	callback used to initialize a new struct inode
1006 * @data:	opaque data pointer to pass to @test and @set
1007 *
1008 * Search for the inode specified by @hashval and @data in the inode cache,
 1009 * and if present return it with an increased reference count. This is
1010 * a generalized version of iget_locked() for file systems where the inode
1011 * number is not sufficient for unique identification of an inode.
1012 *
1013 * If the inode is not in cache, allocate a new inode and return it locked,
1014 * hashed, and with the I_NEW flag set. The file system gets to fill it in
1015 * before unlocking it via unlock_new_inode().
1016 *
1017 * Note both @test and @set are called with the inode_hash_lock held, so can't
1018 * sleep.
1019 */
1020struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
1021		int (*test)(struct inode *, void *),
1022		int (*set)(struct inode *, void *), void *data)
1023{
1024	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
1025	struct inode *inode;
1026again:
1027	spin_lock(&inode_hash_lock);
1028	inode = find_inode(sb, head, test, data);
1029	spin_unlock(&inode_hash_lock);
1030
1031	if (inode) {
1032		wait_on_inode(inode);
1033		if (unlikely(inode_unhashed(inode))) {
1034			iput(inode);
1035			goto again;
1036		}
1037		return inode;
1038	}
1039
1040	inode = alloc_inode(sb);
1041	if (inode) {
1042		struct inode *old;
1043
1044		spin_lock(&inode_hash_lock);
1045		/* We released the lock, so.. */
1046		old = find_inode(sb, head, test, data);
1047		if (!old) {
1048			if (set(inode, data))
1049				goto set_failed;
1050
1051			spin_lock(&inode->i_lock);
1052			inode->i_state = I_NEW;
1053			hlist_add_head(&inode->i_hash, head);
1054			spin_unlock(&inode->i_lock);
1055			inode_sb_list_add(inode);
1056			spin_unlock(&inode_hash_lock);
1057
1058			/* Return the locked inode with I_NEW set, the
1059			 * caller is responsible for filling in the contents
1060			 */
1061			return inode;
1062		}
1063
1064		/*
1065		 * Uhhuh, somebody else created the same inode under
1066		 * us. Use the old inode instead of the one we just
1067		 * allocated.
1068		 */
1069		spin_unlock(&inode_hash_lock);
1070		destroy_inode(inode);
1071		inode = old;
1072		wait_on_inode(inode);
1073		if (unlikely(inode_unhashed(inode))) {
1074			iput(inode);
1075			goto again;
1076		}
1077	}
1078	return inode;
1079
1080set_failed:
1081	spin_unlock(&inode_hash_lock);
1082	destroy_inode(inode);
1083	return NULL;
1084}
1085EXPORT_SYMBOL(iget5_locked);
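/*
 * Illustrative sketch added in editing (not part of fs/inode.c): using
 * iget5_locked() with an identity cookie stored in i_private, for a
 * filesystem whose keys don't fit the plain-ino scheme of iget_locked().
 * Both callbacks run under inode_hash_lock and therefore never sleep.
 * The example_* names are hypothetical.
 */
static int example_test(struct inode *inode, void *data)
{
	return inode->i_private == data;	/* same backing object? */
}

static int example_set(struct inode *inode, void *data)
{
	inode->i_private = data;
	return 0;
}

static struct inode *example_iget5(struct super_block *sb, void *cookie)
{
	struct inode *inode = iget5_locked(sb, (unsigned long)cookie,
					   example_test, example_set, cookie);

	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (inode->i_state & I_NEW) {
		/* ... initialise the inode from 'cookie' ... */
		unlock_new_inode(inode);
	}
	return inode;
}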
1086
1087/**
1088 * iget_locked - obtain an inode from a mounted file system
1089 * @sb:		super block of file system
1090 * @ino:	inode number to get
1091 *
1092 * Search for the inode specified by @ino in the inode cache and if present
1093 * return it with an increased reference count. This is for file systems
1094 * where the inode number is sufficient for unique identification of an inode.
1095 *
1096 * If the inode is not in cache, allocate a new inode and return it locked,
1097 * hashed, and with the I_NEW flag set.  The file system gets to fill it in
1098 * before unlocking it via unlock_new_inode().
1099 */
1100struct inode *iget_locked(struct super_block *sb, unsigned long ino)
1101{
1102	struct hlist_head *head = inode_hashtable + hash(sb, ino);
1103	struct inode *inode;
1104again:
1105	spin_lock(&inode_hash_lock);
1106	inode = find_inode_fast(sb, head, ino);
1107	spin_unlock(&inode_hash_lock);
 1108	if (inode) {
 1109		wait_on_inode(inode);
1110		if (unlikely(inode_unhashed(inode))) {
1111			iput(inode);
1112			goto again;
1113		}
1114		return inode;
1115	}
1116
1117	inode = alloc_inode(sb);
1118	if (inode) {
1119		struct inode *old;
1120
1121		spin_lock(&inode_hash_lock);
1122		/* We released the lock, so.. */
1123		old = find_inode_fast(sb, head, ino);
1124		if (!old) {
1125			inode->i_ino = ino;
1126			spin_lock(&inode->i_lock);
1127			inode->i_state = I_NEW;
1128			hlist_add_head(&inode->i_hash, head);
1129			spin_unlock(&inode->i_lock);
1130			inode_sb_list_add(inode);
1131			spin_unlock(&inode_hash_lock);
1132
1133			/* Return the locked inode with I_NEW set, the
1134			 * caller is responsible for filling in the contents
1135			 */
1136			return inode;
1137		}
1138
1139		/*
1140		 * Uhhuh, somebody else created the same inode under
1141		 * us. Use the old inode instead of the one we just
1142		 * allocated.
1143		 */
1144		spin_unlock(&inode_hash_lock);
 1145		destroy_inode(inode);
 1146		inode = old;
1147		wait_on_inode(inode);
1148		if (unlikely(inode_unhashed(inode))) {
1149			iput(inode);
1150			goto again;
1151		}
1152	}
1153	return inode;
1154}
1155EXPORT_SYMBOL(iget_locked);
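/*
 * Illustrative sketch added in editing (not part of fs/inode.c): the
 * canonical iget_locked() pattern for a disk filesystem.  Only an I_NEW
 * inode needs filling; a cache hit comes back fully initialised.
 * example_read_inode() is hypothetical; on failure the fs must call
 * iget_failed(), which marks the inode bad, unlocks it and drops it.
 */
static int example_read_inode(struct inode *inode);	/* hypothetical */

static struct inode *example_iget(struct super_block *sb, unsigned long ino)
{
	struct inode *inode = iget_locked(sb, ino);

	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;		/* cache hit, already set up */

	if (example_read_inode(inode)) {
		iget_failed(inode);
		return ERR_PTR(-EIO);
	}
	unlock_new_inode(inode);
	return inode;
}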
1156
1157/*
1158 * search the inode cache for a matching inode number.
1159 * If we find one, then the inode number we are trying to
1160 * allocate is not unique and so we should not use it.
1161 *
1162 * Returns 1 if the inode number is unique, 0 if it is not.
1163 */
1164static int test_inode_iunique(struct super_block *sb, unsigned long ino)
1165{
1166	struct hlist_head *b = inode_hashtable + hash(sb, ino);
1167	struct inode *inode;
1168
1169	spin_lock(&inode_hash_lock);
1170	hlist_for_each_entry(inode, b, i_hash) {
1171		if (inode->i_ino == ino && inode->i_sb == sb) {
1172			spin_unlock(&inode_hash_lock);
1173			return 0;
1174		}
1175	}
1176	spin_unlock(&inode_hash_lock);
1177
1178	return 1;
1179}
1180
1181/**
1182 *	iunique - get a unique inode number
1183 *	@sb: superblock
1184 *	@max_reserved: highest reserved inode number
1185 *
1186 *	Obtain an inode number that is unique on the system for a given
1187 *	superblock. This is used by file systems that have no natural
1188 *	permanent inode numbering system. An inode number is returned that
1189 *	is higher than the reserved limit but unique.
1190 *
1191 *	BUGS:
1192 *	With a large number of inodes live on the file system this function
1193 *	currently becomes quite slow.
1194 */
1195ino_t iunique(struct super_block *sb, ino_t max_reserved)
1196{
1197	/*
1198	 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
1199	 * error if st_ino won't fit in target struct field. Use 32bit counter
1200	 * here to attempt to avoid that.
1201	 */
1202	static DEFINE_SPINLOCK(iunique_lock);
1203	static unsigned int counter;
1204	ino_t res;
1205
 
1206	spin_lock(&iunique_lock);
1207	do {
1208		if (counter <= max_reserved)
1209			counter = max_reserved + 1;
1210		res = counter++;
1211	} while (!test_inode_iunique(sb, res));
1212	spin_unlock(&iunique_lock);
 
1213
1214	return res;
1215}
1216EXPORT_SYMBOL(iunique);
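/*
 * Illustrative sketch added in editing (not part of fs/inode.c): iunique()
 * in a filesystem that fabricates inode numbers at runtime, keeping a low
 * range reserved for fixed objects such as the root.  The reserved bound
 * is hypothetical.
 */
#define EXAMPLE_LAST_RESERVED_INO 16

static void example_assign_ino(struct inode *inode)
{
	inode->i_ino = iunique(inode->i_sb, EXAMPLE_LAST_RESERVED_INO);
}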
1217
1218struct inode *igrab(struct inode *inode)
1219{
1220	spin_lock(&inode->i_lock);
1221	if (!(inode->i_state & (I_FREEING|I_WILL_FREE))) {
1222		__iget(inode);
1223		spin_unlock(&inode->i_lock);
1224	} else {
1225		spin_unlock(&inode->i_lock);
1226		/*
 1227		 * Handle the case where s_op->clear_inode has not been
1228		 * called yet, and somebody is calling igrab
1229		 * while the inode is getting freed.
1230		 */
1231		inode = NULL;
1232	}
1233	return inode;
1234}
1235EXPORT_SYMBOL(igrab);
1236
1237/**
1238 * ilookup5_nowait - search for an inode in the inode cache
1239 * @sb:		super block of file system to search
1240 * @hashval:	hash value (usually inode number) to search for
1241 * @test:	callback used for comparisons between inodes
1242 * @data:	opaque data pointer to pass to @test
1243 *
1244 * Search for the inode specified by @hashval and @data in the inode cache.
1245 * If the inode is in the cache, the inode is returned with an incremented
1246 * reference count.
1247 *
1248 * Note: I_NEW is not waited upon so you have to be very careful what you do
1249 * with the returned inode.  You probably should be using ilookup5() instead.
1250 *
1251 * Note2: @test is called with the inode_hash_lock held, so can't sleep.
1252 */
1253struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval,
1254		int (*test)(struct inode *, void *), void *data)
1255{
1256	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
1257	struct inode *inode;
1258
1259	spin_lock(&inode_hash_lock);
1260	inode = find_inode(sb, head, test, data);
1261	spin_unlock(&inode_hash_lock);
1262
1263	return inode;
1264}
1265EXPORT_SYMBOL(ilookup5_nowait);
1266
1267/**
1268 * ilookup5 - search for an inode in the inode cache
1269 * @sb:		super block of file system to search
1270 * @hashval:	hash value (usually inode number) to search for
1271 * @test:	callback used for comparisons between inodes
1272 * @data:	opaque data pointer to pass to @test
1273 *
1274 * Search for the inode specified by @hashval and @data in the inode cache,
 1275 * and if the inode is in the cache, return it with an incremented
 1276 * reference count.  Waits on I_NEW before returning the inode.
1278 *
1279 * This is a generalized version of ilookup() for file systems where the
1280 * inode number is not sufficient for unique identification of an inode.
1281 *
1282 * Note: @test is called with the inode_hash_lock held, so can't sleep.
1283 */
1284struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
1285		int (*test)(struct inode *, void *), void *data)
1286{
1287	struct inode *inode;
1288again:
1289	inode = ilookup5_nowait(sb, hashval, test, data);
1290	if (inode) {
1291		wait_on_inode(inode);
1292		if (unlikely(inode_unhashed(inode))) {
1293			iput(inode);
1294			goto again;
1295		}
1296	}
1297	return inode;
1298}
1299EXPORT_SYMBOL(ilookup5);
1300
1301/**
1302 * ilookup - search for an inode in the inode cache
1303 * @sb:		super block of file system to search
1304 * @ino:	inode number to search for
1305 *
1306 * Search for the inode @ino in the inode cache, and if the inode is in the
1307 * cache, the inode is returned with an incremented reference count.
1308 */
1309struct inode *ilookup(struct super_block *sb, unsigned long ino)
1310{
1311	struct hlist_head *head = inode_hashtable + hash(sb, ino);
1312	struct inode *inode;
1313again:
1314	spin_lock(&inode_hash_lock);
1315	inode = find_inode_fast(sb, head, ino);
1316	spin_unlock(&inode_hash_lock);
1317
 1318	if (inode) {
 1319		wait_on_inode(inode);
1320		if (unlikely(inode_unhashed(inode))) {
1321			iput(inode);
1322			goto again;
1323		}
1324	}
1325	return inode;
1326}
1327EXPORT_SYMBOL(ilookup);
1328
1329/**
1330 * find_inode_nowait - find an inode in the inode cache
1331 * @sb:		super block of file system to search
1332 * @hashval:	hash value (usually inode number) to search for
1333 * @match:	callback used for comparisons between inodes
1334 * @data:	opaque data pointer to pass to @match
1335 *
1336 * Search for the inode specified by @hashval and @data in the inode
1337 * cache, where the helper function @match will return 0 if the inode
1338 * does not match, 1 if the inode does match, and -1 if the search
1339 * should be stopped.  The @match function must be responsible for
1340 * taking the i_lock spin_lock and checking i_state for an inode being
1341 * freed or being initialized, and incrementing the reference count
1342 * before returning 1.  It also must not sleep, since it is called with
1343 * the inode_hash_lock spinlock held.
1344 *
 1345 * This is an even more generalized version of ilookup5() when the
 1346 * function must never block --- find_inode() can block in
 1347 * __wait_on_freeing_inode() --- or when the caller cannot increment
 1348 * the reference count because the resulting iput() might cause an
 1349 * inode eviction.  The tradeoff is that the @match function must be
1350 * very carefully implemented.
1351 */
1352struct inode *find_inode_nowait(struct super_block *sb,
1353				unsigned long hashval,
1354				int (*match)(struct inode *, unsigned long,
1355					     void *),
1356				void *data)
1357{
1358	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
1359	struct inode *inode, *ret_inode = NULL;
1360	int mval;
1361
1362	spin_lock(&inode_hash_lock);
1363	hlist_for_each_entry(inode, head, i_hash) {
1364		if (inode->i_sb != sb)
1365			continue;
1366		mval = match(inode, hashval, data);
1367		if (mval == 0)
1368			continue;
1369		if (mval == 1)
1370			ret_inode = inode;
1371		goto out;
1372	}
1373out:
1374	spin_unlock(&inode_hash_lock);
1375	return ret_inode;
1376}
1377EXPORT_SYMBOL(find_inode_nowait);
 1378
1379int insert_inode_locked(struct inode *inode)
1380{
1381	struct super_block *sb = inode->i_sb;
1382	ino_t ino = inode->i_ino;
1383	struct hlist_head *head = inode_hashtable + hash(sb, ino);
1384
1385	while (1) {
1386		struct inode *old = NULL;
1387		spin_lock(&inode_hash_lock);
1388		hlist_for_each_entry(old, head, i_hash) {
1389			if (old->i_ino != ino)
1390				continue;
1391			if (old->i_sb != sb)
1392				continue;
1393			spin_lock(&old->i_lock);
1394			if (old->i_state & (I_FREEING|I_WILL_FREE)) {
1395				spin_unlock(&old->i_lock);
1396				continue;
1397			}
1398			break;
1399		}
1400		if (likely(!old)) {
1401			spin_lock(&inode->i_lock);
1402			inode->i_state |= I_NEW;
1403			hlist_add_head(&inode->i_hash, head);
1404			spin_unlock(&inode->i_lock);
1405			spin_unlock(&inode_hash_lock);
1406			return 0;
 1407		}
 1408		__iget(old);
1409		spin_unlock(&old->i_lock);
1410		spin_unlock(&inode_hash_lock);
1411		wait_on_inode(old);
1412		if (unlikely(!inode_unhashed(old))) {
1413			iput(old);
1414			return -EBUSY;
1415		}
1416		iput(old);
1417	}
1418}
1419EXPORT_SYMBOL(insert_inode_locked);
1420
1421int insert_inode_locked4(struct inode *inode, unsigned long hashval,
1422		int (*test)(struct inode *, void *), void *data)
1423{
1424	struct super_block *sb = inode->i_sb;
1425	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
1426
1427	while (1) {
1428		struct inode *old = NULL;
1429
1430		spin_lock(&inode_hash_lock);
1431		hlist_for_each_entry(old, head, i_hash) {
1432			if (old->i_sb != sb)
1433				continue;
1434			if (!test(old, data))
1435				continue;
1436			spin_lock(&old->i_lock);
1437			if (old->i_state & (I_FREEING|I_WILL_FREE)) {
1438				spin_unlock(&old->i_lock);
1439				continue;
1440			}
1441			break;
1442		}
1443		if (likely(!old)) {
1444			spin_lock(&inode->i_lock);
1445			inode->i_state |= I_NEW;
1446			hlist_add_head(&inode->i_hash, head);
1447			spin_unlock(&inode->i_lock);
1448			spin_unlock(&inode_hash_lock);
1449			return 0;
1450		}
1451		__iget(old);
1452		spin_unlock(&old->i_lock);
1453		spin_unlock(&inode_hash_lock);
1454		wait_on_inode(old);
1455		if (unlikely(!inode_unhashed(old))) {
1456			iput(old);
1457			return -EBUSY;
1458		}
 1459		iput(old);
 1460	}
 1461}
1462EXPORT_SYMBOL(insert_inode_locked4);
1463
1464
1465int generic_delete_inode(struct inode *inode)
1466{
1467	return 1;
1468}
1469EXPORT_SYMBOL(generic_delete_inode);
1470
1471/*
1472 * Called when we're dropping the last reference
1473 * to an inode.
1474 *
1475 * Call the FS "drop_inode()" function, defaulting to
1476 * the legacy UNIX filesystem behaviour.  If it tells
 1477 * us to evict the inode, do so.  Otherwise, retain the inode
 1478 * in the cache if the fs is alive; sync and evict if the fs is shutting down.
1479 * shutting down.
1480 */
1481static void iput_final(struct inode *inode)
1482{
1483	struct super_block *sb = inode->i_sb;
1484	const struct super_operations *op = inode->i_sb->s_op;
 
1485	int drop;
1486
1487	WARN_ON(inode->i_state & I_NEW);
1488
1489	if (op->drop_inode)
1490		drop = op->drop_inode(inode);
1491	else
1492		drop = generic_drop_inode(inode);
1493
 1494	if (!drop && (sb->s_flags & MS_ACTIVE)) {
 1495		inode->i_state |= I_REFERENCED;
 1496		inode_add_lru(inode);
 1497		spin_unlock(&inode->i_lock);
 1498		return;
 1499	}
 1500
 1501	if (!drop) {
 1502		inode->i_state |= I_WILL_FREE;
 1503		spin_unlock(&inode->i_lock);
 1504		write_inode_now(inode, 1);
 1505		spin_lock(&inode->i_lock);
 1506		WARN_ON(inode->i_state & I_NEW);
 1507		inode->i_state &= ~I_WILL_FREE;
 1508	}
1509
1510	inode->i_state |= I_FREEING;
1511	if (!list_empty(&inode->i_lru))
1512		inode_lru_list_del(inode);
1513	spin_unlock(&inode->i_lock);
1514
1515	evict(inode);
1516}
1517
1518/**
1519 *	iput	- put an inode
1520 *	@inode: inode to put
1521 *
1522 *	Puts an inode, dropping its usage count. If the inode use count hits
1523 *	zero, the inode is then freed and may also be destroyed.
1524 *
1525 *	Consequently, iput() can sleep.
1526 */
1527void iput(struct inode *inode)
1528{
1529	if (!inode)
1530		return;
1531	BUG_ON(inode->i_state & I_CLEAR);
1532retry:
1533	if (atomic_dec_and_lock(&inode->i_count, &inode->i_lock)) {
1534		if (inode->i_nlink && (inode->i_state & I_DIRTY_TIME)) {
1535			atomic_inc(&inode->i_count);
1536			inode->i_state &= ~I_DIRTY_TIME;
1537			spin_unlock(&inode->i_lock);
1538			trace_writeback_lazytime_iput(inode);
1539			mark_inode_dirty_sync(inode);
1540			goto retry;
1541		}
1542		iput_final(inode);
1543	}
1544}
1545EXPORT_SYMBOL(iput);
1546
 
1547/**
1548 *	bmap	- find a block number in a file
1549 *	@inode: inode of file
1550 *	@block: block to find
1551 *
1552 *	Returns the block number on the device holding the inode that
1553 *	is the disk block number for the block of the file requested.
 1554 *	That is, asked for block 4 of inode 1, the function will return the
 1555 *	disk block, relative to the start of the disk, that holds that block
 1556 *	of the file.
1557 */
1558sector_t bmap(struct inode *inode, sector_t block)
1559{
1560	sector_t res = 0;
1561	if (inode->i_mapping->a_ops->bmap)
1562		res = inode->i_mapping->a_ops->bmap(inode->i_mapping, block);
1563	return res;
1564}
1565EXPORT_SYMBOL(bmap);
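/*
 * Illustrative sketch added in editing (not part of fs/inode.c): the FIBMAP
 * ioctl is essentially this thin wrapper over bmap() (compare ioctl_fibmap()
 * in fs/ioctl.c).  A result of 0 means "no mapping": a hole, or a
 * filesystem without ->bmap.
 */
static int example_fibmap(struct inode *inode, sector_t *blockp)
{
	if (!capable(CAP_SYS_RAWIO))	/* block layout is privileged info */
		return -EPERM;
	*blockp = bmap(inode, *blockp);
	return 0;
}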
1566
1567/*
 1568 * Update times in overlaid inode from underlying real inode
1569 */
1570static void update_ovl_inode_times(struct dentry *dentry, struct inode *inode,
1571			       bool rcu)
1572{
1573	if (!rcu) {
1574		struct inode *realinode = d_real_inode(dentry);
1575
1576		if (unlikely(inode != realinode) &&
1577		    (!timespec_equal(&inode->i_mtime, &realinode->i_mtime) ||
1578		     !timespec_equal(&inode->i_ctime, &realinode->i_ctime))) {
1579			inode->i_mtime = realinode->i_mtime;
1580			inode->i_ctime = realinode->i_ctime;
1581		}
1582	}
1583}
 
 
1584
1585/*
1586 * With relative atime, only update atime if the previous atime is
1587 * earlier than either the ctime or mtime or if at least a day has
1588 * passed since the last atime update.
1589 */
1590static int relatime_need_update(const struct path *path, struct inode *inode,
1591				struct timespec now, bool rcu)
1592{
1593
1594	if (!(path->mnt->mnt_flags & MNT_RELATIME))
1595		return 1;
1596
1597	update_ovl_inode_times(path->dentry, inode, rcu);
1598	/*
1599	 * Is mtime younger than atime? If yes, update atime:
1600	 */
1601	if (timespec_compare(&inode->i_mtime, &inode->i_atime) >= 0)
1602		return 1;
1603	/*
1604	 * Is ctime younger than atime? If yes, update atime:
1605	 */
1606	if (timespec_compare(&inode->i_ctime, &inode->i_atime) >= 0)
1607		return 1;
1608
1609	/*
1610	 * Is the previous atime value older than a day? If yes,
1611	 * update atime:
1612	 */
1613	if ((long)(now.tv_sec - inode->i_atime.tv_sec) >= 24*60*60)
1614		return 1;
1615	/*
1616	 * Good, we can skip the atime update:
1617	 */
1618	return 0;
1619}
1620
1621int generic_update_time(struct inode *inode, struct timespec *time, int flags)
1622{
 1623	int iflags = I_DIRTY_TIME;
 1624
1625	if (flags & S_ATIME)
1626		inode->i_atime = *time;
1627	if (flags & S_VERSION)
1628		inode_inc_iversion(inode);
1629	if (flags & S_CTIME)
1630		inode->i_ctime = *time;
1631	if (flags & S_MTIME)
1632		inode->i_mtime = *time;
1633
1634	if (!(inode->i_sb->s_flags & MS_LAZYTIME) || (flags & S_VERSION))
1635		iflags |= I_DIRTY_SYNC;
1636	__mark_inode_dirty(inode, iflags);
1637	return 0;
1638}
1639EXPORT_SYMBOL(generic_update_time);
1640
1641/*
 1642 * This does the actual work of updating an inode's time or version.  The
 1643 * caller must have called mnt_want_write() before calling this.
1644 */
1645static int update_time(struct inode *inode, struct timespec *time, int flags)
1646{
1647	int (*update_time)(struct inode *, struct timespec *, int);
1648
1649	update_time = inode->i_op->update_time ? inode->i_op->update_time :
1650		generic_update_time;
1651
1652	return update_time(inode, time, flags);
1653}
1654
1655/**
1656 *	touch_atime	-	update the access time
1657 *	@path: the &struct path to update
1658 *	@inode: inode to update
1659 *
1660 *	Update the accessed time on an inode and mark it for writeback.
1661 *	This function automatically handles read only file systems and media,
1662 *	as well as the "noatime" flag and inode specific "noatime" markers.
1663 */
1664bool __atime_needs_update(const struct path *path, struct inode *inode,
1665			  bool rcu)
1666{
1667	struct vfsmount *mnt = path->mnt;
1668	struct timespec now;
1669
1670	if (inode->i_flags & S_NOATIME)
1671		return false;
1672
1673	/* Atime updates will likely cause i_uid and i_gid to be written
 1674	 * back improperly if their true value is unknown to the vfs.
1675	 */
1676	if (HAS_UNMAPPED_ID(inode))
1677		return false;
1678
1679	if (IS_NOATIME(inode))
1680		return false;
1681	if ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode))
1682		return false;
1683
1684	if (mnt->mnt_flags & MNT_NOATIME)
1685		return false;
1686	if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))
1687		return false;
1688
1689	now = current_time(inode);
1690
1691	if (!relatime_need_update(path, inode, now, rcu))
1692		return false;
1693
1694	if (timespec_equal(&inode->i_atime, &now))
1695		return false;
1696
1697	return true;
1698}
1699
1700void touch_atime(const struct path *path)
1701{
1702	struct vfsmount *mnt = path->mnt;
1703	struct inode *inode = d_inode(path->dentry);
1704	struct timespec now;
1705
1706	if (!__atime_needs_update(path, inode, false))
1707		return;
1708
1709	if (!sb_start_write_trylock(inode->i_sb))
1710		return;
1711
1712	if (__mnt_want_write(mnt) != 0)
1713		goto skip_update;
1714	/*
1715	 * File systems can error out when updating inodes if they need to
1716	 * allocate new space to modify an inode (such is the case for
1717	 * Btrfs), but since we touch atime while walking down the path we
1718	 * really don't care if we failed to update the atime of the file,
1719	 * so just ignore the return value.
1720	 * We may also fail on filesystems that have the ability to make parts
1721	 * of the fs read only, e.g. subvolumes in Btrfs.
1722	 */
1723	now = current_time(inode);
1724	update_time(inode, &now, S_ATIME);
1725	__mnt_drop_write(mnt);
1726skip_update:
1727	sb_end_write(inode->i_sb);
1728}
1729EXPORT_SYMBOL(touch_atime);
1730
1731/*
1732 * The logic we want is
1733 *
1734 *	if suid or (sgid and xgrp)
1735 *		remove privs
1736 */
1737int should_remove_suid(struct dentry *dentry)
1738{
1739	umode_t mode = d_inode(dentry)->i_mode;
1740	int kill = 0;
1741
1742	/* suid always must be killed */
1743	if (unlikely(mode & S_ISUID))
1744		kill = ATTR_KILL_SUID;
1745
1746	/*
1747	 * sgid without any exec bits is just a mandatory locking mark; leave
1748	 * it alone.  If some exec bits are set, it's a real sgid; kill it.
1749	 */
1750	if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
1751		kill |= ATTR_KILL_SGID;
1752
1753	if (unlikely(kill && !capable(CAP_FSETID) && S_ISREG(mode)))
1754		return kill;
1755
1756	return 0;
1757}
1758EXPORT_SYMBOL(should_remove_suid);
1759
1760/*
1761 * Return mask of changes for notify_change() that need to be done as a
1762 * response to write or truncate. Return 0 if nothing has to be changed.
1763 * Negative value on error (change should be denied).
1764 */
1765int dentry_needs_remove_privs(struct dentry *dentry)
1766{
1767	struct inode *inode = d_inode(dentry);
1768	int mask = 0;
1769	int ret;
1770
1771	if (IS_NOSEC(inode))
1772		return 0;
1773
1774	mask = should_remove_suid(dentry);
1775	ret = security_inode_need_killpriv(dentry);
1776	if (ret < 0)
1777		return ret;
1778	if (ret)
1779		mask |= ATTR_KILL_PRIV;
1780	return mask;
1781}
1782
 1783static int __remove_privs(struct dentry *dentry, int kill)
 1784{
1785	struct iattr newattrs;
1786
1787	newattrs.ia_valid = ATTR_FORCE | kill;
1788	/*
1789	 * Note we call this on write, so notify_change will not
1790	 * encounter any conflicting delegations:
1791	 */
1792	return notify_change(dentry, &newattrs, NULL);
1793}
1794
1795/*
 1796 * Remove special file privileges (suid, capabilities) when file is written
1797 * to or truncated.
1798 */
1799int file_remove_privs(struct file *file)
1800{
1801	struct dentry *dentry = file_dentry(file);
1802	struct inode *inode = file_inode(file);
1803	int kill;
1804	int error = 0;
1805
1806	/* Fast path for nothing security related */
 1807	if (IS_NOSEC(inode))
 1808		return 0;
1809
1810	kill = dentry_needs_remove_privs(dentry);
1811	if (kill < 0)
1812		return kill;
1813	if (kill)
1814		error = __remove_privs(dentry, kill);
1815	if (!error)
1816		inode_has_no_xattr(inode);
1817
1818	return error;
1819}
1820EXPORT_SYMBOL(file_remove_privs);
1821
1822/**
1823 *	file_update_time	-	update mtime and ctime time
1824 *	@file: file accessed
1825 *
1826 *	Update the mtime and ctime members of an inode and mark the inode
1827 *	for writeback.  Note that this function is meant exclusively for
1828 *	usage in the file write path of filesystems, and filesystems may
1829 *	choose to explicitly ignore update via this function with the
 1830 *	S_NOCMTIME inode flag, e.g. for network filesystems where these
 1831 *	timestamps are handled by the server.  This can return an error for
 1832 *	file systems that need to allocate space in order to update an inode.
1833 */
1834
1835int file_update_time(struct file *file)
1836{
1837	struct inode *inode = file_inode(file);
1838	struct timespec now;
1839	int sync_it = 0;
1840	int ret;
1841
1842	/* First try to exhaust all avenues to not sync */
1843	if (IS_NOCMTIME(inode))
1844		return 0;
1845
1846	now = current_time(inode);
1847	if (!timespec_equal(&inode->i_mtime, &now))
1848		sync_it = S_MTIME;
1849
1850	if (!timespec_equal(&inode->i_ctime, &now))
1851		sync_it |= S_CTIME;
1852
1853	if (IS_I_VERSION(inode))
1854		sync_it |= S_VERSION;
1855
1856	if (!sync_it)
1857		return 0;
1858
1859	/* Finally allowed to write? Takes lock. */
1860	if (__mnt_want_write_file(file))
1861		return 0;
1862
1863	ret = update_time(inode, &now, sync_it);
1864	__mnt_drop_write_file(file);
1865
1866	return ret;
1867}
1868EXPORT_SYMBOL(file_update_time);
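
/*
 * Example (a minimal sketch, not taken from a real filesystem): a
 * typical buffered write path strips privileges and updates the
 * timestamps before copying data; myfs_file_write_iter() is a
 * hypothetical name, and position bookkeeping is omitted for brevity.
 */
static ssize_t myfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	ssize_t ret;

	inode_lock(inode);
	ret = file_remove_privs(file);		/* kill suid/sgid/caps */
	if (!ret)
		ret = file_update_time(file);	/* mtime/ctime/i_version */
	if (!ret)
		ret = generic_perform_write(file, from, iocb->ki_pos);
	inode_unlock(inode);
	return ret;
}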
1869
1870int inode_needs_sync(struct inode *inode)
1871{
1872	if (IS_SYNC(inode))
1873		return 1;
1874	if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode))
1875		return 1;
1876	return 0;
1877}
1878EXPORT_SYMBOL(inode_needs_sync);
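
/*
 * Example (sketch): a directory-modifying operation can honour
 * O_SYNC/S_DIRSYNC semantics via inode_needs_sync(); myfs_add_entry()
 * is a hypothetical helper.
 */
static int myfs_add_entry_maybe_synced(struct inode *dir, struct dentry *dentry)
{
	int err = myfs_add_entry(dir, dentry);

	if (!err && inode_needs_sync(dir))
		err = write_inode_now(dir, 1);	/* write it out synchronously */
	return err;
}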
1879
1880/*
1881 * If we try to find an inode in the inode hash while it is being
1882 * deleted, we have to wait until the filesystem completes its
1883 * deletion before reporting that it isn't found.  This function waits
1884 * until the deletion _might_ have completed.  Callers are responsible
1885 * for rechecking the inode state.
1886 *
1887 * It doesn't matter if I_NEW is not set initially, a call to
1888 * wake_up_bit(&inode->i_state, __I_NEW) after removing from the hash list
1889 * will DTRT.
1890 */
1891static void __wait_on_freeing_inode(struct inode *inode)
1892{
1893	wait_queue_head_t *wq;
1894	DEFINE_WAIT_BIT(wait, &inode->i_state, __I_NEW);
1895	wq = bit_waitqueue(&inode->i_state, __I_NEW);
1896	prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
1897	spin_unlock(&inode->i_lock);
1898	spin_unlock(&inode_hash_lock);
1899	schedule();
1900	finish_wait(wq, &wait.wait);
1901	spin_lock(&inode_hash_lock);
1902}
1903
1904static __initdata unsigned long ihash_entries;
1905static int __init set_ihash_entries(char *str)
1906{
1907	if (!str)
1908		return 0;
1909	ihash_entries = simple_strtoul(str, &str, 0);
1910	return 1;
1911}
1912__setup("ihash_entries=", set_ihash_entries);
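
/*
 * Example: the table size can be pinned from the kernel command line,
 * e.g. booting with "ihash_entries=131072" requests 2^17 hash buckets
 * (subject to rounding by alloc_large_system_hash()).
 */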
1913
1914/*
1915 * Initialize the waitqueues and inode hash table.
1916 */
1917void __init inode_init_early(void)
1918{
1919	unsigned int loop;
1920
1921	/* If hashes are distributed across NUMA nodes, defer
1922	 * hash allocation until vmalloc space is available.
1923	 */
1924	if (hashdist)
1925		return;
1926
1927	inode_hashtable =
1928		alloc_large_system_hash("Inode-cache",
1929					sizeof(struct hlist_head),
1930					ihash_entries,
1931					14,
1932					HASH_EARLY,
1933					&i_hash_shift,
1934					&i_hash_mask,
1935					0,
1936					0);
1937
1938	for (loop = 0; loop < (1U << i_hash_shift); loop++)
1939		INIT_HLIST_HEAD(&inode_hashtable[loop]);
1940}
1941
1942void __init inode_init(void)
1943{
1944	unsigned int loop;
1945
1946	/* inode slab cache */
1947	inode_cachep = kmem_cache_create("inode_cache",
1948					 sizeof(struct inode),
1949					 0,
1950					 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
1951					 SLAB_MEM_SPREAD|SLAB_ACCOUNT),
1952					 init_once);
1953
1954	/* Hash may have been set up in inode_init_early */
1955	if (!hashdist)
1956		return;
1957
1958	inode_hashtable =
1959		alloc_large_system_hash("Inode-cache",
1960					sizeof(struct hlist_head),
1961					ihash_entries,
1962					14,
1963					0,
1964					&i_hash_shift,
1965					&i_hash_mask,
1966					0,
1967					0);
1968
1969	for (loop = 0; loop < (1U << i_hash_shift); loop++)
1970		INIT_HLIST_HEAD(&inode_hashtable[loop]);
1971}
1972
1973void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev)
1974{
1975	inode->i_mode = mode;
1976	if (S_ISCHR(mode)) {
1977		inode->i_fop = &def_chr_fops;
1978		inode->i_rdev = rdev;
1979	} else if (S_ISBLK(mode)) {
1980		inode->i_fop = &def_blk_fops;
1981		inode->i_rdev = rdev;
1982	} else if (S_ISFIFO(mode))
1983		inode->i_fop = &pipefifo_fops;
1984	else if (S_ISSOCK(mode))
1985		;	/* leave it no_open_fops */
1986	else
1987		printk(KERN_DEBUG "init_special_inode: bogus i_mode (%o) for"
1988				  " inode %s:%lu\n", mode, inode->i_sb->s_id,
1989				  inode->i_ino);
1990}
1991EXPORT_SYMBOL(init_special_inode);
1992
1993/**
1994 * inode_init_owner - Init uid,gid,mode for new inode according to posix standards
1995 * @inode: New inode
1996 * @dir: Directory inode
1997 * @mode: mode of the new inode
1998 */
1999void inode_init_owner(struct inode *inode, const struct inode *dir,
2000			umode_t mode)
2001{
2002	inode->i_uid = current_fsuid();
2003	if (dir && dir->i_mode & S_ISGID) {
2004		inode->i_gid = dir->i_gid;
2005		if (S_ISDIR(mode))
2006			mode |= S_ISGID;
2007	} else
2008		inode->i_gid = current_fsgid();
2009	inode->i_mode = mode;
2010}
2011EXPORT_SYMBOL(inode_init_owner);
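
/*
 * Example (sketch): a typical mknod method allocates an inode, lets
 * inode_init_owner() apply ownership and sgid-directory rules, and
 * hands device nodes to init_special_inode(); the myfs_mknod() name
 * is hypothetical and the persistence step is omitted.
 */
static int myfs_mknod(struct inode *dir, struct dentry *dentry,
		      umode_t mode, dev_t rdev)
{
	struct inode *inode = new_inode(dir->i_sb);

	if (!inode)
		return -ENOMEM;
	inode_init_owner(inode, dir, mode);
	inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
	init_special_inode(inode, inode->i_mode, rdev);
	d_instantiate(dentry, inode);
	return 0;
}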
2012
2013/**
2014 * inode_owner_or_capable - check current task permissions to inode
2015 * @inode: inode being checked
2016 *
2017 * Return true if current either has CAP_FOWNER in a namespace with the
2018 * inode owner uid mapped, or owns the file.
2019 */
2020bool inode_owner_or_capable(const struct inode *inode)
2021{
2022	struct user_namespace *ns;
2023
2024	if (uid_eq(current_fsuid(), inode->i_uid))
2025		return true;
2026
2027	ns = current_user_ns();
2028	if (ns_capable(ns, CAP_FOWNER) && kuid_has_mapping(ns, inode->i_uid))
2029		return true;
2030	return false;
2031}
2032EXPORT_SYMBOL(inode_owner_or_capable);
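
/*
 * Example (sketch): the usual permission guard in an ioctl that
 * changes per-inode attributes; callers that neither own the inode
 * nor hold a mapped CAP_FOWNER are rejected.
 */
static int myfs_ioctl_setflags(struct file *file, unsigned int flags)
{
	struct inode *inode = file_inode(file);

	if (!inode_owner_or_capable(inode))
		return -EPERM;
	/* ... apply @flags under inode_lock(inode) ... */
	return 0;
}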
2033
2034/*
2035 * Direct i/o helper functions
2036 */
2037static void __inode_dio_wait(struct inode *inode)
2038{
2039	wait_queue_head_t *wq = bit_waitqueue(&inode->i_state, __I_DIO_WAKEUP);
2040	DEFINE_WAIT_BIT(q, &inode->i_state, __I_DIO_WAKEUP);
2041
2042	do {
2043		prepare_to_wait(wq, &q.wait, TASK_UNINTERRUPTIBLE);
2044		if (atomic_read(&inode->i_dio_count))
2045			schedule();
2046	} while (atomic_read(&inode->i_dio_count));
2047	finish_wait(wq, &q.wait);
2048}
2049
2050/**
2051 * inode_dio_wait - wait for outstanding DIO requests to finish
2052 * @inode: inode to wait for
2053 *
2054 * Waits for all pending direct I/O requests to finish so that we can
2055 * proceed with a truncate or equivalent operation.
2056 *
2057 * Must be called under a lock that serializes taking new references
2058 * to i_dio_count, usually by inode->i_mutex.
2059 */
2060void inode_dio_wait(struct inode *inode)
2061{
2062	if (atomic_read(&inode->i_dio_count))
2063		__inode_dio_wait(inode);
2064}
2065EXPORT_SYMBOL(inode_dio_wait);
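
/*
 * Example (sketch): a truncate path drains in-flight direct I/O
 * before shrinking the file; i_rwsem (via inode_lock()) is the lock
 * that stops new i_dio_count references from appearing.
 */
static void myfs_setsize(struct inode *inode, loff_t newsize)
{
	inode_lock(inode);
	inode_dio_wait(inode);			/* wait out pending DIO */
	truncate_setsize(inode, newsize);	/* i_size + pagecache */
	inode_unlock(inode);
}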
2066
2067/*
2068 * inode_set_flags - atomically set some inode flags
2069 *
2070 * Note: the caller should be holding i_mutex, or else be sure that
2071 * they have exclusive access to the inode structure (i.e., while the
2072 * inode is being instantiated).  The reason for the cmpxchg() loop
2073 * --- which wouldn't be necessary if all code paths that modify
2074 * i_flags actually followed this rule --- is that there is at least
2075 * one code path which doesn't today, so we use cmpxchg() out of an
2076 * abundance of caution.
2077 *
2078 * In the long run, i_mutex is overkill, and we should probably look
2079 * at using the i_lock spinlock to protect i_flags, and then make sure
2080 * it is so documented in include/linux/fs.h and that all code follows
2081 * the locking convention!!
2082 */
2083void inode_set_flags(struct inode *inode, unsigned int flags,
2084		     unsigned int mask)
2085{
2086	unsigned int old_flags, new_flags;
2087
2088	WARN_ON_ONCE(flags & ~mask);
2089	do {
2090		old_flags = ACCESS_ONCE(inode->i_flags);
2091		new_flags = (old_flags & ~mask) | flags;
2092	} while (unlikely(cmpxchg(&inode->i_flags, old_flags,
2093				  new_flags) != old_flags));
2094}
2095EXPORT_SYMBOL(inode_set_flags);
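
/*
 * Example (sketch): propagating an on-disk NOATIME attribute into the
 * in-core i_flags without clobbering unrelated flag bits.
 */
static void myfs_set_inode_flags(struct inode *inode, bool noatime)
{
	inode_set_flags(inode, noatime ? S_NOATIME : 0, S_NOATIME);
}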
2096
2097void inode_nohighmem(struct inode *inode)
2098{
2099	mapping_set_gfp_mask(inode->i_mapping, GFP_USER);
2100}
2101EXPORT_SYMBOL(inode_nohighmem);
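
/*
 * Example: filesystems that expose symlink bodies through
 * page_get_link() must keep them out of highmem, so symlink
 * initialisation typically looks like:
 *
 *	inode->i_op = &page_symlink_inode_operations;
 *	inode_nohighmem(inode);
 */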
2102
2103/**
2104 * current_time - Return FS time
2105 * @inode: inode.
2106 *
2107 * Return the current time truncated to the time granularity supported by
2108 * the fs.
2109 *
2110 * Note that inode and inode->i_sb cannot be NULL.
2111 * Otherwise, the function warns and returns time without truncation.
2112 */
2113struct timespec current_time(struct inode *inode)
2114{
2115	struct timespec now = current_kernel_time();
2116
2117	if (unlikely(!inode->i_sb)) {
2118		WARN(1, "current_time() called with uninitialized super_block in the inode");
2119		return now;
2120	}
2121
2122	return timespec_trunc(now, inode->i_sb->s_time_gran);
2123}
2124EXPORT_SYMBOL(current_time);
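
/*
 * Example: filesystems use current_time() rather than raw clock
 * accessors so that new timestamps respect s_time_gran, e.g. when
 * initialising a freshly created inode:
 *
 *	inode->i_atime = inode->i_mtime = inode->i_ctime =
 *						current_time(inode);
 */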
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * (C) 1997 Linus Torvalds
   4 * (C) 1999 Andrea Arcangeli <andrea@suse.de> (dynamic inode allocation)
   5 */
   6#include <linux/export.h>
   7#include <linux/fs.h>
   8#include <linux/mm.h>
   9#include <linux/backing-dev.h>
  10#include <linux/hash.h>
  11#include <linux/swap.h>
  12#include <linux/security.h>
  13#include <linux/cdev.h>
  14#include <linux/memblock.h>
  15#include <linux/fsnotify.h>
  16#include <linux/mount.h>
  17#include <linux/posix_acl.h>
  18#include <linux/prefetch.h>
  19#include <linux/buffer_head.h> /* for inode_has_buffers */
  20#include <linux/ratelimit.h>
  21#include <linux/list_lru.h>
  22#include <linux/iversion.h>
  23#include <trace/events/writeback.h>
  24#include "internal.h"
  25
  26/*
  27 * Inode locking rules:
  28 *
  29 * inode->i_lock protects:
  30 *   inode->i_state, inode->i_hash, __iget()
  31 * Inode LRU list locks protect:
  32 *   inode->i_sb->s_inode_lru, inode->i_lru
  33 * inode->i_sb->s_inode_list_lock protects:
  34 *   inode->i_sb->s_inodes, inode->i_sb_list
  35 * bdi->wb.list_lock protects:
  36 *   bdi->wb.b_{dirty,io,more_io,dirty_time}, inode->i_io_list
  37 * inode_hash_lock protects:
  38 *   inode_hashtable, inode->i_hash
  39 *
  40 * Lock ordering:
  41 *
  42 * inode->i_sb->s_inode_list_lock
  43 *   inode->i_lock
  44 *     Inode LRU list locks
  45 *
  46 * bdi->wb.list_lock
  47 *   inode->i_lock
  48 *
  49 * inode_hash_lock
  50 *   inode->i_sb->s_inode_list_lock
  51 *   inode->i_lock
  52 *
  53 * iunique_lock
  54 *   inode_hash_lock
  55 */
  56
  57static unsigned int i_hash_mask __read_mostly;
  58static unsigned int i_hash_shift __read_mostly;
  59static struct hlist_head *inode_hashtable __read_mostly;
  60static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock);
  61
  62/*
  63 * Empty aops. Can be used for the cases where the user does not
  64 * define any of the address_space operations.
  65 */
  66const struct address_space_operations empty_aops = {
  67};
  68EXPORT_SYMBOL(empty_aops);
  69
  70/*
  71 * Statistics gathering..
  72 */
  73struct inodes_stat_t inodes_stat;
  74
  75static DEFINE_PER_CPU(unsigned long, nr_inodes);
  76static DEFINE_PER_CPU(unsigned long, nr_unused);
  77
  78static struct kmem_cache *inode_cachep __read_mostly;
  79
  80static long get_nr_inodes(void)
  81{
  82	int i;
  83	long sum = 0;
  84	for_each_possible_cpu(i)
  85		sum += per_cpu(nr_inodes, i);
  86	return sum < 0 ? 0 : sum;
  87}
  88
  89static inline long get_nr_inodes_unused(void)
  90{
  91	int i;
  92	long sum = 0;
  93	for_each_possible_cpu(i)
  94		sum += per_cpu(nr_unused, i);
  95	return sum < 0 ? 0 : sum;
  96}
  97
  98long get_nr_dirty_inodes(void)
  99{
 100	/* not actually dirty inodes, but a wild approximation */
 101	long nr_dirty = get_nr_inodes() - get_nr_inodes_unused();
 102	return nr_dirty > 0 ? nr_dirty : 0;
 103}
 104
 105/*
 106 * Handle nr_inode sysctl
 107 */
 108#ifdef CONFIG_SYSCTL
 109int proc_nr_inodes(struct ctl_table *table, int write,
 110		   void *buffer, size_t *lenp, loff_t *ppos)
 111{
 112	inodes_stat.nr_inodes = get_nr_inodes();
 113	inodes_stat.nr_unused = get_nr_inodes_unused();
 114	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
 115}
 116#endif
 117
 118static int no_open(struct inode *inode, struct file *file)
 119{
 120	return -ENXIO;
 121}
 122
 123/**
 124 * inode_init_always - perform inode structure initialisation
 125 * @sb: superblock inode belongs to
 126 * @inode: inode to initialise
 127 *
 128 * These are initializations that need to be done on every inode
 129 * allocation as the fields are not initialised by slab allocation.
 130 */
 131int inode_init_always(struct super_block *sb, struct inode *inode)
 132{
 133	static const struct inode_operations empty_iops;
 134	static const struct file_operations no_open_fops = {.open = no_open};
 135	struct address_space *const mapping = &inode->i_data;
 136
 137	inode->i_sb = sb;
 138	inode->i_blkbits = sb->s_blocksize_bits;
 139	inode->i_flags = 0;
 140	atomic64_set(&inode->i_sequence, 0);
 141	atomic_set(&inode->i_count, 1);
 142	inode->i_op = &empty_iops;
 143	inode->i_fop = &no_open_fops;
 144	inode->i_ino = 0;
 145	inode->__i_nlink = 1;
 146	inode->i_opflags = 0;
 147	if (sb->s_xattr)
 148		inode->i_opflags |= IOP_XATTR;
 149	i_uid_write(inode, 0);
 150	i_gid_write(inode, 0);
 151	atomic_set(&inode->i_writecount, 0);
 152	inode->i_size = 0;
 153	inode->i_write_hint = WRITE_LIFE_NOT_SET;
 154	inode->i_blocks = 0;
 155	inode->i_bytes = 0;
 156	inode->i_generation = 0;
 157	inode->i_pipe = NULL;
 158	inode->i_cdev = NULL;
 159	inode->i_link = NULL;
 160	inode->i_dir_seq = 0;
 161	inode->i_rdev = 0;
 162	inode->dirtied_when = 0;
 163
 164#ifdef CONFIG_CGROUP_WRITEBACK
 165	inode->i_wb_frn_winner = 0;
 166	inode->i_wb_frn_avg_time = 0;
 167	inode->i_wb_frn_history = 0;
 168#endif
 169
 170	if (security_inode_alloc(inode))
 171		goto out;
 172	spin_lock_init(&inode->i_lock);
 173	lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key);
 174
 175	init_rwsem(&inode->i_rwsem);
 176	lockdep_set_class(&inode->i_rwsem, &sb->s_type->i_mutex_key);
 177
 178	atomic_set(&inode->i_dio_count, 0);
 179
 180	mapping->a_ops = &empty_aops;
 181	mapping->host = inode;
 182	mapping->flags = 0;
 183	if (sb->s_type->fs_flags & FS_THP_SUPPORT)
 184		__set_bit(AS_THP_SUPPORT, &mapping->flags);
 185	mapping->wb_err = 0;
 186	atomic_set(&mapping->i_mmap_writable, 0);
 187#ifdef CONFIG_READ_ONLY_THP_FOR_FS
 188	atomic_set(&mapping->nr_thps, 0);
 189#endif
 190	mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE);
 191	mapping->private_data = NULL;
 192	mapping->writeback_index = 0;
 193	inode->i_private = NULL;
 194	inode->i_mapping = mapping;
 195	INIT_HLIST_HEAD(&inode->i_dentry);	/* buggered by rcu freeing */
 196#ifdef CONFIG_FS_POSIX_ACL
 197	inode->i_acl = inode->i_default_acl = ACL_NOT_CACHED;
 198#endif
 199
 200#ifdef CONFIG_FSNOTIFY
 201	inode->i_fsnotify_mask = 0;
 202#endif
 203	inode->i_flctx = NULL;
 204	this_cpu_inc(nr_inodes);
 205
 206	return 0;
 207out:
 208	return -ENOMEM;
 209}
 210EXPORT_SYMBOL(inode_init_always);
 211
 212void free_inode_nonrcu(struct inode *inode)
 213{
 214	kmem_cache_free(inode_cachep, inode);
 215}
 216EXPORT_SYMBOL(free_inode_nonrcu);
 217
 218static void i_callback(struct rcu_head *head)
 219{
 220	struct inode *inode = container_of(head, struct inode, i_rcu);
 221	if (inode->free_inode)
 222		inode->free_inode(inode);
 223	else
 224		free_inode_nonrcu(inode);
 225}
 226
 227static struct inode *alloc_inode(struct super_block *sb)
 228{
 229	const struct super_operations *ops = sb->s_op;
 230	struct inode *inode;
 231
 232	if (ops->alloc_inode)
 233		inode = ops->alloc_inode(sb);
 234	else
 235		inode = kmem_cache_alloc(inode_cachep, GFP_KERNEL);
 236
 237	if (!inode)
 238		return NULL;
 239
 240	if (unlikely(inode_init_always(sb, inode))) {
 241		if (ops->destroy_inode) {
 242			ops->destroy_inode(inode);
 243			if (!ops->free_inode)
 244				return NULL;
 245		}
 246		inode->free_inode = ops->free_inode;
 247		i_callback(&inode->i_rcu);
 248		return NULL;
 249	}
 250
 251	return inode;
 252}
 253
 254void __destroy_inode(struct inode *inode)
 255{
 256	BUG_ON(inode_has_buffers(inode));
 257	inode_detach_wb(inode);
 258	security_inode_free(inode);
 259	fsnotify_inode_delete(inode);
 260	locks_free_lock_context(inode);
 261	if (!inode->i_nlink) {
 262		WARN_ON(atomic_long_read(&inode->i_sb->s_remove_count) == 0);
 263		atomic_long_dec(&inode->i_sb->s_remove_count);
 264	}
 265
 266#ifdef CONFIG_FS_POSIX_ACL
 267	if (inode->i_acl && !is_uncached_acl(inode->i_acl))
 268		posix_acl_release(inode->i_acl);
 269	if (inode->i_default_acl && !is_uncached_acl(inode->i_default_acl))
 270		posix_acl_release(inode->i_default_acl);
 271#endif
 272	this_cpu_dec(nr_inodes);
 273}
 274EXPORT_SYMBOL(__destroy_inode);
 275
 276static void destroy_inode(struct inode *inode)
 277{
 278	const struct super_operations *ops = inode->i_sb->s_op;
 279
 280	BUG_ON(!list_empty(&inode->i_lru));
 281	__destroy_inode(inode);
 282	if (ops->destroy_inode) {
 283		ops->destroy_inode(inode);
 284		if (!ops->free_inode)
 285			return;
 286	}
 287	inode->free_inode = ops->free_inode;
 288	call_rcu(&inode->i_rcu, i_callback);
 289}
 290
 291/**
 292 * drop_nlink - directly drop an inode's link count
 293 * @inode: inode
 294 *
 295 * This is a low-level filesystem helper to replace any
 296 * direct filesystem manipulation of i_nlink.  In cases
 297 * where we are attempting to track writes to the
 298 * filesystem, a decrement to zero means an imminent
 299 * write when the file is truncated and actually unlinked
 300 * on the filesystem.
 301 */
 302void drop_nlink(struct inode *inode)
 303{
 304	WARN_ON(inode->i_nlink == 0);
 305	inode->__i_nlink--;
 306	if (!inode->i_nlink)
 307		atomic_long_inc(&inode->i_sb->s_remove_count);
 308}
 309EXPORT_SYMBOL(drop_nlink);
 310
 311/**
 312 * clear_nlink - directly zero an inode's link count
 313 * @inode: inode
 314 *
 315 * This is a low-level filesystem helper to replace any
 316 * direct filesystem manipulation of i_nlink.  See
 317 * drop_nlink() for why we care about i_nlink hitting zero.
 318 */
 319void clear_nlink(struct inode *inode)
 320{
 321	if (inode->i_nlink) {
 322		inode->__i_nlink = 0;
 323		atomic_long_inc(&inode->i_sb->s_remove_count);
 324	}
 325}
 326EXPORT_SYMBOL(clear_nlink);
 327
 328/**
 329 * set_nlink - directly set an inode's link count
 330 * @inode: inode
 331 * @nlink: new nlink (should be non-zero)
 332 *
 333 * This is a low-level filesystem helper to replace any
 334 * direct filesystem manipulation of i_nlink.
 335 */
 336void set_nlink(struct inode *inode, unsigned int nlink)
 337{
 338	if (!nlink) {
 339		clear_nlink(inode);
 340	} else {
 341		/* Yes, some filesystems do change nlink from zero to one */
 342		if (inode->i_nlink == 0)
 343			atomic_long_dec(&inode->i_sb->s_remove_count);
 344
 345		inode->__i_nlink = nlink;
 346	}
 347}
 348EXPORT_SYMBOL(set_nlink);
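
/*
 * Example (sketch): when reading an inode off disk, the in-core link
 * count is seeded from the on-disk metadata; "raw" stands in for a
 * hypothetical on-disk inode structure:
 *
 *	set_nlink(inode, le32_to_cpu(raw->i_links_count));
 */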
 349
 350/**
 351 * inc_nlink - directly increment an inode's link count
 352 * @inode: inode
 353 *
 354 * This is a low-level filesystem helper to replace any
 355 * direct filesystem manipulation of i_nlink.  Currently,
 356 * it is only here for parity with dec_nlink().
 357 */
 358void inc_nlink(struct inode *inode)
 359{
 360	if (unlikely(inode->i_nlink == 0)) {
 361		WARN_ON(!(inode->i_state & I_LINKABLE));
 362		atomic_long_dec(&inode->i_sb->s_remove_count);
 363	}
 364
 365	inode->__i_nlink++;
 366}
 367EXPORT_SYMBOL(inc_nlink);
 368
 369static void __address_space_init_once(struct address_space *mapping)
 370{
 371	xa_init_flags(&mapping->i_pages, XA_FLAGS_LOCK_IRQ | XA_FLAGS_ACCOUNT);
 372	init_rwsem(&mapping->i_mmap_rwsem);
 373	INIT_LIST_HEAD(&mapping->private_list);
 374	spin_lock_init(&mapping->private_lock);
 375	mapping->i_mmap = RB_ROOT_CACHED;
 376}
 377
 378void address_space_init_once(struct address_space *mapping)
 379{
 380	memset(mapping, 0, sizeof(*mapping));
 381	__address_space_init_once(mapping);
 382}
 383EXPORT_SYMBOL(address_space_init_once);
 384
 385/*
 386 * These are initializations that only need to be done
 387 * once, because the fields are idempotent across use
 388 * of the inode, so let the slab be aware of that.
 389 */
 390void inode_init_once(struct inode *inode)
 391{
 392	memset(inode, 0, sizeof(*inode));
 393	INIT_HLIST_NODE(&inode->i_hash);
 394	INIT_LIST_HEAD(&inode->i_devices);
 395	INIT_LIST_HEAD(&inode->i_io_list);
 396	INIT_LIST_HEAD(&inode->i_wb_list);
 397	INIT_LIST_HEAD(&inode->i_lru);
 398	__address_space_init_once(&inode->i_data);
 399	i_size_ordered_init(inode);
 400}
 401EXPORT_SYMBOL(inode_init_once);
 402
 403static void init_once(void *foo)
 404{
 405	struct inode *inode = (struct inode *) foo;
 406
 407	inode_init_once(inode);
 408}
 409
 410/*
 411 * inode->i_lock must be held
 412 */
 413void __iget(struct inode *inode)
 414{
 415	atomic_inc(&inode->i_count);
 416}
 417
 418/*
 419 * get additional reference to inode; caller must already hold one.
 420 */
 421void ihold(struct inode *inode)
 422{
 423	WARN_ON(atomic_inc_return(&inode->i_count) < 2);
 424}
 425EXPORT_SYMBOL(ihold);
 426
 427static void inode_lru_list_add(struct inode *inode)
 428{
 429	if (list_lru_add(&inode->i_sb->s_inode_lru, &inode->i_lru))
 430		this_cpu_inc(nr_unused);
 431	else
 432		inode->i_state |= I_REFERENCED;
 433}
 434
 435/*
 436 * Add inode to LRU if needed (inode is unused and clean).
 437 *
 438 * Needs inode->i_lock held.
 439 */
 440void inode_add_lru(struct inode *inode)
 441{
 442	if (!(inode->i_state & (I_DIRTY_ALL | I_SYNC |
 443				I_FREEING | I_WILL_FREE)) &&
 444	    !atomic_read(&inode->i_count) && inode->i_sb->s_flags & SB_ACTIVE)
 445		inode_lru_list_add(inode);
 446}
 447
 448
 449static void inode_lru_list_del(struct inode *inode)
 450{
 451
 452	if (list_lru_del(&inode->i_sb->s_inode_lru, &inode->i_lru))
 453		this_cpu_dec(nr_unused);
 454}
 455
 456/**
 457 * inode_sb_list_add - add inode to the superblock list of inodes
 458 * @inode: inode to add
 459 */
 460void inode_sb_list_add(struct inode *inode)
 461{
 462	spin_lock(&inode->i_sb->s_inode_list_lock);
 463	list_add(&inode->i_sb_list, &inode->i_sb->s_inodes);
 464	spin_unlock(&inode->i_sb->s_inode_list_lock);
 465}
 466EXPORT_SYMBOL_GPL(inode_sb_list_add);
 467
 468static inline void inode_sb_list_del(struct inode *inode)
 469{
 470	if (!list_empty(&inode->i_sb_list)) {
 471		spin_lock(&inode->i_sb->s_inode_list_lock);
 472		list_del_init(&inode->i_sb_list);
 473		spin_unlock(&inode->i_sb->s_inode_list_lock);
 474	}
 475}
 476
 477static unsigned long hash(struct super_block *sb, unsigned long hashval)
 478{
 479	unsigned long tmp;
 480
 481	tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
 482			L1_CACHE_BYTES;
 483	tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> i_hash_shift);
 484	return tmp & i_hash_mask;
 485}
 486
 487/**
 488 *	__insert_inode_hash - hash an inode
 489 *	@inode: unhashed inode
 490 *	@hashval: unsigned long value used to locate this object in the
 491 *		inode_hashtable.
 492 *
 493 *	Add an inode to the inode hash for this superblock.
 494 */
 495void __insert_inode_hash(struct inode *inode, unsigned long hashval)
 496{
 497	struct hlist_head *b = inode_hashtable + hash(inode->i_sb, hashval);
 498
 499	spin_lock(&inode_hash_lock);
 500	spin_lock(&inode->i_lock);
 501	hlist_add_head_rcu(&inode->i_hash, b);
 502	spin_unlock(&inode->i_lock);
 503	spin_unlock(&inode_hash_lock);
 504}
 505EXPORT_SYMBOL(__insert_inode_hash);
 506
 507/**
 508 *	__remove_inode_hash - remove an inode from the hash
 509 *	@inode: inode to unhash
 510 *
 511 *	Remove an inode from the inode hash for this superblock.
 512 */
 513void __remove_inode_hash(struct inode *inode)
 514{
 515	spin_lock(&inode_hash_lock);
 516	spin_lock(&inode->i_lock);
 517	hlist_del_init_rcu(&inode->i_hash);
 518	spin_unlock(&inode->i_lock);
 519	spin_unlock(&inode_hash_lock);
 520}
 521EXPORT_SYMBOL(__remove_inode_hash);
 522
 523void clear_inode(struct inode *inode)
 524{
 525	/*
 526	 * We have to cycle the i_pages lock here because reclaim can be in the
 527	 * process of removing the last page (in __delete_from_page_cache())
 528	 * and we must not free the mapping under it.
 529	 */
 530	xa_lock_irq(&inode->i_data.i_pages);
 531	BUG_ON(inode->i_data.nrpages);
 532	/*
 533	 * Almost always, mapping_empty(&inode->i_data) here; but there are
 534	 * two known and long-standing ways in which nodes may get left behind
 535	 * (when deep radix-tree node allocation failed partway; or when THP
 536	 * collapse_file() failed). Until those two known cases are cleaned up,
 537	 * or a cleanup function is called here, do not BUG_ON(!mapping_empty),
 538	 * nor even WARN_ON(!mapping_empty).
 539	 */
 540	xa_unlock_irq(&inode->i_data.i_pages);
 541	BUG_ON(!list_empty(&inode->i_data.private_list));
 542	BUG_ON(!(inode->i_state & I_FREEING));
 543	BUG_ON(inode->i_state & I_CLEAR);
 544	BUG_ON(!list_empty(&inode->i_wb_list));
 545	/* don't need i_lock here, no concurrent mods to i_state */
 546	inode->i_state = I_FREEING | I_CLEAR;
 547}
 548EXPORT_SYMBOL(clear_inode);
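
/*
 * Example (a minimal sketch): the usual shape of a filesystem's
 * ->evict_inode() method, which must end in clear_inode();
 * myfs_free_blocks() is a hypothetical helper that releases disk
 * space for unlinked inodes.
 */
static void myfs_evict_inode(struct inode *inode)
{
	truncate_inode_pages_final(&inode->i_data);
	if (!inode->i_nlink && !is_bad_inode(inode))
		myfs_free_blocks(inode);
	clear_inode(inode);
}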
 549
 550/*
 551 * Free the inode passed in, removing it from the lists it is still connected
 552 * to. We remove any pages still attached to the inode and wait for any IO that
 553 * is still in progress before finally destroying the inode.
 554 *
 555 * An inode must already be marked I_FREEING so that we avoid the inode being
 556 * moved back onto lists if we race with other code that manipulates the lists
 557 * (e.g. writeback_single_inode). The caller is responsible for setting this.
 558 *
 559 * An inode must already be removed from the LRU list before being evicted from
 560 * the cache. This should occur atomically with setting the I_FREEING state
 561 * flag, so no inodes here should ever be on the LRU when being evicted.
 562 */
 563static void evict(struct inode *inode)
 564{
 565	const struct super_operations *op = inode->i_sb->s_op;
 566
 567	BUG_ON(!(inode->i_state & I_FREEING));
 568	BUG_ON(!list_empty(&inode->i_lru));
 569
 570	if (!list_empty(&inode->i_io_list))
 571		inode_io_list_del(inode);
 572
 573	inode_sb_list_del(inode);
 574
 575	/*
 576	 * Wait for flusher thread to be done with the inode so that filesystem
 577	 * does not start destroying it while writeback is still running. Since
 578	 * the inode has I_FREEING set, flusher thread won't start new work on
 579	 * the inode.  We just have to wait for running writeback to finish.
 580	 */
 581	inode_wait_for_writeback(inode);
 582
 583	if (op->evict_inode) {
 584		op->evict_inode(inode);
 585	} else {
 586		truncate_inode_pages_final(&inode->i_data);
 587		clear_inode(inode);
 588	}
 589	if (S_ISCHR(inode->i_mode) && inode->i_cdev)
 590		cd_forget(inode);
 591
 592	remove_inode_hash(inode);
 593
 594	spin_lock(&inode->i_lock);
 595	wake_up_bit(&inode->i_state, __I_NEW);
 596	BUG_ON(inode->i_state != (I_FREEING | I_CLEAR));
 597	spin_unlock(&inode->i_lock);
 598
 599	destroy_inode(inode);
 600}
 601
 602/*
 603 * dispose_list - dispose of the contents of a local list
 604 * @head: the head of the list to free
 605 *
 606 * Dispose-list gets a local list with local inodes in it, so it doesn't
 607 * need to worry about list corruption and SMP locks.
 608 */
 609static void dispose_list(struct list_head *head)
 610{
 611	while (!list_empty(head)) {
 612		struct inode *inode;
 613
 614		inode = list_first_entry(head, struct inode, i_lru);
 615		list_del_init(&inode->i_lru);
 616
 617		evict(inode);
 618		cond_resched();
 619	}
 620}
 621
 622/**
 623 * evict_inodes	- evict all evictable inodes for a superblock
 624 * @sb:		superblock to operate on
 625 *
 626 * Make sure that no inodes with zero refcount are retained.  This is
 627 * called by superblock shutdown after having SB_ACTIVE flag removed,
 628 * so any inode reaching zero refcount during or after that call will
 629 * be immediately evicted.
 630 */
 631void evict_inodes(struct super_block *sb)
 632{
 633	struct inode *inode, *next;
 634	LIST_HEAD(dispose);
 635
 636again:
 637	spin_lock(&sb->s_inode_list_lock);
 638	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
 639		if (atomic_read(&inode->i_count))
 640			continue;
 641
 642		spin_lock(&inode->i_lock);
 643		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
 644			spin_unlock(&inode->i_lock);
 645			continue;
 646		}
 647
 648		inode->i_state |= I_FREEING;
 649		inode_lru_list_del(inode);
 650		spin_unlock(&inode->i_lock);
 651		list_add(&inode->i_lru, &dispose);
 652
 653		/*
 654		 * We can have a ton of inodes to evict at unmount time given
 655		 * enough memory, check to see if we need to go to sleep for a
 656		 * bit so we don't livelock.
 657		 */
 658		if (need_resched()) {
 659			spin_unlock(&sb->s_inode_list_lock);
 660			cond_resched();
 661			dispose_list(&dispose);
 662			goto again;
 663		}
 664	}
 665	spin_unlock(&sb->s_inode_list_lock);
 666
 667	dispose_list(&dispose);
 668}
 669EXPORT_SYMBOL_GPL(evict_inodes);
 670
 671/**
 672 * invalidate_inodes	- attempt to free all inodes on a superblock
 673 * @sb:		superblock to operate on
 674 * @kill_dirty: flag to guide handling of dirty inodes
 675 *
 676 * Attempts to free all inodes for a given superblock.  If there were any
 677 * busy inodes return a non-zero value, else zero.
 678 * If @kill_dirty is set, discard dirty inodes too, otherwise treat
 679 * them as busy.
 680 */
 681int invalidate_inodes(struct super_block *sb, bool kill_dirty)
 682{
 683	int busy = 0;
 684	struct inode *inode, *next;
 685	LIST_HEAD(dispose);
 686
 687again:
 688	spin_lock(&sb->s_inode_list_lock);
 689	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
 690		spin_lock(&inode->i_lock);
 691		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
 692			spin_unlock(&inode->i_lock);
 693			continue;
 694		}
 695		if (inode->i_state & I_DIRTY_ALL && !kill_dirty) {
 696			spin_unlock(&inode->i_lock);
 697			busy = 1;
 698			continue;
 699		}
 700		if (atomic_read(&inode->i_count)) {
 701			spin_unlock(&inode->i_lock);
 702			busy = 1;
 703			continue;
 704		}
 705
 706		inode->i_state |= I_FREEING;
 707		inode_lru_list_del(inode);
 708		spin_unlock(&inode->i_lock);
 709		list_add(&inode->i_lru, &dispose);
 710		if (need_resched()) {
 711			spin_unlock(&sb->s_inode_list_lock);
 712			cond_resched();
 713			dispose_list(&dispose);
 714			goto again;
 715		}
 716	}
 717	spin_unlock(&sb->s_inode_list_lock);
 718
 719	dispose_list(&dispose);
 720
 721	return busy;
 722}
 723
 724/*
 725 * Isolate the inode from the LRU in preparation for freeing it.
 726 *
 727 * Any inodes which are pinned purely because of attached pagecache have their
 728 * pagecache removed.  If the inode has metadata buffers attached to
 729 * mapping->private_list then try to remove them.
 730 *
 731 * If the inode has the I_REFERENCED flag set, then it means that it has been
 732 * used recently - the flag is set in iput_final(). When we encounter such an
 733 * inode, clear the flag and move it to the back of the LRU so it gets another
 734 * pass through the LRU before it gets reclaimed. This is necessary because of
 735 * the fact we are doing lazy LRU updates to minimise lock contention so the
 736 * LRU does not have strict ordering. Hence we don't want to reclaim inodes
 737 * with this flag set because they are the inodes that are out of order.
 738 */
 739static enum lru_status inode_lru_isolate(struct list_head *item,
 740		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
 741{
 742	struct list_head *freeable = arg;
 743	struct inode	*inode = container_of(item, struct inode, i_lru);
 744
 745	/*
 746	 * we are inverting the lru lock/inode->i_lock here, so use a trylock.
 747	 * If we fail to get the lock, just skip it.
 748	 */
 749	if (!spin_trylock(&inode->i_lock))
 750		return LRU_SKIP;
 751
 752	/*
 753	 * Referenced or dirty inodes are still in use. Give them another pass
 754	 * through the LRU as we cannot reclaim them now.
 755	 */
 756	if (atomic_read(&inode->i_count) ||
 757	    (inode->i_state & ~I_REFERENCED)) {
 758		list_lru_isolate(lru, &inode->i_lru);
 759		spin_unlock(&inode->i_lock);
 760		this_cpu_dec(nr_unused);
 761		return LRU_REMOVED;
 762	}
 763
 764	/* recently referenced inodes get one more pass */
 765	if (inode->i_state & I_REFERENCED) {
 766		inode->i_state &= ~I_REFERENCED;
 767		spin_unlock(&inode->i_lock);
 768		return LRU_ROTATE;
 769	}
 770
 771	if (inode_has_buffers(inode) || inode->i_data.nrpages) {
 772		__iget(inode);
 773		spin_unlock(&inode->i_lock);
 774		spin_unlock(lru_lock);
 775		if (remove_inode_buffers(inode)) {
 776			unsigned long reap;
 777			reap = invalidate_mapping_pages(&inode->i_data, 0, -1);
 778			if (current_is_kswapd())
 779				__count_vm_events(KSWAPD_INODESTEAL, reap);
 780			else
 781				__count_vm_events(PGINODESTEAL, reap);
 782			if (current->reclaim_state)
 783				current->reclaim_state->reclaimed_slab += reap;
 784		}
 785		iput(inode);
 786		spin_lock(lru_lock);
 787		return LRU_RETRY;
 788	}
 789
 790	WARN_ON(inode->i_state & I_NEW);
 791	inode->i_state |= I_FREEING;
 792	list_lru_isolate_move(lru, &inode->i_lru, freeable);
 793	spin_unlock(&inode->i_lock);
 794
 795	this_cpu_dec(nr_unused);
 796	return LRU_REMOVED;
 797}
 798
 799/*
 800 * Walk the superblock inode LRU for freeable inodes and attempt to free them.
 801 * This is called from the superblock shrinker function with a number of inodes
 802 * to trim from the LRU. Inodes to be freed are moved to a temporary list and
 803 * then are freed outside inode_lock by dispose_list().
 804 */
 805long prune_icache_sb(struct super_block *sb, struct shrink_control *sc)
 806{
 807	LIST_HEAD(freeable);
 808	long freed;
 809
 810	freed = list_lru_shrink_walk(&sb->s_inode_lru, sc,
 811				     inode_lru_isolate, &freeable);
 812	dispose_list(&freeable);
 813	return freed;
 814}
 815
 816static void __wait_on_freeing_inode(struct inode *inode);
 817/*
 818 * Called with the inode lock held.
 819 */
 820static struct inode *find_inode(struct super_block *sb,
 821				struct hlist_head *head,
 822				int (*test)(struct inode *, void *),
 823				void *data)
 824{
 825	struct inode *inode = NULL;
 826
 827repeat:
 828	hlist_for_each_entry(inode, head, i_hash) {
 829		if (inode->i_sb != sb)
 830			continue;
 831		if (!test(inode, data))
 832			continue;
 833		spin_lock(&inode->i_lock);
 834		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
 835			__wait_on_freeing_inode(inode);
 836			goto repeat;
 837		}
 838		if (unlikely(inode->i_state & I_CREATING)) {
 839			spin_unlock(&inode->i_lock);
 840			return ERR_PTR(-ESTALE);
 841		}
 842		__iget(inode);
 843		spin_unlock(&inode->i_lock);
 844		return inode;
 845	}
 846	return NULL;
 847}
 848
 849/*
 850 * find_inode_fast is the fast path version of find_inode, see the comment at
 851 * iget_locked for details.
 852 */
 853static struct inode *find_inode_fast(struct super_block *sb,
 854				struct hlist_head *head, unsigned long ino)
 855{
 856	struct inode *inode = NULL;
 857
 858repeat:
 859	hlist_for_each_entry(inode, head, i_hash) {
 860		if (inode->i_ino != ino)
 861			continue;
 862		if (inode->i_sb != sb)
 863			continue;
 864		spin_lock(&inode->i_lock);
 865		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
 866			__wait_on_freeing_inode(inode);
 867			goto repeat;
 868		}
 869		if (unlikely(inode->i_state & I_CREATING)) {
 870			spin_unlock(&inode->i_lock);
 871			return ERR_PTR(-ESTALE);
 872		}
 873		__iget(inode);
 874		spin_unlock(&inode->i_lock);
 875		return inode;
 876	}
 877	return NULL;
 878}
 879
 880/*
 881 * Each cpu owns a range of LAST_INO_BATCH numbers.
 882 * 'shared_last_ino' is dirtied only once out of LAST_INO_BATCH allocations,
 883 * to renew the exhausted range.
 884 *
 885 * This does not significantly increase overflow rate because every CPU can
 886 * consume at most LAST_INO_BATCH-1 unused inode numbers. So there is
 887 * NR_CPUS*(LAST_INO_BATCH-1) wastage. At 4096 and 1024, this is ~0.1% of the
 888 * 2^32 range, and is a worst-case. Even a 50% wastage would only increase
 889 * overflow rate by 2x, which does not seem too significant.
 890 *
 891 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
 892 * error if st_ino won't fit in target struct field. Use 32bit counter
 893 * here to attempt to avoid that.
 894 */
 895#define LAST_INO_BATCH 1024
 896static DEFINE_PER_CPU(unsigned int, last_ino);
 897
 898unsigned int get_next_ino(void)
 899{
 900	unsigned int *p = &get_cpu_var(last_ino);
 901	unsigned int res = *p;
 902
 903#ifdef CONFIG_SMP
 904	if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
 905		static atomic_t shared_last_ino;
 906		int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
 907
 908		res = next - LAST_INO_BATCH;
 909	}
 910#endif
 911
 912	res++;
 913	/* get_next_ino should not provide a 0 inode number */
 914	if (unlikely(!res))
 915		res++;
 916	*p = res;
 917	put_cpu_var(last_ino);
 918	return res;
 919}
 920EXPORT_SYMBOL(get_next_ino);
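
/*
 * Example: in-memory filesystems with no stable on-disk inode numbers
 * (tmpfs-style) typically do, when allocating an inode:
 *
 *	inode->i_ino = get_next_ino();
 */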
 921
 922/**
 923 *	new_inode_pseudo 	- obtain an inode
 924 *	@sb: superblock
 925 *
 926 *	Allocates a new inode for given superblock.
 927 *	Inode won't be chained in the superblock's s_inodes list.
 928 *	This means:
 929 *	- fs can't be unmounted
 930 *	- quotas, fsnotify, writeback can't work
 931 */
 932struct inode *new_inode_pseudo(struct super_block *sb)
 933{
 934	struct inode *inode = alloc_inode(sb);
 935
 936	if (inode) {
 937		spin_lock(&inode->i_lock);
 938		inode->i_state = 0;
 939		spin_unlock(&inode->i_lock);
 940		INIT_LIST_HEAD(&inode->i_sb_list);
 941	}
 942	return inode;
 943}
 944
 945/**
 946 *	new_inode 	- obtain an inode
 947 *	@sb: superblock
 948 *
 949 *	Allocates a new inode for given superblock. The default gfp_mask
 950 *	for allocations related to inode->i_mapping is GFP_HIGHUSER_MOVABLE.
 951 *	If HIGHMEM pages are unsuitable or it is known that pages allocated
 952 *	for the page cache are not reclaimable or migratable,
 953 *	mapping_set_gfp_mask() must be called with suitable flags on the
 954 *	newly created inode's mapping
 955 *
 956 */
 957struct inode *new_inode(struct super_block *sb)
 958{
 959	struct inode *inode;
 960
 961	spin_lock_prefetch(&sb->s_inode_list_lock);
 962
 963	inode = new_inode_pseudo(sb);
 964	if (inode)
 965		inode_sb_list_add(inode);
 966	return inode;
 967}
 968EXPORT_SYMBOL(new_inode);
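
/*
 * Example (sketch): a pseudo filesystem creating a fully in-memory
 * inode; the myfs_new_inode() name is hypothetical.
 */
static struct inode *myfs_new_inode(struct super_block *sb, umode_t mode)
{
	struct inode *inode = new_inode(sb);

	if (!inode)
		return NULL;
	inode->i_ino = get_next_ino();
	inode->i_mode = mode;
	inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
	return inode;
}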
 969
 970#ifdef CONFIG_DEBUG_LOCK_ALLOC
 971void lockdep_annotate_inode_mutex_key(struct inode *inode)
 972{
 973	if (S_ISDIR(inode->i_mode)) {
 974		struct file_system_type *type = inode->i_sb->s_type;
 975
 976		/* Set new key only if filesystem hasn't already changed it */
 977		if (lockdep_match_class(&inode->i_rwsem, &type->i_mutex_key)) {
 978			/*
 979			 * ensure nobody is actually holding i_mutex
 980			 */
 981			// mutex_destroy(&inode->i_mutex);
 982			init_rwsem(&inode->i_rwsem);
 983			lockdep_set_class(&inode->i_rwsem,
 984					  &type->i_mutex_dir_key);
 985		}
 986	}
 987}
 988EXPORT_SYMBOL(lockdep_annotate_inode_mutex_key);
 989#endif
 990
 991/**
 992 * unlock_new_inode - clear the I_NEW state and wake up any waiters
 993 * @inode:	new inode to unlock
 994 *
 995 * Called when the inode is fully initialised to clear the new state of the
 996 * inode and wake up anyone waiting for the inode to finish initialisation.
 997 */
 998void unlock_new_inode(struct inode *inode)
 999{
1000	lockdep_annotate_inode_mutex_key(inode);
1001	spin_lock(&inode->i_lock);
1002	WARN_ON(!(inode->i_state & I_NEW));
1003	inode->i_state &= ~I_NEW & ~I_CREATING;
1004	smp_mb();
1005	wake_up_bit(&inode->i_state, __I_NEW);
1006	spin_unlock(&inode->i_lock);
1007}
1008EXPORT_SYMBOL(unlock_new_inode);
1009
1010void discard_new_inode(struct inode *inode)
1011{
1012	lockdep_annotate_inode_mutex_key(inode);
1013	spin_lock(&inode->i_lock);
1014	WARN_ON(!(inode->i_state & I_NEW));
1015	inode->i_state &= ~I_NEW;
1016	smp_mb();
1017	wake_up_bit(&inode->i_state, __I_NEW);
1018	spin_unlock(&inode->i_lock);
1019	iput(inode);
1020}
1021EXPORT_SYMBOL(discard_new_inode);
1022
1023/**
1024 * lock_two_nondirectories - take two i_mutexes on non-directory objects
1025 *
1026 * Lock any non-NULL argument that is not a directory.
1027 * Zero, one or two objects may be locked by this function.
1028 *
1029 * @inode1: first inode to lock
1030 * @inode2: second inode to lock
1031 */
1032void lock_two_nondirectories(struct inode *inode1, struct inode *inode2)
1033{
1034	if (inode1 > inode2)
1035		swap(inode1, inode2);
1036
1037	if (inode1 && !S_ISDIR(inode1->i_mode))
1038		inode_lock(inode1);
1039	if (inode2 && !S_ISDIR(inode2->i_mode) && inode2 != inode1)
1040		inode_lock_nested(inode2, I_MUTEX_NONDIR2);
1041}
1042EXPORT_SYMBOL(lock_two_nondirectories);
1043
1044/**
1045 * unlock_two_nondirectories - release locks from lock_two_nondirectories()
1046 * @inode1: first inode to unlock
1047 * @inode2: second inode to unlock
1048 */
1049void unlock_two_nondirectories(struct inode *inode1, struct inode *inode2)
1050{
1051	if (inode1 && !S_ISDIR(inode1->i_mode))
1052		inode_unlock(inode1);
1053	if (inode2 && !S_ISDIR(inode2->i_mode) && inode2 != inode1)
1054		inode_unlock(inode2);
1055}
1056EXPORT_SYMBOL(unlock_two_nondirectories);
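
/*
 * Example: operations that act on two regular files at once (e.g.
 * deduplication or exchange) lock both victims with a single call,
 * which also sorts the pair so the lock order stays stable:
 *
 *	lock_two_nondirectories(src, dst);
 *	... do the two-file work ...
 *	unlock_two_nondirectories(src, dst);
 */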
1057
1058/**
1059 * inode_insert5 - obtain an inode from a mounted file system
1060 * @inode:	pre-allocated inode to use for insert to cache
1061 * @hashval:	hash value (usually inode number) to get
1062 * @test:	callback used for comparisons between inodes
1063 * @set:	callback used to initialize a new struct inode
1064 * @data:	opaque data pointer to pass to @test and @set
1065 *
1066 * Search for the inode specified by @hashval and @data in the inode cache,
1067 * and if present return it with an increased reference count. This is
1068 * a variant of iget5_locked() for callers that don't want to fail on memory
1069 * allocation of the inode.
1070 *
1071 * If the inode is not in cache, insert the pre-allocated inode to cache and
1072 * return it locked, hashed, and with the I_NEW flag set. The file system gets
1073 * to fill it in before unlocking it via unlock_new_inode().
1074 *
1075 * Note both @test and @set are called with the inode_hash_lock held, so can't
1076 * sleep.
1077 */
1078struct inode *inode_insert5(struct inode *inode, unsigned long hashval,
1079			    int (*test)(struct inode *, void *),
1080			    int (*set)(struct inode *, void *), void *data)
1081{
1082	struct hlist_head *head = inode_hashtable + hash(inode->i_sb, hashval);
1083	struct inode *old;
1084	bool creating = inode->i_state & I_CREATING;
1085
1086again:
1087	spin_lock(&inode_hash_lock);
1088	old = find_inode(inode->i_sb, head, test, data);
1089	if (unlikely(old)) {
1090		/*
1091		 * Uhhuh, somebody else created the same inode under us.
1092		 * Use the old inode instead of the preallocated one.
1093		 */
1094		spin_unlock(&inode_hash_lock);
1095		if (IS_ERR(old))
1096			return NULL;
1097		wait_on_inode(old);
1098		if (unlikely(inode_unhashed(old))) {
1099			iput(old);
1100			goto again;
1101		}
1102		return old;
1103	}
1104
1105	if (set && unlikely(set(inode, data))) {
1106		inode = NULL;
1107		goto unlock;
1108	}
1109
1110	/*
1111	 * Return the locked inode with I_NEW set, the
1112	 * caller is responsible for filling in the contents
1113	 */
1114	spin_lock(&inode->i_lock);
1115	inode->i_state |= I_NEW;
1116	hlist_add_head_rcu(&inode->i_hash, head);
1117	spin_unlock(&inode->i_lock);
1118	if (!creating)
1119		inode_sb_list_add(inode);
1120unlock:
1121	spin_unlock(&inode_hash_lock);
1122
1123	return inode;
1124}
1125EXPORT_SYMBOL(inode_insert5);
1126
1127/**
1128 * iget5_locked - obtain an inode from a mounted file system
1129 * @sb:		super block of file system
1130 * @hashval:	hash value (usually inode number) to get
1131 * @test:	callback used for comparisons between inodes
1132 * @set:	callback used to initialize a new struct inode
1133 * @data:	opaque data pointer to pass to @test and @set
1134 *
1135 * Search for the inode specified by @hashval and @data in the inode cache,
1136 * and if present return it with an increased reference count. This is
1137 * a generalized version of iget_locked() for file systems where the inode
1138 * number is not sufficient for unique identification of an inode.
1139 *
1140 * If the inode is not in cache, allocate a new inode and return it locked,
1141 * hashed, and with the I_NEW flag set. The file system gets to fill it in
1142 * before unlocking it via unlock_new_inode().
1143 *
1144 * Note both @test and @set are called with the inode_hash_lock held, so can't
1145 * sleep.
1146 */
1147struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
1148		int (*test)(struct inode *, void *),
1149		int (*set)(struct inode *, void *), void *data)
1150{
1151	struct inode *inode = ilookup5(sb, hashval, test, data);
1152
1153	if (!inode) {
1154		struct inode *new = alloc_inode(sb);
1155
1156		if (new) {
1157			new->i_state = 0;
1158			inode = inode_insert5(new, hashval, test, set, data);
1159			if (unlikely(inode != new))
1160				destroy_inode(new);
1161		}
1162	}
1163	return inode;
1164}
1165EXPORT_SYMBOL(iget5_locked);
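
/*
 * Example (sketch): a filesystem whose inodes are keyed by more than
 * i_ino supplies @test/@set callbacks; struct myfs_key and the
 * MYFS_I() accessor are hypothetical.
 */
struct myfs_key {
	u64 objectid;
};

static int myfs_test(struct inode *inode, void *data)
{
	struct myfs_key *key = data;

	return MYFS_I(inode)->objectid == key->objectid;
}

static int myfs_set(struct inode *inode, void *data)
{
	struct myfs_key *key = data;

	MYFS_I(inode)->objectid = key->objectid;
	return 0;
}

static struct inode *myfs_iget5(struct super_block *sb, struct myfs_key *key)
{
	return iget5_locked(sb, (unsigned long)key->objectid,
			    myfs_test, myfs_set, key);
}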
1166
1167/**
1168 * iget_locked - obtain an inode from a mounted file system
1169 * @sb:		super block of file system
1170 * @ino:	inode number to get
1171 *
1172 * Search for the inode specified by @ino in the inode cache and if present
1173 * return it with an increased reference count. This is for file systems
1174 * where the inode number is sufficient for unique identification of an inode.
1175 *
1176 * If the inode is not in cache, allocate a new inode and return it locked,
1177 * hashed, and with the I_NEW flag set.  The file system gets to fill it in
1178 * before unlocking it via unlock_new_inode().
1179 */
1180struct inode *iget_locked(struct super_block *sb, unsigned long ino)
1181{
1182	struct hlist_head *head = inode_hashtable + hash(sb, ino);
1183	struct inode *inode;
1184again:
1185	spin_lock(&inode_hash_lock);
1186	inode = find_inode_fast(sb, head, ino);
1187	spin_unlock(&inode_hash_lock);
1188	if (inode) {
1189		if (IS_ERR(inode))
1190			return NULL;
1191		wait_on_inode(inode);
1192		if (unlikely(inode_unhashed(inode))) {
1193			iput(inode);
1194			goto again;
1195		}
1196		return inode;
1197	}
1198
1199	inode = alloc_inode(sb);
1200	if (inode) {
1201		struct inode *old;
1202
1203		spin_lock(&inode_hash_lock);
1204		/* We released the lock, so.. */
1205		old = find_inode_fast(sb, head, ino);
1206		if (!old) {
1207			inode->i_ino = ino;
1208			spin_lock(&inode->i_lock);
1209			inode->i_state = I_NEW;
1210			hlist_add_head_rcu(&inode->i_hash, head);
1211			spin_unlock(&inode->i_lock);
1212			inode_sb_list_add(inode);
1213			spin_unlock(&inode_hash_lock);
1214
1215			/* Return the locked inode with I_NEW set, the
1216			 * caller is responsible for filling in the contents
1217			 */
1218			return inode;
1219		}
1220
1221		/*
1222		 * Uhhuh, somebody else created the same inode under
1223		 * us. Use the old inode instead of the one we just
1224		 * allocated.
1225		 */
1226		spin_unlock(&inode_hash_lock);
1227		destroy_inode(inode);
1228		if (IS_ERR(old))
1229			return NULL;
1230		inode = old;
1231		wait_on_inode(inode);
1232		if (unlikely(inode_unhashed(inode))) {
1233			iput(inode);
1234			goto again;
1235		}
1236	}
1237	return inode;
1238}
1239EXPORT_SYMBOL(iget_locked);
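
/*
 * Example (a minimal sketch): a simple disk filesystem would wrap
 * iget_locked() roughly like this; myfs_iget() and myfs_read_inode()
 * are hypothetical names.
 */
static struct inode *myfs_iget(struct super_block *sb, unsigned long ino)
{
	struct inode *inode = iget_locked(sb, ino);

	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;		/* found in cache, already set up */

	if (myfs_read_inode(inode) < 0) {
		iget_failed(inode);	/* unhash and drop the half-made inode */
		return ERR_PTR(-EIO);
	}

	unlock_new_inode(inode);	/* clear I_NEW and wake any waiters */
	return inode;
}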
1240
1241/*
1242 * search the inode cache for a matching inode number.
1243 * If we find one, then the inode number we are trying to
1244 * allocate is not unique and so we should not use it.
1245 *
1246 * Returns 1 if the inode number is unique, 0 if it is not.
1247 */
1248static int test_inode_iunique(struct super_block *sb, unsigned long ino)
1249{
1250	struct hlist_head *b = inode_hashtable + hash(sb, ino);
1251	struct inode *inode;
1252
1253	hlist_for_each_entry_rcu(inode, b, i_hash) {
1254		if (inode->i_ino == ino && inode->i_sb == sb)
1255			return 0;
1256	}
1257	return 1;
1258}
1259
1260/**
1261 *	iunique - get a unique inode number
1262 *	@sb: superblock
1263 *	@max_reserved: highest reserved inode number
1264 *
1265 *	Obtain an inode number that is unique on the system for a given
1266 *	superblock. This is used by file systems that have no natural
1267 *	permanent inode numbering system. An inode number is returned that
1268 *	is higher than the reserved limit but unique.
1269 *
1270 *	BUGS:
1271 *	With a large number of inodes live on the file system this function
1272 *	currently becomes quite slow.
1273 */
1274ino_t iunique(struct super_block *sb, ino_t max_reserved)
1275{
1276	/*
1277	 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
1278	 * error if st_ino won't fit in target struct field. Use 32bit counter
1279	 * here to attempt to avoid that.
1280	 */
1281	static DEFINE_SPINLOCK(iunique_lock);
1282	static unsigned int counter;
1283	ino_t res;
1284
1285	rcu_read_lock();
1286	spin_lock(&iunique_lock);
1287	do {
1288		if (counter <= max_reserved)
1289			counter = max_reserved + 1;
1290		res = counter++;
1291	} while (!test_inode_iunique(sb, res));
1292	spin_unlock(&iunique_lock);
1293	rcu_read_unlock();
1294
1295	return res;
1296}
1297EXPORT_SYMBOL(iunique);
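
/*
 * Example: a filesystem that fabricates inode numbers but still hashes
 * its inodes (so get_next_ino() collisions would be visible) can use:
 *
 *	inode->i_ino = iunique(sb, MYFS_MAX_RESERVED_INO);
 *
 * where MYFS_MAX_RESERVED_INO is a hypothetical reserved-range ceiling.
 */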
1298
1299struct inode *igrab(struct inode *inode)
1300{
1301	spin_lock(&inode->i_lock);
1302	if (!(inode->i_state & (I_FREEING|I_WILL_FREE))) {
1303		__iget(inode);
1304		spin_unlock(&inode->i_lock);
1305	} else {
1306		spin_unlock(&inode->i_lock);
1307		/*
1308		 * Handle the case where s_op->clear_inode has not been
1309		 * called yet, and somebody is calling igrab
1310		 * while the inode is getting freed.
1311		 */
1312		inode = NULL;
1313	}
1314	return inode;
1315}
1316EXPORT_SYMBOL(igrab);
1317
1318/**
1319 * ilookup5_nowait - search for an inode in the inode cache
1320 * @sb:		super block of file system to search
1321 * @hashval:	hash value (usually inode number) to search for
1322 * @test:	callback used for comparisons between inodes
1323 * @data:	opaque data pointer to pass to @test
1324 *
1325 * Search for the inode specified by @hashval and @data in the inode cache.
1326 * If the inode is in the cache, the inode is returned with an incremented
1327 * reference count.
1328 *
1329 * Note: I_NEW is not waited upon so you have to be very careful what you do
1330 * with the returned inode.  You probably should be using ilookup5() instead.
1331 *
1332 * Note2: @test is called with the inode_hash_lock held, so can't sleep.
1333 */
1334struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval,
1335		int (*test)(struct inode *, void *), void *data)
1336{
1337	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
1338	struct inode *inode;
1339
1340	spin_lock(&inode_hash_lock);
1341	inode = find_inode(sb, head, test, data);
1342	spin_unlock(&inode_hash_lock);
1343
1344	return IS_ERR(inode) ? NULL : inode;
1345}
1346EXPORT_SYMBOL(ilookup5_nowait);
1347
1348/**
1349 * ilookup5 - search for an inode in the inode cache
1350 * @sb:		super block of file system to search
1351 * @hashval:	hash value (usually inode number) to search for
1352 * @test:	callback used for comparisons between inodes
1353 * @data:	opaque data pointer to pass to @test
1354 *
1355 * Search for the inode specified by @hashval and @data in the inode cache,
1356 * and if the inode is in the cache, return the inode with an incremented
1357 * reference count.  Waits on I_NEW before returning the inode, so
1358 * the returned inode is fully initialised by the filesystem.
1359 *
1360 * This is a generalized version of ilookup() for file systems where the
1361 * inode number is not sufficient for unique identification of an inode.
1362 *
1363 * Note: @test is called with the inode_hash_lock held, so can't sleep.
1364 */
1365struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
1366		int (*test)(struct inode *, void *), void *data)
1367{
1368	struct inode *inode;
1369again:
1370	inode = ilookup5_nowait(sb, hashval, test, data);
1371	if (inode) {
1372		wait_on_inode(inode);
1373		if (unlikely(inode_unhashed(inode))) {
1374			iput(inode);
1375			goto again;
1376		}
1377	}
1378	return inode;
1379}
1380EXPORT_SYMBOL(ilookup5);
1381
1382/**
1383 * ilookup - search for an inode in the inode cache
1384 * @sb:		super block of file system to search
1385 * @ino:	inode number to search for
1386 *
1387 * Search for the inode @ino in the inode cache, and if the inode is in the
1388 * cache, the inode is returned with an incremented reference count.
1389 */
1390struct inode *ilookup(struct super_block *sb, unsigned long ino)
1391{
1392	struct hlist_head *head = inode_hashtable + hash(sb, ino);
1393	struct inode *inode;
1394again:
1395	spin_lock(&inode_hash_lock);
1396	inode = find_inode_fast(sb, head, ino);
1397	spin_unlock(&inode_hash_lock);
1398
1399	if (inode) {
1400		if (IS_ERR(inode))
1401			return NULL;
1402		wait_on_inode(inode);
1403		if (unlikely(inode_unhashed(inode))) {
1404			iput(inode);
1405			goto again;
1406		}
1407	}
1408	return inode;
1409}
1410EXPORT_SYMBOL(ilookup);
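
/*
 * Example: ilookup() fits cases where an event names an inode by
 * number and only cached inodes matter:
 *
 *	inode = ilookup(sb, ino);
 *	if (inode) {
 *		... invalidate or revalidate cached state ...
 *		iput(inode);
 *	}
 */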
1411
1412/**
1413 * find_inode_nowait - find an inode in the inode cache
1414 * @sb:		super block of file system to search
1415 * @hashval:	hash value (usually inode number) to search for
1416 * @match:	callback used for comparisons between inodes
1417 * @data:	opaque data pointer to pass to @match
1418 *
1419 * Search for the inode specified by @hashval and @data in the inode
1420 * cache, where the helper function @match will return 0 if the inode
1421 * does not match, 1 if the inode does match, and -1 if the search
1422 * should be stopped.  The @match function is responsible for
1423 * taking the i_lock spin_lock and checking i_state for an inode being
1424 * freed or being initialized, and incrementing the reference count
1425 * before returning 1.  It also must not sleep, since it is called with
1426 * the inode_hash_lock spinlock held.
1427 *
1428 * This is an even more generalized version of ilookup5() when the
1429 * function must never block --- find_inode() can block in
1430 * __wait_on_freeing_inode() --- or when the caller cannot increment
1431 * the reference count because the resulting iput() might cause an
1432 * inode eviction.  The tradeoff is that the @match function must be
1433 * very carefully implemented.
1434 */
1435struct inode *find_inode_nowait(struct super_block *sb,
1436				unsigned long hashval,
1437				int (*match)(struct inode *, unsigned long,
1438					     void *),
1439				void *data)
1440{
1441	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
1442	struct inode *inode, *ret_inode = NULL;
1443	int mval;
1444
1445	spin_lock(&inode_hash_lock);
1446	hlist_for_each_entry(inode, head, i_hash) {
1447		if (inode->i_sb != sb)
1448			continue;
1449		mval = match(inode, hashval, data);
1450		if (mval == 0)
1451			continue;
1452		if (mval == 1)
1453			ret_inode = inode;
1454		goto out;
1455	}
1456out:
1457	spin_unlock(&inode_hash_lock);
1458	return ret_inode;
1459}
1460EXPORT_SYMBOL(find_inode_nowait);
1461
1462/**
1463 * find_inode_rcu - find an inode in the inode cache
1464 * @sb:		Super block of file system to search
1465 * @hashval:	Key to hash
1466 * @test:	Function to test match on an inode
1467 * @data:	Data for test function
1468 *
1469 * Search for the inode specified by @hashval and @data in the inode cache,
1470 * where the helper function @test will return 0 if the inode does not match
1471 * and 1 if it does.  The @test function is responsible for taking the
1472 * i_lock spin_lock and checking i_state for an inode being freed or being
1473 * initialized.
1474 *
1475 * If successful, this will return the inode for which the @test function
1476 * returned 1 and NULL otherwise.
1477 *
1478 * The @test function is not permitted to take a ref on any inode presented.
1479 * It is also not permitted to sleep.
1480 *
1481 * The caller must hold the RCU read lock.
1482 */
1483struct inode *find_inode_rcu(struct super_block *sb, unsigned long hashval,
1484			     int (*test)(struct inode *, void *), void *data)
1485{
1486	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
1487	struct inode *inode;
1488
1489	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
1490			 "suspicious find_inode_rcu() usage");
1491
1492	hlist_for_each_entry_rcu(inode, head, i_hash) {
1493		if (inode->i_sb == sb &&
1494		    !(READ_ONCE(inode->i_state) & (I_FREEING | I_WILL_FREE)) &&
1495		    test(inode, data))
1496			return inode;
1497	}
1498	return NULL;
1499}
1500EXPORT_SYMBOL(find_inode_rcu);
1501
1502/**
1503 * find_inode_by_ino_rcu - Find an inode in the inode cache
1504 * @sb:		Super block of file system to search
1505 * @ino:	The inode number to match
1506 *
1507 * Search for the inode specified by @ino in the inode cache, where
1508 * matching is done on the inode number and the superblock; unlike
1509 * find_inode_rcu() there is no @test callback.  Inodes that are being
1510 * freed or are still being initialised are skipped.
1511 *
1512 * If successful, this will return the matching inode and NULL
1513 * otherwise.
1514 *
1515 * No reference is taken on the returned inode; the pointer is only
1516 * guaranteed to stay valid for as long as the RCU read lock is held,
1517 * so callers must not sleep while using it.
1518 *
1519 * The caller must hold the RCU read lock.
1520 */
1521struct inode *find_inode_by_ino_rcu(struct super_block *sb,
1522				    unsigned long ino)
1523{
1524	struct hlist_head *head = inode_hashtable + hash(sb, ino);
1525	struct inode *inode;
1526
1527	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
1528			 "suspicious find_inode_by_ino_rcu() usage");
1529
1530	hlist_for_each_entry_rcu(inode, head, i_hash) {
1531		if (inode->i_ino == ino &&
1532		    inode->i_sb == sb &&
1533		    !(READ_ONCE(inode->i_state) & (I_FREEING | I_WILL_FREE)))
1534			return inode;
1535	}
1536	return NULL;
1537}
1538EXPORT_SYMBOL(find_inode_by_ino_rcu);
1539
1540int insert_inode_locked(struct inode *inode)
1541{
1542	struct super_block *sb = inode->i_sb;
1543	ino_t ino = inode->i_ino;
1544	struct hlist_head *head = inode_hashtable + hash(sb, ino);
1545
1546	while (1) {
1547		struct inode *old = NULL;
1548		spin_lock(&inode_hash_lock);
1549		hlist_for_each_entry(old, head, i_hash) {
1550			if (old->i_ino != ino)
1551				continue;
1552			if (old->i_sb != sb)
1553				continue;
1554			spin_lock(&old->i_lock);
1555			if (old->i_state & (I_FREEING|I_WILL_FREE)) {
1556				spin_unlock(&old->i_lock);
1557				continue;
1558			}
1559			break;
1560		}
1561		if (likely(!old)) {
1562			spin_lock(&inode->i_lock);
1563			inode->i_state |= I_NEW | I_CREATING;
1564			hlist_add_head_rcu(&inode->i_hash, head);
1565			spin_unlock(&inode->i_lock);
1566			spin_unlock(&inode_hash_lock);
1567			return 0;
1568		}
1569		if (unlikely(old->i_state & I_CREATING)) {
1570			spin_unlock(&old->i_lock);
1571			spin_unlock(&inode_hash_lock);
1572			return -EBUSY;
1573		}
1574		__iget(old);
1575		spin_unlock(&old->i_lock);
1576		spin_unlock(&inode_hash_lock);
1577		wait_on_inode(old);
1578		if (unlikely(!inode_unhashed(old))) {
1579			iput(old);
1580			return -EBUSY;
1581		}
1582		iput(old);
1583	}
1584}
1585EXPORT_SYMBOL(insert_inode_locked);
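/*
 * Sketch of the usual caller pattern (illustrative, not from this
 * file): a filesystem allocates a new inode, claims its hash slot
 * with insert_inode_locked(), fills the inode in, and only then
 * clears I_NEW so that concurrent lookups may use it:
 *
 *	inode = new_inode(sb);
 *	if (!inode)
 *		return -ENOMEM;
 *	inode->i_ino = ino;
 *	if (insert_inode_locked(inode) < 0) {
 *		// somebody else already owns this inode number
 *		iput(inode);
 *		return -EBUSY;
 *	}
 *	// ... read the on-disk inode and initialise fields ...
 *	unlock_new_inode(inode);
 */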
1586
1587int insert_inode_locked4(struct inode *inode, unsigned long hashval,
1588		int (*test)(struct inode *, void *), void *data)
1589{
1590	struct inode *old;
1591
1592	inode->i_state |= I_CREATING;
1593	old = inode_insert5(inode, hashval, test, NULL, data);
1594
1595	if (old != inode) {
1596		iput(old);
1597		return -EBUSY;
1598	}
1599	return 0;
1600}
1601EXPORT_SYMBOL(insert_inode_locked4);
1602
1603
1604int generic_delete_inode(struct inode *inode)
1605{
1606	return 1;
1607}
1608EXPORT_SYMBOL(generic_delete_inode);
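/*
 * Illustrative wiring (an assumption, not from this file): a
 * filesystem that never wants unreferenced inodes to linger in the
 * cache plugs this helper in as its ->drop_inode operation:
 *
 *	static const struct super_operations example_sops = {
 *		.drop_inode	= generic_delete_inode,
 *		// ...
 *	};
 */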
1609
1610/*
1611 * Called when we're dropping the last reference
1612 * to an inode.
1613 *
1614 * Call the FS "drop_inode()" function, defaulting to
1615 * the legacy UNIX filesystem behaviour.  If it tells
1616 * us to evict the inode, do so.  Otherwise, retain the inode
1617 * in cache if the fs is alive; sync and evict it if the fs is
1618 * shutting down.
1619 */
1620static void iput_final(struct inode *inode)
1621{
1622	struct super_block *sb = inode->i_sb;
1623	const struct super_operations *op = inode->i_sb->s_op;
1624	unsigned long state;
1625	int drop;
1626
1627	WARN_ON(inode->i_state & I_NEW);
1628
1629	if (op->drop_inode)
1630		drop = op->drop_inode(inode);
1631	else
1632		drop = generic_drop_inode(inode);
1633
1634	if (!drop &&
1635	    !(inode->i_state & I_DONTCACHE) &&
1636	    (sb->s_flags & SB_ACTIVE)) {
1637		inode_add_lru(inode);
1638		spin_unlock(&inode->i_lock);
1639		return;
1640	}
1641
1642	state = inode->i_state;
1643	if (!drop) {
1644		WRITE_ONCE(inode->i_state, state | I_WILL_FREE);
1645		spin_unlock(&inode->i_lock);
1646
1647		write_inode_now(inode, 1);
1648
1649		spin_lock(&inode->i_lock);
1650		state = inode->i_state;
1651		WARN_ON(state & I_NEW);
1652		state &= ~I_WILL_FREE;
1653	}
1654
1655	WRITE_ONCE(inode->i_state, state | I_FREEING);
1656	if (!list_empty(&inode->i_lru))
1657		inode_lru_list_del(inode);
1658	spin_unlock(&inode->i_lock);
1659
1660	evict(inode);
1661}
1662
1663/**
1664 *	iput	- put an inode
1665 *	@inode: inode to put
1666 *
1667 *	Puts an inode, dropping its usage count. If the inode use count hits
1668 *	zero, the inode is then freed and may also be destroyed.
1669 *
1670 *	Consequently, iput() can sleep.
1671 */
1672void iput(struct inode *inode)
1673{
1674	if (!inode)
1675		return;
1676	BUG_ON(inode->i_state & I_CLEAR);
1677retry:
1678	if (atomic_dec_and_lock(&inode->i_count, &inode->i_lock)) {
1679		if (inode->i_nlink && (inode->i_state & I_DIRTY_TIME)) {
1680			atomic_inc(&inode->i_count);
1681			spin_unlock(&inode->i_lock);
1682			trace_writeback_lazytime_iput(inode);
1683			mark_inode_dirty_sync(inode);
1684			goto retry;
1685		}
1686		iput_final(inode);
1687	}
1688}
1689EXPORT_SYMBOL(iput);
1690
1691#ifdef CONFIG_BLOCK
1692/**
1693 *	bmap	- find a block number in a file
1694 *	@inode:  inode owning the block number being requested
1695 *	@block: pointer containing the block to find
1696 *
1697 *	Replaces the value in ``*block`` with the number of the block on the
1698 *	device that corresponds to the requested block number in the file.
1699 *	That is, asked for block 4 of inode 1, the function will replace the
1700 *	4 in ``*block`` with the block number, relative to the start of the
1701 *	disk, that holds that block of the file.
1702 *
1703 *	Returns -EINVAL in case of error, 0 otherwise. If mapping falls into a
1704 *	hole, returns 0 and ``*block`` is also set to 0.
1705 */
1706int bmap(struct inode *inode, sector_t *block)
1707{
1708	if (!inode->i_mapping->a_ops->bmap)
1709		return -EINVAL;
1710
1711	*block = inode->i_mapping->a_ops->bmap(inode->i_mapping, *block);
1712	return 0;
1713}
1714EXPORT_SYMBOL(bmap);
1715#endif
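/*
 * A minimal usage sketch (variable names are assumptions): map
 * logical block 4 of a file to its on-disk block, FIBMAP style:
 *
 *	sector_t blk = 4;	// block 4 within the file
 *
 *	if (bmap(inode, &blk) == 0 && blk != 0)
 *		pr_debug("file block 4 lives in disk block %llu\n",
 *			 (unsigned long long)blk);
 *	// blk == 0 after a successful call means the block is a hole
 */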
1716
1717/*
1718 * With relative atime, only update atime if the previous atime is
1719 * earlier than either the ctime or mtime or if at least a day has
1720 * passed since the last atime update.
1721 */
1722static int relatime_need_update(struct vfsmount *mnt, struct inode *inode,
1723			     struct timespec64 now)
1724{
1725
1726	if (!(mnt->mnt_flags & MNT_RELATIME))
1727		return 1;
1728	/*
1729	 * Is mtime younger than atime? If yes, update atime:
1730	 */
1731	if (timespec64_compare(&inode->i_mtime, &inode->i_atime) >= 0)
1732		return 1;
1733	/*
1734	 * Is ctime younger than atime? If yes, update atime:
1735	 */
1736	if (timespec64_compare(&inode->i_ctime, &inode->i_atime) >= 0)
1737		return 1;
1738
1739	/*
1740	 * Is the previous atime value older than a day? If yes,
1741	 * update atime:
1742	 */
1743	if ((long)(now.tv_sec - inode->i_atime.tv_sec) >= 24*60*60)
1744		return 1;
1745	/*
1746	 * Good, we can skip the atime update:
1747	 */
1748	return 0;
1749}
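/*
 * Worked example with illustrative numbers: on a MNT_RELATIME mount
 * with atime = 1000, mtime = 900, ctime = 800 and now = 1500 (all in
 * seconds), neither mtime nor ctime is newer than atime and less than
 * a day has passed, so this returns 0 and the atime update is
 * skipped.  Once the file is modified (mtime >= atime) or now reaches
 * 1000 + 24*60*60, the function returns 1 again.
 */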
1750
1751int generic_update_time(struct inode *inode, struct timespec64 *time, int flags)
1752{
1753	int dirty_flags = 0;
1754
1755	if (flags & (S_ATIME | S_CTIME | S_MTIME)) {
1756		if (flags & S_ATIME)
1757			inode->i_atime = *time;
1758		if (flags & S_CTIME)
1759			inode->i_ctime = *time;
1760		if (flags & S_MTIME)
1761			inode->i_mtime = *time;
1762
1763		if (inode->i_sb->s_flags & SB_LAZYTIME)
1764			dirty_flags |= I_DIRTY_TIME;
1765		else
1766			dirty_flags |= I_DIRTY_SYNC;
1767	}
1768
1769	if ((flags & S_VERSION) && inode_maybe_inc_iversion(inode, false))
1770		dirty_flags |= I_DIRTY_SYNC;
1771
1772	__mark_inode_dirty(inode, dirty_flags);
1773	return 0;
1774}
1775EXPORT_SYMBOL(generic_update_time);
1776
1777/*
1778 * This does the actual work of updating an inode's time or version.  The
1779 * caller must have called mnt_want_write() before calling this.
1780 */
1781static int update_time(struct inode *inode, struct timespec64 *time, int flags)
1782{
1783	if (inode->i_op->update_time)
1784		return inode->i_op->update_time(inode, time, flags);
1785	return generic_update_time(inode, time, flags);
1786}
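/*
 * A filesystem may intercept timestamp updates by providing
 * ->update_time in its inode_operations; a minimal sketch (the
 * function name is an assumption) that adds private bookkeeping and
 * then defers to the generic helper:
 *
 *	static int example_update_time(struct inode *inode,
 *				       struct timespec64 *time, int flags)
 *	{
 *		// filesystem-private bookkeeping would go here
 *		return generic_update_time(inode, time, flags);
 *	}
 */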
1787
1788/**
1789 *	atime_needs_update	-	check whether the access time needs updating
1790 *	@path: the &struct path being accessed
1791 *	@inode: inode of the path
1792 *
1793 *	Decide whether the access time on an inode needs to be updated.
1794 *	This function automatically handles read only file systems and media,
1795 *	as well as the "noatime" flag and inode specific "noatime" markers.
1796 */
1797bool atime_needs_update(const struct path *path, struct inode *inode)
1798{
1799	struct vfsmount *mnt = path->mnt;
1800	struct timespec64 now;
1801
1802	if (inode->i_flags & S_NOATIME)
1803		return false;
1804
1805	/* Atime updates will likely cause i_uid and i_gid to be written
1806 * back improperly if their true value is unknown to the vfs.
1807	 */
1808	if (HAS_UNMAPPED_ID(mnt_user_ns(mnt), inode))
1809		return false;
1810
1811	if (IS_NOATIME(inode))
1812		return false;
1813	if ((inode->i_sb->s_flags & SB_NODIRATIME) && S_ISDIR(inode->i_mode))
1814		return false;
1815
1816	if (mnt->mnt_flags & MNT_NOATIME)
1817		return false;
1818	if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))
1819		return false;
1820
1821	now = current_time(inode);
1822
1823	if (!relatime_need_update(mnt, inode, now))
1824		return false;
1825
1826	if (timespec64_equal(&inode->i_atime, &now))
1827		return false;
1828
1829	return true;
1830}
1831
1832void touch_atime(const struct path *path)
1833{
1834	struct vfsmount *mnt = path->mnt;
1835	struct inode *inode = d_inode(path->dentry);
1836	struct timespec64 now;
1837
1838	if (!atime_needs_update(path, inode))
1839		return;
1840
1841	if (!sb_start_write_trylock(inode->i_sb))
1842		return;
1843
1844	if (__mnt_want_write(mnt) != 0)
1845		goto skip_update;
1846	/*
1847	 * File systems can error out when updating inodes if they need to
1848	 * allocate new space to modify an inode (such is the case for
1849	 * Btrfs), but since we touch atime while walking down the path we
1850	 * really don't care if we failed to update the atime of the file,
1851	 * so just ignore the return value.
1852	 * We may also fail on filesystems that have the ability to make parts
1853	 * of the fs read only, e.g. subvolumes in Btrfs.
1854	 */
1855	now = current_time(inode);
1856	update_time(inode, &now, S_ATIME);
1857	__mnt_drop_write(mnt);
1858skip_update:
1859	sb_end_write(inode->i_sb);
1860}
1861EXPORT_SYMBOL(touch_atime);
1862
1863/*
1864 * The logic we want is
1865 *
1866 *	if suid or (sgid and xgrp)
1867 *		remove privs
1868 */
1869int should_remove_suid(struct dentry *dentry)
1870{
1871	umode_t mode = d_inode(dentry)->i_mode;
1872	int kill = 0;
1873
1874	/* suid always must be killed */
1875	if (unlikely(mode & S_ISUID))
1876		kill = ATTR_KILL_SUID;
1877
1878	/*
1879	 * sgid without any exec bits is just a mandatory locking mark; leave
1880	 * it alone.  If some exec bits are set, it's a real sgid; kill it.
1881	 */
1882	if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
1883		kill |= ATTR_KILL_SGID;
1884
1885	if (unlikely(kill && !capable(CAP_FSETID) && S_ISREG(mode)))
1886		return kill;
1887
1888	return 0;
1889}
1890EXPORT_SYMBOL(should_remove_suid);
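/*
 * Worked example (illustrative modes): for a regular file with mode
 * 06755 (setuid, setgid and group-exec set) written by a task without
 * CAP_FSETID, this returns ATTR_KILL_SUID | ATTR_KILL_SGID.  For mode
 * 02644 (setgid without group-exec, the mandatory-locking marker) it
 * returns 0 and the mode is left alone.
 */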
1891
1892/*
1893 * Return mask of changes for notify_change() that need to be done as a
1894 * response to write or truncate. Return 0 if nothing has to be changed.
1895 * Negative value on error (change should be denied).
1896 */
1897int dentry_needs_remove_privs(struct dentry *dentry)
1898{
1899	struct inode *inode = d_inode(dentry);
1900	int mask = 0;
1901	int ret;
1902
1903	if (IS_NOSEC(inode))
1904		return 0;
1905
1906	mask = should_remove_suid(dentry);
1907	ret = security_inode_need_killpriv(dentry);
1908	if (ret < 0)
1909		return ret;
1910	if (ret)
1911		mask |= ATTR_KILL_PRIV;
1912	return mask;
1913}
1914
1915static int __remove_privs(struct user_namespace *mnt_userns,
1916			  struct dentry *dentry, int kill)
1917{
1918	struct iattr newattrs;
1919
1920	newattrs.ia_valid = ATTR_FORCE | kill;
1921	/*
1922	 * Note we call this on write, so notify_change will not
1923	 * encounter any conflicting delegations:
1924	 */
1925	return notify_change(mnt_userns, dentry, &newattrs, NULL);
1926}
1927
1928/*
1929 * Remove special file privileges (suid, capabilities) when file is written
1930 * to or truncated.
1931 */
1932int file_remove_privs(struct file *file)
1933{
1934	struct dentry *dentry = file_dentry(file);
1935	struct inode *inode = file_inode(file);
1936	int kill;
1937	int error = 0;
1938
1939	/*
1940	 * Fast path for nothing security related.
1941	 * As well for non-regular files, e.g. blkdev inodes.
1942	 * For example, blkdev_write_iter() might get here
1943	 * trying to remove privs which it is not allowed to.
1944	 */
1945	if (IS_NOSEC(inode) || !S_ISREG(inode->i_mode))
1946		return 0;
1947
1948	kill = dentry_needs_remove_privs(dentry);
1949	if (kill < 0)
1950		return kill;
1951	if (kill)
1952		error = __remove_privs(file_mnt_user_ns(file), dentry, kill);
1953	if (!error)
1954		inode_has_no_xattr(inode);
1955
1956	return error;
1957}
1958EXPORT_SYMBOL(file_remove_privs);
1959
1960/**
1961 *	file_update_time	-	update mtime and ctime time
1962 *	@file: file accessed
1963 *
1964 *	Update the mtime and ctime members of an inode and mark the inode
1965 *	for writeback.  Note that this function is meant exclusively for
1966 *	usage in the file write path of filesystems, and filesystems may
1967 *	choose to explicitly ignore updates via this function with the
1968 *	S_NOCMTIME inode flag, e.g. for network filesystems where these
1969 *	timestamps are handled by the server.  This can return an error for
1970 *	file systems that need to allocate space in order to update an inode.
1971 */
1972
1973int file_update_time(struct file *file)
1974{
1975	struct inode *inode = file_inode(file);
1976	struct timespec64 now;
1977	int sync_it = 0;
1978	int ret;
1979
1980	/* First try to exhaust all avenues to not sync */
1981	if (IS_NOCMTIME(inode))
1982		return 0;
1983
1984	now = current_time(inode);
1985	if (!timespec64_equal(&inode->i_mtime, &now))
1986		sync_it = S_MTIME;
1987
1988	if (!timespec64_equal(&inode->i_ctime, &now))
1989		sync_it |= S_CTIME;
1990
1991	if (IS_I_VERSION(inode) && inode_iversion_need_inc(inode))
1992		sync_it |= S_VERSION;
1993
1994	if (!sync_it)
1995		return 0;
1996
1997	/* Finally allowed to write? Takes lock. */
1998	if (__mnt_want_write_file(file))
1999		return 0;
2000
2001	ret = update_time(inode, &now, sync_it);
2002	__mnt_drop_write_file(file);
2003
2004	return ret;
2005}
2006EXPORT_SYMBOL(file_update_time);
2007
2008/* Caller must hold the file's inode lock */
2009int file_modified(struct file *file)
2010{
2011	int err;
2012
2013	/*
2014	 * Clear the security bits if the process is not being run by root.
2015	 * This keeps people from modifying setuid and setgid binaries.
2016	 */
2017	err = file_remove_privs(file);
2018	if (err)
2019		return err;
2020
2021	if (unlikely(file->f_mode & FMODE_NOCMTIME))
2022		return 0;
2023
2024	return file_update_time(file);
2025}
2026EXPORT_SYMBOL(file_modified);
2027
2028int inode_needs_sync(struct inode *inode)
2029{
2030	if (IS_SYNC(inode))
2031		return 1;
2032	if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode))
2033		return 1;
2034	return 0;
2035}
2036EXPORT_SYMBOL(inode_needs_sync);
2037
2038/*
2039 * If we try to find an inode in the inode hash while it is being
2040 * deleted, we have to wait until the filesystem completes its
2041 * deletion before reporting that it isn't found.  This function waits
2042 * until the deletion _might_ have completed.  Callers are responsible
2043 * to recheck inode state.
2044 *
2045 * It doesn't matter if I_NEW is not set initially, a call to
2046 * wake_up_bit(&inode->i_state, __I_NEW) after removing from the hash list
2047 * will DTRT.
2048 */
2049static void __wait_on_freeing_inode(struct inode *inode)
2050{
2051	wait_queue_head_t *wq;
2052	DEFINE_WAIT_BIT(wait, &inode->i_state, __I_NEW);
2053	wq = bit_waitqueue(&inode->i_state, __I_NEW);
2054	prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
2055	spin_unlock(&inode->i_lock);
2056	spin_unlock(&inode_hash_lock);
2057	schedule();
2058	finish_wait(wq, &wait.wq_entry);
2059	spin_lock(&inode_hash_lock);
2060}
2061
2062static __initdata unsigned long ihash_entries;
2063static int __init set_ihash_entries(char *str)
2064{
2065	if (!str)
2066		return 0;
2067	ihash_entries = simple_strtoul(str, &str, 0);
2068	return 1;
2069}
2070__setup("ihash_entries=", set_ihash_entries);
2071
2072/*
2073 * Initialize the waitqueues and inode hash table.
2074 */
2075void __init inode_init_early(void)
2076{
2077	/* If hashes are distributed across NUMA nodes, defer
2078	 * hash allocation until vmalloc space is available.
2079	 */
2080	if (hashdist)
2081		return;
2082
2083	inode_hashtable =
2084		alloc_large_system_hash("Inode-cache",
2085					sizeof(struct hlist_head),
2086					ihash_entries,
2087					14,
2088					HASH_EARLY | HASH_ZERO,
2089					&i_hash_shift,
2090					&i_hash_mask,
2091					0,
2092					0);
2093}
2094
2095void __init inode_init(void)
2096{
2097	/* inode slab cache */
2098	inode_cachep = kmem_cache_create("inode_cache",
2099					 sizeof(struct inode),
2100					 0,
2101					 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
2102					 SLAB_MEM_SPREAD|SLAB_ACCOUNT),
2103					 init_once);
2104
2105	/* Hash may have been set up in inode_init_early */
2106	if (!hashdist)
2107		return;
2108
2109	inode_hashtable =
2110		alloc_large_system_hash("Inode-cache",
2111					sizeof(struct hlist_head),
2112					ihash_entries,
2113					14,
2114					HASH_ZERO,
2115					&i_hash_shift,
2116					&i_hash_mask,
2117					0,
2118					0);
2119}
2120
2121void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev)
2122{
2123	inode->i_mode = mode;
2124	if (S_ISCHR(mode)) {
2125		inode->i_fop = &def_chr_fops;
2126		inode->i_rdev = rdev;
2127	} else if (S_ISBLK(mode)) {
2128		inode->i_fop = &def_blk_fops;
2129		inode->i_rdev = rdev;
2130	} else if (S_ISFIFO(mode))
2131		inode->i_fop = &pipefifo_fops;
2132	else if (S_ISSOCK(mode))
2133		;	/* leave it no_open_fops */
2134	else
2135		printk(KERN_DEBUG "init_special_inode: bogus i_mode (%o) for"
2136				  " inode %s:%lu\n", mode, inode->i_sb->s_id,
2137				  inode->i_ino);
2138}
2139EXPORT_SYMBOL(init_special_inode);
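/*
 * Sketch of a typical caller (assumed context, not from this file): a
 * filesystem's mknod path hands freshly allocated device-node inodes
 * to this helper:
 *
 *	inode = new_inode(sb);
 *	if (inode) {
 *		inode->i_ino = get_next_ino();
 *		init_special_inode(inode, mode, dev);
 *		// ... set ownership, link into the directory ...
 *	}
 */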
2140
2141/**
2142 * inode_init_owner - Init uid,gid,mode for new inode according to posix standards
2143 * @mnt_userns:	User namespace of the mount the inode was created from
2144 * @inode: New inode
2145 * @dir: Directory inode
2146 * @mode: mode of the new inode
2147 *
2148 * If the inode has been created through an idmapped mount the user namespace of
2149 * the vfsmount must be passed through @mnt_userns. This function will then take
2150 * care to map the inode according to @mnt_userns before checking permissions
2151 * and initializing i_uid and i_gid. On non-idmapped mounts or if permission
2152 * checking is to be performed on the raw inode simply pass init_user_ns.
2153 */
2154void inode_init_owner(struct user_namespace *mnt_userns, struct inode *inode,
2155		      const struct inode *dir, umode_t mode)
2156{
2157	inode_fsuid_set(inode, mnt_userns);
2158	if (dir && dir->i_mode & S_ISGID) {
2159		inode->i_gid = dir->i_gid;
2160
2161		/* Directories are special, and always inherit S_ISGID */
2162		if (S_ISDIR(mode))
2163			mode |= S_ISGID;
2164		else if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP) &&
2165			 !in_group_p(i_gid_into_mnt(mnt_userns, dir)) &&
2166			 !capable_wrt_inode_uidgid(mnt_userns, dir, CAP_FSETID))
2167			mode &= ~S_ISGID;
2168	} else
2169		inode_fsgid_set(inode, mnt_userns);
2170	inode->i_mode = mode;
2171}
2172EXPORT_SYMBOL(inode_init_owner);
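/*
 * Worked example (illustrative modes): creating a file with requested
 * mode 0664 in a directory whose mode is 02775 (setgid) makes the new
 * inode inherit the directory's i_gid; a subdirectory created there
 * additionally gets S_ISGID set on its own mode, while a file
 * requesting S_ISGID together with group-exec keeps S_ISGID only if
 * the creator is in the inherited group or has CAP_FSETID.
 */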
2173
2174/**
2175 * inode_owner_or_capable - check current task permissions to inode
2176 * @mnt_userns:	user namespace of the mount the inode was found from
2177 * @inode: inode being checked
2178 *
2179 * Return true if current either has CAP_FOWNER in a namespace with the
2180 * inode owner uid mapped, or owns the file.
2181 *
2182 * If the inode has been found through an idmapped mount the user namespace of
2183 * the vfsmount must be passed through @mnt_userns. This function will then take
2184 * care to map the inode according to @mnt_userns before checking permissions.
2185 * On non-idmapped mounts or if permission checking is to be performed on the
2186 * raw inode simply pass init_user_ns.
2187 */
2188bool inode_owner_or_capable(struct user_namespace *mnt_userns,
2189			    const struct inode *inode)
2190{
2191	kuid_t i_uid;
2192	struct user_namespace *ns;
2193
2194	i_uid = i_uid_into_mnt(mnt_userns, inode);
2195	if (uid_eq(current_fsuid(), i_uid))
2196		return true;
2197
2198	ns = current_user_ns();
2199	if (kuid_has_mapping(ns, i_uid) && ns_capable(ns, CAP_FOWNER))
2200		return true;
2201	return false;
2202}
2203EXPORT_SYMBOL(inode_owner_or_capable);
2204
2205/*
2206 * Direct i/o helper functions
2207 */
2208static void __inode_dio_wait(struct inode *inode)
2209{
2210	wait_queue_head_t *wq = bit_waitqueue(&inode->i_state, __I_DIO_WAKEUP);
2211	DEFINE_WAIT_BIT(q, &inode->i_state, __I_DIO_WAKEUP);
2212
2213	do {
2214		prepare_to_wait(wq, &q.wq_entry, TASK_UNINTERRUPTIBLE);
2215		if (atomic_read(&inode->i_dio_count))
2216			schedule();
2217	} while (atomic_read(&inode->i_dio_count));
2218	finish_wait(wq, &q.wq_entry);
2219}
2220
2221/**
2222 * inode_dio_wait - wait for outstanding DIO requests to finish
2223 * @inode: inode to wait for
2224 *
2225 * Waits for all pending direct I/O requests to finish so that we can
2226 * proceed with a truncate or equivalent operation.
2227 *
2228 * Must be called under a lock that serializes taking new references
2229 * to i_dio_count, usually by inode->i_mutex.
2230 */
2231void inode_dio_wait(struct inode *inode)
2232{
2233	if (atomic_read(&inode->i_dio_count))
2234		__inode_dio_wait(inode);
2235}
2236EXPORT_SYMBOL(inode_dio_wait);
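/*
 * Illustrative pairing (assumed call sites, not from this file): a
 * direct I/O submitter brackets each request with inode_dio_begin()
 * and inode_dio_end(), and a truncate path drains them:
 *
 *	inode_dio_begin(inode);		// before submitting the DIO
 *	// ... I/O in flight ...
 *	inode_dio_end(inode);		// from the completion handler
 *
 *	inode_lock(inode);		// serialises new i_dio_count refs
 *	inode_dio_wait(inode);
 *	// ... safe to truncate ...
 *	inode_unlock(inode);
 */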
2237
2238/*
2239 * inode_set_flags - atomically set some inode flags
2240 *
2241 * Note: the caller should be holding i_mutex, or else be sure that
2242 * they have exclusive access to the inode structure (i.e., while the
2243 * inode is being instantiated).  The reason for the cmpxchg() loop
2244 * --- which wouldn't be necessary if all code paths which modify
2245 * i_flags actually followed this rule --- is that there is at least one
2246 * code path which doesn't today, so we use cmpxchg() out of an
2247 * abundance of caution.
2248 *
2249 * In the long run, i_mutex is overkill, and we should probably look
2250 * at using the i_lock spinlock to protect i_flags, and then make sure
2251 * it is so documented in include/linux/fs.h and that all code follows
2252 * the locking convention!!
2253 */
2254void inode_set_flags(struct inode *inode, unsigned int flags,
2255		     unsigned int mask)
2256{
2257	WARN_ON_ONCE(flags & ~mask);
2258	set_mask_bits(&inode->i_flags, mask, flags);
2259}
2260EXPORT_SYMBOL(inode_set_flags);
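/*
 * Typical use (a sketch; the flag selection and on-disk bit names are
 * illustrative): refresh the S_NOATIME and S_SYNC bits from on-disk
 * flags without disturbing any other i_flags bits:
 *
 *	unsigned int new_fl = 0;
 *
 *	if (disk_flags & EXAMPLE_NOATIME_FL)	// hypothetical on-disk bit
 *		new_fl |= S_NOATIME;
 *	if (disk_flags & EXAMPLE_SYNC_FL)
 *		new_fl |= S_SYNC;
 *	inode_set_flags(inode, new_fl, S_NOATIME | S_SYNC);
 */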
2261
2262void inode_nohighmem(struct inode *inode)
2263{
2264	mapping_set_gfp_mask(inode->i_mapping, GFP_USER);
2265}
2266EXPORT_SYMBOL(inode_nohighmem);
2267
2268/**
2269 * timestamp_truncate - Truncate timespec to a granularity
2270 * @t: Timespec
2271 * @inode: inode being updated
2272 *
2273 * Truncate a timespec to the granularity supported by the fs
2274 * containing the inode.  Always rounds down.  The granularity must
2275 * not be 0 or greater than a second (NSEC_PER_SEC, or 10^9 ns).
2276 */
2277struct timespec64 timestamp_truncate(struct timespec64 t, struct inode *inode)
2278{
2279	struct super_block *sb = inode->i_sb;
2280	unsigned int gran = sb->s_time_gran;
2281
2282	t.tv_sec = clamp(t.tv_sec, sb->s_time_min, sb->s_time_max);
2283	if (unlikely(t.tv_sec == sb->s_time_max || t.tv_sec == sb->s_time_min))
2284		t.tv_nsec = 0;
2285
2286	/* Avoid division in the common cases 1 ns and 1 s. */
2287	if (gran == 1)
2288		; /* nothing */
2289	else if (gran == NSEC_PER_SEC)
2290		t.tv_nsec = 0;
2291	else if (gran > 1 && gran < NSEC_PER_SEC)
2292		t.tv_nsec -= t.tv_nsec % gran;
2293	else
2294		WARN(1, "invalid file time granularity: %u", gran);
2295	return t;
2296}
2297EXPORT_SYMBOL(timestamp_truncate);
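/*
 * Worked example (illustrative values): on a filesystem with
 * s_time_gran = 1000 (microsecond resolution), a timestamp with
 * tv_nsec = 123456789 is rounded down to 123456000; with
 * s_time_gran = NSEC_PER_SEC the nanoseconds are dropped entirely,
 * and tv_sec is clamped to [s_time_min, s_time_max] in all cases.
 */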
2298
2299/**
2300 * current_time - Return FS time
2301 * @inode: inode.
2302 *
2303 * Return the current time truncated to the time granularity supported by
2304 * the fs.
2305 *
2306 * Note that inode and inode->sb cannot be NULL.
2307 * Otherwise, the function warns and returns time without truncation.
2308 */
2309struct timespec64 current_time(struct inode *inode)
2310{
2311	struct timespec64 now;
2312
2313	ktime_get_coarse_real_ts64(&now);
2314
2315	if (unlikely(!inode->i_sb)) {
2316		WARN(1, "current_time() called with uninitialized super_block in the inode");
2317		return now;
2318	}
2319
2320	return timestamp_truncate(now, inode);
2321}
2322EXPORT_SYMBOL(current_time);