Linux v6.9.4 - fs/dcache.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * fs/dcache.c
   4 *
   5 * Complete reimplementation
   6 * (C) 1997 Thomas Schoebel-Theuer,
   7 * with heavy changes by Linus Torvalds
   8 */
   9
  10/*
  11 * Notes on the allocation strategy:
  12 *
  13 * The dcache is a master of the icache - whenever a dcache entry
  14 * exists, the inode will always exist. "iput()" is done either when
  15 * the dcache entry is deleted or garbage collected.
  16 */
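/*
 * Illustrative sketch (not part of this file): the invariant above means
 * a positive dentry reference by itself keeps the inode alive, so a
 * holder may use d_inode() with no separate igrab()/iput() pair.
 * "do_something" is a hypothetical consumer.
 */
#if 0	/* example only */
	struct dentry *d = dget(dentry);	/* also pins d->d_inode */
	do_something(d_inode(d));		/* inode cannot go away here */
	dput(d);				/* may lead to iput() later */
#endif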
  17
  18#include <linux/ratelimit.h>
  19#include <linux/string.h>
  20#include <linux/mm.h>
  21#include <linux/fs.h>
  22#include <linux/fscrypt.h>
  23#include <linux/fsnotify.h>
  24#include <linux/slab.h>
  25#include <linux/init.h>
  26#include <linux/hash.h>
  27#include <linux/cache.h>
  28#include <linux/export.h>
  29#include <linux/security.h>
  30#include <linux/seqlock.h>
  31#include <linux/memblock.h>
  32#include <linux/bit_spinlock.h>
  33#include <linux/rculist_bl.h>
  34#include <linux/list_lru.h>
  35#include "internal.h"
  36#include "mount.h"
  37
  38/*
  39 * Usage:
  40 * dcache->d_inode->i_lock protects:
  41 *   - i_dentry, d_u.d_alias, d_inode of aliases
  42 * dcache_hash_bucket lock protects:
  43 *   - the dcache hash table
  44 * s_roots bl list spinlock protects:
  45 *   - the s_roots list (see __d_drop)
  46 * dentry->d_sb->s_dentry_lru_lock protects:
  47 *   - the dcache lru lists and counters
  48 * d_lock protects:
  49 *   - d_flags
  50 *   - d_name
  51 *   - d_lru
  52 *   - d_count
  53 *   - d_unhashed()
  54 *   - d_parent and d_children
  55 *   - children's d_sib and d_parent
  56 *   - d_u.d_alias, d_inode
  57 *
  58 * Ordering:
  59 * dentry->d_inode->i_lock
  60 *   dentry->d_lock
  61 *     dentry->d_sb->s_dentry_lru_lock
  62 *     dcache_hash_bucket lock
  63 *     s_roots lock
  64 *
  65 * If there is an ancestor relationship:
  66 * dentry->d_parent->...->d_parent->d_lock
  67 *   ...
  68 *     dentry->d_parent->d_lock
  69 *       dentry->d_lock
  70 *
  71 * If no ancestor relationship:
  72 * arbitrary, since it's serialized on rename_lock
  73 */
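/*
 * Illustrative sketch (not part of this file): taking both locks in the
 * order documented above, as d_delete() does - i_lock strictly outside
 * d_lock.
 */
#if 0	/* example only */
	spin_lock(&dentry->d_inode->i_lock);	/* level 1: i_lock first */
	spin_lock(&dentry->d_lock);		/* level 2: nests inside */
	/* ... update d_inode / d_flags ... */
	spin_unlock(&dentry->d_lock);
	spin_unlock(&dentry->d_inode->i_lock);
#endif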
  74int sysctl_vfs_cache_pressure __read_mostly = 100;
  75EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);
  76
  77__cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);
  78
  79EXPORT_SYMBOL(rename_lock);
  80
  81static struct kmem_cache *dentry_cache __ro_after_init;
  82
  83const struct qstr empty_name = QSTR_INIT("", 0);
  84EXPORT_SYMBOL(empty_name);
  85const struct qstr slash_name = QSTR_INIT("/", 1);
  86EXPORT_SYMBOL(slash_name);
  87const struct qstr dotdot_name = QSTR_INIT("..", 2);
  88EXPORT_SYMBOL(dotdot_name);
  89
  90/*
  91 * This is the single most critical data structure when it comes
  92 * to the dcache: the hashtable for lookups. Somebody should try
  93 * to make this good - I've just made it work.
  94 *
  95 * This hash-function tries to avoid losing too many bits of hash
  96 * information, yet avoid using a prime hash-size or similar.
  97 */
  98
  99static unsigned int d_hash_shift __ro_after_init;
 100
 101static struct hlist_bl_head *dentry_hashtable __ro_after_init;
 102
 103static inline struct hlist_bl_head *d_hash(unsigned int hash)
 104{
 105	return dentry_hashtable + (hash >> d_hash_shift);
 106}
 107
 108#define IN_LOOKUP_SHIFT 10
 109static struct hlist_bl_head in_lookup_hashtable[1 << IN_LOOKUP_SHIFT];
 110
 111static inline struct hlist_bl_head *in_lookup_hash(const struct dentry *parent,
 112					unsigned int hash)
 113{
 114	hash += (unsigned long) parent / L1_CACHE_BYTES;
 115	return in_lookup_hashtable + hash_32(hash, IN_LOOKUP_SHIFT);
 116}
 117
 118struct dentry_stat_t {
 119	long nr_dentry;
 120	long nr_unused;
 121	long age_limit;		/* age in seconds */
 122	long want_pages;	/* pages requested by system */
 123	long nr_negative;	/* # of unused negative dentries */
 124	long dummy;		/* Reserved for future use */
 125};
 126
 127static DEFINE_PER_CPU(long, nr_dentry);
 128static DEFINE_PER_CPU(long, nr_dentry_unused);
 129static DEFINE_PER_CPU(long, nr_dentry_negative);
 130
 131#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
 132/* Statistics gathering. */
 133static struct dentry_stat_t dentry_stat = {
 134	.age_limit = 45,
 135};
 136
 137/*
 138 * Here we resort to our own counters instead of using generic per-cpu counters
 139 * for consistency with what the vfs inode code does. We are expected to harvest
 140 * better code and performance by having our own specialized counters.
 141 *
 142 * Please note that the loop is done over all possible CPUs, not over all online
 143 * CPUs. The reason for this is that we don't want to play games with CPUs going
 144 * on and off. If one of them goes off, we will just keep their counters.
 145 *
 146 * glommer: See cffbc8a for details, and if you ever intend to change this,
 147 * please update all vfs counters to match.
 148 */
 149static long get_nr_dentry(void)
 150{
 151	int i;
 152	long sum = 0;
 153	for_each_possible_cpu(i)
 154		sum += per_cpu(nr_dentry, i);
 155	return sum < 0 ? 0 : sum;
 156}
 157
 158static long get_nr_dentry_unused(void)
 159{
 160	int i;
 161	long sum = 0;
 162	for_each_possible_cpu(i)
 163		sum += per_cpu(nr_dentry_unused, i);
 164	return sum < 0 ? 0 : sum;
 165}
 166
 167static long get_nr_dentry_negative(void)
 168{
 169	int i;
 170	long sum = 0;
 171
 172	for_each_possible_cpu(i)
 173		sum += per_cpu(nr_dentry_negative, i);
 174	return sum < 0 ? 0 : sum;
 175}
 176
 177static int proc_nr_dentry(struct ctl_table *table, int write, void *buffer,
 178			  size_t *lenp, loff_t *ppos)
 179{
 180	dentry_stat.nr_dentry = get_nr_dentry();
 181	dentry_stat.nr_unused = get_nr_dentry_unused();
 182	dentry_stat.nr_negative = get_nr_dentry_negative();
 183	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
 184}
 185
 186static struct ctl_table fs_dcache_sysctls[] = {
 187	{
 188		.procname	= "dentry-state",
 189		.data		= &dentry_stat,
 190		.maxlen		= 6*sizeof(long),
 191		.mode		= 0444,
 192		.proc_handler	= proc_nr_dentry,
 193	},
 194};
 195
 196static int __init init_fs_dcache_sysctls(void)
 197{
 198	register_sysctl_init("fs", fs_dcache_sysctls);
 199	return 0;
 200}
 201fs_initcall(init_fs_dcache_sysctls);
 202#endif
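/*
 * Illustrative sketch (not part of this file): the six longs above are
 * exactly what userspace reads back from /proc/sys/fs/dentry-state.
 * A minimal userspace reader:
 */
#if 0	/* userspace example only */
#include <stdio.h>

int main(void)
{
	/* nr_dentry, nr_unused, age_limit, want_pages, nr_negative, dummy */
	long v[6];
	FILE *f = fopen("/proc/sys/fs/dentry-state", "r");

	if (!f || fscanf(f, "%ld %ld %ld %ld %ld %ld",
			 &v[0], &v[1], &v[2], &v[3], &v[4], &v[5]) != 6)
		return 1;
	printf("dentries=%ld unused=%ld negative=%ld\n", v[0], v[1], v[4]);
	fclose(f);
	return 0;
}
#endif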
 203
 204/*
 205 * Compare 2 name strings, return 0 if they match, otherwise non-zero.
 206 * The strings are both count bytes long, and count is non-zero.
 207 */
 208#ifdef CONFIG_DCACHE_WORD_ACCESS
 209
 210#include <asm/word-at-a-time.h>
 211/*
 212 * NOTE! 'cs' and 'scount' come from a dentry, so it has an
 213 * aligned allocation for this particular component. We don't
 214 * strictly need the load_unaligned_zeropad() safety, but it
 215 * doesn't hurt either.
 216 *
 217 * In contrast, 'ct' and 'tcount' can be from a pathname, and do
 218 * need the careful unaligned handling.
 219 */
 220static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
 221{
 222	unsigned long a,b,mask;
 223
 224	for (;;) {
 225		a = read_word_at_a_time(cs);
 226		b = load_unaligned_zeropad(ct);
 227		if (tcount < sizeof(unsigned long))
 228			break;
 229		if (unlikely(a != b))
 230			return 1;
 231		cs += sizeof(unsigned long);
 232		ct += sizeof(unsigned long);
 233		tcount -= sizeof(unsigned long);
 234		if (!tcount)
 235			return 0;
 236	}
 237	mask = bytemask_from_count(tcount);
 238	return unlikely(!!((a ^ b) & mask));
 239}
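/*
 * Worked example (assuming 64-bit little-endian, where
 * bytemask_from_count(cnt) expands to ~(~0ul << cnt*8)): comparing a
 * 3-byte component, tcount = 3 leaves the loop on the first pass with a
 * full word in 'a' and 'b'.  The mask is then 0x0000000000ffffff, so only
 * the three name bytes take part in the final (a ^ b) & mask test and
 * whatever padding follows the name is ignored.
 */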
 240
 241#else
 242
 243static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
 244{
 245	do {
 246		if (*cs != *ct)
 247			return 1;
 248		cs++;
 249		ct++;
 250		tcount--;
 251	} while (tcount);
 252	return 0;
 253}
 254
 255#endif
 256
 257static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *ct, unsigned tcount)
 258{
 259	/*
 260	 * Be careful about RCU walk racing with rename:
 261	 * use 'READ_ONCE' to fetch the name pointer.
 262	 *
 263	 * NOTE! Even if a rename will mean that the length
 264	 * was not loaded atomically, we don't care. The
 265	 * RCU walk will check the sequence count eventually,
 266	 * and catch it. And we won't overrun the buffer,
 267	 * because we're reading the name pointer atomically,
 268	 * and a dentry name is guaranteed to be properly
 269	 * terminated with a NUL byte.
 270	 *
 271	 * End result: even if 'len' is wrong, we'll exit
 272	 * early because the data cannot match (there can
 273	 * be no NUL in the ct/tcount data)
 274	 */
 275	const unsigned char *cs = READ_ONCE(dentry->d_name.name);
 276
 277	return dentry_string_cmp(cs, ct, tcount);
 278}
 279
 280struct external_name {
 281	union {
 282		atomic_t count;
 283		struct rcu_head head;
 284	} u;
 285	unsigned char name[];
 286};
 287
 288static inline struct external_name *external_name(struct dentry *dentry)
 289{
 290	return container_of(dentry->d_name.name, struct external_name, name[0]);
 291}
 292
 293static void __d_free(struct rcu_head *head)
 294{
 295	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
 296
 297	kmem_cache_free(dentry_cache, dentry);
 298}
 299
 300static void __d_free_external(struct rcu_head *head)
 301{
 302	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
 303	kfree(external_name(dentry));
 304	kmem_cache_free(dentry_cache, dentry);
 305}
 306
 307static inline int dname_external(const struct dentry *dentry)
 308{
 309	return dentry->d_name.name != dentry->d_iname;
 310}
 311
 312void take_dentry_name_snapshot(struct name_snapshot *name, struct dentry *dentry)
 313{
 314	spin_lock(&dentry->d_lock);
 315	name->name = dentry->d_name;
 316	if (unlikely(dname_external(dentry))) {
 317		atomic_inc(&external_name(dentry)->u.count);
 318	} else {
 319		memcpy(name->inline_name, dentry->d_iname,
 320		       dentry->d_name.len + 1);
 321		name->name.name = name->inline_name;
 322	}
 323	spin_unlock(&dentry->d_lock);
 324}
 325EXPORT_SYMBOL(take_dentry_name_snapshot);
 326
 327void release_dentry_name_snapshot(struct name_snapshot *name)
 328{
 329	if (unlikely(name->name.name != name->inline_name)) {
 330		struct external_name *p;
 331		p = container_of(name->name.name, struct external_name, name[0]);
 332		if (unlikely(atomic_dec_and_test(&p->u.count)))
 333			kfree_rcu(p, u.head);
 334	}
 335}
 336EXPORT_SYMBOL(release_dentry_name_snapshot);
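/*
 * Illustrative sketch (not part of this file): how a caller keeps a
 * stable copy of the name across a concurrent rename.  "frob_name" is a
 * hypothetical consumer.
 */
#if 0	/* example only */
	struct name_snapshot snap;

	take_dentry_name_snapshot(&snap, dentry);
	frob_name(snap.name.name, snap.name.len);	/* safe vs. rename */
	release_dentry_name_snapshot(&snap);
#endif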
 337
 338static inline void __d_set_inode_and_type(struct dentry *dentry,
 339					  struct inode *inode,
 340					  unsigned type_flags)
 341{
 342	unsigned flags;
 343
 344	dentry->d_inode = inode;
 345	flags = READ_ONCE(dentry->d_flags);
 346	flags &= ~DCACHE_ENTRY_TYPE;
 347	flags |= type_flags;
 348	smp_store_release(&dentry->d_flags, flags);
 349}
 350
 351static inline void __d_clear_type_and_inode(struct dentry *dentry)
 352{
 353	unsigned flags = READ_ONCE(dentry->d_flags);
 354
 355	flags &= ~DCACHE_ENTRY_TYPE;
 356	WRITE_ONCE(dentry->d_flags, flags);
 357	dentry->d_inode = NULL;
 358	if (dentry->d_flags & DCACHE_LRU_LIST)
 359		this_cpu_inc(nr_dentry_negative);
 360}
 361
 362static void dentry_free(struct dentry *dentry)
 363{
 364	WARN_ON(!hlist_unhashed(&dentry->d_u.d_alias));
 365	if (unlikely(dname_external(dentry))) {
 366		struct external_name *p = external_name(dentry);
 367		if (likely(atomic_dec_and_test(&p->u.count))) {
 368			call_rcu(&dentry->d_u.d_rcu, __d_free_external);
 369			return;
 370		}
 371	}
 372	/* if dentry was never visible to RCU, immediate free is OK */
 373	if (dentry->d_flags & DCACHE_NORCU)
 374		__d_free(&dentry->d_u.d_rcu);
 375	else
 376		call_rcu(&dentry->d_u.d_rcu, __d_free);
 377}
 378
 379/*
 380 * Release the dentry's inode, using the filesystem
 381 * d_iput() operation if defined.
 382 */
 383static void dentry_unlink_inode(struct dentry * dentry)
 384	__releases(dentry->d_lock)
 385	__releases(dentry->d_inode->i_lock)
 386{
 387	struct inode *inode = dentry->d_inode;
 388
 389	raw_write_seqcount_begin(&dentry->d_seq);
 390	__d_clear_type_and_inode(dentry);
 391	hlist_del_init(&dentry->d_u.d_alias);
 392	raw_write_seqcount_end(&dentry->d_seq);
 393	spin_unlock(&dentry->d_lock);
 394	spin_unlock(&inode->i_lock);
 395	if (!inode->i_nlink)
 396		fsnotify_inoderemove(inode);
 397	if (dentry->d_op && dentry->d_op->d_iput)
 398		dentry->d_op->d_iput(dentry, inode);
 399	else
 400		iput(inode);
 401}
 402
 403/*
 404 * The DCACHE_LRU_LIST bit is set whenever the 'd_lru' entry
 405 * is in use - which includes both the "real" per-superblock
 406 * LRU list _and_ the DCACHE_SHRINK_LIST use.
 407 *
 408 * The DCACHE_SHRINK_LIST bit is set whenever the dentry is
 409 * on the shrink list (ie not on the superblock LRU list).
 410 *
 411 * The per-cpu "nr_dentry_unused" counters are updated with
 412 * the DCACHE_LRU_LIST bit.
 413 *
 414 * The per-cpu "nr_dentry_negative" counters are only updated
 415 * when deleted from or added to the per-superblock LRU list, not
 416 * from/to the shrink list. That is to avoid an unneeded dec/inc
 417 * pair when moving from LRU to shrink list in select_collect().
 418 *
 419 * These helper functions make sure we always follow the
 420 * rules. d_lock must be held by the caller.
 421 */
 422#define D_FLAG_VERIFY(dentry,x) WARN_ON_ONCE(((dentry)->d_flags & (DCACHE_LRU_LIST | DCACHE_SHRINK_LIST)) != (x))
 423static void d_lru_add(struct dentry *dentry)
 424{
 425	D_FLAG_VERIFY(dentry, 0);
 426	dentry->d_flags |= DCACHE_LRU_LIST;
 427	this_cpu_inc(nr_dentry_unused);
 428	if (d_is_negative(dentry))
 429		this_cpu_inc(nr_dentry_negative);
 430	WARN_ON_ONCE(!list_lru_add_obj(
 431			&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
 432}
 433
 434static void d_lru_del(struct dentry *dentry)
 435{
 436	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
 437	dentry->d_flags &= ~DCACHE_LRU_LIST;
 438	this_cpu_dec(nr_dentry_unused);
 439	if (d_is_negative(dentry))
 440		this_cpu_dec(nr_dentry_negative);
 441	WARN_ON_ONCE(!list_lru_del_obj(
 442			&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
 443}
 444
 445static void d_shrink_del(struct dentry *dentry)
 446{
 447	D_FLAG_VERIFY(dentry, DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
 448	list_del_init(&dentry->d_lru);
 449	dentry->d_flags &= ~(DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
 450	this_cpu_dec(nr_dentry_unused);
 451}
 452
 453static void d_shrink_add(struct dentry *dentry, struct list_head *list)
 454{
 455	D_FLAG_VERIFY(dentry, 0);
 456	list_add(&dentry->d_lru, list);
 457	dentry->d_flags |= DCACHE_SHRINK_LIST | DCACHE_LRU_LIST;
 458	this_cpu_inc(nr_dentry_unused);
 459}
 460
 461/*
 462 * These can only be called under the global LRU lock, ie during the
 463 * callback for freeing the LRU list. "isolate" removes it from the
 464 * LRU lists entirely, while shrink_move moves it to the indicated
 465 * private list.
 466 */
 467static void d_lru_isolate(struct list_lru_one *lru, struct dentry *dentry)
 468{
 469	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
 470	dentry->d_flags &= ~DCACHE_LRU_LIST;
 471	this_cpu_dec(nr_dentry_unused);
 472	if (d_is_negative(dentry))
 473		this_cpu_dec(nr_dentry_negative);
 474	list_lru_isolate(lru, &dentry->d_lru);
 475}
 476
 477static void d_lru_shrink_move(struct list_lru_one *lru, struct dentry *dentry,
 478			      struct list_head *list)
 479{
 480	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
 481	dentry->d_flags |= DCACHE_SHRINK_LIST;
 482	if (d_is_negative(dentry))
 483		this_cpu_dec(nr_dentry_negative);
 484	list_lru_isolate_move(lru, &dentry->d_lru, list);
 485}
 486
 487static void ___d_drop(struct dentry *dentry)
 488{
 489	struct hlist_bl_head *b;
 490	/*
 491	 * Hashed dentries are normally on the dentry hashtable,
 492	 * with the exception of those newly allocated by
 493	 * d_obtain_root, which are always IS_ROOT:
 494	 */
 495	if (unlikely(IS_ROOT(dentry)))
 496		b = &dentry->d_sb->s_roots;
 497	else
 498		b = d_hash(dentry->d_name.hash);
 499
 500	hlist_bl_lock(b);
 501	__hlist_bl_del(&dentry->d_hash);
 502	hlist_bl_unlock(b);
 503}
 504
 505void __d_drop(struct dentry *dentry)
 506{
 507	if (!d_unhashed(dentry)) {
 508		___d_drop(dentry);
 509		dentry->d_hash.pprev = NULL;
 510		write_seqcount_invalidate(&dentry->d_seq);
 511	}
 512}
 513EXPORT_SYMBOL(__d_drop);
 514
 515/**
 516 * d_drop - drop a dentry
 517 * @dentry: dentry to drop
 518 *
 519 * d_drop() unhashes the entry from the parent dentry hashes, so that it won't
 520 * be found through a VFS lookup any more. Note that this is different from
 521 * deleting the dentry - d_delete will try to mark the dentry negative if
 522 * possible, giving a successful _negative_ lookup, while d_drop will
 523 * just make the cache lookup fail.
 524 *
 525 * d_drop() is used mainly for stuff that wants to invalidate a dentry for some
 526 * reason (NFS timeouts or autofs deletes).
 527 *
 528 * __d_drop requires dentry->d_lock
 529 *
 530 * ___d_drop doesn't mark dentry as "unhashed"
 531 * (dentry->d_hash.pprev will be LIST_POISON2, not NULL).
 532 */
 533void d_drop(struct dentry *dentry)
 534{
 535	spin_lock(&dentry->d_lock);
 536	__d_drop(dentry);
 537	spin_unlock(&dentry->d_lock);
 538}
 539EXPORT_SYMBOL(d_drop);
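/*
 * Illustrative sketch (not part of this file): the classic d_drop() use,
 * e.g. a network filesystem unhashing an entry it believes is stale so
 * the next lookup goes back to the server.  "entry_is_stale" is a
 * hypothetical test.
 */
#if 0	/* example only */
	if (entry_is_stale(dentry))
		d_drop(dentry);	/* lookups miss from now on; freed on last dput() */
#endif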
 540
 541static inline void dentry_unlist(struct dentry *dentry)
 542{
 543	struct dentry *next;
 544	/*
 545	 * Inform d_walk() and shrink_dentry_list() that we are no longer
 546	 * attached to the dentry tree
 547	 */
 548	dentry->d_flags |= DCACHE_DENTRY_KILLED;
 549	if (unlikely(hlist_unhashed(&dentry->d_sib)))
 550		return;
 551	__hlist_del(&dentry->d_sib);
 552	/*
 553	 * Cursors can move around the list of children.  While we'd been
 554	 * a normal list member, it didn't matter - ->d_sib.next would've
 555	 * been updated.  However, from now on it won't be and for the
 556	 * things like d_walk() it might end up with a nasty surprise.
 557	 * Normally d_walk() doesn't care about cursors moving around -
 558	 * ->d_lock on parent prevents that and since a cursor has no children
 559	 * of its own, we get through it without ever unlocking the parent.
 560	 * There is one exception, though - if we ascend from a child that
 561	 * gets killed as soon as we unlock it, the next sibling is found
 562	 * using the value left in its ->d_sib.next.  And if _that_
 563	 * pointed to a cursor, and cursor got moved (e.g. by lseek())
 564	 * before d_walk() regains parent->d_lock, we'll end up skipping
 565	 * everything the cursor had been moved past.
 566	 *
 567	 * Solution: make sure that the pointer left behind in ->d_sib.next
 568	 * points to something that won't be moving around.  I.e. skip the
 569	 * cursors.
 570	 */
 571	while (dentry->d_sib.next) {
 572		next = hlist_entry(dentry->d_sib.next, struct dentry, d_sib);
 573		if (likely(!(next->d_flags & DCACHE_DENTRY_CURSOR)))
 574			break;
 575		dentry->d_sib.next = next->d_sib.next;
 576	}
 577}
 578
 579static struct dentry *__dentry_kill(struct dentry *dentry)
 580{
 581	struct dentry *parent = NULL;
 582	bool can_free = true;
 583
 584	/*
 585	 * The dentry is now unrecoverably dead to the world.
 586	 */
 587	lockref_mark_dead(&dentry->d_lockref);
 588
 589	/*
 590	 * inform the fs via d_prune that this dentry is about to be
 591	 * unhashed and destroyed.
 592	 */
 593	if (dentry->d_flags & DCACHE_OP_PRUNE)
 594		dentry->d_op->d_prune(dentry);
 595
 596	if (dentry->d_flags & DCACHE_LRU_LIST) {
 597		if (!(dentry->d_flags & DCACHE_SHRINK_LIST))
 598			d_lru_del(dentry);
 599	}
 600	/* if it was on the hash then remove it */
 601	__d_drop(dentry);
 602	if (dentry->d_inode)
 603		dentry_unlink_inode(dentry);
 604	else
 605		spin_unlock(&dentry->d_lock);
 606	this_cpu_dec(nr_dentry);
 607	if (dentry->d_op && dentry->d_op->d_release)
 608		dentry->d_op->d_release(dentry);
 609
 610	cond_resched();
 611	/* now that it's negative, ->d_parent is stable */
 612	if (!IS_ROOT(dentry)) {
 613		parent = dentry->d_parent;
 614		spin_lock(&parent->d_lock);
 615	}
 616	spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
 617	dentry_unlist(dentry);
 618	if (dentry->d_flags & DCACHE_SHRINK_LIST)
 619		can_free = false;
 620	spin_unlock(&dentry->d_lock);
 621	if (likely(can_free))
 622		dentry_free(dentry);
 623	if (parent && --parent->d_lockref.count) {
 624		spin_unlock(&parent->d_lock);
 625		return NULL;
 626	}
 627	return parent;
 628}
 629
 630/*
 631 * Lock a dentry for feeding it to __dentry_kill().
 632 * Called under rcu_read_lock() and dentry->d_lock; the former
 633 * guarantees that nothing we access will be freed under us.
 634 * Note that dentry is *not* protected from concurrent dentry_kill(),
 635 * d_delete(), etc.
 636 *
 637 * Return false if dentry is busy.  Otherwise, return true and have
 638 * that dentry's inode locked.
 639 */
 640
 641static bool lock_for_kill(struct dentry *dentry)
 642{
 643	struct inode *inode = dentry->d_inode;
 644
 645	if (unlikely(dentry->d_lockref.count))
 646		return false;
 647
 648	if (!inode || likely(spin_trylock(&inode->i_lock)))
 649		return true;
 650
 651	do {
 652		spin_unlock(&dentry->d_lock);
 653		spin_lock(&inode->i_lock);
 654		spin_lock(&dentry->d_lock);
 655		if (likely(inode == dentry->d_inode))
 656			break;
 657		spin_unlock(&inode->i_lock);
 658		inode = dentry->d_inode;
 659	} while (inode);
 660	if (likely(!dentry->d_lockref.count))
 661		return true;
 662	if (inode)
 663		spin_unlock(&inode->i_lock);
 664	return false;
 665}
 666
 667/*
 668 * Decide if dentry is worth retaining.  Usually this is called with dentry
 669 * locked; if not locked, we are more limited and might not be able to tell
 670 * without a lock.  False in this case means "punt to locked path and recheck".
 671 *
 672 * In case we aren't locked, these predicates are not "stable". However, it is
 673 * sufficient that at some point after we dropped the reference the dentry was
 674 * hashed and the flags had the proper value. Other dentry users may have
 675 * re-gotten a reference to the dentry and changed that, but our work is done -
 676 * we can leave the dentry around with a zero refcount.
 677 */
 678static inline bool retain_dentry(struct dentry *dentry, bool locked)
 679{
 680	unsigned int d_flags;
 681
 682	smp_rmb();
 683	d_flags = READ_ONCE(dentry->d_flags);
 684
 685	// Unreachable? Nobody would be able to look it up, no point retaining
 686	if (unlikely(d_unhashed(dentry)))
 687		return false;
 688
 689	// Same if it's disconnected
 690	if (unlikely(d_flags & DCACHE_DISCONNECTED))
 691		return false;
 692
 693	// ->d_delete() might tell us not to bother, but that requires
 694	// ->d_lock; can't decide without it
 695	if (unlikely(d_flags & DCACHE_OP_DELETE)) {
 696		if (!locked || dentry->d_op->d_delete(dentry))
 697			return false;
 698	}
 699
 700	// Explicitly told not to bother
 701	if (unlikely(d_flags & DCACHE_DONTCACHE))
 702		return false;
 703
 704	// At this point it looks like we ought to keep it.  We also might
 705	// need to do something - put it on LRU if it wasn't there already
 706	// and mark it referenced if it was on LRU, but not marked yet.
 707	// Unfortunately, both actions require ->d_lock, so in lockless
 708	// case we'd have to punt rather than doing those.
 709	if (unlikely(!(d_flags & DCACHE_LRU_LIST))) {
 710		if (!locked)
 711			return false;
 712		d_lru_add(dentry);
 713	} else if (unlikely(!(d_flags & DCACHE_REFERENCED))) {
 714		if (!locked)
 715			return false;
 716		dentry->d_flags |= DCACHE_REFERENCED;
 717	}
 718	return true;
 719}
 720
 721void d_mark_dontcache(struct inode *inode)
 722{
 723	struct dentry *de;
 724
 725	spin_lock(&inode->i_lock);
 726	hlist_for_each_entry(de, &inode->i_dentry, d_u.d_alias) {
 727		spin_lock(&de->d_lock);
 728		de->d_flags |= DCACHE_DONTCACHE;
 729		spin_unlock(&de->d_lock);
 730	}
 731	inode->i_state |= I_DONTCACHE;
 732	spin_unlock(&inode->i_lock);
 733}
 734EXPORT_SYMBOL(d_mark_dontcache);
 735
 736/*
 737 * Try to do a lockless dput(), and return whether that was successful.
 738 *
 739 * If unsuccessful, we return false, having already taken the dentry lock.
 740 * In that case refcount is guaranteed to be zero and we have already
 741 * decided that it's not worth keeping around.
 742 *
 743 * The caller needs to hold the RCU read lock, so that the dentry is
 744 * guaranteed to stay around even if the refcount goes down to zero!
 745 */
 746static inline bool fast_dput(struct dentry *dentry)
 747{
 748	int ret;
 749
 750	/*
 751	 * try to decrement the lockref optimistically.
 752	 */
 753	ret = lockref_put_return(&dentry->d_lockref);
 754
 755	/*
 756	 * If the lockref_put_return() failed due to the lock being held
 757	 * by somebody else, the fast path has failed. We will need to
 758	 * get the lock, and then check the count again.
 759	 */
 760	if (unlikely(ret < 0)) {
 761		spin_lock(&dentry->d_lock);
 762		if (WARN_ON_ONCE(dentry->d_lockref.count <= 0)) {
 763			spin_unlock(&dentry->d_lock);
 764			return true;
 765		}
 766		dentry->d_lockref.count--;
 767		goto locked;
 768	}
 769
 770	/*
 771	 * If we weren't the last ref, we're done.
 772	 */
 773	if (ret)
 774		return true;
 775
 776	/*
 777	 * Can we decide that decrement of refcount is all we needed without
 778	 * taking the lock?  There's a very common case when it's all we need -
 779	 * dentry looks like it ought to be retained and there's nothing else
 780	 * to do.
 781	 */
 782	if (retain_dentry(dentry, false))
 783		return true;
 784
 785	/*
 786	 * Either not worth retaining or we can't tell without the lock.
 787	 * Get the lock, then.  We've already decremented the refcount to 0,
 788	 * but we'll need to re-check the situation after getting the lock.
 789	 */
 790	spin_lock(&dentry->d_lock);
 791
 792	/*
 793	 * Did somebody else grab a reference to it in the meantime, and
 794	 * we're no longer the last user after all? Alternatively, somebody
 795	 * else could have killed it and marked it dead. Either way, we
 796	 * don't need to do anything else.
 797	 */
 798locked:
 799	if (dentry->d_lockref.count || retain_dentry(dentry, true)) {
 800		spin_unlock(&dentry->d_lock);
 801		return true;
 802	}
 803	return false;
 804}
 805
 806
 807/*
 808 * This is dput
 809 *
 810 * This is complicated by the fact that we do not want to put
 811 * dentries that are no longer on any hash chain on the unused
 812 * list: we'd much rather just get rid of them immediately.
 813 *
 814 * However, that implies that we have to traverse the dentry
 815 * tree upwards to the parents which might _also_ now be
 816 * scheduled for deletion (it may have been only waiting for
 817 * its last child to go away).
 818 *
 819 * This tail recursion is done by hand as we don't want to depend
 820 * on the compiler to always get this right (gcc generally doesn't).
 821 * Real recursion would eat up our stack space.
 822 */
 823
 824/*
 825 * dput - release a dentry
 826 * @dentry: dentry to release
 827 *
 828 * Release a dentry. This will drop the usage count and if appropriate
 829 * call the dentry unlink method as well as removing it from the queues and
 830 * releasing its resources. If the parent dentries were scheduled for release
 831 * they too may now get deleted.
 832 */
 833void dput(struct dentry *dentry)
 834{
 835	if (!dentry)
 836		return;
 837	might_sleep();
 838	rcu_read_lock();
 839	if (likely(fast_dput(dentry))) {
 840		rcu_read_unlock();
 841		return;
 842	}
 843	while (lock_for_kill(dentry)) {
 844		rcu_read_unlock();
 845		dentry = __dentry_kill(dentry);
 846		if (!dentry)
 847			return;
 848		if (retain_dentry(dentry, true)) {
 849			spin_unlock(&dentry->d_lock);
 850			return;
 851		}
 852		rcu_read_lock();
 853	}
 854	rcu_read_unlock();
 855	spin_unlock(&dentry->d_lock);
 856}
 857EXPORT_SYMBOL(dput);
 858
 859static void to_shrink_list(struct dentry *dentry, struct list_head *list)
 860__must_hold(&dentry->d_lock)
 861{
 862	if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) {
 863		if (dentry->d_flags & DCACHE_LRU_LIST)
 864			d_lru_del(dentry);
 865		d_shrink_add(dentry, list);
 866	}
 867}
 868
 869void dput_to_list(struct dentry *dentry, struct list_head *list)
 870{
 871	rcu_read_lock();
 872	if (likely(fast_dput(dentry))) {
 873		rcu_read_unlock();
 874		return;
 875	}
 876	rcu_read_unlock();
 877	to_shrink_list(dentry, list);
 878	spin_unlock(&dentry->d_lock);
 879}
 880
 881struct dentry *dget_parent(struct dentry *dentry)
 882{
 883	int gotref;
 884	struct dentry *ret;
 885	unsigned seq;
 886
 887	/*
 888	 * Do optimistic parent lookup without any
 889	 * locking.
 890	 */
 891	rcu_read_lock();
 892	seq = raw_seqcount_begin(&dentry->d_seq);
 893	ret = READ_ONCE(dentry->d_parent);
 894	gotref = lockref_get_not_zero(&ret->d_lockref);
 895	rcu_read_unlock();
 896	if (likely(gotref)) {
 897		if (!read_seqcount_retry(&dentry->d_seq, seq))
 898			return ret;
 899		dput(ret);
 900	}
 901
 902repeat:
 903	/*
 904	 * Don't need rcu_dereference because we re-check it was correct under
 905	 * the lock.
 906	 */
 907	rcu_read_lock();
 908	ret = dentry->d_parent;
 909	spin_lock(&ret->d_lock);
 910	if (unlikely(ret != dentry->d_parent)) {
 911		spin_unlock(&ret->d_lock);
 912		rcu_read_unlock();
 913		goto repeat;
 914	}
 915	rcu_read_unlock();
 916	BUG_ON(!ret->d_lockref.count);
 917	ret->d_lockref.count++;
 918	spin_unlock(&ret->d_lock);
 919	return ret;
 920}
 921EXPORT_SYMBOL(dget_parent);
 922
 923static struct dentry * __d_find_any_alias(struct inode *inode)
 924{
 925	struct dentry *alias;
 926
 927	if (hlist_empty(&inode->i_dentry))
 928		return NULL;
 929	alias = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
 930	lockref_get(&alias->d_lockref);
 931	return alias;
 932}
 933
 934/**
 935 * d_find_any_alias - find any alias for a given inode
 936 * @inode: inode to find an alias for
 937 *
 938 * If any aliases exist for the given inode, take and return a
 939 * reference for one of them.  If no aliases exist, return %NULL.
 940 */
 941struct dentry *d_find_any_alias(struct inode *inode)
 942{
 943	struct dentry *de;
 944
 945	spin_lock(&inode->i_lock);
 946	de = __d_find_any_alias(inode);
 947	spin_unlock(&inode->i_lock);
 948	return de;
 949}
 950EXPORT_SYMBOL(d_find_any_alias);
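/*
 * Illustrative sketch (not part of this file): exportfs-style code that
 * only needs *some* dentry for an inode it located by number.
 */
#if 0	/* example only */
	struct dentry *alias = d_find_any_alias(inode);

	if (alias) {
		pr_debug("inode %lu reachable as %pd\n", inode->i_ino, alias);
		dput(alias);	/* d_find_any_alias() took a reference */
	}
#endif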
 951
 952static struct dentry *__d_find_alias(struct inode *inode)
 953{
 954	struct dentry *alias;
 955
 956	if (S_ISDIR(inode->i_mode))
 957		return __d_find_any_alias(inode);
 958
 959	hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
 960		spin_lock(&alias->d_lock);
 961		if (!d_unhashed(alias)) {
 962			dget_dlock(alias);
 963			spin_unlock(&alias->d_lock);
 964			return alias;
 965		}
 966		spin_unlock(&alias->d_lock);
 967	}
 968	return NULL;
 969}
 970
 971/**
 972 * d_find_alias - grab a hashed alias of inode
 973 * @inode: inode in question
 974 *
 975 * If inode has a hashed alias, or is a directory and has any alias,
 976 * acquire the reference to alias and return it. Otherwise return NULL.
 977 * Notice that if inode is a directory there can be only one alias and
 978 * it can be unhashed only if it has no children, or if it is the root
 979 * of a filesystem, or if the directory was renamed and d_revalidate
 980 * was the first vfs operation to notice.
 981 *
 982 * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer
 983 * any other hashed alias over that one.
 984 */
 985struct dentry *d_find_alias(struct inode *inode)
 986{
 987	struct dentry *de = NULL;
 988
 989	if (!hlist_empty(&inode->i_dentry)) {
 990		spin_lock(&inode->i_lock);
 991		de = __d_find_alias(inode);
 992		spin_unlock(&inode->i_lock);
 993	}
 994	return de;
 995}
 996EXPORT_SYMBOL(d_find_alias);
 997
 998/*
 999 *  Caller MUST be holding rcu_read_lock() and be guaranteed
1000 *  that inode won't get freed until rcu_read_unlock().
1001 */
1002struct dentry *d_find_alias_rcu(struct inode *inode)
1003{
1004	struct hlist_head *l = &inode->i_dentry;
1005	struct dentry *de = NULL;
1006
1007	spin_lock(&inode->i_lock);
1008	// ->i_dentry and ->i_rcu are colocated, but the latter won't be
1009	// used without having I_FREEING set, which means no aliases left
1010	if (likely(!(inode->i_state & I_FREEING) && !hlist_empty(l))) {
1011		if (S_ISDIR(inode->i_mode)) {
1012			de = hlist_entry(l->first, struct dentry, d_u.d_alias);
1013		} else {
1014			hlist_for_each_entry(de, l, d_u.d_alias)
1015				if (!d_unhashed(de))
1016					break;
1017		}
1018	}
1019	spin_unlock(&inode->i_lock);
1020	return de;
1021}
1022
1023/*
1024 *	Try to kill dentries associated with this inode.
1025 * WARNING: you must own a reference to inode.
1026 */
1027void d_prune_aliases(struct inode *inode)
1028{
1029	LIST_HEAD(dispose);
1030	struct dentry *dentry;
1031
1032	spin_lock(&inode->i_lock);
1033	hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
1034		spin_lock(&dentry->d_lock);
1035		if (!dentry->d_lockref.count)
1036			to_shrink_list(dentry, &dispose);
1037		spin_unlock(&dentry->d_lock);
1038	}
1039	spin_unlock(&inode->i_lock);
1040	shrink_dentry_list(&dispose);
1041}
1042EXPORT_SYMBOL(d_prune_aliases);
1043
1044static inline void shrink_kill(struct dentry *victim)
1045{
1046	do {
1047		rcu_read_unlock();
1048		victim = __dentry_kill(victim);
1049		rcu_read_lock();
1050	} while (victim && lock_for_kill(victim));
1051	rcu_read_unlock();
1052	if (victim)
1053		spin_unlock(&victim->d_lock);
1054}
1055
1056void shrink_dentry_list(struct list_head *list)
1057{
1058	while (!list_empty(list)) {
1059		struct dentry *dentry;
1060
1061		dentry = list_entry(list->prev, struct dentry, d_lru);
1062		spin_lock(&dentry->d_lock);
1063		rcu_read_lock();
1064		if (!lock_for_kill(dentry)) {
1065			bool can_free;
1066			rcu_read_unlock();
1067			d_shrink_del(dentry);
1068			can_free = dentry->d_flags & DCACHE_DENTRY_KILLED;
1069			spin_unlock(&dentry->d_lock);
1070			if (can_free)
1071				dentry_free(dentry);
1072			continue;
1073		}
1074		d_shrink_del(dentry);
1075		shrink_kill(dentry);
1076	}
1077}
1078
1079static enum lru_status dentry_lru_isolate(struct list_head *item,
1080		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
1081{
1082	struct list_head *freeable = arg;
1083	struct dentry	*dentry = container_of(item, struct dentry, d_lru);
1084
1085
1086	/*
1087	 * we are inverting the lru lock/dentry->d_lock here,
1088	 * so use a trylock. If we fail to get the lock, just skip
1089	 * it
1090	 */
1091	if (!spin_trylock(&dentry->d_lock))
1092		return LRU_SKIP;
1093
1094	/*
1095	 * Referenced dentries are still in use. If they have active
1096	 * counts, just remove them from the LRU. Otherwise give them
1097	 * another pass through the LRU.
1098	 */
1099	if (dentry->d_lockref.count) {
1100		d_lru_isolate(lru, dentry);
1101		spin_unlock(&dentry->d_lock);
1102		return LRU_REMOVED;
1103	}
1104
1105	if (dentry->d_flags & DCACHE_REFERENCED) {
1106		dentry->d_flags &= ~DCACHE_REFERENCED;
1107		spin_unlock(&dentry->d_lock);
1108
1109		/*
1110		 * The list move itself will be made by the common LRU code. At
1111		 * this point, we've dropped the dentry->d_lock but keep the
1112		 * lru lock. This is safe to do, since every list movement is
1113		 * protected by the lru lock even if both locks are held.
1114		 *
1115		 * This is guaranteed by the fact that all LRU management
1116		 * functions are intermediated by the LRU API calls like
1117		 * list_lru_add_obj and list_lru_del_obj. List movement in this file
1118		 * only ever occurs through these functions or through callbacks
1119		 * like this one, that are called from the LRU API.
1120		 *
1121		 * The only exceptions to this are functions like
1122		 * shrink_dentry_list, and code that first checks for the
1123		 * DCACHE_SHRINK_LIST flag.  Those are guaranteed to be
1124		 * operating only with stack provided lists after they are
1125		 * properly isolated from the main list.  It is thus always a
1126		 * local access.
1127		 */
1128		return LRU_ROTATE;
1129	}
1130
1131	d_lru_shrink_move(lru, dentry, freeable);
1132	spin_unlock(&dentry->d_lock);
1133
1134	return LRU_REMOVED;
1135}
1136
1137/**
1138 * prune_dcache_sb - shrink the dcache
1139 * @sb: superblock
1140 * @sc: shrink control, passed to list_lru_shrink_walk()
1141 *
1142 * Attempt to shrink the superblock dcache LRU by @sc->nr_to_scan entries. This
1143 * is done when we need more memory and called from the superblock shrinker
1144 * function.
1145 *
1146 * This function may fail to free any resources if all the dentries are in
1147 * use.
1148 */
1149long prune_dcache_sb(struct super_block *sb, struct shrink_control *sc)
1150{
1151	LIST_HEAD(dispose);
1152	long freed;
1153
1154	freed = list_lru_shrink_walk(&sb->s_dentry_lru, sc,
1155				     dentry_lru_isolate, &dispose);
1156	shrink_dentry_list(&dispose);
1157	return freed;
1158}
1159
1160static enum lru_status dentry_lru_isolate_shrink(struct list_head *item,
1161		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
1162{
1163	struct list_head *freeable = arg;
1164	struct dentry	*dentry = container_of(item, struct dentry, d_lru);
1165
1166	/*
1167	 * we are inverting the lru lock/dentry->d_lock here,
1168	 * so use a trylock. If we fail to get the lock, just skip
1169	 * it
1170	 */
1171	if (!spin_trylock(&dentry->d_lock))
1172		return LRU_SKIP;
1173
1174	d_lru_shrink_move(lru, dentry, freeable);
1175	spin_unlock(&dentry->d_lock);
1176
1177	return LRU_REMOVED;
1178}
1179
1180
1181/**
1182 * shrink_dcache_sb - shrink dcache for a superblock
1183 * @sb: superblock
1184 *
1185 * Shrink the dcache for the specified super block. This is used to free
1186 * the dcache before unmounting a file system.
1187 */
1188void shrink_dcache_sb(struct super_block *sb)
1189{
1190	do {
1191		LIST_HEAD(dispose);
1192
1193		list_lru_walk(&sb->s_dentry_lru,
1194			dentry_lru_isolate_shrink, &dispose, 1024);
1195		shrink_dentry_list(&dispose);
1196	} while (list_lru_count(&sb->s_dentry_lru) > 0);
1197}
1198EXPORT_SYMBOL(shrink_dcache_sb);
1199
1200/**
1201 * enum d_walk_ret - action to take during tree walk
1202 * @D_WALK_CONTINUE:	continue walk
1203 * @D_WALK_QUIT:	quit walk
1204 * @D_WALK_NORETRY:	quit when retry is needed
1205 * @D_WALK_SKIP:	skip this dentry and its children
1206 */
1207enum d_walk_ret {
1208	D_WALK_CONTINUE,
1209	D_WALK_QUIT,
1210	D_WALK_NORETRY,
1211	D_WALK_SKIP,
1212};
1213
1214/**
1215 * d_walk - walk the dentry tree
1216 * @parent:	start of walk
1217 * @data:	data passed to @enter() and @finish()
1218 * @enter:	callback when first entering the dentry
1219 *
1220 * The @enter() callbacks are called with d_lock held.
1221 */
1222static void d_walk(struct dentry *parent, void *data,
1223		   enum d_walk_ret (*enter)(void *, struct dentry *))
1224{
1225	struct dentry *this_parent, *dentry;
1226	unsigned seq = 0;
1227	enum d_walk_ret ret;
1228	bool retry = true;
1229
1230again:
1231	read_seqbegin_or_lock(&rename_lock, &seq);
1232	this_parent = parent;
1233	spin_lock(&this_parent->d_lock);
1234
1235	ret = enter(data, this_parent);
1236	switch (ret) {
1237	case D_WALK_CONTINUE:
1238		break;
1239	case D_WALK_QUIT:
1240	case D_WALK_SKIP:
1241		goto out_unlock;
1242	case D_WALK_NORETRY:
1243		retry = false;
1244		break;
1245	}
1246repeat:
1247	dentry = d_first_child(this_parent);
1248resume:
1249	hlist_for_each_entry_from(dentry, d_sib) {
1250		if (unlikely(dentry->d_flags & DCACHE_DENTRY_CURSOR))
1251			continue;
1252
1253		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
1254
1255		ret = enter(data, dentry);
1256		switch (ret) {
1257		case D_WALK_CONTINUE:
1258			break;
1259		case D_WALK_QUIT:
1260			spin_unlock(&dentry->d_lock);
1261			goto out_unlock;
1262		case D_WALK_NORETRY:
1263			retry = false;
1264			break;
1265		case D_WALK_SKIP:
1266			spin_unlock(&dentry->d_lock);
1267			continue;
1268		}
1269
1270		if (!hlist_empty(&dentry->d_children)) {
1271			spin_unlock(&this_parent->d_lock);
1272			spin_release(&dentry->d_lock.dep_map, _RET_IP_);
1273			this_parent = dentry;
1274			spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
1275			goto repeat;
1276		}
1277		spin_unlock(&dentry->d_lock);
1278	}
1279	/*
1280	 * All done at this level ... ascend and resume the search.
1281	 */
1282	rcu_read_lock();
1283ascend:
1284	if (this_parent != parent) {
1285		dentry = this_parent;
1286		this_parent = dentry->d_parent;
1287
1288		spin_unlock(&dentry->d_lock);
1289		spin_lock(&this_parent->d_lock);
1290
1291		/* might go back up the wrong parent if we have had a rename. */
1292		if (need_seqretry(&rename_lock, seq))
1293			goto rename_retry;
1294		/* go into the first sibling still alive */
1295		hlist_for_each_entry_continue(dentry, d_sib) {
1296			if (likely(!(dentry->d_flags & DCACHE_DENTRY_KILLED))) {
1297				rcu_read_unlock();
1298				goto resume;
1299			}
1300		}
1301		goto ascend;
1302	}
1303	if (need_seqretry(&rename_lock, seq))
1304		goto rename_retry;
1305	rcu_read_unlock();
1306
1307out_unlock:
1308	spin_unlock(&this_parent->d_lock);
1309	done_seqretry(&rename_lock, seq);
1310	return;
1311
1312rename_retry:
1313	spin_unlock(&this_parent->d_lock);
1314	rcu_read_unlock();
1315	BUG_ON(seq & 1);
1316	if (!retry)
1317		return;
1318	seq = 1;
1319	goto again;
1320}
1321
1322struct check_mount {
1323	struct vfsmount *mnt;
1324	unsigned int mounted;
1325};
1326
1327static enum d_walk_ret path_check_mount(void *data, struct dentry *dentry)
1328{
1329	struct check_mount *info = data;
1330	struct path path = { .mnt = info->mnt, .dentry = dentry };
1331
1332	if (likely(!d_mountpoint(dentry)))
1333		return D_WALK_CONTINUE;
1334	if (__path_is_mountpoint(&path)) {
1335		info->mounted = 1;
1336		return D_WALK_QUIT;
1337	}
1338	return D_WALK_CONTINUE;
1339}
1340
1341/**
1342 * path_has_submounts - check for mounts over a dentry in the
1343 *                      current namespace.
1344 * @parent: path to check.
1345 *
1346 * Return true if the parent or its subdirectories contain
1347 * a mount point in the current namespace.
1348 */
1349int path_has_submounts(const struct path *parent)
1350{
1351	struct check_mount data = { .mnt = parent->mnt, .mounted = 0 };
1352
1353	read_seqlock_excl(&mount_lock);
1354	d_walk(parent->dentry, &data, path_check_mount);
1355	read_sequnlock_excl(&mount_lock);
1356
1357	return data.mounted;
1358}
1359EXPORT_SYMBOL(path_has_submounts);
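/*
 * Illustrative sketch (not part of this file): an autofs-style expiry
 * check declining to expire a subtree that still has something mounted
 * on it in the current namespace.
 */
#if 0	/* example only */
	if (path_has_submounts(&path))
		return -EBUSY;	/* busy: a mount point lives below */
#endif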
1360
1361/*
1362 * Called by mount code to set a mountpoint and check if the mountpoint is
1363 * reachable (e.g. NFS can unhash a directory dentry and then the complete
1364 * subtree can become unreachable).
1365 *
1366 * Only one of d_invalidate() and d_set_mounted() must succeed.  For
1367 * this reason take rename_lock and d_lock on dentry and ancestors.
1368 */
1369int d_set_mounted(struct dentry *dentry)
1370{
1371	struct dentry *p;
1372	int ret = -ENOENT;
1373	write_seqlock(&rename_lock);
1374	for (p = dentry->d_parent; !IS_ROOT(p); p = p->d_parent) {
1375		/* Need exclusion wrt. d_invalidate() */
1376		spin_lock(&p->d_lock);
1377		if (unlikely(d_unhashed(p))) {
1378			spin_unlock(&p->d_lock);
1379			goto out;
1380		}
1381		spin_unlock(&p->d_lock);
1382	}
1383	spin_lock(&dentry->d_lock);
1384	if (!d_unlinked(dentry)) {
1385		ret = -EBUSY;
1386		if (!d_mountpoint(dentry)) {
1387			dentry->d_flags |= DCACHE_MOUNTED;
1388			ret = 0;
1389		}
1390	}
1391	spin_unlock(&dentry->d_lock);
1392out:
1393	write_sequnlock(&rename_lock);
1394	return ret;
1395}
1396
1397/*
1398 * Search the dentry child list of the specified parent,
1399 * and move any unused dentries to the end of the unused
1400 * list for prune_dcache(). We descend to the next level
1401 * whenever the d_children list is non-empty and continue
1402 * searching.
1403 *
1404 * It returns zero iff there are no unused children,
1405 * otherwise it returns the number of children moved to
1406 * the end of the unused list. This may not be the total
1407 * number of unused children, because select_parent can
1408 * drop the lock and return early due to latency
1409 * constraints.
1410 */
1411
1412struct select_data {
1413	struct dentry *start;
1414	union {
1415		long found;
1416		struct dentry *victim;
1417	};
1418	struct list_head dispose;
1419};
1420
1421static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
1422{
1423	struct select_data *data = _data;
1424	enum d_walk_ret ret = D_WALK_CONTINUE;
1425
1426	if (data->start == dentry)
1427		goto out;
1428
1429	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
1430		data->found++;
1431	} else if (!dentry->d_lockref.count) {
1432		to_shrink_list(dentry, &data->dispose);
1433		data->found++;
1434	} else if (dentry->d_lockref.count < 0) {
1435		data->found++;
1436	}
1437	/*
1438	 * We can return to the caller if we have found some (this
1439	 * ensures forward progress). We'll be coming back to find
1440	 * the rest.
1441	 */
1442	if (!list_empty(&data->dispose))
1443		ret = need_resched() ? D_WALK_QUIT : D_WALK_NORETRY;
1444out:
1445	return ret;
1446}
1447
1448static enum d_walk_ret select_collect2(void *_data, struct dentry *dentry)
1449{
1450	struct select_data *data = _data;
1451	enum d_walk_ret ret = D_WALK_CONTINUE;
1452
1453	if (data->start == dentry)
1454		goto out;
1455
1456	if (!dentry->d_lockref.count) {
1457		if (dentry->d_flags & DCACHE_SHRINK_LIST) {
1458			rcu_read_lock();
1459			data->victim = dentry;
1460			return D_WALK_QUIT;
1461		}
1462		to_shrink_list(dentry, &data->dispose);
1463	}
1464	/*
1465	 * We can return to the caller if we have found some (this
1466	 * ensures forward progress). We'll be coming back to find
1467	 * the rest.
1468	 */
1469	if (!list_empty(&data->dispose))
1470		ret = need_resched() ? D_WALK_QUIT : D_WALK_NORETRY;
1471out:
1472	return ret;
1473}
1474
1475/**
1476 * shrink_dcache_parent - prune dcache
1477 * @parent: parent of entries to prune
1478 *
1479 * Prune the dcache to remove unused children of the parent dentry.
1480 */
1481void shrink_dcache_parent(struct dentry *parent)
1482{
1483	for (;;) {
1484		struct select_data data = {.start = parent};
1485
1486		INIT_LIST_HEAD(&data.dispose);
1487		d_walk(parent, &data, select_collect);
1488
1489		if (!list_empty(&data.dispose)) {
1490			shrink_dentry_list(&data.dispose);
1491			continue;
1492		}
1493
1494		cond_resched();
1495		if (!data.found)
1496			break;
1497		data.victim = NULL;
1498		d_walk(parent, &data, select_collect2);
1499		if (data.victim) {
1500			spin_lock(&data.victim->d_lock);
1501			if (!lock_for_kill(data.victim)) {
1502				spin_unlock(&data.victim->d_lock);
1503				rcu_read_unlock();
1504			} else {
1505				shrink_kill(data.victim);
1506			}
1507		}
1508		if (!list_empty(&data.dispose))
1509			shrink_dentry_list(&data.dispose);
1510	}
1511}
1512EXPORT_SYMBOL(shrink_dcache_parent);
1513
1514static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
1515{
1516	/* it has busy descendants; complain about those instead */
1517	if (!hlist_empty(&dentry->d_children))
1518		return D_WALK_CONTINUE;
1519
1520	/* root with refcount 1 is fine */
1521	if (dentry == _data && dentry->d_lockref.count == 1)
1522		return D_WALK_CONTINUE;
1523
1524	WARN(1, "BUG: Dentry %p{i=%lx,n=%pd} "
1525			" still in use (%d) [unmount of %s %s]\n",
1526		       dentry,
1527		       dentry->d_inode ?
1528		       dentry->d_inode->i_ino : 0UL,
1529		       dentry,
1530		       dentry->d_lockref.count,
1531		       dentry->d_sb->s_type->name,
1532		       dentry->d_sb->s_id);
1533	return D_WALK_CONTINUE;
1534}
1535
1536static void do_one_tree(struct dentry *dentry)
1537{
1538	shrink_dcache_parent(dentry);
1539	d_walk(dentry, dentry, umount_check);
1540	d_drop(dentry);
1541	dput(dentry);
1542}
1543
1544/*
1545 * destroy the dentries attached to a superblock on unmounting
1546 */
1547void shrink_dcache_for_umount(struct super_block *sb)
1548{
1549	struct dentry *dentry;
1550
1551	WARN(down_read_trylock(&sb->s_umount), "s_umount should've been locked");
1552
1553	dentry = sb->s_root;
1554	sb->s_root = NULL;
1555	do_one_tree(dentry);
1556
1557	while (!hlist_bl_empty(&sb->s_roots)) {
1558		dentry = dget(hlist_bl_entry(hlist_bl_first(&sb->s_roots), struct dentry, d_hash));
1559		do_one_tree(dentry);
1560	}
1561}
1562
1563static enum d_walk_ret find_submount(void *_data, struct dentry *dentry)
1564{
1565	struct dentry **victim = _data;
1566	if (d_mountpoint(dentry)) {
1567		*victim = dget_dlock(dentry);
1568		return D_WALK_QUIT;
1569	}
1570	return D_WALK_CONTINUE;
1571}
1572
1573/**
1574 * d_invalidate - detach submounts, prune dcache, and drop
1575 * @dentry: dentry to invalidate (aka detach, prune and drop)
1576 */
1577void d_invalidate(struct dentry *dentry)
1578{
1579	bool had_submounts = false;
1580	spin_lock(&dentry->d_lock);
1581	if (d_unhashed(dentry)) {
1582		spin_unlock(&dentry->d_lock);
1583		return;
1584	}
1585	__d_drop(dentry);
1586	spin_unlock(&dentry->d_lock);
1587
1588	/* Negative dentries can be dropped without further checks */
1589	if (!dentry->d_inode)
1590		return;
1591
1592	shrink_dcache_parent(dentry);
1593	for (;;) {
1594		struct dentry *victim = NULL;
1595		d_walk(dentry, &victim, find_submount);
1596		if (!victim) {
1597			if (had_submounts)
1598				shrink_dcache_parent(dentry);
1599			return;
1600		}
1601		had_submounts = true;
1602		detach_mounts(victim);
1603		dput(victim);
1604	}
1605}
1606EXPORT_SYMBOL(d_invalidate);
1607
1608/**
1609 * __d_alloc	-	allocate a dcache entry
1610 * @sb: filesystem it will belong to
1611 * @name: qstr of the name
1612 *
1613 * Allocates a dentry. It returns %NULL if there is insufficient memory
1614 * available. On success the dentry is returned. The name passed in is
1615 * copied, so the caller's copy may be reused after this call.
1616 */
1617 
1618static struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
1619{
1620	struct dentry *dentry;
1621	char *dname;
1622	int err;
1623
1624	dentry = kmem_cache_alloc_lru(dentry_cache, &sb->s_dentry_lru,
1625				      GFP_KERNEL);
1626	if (!dentry)
1627		return NULL;
1628
1629	/*
1630	 * We guarantee that the inline name is always NUL-terminated.
1631	 * This way the memcpy() done by the name switching in rename
1632	 * will still always have a NUL at the end, even if we might
1633	 * be overwriting an internal NUL character
1634	 */
1635	dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
1636	if (unlikely(!name)) {
1637		name = &slash_name;
1638		dname = dentry->d_iname;
1639	} else if (name->len > DNAME_INLINE_LEN-1) {
1640		size_t size = offsetof(struct external_name, name[1]);
1641		struct external_name *p = kmalloc(size + name->len,
1642						  GFP_KERNEL_ACCOUNT |
1643						  __GFP_RECLAIMABLE);
1644		if (!p) {
1645			kmem_cache_free(dentry_cache, dentry);
1646			return NULL;
1647		}
1648		atomic_set(&p->u.count, 1);
1649		dname = p->name;
1650	} else  {
1651		dname = dentry->d_iname;
1652	}
1653
1654	dentry->d_name.len = name->len;
1655	dentry->d_name.hash = name->hash;
1656	memcpy(dname, name->name, name->len);
1657	dname[name->len] = 0;
1658
1659	/* Make sure we always see the terminating NUL character */
1660	smp_store_release(&dentry->d_name.name, dname); /* ^^^ */
1661
1662	dentry->d_lockref.count = 1;
1663	dentry->d_flags = 0;
1664	spin_lock_init(&dentry->d_lock);
1665	seqcount_spinlock_init(&dentry->d_seq, &dentry->d_lock);
1666	dentry->d_inode = NULL;
1667	dentry->d_parent = dentry;
1668	dentry->d_sb = sb;
1669	dentry->d_op = NULL;
1670	dentry->d_fsdata = NULL;
1671	INIT_HLIST_BL_NODE(&dentry->d_hash);
1672	INIT_LIST_HEAD(&dentry->d_lru);
1673	INIT_HLIST_HEAD(&dentry->d_children);
1674	INIT_HLIST_NODE(&dentry->d_u.d_alias);
1675	INIT_HLIST_NODE(&dentry->d_sib);
1676	d_set_d_op(dentry, dentry->d_sb->s_d_op);
1677
1678	if (dentry->d_op && dentry->d_op->d_init) {
1679		err = dentry->d_op->d_init(dentry);
1680		if (err) {
1681			if (dname_external(dentry))
1682				kfree(external_name(dentry));
1683			kmem_cache_free(dentry_cache, dentry);
1684			return NULL;
1685		}
1686	}
1687
1688	this_cpu_inc(nr_dentry);
1689
1690	return dentry;
1691}
1692
1693/**
1694 * d_alloc	-	allocate a dcache entry
1695 * @parent: parent of entry to allocate
1696 * @name: qstr of the name
1697 *
1698 * Allocates a dentry. It returns %NULL if there is insufficient memory
1699 * available. On success the dentry is returned. The name passed in is
1700 * copied, so the caller's copy may be reused after this call.
1701 */
1702struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
1703{
1704	struct dentry *dentry = __d_alloc(parent->d_sb, name);
1705	if (!dentry)
1706		return NULL;
1707	spin_lock(&parent->d_lock);
1708	/*
1709	 * don't need child lock because it is not subject
1710	 * to concurrency here
1711	 */
1712	dentry->d_parent = dget_dlock(parent);
1713	hlist_add_head(&dentry->d_sib, &parent->d_children);
1714	spin_unlock(&parent->d_lock);
1715
1716	return dentry;
1717}
1718EXPORT_SYMBOL(d_alloc);
1719
1720struct dentry *d_alloc_anon(struct super_block *sb)
1721{
1722	return __d_alloc(sb, NULL);
1723}
1724EXPORT_SYMBOL(d_alloc_anon);
1725
1726struct dentry *d_alloc_cursor(struct dentry * parent)
1727{
1728	struct dentry *dentry = d_alloc_anon(parent->d_sb);
1729	if (dentry) {
1730		dentry->d_flags |= DCACHE_DENTRY_CURSOR;
1731		dentry->d_parent = dget(parent);
1732	}
1733	return dentry;
1734}
1735
1736/**
1737 * d_alloc_pseudo - allocate a dentry (for lookup-less filesystems)
1738 * @sb: the superblock
1739 * @name: qstr of the name
1740 *
1741 * For a filesystem that just pins its dentries in memory and never
1742 * performs lookups at all, return an unhashed IS_ROOT dentry.
1743 * This is used for pipes, sockets et al. - the stuff that should
1744 * never be anyone's children or parents.  Unlike all other
1745 * dentries, these will not have RCU delay between dropping the
1746 * last reference and freeing them.
1747 *
1748 * The only user is alloc_file_pseudo() and that's what should
1749 * be considered a public interface.  Don't use directly.
1750 */
1751struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name)
1752{
1753	static const struct dentry_operations anon_ops = {
1754		.d_dname = simple_dname
1755	};
1756	struct dentry *dentry = __d_alloc(sb, name);
1757	if (likely(dentry)) {
1758		dentry->d_flags |= DCACHE_NORCU;
1759		if (!sb->s_d_op)
1760			d_set_d_op(dentry, &anon_ops);
1761	}
1762	return dentry;
1763}
1764
1765struct dentry *d_alloc_name(struct dentry *parent, const char *name)
1766{
1767	struct qstr q;
1768
1769	q.name = name;
1770	q.hash_len = hashlen_string(parent, name);
1771	return d_alloc(parent, &q);
1772}
1773EXPORT_SYMBOL(d_alloc_name);
1774
1775void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
1776{
1777	WARN_ON_ONCE(dentry->d_op);
1778	WARN_ON_ONCE(dentry->d_flags & (DCACHE_OP_HASH	|
1779				DCACHE_OP_COMPARE	|
1780				DCACHE_OP_REVALIDATE	|
1781				DCACHE_OP_WEAK_REVALIDATE	|
1782				DCACHE_OP_DELETE	|
1783				DCACHE_OP_REAL));
1784	dentry->d_op = op;
1785	if (!op)
1786		return;
1787	if (op->d_hash)
1788		dentry->d_flags |= DCACHE_OP_HASH;
1789	if (op->d_compare)
1790		dentry->d_flags |= DCACHE_OP_COMPARE;
1791	if (op->d_revalidate)
1792		dentry->d_flags |= DCACHE_OP_REVALIDATE;
1793	if (op->d_weak_revalidate)
1794		dentry->d_flags |= DCACHE_OP_WEAK_REVALIDATE;
1795	if (op->d_delete)
1796		dentry->d_flags |= DCACHE_OP_DELETE;
1797	if (op->d_prune)
1798		dentry->d_flags |= DCACHE_OP_PRUNE;
1799	if (op->d_real)
1800		dentry->d_flags |= DCACHE_OP_REAL;
1801
1802}
1803EXPORT_SYMBOL(d_set_d_op);
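/*
 * Illustrative sketch (not part of this file): a filesystem wiring up
 * dentry operations so the flag computation above takes effect for every
 * dentry; "myfs_*" names are hypothetical, always_delete_dentry() is the
 * libfs helper.
 */
#if 0	/* example only */
static const struct dentry_operations myfs_dentry_ops = {
	.d_revalidate	= myfs_d_revalidate,	/* sets DCACHE_OP_REVALIDATE */
	.d_delete	= always_delete_dentry,	/* sets DCACHE_OP_DELETE */
};

static int myfs_fill_super(struct super_block *sb, void *data, int silent)
{
	sb->s_d_op = &myfs_dentry_ops;	/* picked up by __d_alloc() above */
	return 0;			/* remaining setup elided */
}
#endif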
1804
1805static unsigned d_flags_for_inode(struct inode *inode)
1806{
1807	unsigned add_flags = DCACHE_REGULAR_TYPE;
1808
1809	if (!inode)
1810		return DCACHE_MISS_TYPE;
1811
1812	if (S_ISDIR(inode->i_mode)) {
1813		add_flags = DCACHE_DIRECTORY_TYPE;
1814		if (unlikely(!(inode->i_opflags & IOP_LOOKUP))) {
1815			if (unlikely(!inode->i_op->lookup))
1816				add_flags = DCACHE_AUTODIR_TYPE;
1817			else
1818				inode->i_opflags |= IOP_LOOKUP;
1819		}
1820		goto type_determined;
1821	}
1822
1823	if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) {
1824		if (unlikely(inode->i_op->get_link)) {
1825			add_flags = DCACHE_SYMLINK_TYPE;
1826			goto type_determined;
1827		}
1828		inode->i_opflags |= IOP_NOFOLLOW;
1829	}
1830
1831	if (unlikely(!S_ISREG(inode->i_mode)))
1832		add_flags = DCACHE_SPECIAL_TYPE;
1833
1834type_determined:
1835	if (unlikely(IS_AUTOMOUNT(inode)))
1836		add_flags |= DCACHE_NEED_AUTOMOUNT;
1837	return add_flags;
1838}
1839
1840static void __d_instantiate(struct dentry *dentry, struct inode *inode)
1841{
1842	unsigned add_flags = d_flags_for_inode(inode);
1843	WARN_ON(d_in_lookup(dentry));
1844
1845	spin_lock(&dentry->d_lock);
1846	/*
1847	 * Decrement negative dentry count if it was in the LRU list.
1848	 */
1849	if (dentry->d_flags & DCACHE_LRU_LIST)
1850		this_cpu_dec(nr_dentry_negative);
1851	hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
1852	raw_write_seqcount_begin(&dentry->d_seq);
1853	__d_set_inode_and_type(dentry, inode, add_flags);
1854	raw_write_seqcount_end(&dentry->d_seq);
1855	fsnotify_update_flags(dentry);
1856	spin_unlock(&dentry->d_lock);
1857}
1858
1859/**
1860 * d_instantiate - fill in inode information for a dentry
1861 * @entry: dentry to complete
1862 * @inode: inode to attach to this dentry
1863 *
1864 * Fill in inode information in the entry.
1865 *
1866 * This turns negative dentries into productive full members
1867 * of society.
1868 *
1869 * NOTE! This assumes that the inode count has been incremented
1870 * (or otherwise set) by the caller to indicate that it is now
1871 * in use by the dcache.
1872 */
1873 
1874void d_instantiate(struct dentry *entry, struct inode * inode)
1875{
1876	BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
1877	if (inode) {
1878		security_d_instantiate(entry, inode);
1879		spin_lock(&inode->i_lock);
1880		__d_instantiate(entry, inode);
1881		spin_unlock(&inode->i_lock);
1882	}
1883}
1884EXPORT_SYMBOL(d_instantiate);
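
/*
 * Illustrative sketch (not part of the original file): a typical ->create()
 * ends by instantiating the still-negative dentry, at which point the inode
 * reference is handed over to the dcache.  myfs_new_inode() is a
 * hypothetical helper returning a referenced inode or an ERR_PTR.
 *
 *	static int myfs_create(struct mnt_idmap *idmap, struct inode *dir,
 *			       struct dentry *dentry, umode_t mode, bool excl)
 *	{
 *		struct inode *inode = myfs_new_inode(dir, mode);
 *		if (IS_ERR(inode))
 *			return PTR_ERR(inode);
 *		d_instantiate(dentry, inode);
 *		return 0;
 *	}
 */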
1885
1886/*
1887 * This should be equivalent to d_instantiate() + unlock_new_inode(),
1888 * with lockdep-related part of unlock_new_inode() done before
1889 * anything else.  Use that instead of open-coding d_instantiate()/
1890 * unlock_new_inode() combinations.
1891 */
1892void d_instantiate_new(struct dentry *entry, struct inode *inode)
1893{
1894	BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
1895	BUG_ON(!inode);
1896	lockdep_annotate_inode_mutex_key(inode);
1897	security_d_instantiate(entry, inode);
1898	spin_lock(&inode->i_lock);
1899	__d_instantiate(entry, inode);
1900	WARN_ON(!(inode->i_state & I_NEW));
1901	inode->i_state &= ~I_NEW & ~I_CREATING;
1902	smp_mb();
1903	wake_up_bit(&inode->i_state, __I_NEW);
1904	spin_unlock(&inode->i_lock);
1905}
1906EXPORT_SYMBOL(d_instantiate_new);
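
/*
 * Illustrative sketch (not part of the original file): a filesystem whose
 * freshly created inode still has I_NEW set (e.g. after new_inode() plus
 * insert_inode_locked()) replaces the d_instantiate() + unlock_new_inode()
 * pair at the end of its ->create()/->mkdir() with a single call:
 *
 *	d_instantiate_new(dentry, inode);
 */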
1907
1908struct dentry *d_make_root(struct inode *root_inode)
1909{
1910	struct dentry *res = NULL;
1911
1912	if (root_inode) {
1913		res = d_alloc_anon(root_inode->i_sb);
1914		if (res)
1915			d_instantiate(res, root_inode);
1916		else
1917			iput(root_inode);
1918	}
1919	return res;
1920}
1921EXPORT_SYMBOL(d_make_root);
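
/*
 * Illustrative sketch (not part of the original file): the tail of a
 * fill_super() instance.  d_make_root() consumes the inode reference even
 * on failure, so no iput() is needed on the error path.
 * myfs_get_root_inode() is a hypothetical helper.
 *
 *	inode = myfs_get_root_inode(sb);
 *	sb->s_root = d_make_root(inode);
 *	if (!sb->s_root)
 *		return -ENOMEM;
 *	return 0;
 */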
1922
1923static struct dentry *__d_obtain_alias(struct inode *inode, bool disconnected)
1924{
1925	struct super_block *sb;
1926	struct dentry *new, *res;
1927
1928	if (!inode)
1929		return ERR_PTR(-ESTALE);
1930	if (IS_ERR(inode))
1931		return ERR_CAST(inode);
1932
1933	sb = inode->i_sb;
1934
1935	res = d_find_any_alias(inode); /* existing alias? */
1936	if (res)
1937		goto out;
1938
1939	new = d_alloc_anon(sb);
1940	if (!new) {
1941		res = ERR_PTR(-ENOMEM);
1942		goto out;
1943	}
1944
1945	security_d_instantiate(new, inode);
1946	spin_lock(&inode->i_lock);
1947	res = __d_find_any_alias(inode); /* recheck under lock */
1948	if (likely(!res)) { /* still no alias, attach a disconnected dentry */
1949		unsigned add_flags = d_flags_for_inode(inode);
1950
1951		if (disconnected)
1952			add_flags |= DCACHE_DISCONNECTED;
1953
1954		spin_lock(&new->d_lock);
1955		__d_set_inode_and_type(new, inode, add_flags);
1956		hlist_add_head(&new->d_u.d_alias, &inode->i_dentry);
1957		if (!disconnected) {
1958			hlist_bl_lock(&sb->s_roots);
1959			hlist_bl_add_head(&new->d_hash, &sb->s_roots);
1960			hlist_bl_unlock(&sb->s_roots);
1961		}
1962		spin_unlock(&new->d_lock);
1963		spin_unlock(&inode->i_lock);
1964		inode = NULL; /* consumed by new->d_inode */
1965		res = new;
1966	} else {
1967		spin_unlock(&inode->i_lock);
1968		dput(new);
1969	}
1970
1971 out:
1972	iput(inode);
1973	return res;
1974}
1975
1976/**
1977 * d_obtain_alias - find or allocate a DISCONNECTED dentry for a given inode
1978 * @inode: inode to allocate the dentry for
1979 *
1980 * Obtain a dentry for an inode resulting from NFS filehandle conversion or
1981 * similar open by handle operations.  The returned dentry may be anonymous,
1982 * or may have a full name (if the inode was already in the cache).
1983 *
1984 * When called on a directory inode, we must ensure that the inode only ever
1985 * has one dentry.  If a dentry is found, that is returned instead of
1986 * allocating a new one.
1987 *
1988 * On successful return, the reference to the inode has been transferred
1989 * to the dentry.  In case of an error the reference on the inode is released.
1990 * To make it easier to use in export operations a %NULL or IS_ERR inode may
1991 * be passed in and the error will be propagated to the return value,
1992 * with a %NULL @inode replaced by ERR_PTR(-ESTALE).
1993 */
1994struct dentry *d_obtain_alias(struct inode *inode)
1995{
1996	return __d_obtain_alias(inode, true);
1997}
1998EXPORT_SYMBOL(d_obtain_alias);
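
/*
 * Illustrative sketch (not part of the original file): the classic caller
 * is an export_operations ->fh_to_dentry() instance, which can pass the
 * result of its inode lookup straight through, since d_obtain_alias()
 * copes with NULL and IS_ERR inodes.  myfs_iget() is hypothetical.
 *
 *	static struct dentry *myfs_fh_to_dentry(struct super_block *sb,
 *				struct fid *fid, int fh_len, int fh_type)
 *	{
 *		return d_obtain_alias(myfs_iget(sb, fid->i32.ino));
 *	}
 */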
1999
2000/**
2001 * d_obtain_root - find or allocate a dentry for a given inode
2002 * @inode: inode to allocate the dentry for
2003 *
2004 * Obtain an IS_ROOT dentry for the root of a filesystem.
2005 *
2006 * We must ensure that directory inodes only ever have one dentry.  If a
2007 * dentry is found, that is returned instead of allocating a new one.
2008 *
2009 * On successful return, the reference to the inode has been transferred
2010 * to the dentry.  In case of an error the reference on the inode is
2011 * released.  A %NULL or IS_ERR inode may be passed in, and the
2012 * error will be propagated to the return value, with a %NULL @inode
2013 * replaced by ERR_PTR(-ESTALE).
2014 */
2015struct dentry *d_obtain_root(struct inode *inode)
2016{
2017	return __d_obtain_alias(inode, false);
2018}
2019EXPORT_SYMBOL(d_obtain_root);
2020
2021/**
2022 * d_add_ci - lookup or allocate new dentry with case-exact name
2023 * @inode:  the inode case-insensitive lookup has found
2024 * @dentry: the negative dentry that was passed to the parent's lookup func
2025 * @name:   the case-exact name to be associated with the returned dentry
2026 *
2027 * This is to avoid filling the dcache with case-insensitive names to the
2028 * same inode, only the actual correct case is stored in the dcache for
2029 * case-insensitive filesystems.
2030 *
2031 * If the lookup matched case-insensitively and a dentry with the
2032 * case-exact name already exists in the dcache, use it and return it.
2033 *
2034 * If no entry exists with the exact-case name, allocate a new dentry
2035 * with the exact case, and return the spliced entry.
2036 */
2037struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
2038			struct qstr *name)
2039{
2040	struct dentry *found, *res;
2041
2042	/*
2043	 * First check if a dentry matching the name already exists,
2044	 * if not go ahead and create it now.
2045	 */
2046	found = d_hash_and_lookup(dentry->d_parent, name);
2047	if (found) {
2048		iput(inode);
2049		return found;
2050	}
2051	if (d_in_lookup(dentry)) {
2052		found = d_alloc_parallel(dentry->d_parent, name,
2053					dentry->d_wait);
2054		if (IS_ERR(found) || !d_in_lookup(found)) {
2055			iput(inode);
2056			return found;
2057		}
2058	} else {
2059		found = d_alloc(dentry->d_parent, name);
2060		if (!found) {
2061			iput(inode);
2062			return ERR_PTR(-ENOMEM);
2063		} 
2064	}
2065	res = d_splice_alias(inode, found);
2066	if (res) {
2067		d_lookup_done(found);
2068		dput(found);
2069		return res;
2070	}
2071	return found;
2072}
2073EXPORT_SYMBOL(d_add_ci);
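
/*
 * Illustrative sketch (not part of the original file): a case-insensitive
 * filesystem's ->lookup() that matched @inode under a differently-cased
 * on-disk name builds a qstr with the exact on-disk spelling and lets
 * d_add_ci() splice it in.  disk_name/disk_len are hypothetical.
 *
 *	struct qstr ci_name = QSTR_INIT(disk_name, disk_len);
 *	return d_add_ci(dentry, inode, &ci_name);
 */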
2074
2075/**
2076 * d_same_name - compare dentry name with case-exact name
2077 * @dentry: dentry whose name to compare
2078 * @parent: parent dentry supplying the comparison rules (->d_compare)
2079 * @name:   the case-exact name to compare against
2080 *
2081 * Return: true if the names match, false otherwise
2082 */
2083bool d_same_name(const struct dentry *dentry, const struct dentry *parent,
2084		 const struct qstr *name)
2085{
2086	if (likely(!(parent->d_flags & DCACHE_OP_COMPARE))) {
2087		if (dentry->d_name.len != name->len)
2088			return false;
2089		return dentry_cmp(dentry, name->name, name->len) == 0;
2090	}
2091	return parent->d_op->d_compare(dentry,
2092				       dentry->d_name.len, dentry->d_name.name,
2093				       name) == 0;
2094}
2095EXPORT_SYMBOL_GPL(d_same_name);
2096
2097/*
2098 * This is __d_lookup_rcu() when the parent dentry has
2099 * DCACHE_OP_COMPARE, which makes things much nastier.
2100 */
2101static noinline struct dentry *__d_lookup_rcu_op_compare(
2102	const struct dentry *parent,
2103	const struct qstr *name,
2104	unsigned *seqp)
2105{
2106	u64 hashlen = name->hash_len;
2107	struct hlist_bl_head *b = d_hash(hashlen_hash(hashlen));
2108	struct hlist_bl_node *node;
2109	struct dentry *dentry;
2110
2111	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
2112		int tlen;
2113		const char *tname;
2114		unsigned seq;
2115
2116seqretry:
2117		seq = raw_seqcount_begin(&dentry->d_seq);
2118		if (dentry->d_parent != parent)
2119			continue;
2120		if (d_unhashed(dentry))
2121			continue;
2122		if (dentry->d_name.hash != hashlen_hash(hashlen))
2123			continue;
2124		tlen = dentry->d_name.len;
2125		tname = dentry->d_name.name;
2126		/* we want a consistent (name,len) pair */
2127		if (read_seqcount_retry(&dentry->d_seq, seq)) {
2128			cpu_relax();
2129			goto seqretry;
2130		}
2131		if (parent->d_op->d_compare(dentry, tlen, tname, name) != 0)
2132			continue;
2133		*seqp = seq;
2134		return dentry;
2135	}
2136	return NULL;
2137}
2138
2139/**
2140 * __d_lookup_rcu - search for a dentry (racy, store-free)
2141 * @parent: parent dentry
2142 * @name: qstr of name we wish to find
2143 * @seqp: returns d_seq value at the point where the dentry was found
2144 * Returns: dentry, or NULL
2145 *
2146 * __d_lookup_rcu is the dcache lookup function for rcu-walk name
2147 * resolution (store-free path walking) design described in
2148 * Documentation/filesystems/path-lookup.txt.
2149 *
2150 * This is not to be used outside core vfs.
2151 *
2152 * __d_lookup_rcu must only be used in rcu-walk mode, ie. with vfsmount lock
2153 * held, and rcu_read_lock held. The returned dentry must not be stored into
2154 * without taking d_lock and checking d_seq sequence count against @seq
2155 * returned here.
2156 *
2157 * A refcount may be taken on the found dentry with the d_rcu_to_refcount
2158 * function.
2159 *
2160 * Alternatively, __d_lookup_rcu may be called again to look up the child of
2161 * the returned dentry, so long as its parent's seqlock is checked after the
2162 * child is looked up. Thus, an interlocking stepping of sequence lock checks
2163 * is formed, giving integrity down the path walk.
2164 *
2165 * NOTE! The caller *has* to check the resulting dentry against the sequence
2166 * number we've returned before using any of the resulting dentry state!
2167 */
2168struct dentry *__d_lookup_rcu(const struct dentry *parent,
2169				const struct qstr *name,
2170				unsigned *seqp)
2171{
2172	u64 hashlen = name->hash_len;
2173	const unsigned char *str = name->name;
2174	struct hlist_bl_head *b = d_hash(hashlen_hash(hashlen));
2175	struct hlist_bl_node *node;
2176	struct dentry *dentry;
2177
2178	/*
2179	 * Note: There is significant duplication with __d_lookup which is
2180	 * required to prevent single threaded performance regressions
2181	 * especially on architectures where smp_rmb (in seqcounts) are costly.
2182	 * Keep the two functions in sync.
2183	 */
2184
2185	if (unlikely(parent->d_flags & DCACHE_OP_COMPARE))
2186		return __d_lookup_rcu_op_compare(parent, name, seqp);
2187
2188	/*
2189	 * The hash list is protected using RCU.
2190	 *
2191	 * Carefully use d_seq when comparing a candidate dentry, to avoid
2192	 * races with d_move().
2193	 *
2194	 * It is possible that concurrent renames can mess up our list
2195	 * walk here and result in missing our dentry, resulting in the
2196	 * false-negative result. d_lookup() protects against concurrent
2197	 * renames using rename_lock seqlock.
2198	 *
2199	 * See Documentation/filesystems/path-lookup.txt for more details.
2200	 */
2201	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
2202		unsigned seq;
2203
2204		/*
2205		 * The dentry sequence count protects us from concurrent
2206		 * renames, and thus protects parent and name fields.
2207		 *
2208		 * The caller must perform a seqcount check in order
2209		 * to do anything useful with the returned dentry.
2210		 *
2211		 * NOTE! We do a "raw" seqcount_begin here. That means that
2212		 * we don't wait for the sequence count to stabilize if it
2213		 * is in the middle of a sequence change. If we do the slow
2214		 * dentry compare, we will do seqretries until it is stable,
2215		 * and if we end up with a successful lookup, we actually
2216		 * want to exit RCU lookup anyway.
2217		 *
2218		 * Note that raw_seqcount_begin still *does* smp_rmb(), so
2219		 * we are still guaranteed NUL-termination of ->d_name.name.
2220		 */
2221		seq = raw_seqcount_begin(&dentry->d_seq);
2222		if (dentry->d_parent != parent)
2223			continue;
2224		if (d_unhashed(dentry))
2225			continue;
2226		if (dentry->d_name.hash_len != hashlen)
2227			continue;
2228		if (dentry_cmp(dentry, str, hashlen_len(hashlen)) != 0)
2229			continue;
2230		*seqp = seq;
2231		return dentry;
2232	}
2233	return NULL;
2234}
2235
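/*
 * Illustrative sketch (not part of the original file): nothing about the
 * returned dentry may be trusted until the d_seq value handed back through
 * @seqp has been revalidated:
 *
 *	unsigned seq;
 *	struct dentry *dentry = __d_lookup_rcu(parent, name, &seq);
 *
 *	if (dentry && read_seqcount_retry(&dentry->d_seq, seq))
 *		goto fallback_to_ref_walk;
 */
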
2236/**
2237 * d_lookup - search for a dentry
2238 * @parent: parent dentry
2239 * @name: qstr of name we wish to find
2240 * Returns: dentry, or NULL
2241 *
2242 * d_lookup searches the children of the parent dentry for the name in
2243 * question. If the dentry is found its reference count is incremented and the
2244 * dentry is returned. The caller must use dput to free the entry when it has
2245 * finished using it. %NULL is returned if the dentry does not exist.
2246 */
2247struct dentry *d_lookup(const struct dentry *parent, const struct qstr *name)
2248{
2249	struct dentry *dentry;
2250	unsigned seq;
2251
2252	do {
2253		seq = read_seqbegin(&rename_lock);
2254		dentry = __d_lookup(parent, name);
2255		if (dentry)
2256			break;
2257	} while (read_seqretry(&rename_lock, seq));
2258	return dentry;
2259}
2260EXPORT_SYMBOL(d_lookup);
2261
2262/**
2263 * __d_lookup - search for a dentry (racy)
2264 * @parent: parent dentry
2265 * @name: qstr of name we wish to find
2266 * Returns: dentry, or NULL
2267 *
2268 * __d_lookup is like d_lookup, however it may (rarely) return a
2269 * false-negative result due to unrelated rename activity.
2270 *
2271 * __d_lookup is slightly faster by avoiding rename_lock read seqlock,
2272 * however it must be used carefully, eg. with a following d_lookup in
2273 * the case of failure.
2274 *
2275 * __d_lookup callers must be commented.
2276 */
2277struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
2278{
2279	unsigned int hash = name->hash;
2280	struct hlist_bl_head *b = d_hash(hash);
2281	struct hlist_bl_node *node;
2282	struct dentry *found = NULL;
2283	struct dentry *dentry;
2284
2285	/*
2286	 * Note: There is significant duplication with __d_lookup_rcu which is
2287	 * required to prevent single threaded performance regressions
2288	 * especially on architectures where smp_rmb (in seqcounts) are costly.
2289	 * Keep the two functions in sync.
2290	 */
2291
2292	/*
2293	 * The hash list is protected using RCU.
2294	 *
2295	 * Take d_lock when comparing a candidate dentry, to avoid races
2296	 * with d_move().
2297	 *
2298	 * It is possible that concurrent renames can mess up our list
2299	 * walk here and result in missing our dentry, resulting in the
2300	 * false-negative result. d_lookup() protects against concurrent
2301	 * renames using rename_lock seqlock.
2302	 *
2303	 * See Documentation/filesystems/path-lookup.txt for more details.
2304	 */
2305	rcu_read_lock();
2306	
2307	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
2308
2309		if (dentry->d_name.hash != hash)
2310			continue;
2311
2312		spin_lock(&dentry->d_lock);
2313		if (dentry->d_parent != parent)
2314			goto next;
2315		if (d_unhashed(dentry))
2316			goto next;
2317
2318		if (!d_same_name(dentry, parent, name))
2319			goto next;
2320
2321		dentry->d_lockref.count++;
2322		found = dentry;
2323		spin_unlock(&dentry->d_lock);
2324		break;
2325next:
2326		spin_unlock(&dentry->d_lock);
2327 	}
2328 	rcu_read_unlock();
2329
2330 	return found;
2331}
2332
2333/**
2334 * d_hash_and_lookup - hash the qstr then search for a dentry
2335 * @dir: Directory to search in
2336 * @name: qstr of name we wish to find
2337 *
2338 * Returns %NULL on lookup failure, or ERR_PTR(-error) on a bad name.
2339 */
2340struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
2341{
2342	/*
2343	 * Check for a fs-specific hash function. Note that we must
2344	 * calculate the standard hash first, as the d_op->d_hash()
2345	 * routine may choose to leave the hash value unchanged.
2346	 */
2347	name->hash = full_name_hash(dir, name->name, name->len);
2348	if (dir->d_flags & DCACHE_OP_HASH) {
2349		int err = dir->d_op->d_hash(dir, name);
2350		if (unlikely(err < 0))
2351			return ERR_PTR(err);
2352	}
2353	return d_lookup(dir, name);
2354}
2355EXPORT_SYMBOL(d_hash_and_lookup);
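
/*
 * Illustrative sketch (not part of the original file): probing the dcache
 * for a child by name.  A returned dentry carries a reference that must be
 * dropped with dput().
 *
 *	struct qstr q = QSTR_INIT("child", 5);
 *	struct dentry *child = d_hash_and_lookup(dir, &q);
 *
 *	if (!IS_ERR_OR_NULL(child)) {
 *		...
 *		dput(child);
 *	}
 */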
2356
2357/*
2358 * When a file is deleted, we have two options:
2359 * - turn this dentry into a negative dentry
2360 * - unhash this dentry and free it.
2361 *
2362 * Usually, we want to just turn this into
2363 * a negative dentry, but if anybody else is
2364 * currently using the dentry or the inode
2365 * we can't do that and we fall back on removing
2366 * it from the hash queues and waiting for
2367 * it to be deleted later when it has no users
2368 */
2369 
2370/**
2371 * d_delete - delete a dentry
2372 * @dentry: The dentry to delete
2373 *
2374 * Turn the dentry into a negative dentry if possible, otherwise
2375 * remove it from the hash queues so it can be deleted later
2376 */
2377 
2378void d_delete(struct dentry * dentry)
2379{
2380	struct inode *inode = dentry->d_inode;
2381
2382	spin_lock(&inode->i_lock);
2383	spin_lock(&dentry->d_lock);
2384	/*
2385	 * Are we the only user?
2386	 */
2387	if (dentry->d_lockref.count == 1) {
2388		dentry->d_flags &= ~DCACHE_CANT_MOUNT;
2389		dentry_unlink_inode(dentry);
2390	} else {
2391		__d_drop(dentry);
2392		spin_unlock(&dentry->d_lock);
2393		spin_unlock(&inode->i_lock);
2394	}
2395}
2396EXPORT_SYMBOL(d_delete);
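
/*
 * Illustrative sketch (not part of the original file), simplified from the
 * vfs_unlink() path: the on-disk name is removed first, then d_delete()
 * either turns the dentry negative or unhashes it.
 *
 *	error = dir->i_op->unlink(dir, dentry);
 *	if (!error)
 *		d_delete(dentry);
 */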
2397
2398static void __d_rehash(struct dentry *entry)
2399{
2400	struct hlist_bl_head *b = d_hash(entry->d_name.hash);
2401
2402	hlist_bl_lock(b);
2403	hlist_bl_add_head_rcu(&entry->d_hash, b);
2404	hlist_bl_unlock(b);
2405}
2406
2407/**
2408 * d_rehash	- add an entry back to the hash
2409 * @entry: dentry to add to the hash
2410 *
2411 * Adds a dentry to the hash according to its name.
2412 */
2413 
2414void d_rehash(struct dentry * entry)
2415{
2416	spin_lock(&entry->d_lock);
2417	__d_rehash(entry);
2418	spin_unlock(&entry->d_lock);
2419}
2420EXPORT_SYMBOL(d_rehash);
2421
2422static inline unsigned start_dir_add(struct inode *dir)
2423{
2424	preempt_disable_nested();
2425	for (;;) {
2426		unsigned n = dir->i_dir_seq;
2427		if (!(n & 1) && cmpxchg(&dir->i_dir_seq, n, n + 1) == n)
2428			return n;
2429		cpu_relax();
2430	}
2431}
2432
2433static inline void end_dir_add(struct inode *dir, unsigned int n,
2434			       wait_queue_head_t *d_wait)
2435{
2436	smp_store_release(&dir->i_dir_seq, n + 2);
2437	preempt_enable_nested();
2438	wake_up_all(d_wait);
2439}
2440
2441static void d_wait_lookup(struct dentry *dentry)
2442{
2443	if (d_in_lookup(dentry)) {
2444		DECLARE_WAITQUEUE(wait, current);
2445		add_wait_queue(dentry->d_wait, &wait);
2446		do {
2447			set_current_state(TASK_UNINTERRUPTIBLE);
2448			spin_unlock(&dentry->d_lock);
2449			schedule();
2450			spin_lock(&dentry->d_lock);
2451		} while (d_in_lookup(dentry));
2452	}
2453}
2454
2455struct dentry *d_alloc_parallel(struct dentry *parent,
2456				const struct qstr *name,
2457				wait_queue_head_t *wq)
2458{
2459	unsigned int hash = name->hash;
2460	struct hlist_bl_head *b = in_lookup_hash(parent, hash);
2461	struct hlist_bl_node *node;
2462	struct dentry *new = d_alloc(parent, name);
2463	struct dentry *dentry;
2464	unsigned seq, r_seq, d_seq;
2465
2466	if (unlikely(!new))
2467		return ERR_PTR(-ENOMEM);
2468
2469retry:
2470	rcu_read_lock();
2471	seq = smp_load_acquire(&parent->d_inode->i_dir_seq);
2472	r_seq = read_seqbegin(&rename_lock);
2473	dentry = __d_lookup_rcu(parent, name, &d_seq);
2474	if (unlikely(dentry)) {
2475		if (!lockref_get_not_dead(&dentry->d_lockref)) {
2476			rcu_read_unlock();
2477			goto retry;
2478		}
2479		if (read_seqcount_retry(&dentry->d_seq, d_seq)) {
2480			rcu_read_unlock();
2481			dput(dentry);
2482			goto retry;
2483		}
2484		rcu_read_unlock();
2485		dput(new);
2486		return dentry;
2487	}
2488	if (unlikely(read_seqretry(&rename_lock, r_seq))) {
2489		rcu_read_unlock();
2490		goto retry;
2491	}
2492
2493	if (unlikely(seq & 1)) {
2494		rcu_read_unlock();
2495		goto retry;
2496	}
2497
2498	hlist_bl_lock(b);
2499	if (unlikely(READ_ONCE(parent->d_inode->i_dir_seq) != seq)) {
2500		hlist_bl_unlock(b);
2501		rcu_read_unlock();
2502		goto retry;
2503	}
2504	/*
2505	 * No changes for the parent since the beginning of d_lookup().
2506	 * Since all removals from the chain happen with hlist_bl_lock(),
2507	 * any potential in-lookup matches are going to stay here until
2508	 * we unlock the chain.  All fields are stable in everything
2509	 * we encounter.
2510	 */
2511	hlist_bl_for_each_entry(dentry, node, b, d_u.d_in_lookup_hash) {
2512		if (dentry->d_name.hash != hash)
2513			continue;
2514		if (dentry->d_parent != parent)
2515			continue;
2516		if (!d_same_name(dentry, parent, name))
2517			continue;
2518		hlist_bl_unlock(b);
2519		/* now we can try to grab a reference */
2520		if (!lockref_get_not_dead(&dentry->d_lockref)) {
2521			rcu_read_unlock();
2522			goto retry;
2523		}
2524
2525		rcu_read_unlock();
2526		/*
2527		 * somebody is likely to be still doing lookup for it;
2528		 * wait for them to finish
2529		 */
2530		spin_lock(&dentry->d_lock);
2531		d_wait_lookup(dentry);
2532		/*
2533		 * it's not in-lookup anymore; in principle we should repeat
2534		 * everything from dcache lookup, but it's likely to be what
2535		 * d_lookup() would've found anyway.  If it is, just return it;
2536		 * otherwise we really have to repeat the whole thing.
2537		 */
2538		if (unlikely(dentry->d_name.hash != hash))
2539			goto mismatch;
2540		if (unlikely(dentry->d_parent != parent))
2541			goto mismatch;
2542		if (unlikely(d_unhashed(dentry)))
2543			goto mismatch;
2544		if (unlikely(!d_same_name(dentry, parent, name)))
2545			goto mismatch;
2546		/* OK, it *is* a hashed match; return it */
2547		spin_unlock(&dentry->d_lock);
2548		dput(new);
2549		return dentry;
2550	}
2551	rcu_read_unlock();
2552	/* we can't take ->d_lock here; it's OK, though. */
2553	new->d_flags |= DCACHE_PAR_LOOKUP;
2554	new->d_wait = wq;
2555	hlist_bl_add_head(&new->d_u.d_in_lookup_hash, b);
2556	hlist_bl_unlock(b);
2557	return new;
2558mismatch:
2559	spin_unlock(&dentry->d_lock);
2560	dput(dentry);
2561	goto retry;
2562}
2563EXPORT_SYMBOL(d_alloc_parallel);
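
/*
 * Illustrative sketch (not part of the original file): a consumer in the
 * style of NFS/fuse readdirplus.  If the returned dentry is still
 * in-lookup, the caller owns the lookup; instantiating it with d_add() or
 * d_splice_alias() ends the in-lookup state, and on a miss d_lookup_done()
 * must be called explicitly.
 *
 *	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
 *	struct dentry *dentry = d_alloc_parallel(parent, &name, &wq);
 *
 *	if (IS_ERR(dentry))
 *		return PTR_ERR(dentry);
 *	if (d_in_lookup(dentry)) {
 *		... look the name up on disk and instantiate, or ...
 *		d_lookup_done(dentry);
 *	}
 *	dput(dentry);
 */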
2564
2565/*
2566 * - Unhash the dentry
2567 * - Retrieve and clear the waitqueue head in dentry
2568 * - Return the waitqueue head
2569 */
2570static wait_queue_head_t *__d_lookup_unhash(struct dentry *dentry)
2571{
2572	wait_queue_head_t *d_wait;
2573	struct hlist_bl_head *b;
2574
2575	lockdep_assert_held(&dentry->d_lock);
2576
2577	b = in_lookup_hash(dentry->d_parent, dentry->d_name.hash);
2578	hlist_bl_lock(b);
2579	dentry->d_flags &= ~DCACHE_PAR_LOOKUP;
2580	__hlist_bl_del(&dentry->d_u.d_in_lookup_hash);
2581	d_wait = dentry->d_wait;
2582	dentry->d_wait = NULL;
2583	hlist_bl_unlock(b);
2584	INIT_HLIST_NODE(&dentry->d_u.d_alias);
2585	INIT_LIST_HEAD(&dentry->d_lru);
2586	return d_wait;
2587}
2588
2589void __d_lookup_unhash_wake(struct dentry *dentry)
2590{
2591	spin_lock(&dentry->d_lock);
2592	wake_up_all(__d_lookup_unhash(dentry));
2593	spin_unlock(&dentry->d_lock);
2594}
2595EXPORT_SYMBOL(__d_lookup_unhash_wake);
2596
2597/* inode->i_lock held if inode is non-NULL */
2598
2599static inline void __d_add(struct dentry *dentry, struct inode *inode)
2600{
2601	wait_queue_head_t *d_wait;
2602	struct inode *dir = NULL;
2603	unsigned n;
2604	spin_lock(&dentry->d_lock);
2605	if (unlikely(d_in_lookup(dentry))) {
2606		dir = dentry->d_parent->d_inode;
2607		n = start_dir_add(dir);
2608		d_wait = __d_lookup_unhash(dentry);
2609	}
2610	if (inode) {
2611		unsigned add_flags = d_flags_for_inode(inode);
2612		hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
2613		raw_write_seqcount_begin(&dentry->d_seq);
2614		__d_set_inode_and_type(dentry, inode, add_flags);
2615		raw_write_seqcount_end(&dentry->d_seq);
2616		fsnotify_update_flags(dentry);
2617	}
2618	__d_rehash(dentry);
2619	if (dir)
2620		end_dir_add(dir, n, d_wait);
2621	spin_unlock(&dentry->d_lock);
2622	if (inode)
2623		spin_unlock(&inode->i_lock);
2624}
2625
2626/**
2627 * d_add - add dentry to hash queues
2628 * @entry: dentry to add
2629 * @inode: The inode to attach to this dentry
2630 *
2631 * This adds the entry to the hash queues and attaches @inode to the
2632 * dentry.  The name itself was filled in earlier, during d_alloc().
2633 */
2634
2635void d_add(struct dentry *entry, struct inode *inode)
2636{
2637	if (inode) {
2638		security_d_instantiate(entry, inode);
2639		spin_lock(&inode->i_lock);
2640	}
2641	__d_add(entry, inode);
2642}
2643EXPORT_SYMBOL(d_add);
2644
2645/**
2646 * d_exact_alias - find and hash an exact unhashed alias
2647 * @entry: dentry to add
2648 * @inode: The inode to go with this dentry
2649 *
2650 * If an unhashed dentry with the same name/parent and desired
2651 * inode already exists, hash and return it.  Otherwise, return
2652 * NULL.
2653 *
2654 * Parent directory should be locked.
2655 */
2656struct dentry *d_exact_alias(struct dentry *entry, struct inode *inode)
2657{
2658	struct dentry *alias;
2659	unsigned int hash = entry->d_name.hash;
2660
2661	spin_lock(&inode->i_lock);
2662	hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
2663		/*
2664		 * Don't need alias->d_lock here, because aliases with
2665		 * d_parent == entry->d_parent are not subject to name or
2666		 * parent changes, because the parent inode i_mutex is held.
2667		 */
2668		if (alias->d_name.hash != hash)
2669			continue;
2670		if (alias->d_parent != entry->d_parent)
2671			continue;
2672		if (!d_same_name(alias, entry->d_parent, &entry->d_name))
2673			continue;
2674		spin_lock(&alias->d_lock);
2675		if (!d_unhashed(alias)) {
2676			spin_unlock(&alias->d_lock);
2677			alias = NULL;
2678		} else {
2679			dget_dlock(alias);
2680			__d_rehash(alias);
2681			spin_unlock(&alias->d_lock);
2682		}
2683		spin_unlock(&inode->i_lock);
2684		return alias;
2685	}
2686	spin_unlock(&inode->i_lock);
2687	return NULL;
2688}
2689EXPORT_SYMBOL(d_exact_alias);
2690
2691static void swap_names(struct dentry *dentry, struct dentry *target)
2692{
2693	if (unlikely(dname_external(target))) {
2694		if (unlikely(dname_external(dentry))) {
2695			/*
2696			 * Both external: swap the pointers
2697			 */
2698			swap(target->d_name.name, dentry->d_name.name);
2699		} else {
2700			/*
2701			 * dentry:internal, target:external.  Steal target's
2702			 * storage and make target internal.
2703			 */
2704			memcpy(target->d_iname, dentry->d_name.name,
2705					dentry->d_name.len + 1);
2706			dentry->d_name.name = target->d_name.name;
2707			target->d_name.name = target->d_iname;
2708		}
2709	} else {
2710		if (unlikely(dname_external(dentry))) {
2711			/*
2712			 * dentry:external, target:internal.  Give dentry's
2713			 * storage to target and make dentry internal
2714			 */
2715			memcpy(dentry->d_iname, target->d_name.name,
2716					target->d_name.len + 1);
2717			target->d_name.name = dentry->d_name.name;
2718			dentry->d_name.name = dentry->d_iname;
2719		} else {
2720			/*
2721			 * Both are internal.
2722			 */
2723			unsigned int i;
2724			BUILD_BUG_ON(!IS_ALIGNED(DNAME_INLINE_LEN, sizeof(long)));
2725			for (i = 0; i < DNAME_INLINE_LEN / sizeof(long); i++) {
2726				swap(((long *) &dentry->d_iname)[i],
2727				     ((long *) &target->d_iname)[i]);
2728			}
2729		}
2730	}
2731	swap(dentry->d_name.hash_len, target->d_name.hash_len);
2732}
2733
2734static void copy_name(struct dentry *dentry, struct dentry *target)
2735{
2736	struct external_name *old_name = NULL;
2737	if (unlikely(dname_external(dentry)))
2738		old_name = external_name(dentry);
2739	if (unlikely(dname_external(target))) {
2740		atomic_inc(&external_name(target)->u.count);
2741		dentry->d_name = target->d_name;
2742	} else {
2743		memcpy(dentry->d_iname, target->d_name.name,
2744				target->d_name.len + 1);
2745		dentry->d_name.name = dentry->d_iname;
2746		dentry->d_name.hash_len = target->d_name.hash_len;
2747	}
2748	if (old_name && likely(atomic_dec_and_test(&old_name->u.count)))
2749		kfree_rcu(old_name, u.head);
2750}
2751
2752/*
2753 * __d_move - move a dentry
2754 * @dentry: entry to move
2755 * @target: new dentry
2756 * @exchange: exchange the two dentries
2757 *
2758 * Update the dcache to reflect the move of a file name. Negative
2759 * dcache entries should not be moved in this way. Caller must hold
2760 * rename_lock, the i_mutex of the source and target directories,
2761 * and the sb->s_vfs_rename_mutex if they differ. See lock_rename().
2762 */
2763static void __d_move(struct dentry *dentry, struct dentry *target,
2764		     bool exchange)
2765{
2766	struct dentry *old_parent, *p;
2767	wait_queue_head_t *d_wait;
2768	struct inode *dir = NULL;
2769	unsigned n;
2770
2771	WARN_ON(!dentry->d_inode);
2772	if (WARN_ON(dentry == target))
2773		return;
2774
2775	BUG_ON(d_ancestor(target, dentry));
2776	old_parent = dentry->d_parent;
2777	p = d_ancestor(old_parent, target);
2778	if (IS_ROOT(dentry)) {
2779		BUG_ON(p);
2780		spin_lock(&target->d_parent->d_lock);
2781	} else if (!p) {
2782		/* target is not a descendant of dentry->d_parent */
2783		spin_lock(&target->d_parent->d_lock);
2784		spin_lock_nested(&old_parent->d_lock, DENTRY_D_LOCK_NESTED);
2785	} else {
2786		BUG_ON(p == dentry);
2787		spin_lock(&old_parent->d_lock);
2788		if (p != target)
2789			spin_lock_nested(&target->d_parent->d_lock,
2790					DENTRY_D_LOCK_NESTED);
2791	}
2792	spin_lock_nested(&dentry->d_lock, 2);
2793	spin_lock_nested(&target->d_lock, 3);
2794
2795	if (unlikely(d_in_lookup(target))) {
2796		dir = target->d_parent->d_inode;
2797		n = start_dir_add(dir);
2798		d_wait = __d_lookup_unhash(target);
2799	}
2800
2801	write_seqcount_begin(&dentry->d_seq);
2802	write_seqcount_begin_nested(&target->d_seq, DENTRY_D_LOCK_NESTED);
2803
2804	/* unhash both */
2805	if (!d_unhashed(dentry))
2806		___d_drop(dentry);
2807	if (!d_unhashed(target))
2808		___d_drop(target);
2809
2810	/* ... and switch them in the tree */
2811	dentry->d_parent = target->d_parent;
2812	if (!exchange) {
2813		copy_name(dentry, target);
2814		target->d_hash.pprev = NULL;
2815		dentry->d_parent->d_lockref.count++;
2816		if (dentry != old_parent) /* wasn't IS_ROOT */
2817			WARN_ON(!--old_parent->d_lockref.count);
2818	} else {
2819		target->d_parent = old_parent;
2820		swap_names(dentry, target);
2821		if (!hlist_unhashed(&target->d_sib))
2822			__hlist_del(&target->d_sib);
2823		hlist_add_head(&target->d_sib, &target->d_parent->d_children);
2824		__d_rehash(target);
2825		fsnotify_update_flags(target);
2826	}
2827	if (!hlist_unhashed(&dentry->d_sib))
2828		__hlist_del(&dentry->d_sib);
2829	hlist_add_head(&dentry->d_sib, &dentry->d_parent->d_children);
2830	__d_rehash(dentry);
2831	fsnotify_update_flags(dentry);
2832	fscrypt_handle_d_move(dentry);
2833
2834	write_seqcount_end(&target->d_seq);
2835	write_seqcount_end(&dentry->d_seq);
2836
2837	if (dir)
2838		end_dir_add(dir, n, d_wait);
2839
2840	if (dentry->d_parent != old_parent)
2841		spin_unlock(&dentry->d_parent->d_lock);
2842	if (dentry != old_parent)
2843		spin_unlock(&old_parent->d_lock);
2844	spin_unlock(&target->d_lock);
2845	spin_unlock(&dentry->d_lock);
2846}
2847
2848/*
2849 * d_move - move a dentry
2850 * @dentry: entry to move
2851 * @target: new dentry
2852 *
2853 * Update the dcache to reflect the move of a file name. Negative
2854 * dcache entries should not be moved in this way. See the locking
2855 * requirements for __d_move.
2856 */
2857void d_move(struct dentry *dentry, struct dentry *target)
2858{
2859	write_seqlock(&rename_lock);
2860	__d_move(dentry, target, false);
2861	write_sequnlock(&rename_lock);
2862}
2863EXPORT_SYMBOL(d_move);
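
/*
 * Illustrative sketch (not part of the original file), roughly what the
 * VFS rename path does on behalf of filesystems - lock_rename() provides
 * the locking that __d_move() documents above:
 *
 *	trap = lock_rename(new_dir_dentry, old_dir_dentry);
 *	...
 *	if (!error)
 *		d_move(old_dentry, new_dentry);
 *	unlock_rename(new_dir_dentry, old_dir_dentry);
 */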
2864
2865/*
2866 * d_exchange - exchange two dentries
2867 * @dentry1: first dentry
2868 * @dentry2: second dentry
2869 */
2870void d_exchange(struct dentry *dentry1, struct dentry *dentry2)
2871{
2872	write_seqlock(&rename_lock);
2873
2874	WARN_ON(!dentry1->d_inode);
2875	WARN_ON(!dentry2->d_inode);
2876	WARN_ON(IS_ROOT(dentry1));
2877	WARN_ON(IS_ROOT(dentry2));
2878
2879	__d_move(dentry1, dentry2, true);
2880
2881	write_sequnlock(&rename_lock);
2882}
2883
2884/**
2885 * d_ancestor - search for an ancestor
2886 * @p1: ancestor dentry
2887 * @p2: child dentry
2888 *
2889 * Returns the ancestor dentry of p2 which is a child of p1, if p1 is
2890 * an ancestor of p2, else NULL.
2891 */
2892struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2)
2893{
2894	struct dentry *p;
2895
2896	for (p = p2; !IS_ROOT(p); p = p->d_parent) {
2897		if (p->d_parent == p1)
2898			return p;
2899	}
2900	return NULL;
2901}
2902
2903/*
2904 * This helper attempts to cope with remotely renamed directories
2905 *
2906 * It assumes that the caller is already holding
2907 * dentry->d_parent->d_inode->i_mutex, and rename_lock
2908 *
2909 * Note: If ever the locking in lock_rename() changes, then please
2910 * remember to update this too...
2911 */
2912static int __d_unalias(struct dentry *dentry, struct dentry *alias)
2913{
2914	struct mutex *m1 = NULL;
2915	struct rw_semaphore *m2 = NULL;
2916	int ret = -ESTALE;
2917
2918	/* If alias and dentry share a parent, then no extra locks required */
2919	if (alias->d_parent == dentry->d_parent)
2920		goto out_unalias;
2921
2922	/* See lock_rename() */
2923	if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex))
2924		goto out_err;
2925	m1 = &dentry->d_sb->s_vfs_rename_mutex;
2926	if (!inode_trylock_shared(alias->d_parent->d_inode))
2927		goto out_err;
2928	m2 = &alias->d_parent->d_inode->i_rwsem;
2929out_unalias:
2930	__d_move(alias, dentry, false);
2931	ret = 0;
2932out_err:
2933	if (m2)
2934		up_read(m2);
2935	if (m1)
2936		mutex_unlock(m1);
2937	return ret;
2938}
2939
2940/**
2941 * d_splice_alias - splice a disconnected dentry into the tree if one exists
2942 * @inode:  the inode which may have a disconnected dentry
2943 * @dentry: a negative dentry which we want to point to the inode.
2944 *
2945 * If inode is a directory and has an IS_ROOT alias, then d_move that in
2946 * place of the given dentry and return it, else simply d_add the inode
2947 * to the dentry and return NULL.
2948 *
2949 * If a non-IS_ROOT directory is found, the filesystem is corrupt, and
2950 * we should error out: directories can't have multiple aliases.
2951 *
2952 * This is needed in the lookup routine of any filesystem that is exportable
2953 * (via knfsd) so that we can build dcache paths to directories effectively.
2954 *
2955 * If a dentry was found and moved, then it is returned.  Otherwise NULL
2956 * is returned.  This matches the expected return value of ->lookup.
2957 *
2958 * Cluster filesystems may call this function with a negative, hashed dentry.
2959 * In that case, we know that the inode will be a regular file, and also this
2960 * will only occur during atomic_open. So we need to check for the dentry
2961 * being already hashed only in the final case.
2962 */
2963struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
2964{
2965	if (IS_ERR(inode))
2966		return ERR_CAST(inode);
2967
2968	BUG_ON(!d_unhashed(dentry));
2969
2970	if (!inode)
2971		goto out;
2972
2973	security_d_instantiate(dentry, inode);
2974	spin_lock(&inode->i_lock);
2975	if (S_ISDIR(inode->i_mode)) {
2976		struct dentry *new = __d_find_any_alias(inode);
2977		if (unlikely(new)) {
2978			/* The reference to new ensures it remains an alias */
2979			spin_unlock(&inode->i_lock);
2980			write_seqlock(&rename_lock);
2981			if (unlikely(d_ancestor(new, dentry))) {
2982				write_sequnlock(&rename_lock);
2983				dput(new);
2984				new = ERR_PTR(-ELOOP);
2985				pr_warn_ratelimited(
2986					"VFS: Lookup of '%s' in %s %s"
2987					" would have caused loop\n",
2988					dentry->d_name.name,
2989					inode->i_sb->s_type->name,
2990					inode->i_sb->s_id);
2991			} else if (!IS_ROOT(new)) {
2992				struct dentry *old_parent = dget(new->d_parent);
2993				int err = __d_unalias(dentry, new);
2994				write_sequnlock(&rename_lock);
2995				if (err) {
2996					dput(new);
2997					new = ERR_PTR(err);
2998				}
2999				dput(old_parent);
3000			} else {
3001				__d_move(new, dentry, false);
3002				write_sequnlock(&rename_lock);
3003			}
3004			iput(inode);
3005			return new;
3006		}
3007	}
3008out:
3009	__d_add(dentry, inode);
3010	return NULL;
3011}
3012EXPORT_SYMBOL(d_splice_alias);
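
/*
 * Illustrative sketch (not part of the original file): the idiomatic tail
 * of an exportable filesystem's ->lookup().  d_splice_alias() accepts a
 * NULL or IS_ERR inode, so the miss and error cases need no special
 * handling.  myfs_find_ino() and myfs_iget() are hypothetical helpers.
 *
 *	static struct dentry *myfs_lookup(struct inode *dir,
 *				struct dentry *dentry, unsigned int flags)
 *	{
 *		ino_t ino = myfs_find_ino(dir, &dentry->d_name);
 *		struct inode *inode = ino ? myfs_iget(dir->i_sb, ino) : NULL;
 *
 *		return d_splice_alias(inode, dentry);
 *	}
 */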
3013
3014/*
3015 * Test whether new_dentry is a subdirectory of old_dentry.
3016 *
3017 * Trivially implemented using the dcache structure
3018 */
3019
3020/**
3021 * is_subdir - is new dentry a subdirectory of old_dentry
3022 * @new_dentry: new dentry
3023 * @old_dentry: old dentry
3024 *
3025 * Returns true if new_dentry is a subdirectory of old_dentry (at any depth).
3026 * Returns false otherwise.
3027 * Caller must ensure that "new_dentry" is pinned before calling is_subdir()
3028 */
3029  
3030bool is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
3031{
3032	bool result;
3033	unsigned seq;
3034
3035	if (new_dentry == old_dentry)
3036		return true;
3037
3038	do {
3039		/* for restarting inner loop in case of seq retry */
3040		seq = read_seqbegin(&rename_lock);
3041		/*
3042		 * Need rcu_read_lock() to protect against d_parent changing
3043		 * under us due to a concurrent d_move()
3044		 */
3045		rcu_read_lock();
3046		if (d_ancestor(old_dentry, new_dentry))
3047			result = true;
3048		else
3049			result = false;
3050		rcu_read_unlock();
3051	} while (read_seqretry(&rename_lock, seq));
3052
3053	return result;
3054}
3055EXPORT_SYMBOL(is_subdir);
3056
3057static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
3058{
3059	struct dentry *root = data;
3060	if (dentry != root) {
3061		if (d_unhashed(dentry) || !dentry->d_inode)
3062			return D_WALK_SKIP;
3063
3064		if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
3065			dentry->d_flags |= DCACHE_GENOCIDE;
3066			dentry->d_lockref.count--;
3067		}
3068	}
3069	return D_WALK_CONTINUE;
3070}
3071
3072void d_genocide(struct dentry *parent)
3073{
3074	d_walk(parent, parent, d_genocide_kill);
3075}
3076
3077void d_mark_tmpfile(struct file *file, struct inode *inode)
3078{
3079	struct dentry *dentry = file->f_path.dentry;
3080
3081	BUG_ON(dentry->d_name.name != dentry->d_iname ||
3082		!hlist_unhashed(&dentry->d_u.d_alias) ||
3083		!d_unlinked(dentry));
3084	spin_lock(&dentry->d_parent->d_lock);
3085	spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
3086	dentry->d_name.len = sprintf(dentry->d_iname, "#%llu",
3087				(unsigned long long)inode->i_ino);
3088	spin_unlock(&dentry->d_lock);
3089	spin_unlock(&dentry->d_parent->d_lock);
3090}
3091EXPORT_SYMBOL(d_mark_tmpfile);
3092
3093void d_tmpfile(struct file *file, struct inode *inode)
3094{
3095	struct dentry *dentry = file->f_path.dentry;
3096
3097	inode_dec_link_count(inode);
3098	d_mark_tmpfile(file, inode);
3099	d_instantiate(dentry, inode);
3100}
3101EXPORT_SYMBOL(d_tmpfile);
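
/*
 * Illustrative sketch (not part of the original file): a ->tmpfile()
 * instance marks the dentry, then opens the file.  myfs_new_inode() is a
 * hypothetical helper returning a referenced inode with i_nlink == 1.
 *
 *	static int myfs_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
 *				struct file *file, umode_t mode)
 *	{
 *		struct inode *inode = myfs_new_inode(dir, mode);
 *		if (IS_ERR(inode))
 *			return PTR_ERR(inode);
 *		d_tmpfile(file, inode);
 *		return finish_open_simple(file, 0);
 *	}
 */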
3102
3103static __initdata unsigned long dhash_entries;
3104static int __init set_dhash_entries(char *str)
3105{
3106	if (!str)
3107		return 0;
3108	dhash_entries = simple_strtoul(str, &str, 0);
3109	return 1;
3110}
3111__setup("dhash_entries=", set_dhash_entries);
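
/*
 * Illustrative note (not part of the original file): the hash table size
 * can be pinned from the kernel command line, e.g. booting with
 *
 *	dhash_entries=2097152
 *
 * instead of letting alloc_large_system_hash() size it from available
 * memory.
 */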
3112
3113static void __init dcache_init_early(void)
3114{
3115	/* If hashes are distributed across NUMA nodes, defer
3116	 * hash allocation until vmalloc space is available.
3117	 */
3118	if (hashdist)
3119		return;
3120
3121	dentry_hashtable =
3122		alloc_large_system_hash("Dentry cache",
3123					sizeof(struct hlist_bl_head),
3124					dhash_entries,
3125					13,
3126					HASH_EARLY | HASH_ZERO,
3127					&d_hash_shift,
3128					NULL,
3129					0,
3130					0);
3131	d_hash_shift = 32 - d_hash_shift;
3132}
3133
3134static void __init dcache_init(void)
3135{
3136	/*
3137	 * A constructor could be added for stable state like the lists,
3138	 * but it is probably not worth it because of the cache nature
3139	 * of the dcache.
3140	 */
3141	dentry_cache = KMEM_CACHE_USERCOPY(dentry,
3142		SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_ACCOUNT,
3143		d_iname);
3144
3145	/* Hash may have been set up in dcache_init_early */
3146	if (!hashdist)
3147		return;
3148
3149	dentry_hashtable =
3150		alloc_large_system_hash("Dentry cache",
3151					sizeof(struct hlist_bl_head),
3152					dhash_entries,
3153					13,
3154					HASH_ZERO,
3155					&d_hash_shift,
3156					NULL,
3157					0,
3158					0);
3159	d_hash_shift = 32 - d_hash_shift;
3160}
3161
3162/* SLAB cache for __getname() consumers */
3163struct kmem_cache *names_cachep __ro_after_init;
3164EXPORT_SYMBOL(names_cachep);
3165
3166void __init vfs_caches_init_early(void)
3167{
3168	int i;
3169
3170	for (i = 0; i < ARRAY_SIZE(in_lookup_hashtable); i++)
3171		INIT_HLIST_BL_HEAD(&in_lookup_hashtable[i]);
3172
3173	dcache_init_early();
3174	inode_init_early();
3175}
3176
3177void __init vfs_caches_init(void)
3178{
3179	names_cachep = kmem_cache_create_usercopy("names_cache", PATH_MAX, 0,
3180			SLAB_HWCACHE_ALIGN|SLAB_PANIC, 0, PATH_MAX, NULL);
3181
3182	dcache_init();
3183	inode_init();
3184	files_init();
3185	files_maxfiles_init();
3186	mnt_init();
3187	bdev_cache_init();
3188	chrdev_init();
3189}
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * fs/dcache.c
   4 *
   5 * Complete reimplementation
   6 * (C) 1997 Thomas Schoebel-Theuer,
   7 * with heavy changes by Linus Torvalds
   8 */
   9
  10/*
  11 * Notes on the allocation strategy:
  12 *
  13 * The dcache is a master of the icache - whenever a dcache entry
  14 * exists, the inode will always exist. "iput()" is done either when
  15 * the dcache entry is deleted or garbage collected.
  16 */
  17
  18#include <linux/ratelimit.h>
  19#include <linux/string.h>
  20#include <linux/mm.h>
  21#include <linux/fs.h>
  22#include <linux/fscrypt.h>
  23#include <linux/fsnotify.h>
  24#include <linux/slab.h>
  25#include <linux/init.h>
  26#include <linux/hash.h>
  27#include <linux/cache.h>
  28#include <linux/export.h>
  29#include <linux/security.h>
  30#include <linux/seqlock.h>
  31#include <linux/memblock.h>
  32#include <linux/bit_spinlock.h>
  33#include <linux/rculist_bl.h>
  34#include <linux/list_lru.h>
  35#include "internal.h"
  36#include "mount.h"
  37
  38/*
  39 * Usage:
  40 * dcache->d_inode->i_lock protects:
  41 *   - i_dentry, d_u.d_alias, d_inode of aliases
  42 * dcache_hash_bucket lock protects:
  43 *   - the dcache hash table
  44 * s_roots bl list spinlock protects:
  45 *   - the s_roots list (see __d_drop)
  46 * dentry->d_sb->s_dentry_lru_lock protects:
  47 *   - the dcache lru lists and counters
  48 * d_lock protects:
  49 *   - d_flags
  50 *   - d_name
  51 *   - d_lru
  52 *   - d_count
  53 *   - d_unhashed()
  54 *   - d_parent and d_subdirs
  55 *   - childrens' d_child and d_parent
  56 *   - d_u.d_alias, d_inode
  57 *
  58 * Ordering:
  59 * dentry->d_inode->i_lock
  60 *   dentry->d_lock
  61 *     dentry->d_sb->s_dentry_lru_lock
  62 *     dcache_hash_bucket lock
  63 *     s_roots lock
  64 *
  65 * If there is an ancestor relationship:
  66 * dentry->d_parent->...->d_parent->d_lock
  67 *   ...
  68 *     dentry->d_parent->d_lock
  69 *       dentry->d_lock
  70 *
  71 * If no ancestor relationship:
  72 * arbitrary, since it's serialized on rename_lock
  73 */
  74int sysctl_vfs_cache_pressure __read_mostly = 100;
  75EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);
  76
  77__cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);
  78
  79EXPORT_SYMBOL(rename_lock);
  80
  81static struct kmem_cache *dentry_cache __read_mostly;
  82
  83const struct qstr empty_name = QSTR_INIT("", 0);
  84EXPORT_SYMBOL(empty_name);
  85const struct qstr slash_name = QSTR_INIT("/", 1);
  86EXPORT_SYMBOL(slash_name);
  87const struct qstr dotdot_name = QSTR_INIT("..", 2);
  88EXPORT_SYMBOL(dotdot_name);
  89
  90/*
  91 * This is the single most critical data structure when it comes
  92 * to the dcache: the hashtable for lookups. Somebody should try
  93 * to make this good - I've just made it work.
  94 *
  95 * This hash-function tries to avoid losing too many bits of hash
  96 * information, yet avoid using a prime hash-size or similar.
  97 */
  98
  99static unsigned int d_hash_shift __read_mostly;
 100
 101static struct hlist_bl_head *dentry_hashtable __read_mostly;
 102
 103static inline struct hlist_bl_head *d_hash(unsigned int hash)
 104{
 105	return dentry_hashtable + (hash >> d_hash_shift);
 106}
 107
 108#define IN_LOOKUP_SHIFT 10
 109static struct hlist_bl_head in_lookup_hashtable[1 << IN_LOOKUP_SHIFT];
 110
 111static inline struct hlist_bl_head *in_lookup_hash(const struct dentry *parent,
 112					unsigned int hash)
 113{
 114	hash += (unsigned long) parent / L1_CACHE_BYTES;
 115	return in_lookup_hashtable + hash_32(hash, IN_LOOKUP_SHIFT);
 116}
 117
 118
 119/* Statistics gathering. */
 120struct dentry_stat_t dentry_stat = {
 121	.age_limit = 45,
 
 
 
 122};
 123
 124static DEFINE_PER_CPU(long, nr_dentry);
 125static DEFINE_PER_CPU(long, nr_dentry_unused);
 126static DEFINE_PER_CPU(long, nr_dentry_negative);
 127
 128#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
 
 
 
 
 129
 130/*
 131 * Here we resort to our own counters instead of using generic per-cpu counters
 132 * for consistency with what the vfs inode code does. We are expected to harvest
 133 * better code and performance by having our own specialized counters.
 134 *
 135 * Please note that the loop is done over all possible CPUs, not over all online
 136 * CPUs. The reason for this is that we don't want to play games with CPUs going
 137 * on and off. If one of them goes off, we will just keep their counters.
 138 *
 139 * glommer: See cffbc8a for details, and if you ever intend to change this,
 140 * please update all vfs counters to match.
 141 */
 142static long get_nr_dentry(void)
 143{
 144	int i;
 145	long sum = 0;
 146	for_each_possible_cpu(i)
 147		sum += per_cpu(nr_dentry, i);
 148	return sum < 0 ? 0 : sum;
 149}
 150
 151static long get_nr_dentry_unused(void)
 152{
 153	int i;
 154	long sum = 0;
 155	for_each_possible_cpu(i)
 156		sum += per_cpu(nr_dentry_unused, i);
 157	return sum < 0 ? 0 : sum;
 158}
 159
 160static long get_nr_dentry_negative(void)
 161{
 162	int i;
 163	long sum = 0;
 164
 165	for_each_possible_cpu(i)
 166		sum += per_cpu(nr_dentry_negative, i);
 167	return sum < 0 ? 0 : sum;
 168}
 169
 170int proc_nr_dentry(struct ctl_table *table, int write, void *buffer,
 171		   size_t *lenp, loff_t *ppos)
 172{
 173	dentry_stat.nr_dentry = get_nr_dentry();
 174	dentry_stat.nr_unused = get_nr_dentry_unused();
 175	dentry_stat.nr_negative = get_nr_dentry_negative();
 176	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
 177}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 178#endif
 179
 180/*
 181 * Compare 2 name strings, return 0 if they match, otherwise non-zero.
 182 * The strings are both count bytes long, and count is non-zero.
 183 */
 184#ifdef CONFIG_DCACHE_WORD_ACCESS
 185
 186#include <asm/word-at-a-time.h>
 187/*
 188 * NOTE! 'cs' and 'scount' come from a dentry, so it has a
 189 * aligned allocation for this particular component. We don't
 190 * strictly need the load_unaligned_zeropad() safety, but it
 191 * doesn't hurt either.
 192 *
 193 * In contrast, 'ct' and 'tcount' can be from a pathname, and do
 194 * need the careful unaligned handling.
 195 */
 196static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
 197{
 198	unsigned long a,b,mask;
 199
 200	for (;;) {
 201		a = read_word_at_a_time(cs);
 202		b = load_unaligned_zeropad(ct);
 203		if (tcount < sizeof(unsigned long))
 204			break;
 205		if (unlikely(a != b))
 206			return 1;
 207		cs += sizeof(unsigned long);
 208		ct += sizeof(unsigned long);
 209		tcount -= sizeof(unsigned long);
 210		if (!tcount)
 211			return 0;
 212	}
 213	mask = bytemask_from_count(tcount);
 214	return unlikely(!!((a ^ b) & mask));
 215}
 216
 217#else
 218
 219static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
 220{
 221	do {
 222		if (*cs != *ct)
 223			return 1;
 224		cs++;
 225		ct++;
 226		tcount--;
 227	} while (tcount);
 228	return 0;
 229}
 230
 231#endif
 232
 233static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *ct, unsigned tcount)
 234{
 235	/*
 236	 * Be careful about RCU walk racing with rename:
 237	 * use 'READ_ONCE' to fetch the name pointer.
 238	 *
 239	 * NOTE! Even if a rename will mean that the length
 240	 * was not loaded atomically, we don't care. The
 241	 * RCU walk will check the sequence count eventually,
 242	 * and catch it. And we won't overrun the buffer,
 243	 * because we're reading the name pointer atomically,
 244	 * and a dentry name is guaranteed to be properly
 245	 * terminated with a NUL byte.
 246	 *
 247	 * End result: even if 'len' is wrong, we'll exit
 248	 * early because the data cannot match (there can
 249	 * be no NUL in the ct/tcount data)
 250	 */
 251	const unsigned char *cs = READ_ONCE(dentry->d_name.name);
 252
 253	return dentry_string_cmp(cs, ct, tcount);
 254}
 255
 256struct external_name {
 257	union {
 258		atomic_t count;
 259		struct rcu_head head;
 260	} u;
 261	unsigned char name[];
 262};
 263
 264static inline struct external_name *external_name(struct dentry *dentry)
 265{
 266	return container_of(dentry->d_name.name, struct external_name, name[0]);
 267}
 268
 269static void __d_free(struct rcu_head *head)
 270{
 271	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
 272
 273	kmem_cache_free(dentry_cache, dentry); 
 274}
 275
 276static void __d_free_external(struct rcu_head *head)
 277{
 278	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
 279	kfree(external_name(dentry));
 280	kmem_cache_free(dentry_cache, dentry);
 281}
 282
 283static inline int dname_external(const struct dentry *dentry)
 284{
 285	return dentry->d_name.name != dentry->d_iname;
 286}
 287
 288void take_dentry_name_snapshot(struct name_snapshot *name, struct dentry *dentry)
 289{
 290	spin_lock(&dentry->d_lock);
 291	name->name = dentry->d_name;
 292	if (unlikely(dname_external(dentry))) {
 293		atomic_inc(&external_name(dentry)->u.count);
 294	} else {
 295		memcpy(name->inline_name, dentry->d_iname,
 296		       dentry->d_name.len + 1);
 297		name->name.name = name->inline_name;
 298	}
 299	spin_unlock(&dentry->d_lock);
 300}
 301EXPORT_SYMBOL(take_dentry_name_snapshot);
 302
 303void release_dentry_name_snapshot(struct name_snapshot *name)
 304{
 305	if (unlikely(name->name.name != name->inline_name)) {
 306		struct external_name *p;
 307		p = container_of(name->name.name, struct external_name, name[0]);
 308		if (unlikely(atomic_dec_and_test(&p->u.count)))
 309			kfree_rcu(p, u.head);
 310	}
 311}
 312EXPORT_SYMBOL(release_dentry_name_snapshot);
 313
 314static inline void __d_set_inode_and_type(struct dentry *dentry,
 315					  struct inode *inode,
 316					  unsigned type_flags)
 317{
 318	unsigned flags;
 319
 320	dentry->d_inode = inode;
 321	flags = READ_ONCE(dentry->d_flags);
 322	flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
 323	flags |= type_flags;
 324	smp_store_release(&dentry->d_flags, flags);
 325}
 326
 327static inline void __d_clear_type_and_inode(struct dentry *dentry)
 328{
 329	unsigned flags = READ_ONCE(dentry->d_flags);
 330
 331	flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
 332	WRITE_ONCE(dentry->d_flags, flags);
 333	dentry->d_inode = NULL;
 334	if (dentry->d_flags & DCACHE_LRU_LIST)
 335		this_cpu_inc(nr_dentry_negative);
 336}
 337
 338static void dentry_free(struct dentry *dentry)
 339{
 340	WARN_ON(!hlist_unhashed(&dentry->d_u.d_alias));
 341	if (unlikely(dname_external(dentry))) {
 342		struct external_name *p = external_name(dentry);
 343		if (likely(atomic_dec_and_test(&p->u.count))) {
 344			call_rcu(&dentry->d_u.d_rcu, __d_free_external);
 345			return;
 346		}
 347	}
 348	/* if dentry was never visible to RCU, immediate free is OK */
 349	if (dentry->d_flags & DCACHE_NORCU)
 350		__d_free(&dentry->d_u.d_rcu);
 351	else
 352		call_rcu(&dentry->d_u.d_rcu, __d_free);
 353}
 354
 355/*
 356 * Release the dentry's inode, using the filesystem
 357 * d_iput() operation if defined.
 358 */
 359static void dentry_unlink_inode(struct dentry * dentry)
 360	__releases(dentry->d_lock)
 361	__releases(dentry->d_inode->i_lock)
 362{
 363	struct inode *inode = dentry->d_inode;
 364
 365	raw_write_seqcount_begin(&dentry->d_seq);
 366	__d_clear_type_and_inode(dentry);
 367	hlist_del_init(&dentry->d_u.d_alias);
 368	raw_write_seqcount_end(&dentry->d_seq);
 369	spin_unlock(&dentry->d_lock);
 370	spin_unlock(&inode->i_lock);
 371	if (!inode->i_nlink)
 372		fsnotify_inoderemove(inode);
 373	if (dentry->d_op && dentry->d_op->d_iput)
 374		dentry->d_op->d_iput(dentry, inode);
 375	else
 376		iput(inode);
 377}
 378
 379/*
 380 * The DCACHE_LRU_LIST bit is set whenever the 'd_lru' entry
 381 * is in use - which includes both the "real" per-superblock
 382 * LRU list _and_ the DCACHE_SHRINK_LIST use.
 383 *
 384 * The DCACHE_SHRINK_LIST bit is set whenever the dentry is
 385 * on the shrink list (ie not on the superblock LRU list).
 386 *
 387 * The per-cpu "nr_dentry_unused" counters are updated with
 388 * the DCACHE_LRU_LIST bit.
 389 *
 390 * The per-cpu "nr_dentry_negative" counters are only updated
 391 * when deleted from or added to the per-superblock LRU list, not
 392 * from/to the shrink list. That is to avoid an unneeded dec/inc
 393 * pair when moving from LRU to shrink list in select_collect().
 394 *
 395 * These helper functions make sure we always follow the
 396 * rules. d_lock must be held by the caller.
 397 */
 398#define D_FLAG_VERIFY(dentry,x) WARN_ON_ONCE(((dentry)->d_flags & (DCACHE_LRU_LIST | DCACHE_SHRINK_LIST)) != (x))
 399static void d_lru_add(struct dentry *dentry)
 400{
 401	D_FLAG_VERIFY(dentry, 0);
 402	dentry->d_flags |= DCACHE_LRU_LIST;
 403	this_cpu_inc(nr_dentry_unused);
 404	if (d_is_negative(dentry))
 405		this_cpu_inc(nr_dentry_negative);
 406	WARN_ON_ONCE(!list_lru_add(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
 
 407}
 408
 409static void d_lru_del(struct dentry *dentry)
 410{
 411	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
 412	dentry->d_flags &= ~DCACHE_LRU_LIST;
 413	this_cpu_dec(nr_dentry_unused);
 414	if (d_is_negative(dentry))
 415		this_cpu_dec(nr_dentry_negative);
 416	WARN_ON_ONCE(!list_lru_del(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
 
 417}
 418
 419static void d_shrink_del(struct dentry *dentry)
 420{
 421	D_FLAG_VERIFY(dentry, DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
 422	list_del_init(&dentry->d_lru);
 423	dentry->d_flags &= ~(DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
 424	this_cpu_dec(nr_dentry_unused);
 425}
 426
 427static void d_shrink_add(struct dentry *dentry, struct list_head *list)
 428{
 429	D_FLAG_VERIFY(dentry, 0);
 430	list_add(&dentry->d_lru, list);
 431	dentry->d_flags |= DCACHE_SHRINK_LIST | DCACHE_LRU_LIST;
 432	this_cpu_inc(nr_dentry_unused);
 433}
 434
 435/*
 436 * These can only be called under the global LRU lock, ie during the
 437 * callback for freeing the LRU list. "isolate" removes it from the
 438 * LRU lists entirely, while shrink_move moves it to the indicated
 439 * private list.
 440 */
 441static void d_lru_isolate(struct list_lru_one *lru, struct dentry *dentry)
 442{
 443	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
 444	dentry->d_flags &= ~DCACHE_LRU_LIST;
 445	this_cpu_dec(nr_dentry_unused);
 446	if (d_is_negative(dentry))
 447		this_cpu_dec(nr_dentry_negative);
 448	list_lru_isolate(lru, &dentry->d_lru);
 449}
 450
 451static void d_lru_shrink_move(struct list_lru_one *lru, struct dentry *dentry,
 452			      struct list_head *list)
 453{
 454	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
 455	dentry->d_flags |= DCACHE_SHRINK_LIST;
 456	if (d_is_negative(dentry))
 457		this_cpu_dec(nr_dentry_negative);
 458	list_lru_isolate_move(lru, &dentry->d_lru, list);
 459}
 460
 461static void ___d_drop(struct dentry *dentry)
 462{
 463	struct hlist_bl_head *b;
 464	/*
 465	 * Hashed dentries are normally on the dentry hashtable,
 466	 * with the exception of those newly allocated by
 467	 * d_obtain_root, which are always IS_ROOT:
 468	 */
 469	if (unlikely(IS_ROOT(dentry)))
 470		b = &dentry->d_sb->s_roots;
 471	else
 472		b = d_hash(dentry->d_name.hash);
 473
 474	hlist_bl_lock(b);
 475	__hlist_bl_del(&dentry->d_hash);
 476	hlist_bl_unlock(b);
 477}
 478
 479void __d_drop(struct dentry *dentry)
 480{
 481	if (!d_unhashed(dentry)) {
 482		___d_drop(dentry);
 483		dentry->d_hash.pprev = NULL;
 484		write_seqcount_invalidate(&dentry->d_seq);
 485	}
 486}
 487EXPORT_SYMBOL(__d_drop);
 488
 489/**
 490 * d_drop - drop a dentry
 491 * @dentry: dentry to drop
 492 *
 493 * d_drop() unhashes the entry from the parent dentry hashes, so that it won't
 494 * be found through a VFS lookup any more. Note that this is different from
 495 * deleting the dentry - d_delete will try to mark the dentry negative if
 496 * possible, giving a successful _negative_ lookup, while d_drop will
 497 * just make the cache lookup fail.
 498 *
 499 * d_drop() is used mainly for stuff that wants to invalidate a dentry for some
 500 * reason (NFS timeouts or autofs deletes).
 501 *
 502 * __d_drop requires dentry->d_lock
 503 *
 504 * ___d_drop doesn't mark dentry as "unhashed"
 505 * (dentry->d_hash.pprev will be LIST_POISON2, not NULL).
 506 */
 507void d_drop(struct dentry *dentry)
 508{
 509	spin_lock(&dentry->d_lock);
 510	__d_drop(dentry);
 511	spin_unlock(&dentry->d_lock);
 512}
 513EXPORT_SYMBOL(d_drop);
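/*
 * Example (editorial sketch): forcing a cached name to be looked up again,
 * e.g. after an out-of-band change on a network filesystem.  Uses only
 * exported dcache APIs; example_drop_child() itself is hypothetical.
 */
static void example_drop_child(struct dentry *parent, const char *name)
{
	struct qstr q = QSTR_INIT(name, strlen(name));
	struct dentry *child;

	q.hash = full_name_hash(parent, q.name, q.len);
	child = d_lookup(parent, &q);		/* takes a reference */
	if (child) {
		d_drop(child);	/* unhash: future lookups miss the dcache */
		dput(child);	/* release d_lookup()'s reference */
	}
}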
 514
 515static inline void dentry_unlist(struct dentry *dentry, struct dentry *parent)
 516{
 517	struct dentry *next;
 518	/*
 519	 * Inform d_walk() and shrink_dentry_list() that we are no longer
 520	 * attached to the dentry tree
 521	 */
 522	dentry->d_flags |= DCACHE_DENTRY_KILLED;
 523	if (unlikely(list_empty(&dentry->d_child)))
 524		return;
 525	__list_del_entry(&dentry->d_child);
 526	/*
 527	 * Cursors can move around the list of children.  While we'd been
 528	 * a normal list member, it didn't matter - ->d_child.next would've
 529	 * been updated.  However, from now on it won't be and for the
 530	 * things like d_walk() it might end up with a nasty surprise.
 531	 * Normally d_walk() doesn't care about cursors moving around -
 532	 * ->d_lock on parent prevents that and since a cursor has no children
 533	 * of its own, we get through it without ever unlocking the parent.
 534	 * There is one exception, though - if we ascend from a child that
 535	 * gets killed as soon as we unlock it, the next sibling is found
 536	 * using the value left in its ->d_child.next.  And if _that_
 537	 * pointed to a cursor, and cursor got moved (e.g. by lseek())
 538	 * before d_walk() regains parent->d_lock, we'll end up skipping
 539	 * everything the cursor had been moved past.
 540	 *
 541	 * Solution: make sure that the pointer left behind in ->d_child.next
 542	 * points to something that won't be moving around.  I.e. skip the
 543	 * cursors.
 544	 */
 545	while (dentry->d_child.next != &parent->d_subdirs) {
 546		next = list_entry(dentry->d_child.next, struct dentry, d_child);
 547		if (likely(!(next->d_flags & DCACHE_DENTRY_CURSOR)))
 548			break;
 549		dentry->d_child.next = next->d_child.next;
 550	}
 551}
 552
 553static void __dentry_kill(struct dentry *dentry)
 554{
 555	struct dentry *parent = NULL;
 556	bool can_free = true;
 557	if (!IS_ROOT(dentry))
 558		parent = dentry->d_parent;
 559
 560	/*
 561	 * The dentry is now unrecoverably dead to the world.
 562	 */
 563	lockref_mark_dead(&dentry->d_lockref);
 564
 565	/*
 566	 * inform the fs via d_prune that this dentry is about to be
 567	 * unhashed and destroyed.
 568	 */
 569	if (dentry->d_flags & DCACHE_OP_PRUNE)
 570		dentry->d_op->d_prune(dentry);
 571
 572	if (dentry->d_flags & DCACHE_LRU_LIST) {
 573		if (!(dentry->d_flags & DCACHE_SHRINK_LIST))
 574			d_lru_del(dentry);
 575	}
 576	/* if it was on the hash then remove it */
 577	__d_drop(dentry);
 578	dentry_unlist(dentry, parent);
 579	if (parent)
 580		spin_unlock(&parent->d_lock);
 581	if (dentry->d_inode)
 582		dentry_unlink_inode(dentry);
 583	else
 584		spin_unlock(&dentry->d_lock);
 585	this_cpu_dec(nr_dentry);
 586	if (dentry->d_op && dentry->d_op->d_release)
 587		dentry->d_op->d_release(dentry);
 588
 589	spin_lock(&dentry->d_lock);
 590	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
 591		dentry->d_flags |= DCACHE_MAY_FREE;
 592		can_free = false;
 593	}
 594	spin_unlock(&dentry->d_lock);
 595	if (likely(can_free))
 596		dentry_free(dentry);
 597	cond_resched();
 598}
 599
 600static struct dentry *__lock_parent(struct dentry *dentry)
 601{
 602	struct dentry *parent;
 603	rcu_read_lock();
 604	spin_unlock(&dentry->d_lock);
 605again:
 606	parent = READ_ONCE(dentry->d_parent);
 607	spin_lock(&parent->d_lock);
 608	/*
 609	 * We can't blindly lock dentry until we are sure
 610	 * that we won't violate the locking order.
 611	 * Any changes of dentry->d_parent must have
 612	 * been done with parent->d_lock held, so
 613	 * spin_lock() above is enough of a barrier
 614	 * for checking if it's still our child.
 615	 */
 616	if (unlikely(parent != dentry->d_parent)) {
 617		spin_unlock(&parent->d_lock);
 618		goto again;
 619	}
 620	rcu_read_unlock();
 621	if (parent != dentry)
 622		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
 623	else
 624		parent = NULL;
 625	return parent;
 626}
 627
 628static inline struct dentry *lock_parent(struct dentry *dentry)
 629{
 630	struct dentry *parent = dentry->d_parent;
 631	if (IS_ROOT(dentry))
 632		return NULL;
 633	if (likely(spin_trylock(&parent->d_lock)))
 634		return parent;
 635	return __lock_parent(dentry);
 636}
 637
 638static inline bool retain_dentry(struct dentry *dentry)
 639{
 640	WARN_ON(d_in_lookup(dentry));
 641
 642	/* Unreachable? Get rid of it */
 643	if (unlikely(d_unhashed(dentry)))
 644		return false;
 645
 646	if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED))
 647		return false;
 648
 649	if (unlikely(dentry->d_flags & DCACHE_OP_DELETE)) {
 650		if (dentry->d_op->d_delete(dentry))
 651			return false;
 652	}
 653
 654	if (unlikely(dentry->d_flags & DCACHE_DONTCACHE))
 655		return false;
 656
 657	/* retain; LRU fodder */
 658	dentry->d_lockref.count--;
 659	if (unlikely(!(dentry->d_flags & DCACHE_LRU_LIST)))
 660		d_lru_add(dentry);
 661	else if (unlikely(!(dentry->d_flags & DCACHE_REFERENCED)))
 662		dentry->d_flags |= DCACHE_REFERENCED;
 663	return true;
 664}
 665
 666void d_mark_dontcache(struct inode *inode)
 667{
 668	struct dentry *de;
 669
 670	spin_lock(&inode->i_lock);
 671	hlist_for_each_entry(de, &inode->i_dentry, d_u.d_alias) {
 672		spin_lock(&de->d_lock);
 673		de->d_flags |= DCACHE_DONTCACHE;
 674		spin_unlock(&de->d_lock);
 675	}
 676	inode->i_state |= I_DONTCACHE;
 677	spin_unlock(&inode->i_lock);
 678}
 679EXPORT_SYMBOL(d_mark_dontcache);
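/*
 * Example (editorial sketch): a filesystem that learns an inode is gone,
 * e.g. unlinked on the server, can ask for the inode and all of its
 * aliases to be evicted on last use rather than kept cached.
 */
static void example_remote_unlinked(struct inode *inode)
{
	/* aliases get DCACHE_DONTCACHE, the inode gets I_DONTCACHE */
	d_mark_dontcache(inode);
}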
 680
 681/*
 682 * Finish off a dentry we've decided to kill.
 683 * dentry->d_lock must be held, returns with it unlocked.
 684 * Returns dentry requiring refcount drop, or NULL if we're done.
 685 */
 686static struct dentry *dentry_kill(struct dentry *dentry)
 687	__releases(dentry->d_lock)
 688{
 689	struct inode *inode = dentry->d_inode;
 690	struct dentry *parent = NULL;
 691
 692	if (inode && unlikely(!spin_trylock(&inode->i_lock)))
 693		goto slow_positive;
 694
 695	if (!IS_ROOT(dentry)) {
 696		parent = dentry->d_parent;
 697		if (unlikely(!spin_trylock(&parent->d_lock))) {
 698			parent = __lock_parent(dentry);
 699			if (likely(inode || !dentry->d_inode))
 700				goto got_locks;
 701			/* negative that became positive */
 702			if (parent)
 703				spin_unlock(&parent->d_lock);
 704			inode = dentry->d_inode;
 705			goto slow_positive;
 706		}
 707	}
 708	__dentry_kill(dentry);
 709	return parent;
 710
 711slow_positive:
 712	spin_unlock(&dentry->d_lock);
 713	spin_lock(&inode->i_lock);
 714	spin_lock(&dentry->d_lock);
 715	parent = lock_parent(dentry);
 716got_locks:
 717	if (unlikely(dentry->d_lockref.count != 1)) {
 718		dentry->d_lockref.count--;
 719	} else if (likely(!retain_dentry(dentry))) {
 720		__dentry_kill(dentry);
 721		return parent;
 722	}
 723	/* we are keeping it, after all */
 724	if (inode)
 725		spin_unlock(&inode->i_lock);
 726	if (parent)
 727		spin_unlock(&parent->d_lock);
 728	spin_unlock(&dentry->d_lock);
 729	return NULL;
 730}
 731
 732/*
 733 * Try to do a lockless dput(), and return whether that was successful.
 734 *
 735 * If unsuccessful, we return false, having already taken the dentry lock.
 736 *
 737 * The caller needs to hold the RCU read lock, so that the dentry is
 738 * guaranteed to stay around even if the refcount goes down to zero!
 739 */
 740static inline bool fast_dput(struct dentry *dentry)
 741{
 742	int ret;
 743	unsigned int d_flags;
 744
 745	/*
 746	 * If we have a d_op->d_delete() operation, we should not
 747	 * let the dentry count go to zero, so use "put_or_lock".
 748	 */
 749	if (unlikely(dentry->d_flags & DCACHE_OP_DELETE))
 750		return lockref_put_or_lock(&dentry->d_lockref);
 751
 752	/*
 753	 * .. otherwise, we can try to just decrement the
 754	 * lockref optimistically.
 755	 */
 756	ret = lockref_put_return(&dentry->d_lockref);
 757
 758	/*
 759	 * If the lockref_put_return() failed due to the lock being held
 760	 * by somebody else, the fast path has failed. We will need to
 761	 * get the lock, and then check the count again.
 762	 */
 763	if (unlikely(ret < 0)) {
 764		spin_lock(&dentry->d_lock);
 765		if (dentry->d_lockref.count > 1) {
 766			dentry->d_lockref.count--;
 767			spin_unlock(&dentry->d_lock);
 768			return true;
 769		}
 770		return false;
 771	}
 772
 773	/*
 774	 * If we weren't the last ref, we're done.
 775	 */
 776	if (ret)
 777		return true;
 778
 779	/*
 780	 * Careful, careful. The reference count went down
 781	 * to zero, but we don't hold the dentry lock, so
 782	 * somebody else could get it again, and do another
 783	 * dput(), and we need to not race with that.
 784	 *
 785	 * However, there is a very special and common case
 786	 * where we don't care, because there is nothing to
 787	 * do: the dentry is still hashed, it does not have
 788	 * a 'delete' op, and it's referenced and already on
 789	 * the LRU list.
 790	 *
 791	 * NOTE! Since we aren't locked, these values are
 792	 * not "stable". However, it is sufficient that at
 793	 * some point after we dropped the reference the
 794	 * dentry was hashed and the flags had the proper
 795	 * value. Other dentry users may have re-gotten
 796	 * a reference to the dentry and change that, but
 797	 * our work is done - we can leave the dentry
 798	 * around with a zero refcount.
 799	 *
 800	 * Nevertheless, there are two cases in which we should kill
 801	 * the dentry anyway:
 802	 * 1. free disconnected dentries as soon as their refcount
 803	 *    reaches zero.
 804	 * 2. free dentries if they should not be cached.
 805	 */
 806	smp_rmb();
 807	d_flags = READ_ONCE(dentry->d_flags);
 808	d_flags &= DCACHE_REFERENCED | DCACHE_LRU_LIST |
 809			DCACHE_DISCONNECTED | DCACHE_DONTCACHE;
 810
 811	/* Nothing to do? Dropping the reference was all we needed? */
 812	if (d_flags == (DCACHE_REFERENCED | DCACHE_LRU_LIST) && !d_unhashed(dentry))
 813		return true;
 814
 815	/*
 816	 * Not the fast normal case? Get the lock. We've already decremented
 817	 * the refcount, but we'll need to re-check the situation after
 818	 * getting the lock.
 819	 */
 820	spin_lock(&dentry->d_lock);
 821
 822	/*
 823	 * Did somebody else grab a reference to it in the meantime, and
 824	 * we're no longer the last user after all? Alternatively, somebody
 825	 * else could have killed it and marked it dead. Either way, we
 826	 * don't need to do anything else.
 827	 */
 828	if (dentry->d_lockref.count) {
 829		spin_unlock(&dentry->d_lock);
 830		return true;
 831	}
 832
 833	/*
 834	 * Re-get the reference we optimistically dropped. We hold the
 835	 * lock, and we just tested that it was zero, so we can just
 836	 * set it to 1.
 837	 */
 838	dentry->d_lockref.count = 1;
 839	return false;
 840}
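/*
 * Example (editorial sketch, assumes <linux/lockref.h>): the lockref
 * semantics fast_dput() builds on.  lockref_put_return() decrements and
 * returns the new count without taking the spinlock, or returns -1 when
 * the lockless cmpxchg could not be used (lock held, lockref dead, or
 * CONFIG_ARCH_USE_CMPXCHG_LOCKREF=n).  struct example_obj is hypothetical.
 */
struct example_obj {
	struct lockref ref;	/* embedded spinlock + count */
};

static bool example_put_fast(struct example_obj *obj)
{
	int new = lockref_put_return(&obj->ref);

	if (new < 0)
		return false;	/* take obj->ref.lock and use a slow path */
	return true;		/* dropped; new == 0 means it was the last ref */
}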
 841
 842
 843/* 
 844 * This is dput
 845 *
 846 * This is complicated by the fact that we do not want to put
 847 * dentries that are no longer on any hash chain on the unused
 848 * list: we'd much rather just get rid of them immediately.
 849 *
 850 * However, that implies that we have to traverse the dentry
 851 * tree upwards to the parents which might _also_ now be
 852 * scheduled for deletion (it may have been only waiting for
 853 * its last child to go away).
 854 *
 855 * This tail recursion is done by hand as we don't want to depend
 856 * on the compiler to always get this right (gcc generally doesn't).
 857 * Real recursion would eat up our stack space.
 858 */
 859
 860/*
 861 * dput - release a dentry
 862 * @dentry: dentry to release 
 863 *
 864 * Release a dentry. This will drop the usage count and if appropriate
 865 * call the dentry unlink method as well as removing it from the queues and
 866 * releasing its resources. If the parent dentries were scheduled for release
 867 * they too may now get deleted.
 868 */
 869void dput(struct dentry *dentry)
 870{
 871	while (dentry) {
 872		might_sleep();
 873
 874		rcu_read_lock();
 875		if (likely(fast_dput(dentry))) {
 876			rcu_read_unlock();
 877			return;
 878		}
 879
 880		/* Slow case: now with the dentry lock held */
 881		rcu_read_unlock();
 882
 883		if (likely(retain_dentry(dentry))) {
 884			spin_unlock(&dentry->d_lock);
 885			return;
 886		}
 887
 888		dentry = dentry_kill(dentry);
 889	}
 890}
 891EXPORT_SYMBOL(dput);
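/*
 * Example (editorial sketch): the reference discipline around dput().
 * Every dget() (or reference-returning lookup) is paired with exactly
 * one dput(), and since dput() can sleep it must not be called from
 * atomic context - see the might_sleep() above.
 */
static void example_use_dentry(struct dentry *dentry)
{
	struct dentry *ref = dget(dentry);	/* pin the dentry */

	pr_info("in use: %pd\n", ref);		/* %pd prints d_name */
	dput(ref);				/* balance the dget() */
}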
 892
 893static void __dput_to_list(struct dentry *dentry, struct list_head *list)
 894__must_hold(&dentry->d_lock)
 895{
 896	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
 897		/* let the owner of the list it's on deal with it */
 898		--dentry->d_lockref.count;
 899	} else {
 900		if (dentry->d_flags & DCACHE_LRU_LIST)
 901			d_lru_del(dentry);
 902		if (!--dentry->d_lockref.count)
 903			d_shrink_add(dentry, list);
 904	}
 905}
 906
 907void dput_to_list(struct dentry *dentry, struct list_head *list)
 908{
 909	rcu_read_lock();
 910	if (likely(fast_dput(dentry))) {
 911		rcu_read_unlock();
 912		return;
 913	}
 914	rcu_read_unlock();
 915	if (!retain_dentry(dentry))
 916		__dput_to_list(dentry, list);
 917	spin_unlock(&dentry->d_lock);
 918}
 919
 920/* This must be called with d_lock held */
 921static inline void __dget_dlock(struct dentry *dentry)
 922{
 923	dentry->d_lockref.count++;
 924}
 925
 926static inline void __dget(struct dentry *dentry)
 927{
 928	lockref_get(&dentry->d_lockref);
 929}
 930
 931struct dentry *dget_parent(struct dentry *dentry)
 932{
 933	int gotref;
 934	struct dentry *ret;
 935	unsigned seq;
 936
 937	/*
 938	 * Do optimistic parent lookup without any
 939	 * locking.
 940	 */
 941	rcu_read_lock();
 942	seq = raw_seqcount_begin(&dentry->d_seq);
 943	ret = READ_ONCE(dentry->d_parent);
 944	gotref = lockref_get_not_zero(&ret->d_lockref);
 945	rcu_read_unlock();
 946	if (likely(gotref)) {
 947		if (!read_seqcount_retry(&dentry->d_seq, seq))
 948			return ret;
 949		dput(ret);
 950	}
 951
 952repeat:
 953	/*
 954	 * Don't need rcu_dereference because we re-check it was correct under
 955	 * the lock.
 956	 */
 957	rcu_read_lock();
 958	ret = dentry->d_parent;
 959	spin_lock(&ret->d_lock);
 960	if (unlikely(ret != dentry->d_parent)) {
 961		spin_unlock(&ret->d_lock);
 962		rcu_read_unlock();
 963		goto repeat;
 964	}
 965	rcu_read_unlock();
 966	BUG_ON(!ret->d_lockref.count);
 967	ret->d_lockref.count++;
 968	spin_unlock(&ret->d_lock);
 969	return ret;
 970}
 971EXPORT_SYMBOL(dget_parent);
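/*
 * Example (editorial sketch): reading dentry->d_parent directly is racy
 * against rename; dget_parent() instead returns a referenced parent that
 * is stable until the matching dput().
 */
static void example_with_parent(struct dentry *dentry)
{
	struct dentry *parent = dget_parent(dentry);

	pr_info("%pd/%pd\n", parent, dentry);
	dput(parent);
}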
 972
 973static struct dentry * __d_find_any_alias(struct inode *inode)
 974{
 975	struct dentry *alias;
 976
 977	if (hlist_empty(&inode->i_dentry))
 978		return NULL;
 979	alias = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
 980	__dget(alias);
 981	return alias;
 982}
 983
 984/**
 985 * d_find_any_alias - find any alias for a given inode
 986 * @inode: inode to find an alias for
 987 *
 988 * If any aliases exist for the given inode, take and return a
 989 * reference for one of them.  If no aliases exist, return %NULL.
 990 */
 991struct dentry *d_find_any_alias(struct inode *inode)
 992{
 993	struct dentry *de;
 994
 995	spin_lock(&inode->i_lock);
 996	de = __d_find_any_alias(inode);
 997	spin_unlock(&inode->i_lock);
 998	return de;
 999}
1000EXPORT_SYMBOL(d_find_any_alias);
1001
1002static struct dentry *__d_find_alias(struct inode *inode)
1003{
1004	struct dentry *alias;
1005
1006	if (S_ISDIR(inode->i_mode))
1007		return __d_find_any_alias(inode);
1008
1009	hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
1010		spin_lock(&alias->d_lock);
1011 		if (!d_unhashed(alias)) {
1012			__dget_dlock(alias);
1013			spin_unlock(&alias->d_lock);
1014			return alias;
1015		}
1016		spin_unlock(&alias->d_lock);
1017	}
1018	return NULL;
1019}
1020
1021/**
1022 * d_find_alias - grab a hashed alias of inode
1023 * @inode: inode in question
1024 *
1025 * If inode has a hashed alias, or is a directory and has any alias,
1026 * acquire the reference to alias and return it. Otherwise return NULL.
1027 * Notice that if inode is a directory there can be only one alias and
1028 * it can be unhashed only if it has no children, or if it is the root
1029 * of a filesystem, or if the directory was renamed and d_revalidate
1030 * was the first vfs operation to notice.
1031 *
1032 * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer
1033 * any other hashed alias over that one.
1034 */
1035struct dentry *d_find_alias(struct inode *inode)
1036{
1037	struct dentry *de = NULL;
1038
1039	if (!hlist_empty(&inode->i_dentry)) {
1040		spin_lock(&inode->i_lock);
1041		de = __d_find_alias(inode);
1042		spin_unlock(&inode->i_lock);
1043	}
1044	return de;
1045}
1046EXPORT_SYMBOL(d_find_alias);
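/*
 * Example (editorial sketch): mapping an inode back to a connected name,
 * e.g. for diagnostics.  d_find_alias() returns a referenced dentry or
 * NULL, so a successful result must be dput().
 */
static void example_log_alias(struct inode *inode)
{
	struct dentry *alias = d_find_alias(inode);

	if (alias) {
		pr_info("inode %lu known as %pd\n", inode->i_ino, alias);
		dput(alias);
	}
}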
1047
1048/*
1049 *  Caller MUST be holding rcu_read_lock() and be guaranteed
1050 *  that inode won't get freed until rcu_read_unlock().
1051 */
1052struct dentry *d_find_alias_rcu(struct inode *inode)
1053{
1054	struct hlist_head *l = &inode->i_dentry;
1055	struct dentry *de = NULL;
1056
1057	spin_lock(&inode->i_lock);
1058	// ->i_dentry and ->i_rcu are colocated, but the latter won't be
1059	// used without having I_FREEING set, which means no aliases left
1060	if (likely(!(inode->i_state & I_FREEING) && !hlist_empty(l))) {
1061		if (S_ISDIR(inode->i_mode)) {
1062			de = hlist_entry(l->first, struct dentry, d_u.d_alias);
1063		} else {
1064			hlist_for_each_entry(de, l, d_u.d_alias)
1065				if (!d_unhashed(de))
1066					break;
1067		}
1068	}
1069	spin_unlock(&inode->i_lock);
1070	return de;
1071}
1072
1073/*
1074 *	Try to kill dentries associated with this inode.
1075 * WARNING: you must own a reference to inode.
1076 */
1077void d_prune_aliases(struct inode *inode)
1078{
1079	struct dentry *dentry;
1080restart:
1081	spin_lock(&inode->i_lock);
1082	hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
1083		spin_lock(&dentry->d_lock);
1084		if (!dentry->d_lockref.count) {
1085			struct dentry *parent = lock_parent(dentry);
1086			if (likely(!dentry->d_lockref.count)) {
1087				__dentry_kill(dentry);
1088				dput(parent);
1089				goto restart;
1090			}
1091			if (parent)
1092				spin_unlock(&parent->d_lock);
1093		}
1094		spin_unlock(&dentry->d_lock);
1095	}
1096	spin_unlock(&inode->i_lock);
1097}
1098EXPORT_SYMBOL(d_prune_aliases);
1099
1100/*
1101 * Lock a dentry from shrink list.
1102 * Called under rcu_read_lock() and dentry->d_lock; the former
1103 * guarantees that nothing we access will be freed under us.
1104 * Note that dentry is *not* protected from concurrent dentry_kill(),
1105 * d_delete(), etc.
1106 *
1107 * Return false if dentry has been disrupted or grabbed, leaving
1108 * the caller to kick it off-list.  Otherwise, return true and have
1109 * that dentry's inode and parent both locked.
1110 */
1111static bool shrink_lock_dentry(struct dentry *dentry)
1112{
1113	struct inode *inode;
1114	struct dentry *parent;
1115
1116	if (dentry->d_lockref.count)
1117		return false;
1118
1119	inode = dentry->d_inode;
1120	if (inode && unlikely(!spin_trylock(&inode->i_lock))) {
1121		spin_unlock(&dentry->d_lock);
1122		spin_lock(&inode->i_lock);
1123		spin_lock(&dentry->d_lock);
1124		if (unlikely(dentry->d_lockref.count))
1125			goto out;
1126		/* changed inode means that somebody had grabbed it */
1127		if (unlikely(inode != dentry->d_inode))
1128			goto out;
1129	}
1130
1131	parent = dentry->d_parent;
1132	if (IS_ROOT(dentry) || likely(spin_trylock(&parent->d_lock)))
1133		return true;
1134
1135	spin_unlock(&dentry->d_lock);
1136	spin_lock(&parent->d_lock);
1137	if (unlikely(parent != dentry->d_parent)) {
1138		spin_unlock(&parent->d_lock);
1139		spin_lock(&dentry->d_lock);
1140		goto out;
1141	}
1142	spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
1143	if (likely(!dentry->d_lockref.count))
1144		return true;
1145	spin_unlock(&parent->d_lock);
1146out:
1147	if (inode)
1148		spin_unlock(&inode->i_lock);
1149	return false;
1150}
1151
1152void shrink_dentry_list(struct list_head *list)
1153{
1154	while (!list_empty(list)) {
1155		struct dentry *dentry, *parent;
1156
1157		dentry = list_entry(list->prev, struct dentry, d_lru);
1158		spin_lock(&dentry->d_lock);
1159		rcu_read_lock();
1160		if (!shrink_lock_dentry(dentry)) {
1161			bool can_free = false;
1162			rcu_read_unlock();
1163			d_shrink_del(dentry);
1164			if (dentry->d_lockref.count < 0)
1165				can_free = dentry->d_flags & DCACHE_MAY_FREE;
1166			spin_unlock(&dentry->d_lock);
1167			if (can_free)
1168				dentry_free(dentry);
1169			continue;
1170		}
1171		rcu_read_unlock();
1172		d_shrink_del(dentry);
1173		parent = dentry->d_parent;
1174		if (parent != dentry)
1175			__dput_to_list(parent, list);
1176		__dentry_kill(dentry);
1177	}
1178}
1179
1180static enum lru_status dentry_lru_isolate(struct list_head *item,
1181		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
1182{
1183	struct list_head *freeable = arg;
1184	struct dentry	*dentry = container_of(item, struct dentry, d_lru);
1185
1186
1187	/*
1188	 * we are inverting the lru lock/dentry->d_lock here,
1189	 * so use a trylock. If we fail to get the lock, just skip
1190	 * it
1191	 */
1192	if (!spin_trylock(&dentry->d_lock))
1193		return LRU_SKIP;
1194
1195	/*
1196	 * Referenced dentries are still in use. If they have active
1197	 * counts, just remove them from the LRU. Otherwise give them
1198	 * another pass through the LRU.
1199	 */
1200	if (dentry->d_lockref.count) {
1201		d_lru_isolate(lru, dentry);
1202		spin_unlock(&dentry->d_lock);
1203		return LRU_REMOVED;
1204	}
1205
1206	if (dentry->d_flags & DCACHE_REFERENCED) {
1207		dentry->d_flags &= ~DCACHE_REFERENCED;
1208		spin_unlock(&dentry->d_lock);
1209
1210		/*
1211		 * The list move itself will be made by the common LRU code. At
1212		 * this point, we've dropped the dentry->d_lock but keep the
1213		 * lru lock. This is safe to do, since every list movement is
1214		 * protected by the lru lock even if both locks are held.
1215		 *
1216		 * This is guaranteed by the fact that all LRU management
1217		 * functions are intermediated by the LRU API calls like
1218		 * list_lru_add and list_lru_del. List movement in this file
1219		 * only ever occurs through these functions or through callbacks
1220		 * like this one, which are called from the LRU API.
1221		 *
1222		 * The only exceptions to this are functions like
1223		 * shrink_dentry_list, and code that first checks for the
1224		 * DCACHE_SHRINK_LIST flag.  Those are guaranteed to be
1225		 * operating only with stack provided lists after they are
1226		 * properly isolated from the main list.  It is thus always a
1227		 * local access.
1228		 */
1229		return LRU_ROTATE;
1230	}
1231
1232	d_lru_shrink_move(lru, dentry, freeable);
1233	spin_unlock(&dentry->d_lock);
1234
1235	return LRU_REMOVED;
1236}
1237
1238/**
1239 * prune_dcache_sb - shrink the dcache
1240 * @sb: superblock
1241 * @sc: shrink control, passed to list_lru_shrink_walk()
1242 *
1243 * Attempt to shrink the superblock dcache LRU by @sc->nr_to_scan entries. This
1244 * is done when we need more memory and called from the superblock shrinker
1245 * function.
1246 *
1247 * This function may fail to free any resources if all the dentries are in
1248 * use.
1249 */
1250long prune_dcache_sb(struct super_block *sb, struct shrink_control *sc)
1251{
1252	LIST_HEAD(dispose);
1253	long freed;
1254
1255	freed = list_lru_shrink_walk(&sb->s_dentry_lru, sc,
1256				     dentry_lru_isolate, &dispose);
1257	shrink_dentry_list(&dispose);
1258	return freed;
1259}
1260
1261static enum lru_status dentry_lru_isolate_shrink(struct list_head *item,
1262		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
1263{
1264	struct list_head *freeable = arg;
1265	struct dentry	*dentry = container_of(item, struct dentry, d_lru);
1266
1267	/*
1268	 * we are inverting the lru lock/dentry->d_lock here,
1269	 * so use a trylock. If we fail to get the lock, just skip
1270	 * it
1271	 */
1272	if (!spin_trylock(&dentry->d_lock))
1273		return LRU_SKIP;
1274
1275	d_lru_shrink_move(lru, dentry, freeable);
1276	spin_unlock(&dentry->d_lock);
1277
1278	return LRU_REMOVED;
1279}
1280
1281
1282/**
1283 * shrink_dcache_sb - shrink dcache for a superblock
1284 * @sb: superblock
1285 *
1286 * Shrink the dcache for the specified super block. This is used to free
1287 * the dcache before unmounting a file system.
1288 */
1289void shrink_dcache_sb(struct super_block *sb)
1290{
1291	do {
1292		LIST_HEAD(dispose);
1293
1294		list_lru_walk(&sb->s_dentry_lru,
1295			dentry_lru_isolate_shrink, &dispose, 1024);
1296		shrink_dentry_list(&dispose);
1297	} while (list_lru_count(&sb->s_dentry_lru) > 0);
1298}
1299EXPORT_SYMBOL(shrink_dcache_sb);
1300
1301/**
1302 * enum d_walk_ret - action to take during tree walk
1303 * @D_WALK_CONTINUE:	continue walk
1304 * @D_WALK_QUIT:	quit walk
1305 * @D_WALK_NORETRY:	quit when retry is needed
1306 * @D_WALK_SKIP:	skip this dentry and its children
1307 */
1308enum d_walk_ret {
1309	D_WALK_CONTINUE,
1310	D_WALK_QUIT,
1311	D_WALK_NORETRY,
1312	D_WALK_SKIP,
1313};
1314
1315/**
1316 * d_walk - walk the dentry tree
1317 * @parent:	start of walk
1318 * @data:	data passed to @enter()
1319 * @enter:	callback when first entering the dentry
1320 *
1321 * The @enter() callbacks are called with d_lock held.
1322 */
1323static void d_walk(struct dentry *parent, void *data,
1324		   enum d_walk_ret (*enter)(void *, struct dentry *))
1325{
1326	struct dentry *this_parent;
1327	struct list_head *next;
1328	unsigned seq = 0;
1329	enum d_walk_ret ret;
1330	bool retry = true;
1331
1332again:
1333	read_seqbegin_or_lock(&rename_lock, &seq);
1334	this_parent = parent;
1335	spin_lock(&this_parent->d_lock);
1336
1337	ret = enter(data, this_parent);
1338	switch (ret) {
1339	case D_WALK_CONTINUE:
1340		break;
1341	case D_WALK_QUIT:
1342	case D_WALK_SKIP:
1343		goto out_unlock;
1344	case D_WALK_NORETRY:
1345		retry = false;
1346		break;
1347	}
1348repeat:
1349	next = this_parent->d_subdirs.next;
1350resume:
1351	while (next != &this_parent->d_subdirs) {
1352		struct list_head *tmp = next;
1353		struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
1354		next = tmp->next;
1355
1356		if (unlikely(dentry->d_flags & DCACHE_DENTRY_CURSOR))
1357			continue;
1358
1359		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
1360
1361		ret = enter(data, dentry);
1362		switch (ret) {
1363		case D_WALK_CONTINUE:
1364			break;
1365		case D_WALK_QUIT:
1366			spin_unlock(&dentry->d_lock);
1367			goto out_unlock;
1368		case D_WALK_NORETRY:
1369			retry = false;
1370			break;
1371		case D_WALK_SKIP:
1372			spin_unlock(&dentry->d_lock);
1373			continue;
1374		}
1375
1376		if (!list_empty(&dentry->d_subdirs)) {
1377			spin_unlock(&this_parent->d_lock);
1378			spin_release(&dentry->d_lock.dep_map, _RET_IP_);
1379			this_parent = dentry;
1380			spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
1381			goto repeat;
1382		}
1383		spin_unlock(&dentry->d_lock);
1384	}
1385	/*
1386	 * All done at this level ... ascend and resume the search.
1387	 */
1388	rcu_read_lock();
1389ascend:
1390	if (this_parent != parent) {
1391		struct dentry *child = this_parent;
1392		this_parent = child->d_parent;
1393
1394		spin_unlock(&child->d_lock);
1395		spin_lock(&this_parent->d_lock);
1396
1397		/* might go back up the wrong parent if we have had a rename. */
1398		if (need_seqretry(&rename_lock, seq))
1399			goto rename_retry;
1400		/* go into the first sibling still alive */
1401		do {
1402			next = child->d_child.next;
1403			if (next == &this_parent->d_subdirs)
1404				goto ascend;
1405			child = list_entry(next, struct dentry, d_child);
1406		} while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED));
1407		rcu_read_unlock();
1408		goto resume;
1409	}
1410	if (need_seqretry(&rename_lock, seq))
1411		goto rename_retry;
1412	rcu_read_unlock();
1413
1414out_unlock:
1415	spin_unlock(&this_parent->d_lock);
1416	done_seqretry(&rename_lock, seq);
1417	return;
1418
1419rename_retry:
1420	spin_unlock(&this_parent->d_lock);
1421	rcu_read_unlock();
1422	BUG_ON(seq & 1);
1423	if (!retry)
1424		return;
1425	seq = 1;
1426	goto again;
1427}
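/*
 * Example (editorial sketch): the shape of a d_walk() callback.  d_walk()
 * is static to this file, so only dcache-internal code (like
 * path_check_mount() below) can use it; @enter runs with the dentry's
 * d_lock held and therefore must not sleep.
 */
static enum d_walk_ret example_count_dentries(void *data, struct dentry *dentry)
{
	unsigned long *count = data;

	(*count)++;		/* d_lock is held - keep the work cheap */
	return D_WALK_CONTINUE;
}
/* usage: unsigned long n = 0; d_walk(root, &n, example_count_dentries); */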
1428
1429struct check_mount {
1430	struct vfsmount *mnt;
1431	unsigned int mounted;
1432};
1433
1434static enum d_walk_ret path_check_mount(void *data, struct dentry *dentry)
1435{
1436	struct check_mount *info = data;
1437	struct path path = { .mnt = info->mnt, .dentry = dentry };
1438
1439	if (likely(!d_mountpoint(dentry)))
1440		return D_WALK_CONTINUE;
1441	if (__path_is_mountpoint(&path)) {
1442		info->mounted = 1;
1443		return D_WALK_QUIT;
1444	}
1445	return D_WALK_CONTINUE;
1446}
1447
1448/**
1449 * path_has_submounts - check for mounts over a dentry in the
1450 *                      current namespace.
1451 * @parent: path to check.
1452 *
1453 * Return true if the parent or its subdirectories contain
1454 * a mount point in the current namespace.
1455 */
1456int path_has_submounts(const struct path *parent)
1457{
1458	struct check_mount data = { .mnt = parent->mnt, .mounted = 0 };
1459
1460	read_seqlock_excl(&mount_lock);
1461	d_walk(parent->dentry, &data, path_check_mount);
1462	read_sequnlock_excl(&mount_lock);
1463
1464	return data.mounted;
1465}
1466EXPORT_SYMBOL(path_has_submounts);
1467
1468/*
1469 * Called by mount code to set a mountpoint and check if the mountpoint is
1470 * reachable (e.g. NFS can unhash a directory dentry and then the complete
1471 * subtree can become unreachable).
1472 *
1473 * Only one of d_invalidate() and d_set_mounted() must succeed.  For
1474 * this reason take rename_lock and d_lock on dentry and ancestors.
1475 */
1476int d_set_mounted(struct dentry *dentry)
1477{
1478	struct dentry *p;
1479	int ret = -ENOENT;
1480	write_seqlock(&rename_lock);
1481	for (p = dentry->d_parent; !IS_ROOT(p); p = p->d_parent) {
1482		/* Need exclusion wrt. d_invalidate() */
1483		spin_lock(&p->d_lock);
1484		if (unlikely(d_unhashed(p))) {
1485			spin_unlock(&p->d_lock);
1486			goto out;
1487		}
1488		spin_unlock(&p->d_lock);
1489	}
1490	spin_lock(&dentry->d_lock);
1491	if (!d_unlinked(dentry)) {
1492		ret = -EBUSY;
1493		if (!d_mountpoint(dentry)) {
1494			dentry->d_flags |= DCACHE_MOUNTED;
1495			ret = 0;
1496		}
1497	}
1498 	spin_unlock(&dentry->d_lock);
1499out:
1500	write_sequnlock(&rename_lock);
1501	return ret;
1502}
1503
1504/*
1505 * Search the dentry child list of the specified parent,
1506 * and move any unused dentries to the end of the unused
1507 * list for shrink_dentry_list(). We descend to the next level
1508 * whenever the d_subdirs list is non-empty and continue
1509 * searching.
1510 *
1511 * It returns zero iff there are no unused children,
1512 * otherwise it returns the number of children moved to
1513 * the end of the unused list. This may not be the total
1514 * number of unused children, because select_collect() can
1515 * drop the lock and return early due to latency
1516 * constraints.
1517 */
1518
1519struct select_data {
1520	struct dentry *start;
1521	union {
1522		long found;
1523		struct dentry *victim;
1524	};
1525	struct list_head dispose;
1526};
1527
1528static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
1529{
1530	struct select_data *data = _data;
1531	enum d_walk_ret ret = D_WALK_CONTINUE;
1532
1533	if (data->start == dentry)
1534		goto out;
1535
1536	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
1537		data->found++;
1538	} else {
1539		if (dentry->d_flags & DCACHE_LRU_LIST)
1540			d_lru_del(dentry);
1541		if (!dentry->d_lockref.count) {
1542			d_shrink_add(dentry, &data->dispose);
1543			data->found++;
1544		}
1545	}
1546	/*
1547	 * We can return to the caller if we have found some (this
1548	 * ensures forward progress). We'll be coming back to find
1549	 * the rest.
1550	 */
1551	if (!list_empty(&data->dispose))
1552		ret = need_resched() ? D_WALK_QUIT : D_WALK_NORETRY;
1553out:
1554	return ret;
1555}
1556
1557static enum d_walk_ret select_collect2(void *_data, struct dentry *dentry)
1558{
1559	struct select_data *data = _data;
1560	enum d_walk_ret ret = D_WALK_CONTINUE;
1561
1562	if (data->start == dentry)
1563		goto out;
1564
1565	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
1566		if (!dentry->d_lockref.count) {
1567			rcu_read_lock();
1568			data->victim = dentry;
1569			return D_WALK_QUIT;
1570		}
1571	} else {
1572		if (dentry->d_flags & DCACHE_LRU_LIST)
1573			d_lru_del(dentry);
1574		if (!dentry->d_lockref.count)
1575			d_shrink_add(dentry, &data->dispose);
1576	}
1577	/*
1578	 * We can return to the caller if we have found some (this
1579	 * ensures forward progress). We'll be coming back to find
1580	 * the rest.
1581	 */
1582	if (!list_empty(&data->dispose))
1583		ret = need_resched() ? D_WALK_QUIT : D_WALK_NORETRY;
1584out:
1585	return ret;
1586}
1587
1588/**
1589 * shrink_dcache_parent - prune dcache
1590 * @parent: parent of entries to prune
1591 *
1592 * Prune the dcache to remove unused children of the parent dentry.
1593 */
1594void shrink_dcache_parent(struct dentry *parent)
1595{
1596	for (;;) {
1597		struct select_data data = {.start = parent};
1598
1599		INIT_LIST_HEAD(&data.dispose);
1600		d_walk(parent, &data, select_collect);
1601
1602		if (!list_empty(&data.dispose)) {
1603			shrink_dentry_list(&data.dispose);
1604			continue;
1605		}
1606
1607		cond_resched();
1608		if (!data.found)
1609			break;
1610		data.victim = NULL;
1611		d_walk(parent, &data, select_collect2);
1612		if (data.victim) {
1613			struct dentry *parent;
1614			spin_lock(&data.victim->d_lock);
1615			if (!shrink_lock_dentry(data.victim)) {
1616				spin_unlock(&data.victim->d_lock);
1617				rcu_read_unlock();
1618			} else {
1619				rcu_read_unlock();
1620				parent = data.victim->d_parent;
1621				if (parent != data.victim)
1622					__dput_to_list(parent, &data.dispose);
1623				__dentry_kill(data.victim);
1624			}
1625		}
1626		if (!list_empty(&data.dispose))
1627			shrink_dentry_list(&data.dispose);
1628	}
1629}
1630EXPORT_SYMBOL(shrink_dcache_parent);
1631
1632static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
1633{
1634	/* it has busy descendants; complain about those instead */
1635	if (!list_empty(&dentry->d_subdirs))
1636		return D_WALK_CONTINUE;
1637
1638	/* root with refcount 1 is fine */
1639	if (dentry == _data && dentry->d_lockref.count == 1)
1640		return D_WALK_CONTINUE;
1641
1642	printk(KERN_ERR "BUG: Dentry %p{i=%lx,n=%pd} "
1643			" still in use (%d) [unmount of %s %s]\n",
1644		       dentry,
1645		       dentry->d_inode ?
1646		       dentry->d_inode->i_ino : 0UL,
1647		       dentry,
1648		       dentry->d_lockref.count,
1649		       dentry->d_sb->s_type->name,
1650		       dentry->d_sb->s_id);
1651	WARN_ON(1);
1652	return D_WALK_CONTINUE;
1653}
1654
1655static void do_one_tree(struct dentry *dentry)
1656{
1657	shrink_dcache_parent(dentry);
1658	d_walk(dentry, dentry, umount_check);
1659	d_drop(dentry);
1660	dput(dentry);
1661}
1662
1663/*
1664 * destroy the dentries attached to a superblock on unmounting
1665 */
1666void shrink_dcache_for_umount(struct super_block *sb)
1667{
1668	struct dentry *dentry;
1669
1670	WARN(down_read_trylock(&sb->s_umount), "s_umount should've been locked");
1671
1672	dentry = sb->s_root;
1673	sb->s_root = NULL;
1674	do_one_tree(dentry);
1675
1676	while (!hlist_bl_empty(&sb->s_roots)) {
1677		dentry = dget(hlist_bl_entry(hlist_bl_first(&sb->s_roots), struct dentry, d_hash));
1678		do_one_tree(dentry);
1679	}
1680}
1681
1682static enum d_walk_ret find_submount(void *_data, struct dentry *dentry)
1683{
1684	struct dentry **victim = _data;
1685	if (d_mountpoint(dentry)) {
1686		__dget_dlock(dentry);
1687		*victim = dentry;
1688		return D_WALK_QUIT;
1689	}
1690	return D_WALK_CONTINUE;
1691}
1692
1693/**
1694 * d_invalidate - detach submounts, prune dcache, and drop
1695 * @dentry: dentry to invalidate (aka detach, prune and drop)
1696 */
1697void d_invalidate(struct dentry *dentry)
1698{
1699	bool had_submounts = false;
1700	spin_lock(&dentry->d_lock);
1701	if (d_unhashed(dentry)) {
1702		spin_unlock(&dentry->d_lock);
1703		return;
1704	}
1705	__d_drop(dentry);
1706	spin_unlock(&dentry->d_lock);
1707
1708	/* Negative dentries can be dropped without further checks */
1709	if (!dentry->d_inode)
1710		return;
1711
1712	shrink_dcache_parent(dentry);
1713	for (;;) {
1714		struct dentry *victim = NULL;
1715		d_walk(dentry, &victim, find_submount);
1716		if (!victim) {
1717			if (had_submounts)
1718				shrink_dcache_parent(dentry);
1719			return;
1720		}
1721		had_submounts = true;
1722		detach_mounts(victim);
1723		dput(victim);
1724	}
1725}
1726EXPORT_SYMBOL(d_invalidate);
1727
1728/**
1729 * __d_alloc	-	allocate a dcache entry
1730 * @sb: filesystem it will belong to
1731 * @name: qstr of the name
1732 *
1733 * Allocates a dentry. It returns %NULL if there is insufficient memory
1734 * available. On success the dentry is returned. The name passed in is
1735 * copied, so the caller's copy may be reused after this call.
1736 */
1737 
1738static struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
1739{
1740	struct dentry *dentry;
1741	char *dname;
1742	int err;
1743
1744	dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
1745	if (!dentry)
1746		return NULL;
1747
1748	/*
1749	 * We guarantee that the inline name is always NUL-terminated.
1750	 * This way the memcpy() done by the name switching in rename
1751	 * will still always have a NUL at the end, even if we might
1752	 * be overwriting an internal NUL character
1753	 */
1754	dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
1755	if (unlikely(!name)) {
1756		name = &slash_name;
1757		dname = dentry->d_iname;
1758	} else if (name->len > DNAME_INLINE_LEN-1) {
1759		size_t size = offsetof(struct external_name, name[1]);
1760		struct external_name *p = kmalloc(size + name->len,
1761						  GFP_KERNEL_ACCOUNT |
1762						  __GFP_RECLAIMABLE);
1763		if (!p) {
1764			kmem_cache_free(dentry_cache, dentry); 
1765			return NULL;
1766		}
1767		atomic_set(&p->u.count, 1);
1768		dname = p->name;
1769	} else  {
1770		dname = dentry->d_iname;
1771	}	
1772
1773	dentry->d_name.len = name->len;
1774	dentry->d_name.hash = name->hash;
1775	memcpy(dname, name->name, name->len);
1776	dname[name->len] = 0;
1777
1778	/* Make sure we always see the terminating NUL character */
1779	smp_store_release(&dentry->d_name.name, dname); /* ^^^ */
1780
1781	dentry->d_lockref.count = 1;
1782	dentry->d_flags = 0;
1783	spin_lock_init(&dentry->d_lock);
1784	seqcount_spinlock_init(&dentry->d_seq, &dentry->d_lock);
1785	dentry->d_inode = NULL;
1786	dentry->d_parent = dentry;
1787	dentry->d_sb = sb;
1788	dentry->d_op = NULL;
1789	dentry->d_fsdata = NULL;
1790	INIT_HLIST_BL_NODE(&dentry->d_hash);
1791	INIT_LIST_HEAD(&dentry->d_lru);
1792	INIT_LIST_HEAD(&dentry->d_subdirs);
1793	INIT_HLIST_NODE(&dentry->d_u.d_alias);
1794	INIT_LIST_HEAD(&dentry->d_child);
1795	d_set_d_op(dentry, dentry->d_sb->s_d_op);
1796
1797	if (dentry->d_op && dentry->d_op->d_init) {
1798		err = dentry->d_op->d_init(dentry);
1799		if (err) {
1800			if (dname_external(dentry))
1801				kfree(external_name(dentry));
1802			kmem_cache_free(dentry_cache, dentry);
1803			return NULL;
1804		}
1805	}
1806
1807	this_cpu_inc(nr_dentry);
1808
1809	return dentry;
1810}
1811
1812/**
1813 * d_alloc	-	allocate a dcache entry
1814 * @parent: parent of entry to allocate
1815 * @name: qstr of the name
1816 *
1817 * Allocates a dentry. It returns %NULL if there is insufficient memory
1818 * available. On success the dentry is returned. The name passed in is
1819 * copied, so the caller's copy may be reused after this call.
1820 */
1821struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
1822{
1823	struct dentry *dentry = __d_alloc(parent->d_sb, name);
1824	if (!dentry)
1825		return NULL;
1826	spin_lock(&parent->d_lock);
1827	/*
1828	 * don't need child lock because it is not subject
1829	 * to concurrency here
1830	 */
1831	__dget_dlock(parent);
1832	dentry->d_parent = parent;
1833	list_add(&dentry->d_child, &parent->d_subdirs);
1834	spin_unlock(&parent->d_lock);
1835
1836	return dentry;
1837}
1838EXPORT_SYMBOL(d_alloc);
1839
1840struct dentry *d_alloc_anon(struct super_block *sb)
1841{
1842	return __d_alloc(sb, NULL);
1843}
1844EXPORT_SYMBOL(d_alloc_anon);
1845
1846struct dentry *d_alloc_cursor(struct dentry * parent)
1847{
1848	struct dentry *dentry = d_alloc_anon(parent->d_sb);
1849	if (dentry) {
1850		dentry->d_flags |= DCACHE_DENTRY_CURSOR;
1851		dentry->d_parent = dget(parent);
1852	}
1853	return dentry;
1854}
1855
1856/**
1857 * d_alloc_pseudo - allocate a dentry (for lookup-less filesystems)
1858 * @sb: the superblock
1859 * @name: qstr of the name
1860 *
1861 * For a filesystem that just pins its dentries in memory and never
1862 * performs lookups at all, return an unhashed IS_ROOT dentry.
1863 * This is used for pipes, sockets et al. - the stuff that should
1864 * never be anyone's children or parents.  Unlike all other
1865 * dentries, these will not have RCU delay between dropping the
1866 * last reference and freeing them.
1867 *
1868 * The only user is alloc_file_pseudo() and that's what should
1869 * be considered a public interface.  Don't use directly.
1870 */
1871struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name)
1872{
1873	struct dentry *dentry = __d_alloc(sb, name);
1874	if (likely(dentry))
1875		dentry->d_flags |= DCACHE_NORCU;
1876	return dentry;
1877}
1878
1879struct dentry *d_alloc_name(struct dentry *parent, const char *name)
1880{
1881	struct qstr q;
1882
1883	q.name = name;
1884	q.hash_len = hashlen_string(parent, name);
1885	return d_alloc(parent, &q);
1886}
1887EXPORT_SYMBOL(d_alloc_name);
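/*
 * Example (editorial sketch): the common way pseudo-filesystems populate
 * a directory - d_alloc_name() followed by d_add(), which instantiates
 * and hashes in one call.  Assumes @inode carries one reference that
 * this function consumes; example_add_entry() is hypothetical.
 */
static struct dentry *example_add_entry(struct dentry *dir, const char *name,
					struct inode *inode)
{
	struct dentry *dentry = d_alloc_name(dir, name);

	if (!dentry) {
		iput(inode);
		return NULL;
	}
	d_add(dentry, inode);
	return dentry;
}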
1888
1889void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
1890{
1891	WARN_ON_ONCE(dentry->d_op);
1892	WARN_ON_ONCE(dentry->d_flags & (DCACHE_OP_HASH	|
1893				DCACHE_OP_COMPARE	|
1894				DCACHE_OP_REVALIDATE	|
1895				DCACHE_OP_WEAK_REVALIDATE	|
1896				DCACHE_OP_DELETE	|
1897				DCACHE_OP_REAL));
1898	dentry->d_op = op;
1899	if (!op)
1900		return;
1901	if (op->d_hash)
1902		dentry->d_flags |= DCACHE_OP_HASH;
1903	if (op->d_compare)
1904		dentry->d_flags |= DCACHE_OP_COMPARE;
1905	if (op->d_revalidate)
1906		dentry->d_flags |= DCACHE_OP_REVALIDATE;
1907	if (op->d_weak_revalidate)
1908		dentry->d_flags |= DCACHE_OP_WEAK_REVALIDATE;
1909	if (op->d_delete)
1910		dentry->d_flags |= DCACHE_OP_DELETE;
1911	if (op->d_prune)
1912		dentry->d_flags |= DCACHE_OP_PRUNE;
1913	if (op->d_real)
1914		dentry->d_flags |= DCACHE_OP_REAL;
1915
1916}
1917EXPORT_SYMBOL(d_set_d_op);
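/*
 * Example (editorial sketch): filesystems rarely call d_set_d_op()
 * directly; they set sb->s_d_op once at mount time and __d_alloc() above
 * applies it to every new dentry.  A minimal table that drops dentries
 * as soon as they become unused (compare the in-tree
 * always_delete_dentry() helper) might look like this:
 */
static int example_d_delete(const struct dentry *dentry)
{
	return 1;	/* never keep unused dentries around */
}

static const struct dentry_operations example_dops = {
	.d_delete = example_d_delete,
};
/* in fill_super(): sb->s_d_op = &example_dops; */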
1918
1919
1920/*
1921 * d_set_fallthru - Mark a dentry as falling through to a lower layer
1922 * @dentry - The dentry to mark
1923 *
1924 * Mark a dentry as falling through to the lower layer (as set with
1925 * d_pin_lower()).  This flag may be recorded on the medium.
1926 */
1927void d_set_fallthru(struct dentry *dentry)
1928{
1929	spin_lock(&dentry->d_lock);
1930	dentry->d_flags |= DCACHE_FALLTHRU;
1931	spin_unlock(&dentry->d_lock);
1932}
1933EXPORT_SYMBOL(d_set_fallthru);
1934
1935static unsigned d_flags_for_inode(struct inode *inode)
1936{
1937	unsigned add_flags = DCACHE_REGULAR_TYPE;
1938
1939	if (!inode)
1940		return DCACHE_MISS_TYPE;
1941
1942	if (S_ISDIR(inode->i_mode)) {
1943		add_flags = DCACHE_DIRECTORY_TYPE;
1944		if (unlikely(!(inode->i_opflags & IOP_LOOKUP))) {
1945			if (unlikely(!inode->i_op->lookup))
1946				add_flags = DCACHE_AUTODIR_TYPE;
1947			else
1948				inode->i_opflags |= IOP_LOOKUP;
1949		}
1950		goto type_determined;
1951	}
1952
1953	if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) {
1954		if (unlikely(inode->i_op->get_link)) {
1955			add_flags = DCACHE_SYMLINK_TYPE;
1956			goto type_determined;
1957		}
1958		inode->i_opflags |= IOP_NOFOLLOW;
1959	}
1960
1961	if (unlikely(!S_ISREG(inode->i_mode)))
1962		add_flags = DCACHE_SPECIAL_TYPE;
1963
1964type_determined:
1965	if (unlikely(IS_AUTOMOUNT(inode)))
1966		add_flags |= DCACHE_NEED_AUTOMOUNT;
1967	return add_flags;
1968}
1969
1970static void __d_instantiate(struct dentry *dentry, struct inode *inode)
1971{
1972	unsigned add_flags = d_flags_for_inode(inode);
1973	WARN_ON(d_in_lookup(dentry));
1974
1975	spin_lock(&dentry->d_lock);
1976	/*
1977	 * Decrement negative dentry count if it was in the LRU list.
1978	 */
1979	if (dentry->d_flags & DCACHE_LRU_LIST)
1980		this_cpu_dec(nr_dentry_negative);
1981	hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
1982	raw_write_seqcount_begin(&dentry->d_seq);
1983	__d_set_inode_and_type(dentry, inode, add_flags);
1984	raw_write_seqcount_end(&dentry->d_seq);
1985	fsnotify_update_flags(dentry);
1986	spin_unlock(&dentry->d_lock);
1987}
1988
1989/**
1990 * d_instantiate - fill in inode information for a dentry
1991 * @entry: dentry to complete
1992 * @inode: inode to attach to this dentry
1993 *
1994 * Fill in inode information in the entry.
1995 *
1996 * This turns negative dentries into productive full members
1997 * of society.
1998 *
1999 * NOTE! This assumes that the inode count has been incremented
2000 * (or otherwise set) by the caller to indicate that it is now
2001 * in use by the dcache.
2002 */
2003 
2004void d_instantiate(struct dentry *entry, struct inode * inode)
2005{
2006	BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
2007	if (inode) {
2008		security_d_instantiate(entry, inode);
2009		spin_lock(&inode->i_lock);
2010		__d_instantiate(entry, inode);
2011		spin_unlock(&inode->i_lock);
2012	}
2013}
2014EXPORT_SYMBOL(d_instantiate);
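/*
 * Example (editorial sketch): the tail of a typical ->create() method.
 * new_inode() returns an inode holding one reference, and
 * d_instantiate() takes that reference over on behalf of the dcache.
 * The signature is simplified (a real ->create() also gets the idmap
 * and an excl flag) and the inode setup is elided.
 */
static int example_create(struct inode *dir, struct dentry *dentry,
			  umode_t mode)
{
	struct inode *inode = new_inode(dir->i_sb);

	if (!inode)
		return -ENOSPC;
	inode->i_mode = mode;
	/* ... set i_op, i_fop, timestamps ... */
	d_instantiate(dentry, inode);	/* dentry goes positive */
	return 0;
}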
2015
2016/*
2017 * This should be equivalent to d_instantiate() + unlock_new_inode(),
2018 * with lockdep-related part of unlock_new_inode() done before
2019 * anything else.  Use that instead of open-coding d_instantiate()/
2020 * unlock_new_inode() combinations.
2021 */
2022void d_instantiate_new(struct dentry *entry, struct inode *inode)
2023{
2024	BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
2025	BUG_ON(!inode);
2026	lockdep_annotate_inode_mutex_key(inode);
2027	security_d_instantiate(entry, inode);
2028	spin_lock(&inode->i_lock);
2029	__d_instantiate(entry, inode);
2030	WARN_ON(!(inode->i_state & I_NEW));
2031	inode->i_state &= ~I_NEW & ~I_CREATING;
2032	smp_mb();
2033	wake_up_bit(&inode->i_state, __I_NEW);
2034	spin_unlock(&inode->i_lock);
2035}
2036EXPORT_SYMBOL(d_instantiate_new);
2037
2038struct dentry *d_make_root(struct inode *root_inode)
2039{
2040	struct dentry *res = NULL;
2041
2042	if (root_inode) {
2043		res = d_alloc_anon(root_inode->i_sb);
2044		if (res)
2045			d_instantiate(res, root_inode);
2046		else
2047			iput(root_inode);
2048	}
2049	return res;
2050}
2051EXPORT_SYMBOL(d_make_root);
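/*
 * Example (editorial sketch): the usual fill_super tail.  d_make_root()
 * consumes the inode reference even on failure (note the iput() above),
 * so the caller only checks sb->s_root.  Setup is deliberately minimal.
 */
static int example_fill_super(struct super_block *sb)
{
	struct inode *root = new_inode(sb);

	if (!root)
		return -ENOMEM;
	root->i_mode = S_IFDIR | 0755;
	/* ... root->i_op / i_fop for the root directory ... */
	sb->s_root = d_make_root(root);
	return sb->s_root ? 0 : -ENOMEM;
}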
2052
2053static struct dentry *__d_instantiate_anon(struct dentry *dentry,
2054					   struct inode *inode,
2055					   bool disconnected)
2056{
2057	struct dentry *res;
2058	unsigned add_flags;
2059
2060	security_d_instantiate(dentry, inode);
2061	spin_lock(&inode->i_lock);
2062	res = __d_find_any_alias(inode);
2063	if (res) {
2064		spin_unlock(&inode->i_lock);
2065		dput(dentry);
2066		goto out_iput;
2067	}
2068
2069	/* attach a disconnected dentry */
2070	add_flags = d_flags_for_inode(inode);
2071
2072	if (disconnected)
2073		add_flags |= DCACHE_DISCONNECTED;
2074
2075	spin_lock(&dentry->d_lock);
2076	__d_set_inode_and_type(dentry, inode, add_flags);
2077	hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
2078	if (!disconnected) {
2079		hlist_bl_lock(&dentry->d_sb->s_roots);
2080		hlist_bl_add_head(&dentry->d_hash, &dentry->d_sb->s_roots);
2081		hlist_bl_unlock(&dentry->d_sb->s_roots);
2082	}
2083	spin_unlock(&dentry->d_lock);
2084	spin_unlock(&inode->i_lock);
2085
2086	return dentry;
2087
2088 out_iput:
2089	iput(inode);
2090	return res;
2091}
2092
2093struct dentry *d_instantiate_anon(struct dentry *dentry, struct inode *inode)
2094{
2095	return __d_instantiate_anon(dentry, inode, true);
2096}
2097EXPORT_SYMBOL(d_instantiate_anon);
2098
2099static struct dentry *__d_obtain_alias(struct inode *inode, bool disconnected)
2100{
2101	struct dentry *tmp;
2102	struct dentry *res;
2103
2104	if (!inode)
2105		return ERR_PTR(-ESTALE);
2106	if (IS_ERR(inode))
2107		return ERR_CAST(inode);
2108
2109	res = d_find_any_alias(inode);
2110	if (res)
2111		goto out_iput;
2112
2113	tmp = d_alloc_anon(inode->i_sb);
2114	if (!tmp) {
2115		res = ERR_PTR(-ENOMEM);
2116		goto out_iput;
2117	}
2118
2119	return __d_instantiate_anon(tmp, inode, disconnected);
2120
2121out_iput:
2122	iput(inode);
2123	return res;
2124}
2125
2126/**
2127 * d_obtain_alias - find or allocate a DISCONNECTED dentry for a given inode
2128 * @inode: inode to allocate the dentry for
2129 *
2130 * Obtain a dentry for an inode resulting from NFS filehandle conversion or
2131 * similar open by handle operations.  The returned dentry may be anonymous,
2132 * or may have a full name (if the inode was already in the cache).
2133 *
2134 * When called on a directory inode, we must ensure that the inode only ever
2135 * has one dentry.  If a dentry is found, that is returned instead of
2136 * allocating a new one.
2137 *
2138 * On successful return, the reference to the inode has been transferred
2139 * to the dentry.  In case of an error the reference on the inode is released.
2140 * To make it easier to use in export operations a %NULL or IS_ERR inode may
2141 * be passed in and the error will be propagated to the return value,
2142 * with a %NULL @inode replaced by ERR_PTR(-ESTALE).
2143 */
2144struct dentry *d_obtain_alias(struct inode *inode)
2145{
2146	return __d_obtain_alias(inode, true);
2147}
2148EXPORT_SYMBOL(d_obtain_alias);
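/*
 * Example (editorial sketch): the classic export_operations pattern.
 * Because __d_obtain_alias() maps a NULL or IS_ERR inode to a suitable
 * ERR_PTR, the filehandle decode path needs no explicit error checks.
 * example_iget() is a hypothetical inode lookup, and the signature is
 * simplified relative to a real ->fh_to_dentry().
 */
static struct dentry *example_fh_to_dentry(struct super_block *sb, u64 ino)
{
	return d_obtain_alias(example_iget(sb, ino));
}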
2149
2150/**
2151 * d_obtain_root - find or allocate a dentry for a given inode
2152 * @inode: inode to allocate the dentry for
2153 *
2154 * Obtain an IS_ROOT dentry for the root of a filesystem.
2155 *
2156 * We must ensure that directory inodes only ever have one dentry.  If a
2157 * dentry is found, that is returned instead of allocating a new one.
2158 *
2159 * On successful return, the reference to the inode has been transferred
2160 * to the dentry.  In case of an error the reference on the inode is
2161 * released.  A %NULL or IS_ERR inode may be passed in and the error
2162 * will be propagated to the return value, with a %NULL @inode
2163 * replaced by ERR_PTR(-ESTALE).
2164 */
2165struct dentry *d_obtain_root(struct inode *inode)
2166{
2167	return __d_obtain_alias(inode, false);
2168}
2169EXPORT_SYMBOL(d_obtain_root);
2170
2171/**
2172 * d_add_ci - lookup or allocate new dentry with case-exact name
2173 * @inode:  the inode case-insensitive lookup has found
2174 * @dentry: the negative dentry that was passed to the parent's lookup func
2175 * @name:   the case-exact name to be associated with the returned dentry
2176 *
2177 * This is to avoid filling the dcache with case-insensitive names to the
2178 * same inode; only the actual correct case is stored in the dcache for
2179 * case-insensitive filesystems.
2180 *
2181 * If the lookup matched case-insensitively and the case-exact dentry
2182 * already exists in the dcache, use it and return it.
2183 *
2184 * If no entry exists with the exact case name, allocate new dentry with
2185 * the exact case, and return the spliced entry.
2186 */
2187struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
2188			struct qstr *name)
2189{
2190	struct dentry *found, *res;
2191
2192	/*
2193	 * First check if a dentry matching the name already exists,
2194	 * if not go ahead and create it now.
2195	 */
2196	found = d_hash_and_lookup(dentry->d_parent, name);
2197	if (found) {
2198		iput(inode);
2199		return found;
2200	}
2201	if (d_in_lookup(dentry)) {
2202		found = d_alloc_parallel(dentry->d_parent, name,
2203					dentry->d_wait);
2204		if (IS_ERR(found) || !d_in_lookup(found)) {
2205			iput(inode);
2206			return found;
2207		}
2208	} else {
2209		found = d_alloc(dentry->d_parent, name);
2210		if (!found) {
2211			iput(inode);
2212			return ERR_PTR(-ENOMEM);
2213		} 
2214	}
2215	res = d_splice_alias(inode, found);
2216	if (res) {
2217		dput(found);
2218		return res;
2219	}
2220	return found;
2221}
2222EXPORT_SYMBOL(d_add_ci);
2223
2224
2225static inline bool d_same_name(const struct dentry *dentry,
2226				const struct dentry *parent,
2227				const struct qstr *name)
2228{
2229	if (likely(!(parent->d_flags & DCACHE_OP_COMPARE))) {
2230		if (dentry->d_name.len != name->len)
2231			return false;
2232		return dentry_cmp(dentry, name->name, name->len) == 0;
2233	}
2234	return parent->d_op->d_compare(dentry,
2235				       dentry->d_name.len, dentry->d_name.name,
2236				       name) == 0;
2237}
2238
2239/**
2240 * __d_lookup_rcu - search for a dentry (racy, store-free)
2241 * @parent: parent dentry
2242 * @name: qstr of name we wish to find
2243 * @seqp: returns d_seq value at the point where the dentry was found
2244 * Returns: dentry, or NULL
2245 *
2246 * __d_lookup_rcu is the dcache lookup function for rcu-walk name
2247 * resolution (store-free path walking) design described in
2248 * Documentation/filesystems/path-lookup.txt.
2249 *
2250 * This is not to be used outside core vfs.
2251 *
2252 * __d_lookup_rcu must only be used in rcu-walk mode, ie. with vfsmount lock
2253 * held, and rcu_read_lock held. The returned dentry must not be stored into
2254 * without taking d_lock and checking d_seq sequence count against @seq
2255 * returned here.
2256 *
2257 * A refcount may be taken on the found dentry with the d_rcu_to_refcount
2258 * function.
2259 *
2260 * Alternatively, __d_lookup_rcu may be called again to look up the child of
2261 * the returned dentry, so long as its parent's seqlock is checked after the
2262 * child is looked up. Thus, an interlocking stepping of sequence lock checks
2263 * is formed, giving integrity down the path walk.
2264 *
2265 * NOTE! The caller *has* to check the resulting dentry against the sequence
2266 * number we've returned before using any of the resulting dentry state!
2267 */
2268struct dentry *__d_lookup_rcu(const struct dentry *parent,
2269				const struct qstr *name,
2270				unsigned *seqp)
2271{
2272	u64 hashlen = name->hash_len;
2273	const unsigned char *str = name->name;
2274	struct hlist_bl_head *b = d_hash(hashlen_hash(hashlen));
2275	struct hlist_bl_node *node;
2276	struct dentry *dentry;
2277
2278	/*
2279	 * Note: There is significant duplication with __d_lookup which is
2280	 * required to prevent single threaded performance regressions
2281	 * especially on architectures where smp_rmb (in seqcounts) are costly.
2282	 * Keep the two functions in sync.
2283	 */
2284
2285	/*
2286	 * The hash list is protected using RCU.
2287	 *
2288	 * Carefully use d_seq when comparing a candidate dentry, to avoid
2289	 * races with d_move().
2290	 *
2291	 * It is possible that concurrent renames can mess up our list
2292	 * walk here and result in missing our dentry, resulting in the
2293	 * false-negative result. d_lookup() protects against concurrent
2294	 * renames using rename_lock seqlock.
2295	 *
2296	 * See Documentation/filesystems/path-lookup.txt for more details.
2297	 */
2298	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
2299		unsigned seq;
2300
2301seqretry:
2302		/*
2303		 * The dentry sequence count protects us from concurrent
2304		 * renames, and thus protects parent and name fields.
2305		 *
2306		 * The caller must perform a seqcount check in order
2307		 * to do anything useful with the returned dentry.
2308		 *
2309		 * NOTE! We do a "raw" seqcount_begin here. That means that
2310		 * we don't wait for the sequence count to stabilize if it
2311		 * is in the middle of a sequence change. If we do the slow
2312		 * dentry compare, we will do seqretries until it is stable,
2313		 * and if we end up with a successful lookup, we actually
2314		 * want to exit RCU lookup anyway.
2315		 *
2316		 * Note that raw_seqcount_begin still *does* smp_rmb(), so
2317		 * we are still guaranteed NUL-termination of ->d_name.name.
2318		 */
2319		seq = raw_seqcount_begin(&dentry->d_seq);
2320		if (dentry->d_parent != parent)
2321			continue;
2322		if (d_unhashed(dentry))
2323			continue;
2324
2325		if (unlikely(parent->d_flags & DCACHE_OP_COMPARE)) {
2326			int tlen;
2327			const char *tname;
2328			if (dentry->d_name.hash != hashlen_hash(hashlen))
2329				continue;
2330			tlen = dentry->d_name.len;
2331			tname = dentry->d_name.name;
2332			/* we want a consistent (name,len) pair */
2333			if (read_seqcount_retry(&dentry->d_seq, seq)) {
2334				cpu_relax();
2335				goto seqretry;
2336			}
2337			if (parent->d_op->d_compare(dentry,
2338						    tlen, tname, name) != 0)
2339				continue;
2340		} else {
2341			if (dentry->d_name.hash_len != hashlen)
2342				continue;
2343			if (dentry_cmp(dentry, str, hashlen_len(hashlen)) != 0)
2344				continue;
2345		}
2346		*seqp = seq;
2347		return dentry;
2348	}
2349	return NULL;
2350}
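/*
 * [Editor's example, not part of dcache.c] The contract documented above,
 * in practice: nothing read from the returned dentry may be trusted until
 * d_seq has been re-checked against the value handed back through @seqp.
 * A hedged sketch of the caller side (core VFS does this in lookup_fast();
 * the helper name is an assumption). The caller is assumed to still be
 * under rcu_read_lock(); to keep the dentry past that, take a reference
 * first, e.g. with lockref_get_not_dead() as d_alloc_parallel() does.
 */
static struct dentry *example_rcu_lookup(struct dentry *parent,
					 const struct qstr *name)
{
	unsigned seq;
	struct dentry *dentry = __d_lookup_rcu(parent, name, &seq);

	if (!dentry)
		return NULL;
	/* ... sample whatever fields are needed from *dentry ... */
	if (read_seqcount_retry(&dentry->d_seq, seq))
		return NULL;	/* raced with d_move(): fall back to ref-walk */
	return dentry;
}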
2351
2352/**
2353 * d_lookup - search for a dentry
2354 * @parent: parent dentry
2355 * @name: qstr of name we wish to find
2356 * Returns: dentry, or NULL
2357 *
2358 * d_lookup searches the children of the parent dentry for the name in
2359 * question. If the dentry is found its reference count is incremented and the
2360 * dentry is returned. The caller must use dput to free the entry when it has
2361 * finished using it. %NULL is returned if the dentry does not exist.
2362 */
2363struct dentry *d_lookup(const struct dentry *parent, const struct qstr *name)
2364{
2365	struct dentry *dentry;
2366	unsigned seq;
2367
2368	do {
2369		seq = read_seqbegin(&rename_lock);
2370		dentry = __d_lookup(parent, name);
2371		if (dentry)
2372			break;
2373	} while (read_seqretry(&rename_lock, seq));
2374	return dentry;
2375}
2376EXPORT_SYMBOL(d_lookup);
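/*
 * [Editor's example, not part of dcache.c] d_lookup() expects @name->hash
 * to be precomputed. A hedged sketch of a plain call site (the helper name
 * is an assumption; see d_hash_and_lookup() below for the variant that
 * does the hashing for you):
 */
static void example_use_d_lookup(struct dentry *parent)
{
	struct qstr q = QSTR_INIT("child", 5);
	struct dentry *child;

	q.hash = full_name_hash(parent, q.name, q.len);
	child = d_lookup(parent, &q);
	if (child) {
		/* ... use the child ... */
		dput(child);	/* d_lookup() took a reference */
	}
}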
2377
2378/**
2379 * __d_lookup - search for a dentry (racy)
2380 * @parent: parent dentry
2381 * @name: qstr of name we wish to find
2382 * Returns: dentry, or NULL
2383 *
2384 * __d_lookup is like d_lookup, however it may (rarely) return a
2385 * false-negative result due to unrelated rename activity.
2386 *
2387 * __d_lookup is slightly faster by avoiding the rename_lock read seqlock;
2388 * however, it must be used carefully, e.g. with a following d_lookup in
2389 * the case of failure.
2390 *
2391 * Every __d_lookup call site must carry a comment justifying the racy variant.
2392 */
2393struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
2394{
2395	unsigned int hash = name->hash;
2396	struct hlist_bl_head *b = d_hash(hash);
2397	struct hlist_bl_node *node;
2398	struct dentry *found = NULL;
2399	struct dentry *dentry;
2400
2401	/*
2402	 * Note: There is significant duplication with __d_lookup_rcu which is
2403	 * required to prevent single threaded performance regressions
2404	 * especially on architectures where smp_rmb (in seqcounts) is costly.
2405	 * Keep the two functions in sync.
2406	 */
2407
2408	/*
2409	 * The hash list is protected using RCU.
2410	 *
2411	 * Take d_lock when comparing a candidate dentry, to avoid races
2412	 * with d_move().
2413	 *
2414	 * It is possible for concurrent renames to mess up our list
2415	 * walk here and make us miss our dentry, producing a false
2416	 * negative. d_lookup() protects against concurrent renames by
2417	 * retrying under the rename_lock seqlock.
2418	 *
2419	 * See Documentation/filesystems/path-lookup.txt for more details.
2420	 */
2421	rcu_read_lock();
2422	
2423	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
2424
2425		if (dentry->d_name.hash != hash)
2426			continue;
2427
2428		spin_lock(&dentry->d_lock);
2429		if (dentry->d_parent != parent)
2430			goto next;
2431		if (d_unhashed(dentry))
2432			goto next;
2433
2434		if (!d_same_name(dentry, parent, name))
2435			goto next;
2436
2437		dentry->d_lockref.count++;
2438		found = dentry;
2439		spin_unlock(&dentry->d_lock);
2440		break;
2441next:
2442		spin_unlock(&dentry->d_lock);
2443 	}
2444 	rcu_read_unlock();
2445
2446 	return found;
2447}
2448
2449/**
2450 * d_hash_and_lookup - hash the qstr then search for a dentry
2451 * @dir: Directory to search in
2452 * @name: qstr of name we wish to find
2453 *
2454 * On lookup failure NULL is returned; on a bad name, ERR_PTR(-error) is returned.
2455 */
2456struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
2457{
2458	/*
2459	 * Check for a fs-specific hash function. Note that we must
2460	 * calculate the standard hash first, as the d_op->d_hash()
2461	 * routine may choose to leave the hash value unchanged.
2462	 */
2463	name->hash = full_name_hash(dir, name->name, name->len);
2464	if (dir->d_flags & DCACHE_OP_HASH) {
2465		int err = dir->d_op->d_hash(dir, name);
2466		if (unlikely(err < 0))
2467			return ERR_PTR(err);
2468	}
2469	return d_lookup(dir, name);
2470}
2471EXPORT_SYMBOL(d_hash_and_lookup);
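/*
 * [Editor's example, not part of dcache.c] A hedged sketch of a caller
 * honouring the "NULL on miss, ERR_PTR on bad name" contract above; the
 * helper name is an assumption:
 */
static struct dentry *example_lookup_child(struct dentry *dir, const char *s)
{
	struct qstr q = QSTR_INIT(s, strlen(s));
	struct dentry *child = d_hash_and_lookup(dir, &q);

	if (IS_ERR(child))
		return child;			/* d_op->d_hash() said no */
	if (!child)
		return ERR_PTR(-ENOENT);	/* not in the dcache */
	return child;				/* ref held; dput() later */
}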
2472
2473/*
2474 * When a file is deleted, we have two options:
2475 * - turn this dentry into a negative dentry
2476 * - unhash this dentry and free it.
2477 *
2478 * Usually, we want to just turn this into
2479 * a negative dentry, but if anybody else is
2480 * currently using the dentry or the inode
2481 * we can't do that; we fall back on removing
2482 * it from the hash queues and waiting for
2483 * it to be deleted later, once it has no users.
2484 */
2485 
2486/**
2487 * d_delete - delete a dentry
2488 * @dentry: The dentry to delete
2489 *
2490 * Turn the dentry into a negative dentry if possible, otherwise
2491 * remove it from the hash queues so it can be deleted later
2492 */
2493 
2494void d_delete(struct dentry * dentry)
2495{
2496	struct inode *inode = dentry->d_inode;
2497
2498	spin_lock(&inode->i_lock);
2499	spin_lock(&dentry->d_lock);
2500	/*
2501	 * Are we the only user?
2502	 */
2503	if (dentry->d_lockref.count == 1) {
2504		dentry->d_flags &= ~DCACHE_CANT_MOUNT;
2505		dentry_unlink_inode(dentry);
2506	} else {
2507		__d_drop(dentry);
2508		spin_unlock(&dentry->d_lock);
2509		spin_unlock(&inode->i_lock);
2510	}
2511}
2512EXPORT_SYMBOL(d_delete);
2513
2514static void __d_rehash(struct dentry *entry)
2515{
2516	struct hlist_bl_head *b = d_hash(entry->d_name.hash);
2517
2518	hlist_bl_lock(b);
2519	hlist_bl_add_head_rcu(&entry->d_hash, b);
2520	hlist_bl_unlock(b);
2521}
2522
2523/**
2524 * d_rehash	- add an entry back to the hash
2525 * @entry: dentry to add to the hash
2526 *
2527 * Adds a dentry to the hash according to its name.
2528 */
2529 
2530void d_rehash(struct dentry * entry)
2531{
2532	spin_lock(&entry->d_lock);
2533	__d_rehash(entry);
2534	spin_unlock(&entry->d_lock);
2535}
2536EXPORT_SYMBOL(d_rehash);
2537
2538static inline unsigned start_dir_add(struct inode *dir)
2539{
2540
2541	for (;;) {
2542		unsigned n = dir->i_dir_seq;
2543		if (!(n & 1) && cmpxchg(&dir->i_dir_seq, n, n + 1) == n)
2544			return n;
2545		cpu_relax();
2546	}
2547}
2548
2549static inline void end_dir_add(struct inode *dir, unsigned n)
2550{
2551	smp_store_release(&dir->i_dir_seq, n + 2);
2552}
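/*
 * [Editor's example, not part of dcache.c] start_dir_add()/end_dir_add()
 * are an open-coded sequence count on i_dir_seq: a writer spins until the
 * count is even, claims it by making it odd with cmpxchg, and releases it
 * as even again with n + 2. Readers (see d_alloc_parallel() below) treat
 * an odd value, or any change, as "retry". A hedged standalone model of
 * the same protocol in C11 atomics:
 */
#include <stdatomic.h>

static _Atomic unsigned model_dir_seq;

static unsigned model_start_dir_add(void)
{
	for (;;) {
		unsigned n = atomic_load(&model_dir_seq);
		/* claim only when even, i.e. no other writer is active */
		if (!(n & 1) &&
		    atomic_compare_exchange_strong(&model_dir_seq, &n, n + 1))
			return n;
	}
}

static void model_end_dir_add(unsigned n)
{
	/* release: even again, one generation later */
	atomic_store_explicit(&model_dir_seq, n + 2, memory_order_release);
}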
2553
2554static void d_wait_lookup(struct dentry *dentry)
2555{
2556	if (d_in_lookup(dentry)) {
2557		DECLARE_WAITQUEUE(wait, current);
2558		add_wait_queue(dentry->d_wait, &wait);
2559		do {
2560			set_current_state(TASK_UNINTERRUPTIBLE);
2561			spin_unlock(&dentry->d_lock);
2562			schedule();
2563			spin_lock(&dentry->d_lock);
2564		} while (d_in_lookup(dentry));
2565	}
2566}
2567
2568struct dentry *d_alloc_parallel(struct dentry *parent,
2569				const struct qstr *name,
2570				wait_queue_head_t *wq)
2571{
2572	unsigned int hash = name->hash;
2573	struct hlist_bl_head *b = in_lookup_hash(parent, hash);
2574	struct hlist_bl_node *node;
2575	struct dentry *new = d_alloc(parent, name);
2576	struct dentry *dentry;
2577	unsigned seq, r_seq, d_seq;
2578
2579	if (unlikely(!new))
2580		return ERR_PTR(-ENOMEM);
2581
2582retry:
2583	rcu_read_lock();
2584	seq = smp_load_acquire(&parent->d_inode->i_dir_seq);
2585	r_seq = read_seqbegin(&rename_lock);
2586	dentry = __d_lookup_rcu(parent, name, &d_seq);
2587	if (unlikely(dentry)) {
2588		if (!lockref_get_not_dead(&dentry->d_lockref)) {
2589			rcu_read_unlock();
2590			goto retry;
2591		}
2592		if (read_seqcount_retry(&dentry->d_seq, d_seq)) {
2593			rcu_read_unlock();
2594			dput(dentry);
2595			goto retry;
2596		}
2597		rcu_read_unlock();
2598		dput(new);
2599		return dentry;
2600	}
2601	if (unlikely(read_seqretry(&rename_lock, r_seq))) {
2602		rcu_read_unlock();
2603		goto retry;
2604	}
2605
2606	if (unlikely(seq & 1)) {
2607		rcu_read_unlock();
2608		goto retry;
2609	}
2610
2611	hlist_bl_lock(b);
2612	if (unlikely(READ_ONCE(parent->d_inode->i_dir_seq) != seq)) {
2613		hlist_bl_unlock(b);
2614		rcu_read_unlock();
2615		goto retry;
2616	}
2617	/*
2618	 * No changes for the parent since the beginning of d_lookup().
2619	 * Since all removals from the chain happen with hlist_bl_lock(),
2620	 * any potential in-lookup matches are going to stay here until
2621	 * we unlock the chain.  All fields are stable in everything
2622	 * we encounter.
2623	 */
2624	hlist_bl_for_each_entry(dentry, node, b, d_u.d_in_lookup_hash) {
2625		if (dentry->d_name.hash != hash)
2626			continue;
2627		if (dentry->d_parent != parent)
2628			continue;
2629		if (!d_same_name(dentry, parent, name))
2630			continue;
2631		hlist_bl_unlock(b);
2632		/* now we can try to grab a reference */
2633		if (!lockref_get_not_dead(&dentry->d_lockref)) {
2634			rcu_read_unlock();
2635			goto retry;
2636		}
2637
2638		rcu_read_unlock();
2639		/*
2640		 * somebody is likely to be still doing lookup for it;
2641		 * wait for them to finish
2642		 */
2643		spin_lock(&dentry->d_lock);
2644		d_wait_lookup(dentry);
2645		/*
2646		 * it's not in-lookup anymore; in principle we should repeat
2647		 * everything from dcache lookup, but it's likely to be what
2648		 * d_lookup() would've found anyway.  If it is, just return it;
2649		 * otherwise we really have to repeat the whole thing.
2650		 */
2651		if (unlikely(dentry->d_name.hash != hash))
2652			goto mismatch;
2653		if (unlikely(dentry->d_parent != parent))
2654			goto mismatch;
2655		if (unlikely(d_unhashed(dentry)))
2656			goto mismatch;
2657		if (unlikely(!d_same_name(dentry, parent, name)))
2658			goto mismatch;
2659		/* OK, it *is* a hashed match; return it */
2660		spin_unlock(&dentry->d_lock);
2661		dput(new);
2662		return dentry;
2663	}
2664	rcu_read_unlock();
2665	/* we can't take ->d_lock here; it's OK, though. */
2666	new->d_flags |= DCACHE_PAR_LOOKUP;
2667	new->d_wait = wq;
2668	hlist_bl_add_head_rcu(&new->d_u.d_in_lookup_hash, b);
2669	hlist_bl_unlock(b);
2670	return new;
2671mismatch:
2672	spin_unlock(&dentry->d_lock);
2673	dput(dentry);
2674	goto retry;
2675}
2676EXPORT_SYMBOL(d_alloc_parallel);
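/*
 * [Editor's example, not part of dcache.c] The caller-side shape of the
 * parallel-lookup protocol (core VFS does the real version in fs/namei.c;
 * the helper name below is an assumption). A dentry returned *not* in
 * in-lookup state is somebody else's finished lookup and is reused as-is;
 * otherwise we own the in-lookup dentry, call into the filesystem, and
 * must end the in-lookup state with d_lookup_done().
 */
static struct dentry *example_lookup_slow(const struct qstr *name,
					  struct dentry *dir,
					  unsigned int flags)
{
	struct inode *inode = dir->d_inode;
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
	struct dentry *dentry, *old;

	dentry = d_alloc_parallel(dir, name, &wq);
	if (IS_ERR(dentry) || !d_in_lookup(dentry))
		return dentry;		/* error, or a lookup that finished */

	old = inode->i_op->lookup(inode, dentry, flags);
	d_lookup_done(dentry);		/* leave in-lookup hash, wake waiters */
	if (unlikely(old)) {
		dput(dentry);		/* filesystem spliced another alias */
		dentry = old;
	}
	return dentry;
}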
2677
2678void __d_lookup_done(struct dentry *dentry)
2679{
2680	struct hlist_bl_head *b = in_lookup_hash(dentry->d_parent,
2681						 dentry->d_name.hash);
2682	hlist_bl_lock(b);
2683	dentry->d_flags &= ~DCACHE_PAR_LOOKUP;
2684	__hlist_bl_del(&dentry->d_u.d_in_lookup_hash);
2685	wake_up_all(dentry->d_wait);
2686	dentry->d_wait = NULL;
2687	hlist_bl_unlock(b);
2688	INIT_HLIST_NODE(&dentry->d_u.d_alias);
2689	INIT_LIST_HEAD(&dentry->d_lru);
2690}
2691EXPORT_SYMBOL(__d_lookup_done);
2692
2693/* inode->i_lock held if inode is non-NULL */
2694
2695static inline void __d_add(struct dentry *dentry, struct inode *inode)
2696{
2697	struct inode *dir = NULL;
2698	unsigned n;
2699	spin_lock(&dentry->d_lock);
2700	if (unlikely(d_in_lookup(dentry))) {
2701		dir = dentry->d_parent->d_inode;
2702		n = start_dir_add(dir);
2703		__d_lookup_done(dentry);
2704	}
2705	if (inode) {
2706		unsigned add_flags = d_flags_for_inode(inode);
2707		hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
2708		raw_write_seqcount_begin(&dentry->d_seq);
2709		__d_set_inode_and_type(dentry, inode, add_flags);
2710		raw_write_seqcount_end(&dentry->d_seq);
2711		fsnotify_update_flags(dentry);
2712	}
2713	__d_rehash(dentry);
2714	if (dir)
2715		end_dir_add(dir, n);
2716	spin_unlock(&dentry->d_lock);
2717	if (inode)
2718		spin_unlock(&inode->i_lock);
2719}
2720
2721/**
2722 * d_add - add dentry to hash queues
2723 * @entry: dentry to add
2724 * @inode: The inode to attach to this dentry
2725 *
2726 * This adds the entry to the hash queues and attaches @inode to it.
2727 * The entry was actually filled in earlier during d_alloc().
2728 */
2729
2730void d_add(struct dentry *entry, struct inode *inode)
2731{
2732	if (inode) {
2733		security_d_instantiate(entry, inode);
2734		spin_lock(&inode->i_lock);
2735	}
2736	__d_add(entry, inode);
2737}
2738EXPORT_SYMBOL(d_add);
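/*
 * [Editor's example, not part of dcache.c] The classic ->lookup use of
 * d_add(): attach whatever inode was found (NULL on a miss, creating a
 * cached negative dentry) and hash the entry. The example_* names are
 * assumptions; exportable filesystems usually prefer d_splice_alias(),
 * shown further below.
 */
static struct dentry *example_fs_lookup(struct inode *dir,
					struct dentry *dentry,
					unsigned int flags)
{
	struct inode *inode = NULL;

	/* ... search backing store; leave inode NULL if the name is absent ... */
	d_add(dentry, inode);	/* NULL inode => negative dentry */
	return NULL;		/* we used the dentry we were handed */
}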
2739
2740/**
2741 * d_exact_alias - find and hash an exact unhashed alias
2742 * @entry: dentry to add
2743 * @inode: The inode to go with this dentry
2744 *
2745 * If an unhashed dentry with the same name/parent and desired
2746 * inode already exists, hash and return it.  Otherwise, return
2747 * NULL.
2748 *
2749 * Parent directory should be locked.
2750 */
2751struct dentry *d_exact_alias(struct dentry *entry, struct inode *inode)
2752{
2753	struct dentry *alias;
2754	unsigned int hash = entry->d_name.hash;
2755
2756	spin_lock(&inode->i_lock);
2757	hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
2758		/*
2759		 * Don't need alias->d_lock here, because aliases with
2760		 * d_parent == entry->d_parent are not subject to name or
2761		 * parent changes, because the parent inode i_mutex is held.
2762		 */
2763		if (alias->d_name.hash != hash)
2764			continue;
2765		if (alias->d_parent != entry->d_parent)
2766			continue;
2767		if (!d_same_name(alias, entry->d_parent, &entry->d_name))
2768			continue;
2769		spin_lock(&alias->d_lock);
2770		if (!d_unhashed(alias)) {
2771			spin_unlock(&alias->d_lock);
2772			alias = NULL;
2773		} else {
2774			__dget_dlock(alias);
2775			__d_rehash(alias);
2776			spin_unlock(&alias->d_lock);
2777		}
2778		spin_unlock(&inode->i_lock);
2779		return alias;
2780	}
2781	spin_unlock(&inode->i_lock);
2782	return NULL;
2783}
2784EXPORT_SYMBOL(d_exact_alias);
2785
2786static void swap_names(struct dentry *dentry, struct dentry *target)
2787{
2788	if (unlikely(dname_external(target))) {
2789		if (unlikely(dname_external(dentry))) {
2790			/*
2791			 * Both external: swap the pointers
2792			 */
2793			swap(target->d_name.name, dentry->d_name.name);
2794		} else {
2795			/*
2796			 * dentry:internal, target:external.  Steal target's
2797			 * storage and make target internal.
2798			 */
2799			memcpy(target->d_iname, dentry->d_name.name,
2800					dentry->d_name.len + 1);
2801			dentry->d_name.name = target->d_name.name;
2802			target->d_name.name = target->d_iname;
2803		}
2804	} else {
2805		if (unlikely(dname_external(dentry))) {
2806			/*
2807			 * dentry:external, target:internal.  Give dentry's
2808			 * storage to target and make dentry internal
2809			 */
2810			memcpy(dentry->d_iname, target->d_name.name,
2811					target->d_name.len + 1);
2812			target->d_name.name = dentry->d_name.name;
2813			dentry->d_name.name = dentry->d_iname;
2814		} else {
2815			/*
2816			 * Both are internal.
2817			 */
2818			unsigned int i;
2819			BUILD_BUG_ON(!IS_ALIGNED(DNAME_INLINE_LEN, sizeof(long)));
2820			for (i = 0; i < DNAME_INLINE_LEN / sizeof(long); i++) {
2821				swap(((long *) &dentry->d_iname)[i],
2822				     ((long *) &target->d_iname)[i]);
2823			}
2824		}
2825	}
2826	swap(dentry->d_name.hash_len, target->d_name.hash_len);
2827}
2828
2829static void copy_name(struct dentry *dentry, struct dentry *target)
2830{
2831	struct external_name *old_name = NULL;
2832	if (unlikely(dname_external(dentry)))
2833		old_name = external_name(dentry);
2834	if (unlikely(dname_external(target))) {
2835		atomic_inc(&external_name(target)->u.count);
2836		dentry->d_name = target->d_name;
2837	} else {
2838		memcpy(dentry->d_iname, target->d_name.name,
2839				target->d_name.len + 1);
2840		dentry->d_name.name = dentry->d_iname;
2841		dentry->d_name.hash_len = target->d_name.hash_len;
2842	}
2843	if (old_name && likely(atomic_dec_and_test(&old_name->u.count)))
2844		kfree_rcu(old_name, u.head);
2845}
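/*
 * [Editor's example, not part of dcache.c] The branching in swap_names()
 * and copy_name() exists because short names live in the dentry's embedded
 * d_iname[] array while long ones live in a refcounted, RCU-freed
 * struct external_name. dname_external() tells the cases apart simply by
 * checking whether d_name.name still points at the inline buffer; a hedged
 * model of that test:
 */
static inline bool model_dname_external(const struct dentry *dentry)
{
	/* external iff the name pointer escaped the inline d_iname buffer */
	return dentry->d_name.name != dentry->d_iname;
}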
2846
2847/*
2848 * __d_move - move a dentry
2849 * @dentry: entry to move
2850 * @target: new dentry
2851 * @exchange: exchange the two dentries
2852 *
2853 * Update the dcache to reflect the move of a file name. Negative
2854 * dcache entries should not be moved in this way. Caller must hold
2855 * rename_lock, the i_mutex of the source and target directories,
2856 * and the sb->s_vfs_rename_mutex if they differ. See lock_rename().
2857 */
2858static void __d_move(struct dentry *dentry, struct dentry *target,
2859		     bool exchange)
2860{
2861	struct dentry *old_parent, *p;
2862	struct inode *dir = NULL;
2863	unsigned n;
2864
2865	WARN_ON(!dentry->d_inode);
2866	if (WARN_ON(dentry == target))
2867		return;
2868
2869	BUG_ON(d_ancestor(target, dentry));
2870	old_parent = dentry->d_parent;
2871	p = d_ancestor(old_parent, target);
2872	if (IS_ROOT(dentry)) {
2873		BUG_ON(p);
2874		spin_lock(&target->d_parent->d_lock);
2875	} else if (!p) {
2876		/* target is not a descendant of dentry->d_parent */
2877		spin_lock(&target->d_parent->d_lock);
2878		spin_lock_nested(&old_parent->d_lock, DENTRY_D_LOCK_NESTED);
2879	} else {
2880		BUG_ON(p == dentry);
2881		spin_lock(&old_parent->d_lock);
2882		if (p != target)
2883			spin_lock_nested(&target->d_parent->d_lock,
2884					DENTRY_D_LOCK_NESTED);
2885	}
2886	spin_lock_nested(&dentry->d_lock, 2);
2887	spin_lock_nested(&target->d_lock, 3);
2888
2889	if (unlikely(d_in_lookup(target))) {
2890		dir = target->d_parent->d_inode;
2891		n = start_dir_add(dir);
2892		__d_lookup_done(target);
2893	}
2894
2895	write_seqcount_begin(&dentry->d_seq);
2896	write_seqcount_begin_nested(&target->d_seq, DENTRY_D_LOCK_NESTED);
2897
2898	/* unhash both */
2899	if (!d_unhashed(dentry))
2900		___d_drop(dentry);
2901	if (!d_unhashed(target))
2902		___d_drop(target);
2903
2904	/* ... and switch them in the tree */
2905	dentry->d_parent = target->d_parent;
2906	if (!exchange) {
2907		copy_name(dentry, target);
2908		target->d_hash.pprev = NULL;
2909		dentry->d_parent->d_lockref.count++;
2910		if (dentry != old_parent) /* wasn't IS_ROOT */
2911			WARN_ON(!--old_parent->d_lockref.count);
2912	} else {
2913		target->d_parent = old_parent;
2914		swap_names(dentry, target);
2915		list_move(&target->d_child, &target->d_parent->d_subdirs);
2916		__d_rehash(target);
2917		fsnotify_update_flags(target);
2918	}
2919	list_move(&dentry->d_child, &dentry->d_parent->d_subdirs);
2920	__d_rehash(dentry);
2921	fsnotify_update_flags(dentry);
2922	fscrypt_handle_d_move(dentry);
2923
2924	write_seqcount_end(&target->d_seq);
2925	write_seqcount_end(&dentry->d_seq);
2926
2927	if (dir)
2928		end_dir_add(dir, n);
2929
2930	if (dentry->d_parent != old_parent)
2931		spin_unlock(&dentry->d_parent->d_lock);
2932	if (dentry != old_parent)
2933		spin_unlock(&old_parent->d_lock);
2934	spin_unlock(&target->d_lock);
2935	spin_unlock(&dentry->d_lock);
2936}
2937
2938/*
2939 * d_move - move a dentry
2940 * @dentry: entry to move
2941 * @target: new dentry
2942 *
2943 * Update the dcache to reflect the move of a file name. Negative
2944 * dcache entries should not be moved in this way. See the locking
2945 * requirements for __d_move.
2946 */
2947void d_move(struct dentry *dentry, struct dentry *target)
2948{
2949	write_seqlock(&rename_lock);
2950	__d_move(dentry, target, false);
2951	write_sequnlock(&rename_lock);
2952}
2953EXPORT_SYMBOL(d_move);
2954
2955/*
2956 * d_exchange - exchange two dentries
2957 * @dentry1: first dentry
2958 * @dentry2: second dentry
2959 */
2960void d_exchange(struct dentry *dentry1, struct dentry *dentry2)
2961{
2962	write_seqlock(&rename_lock);
2963
2964	WARN_ON(!dentry1->d_inode);
2965	WARN_ON(!dentry2->d_inode);
2966	WARN_ON(IS_ROOT(dentry1));
2967	WARN_ON(IS_ROOT(dentry2));
2968
2969	__d_move(dentry1, dentry2, true);
2970
2971	write_sequnlock(&rename_lock);
2972}
2973
2974/**
2975 * d_ancestor - search for an ancestor
2976 * @p1: ancestor dentry
2977 * @p2: child dentry
2978 *
2979 * Returns the ancestor dentry of p2 which is a child of p1, if p1 is
2980 * an ancestor of p2, else NULL.
2981 */
2982struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2)
2983{
2984	struct dentry *p;
2985
2986	for (p = p2; !IS_ROOT(p); p = p->d_parent) {
2987		if (p->d_parent == p1)
2988			return p;
2989	}
2990	return NULL;
2991}
2992
2993/*
2994 * This helper attempts to cope with remotely renamed directories
2995 *
2996 * It assumes that the caller is already holding
2997 * dentry->d_parent->d_inode->i_mutex, and rename_lock
2998 *
2999 * Note: If ever the locking in lock_rename() changes, then please
3000 * remember to update this too...
3001 */
3002static int __d_unalias(struct inode *inode,
3003		struct dentry *dentry, struct dentry *alias)
3004{
3005	struct mutex *m1 = NULL;
3006	struct rw_semaphore *m2 = NULL;
3007	int ret = -ESTALE;
3008
3009	/* If alias and dentry share a parent, then no extra locks required */
3010	if (alias->d_parent == dentry->d_parent)
3011		goto out_unalias;
3012
3013	/* See lock_rename() */
3014	if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex))
3015		goto out_err;
3016	m1 = &dentry->d_sb->s_vfs_rename_mutex;
3017	if (!inode_trylock_shared(alias->d_parent->d_inode))
3018		goto out_err;
3019	m2 = &alias->d_parent->d_inode->i_rwsem;
3020out_unalias:
3021	__d_move(alias, dentry, false);
3022	ret = 0;
3023out_err:
3024	if (m2)
3025		up_read(m2);
3026	if (m1)
3027		mutex_unlock(m1);
3028	return ret;
3029}
3030
3031/**
3032 * d_splice_alias - splice a disconnected dentry into the tree if one exists
3033 * @inode:  the inode which may have a disconnected dentry
3034 * @dentry: a negative dentry which we want to point to the inode.
3035 *
3036 * If inode is a directory and has an IS_ROOT alias, then d_move that in
3037 * place of the given dentry and return it, else simply d_add the inode
3038 * to the dentry and return NULL.
3039 *
3040 * If a non-IS_ROOT directory is found, the filesystem is corrupt, and
3041 * we should error out: directories can't have multiple aliases.
3042 *
3043 * This is needed in the lookup routine of any filesystem that is exportable
3044 * (via knfsd) so that we can build dcache paths to directories effectively.
3045 *
3046 * If a dentry was found and moved, then it is returned.  Otherwise NULL
3047 * is returned.  This matches the expected return value of ->lookup.
3048 *
3049 * Cluster filesystems may call this function with a negative, hashed dentry.
3050 * In that case, we know that the inode will be a regular file, and also this
3051 * will only occur during atomic_open. So we need to check for the dentry
3052 * being already hashed only in the final case.
3053 */
3054struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
3055{
3056	if (IS_ERR(inode))
3057		return ERR_CAST(inode);
3058
3059	BUG_ON(!d_unhashed(dentry));
3060
3061	if (!inode)
3062		goto out;
3063
3064	security_d_instantiate(dentry, inode);
3065	spin_lock(&inode->i_lock);
3066	if (S_ISDIR(inode->i_mode)) {
3067		struct dentry *new = __d_find_any_alias(inode);
3068		if (unlikely(new)) {
3069			/* The reference to new ensures it remains an alias */
3070			spin_unlock(&inode->i_lock);
3071			write_seqlock(&rename_lock);
3072			if (unlikely(d_ancestor(new, dentry))) {
3073				write_sequnlock(&rename_lock);
3074				dput(new);
3075				new = ERR_PTR(-ELOOP);
3076				pr_warn_ratelimited(
3077					"VFS: Lookup of '%s' in %s %s"
3078					" would have caused loop\n",
3079					dentry->d_name.name,
3080					inode->i_sb->s_type->name,
3081					inode->i_sb->s_id);
3082			} else if (!IS_ROOT(new)) {
3083				struct dentry *old_parent = dget(new->d_parent);
3084				int err = __d_unalias(inode, dentry, new);
3085				write_sequnlock(&rename_lock);
3086				if (err) {
3087					dput(new);
3088					new = ERR_PTR(err);
3089				}
3090				dput(old_parent);
3091			} else {
3092				__d_move(new, dentry, false);
3093				write_sequnlock(&rename_lock);
3094			}
3095			iput(inode);
3096			return new;
3097		}
3098	}
3099out:
3100	__d_add(dentry, inode);
3101	return NULL;
3102}
3103EXPORT_SYMBOL(d_splice_alias);
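/*
 * [Editor's example, not part of dcache.c] The canonical ->lookup shape
 * for an exportable filesystem: hand the inode (which may be NULL or an
 * ERR_PTR) and the negative dentry to d_splice_alias() and return its
 * result directly, since its NULL / dentry / ERR_PTR convention is exactly
 * what ->lookup must return. example_fs_iget() is an assumed helper.
 */
static struct dentry *example_exportable_lookup(struct inode *dir,
						struct dentry *dentry,
						unsigned int flags)
{
	struct inode *inode;

	inode = example_fs_iget(dir, &dentry->d_name);	/* assumed helper */
	return d_splice_alias(inode, dentry);	/* consumes the inode ref */
}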
3104
3105/*
3106 * Test whether new_dentry is a subdirectory of old_dentry.
3107 *
3108 * Trivially implemented using the dcache structure
3109 */
3110
3111/**
3112 * is_subdir - is new dentry a subdirectory of old_dentry
3113 * @new_dentry: new dentry
3114 * @old_dentry: old dentry
3115 *
3116 * Returns true if new_dentry is a subdirectory of old_dentry (at any depth).
3117 * Returns false otherwise.
3118 * Caller must ensure that "new_dentry" is pinned before calling is_subdir().
3119 */
3120
3121bool is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
3122{
3123	bool result;
3124	unsigned seq;
3125
3126	if (new_dentry == old_dentry)
3127		return true;
3128
3129	do {
3130		/* for restarting the loop in case of a seq retry */
3131		seq = read_seqbegin(&rename_lock);
3132		/*
3133		 * Need rcu_read_lock() to protect against d_parent changing
3134		 * under us due to d_move()
3135		 */
3136		rcu_read_lock();
3137		if (d_ancestor(old_dentry, new_dentry))
3138			result = true;
3139		else
3140			result = false;
3141		rcu_read_unlock();
3142	} while (read_seqretry(&rename_lock, seq));
3143
3144	return result;
3145}
3146EXPORT_SYMBOL(is_subdir);
3147
3148static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
3149{
3150	struct dentry *root = data;
3151	if (dentry != root) {
3152		if (d_unhashed(dentry) || !dentry->d_inode)
3153			return D_WALK_SKIP;
3154
3155		if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
3156			dentry->d_flags |= DCACHE_GENOCIDE;
3157			dentry->d_lockref.count--;
3158		}
3159	}
3160	return D_WALK_CONTINUE;
3161}
3162
3163void d_genocide(struct dentry *parent)
3164{
3165	d_walk(parent, parent, d_genocide_kill);
3166}
3167
3168EXPORT_SYMBOL(d_genocide);
3169
3170void d_tmpfile(struct dentry *dentry, struct inode *inode)
3171{
3172	inode_dec_link_count(inode);
3173	BUG_ON(dentry->d_name.name != dentry->d_iname ||
3174		!hlist_unhashed(&dentry->d_u.d_alias) ||
3175		!d_unlinked(dentry));
3176	spin_lock(&dentry->d_parent->d_lock);
3177	spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
3178	dentry->d_name.len = sprintf(dentry->d_iname, "#%llu",
3179				(unsigned long long)inode->i_ino);
3180	spin_unlock(&dentry->d_lock);
3181	spin_unlock(&dentry->d_parent->d_lock);
3182	d_instantiate(dentry, inode);
3183}
3184EXPORT_SYMBOL(d_tmpfile);
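/*
 * [Editor's worked example] The sprintf() above gives anonymous tmpfiles a
 * synthetic name derived from the inode number, e.g. inode 1234 becomes
 * the dentry name "#1234". Worst case for a 64-bit ino is '#' plus 20
 * digits plus the NUL terminator, 22 bytes, which fits in the inline
 * d_iname buffer the BUG_ON() above insists the name still lives in.
 */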
3185
3186static __initdata unsigned long dhash_entries;
3187static int __init set_dhash_entries(char *str)
3188{
3189	if (!str)
3190		return 0;
3191	dhash_entries = simple_strtoul(str, &str, 0);
3192	return 1;
3193}
3194__setup("dhash_entries=", set_dhash_entries);
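/*
 * [Editor's note] dhash_entries= is a boot-time override for the dentry
 * hash table size; for example, booting with
 *
 *	dhash_entries=1048576
 *
 * requests roughly 2^20 buckets from alloc_large_system_hash() below
 * instead of the memory-scaled default. simple_strtoul() is called with
 * base 0, so the hexadecimal form dhash_entries=0x100000 is equivalent.
 */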
3195
3196static void __init dcache_init_early(void)
3197{
3198	/* If hashes are distributed across NUMA nodes, defer
3199	 * hash allocation until vmalloc space is available.
3200	 */
3201	if (hashdist)
3202		return;
3203
3204	dentry_hashtable =
3205		alloc_large_system_hash("Dentry cache",
3206					sizeof(struct hlist_bl_head),
3207					dhash_entries,
3208					13,
3209					HASH_EARLY | HASH_ZERO,
3210					&d_hash_shift,
3211					NULL,
3212					0,
3213					0);
3214	d_hash_shift = 32 - d_hash_shift;
3215}
3216
3217static void __init dcache_init(void)
3218{
3219	/*
3220	 * A constructor could be added for stable state like the lists,
3221	 * but it is probably not worth it because of the cache nature
3222	 * of the dcache.
3223	 */
3224	dentry_cache = KMEM_CACHE_USERCOPY(dentry,
3225		SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD|SLAB_ACCOUNT,
3226		d_iname);
3227
3228	/* Hash may have been set up in dcache_init_early */
3229	if (!hashdist)
3230		return;
3231
3232	dentry_hashtable =
3233		alloc_large_system_hash("Dentry cache",
3234					sizeof(struct hlist_bl_head),
3235					dhash_entries,
3236					13,
3237					HASH_ZERO,
3238					&d_hash_shift,
3239					NULL,
3240					0,
3241					0);
3242	d_hash_shift = 32 - d_hash_shift;
3243}
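/*
 * [Editor's worked example] alloc_large_system_hash() stores log2(buckets)
 * in d_hash_shift, and the "32 - d_hash_shift" flip above converts that
 * into the downshift d_hash() applies to a 32-bit hash. With 2^13 = 8192
 * buckets the stored shift is 13, so d_hash_shift becomes 32 - 13 = 19 and
 * a bucket is chosen as (hash >> 19), i.e. from the hash's top 13 bits.
 * A hedged model of the indexing:
 */
static inline unsigned int model_d_hash_bucket(unsigned int hash,
					       unsigned int log2_buckets)
{
	return hash >> (32 - log2_buckets);	/* index into the table */
}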
3244
3245/* SLAB cache for __getname() consumers */
3246struct kmem_cache *names_cachep __read_mostly;
3247EXPORT_SYMBOL(names_cachep);
3248
3249void __init vfs_caches_init_early(void)
3250{
3251	int i;
3252
3253	for (i = 0; i < ARRAY_SIZE(in_lookup_hashtable); i++)
3254		INIT_HLIST_BL_HEAD(&in_lookup_hashtable[i]);
3255
3256	dcache_init_early();
3257	inode_init_early();
3258}
3259
3260void __init vfs_caches_init(void)
3261{
3262	names_cachep = kmem_cache_create_usercopy("names_cache", PATH_MAX, 0,
3263			SLAB_HWCACHE_ALIGN|SLAB_PANIC, 0, PATH_MAX, NULL);
3264
3265	dcache_init();
3266	inode_init();
3267	files_init();
3268	files_maxfiles_init();
3269	mnt_init();
3270	bdev_cache_init();
3271	chrdev_init();
3272}