   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * fs/dcache.c
   4 *
   5 * Complete reimplementation
   6 * (C) 1997 Thomas Schoebel-Theuer,
   7 * with heavy changes by Linus Torvalds
   8 */
   9
  10/*
  11 * Notes on the allocation strategy:
  12 *
  13 * The dcache is a master of the icache - whenever a dcache entry
  14 * exists, the inode will always exist. "iput()" is done either when
  15 * the dcache entry is deleted or garbage collected.
  16 */
  17
  18#include <linux/ratelimit.h>
  19#include <linux/string.h>
  20#include <linux/mm.h>
  21#include <linux/fs.h>
  22#include <linux/fscrypt.h>
  23#include <linux/fsnotify.h>
  24#include <linux/slab.h>
  25#include <linux/init.h>
  26#include <linux/hash.h>
  27#include <linux/cache.h>
  28#include <linux/export.h>
  29#include <linux/security.h>
  30#include <linux/seqlock.h>
  31#include <linux/memblock.h>
  32#include <linux/bit_spinlock.h>
  33#include <linux/rculist_bl.h>
  34#include <linux/list_lru.h>
  35#include "internal.h"
  36#include "mount.h"
  37
  38#include <asm/runtime-const.h>
  39
  40/*
  41 * Usage:
  42 * dcache->d_inode->i_lock protects:
  43 *   - i_dentry, d_u.d_alias, d_inode of aliases
  44 * dcache_hash_bucket lock protects:
  45 *   - the dcache hash table
  46 * s_roots bl list spinlock protects:
  47 *   - the s_roots list (see __d_drop)
  48 * dentry->d_sb->s_dentry_lru_lock protects:
  49 *   - the dcache lru lists and counters
  50 * d_lock protects:
  51 *   - d_flags
  52 *   - d_name
  53 *   - d_lru
  54 *   - d_count
  55 *   - d_unhashed()
   56 *   - d_parent and d_children
   57 *   - children's d_sib and d_parent
  58 *   - d_u.d_alias, d_inode
  59 *
  60 * Ordering:
  61 * dentry->d_inode->i_lock
  62 *   dentry->d_lock
  63 *     dentry->d_sb->s_dentry_lru_lock
  64 *     dcache_hash_bucket lock
  65 *     s_roots lock
  66 *
  67 * If there is an ancestor relationship:
  68 * dentry->d_parent->...->d_parent->d_lock
  69 *   ...
  70 *     dentry->d_parent->d_lock
  71 *       dentry->d_lock
  72 *
  73 * If no ancestor relationship:
  74 * arbitrary, since it's serialized on rename_lock
  75 */
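/*
 * Illustrative sketch (not part of the original file): taking a child's
 * d_lock while already holding the parent's, in the ancestor-first
 * order documented above.  lock_parent_child() is a hypothetical
 * helper name; the nested annotation matches how this file takes the
 * second lock elsewhere.
 */
static inline void lock_parent_child(struct dentry *parent,
				     struct dentry *child)
{
	spin_lock(&parent->d_lock);
	spin_lock_nested(&child->d_lock, DENTRY_D_LOCK_NESTED);
}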
  76int sysctl_vfs_cache_pressure __read_mostly = 100;
  77EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);
  78
  79__cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);
  80
  81EXPORT_SYMBOL(rename_lock);
  82
  83static struct kmem_cache *dentry_cache __ro_after_init;
  84
  85const struct qstr empty_name = QSTR_INIT("", 0);
  86EXPORT_SYMBOL(empty_name);
  87const struct qstr slash_name = QSTR_INIT("/", 1);
  88EXPORT_SYMBOL(slash_name);
  89const struct qstr dotdot_name = QSTR_INIT("..", 2);
  90EXPORT_SYMBOL(dotdot_name);
  91
  92/*
  93 * This is the single most critical data structure when it comes
  94 * to the dcache: the hashtable for lookups. Somebody should try
  95 * to make this good - I've just made it work.
  96 *
  97 * This hash-function tries to avoid losing too many bits of hash
  98 * information, yet avoid using a prime hash-size or similar.
  99 *
 100 * Marking the variables "used" ensures that the compiler doesn't
 101 * optimize them away completely on architectures with runtime
  102 * constant infrastructure; this allows debuggers to see their
 103 * values. But updating these values has no effect on those arches.
 104 */
 105
 106static unsigned int d_hash_shift __ro_after_init __used;
 107
 108static struct hlist_bl_head *dentry_hashtable __ro_after_init __used;
 109
 110static inline struct hlist_bl_head *d_hash(unsigned long hashlen)
 111{
 112	return runtime_const_ptr(dentry_hashtable) +
 113		runtime_const_shift_right_32(hashlen, d_hash_shift);
 114}
 115
 116#define IN_LOOKUP_SHIFT 10
 117static struct hlist_bl_head in_lookup_hashtable[1 << IN_LOOKUP_SHIFT];
 118
 119static inline struct hlist_bl_head *in_lookup_hash(const struct dentry *parent,
 120					unsigned int hash)
 121{
 122	hash += (unsigned long) parent / L1_CACHE_BYTES;
 123	return in_lookup_hashtable + hash_32(hash, IN_LOOKUP_SHIFT);
 124}
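/*
 * Sketch (hypothetical helper, not in the original): a lookup selects
 * its hash chain by feeding the qstr's combined hash_len word to
 * d_hash(), which keeps the hash bits selected by d_hash_shift.
 */
static inline struct hlist_bl_head *bucket_of(const struct qstr *name)
{
	return d_hash(name->hash_len);
}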
 125
 126struct dentry_stat_t {
 127	long nr_dentry;
 128	long nr_unused;
 129	long age_limit;		/* age in seconds */
 130	long want_pages;	/* pages requested by system */
 131	long nr_negative;	/* # of unused negative dentries */
 132	long dummy;		/* Reserved for future use */
 133};
 134
 135static DEFINE_PER_CPU(long, nr_dentry);
 136static DEFINE_PER_CPU(long, nr_dentry_unused);
 137static DEFINE_PER_CPU(long, nr_dentry_negative);
 138static int dentry_negative_policy;
 139
 140#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
 141/* Statistics gathering. */
 142static struct dentry_stat_t dentry_stat = {
 143	.age_limit = 45,
 144};
 145
 146/*
 147 * Here we resort to our own counters instead of using generic per-cpu counters
  148 * for consistency with what the vfs inode code does. We expect to get
  149 * better code and performance by having our own specialized counters.
 150 *
 151 * Please note that the loop is done over all possible CPUs, not over all online
 152 * CPUs. The reason for this is that we don't want to play games with CPUs going
 153 * on and off. If one of them goes off, we will just keep their counters.
 154 *
 155 * glommer: See cffbc8a for details, and if you ever intend to change this,
 156 * please update all vfs counters to match.
 157 */
 158static long get_nr_dentry(void)
 159{
 160	int i;
 161	long sum = 0;
 162	for_each_possible_cpu(i)
 163		sum += per_cpu(nr_dentry, i);
 164	return sum < 0 ? 0 : sum;
 165}
 166
 167static long get_nr_dentry_unused(void)
 168{
 169	int i;
 170	long sum = 0;
 171	for_each_possible_cpu(i)
 172		sum += per_cpu(nr_dentry_unused, i);
 173	return sum < 0 ? 0 : sum;
 174}
 175
 176static long get_nr_dentry_negative(void)
 177{
 178	int i;
 179	long sum = 0;
 180
 181	for_each_possible_cpu(i)
 182		sum += per_cpu(nr_dentry_negative, i);
 183	return sum < 0 ? 0 : sum;
 184}
 185
 186static int proc_nr_dentry(const struct ctl_table *table, int write, void *buffer,
 187			  size_t *lenp, loff_t *ppos)
 188{
 189	dentry_stat.nr_dentry = get_nr_dentry();
 190	dentry_stat.nr_unused = get_nr_dentry_unused();
 191	dentry_stat.nr_negative = get_nr_dentry_negative();
 192	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
 193}
 194
 195static struct ctl_table fs_dcache_sysctls[] = {
 196	{
 197		.procname	= "dentry-state",
 198		.data		= &dentry_stat,
 199		.maxlen		= 6*sizeof(long),
 200		.mode		= 0444,
 201		.proc_handler	= proc_nr_dentry,
 202	},
 203	{
 204		.procname	= "dentry-negative",
 205		.data		= &dentry_negative_policy,
 206		.maxlen		= sizeof(dentry_negative_policy),
 207		.mode		= 0644,
 208		.proc_handler	= proc_dointvec_minmax,
 209		.extra1		= SYSCTL_ZERO,
 210		.extra2		= SYSCTL_ONE,
 211	},
 212};
 213
 214static int __init init_fs_dcache_sysctls(void)
 215{
 216	register_sysctl_init("fs", fs_dcache_sysctls);
 217	return 0;
 218}
 219fs_initcall(init_fs_dcache_sysctls);
 220#endif
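/*
 * Example of reading the statistics exposed above (an assumption about
 * typical usage, not code from this file):
 *
 *	$ cat /proc/sys/fs/dentry-state
 *
 * prints the six longs of struct dentry_stat_t in order: nr_dentry,
 * nr_unused, age_limit, want_pages, nr_negative, dummy.
 */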
 221
 222/*
 223 * Compare 2 name strings, return 0 if they match, otherwise non-zero.
 224 * The strings are both count bytes long, and count is non-zero.
 225 */
 226#ifdef CONFIG_DCACHE_WORD_ACCESS
 227
 228#include <asm/word-at-a-time.h>
 229/*
  230 * NOTE! 'cs' comes from a dentry, so it has an
  231 * aligned allocation for this particular component. We don't
 232 * strictly need the load_unaligned_zeropad() safety, but it
 233 * doesn't hurt either.
 234 *
 235 * In contrast, 'ct' and 'tcount' can be from a pathname, and do
 236 * need the careful unaligned handling.
 237 */
 238static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
 239{
 240	unsigned long a,b,mask;
 241
 242	for (;;) {
 243		a = read_word_at_a_time(cs);
 244		b = load_unaligned_zeropad(ct);
 245		if (tcount < sizeof(unsigned long))
 246			break;
 247		if (unlikely(a != b))
 248			return 1;
 249		cs += sizeof(unsigned long);
 250		ct += sizeof(unsigned long);
 251		tcount -= sizeof(unsigned long);
 252		if (!tcount)
 253			return 0;
 254	}
 255	mask = bytemask_from_count(tcount);
 256	return unlikely(!!((a ^ b) & mask));
 257}
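/*
 * Worked example (little-endian, 64-bit; not part of the original):
 * with a trailing tcount of 3, bytemask_from_count(3) yields
 * 0x0000000000ffffff, so only the low three bytes of the final words
 * take part in the (a ^ b) & mask test; the remaining bytes of the
 * word are ignored.
 */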
 258
 259#else
 260
 261static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
 262{
 263	do {
 264		if (*cs != *ct)
 265			return 1;
 266		cs++;
 267		ct++;
 268		tcount--;
 269	} while (tcount);
 270	return 0;
 271}
 272
 273#endif
 274
 275static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *ct, unsigned tcount)
 276{
 277	/*
 278	 * Be careful about RCU walk racing with rename:
 279	 * use 'READ_ONCE' to fetch the name pointer.
 280	 *
 281	 * NOTE! Even if a rename will mean that the length
 282	 * was not loaded atomically, we don't care. The
 283	 * RCU walk will check the sequence count eventually,
 284	 * and catch it. And we won't overrun the buffer,
 285	 * because we're reading the name pointer atomically,
 286	 * and a dentry name is guaranteed to be properly
 287	 * terminated with a NUL byte.
 288	 *
 289	 * End result: even if 'len' is wrong, we'll exit
 290	 * early because the data cannot match (there can
 291	 * be no NUL in the ct/tcount data)
 292	 */
 293	const unsigned char *cs = READ_ONCE(dentry->d_name.name);
 294
 295	return dentry_string_cmp(cs, ct, tcount);
 296}
 297
 298struct external_name {
 299	union {
 300		atomic_t count;
 301		struct rcu_head head;
 302	} u;
 303	unsigned char name[];
 304};
 305
 306static inline struct external_name *external_name(struct dentry *dentry)
 307{
 308	return container_of(dentry->d_name.name, struct external_name, name[0]);
 309}
 310
 311static void __d_free(struct rcu_head *head)
 312{
 313	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
 314
 315	kmem_cache_free(dentry_cache, dentry); 
 316}
 317
 318static void __d_free_external(struct rcu_head *head)
 319{
 320	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
 321	kfree(external_name(dentry));
 322	kmem_cache_free(dentry_cache, dentry);
 323}
 324
 325static inline int dname_external(const struct dentry *dentry)
 326{
 327	return dentry->d_name.name != dentry->d_iname;
 328}
 329
 330void take_dentry_name_snapshot(struct name_snapshot *name, struct dentry *dentry)
 331{
 332	spin_lock(&dentry->d_lock);
 333	name->name = dentry->d_name;
 334	if (unlikely(dname_external(dentry))) {
 335		atomic_inc(&external_name(dentry)->u.count);
 336	} else {
 337		memcpy(name->inline_name, dentry->d_iname,
 338		       dentry->d_name.len + 1);
 339		name->name.name = name->inline_name;
 340	}
 341	spin_unlock(&dentry->d_lock);
 342}
 343EXPORT_SYMBOL(take_dentry_name_snapshot);
 344
 345void release_dentry_name_snapshot(struct name_snapshot *name)
 346{
 347	if (unlikely(name->name.name != name->inline_name)) {
 348		struct external_name *p;
 349		p = container_of(name->name.name, struct external_name, name[0]);
 350		if (unlikely(atomic_dec_and_test(&p->u.count)))
 351			kfree_rcu(p, u.head);
 352	}
 353}
 354EXPORT_SYMBOL(release_dentry_name_snapshot);
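/*
 * Usage sketch (hypothetical caller): pin a stable copy of the name
 * across an operation that may race with rename, then drop it.
 */
static void report_name(struct dentry *dentry)
{
	struct name_snapshot snap;

	take_dentry_name_snapshot(&snap, dentry);
	pr_info("dentry name: %s\n", snap.name.name);
	release_dentry_name_snapshot(&snap);
}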
 355
 356static inline void __d_set_inode_and_type(struct dentry *dentry,
 357					  struct inode *inode,
 358					  unsigned type_flags)
 359{
 360	unsigned flags;
 361
 362	dentry->d_inode = inode;
 363	flags = READ_ONCE(dentry->d_flags);
 364	flags &= ~DCACHE_ENTRY_TYPE;
 365	flags |= type_flags;
 366	smp_store_release(&dentry->d_flags, flags);
 367}
 368
 369static inline void __d_clear_type_and_inode(struct dentry *dentry)
 370{
 371	unsigned flags = READ_ONCE(dentry->d_flags);
 372
 373	flags &= ~DCACHE_ENTRY_TYPE;
 374	WRITE_ONCE(dentry->d_flags, flags);
 375	dentry->d_inode = NULL;
 376	/*
 377	 * The negative counter only tracks dentries on the LRU. Don't inc if
 378	 * d_lru is on another list.
 379	 */
 380	if ((flags & (DCACHE_LRU_LIST|DCACHE_SHRINK_LIST)) == DCACHE_LRU_LIST)
 381		this_cpu_inc(nr_dentry_negative);
 382}
 383
 384static void dentry_free(struct dentry *dentry)
 385{
 386	WARN_ON(!hlist_unhashed(&dentry->d_u.d_alias));
 387	if (unlikely(dname_external(dentry))) {
 388		struct external_name *p = external_name(dentry);
 389		if (likely(atomic_dec_and_test(&p->u.count))) {
 390			call_rcu(&dentry->d_u.d_rcu, __d_free_external);
 391			return;
 392		}
 393	}
 394	/* if dentry was never visible to RCU, immediate free is OK */
 395	if (dentry->d_flags & DCACHE_NORCU)
 396		__d_free(&dentry->d_u.d_rcu);
 397	else
 398		call_rcu(&dentry->d_u.d_rcu, __d_free);
 399}
 400
 401/*
 402 * Release the dentry's inode, using the filesystem
 403 * d_iput() operation if defined.
 404 */
 405static void dentry_unlink_inode(struct dentry * dentry)
 406	__releases(dentry->d_lock)
 407	__releases(dentry->d_inode->i_lock)
 408{
 409	struct inode *inode = dentry->d_inode;
 410
 411	raw_write_seqcount_begin(&dentry->d_seq);
 412	__d_clear_type_and_inode(dentry);
 413	hlist_del_init(&dentry->d_u.d_alias);
 414	raw_write_seqcount_end(&dentry->d_seq);
 415	spin_unlock(&dentry->d_lock);
 416	spin_unlock(&inode->i_lock);
 417	if (!inode->i_nlink)
 418		fsnotify_inoderemove(inode);
 419	if (dentry->d_op && dentry->d_op->d_iput)
 420		dentry->d_op->d_iput(dentry, inode);
 421	else
 422		iput(inode);
 423}
 424
 425/*
 426 * The DCACHE_LRU_LIST bit is set whenever the 'd_lru' entry
 427 * is in use - which includes both the "real" per-superblock
 428 * LRU list _and_ the DCACHE_SHRINK_LIST use.
 429 *
 430 * The DCACHE_SHRINK_LIST bit is set whenever the dentry is
 431 * on the shrink list (ie not on the superblock LRU list).
 432 *
 433 * The per-cpu "nr_dentry_unused" counters are updated with
 434 * the DCACHE_LRU_LIST bit.
 435 *
 436 * The per-cpu "nr_dentry_negative" counters are only updated
 437 * when deleted from or added to the per-superblock LRU list, not
 438 * from/to the shrink list. That is to avoid an unneeded dec/inc
 439 * pair when moving from LRU to shrink list in select_collect().
 440 *
 441 * These helper functions make sure we always follow the
 442 * rules. d_lock must be held by the caller.
 443 */
 444#define D_FLAG_VERIFY(dentry,x) WARN_ON_ONCE(((dentry)->d_flags & (DCACHE_LRU_LIST | DCACHE_SHRINK_LIST)) != (x))
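/*
 * Restating the rules above as the three states D_FLAG_VERIFY() can
 * see: 0 means on no list; DCACHE_LRU_LIST alone means on the
 * per-superblock LRU; DCACHE_LRU_LIST|DCACHE_SHRINK_LIST means on a
 * private shrink list.  DCACHE_SHRINK_LIST without DCACHE_LRU_LIST
 * never occurs.
 */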
 445static void d_lru_add(struct dentry *dentry)
 446{
 447	D_FLAG_VERIFY(dentry, 0);
 448	dentry->d_flags |= DCACHE_LRU_LIST;
 449	this_cpu_inc(nr_dentry_unused);
 450	if (d_is_negative(dentry))
 451		this_cpu_inc(nr_dentry_negative);
 452	WARN_ON_ONCE(!list_lru_add_obj(
 453			&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
 454}
 455
 456static void d_lru_del(struct dentry *dentry)
 457{
 458	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
 459	dentry->d_flags &= ~DCACHE_LRU_LIST;
 460	this_cpu_dec(nr_dentry_unused);
 461	if (d_is_negative(dentry))
 462		this_cpu_dec(nr_dentry_negative);
 463	WARN_ON_ONCE(!list_lru_del_obj(
 464			&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
 465}
 466
 467static void d_shrink_del(struct dentry *dentry)
 468{
 469	D_FLAG_VERIFY(dentry, DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
 470	list_del_init(&dentry->d_lru);
 471	dentry->d_flags &= ~(DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
 472	this_cpu_dec(nr_dentry_unused);
 473}
 474
 475static void d_shrink_add(struct dentry *dentry, struct list_head *list)
 476{
 477	D_FLAG_VERIFY(dentry, 0);
 478	list_add(&dentry->d_lru, list);
 479	dentry->d_flags |= DCACHE_SHRINK_LIST | DCACHE_LRU_LIST;
 480	this_cpu_inc(nr_dentry_unused);
 481}
 482
 483/*
 484 * These can only be called under the global LRU lock, ie during the
 485 * callback for freeing the LRU list. "isolate" removes it from the
 486 * LRU lists entirely, while shrink_move moves it to the indicated
 487 * private list.
 488 */
 489static void d_lru_isolate(struct list_lru_one *lru, struct dentry *dentry)
 490{
 491	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
 492	dentry->d_flags &= ~DCACHE_LRU_LIST;
 493	this_cpu_dec(nr_dentry_unused);
 494	if (d_is_negative(dentry))
 495		this_cpu_dec(nr_dentry_negative);
 496	list_lru_isolate(lru, &dentry->d_lru);
 497}
 498
 499static void d_lru_shrink_move(struct list_lru_one *lru, struct dentry *dentry,
 500			      struct list_head *list)
 501{
 502	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
 503	dentry->d_flags |= DCACHE_SHRINK_LIST;
 504	if (d_is_negative(dentry))
 505		this_cpu_dec(nr_dentry_negative);
 506	list_lru_isolate_move(lru, &dentry->d_lru, list);
 507}
 508
 509static void ___d_drop(struct dentry *dentry)
 510{
 511	struct hlist_bl_head *b;
 512	/*
 513	 * Hashed dentries are normally on the dentry hashtable,
 514	 * with the exception of those newly allocated by
 515	 * d_obtain_root, which are always IS_ROOT:
 516	 */
 517	if (unlikely(IS_ROOT(dentry)))
 518		b = &dentry->d_sb->s_roots;
 519	else
 520		b = d_hash(dentry->d_name.hash);
 521
 522	hlist_bl_lock(b);
 523	__hlist_bl_del(&dentry->d_hash);
 524	hlist_bl_unlock(b);
 525}
 526
 527void __d_drop(struct dentry *dentry)
 528{
 529	if (!d_unhashed(dentry)) {
 530		___d_drop(dentry);
 531		dentry->d_hash.pprev = NULL;
 532		write_seqcount_invalidate(&dentry->d_seq);
 533	}
 534}
 535EXPORT_SYMBOL(__d_drop);
 536
 537/**
 538 * d_drop - drop a dentry
 539 * @dentry: dentry to drop
 540 *
 541 * d_drop() unhashes the entry from the parent dentry hashes, so that it won't
 542 * be found through a VFS lookup any more. Note that this is different from
 543 * deleting the dentry - d_delete will try to mark the dentry negative if
 544 * possible, giving a successful _negative_ lookup, while d_drop will
 545 * just make the cache lookup fail.
 546 *
 547 * d_drop() is used mainly for stuff that wants to invalidate a dentry for some
 548 * reason (NFS timeouts or autofs deletes).
 549 *
 550 * __d_drop requires dentry->d_lock
 551 *
 552 * ___d_drop doesn't mark dentry as "unhashed"
 553 * (dentry->d_hash.pprev will be LIST_POISON2, not NULL).
 554 */
 555void d_drop(struct dentry *dentry)
 556{
 557	spin_lock(&dentry->d_lock);
 558	__d_drop(dentry);
 559	spin_unlock(&dentry->d_lock);
 560}
 561EXPORT_SYMBOL(d_drop);
 562
 563static inline void dentry_unlist(struct dentry *dentry)
 564{
 565	struct dentry *next;
 566	/*
 567	 * Inform d_walk() and shrink_dentry_list() that we are no longer
 568	 * attached to the dentry tree
 569	 */
 570	dentry->d_flags |= DCACHE_DENTRY_KILLED;
 571	if (unlikely(hlist_unhashed(&dentry->d_sib)))
 572		return;
 573	__hlist_del(&dentry->d_sib);
 574	/*
 575	 * Cursors can move around the list of children.  While we'd been
 576	 * a normal list member, it didn't matter - ->d_sib.next would've
 577	 * been updated.  However, from now on it won't be and for the
 578	 * things like d_walk() it might end up with a nasty surprise.
 579	 * Normally d_walk() doesn't care about cursors moving around -
 580	 * ->d_lock on parent prevents that and since a cursor has no children
 581	 * of its own, we get through it without ever unlocking the parent.
 582	 * There is one exception, though - if we ascend from a child that
 583	 * gets killed as soon as we unlock it, the next sibling is found
 584	 * using the value left in its ->d_sib.next.  And if _that_
 585	 * pointed to a cursor, and cursor got moved (e.g. by lseek())
 586	 * before d_walk() regains parent->d_lock, we'll end up skipping
 587	 * everything the cursor had been moved past.
 588	 *
 589	 * Solution: make sure that the pointer left behind in ->d_sib.next
 590	 * points to something that won't be moving around.  I.e. skip the
 591	 * cursors.
 592	 */
 593	while (dentry->d_sib.next) {
 594		next = hlist_entry(dentry->d_sib.next, struct dentry, d_sib);
 595		if (likely(!(next->d_flags & DCACHE_DENTRY_CURSOR)))
 596			break;
 597		dentry->d_sib.next = next->d_sib.next;
 598	}
 599}
 600
 601static struct dentry *__dentry_kill(struct dentry *dentry)
 602{
 603	struct dentry *parent = NULL;
 604	bool can_free = true;
 605
 606	/*
 607	 * The dentry is now unrecoverably dead to the world.
 608	 */
 609	lockref_mark_dead(&dentry->d_lockref);
 610
 611	/*
 612	 * inform the fs via d_prune that this dentry is about to be
 613	 * unhashed and destroyed.
 614	 */
 615	if (dentry->d_flags & DCACHE_OP_PRUNE)
 616		dentry->d_op->d_prune(dentry);
 617
 618	if (dentry->d_flags & DCACHE_LRU_LIST) {
 619		if (!(dentry->d_flags & DCACHE_SHRINK_LIST))
 620			d_lru_del(dentry);
 621	}
 622	/* if it was on the hash then remove it */
 623	__d_drop(dentry);
 624	if (dentry->d_inode)
 625		dentry_unlink_inode(dentry);
 626	else
 627		spin_unlock(&dentry->d_lock);
 628	this_cpu_dec(nr_dentry);
 629	if (dentry->d_op && dentry->d_op->d_release)
 630		dentry->d_op->d_release(dentry);
 631
 632	cond_resched();
 633	/* now that it's negative, ->d_parent is stable */
 634	if (!IS_ROOT(dentry)) {
 635		parent = dentry->d_parent;
 636		spin_lock(&parent->d_lock);
 637	}
 638	spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
 639	dentry_unlist(dentry);
 640	if (dentry->d_flags & DCACHE_SHRINK_LIST)
 641		can_free = false;
 642	spin_unlock(&dentry->d_lock);
 643	if (likely(can_free))
 644		dentry_free(dentry);
 645	if (parent && --parent->d_lockref.count) {
 646		spin_unlock(&parent->d_lock);
 647		return NULL;
 648	}
 649	return parent;
 650}
 651
 652/*
 653 * Lock a dentry for feeding it to __dentry_kill().
 654 * Called under rcu_read_lock() and dentry->d_lock; the former
 655 * guarantees that nothing we access will be freed under us.
 656 * Note that dentry is *not* protected from concurrent dentry_kill(),
 657 * d_delete(), etc.
 658 *
 659 * Return false if dentry is busy.  Otherwise, return true and have
 660 * that dentry's inode locked.
 661 */
 662
 663static bool lock_for_kill(struct dentry *dentry)
 664{
 665	struct inode *inode = dentry->d_inode;
 666
 667	if (unlikely(dentry->d_lockref.count))
 668		return false;
 669
 670	if (!inode || likely(spin_trylock(&inode->i_lock)))
 671		return true;
 672
 673	do {
 674		spin_unlock(&dentry->d_lock);
 675		spin_lock(&inode->i_lock);
 676		spin_lock(&dentry->d_lock);
 677		if (likely(inode == dentry->d_inode))
 678			break;
 679		spin_unlock(&inode->i_lock);
 680		inode = dentry->d_inode;
 681	} while (inode);
 682	if (likely(!dentry->d_lockref.count))
 683		return true;
 684	if (inode)
 685		spin_unlock(&inode->i_lock);
 686	return false;
 687}
 688
 689/*
 690 * Decide if dentry is worth retaining.  Usually this is called with dentry
 691 * locked; if not locked, we are more limited and might not be able to tell
 692 * without a lock.  False in this case means "punt to locked path and recheck".
 693 *
 694 * In case we aren't locked, these predicates are not "stable". However, it is
 695 * sufficient that at some point after we dropped the reference the dentry was
 696 * hashed and the flags had the proper value. Other dentry users may have
 697 * re-gotten a reference to the dentry and change that, but our work is done -
 698 * we can leave the dentry around with a zero refcount.
 699 */
 700static inline bool retain_dentry(struct dentry *dentry, bool locked)
 701{
 702	unsigned int d_flags;
 703
 704	smp_rmb();
 705	d_flags = READ_ONCE(dentry->d_flags);
 706
 707	// Unreachable? Nobody would be able to look it up, no point retaining
 708	if (unlikely(d_unhashed(dentry)))
 709		return false;
 710
 711	// Same if it's disconnected
 712	if (unlikely(d_flags & DCACHE_DISCONNECTED))
 713		return false;
 714
 715	// ->d_delete() might tell us not to bother, but that requires
 716	// ->d_lock; can't decide without it
 717	if (unlikely(d_flags & DCACHE_OP_DELETE)) {
 718		if (!locked || dentry->d_op->d_delete(dentry))
 719			return false;
 720	}
 721
 722	// Explicitly told not to bother
 723	if (unlikely(d_flags & DCACHE_DONTCACHE))
 724		return false;
 725
 726	// At this point it looks like we ought to keep it.  We also might
 727	// need to do something - put it on LRU if it wasn't there already
 728	// and mark it referenced if it was on LRU, but not marked yet.
 729	// Unfortunately, both actions require ->d_lock, so in lockless
 730	// case we'd have to punt rather than doing those.
 731	if (unlikely(!(d_flags & DCACHE_LRU_LIST))) {
 732		if (!locked)
 733			return false;
 734		d_lru_add(dentry);
 735	} else if (unlikely(!(d_flags & DCACHE_REFERENCED))) {
 736		if (!locked)
 737			return false;
 738		dentry->d_flags |= DCACHE_REFERENCED;
 739	}
 740	return true;
 741}
 742
 743void d_mark_dontcache(struct inode *inode)
 744{
 745	struct dentry *de;
 746
 747	spin_lock(&inode->i_lock);
 748	hlist_for_each_entry(de, &inode->i_dentry, d_u.d_alias) {
 749		spin_lock(&de->d_lock);
 750		de->d_flags |= DCACHE_DONTCACHE;
 751		spin_unlock(&de->d_lock);
 752	}
 753	inode->i_state |= I_DONTCACHE;
 754	spin_unlock(&inode->i_lock);
 755}
 756EXPORT_SYMBOL(d_mark_dontcache);
 757
 758/*
 759 * Try to do a lockless dput(), and return whether that was successful.
 760 *
 761 * If unsuccessful, we return false, having already taken the dentry lock.
 762 * In that case refcount is guaranteed to be zero and we have already
 763 * decided that it's not worth keeping around.
 764 *
 765 * The caller needs to hold the RCU read lock, so that the dentry is
 766 * guaranteed to stay around even if the refcount goes down to zero!
 767 */
 768static inline bool fast_dput(struct dentry *dentry)
 769{
 770	int ret;
 771
 772	/*
 773	 * try to decrement the lockref optimistically.
 774	 */
 775	ret = lockref_put_return(&dentry->d_lockref);
 776
 777	/*
 778	 * If the lockref_put_return() failed due to the lock being held
 779	 * by somebody else, the fast path has failed. We will need to
 780	 * get the lock, and then check the count again.
 781	 */
 782	if (unlikely(ret < 0)) {
 783		spin_lock(&dentry->d_lock);
 784		if (WARN_ON_ONCE(dentry->d_lockref.count <= 0)) {
 785			spin_unlock(&dentry->d_lock);
 786			return true;
 787		}
 788		dentry->d_lockref.count--;
 789		goto locked;
 790	}
 791
 792	/*
 793	 * If we weren't the last ref, we're done.
 794	 */
 795	if (ret)
 796		return true;
 797
 798	/*
 799	 * Can we decide that decrement of refcount is all we needed without
 800	 * taking the lock?  There's a very common case when it's all we need -
 801	 * dentry looks like it ought to be retained and there's nothing else
 802	 * to do.
 803	 */
 804	if (retain_dentry(dentry, false))
 805		return true;
 806
 807	/*
 808	 * Either not worth retaining or we can't tell without the lock.
 809	 * Get the lock, then.  We've already decremented the refcount to 0,
 810	 * but we'll need to re-check the situation after getting the lock.
 811	 */
 812	spin_lock(&dentry->d_lock);
 813
 814	/*
 815	 * Did somebody else grab a reference to it in the meantime, and
 816	 * we're no longer the last user after all? Alternatively, somebody
 817	 * else could have killed it and marked it dead. Either way, we
 818	 * don't need to do anything else.
 819	 */
 820locked:
 821	if (dentry->d_lockref.count || retain_dentry(dentry, true)) {
 822		spin_unlock(&dentry->d_lock);
 823		return true;
 824	}
 825	return false;
 826}
 827
 828
 829/* 
 830 * This is dput
 831 *
 832 * This is complicated by the fact that we do not want to put
 833 * dentries that are no longer on any hash chain on the unused
 834 * list: we'd much rather just get rid of them immediately.
 835 *
 836 * However, that implies that we have to traverse the dentry
 837 * tree upwards to the parents which might _also_ now be
 838 * scheduled for deletion (it may have been only waiting for
 839 * its last child to go away).
 840 *
 841 * This tail recursion is done by hand as we don't want to depend
 842 * on the compiler to always get this right (gcc generally doesn't).
 843 * Real recursion would eat up our stack space.
 844 */
 845
 846/*
 847 * dput - release a dentry
 848 * @dentry: dentry to release 
 849 *
 850 * Release a dentry. This will drop the usage count and if appropriate
 851 * call the dentry unlink method as well as removing it from the queues and
 852 * releasing its resources. If the parent dentries were scheduled for release
 853 * they too may now get deleted.
 854 */
 855void dput(struct dentry *dentry)
 856{
 857	if (!dentry)
 858		return;
 859	might_sleep();
 860	rcu_read_lock();
 861	if (likely(fast_dput(dentry))) {
 862		rcu_read_unlock();
 863		return;
 864	}
 865	while (lock_for_kill(dentry)) {
 866		rcu_read_unlock();
 867		dentry = __dentry_kill(dentry);
 868		if (!dentry)
 869			return;
 870		if (retain_dentry(dentry, true)) {
 871			spin_unlock(&dentry->d_lock);
 872			return;
 873		}
 874		rcu_read_lock();
 875	}
 876	rcu_read_unlock();
 877	spin_unlock(&dentry->d_lock);
 878}
 879EXPORT_SYMBOL(dput);
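/*
 * Usage sketch (hypothetical caller): every dget() must eventually be
 * balanced by a dput() once the reference is no longer needed.
 */
static void touch_and_release(struct dentry *dentry)
{
	struct dentry *d = dget(dentry);

	/* ... use d while the reference pins it in memory ... */
	dput(d);
}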
 880
 881static void to_shrink_list(struct dentry *dentry, struct list_head *list)
 882__must_hold(&dentry->d_lock)
 883{
 884	if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) {
 885		if (dentry->d_flags & DCACHE_LRU_LIST)
 886			d_lru_del(dentry);
 887		d_shrink_add(dentry, list);
 888	}
 889}
 890
 891void dput_to_list(struct dentry *dentry, struct list_head *list)
 892{
 893	rcu_read_lock();
 894	if (likely(fast_dput(dentry))) {
 895		rcu_read_unlock();
 896		return;
 897	}
 898	rcu_read_unlock();
 899	to_shrink_list(dentry, list);
 900	spin_unlock(&dentry->d_lock);
 901}
 902
 903struct dentry *dget_parent(struct dentry *dentry)
 904{
 905	int gotref;
 906	struct dentry *ret;
 907	unsigned seq;
 908
 909	/*
 910	 * Do optimistic parent lookup without any
 911	 * locking.
 912	 */
 913	rcu_read_lock();
 914	seq = raw_seqcount_begin(&dentry->d_seq);
 915	ret = READ_ONCE(dentry->d_parent);
 916	gotref = lockref_get_not_zero(&ret->d_lockref);
 917	rcu_read_unlock();
 918	if (likely(gotref)) {
 919		if (!read_seqcount_retry(&dentry->d_seq, seq))
 920			return ret;
 921		dput(ret);
 922	}
 923
 924repeat:
 925	/*
 926	 * Don't need rcu_dereference because we re-check it was correct under
 927	 * the lock.
 928	 */
 929	rcu_read_lock();
 930	ret = dentry->d_parent;
 931	spin_lock(&ret->d_lock);
 932	if (unlikely(ret != dentry->d_parent)) {
 933		spin_unlock(&ret->d_lock);
 934		rcu_read_unlock();
 935		goto repeat;
 936	}
 937	rcu_read_unlock();
 938	BUG_ON(!ret->d_lockref.count);
 939	ret->d_lockref.count++;
 940	spin_unlock(&ret->d_lock);
 941	return ret;
 942}
 943EXPORT_SYMBOL(dget_parent);
 944
 945static struct dentry * __d_find_any_alias(struct inode *inode)
 946{
 947	struct dentry *alias;
 948
 949	if (hlist_empty(&inode->i_dentry))
 950		return NULL;
 951	alias = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
 952	lockref_get(&alias->d_lockref);
 953	return alias;
 954}
 955
 956/**
 957 * d_find_any_alias - find any alias for a given inode
 958 * @inode: inode to find an alias for
 959 *
 960 * If any aliases exist for the given inode, take and return a
 961 * reference for one of them.  If no aliases exist, return %NULL.
 962 */
 963struct dentry *d_find_any_alias(struct inode *inode)
 964{
 965	struct dentry *de;
 966
 967	spin_lock(&inode->i_lock);
 968	de = __d_find_any_alias(inode);
 969	spin_unlock(&inode->i_lock);
 970	return de;
 971}
 972EXPORT_SYMBOL(d_find_any_alias);
 973
 974static struct dentry *__d_find_alias(struct inode *inode)
 975{
 976	struct dentry *alias;
 977
 978	if (S_ISDIR(inode->i_mode))
 979		return __d_find_any_alias(inode);
 980
 981	hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
 982		spin_lock(&alias->d_lock);
 983 		if (!d_unhashed(alias)) {
 984			dget_dlock(alias);
 985			spin_unlock(&alias->d_lock);
 986			return alias;
 987		}
 988		spin_unlock(&alias->d_lock);
 989	}
 990	return NULL;
 991}
 992
 993/**
 994 * d_find_alias - grab a hashed alias of inode
 995 * @inode: inode in question
 996 *
 997 * If inode has a hashed alias, or is a directory and has any alias,
 998 * acquire the reference to alias and return it. Otherwise return NULL.
 999 * Notice that if inode is a directory there can be only one alias and
1000 * it can be unhashed only if it has no children, or if it is the root
1001 * of a filesystem, or if the directory was renamed and d_revalidate
1002 * was the first vfs operation to notice.
1003 *
1004 * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer
1005 * any other hashed alias over that one.
1006 */
1007struct dentry *d_find_alias(struct inode *inode)
1008{
1009	struct dentry *de = NULL;
1010
1011	if (!hlist_empty(&inode->i_dentry)) {
1012		spin_lock(&inode->i_lock);
1013		de = __d_find_alias(inode);
1014		spin_unlock(&inode->i_lock);
1015	}
1016	return de;
1017}
1018EXPORT_SYMBOL(d_find_alias);
1019
1020/*
1021 *  Caller MUST be holding rcu_read_lock() and be guaranteed
1022 *  that inode won't get freed until rcu_read_unlock().
1023 */
1024struct dentry *d_find_alias_rcu(struct inode *inode)
1025{
1026	struct hlist_head *l = &inode->i_dentry;
1027	struct dentry *de = NULL;
1028
1029	spin_lock(&inode->i_lock);
1030	// ->i_dentry and ->i_rcu are colocated, but the latter won't be
1031	// used without having I_FREEING set, which means no aliases left
1032	if (likely(!(inode->i_state & I_FREEING) && !hlist_empty(l))) {
1033		if (S_ISDIR(inode->i_mode)) {
1034			de = hlist_entry(l->first, struct dentry, d_u.d_alias);
1035		} else {
1036			hlist_for_each_entry(de, l, d_u.d_alias)
1037				if (!d_unhashed(de))
1038					break;
1039		}
1040	}
1041	spin_unlock(&inode->i_lock);
1042	return de;
1043}
1044
1045/*
1046 *	Try to kill dentries associated with this inode.
1047 * WARNING: you must own a reference to inode.
1048 */
1049void d_prune_aliases(struct inode *inode)
1050{
1051	LIST_HEAD(dispose);
1052	struct dentry *dentry;
1053
1054	spin_lock(&inode->i_lock);
1055	hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
1056		spin_lock(&dentry->d_lock);
1057		if (!dentry->d_lockref.count)
1058			to_shrink_list(dentry, &dispose);
1059		spin_unlock(&dentry->d_lock);
1060	}
1061	spin_unlock(&inode->i_lock);
1062	shrink_dentry_list(&dispose);
1063}
1064EXPORT_SYMBOL(d_prune_aliases);
1065
1066static inline void shrink_kill(struct dentry *victim)
1067{
1068	do {
1069		rcu_read_unlock();
1070		victim = __dentry_kill(victim);
1071		rcu_read_lock();
1072	} while (victim && lock_for_kill(victim));
1073	rcu_read_unlock();
1074	if (victim)
1075		spin_unlock(&victim->d_lock);
1076}
1077
1078void shrink_dentry_list(struct list_head *list)
1079{
1080	while (!list_empty(list)) {
1081		struct dentry *dentry;
1082
1083		dentry = list_entry(list->prev, struct dentry, d_lru);
1084		spin_lock(&dentry->d_lock);
1085		rcu_read_lock();
1086		if (!lock_for_kill(dentry)) {
1087			bool can_free;
1088			rcu_read_unlock();
1089			d_shrink_del(dentry);
1090			can_free = dentry->d_flags & DCACHE_DENTRY_KILLED;
1091			spin_unlock(&dentry->d_lock);
1092			if (can_free)
1093				dentry_free(dentry);
1094			continue;
1095		}
1096		d_shrink_del(dentry);
1097		shrink_kill(dentry);
1098	}
1099}
1100
1101static enum lru_status dentry_lru_isolate(struct list_head *item,
1102		struct list_lru_one *lru, void *arg)
1103{
1104	struct list_head *freeable = arg;
1105	struct dentry	*dentry = container_of(item, struct dentry, d_lru);
1106
1107
1108	/*
1109	 * we are inverting the lru lock/dentry->d_lock here,
1110	 * so use a trylock. If we fail to get the lock, just skip
1111	 * it
1112	 */
1113	if (!spin_trylock(&dentry->d_lock))
1114		return LRU_SKIP;
1115
1116	/*
1117	 * Referenced dentries are still in use. If they have active
1118	 * counts, just remove them from the LRU. Otherwise give them
1119	 * another pass through the LRU.
1120	 */
1121	if (dentry->d_lockref.count) {
1122		d_lru_isolate(lru, dentry);
1123		spin_unlock(&dentry->d_lock);
1124		return LRU_REMOVED;
1125	}
1126
1127	if (dentry->d_flags & DCACHE_REFERENCED) {
1128		dentry->d_flags &= ~DCACHE_REFERENCED;
1129		spin_unlock(&dentry->d_lock);
1130
1131		/*
1132		 * The list move itself will be made by the common LRU code. At
1133		 * this point, we've dropped the dentry->d_lock but keep the
1134		 * lru lock. This is safe to do, since every list movement is
1135		 * protected by the lru lock even if both locks are held.
1136		 *
1137		 * This is guaranteed by the fact that all LRU management
1138		 * functions are intermediated by the LRU API calls like
1139		 * list_lru_add_obj and list_lru_del_obj. List movement in this file
 1140		 * only ever occurs through these functions or through callbacks
1141		 * like this one, that are called from the LRU API.
1142		 *
1143		 * The only exceptions to this are functions like
1144		 * shrink_dentry_list, and code that first checks for the
1145		 * DCACHE_SHRINK_LIST flag.  Those are guaranteed to be
1146		 * operating only with stack provided lists after they are
 1147		 * operating only with stack-provided lists after they are
 1148		 * properly isolated from the main list.  It is thus always a
1149		 */
1150		return LRU_ROTATE;
1151	}
1152
1153	d_lru_shrink_move(lru, dentry, freeable);
1154	spin_unlock(&dentry->d_lock);
1155
1156	return LRU_REMOVED;
1157}
1158
1159/**
1160 * prune_dcache_sb - shrink the dcache
1161 * @sb: superblock
1162 * @sc: shrink control, passed to list_lru_shrink_walk()
1163 *
1164 * Attempt to shrink the superblock dcache LRU by @sc->nr_to_scan entries. This
1165 * is done when we need more memory and called from the superblock shrinker
1166 * function.
1167 *
1168 * This function may fail to free any resources if all the dentries are in
1169 * use.
1170 */
1171long prune_dcache_sb(struct super_block *sb, struct shrink_control *sc)
1172{
1173	LIST_HEAD(dispose);
1174	long freed;
1175
1176	freed = list_lru_shrink_walk(&sb->s_dentry_lru, sc,
1177				     dentry_lru_isolate, &dispose);
1178	shrink_dentry_list(&dispose);
1179	return freed;
1180}
1181
1182static enum lru_status dentry_lru_isolate_shrink(struct list_head *item,
1183		struct list_lru_one *lru, void *arg)
1184{
1185	struct list_head *freeable = arg;
1186	struct dentry	*dentry = container_of(item, struct dentry, d_lru);
1187
1188	/*
1189	 * we are inverting the lru lock/dentry->d_lock here,
1190	 * so use a trylock. If we fail to get the lock, just skip
1191	 * it
1192	 */
1193	if (!spin_trylock(&dentry->d_lock))
1194		return LRU_SKIP;
1195
1196	d_lru_shrink_move(lru, dentry, freeable);
1197	spin_unlock(&dentry->d_lock);
1198
1199	return LRU_REMOVED;
1200}
1201
1202
1203/**
1204 * shrink_dcache_sb - shrink dcache for a superblock
1205 * @sb: superblock
1206 *
1207 * Shrink the dcache for the specified super block. This is used to free
1208 * the dcache before unmounting a file system.
1209 */
1210void shrink_dcache_sb(struct super_block *sb)
1211{
1212	do {
1213		LIST_HEAD(dispose);
1214
1215		list_lru_walk(&sb->s_dentry_lru,
1216			dentry_lru_isolate_shrink, &dispose, 1024);
1217		shrink_dentry_list(&dispose);
1218	} while (list_lru_count(&sb->s_dentry_lru) > 0);
1219}
1220EXPORT_SYMBOL(shrink_dcache_sb);
1221
1222/**
 1223 * enum d_walk_ret - action to take during tree walk
 1224 * @D_WALK_CONTINUE:	continue walk
1225 * @D_WALK_QUIT:	quit walk
1226 * @D_WALK_NORETRY:	quit when retry is needed
1227 * @D_WALK_SKIP:	skip this dentry and its children
1228 */
1229enum d_walk_ret {
1230	D_WALK_CONTINUE,
1231	D_WALK_QUIT,
1232	D_WALK_NORETRY,
1233	D_WALK_SKIP,
1234};
1235
1236/**
1237 * d_walk - walk the dentry tree
1238 * @parent:	start of walk
1239 * @data:	data passed to @enter() and @finish()
1240 * @enter:	callback when first entering the dentry
1241 *
1242 * The @enter() callbacks are called with d_lock held.
1243 */
1244static void d_walk(struct dentry *parent, void *data,
1245		   enum d_walk_ret (*enter)(void *, struct dentry *))
1246{
1247	struct dentry *this_parent, *dentry;
1248	unsigned seq = 0;
1249	enum d_walk_ret ret;
1250	bool retry = true;
1251
1252again:
1253	read_seqbegin_or_lock(&rename_lock, &seq);
1254	this_parent = parent;
1255	spin_lock(&this_parent->d_lock);
1256
1257	ret = enter(data, this_parent);
1258	switch (ret) {
1259	case D_WALK_CONTINUE:
1260		break;
1261	case D_WALK_QUIT:
1262	case D_WALK_SKIP:
1263		goto out_unlock;
1264	case D_WALK_NORETRY:
1265		retry = false;
1266		break;
1267	}
1268repeat:
1269	dentry = d_first_child(this_parent);
1270resume:
1271	hlist_for_each_entry_from(dentry, d_sib) {
1272		if (unlikely(dentry->d_flags & DCACHE_DENTRY_CURSOR))
1273			continue;
1274
1275		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
1276
1277		ret = enter(data, dentry);
1278		switch (ret) {
1279		case D_WALK_CONTINUE:
1280			break;
1281		case D_WALK_QUIT:
1282			spin_unlock(&dentry->d_lock);
1283			goto out_unlock;
1284		case D_WALK_NORETRY:
1285			retry = false;
1286			break;
1287		case D_WALK_SKIP:
1288			spin_unlock(&dentry->d_lock);
1289			continue;
1290		}
1291
1292		if (!hlist_empty(&dentry->d_children)) {
1293			spin_unlock(&this_parent->d_lock);
1294			spin_release(&dentry->d_lock.dep_map, _RET_IP_);
1295			this_parent = dentry;
1296			spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
1297			goto repeat;
1298		}
1299		spin_unlock(&dentry->d_lock);
1300	}
1301	/*
1302	 * All done at this level ... ascend and resume the search.
1303	 */
1304	rcu_read_lock();
1305ascend:
1306	if (this_parent != parent) {
1307		dentry = this_parent;
1308		this_parent = dentry->d_parent;
1309
1310		spin_unlock(&dentry->d_lock);
1311		spin_lock(&this_parent->d_lock);
1312
1313		/* might go back up the wrong parent if we have had a rename. */
1314		if (need_seqretry(&rename_lock, seq))
1315			goto rename_retry;
1316		/* go into the first sibling still alive */
1317		hlist_for_each_entry_continue(dentry, d_sib) {
1318			if (likely(!(dentry->d_flags & DCACHE_DENTRY_KILLED))) {
1319				rcu_read_unlock();
1320				goto resume;
1321			}
1322		}
1323		goto ascend;
1324	}
1325	if (need_seqretry(&rename_lock, seq))
1326		goto rename_retry;
1327	rcu_read_unlock();
1328
1329out_unlock:
1330	spin_unlock(&this_parent->d_lock);
1331	done_seqretry(&rename_lock, seq);
1332	return;
1333
1334rename_retry:
1335	spin_unlock(&this_parent->d_lock);
1336	rcu_read_unlock();
1337	BUG_ON(seq & 1);
1338	if (!retry)
1339		return;
1340	seq = 1;
1341	goto again;
1342}
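/*
 * Sketch of a d_walk() callback (hypothetical, modelled on
 * path_check_mount() below): count every dentry in a subtree with
 *
 *	long count = 0;
 *	d_walk(parent, &count, count_one);
 */
static enum d_walk_ret count_one(void *data, struct dentry *dentry)
{
	/* called by d_walk() with dentry->d_lock held */
	(*(long *)data)++;
	return D_WALK_CONTINUE;
}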
1343
1344struct check_mount {
1345	struct vfsmount *mnt;
1346	unsigned int mounted;
1347};
1348
1349static enum d_walk_ret path_check_mount(void *data, struct dentry *dentry)
1350{
1351	struct check_mount *info = data;
1352	struct path path = { .mnt = info->mnt, .dentry = dentry };
1353
1354	if (likely(!d_mountpoint(dentry)))
1355		return D_WALK_CONTINUE;
1356	if (__path_is_mountpoint(&path)) {
1357		info->mounted = 1;
1358		return D_WALK_QUIT;
1359	}
1360	return D_WALK_CONTINUE;
1361}
1362
1363/**
1364 * path_has_submounts - check for mounts over a dentry in the
1365 *                      current namespace.
1366 * @parent: path to check.
1367 *
1368 * Return true if the parent or its subdirectories contain
1369 * a mount point in the current namespace.
1370 */
1371int path_has_submounts(const struct path *parent)
1372{
1373	struct check_mount data = { .mnt = parent->mnt, .mounted = 0 };
1374
1375	read_seqlock_excl(&mount_lock);
1376	d_walk(parent->dentry, &data, path_check_mount);
1377	read_sequnlock_excl(&mount_lock);
1378
1379	return data.mounted;
1380}
1381EXPORT_SYMBOL(path_has_submounts);
1382
1383/*
1384 * Called by mount code to set a mountpoint and check if the mountpoint is
1385 * reachable (e.g. NFS can unhash a directory dentry and then the complete
1386 * subtree can become unreachable).
1387 *
1388 * Only one of d_invalidate() and d_set_mounted() must succeed.  For
1389 * this reason take rename_lock and d_lock on dentry and ancestors.
1390 */
1391int d_set_mounted(struct dentry *dentry)
1392{
1393	struct dentry *p;
1394	int ret = -ENOENT;
1395	write_seqlock(&rename_lock);
1396	for (p = dentry->d_parent; !IS_ROOT(p); p = p->d_parent) {
1397		/* Need exclusion wrt. d_invalidate() */
1398		spin_lock(&p->d_lock);
1399		if (unlikely(d_unhashed(p))) {
1400			spin_unlock(&p->d_lock);
1401			goto out;
1402		}
1403		spin_unlock(&p->d_lock);
1404	}
1405	spin_lock(&dentry->d_lock);
1406	if (!d_unlinked(dentry)) {
1407		ret = -EBUSY;
1408		if (!d_mountpoint(dentry)) {
1409			dentry->d_flags |= DCACHE_MOUNTED;
1410			ret = 0;
1411		}
1412	}
1413 	spin_unlock(&dentry->d_lock);
1414out:
1415	write_sequnlock(&rename_lock);
1416	return ret;
1417}
1418
1419/*
1420 * Search the dentry child list of the specified parent,
1421 * and move any unused dentries to the end of the unused
1422 * list for prune_dcache(). We descend to the next level
1423 * whenever the d_children list is non-empty and continue
1424 * searching.
1425 *
1426 * It returns zero iff there are no unused children,
1427 * otherwise  it returns the number of children moved to
1428 * the end of the unused list. This may not be the total
1429 * number of unused children, because select_parent can
1430 * drop the lock and return early due to latency
1431 * constraints.
1432 */
1433
1434struct select_data {
1435	struct dentry *start;
1436	union {
1437		long found;
1438		struct dentry *victim;
1439	};
1440	struct list_head dispose;
1441};
1442
1443static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
1444{
1445	struct select_data *data = _data;
1446	enum d_walk_ret ret = D_WALK_CONTINUE;
1447
1448	if (data->start == dentry)
1449		goto out;
1450
1451	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
1452		data->found++;
1453	} else if (!dentry->d_lockref.count) {
1454		to_shrink_list(dentry, &data->dispose);
1455		data->found++;
1456	} else if (dentry->d_lockref.count < 0) {
1457		data->found++;
1458	}
1459	/*
1460	 * We can return to the caller if we have found some (this
1461	 * ensures forward progress). We'll be coming back to find
1462	 * the rest.
1463	 */
1464	if (!list_empty(&data->dispose))
1465		ret = need_resched() ? D_WALK_QUIT : D_WALK_NORETRY;
1466out:
1467	return ret;
1468}
1469
1470static enum d_walk_ret select_collect2(void *_data, struct dentry *dentry)
1471{
1472	struct select_data *data = _data;
1473	enum d_walk_ret ret = D_WALK_CONTINUE;
1474
1475	if (data->start == dentry)
1476		goto out;
1477
1478	if (!dentry->d_lockref.count) {
1479		if (dentry->d_flags & DCACHE_SHRINK_LIST) {
1480			rcu_read_lock();
1481			data->victim = dentry;
1482			return D_WALK_QUIT;
1483		}
1484		to_shrink_list(dentry, &data->dispose);
1485	}
1486	/*
1487	 * We can return to the caller if we have found some (this
1488	 * ensures forward progress). We'll be coming back to find
1489	 * the rest.
1490	 */
1491	if (!list_empty(&data->dispose))
1492		ret = need_resched() ? D_WALK_QUIT : D_WALK_NORETRY;
1493out:
1494	return ret;
1495}
1496
1497/**
1498 * shrink_dcache_parent - prune dcache
1499 * @parent: parent of entries to prune
1500 *
1501 * Prune the dcache to remove unused children of the parent dentry.
1502 */
1503void shrink_dcache_parent(struct dentry *parent)
1504{
1505	for (;;) {
1506		struct select_data data = {.start = parent};
1507
1508		INIT_LIST_HEAD(&data.dispose);
1509		d_walk(parent, &data, select_collect);
1510
1511		if (!list_empty(&data.dispose)) {
1512			shrink_dentry_list(&data.dispose);
1513			continue;
1514		}
1515
1516		cond_resched();
1517		if (!data.found)
1518			break;
1519		data.victim = NULL;
1520		d_walk(parent, &data, select_collect2);
1521		if (data.victim) {
1522			spin_lock(&data.victim->d_lock);
1523			if (!lock_for_kill(data.victim)) {
1524				spin_unlock(&data.victim->d_lock);
1525				rcu_read_unlock();
1526			} else {
1527				shrink_kill(data.victim);
1528			}
1529		}
1530		if (!list_empty(&data.dispose))
1531			shrink_dentry_list(&data.dispose);
1532	}
1533}
1534EXPORT_SYMBOL(shrink_dcache_parent);
1535
1536static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
1537{
 1538	/* it has busy descendants; complain about those instead */
1539	if (!hlist_empty(&dentry->d_children))
1540		return D_WALK_CONTINUE;
1541
1542	/* root with refcount 1 is fine */
1543	if (dentry == _data && dentry->d_lockref.count == 1)
1544		return D_WALK_CONTINUE;
1545
1546	WARN(1, "BUG: Dentry %p{i=%lx,n=%pd} "
1547			" still in use (%d) [unmount of %s %s]\n",
1548		       dentry,
1549		       dentry->d_inode ?
1550		       dentry->d_inode->i_ino : 0UL,
1551		       dentry,
1552		       dentry->d_lockref.count,
1553		       dentry->d_sb->s_type->name,
1554		       dentry->d_sb->s_id);
1555	return D_WALK_CONTINUE;
1556}
1557
1558static void do_one_tree(struct dentry *dentry)
1559{
1560	shrink_dcache_parent(dentry);
1561	d_walk(dentry, dentry, umount_check);
1562	d_drop(dentry);
1563	dput(dentry);
1564}
1565
1566/*
1567 * destroy the dentries attached to a superblock on unmounting
1568 */
1569void shrink_dcache_for_umount(struct super_block *sb)
1570{
1571	struct dentry *dentry;
1572
1573	rwsem_assert_held_write(&sb->s_umount);
1574
1575	dentry = sb->s_root;
1576	sb->s_root = NULL;
1577	do_one_tree(dentry);
1578
1579	while (!hlist_bl_empty(&sb->s_roots)) {
1580		dentry = dget(hlist_bl_entry(hlist_bl_first(&sb->s_roots), struct dentry, d_hash));
1581		do_one_tree(dentry);
1582	}
1583}
1584
1585static enum d_walk_ret find_submount(void *_data, struct dentry *dentry)
1586{
1587	struct dentry **victim = _data;
1588	if (d_mountpoint(dentry)) {
1589		*victim = dget_dlock(dentry);
1590		return D_WALK_QUIT;
1591	}
1592	return D_WALK_CONTINUE;
1593}
1594
1595/**
1596 * d_invalidate - detach submounts, prune dcache, and drop
1597 * @dentry: dentry to invalidate (aka detach, prune and drop)
1598 */
1599void d_invalidate(struct dentry *dentry)
1600{
1601	bool had_submounts = false;
1602	spin_lock(&dentry->d_lock);
1603	if (d_unhashed(dentry)) {
1604		spin_unlock(&dentry->d_lock);
1605		return;
1606	}
1607	__d_drop(dentry);
1608	spin_unlock(&dentry->d_lock);
1609
1610	/* Negative dentries can be dropped without further checks */
1611	if (!dentry->d_inode)
1612		return;
1613
1614	shrink_dcache_parent(dentry);
1615	for (;;) {
1616		struct dentry *victim = NULL;
1617		d_walk(dentry, &victim, find_submount);
1618		if (!victim) {
1619			if (had_submounts)
1620				shrink_dcache_parent(dentry);
1621			return;
1622		}
1623		had_submounts = true;
1624		detach_mounts(victim);
1625		dput(victim);
1626	}
1627}
1628EXPORT_SYMBOL(d_invalidate);
1629
1630/**
1631 * __d_alloc	-	allocate a dcache entry
1632 * @sb: filesystem it will belong to
1633 * @name: qstr of the name
1634 *
1635 * Allocates a dentry. It returns %NULL if there is insufficient memory
 1636 * available. On success the dentry is returned. The name passed in is
1637 * copied and the copy passed in may be reused after this call.
1638 */
1639 
1640static struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
1641{
1642	struct dentry *dentry;
1643	char *dname;
1644	int err;
1645
1646	dentry = kmem_cache_alloc_lru(dentry_cache, &sb->s_dentry_lru,
1647				      GFP_KERNEL);
1648	if (!dentry)
1649		return NULL;
1650
1651	/*
1652	 * We guarantee that the inline name is always NUL-terminated.
1653	 * This way the memcpy() done by the name switching in rename
1654	 * will still always have a NUL at the end, even if we might
1655	 * be overwriting an internal NUL character
1656	 */
1657	dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
1658	if (unlikely(!name)) {
1659		name = &slash_name;
1660		dname = dentry->d_iname;
1661	} else if (name->len > DNAME_INLINE_LEN-1) {
1662		size_t size = offsetof(struct external_name, name[1]);
1663		struct external_name *p = kmalloc(size + name->len,
1664						  GFP_KERNEL_ACCOUNT |
1665						  __GFP_RECLAIMABLE);
1666		if (!p) {
1667			kmem_cache_free(dentry_cache, dentry); 
1668			return NULL;
1669		}
1670		atomic_set(&p->u.count, 1);
1671		dname = p->name;
1672	} else  {
1673		dname = dentry->d_iname;
1674	}	
1675
1676	dentry->d_name.len = name->len;
1677	dentry->d_name.hash = name->hash;
1678	memcpy(dname, name->name, name->len);
1679	dname[name->len] = 0;
1680
1681	/* Make sure we always see the terminating NUL character */
1682	smp_store_release(&dentry->d_name.name, dname); /* ^^^ */
1683
1684	dentry->d_lockref.count = 1;
1685	dentry->d_flags = 0;
1686	spin_lock_init(&dentry->d_lock);
1687	seqcount_spinlock_init(&dentry->d_seq, &dentry->d_lock);
1688	dentry->d_inode = NULL;
1689	dentry->d_parent = dentry;
1690	dentry->d_sb = sb;
1691	dentry->d_op = NULL;
1692	dentry->d_fsdata = NULL;
1693	INIT_HLIST_BL_NODE(&dentry->d_hash);
1694	INIT_LIST_HEAD(&dentry->d_lru);
1695	INIT_HLIST_HEAD(&dentry->d_children);
1696	INIT_HLIST_NODE(&dentry->d_u.d_alias);
1697	INIT_HLIST_NODE(&dentry->d_sib);
1698	d_set_d_op(dentry, dentry->d_sb->s_d_op);
1699
1700	if (dentry->d_op && dentry->d_op->d_init) {
1701		err = dentry->d_op->d_init(dentry);
1702		if (err) {
1703			if (dname_external(dentry))
1704				kfree(external_name(dentry));
1705			kmem_cache_free(dentry_cache, dentry);
1706			return NULL;
1707		}
1708	}
1709
1710	this_cpu_inc(nr_dentry);
1711
1712	return dentry;
1713}
1714
1715/**
1716 * d_alloc	-	allocate a dcache entry
1717 * @parent: parent of entry to allocate
1718 * @name: qstr of the name
1719 *
1720 * Allocates a dentry. It returns %NULL if there is insufficient memory
 1721 * available. On success the dentry is returned. The name passed in is
1722 * copied and the copy passed in may be reused after this call.
1723 */
1724struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
1725{
1726	struct dentry *dentry = __d_alloc(parent->d_sb, name);
1727	if (!dentry)
1728		return NULL;
1729	spin_lock(&parent->d_lock);
1730	/*
1731	 * don't need child lock because it is not subject
1732	 * to concurrency here
1733	 */
1734	dentry->d_parent = dget_dlock(parent);
1735	hlist_add_head(&dentry->d_sib, &parent->d_children);
1736	spin_unlock(&parent->d_lock);
1737
1738	return dentry;
1739}
1740EXPORT_SYMBOL(d_alloc);
1741
1742struct dentry *d_alloc_anon(struct super_block *sb)
1743{
1744	return __d_alloc(sb, NULL);
1745}
1746EXPORT_SYMBOL(d_alloc_anon);
1747
1748struct dentry *d_alloc_cursor(struct dentry * parent)
1749{
1750	struct dentry *dentry = d_alloc_anon(parent->d_sb);
1751	if (dentry) {
1752		dentry->d_flags |= DCACHE_DENTRY_CURSOR;
1753		dentry->d_parent = dget(parent);
1754	}
1755	return dentry;
1756}
1757
1758/**
1759 * d_alloc_pseudo - allocate a dentry (for lookup-less filesystems)
1760 * @sb: the superblock
1761 * @name: qstr of the name
1762 *
1763 * For a filesystem that just pins its dentries in memory and never
1764 * performs lookups at all, return an unhashed IS_ROOT dentry.
 1765 * This is used for pipes, sockets et al. - the stuff that should
1766 * never be anyone's children or parents.  Unlike all other
1767 * dentries, these will not have RCU delay between dropping the
1768 * last reference and freeing them.
1769 *
1770 * The only user is alloc_file_pseudo() and that's what should
1771 * be considered a public interface.  Don't use directly.
1772 */
1773struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name)
1774{
1775	static const struct dentry_operations anon_ops = {
1776		.d_dname = simple_dname
1777	};
1778	struct dentry *dentry = __d_alloc(sb, name);
1779	if (likely(dentry)) {
1780		dentry->d_flags |= DCACHE_NORCU;
1781		if (!sb->s_d_op)
1782			d_set_d_op(dentry, &anon_ops);
1783	}
1784	return dentry;
1785}
1786
1787struct dentry *d_alloc_name(struct dentry *parent, const char *name)
1788{
1789	struct qstr q;
1790
1791	q.name = name;
1792	q.hash_len = hashlen_string(parent, name);
1793	return d_alloc(parent, &q);
1794}
1795EXPORT_SYMBOL(d_alloc_name);
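/*
 * Usage sketch (hypothetical caller): d_alloc_name() hashes the string
 * with hashlen_string() and then behaves exactly like d_alloc():
 *
 *	struct dentry *child = d_alloc_name(parent, "example");
 *	if (!child)
 *		return -ENOMEM;
 */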
1796
1797void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
1798{
1799	WARN_ON_ONCE(dentry->d_op);
1800	WARN_ON_ONCE(dentry->d_flags & (DCACHE_OP_HASH	|
1801				DCACHE_OP_COMPARE	|
1802				DCACHE_OP_REVALIDATE	|
1803				DCACHE_OP_WEAK_REVALIDATE	|
1804				DCACHE_OP_DELETE	|
1805				DCACHE_OP_REAL));
1806	dentry->d_op = op;
1807	if (!op)
1808		return;
1809	if (op->d_hash)
1810		dentry->d_flags |= DCACHE_OP_HASH;
1811	if (op->d_compare)
1812		dentry->d_flags |= DCACHE_OP_COMPARE;
1813	if (op->d_revalidate)
1814		dentry->d_flags |= DCACHE_OP_REVALIDATE;
1815	if (op->d_weak_revalidate)
1816		dentry->d_flags |= DCACHE_OP_WEAK_REVALIDATE;
1817	if (op->d_delete)
1818		dentry->d_flags |= DCACHE_OP_DELETE;
1819	if (op->d_prune)
1820		dentry->d_flags |= DCACHE_OP_PRUNE;
1821	if (op->d_real)
1822		dentry->d_flags |= DCACHE_OP_REAL;
1823
1824}
1825EXPORT_SYMBOL(d_set_d_op);
1826
1827static unsigned d_flags_for_inode(struct inode *inode)
1828{
1829	unsigned add_flags = DCACHE_REGULAR_TYPE;
1830
1831	if (!inode)
1832		return DCACHE_MISS_TYPE;
1833
1834	if (S_ISDIR(inode->i_mode)) {
1835		add_flags = DCACHE_DIRECTORY_TYPE;
1836		if (unlikely(!(inode->i_opflags & IOP_LOOKUP))) {
1837			if (unlikely(!inode->i_op->lookup))
1838				add_flags = DCACHE_AUTODIR_TYPE;
1839			else
1840				inode->i_opflags |= IOP_LOOKUP;
1841		}
1842		goto type_determined;
1843	}
1844
1845	if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) {
1846		if (unlikely(inode->i_op->get_link)) {
1847			add_flags = DCACHE_SYMLINK_TYPE;
1848			goto type_determined;
1849		}
1850		inode->i_opflags |= IOP_NOFOLLOW;
1851	}
1852
1853	if (unlikely(!S_ISREG(inode->i_mode)))
1854		add_flags = DCACHE_SPECIAL_TYPE;
1855
1856type_determined:
1857	if (unlikely(IS_AUTOMOUNT(inode)))
1858		add_flags |= DCACHE_NEED_AUTOMOUNT;
1859	return add_flags;
1860}
1861
1862static void __d_instantiate(struct dentry *dentry, struct inode *inode)
1863{
1864	unsigned add_flags = d_flags_for_inode(inode);
1865	WARN_ON(d_in_lookup(dentry));
1866
1867	spin_lock(&dentry->d_lock);
1868	/*
1869	 * The negative counter only tracks dentries on the LRU. Don't dec if
1870	 * d_lru is on another list.
1871	 */
1872	if ((dentry->d_flags &
1873	     (DCACHE_LRU_LIST|DCACHE_SHRINK_LIST)) == DCACHE_LRU_LIST)
1874		this_cpu_dec(nr_dentry_negative);
1875	hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
1876	raw_write_seqcount_begin(&dentry->d_seq);
1877	__d_set_inode_and_type(dentry, inode, add_flags);
1878	raw_write_seqcount_end(&dentry->d_seq);
1879	fsnotify_update_flags(dentry);
1880	spin_unlock(&dentry->d_lock);
1881}
1882
1883/**
1884 * d_instantiate - fill in inode information for a dentry
1885 * @entry: dentry to complete
1886 * @inode: inode to attach to this dentry
1887 *
1888 * Fill in inode information in the entry.
1889 *
1890 * This turns negative dentries into productive full members
1891 * of society.
1892 *
1893 * NOTE! This assumes that the inode count has been incremented
1894 * (or otherwise set) by the caller to indicate that it is now
1895 * in use by the dcache.
1896 */
1897 
1898void d_instantiate(struct dentry *entry, struct inode * inode)
1899{
1900	BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
1901	if (inode) {
1902		security_d_instantiate(entry, inode);
1903		spin_lock(&inode->i_lock);
1904		__d_instantiate(entry, inode);
1905		spin_unlock(&inode->i_lock);
1906	}
1907}
1908EXPORT_SYMBOL(d_instantiate);
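/*
 * Example (illustrative sketch, not upstream code): the tail of a simplified
 * file-creation path.  example_new_inode() is a hypothetical helper that
 * returns a fully set up inode whose refcount already accounts for the
 * dcache reference that d_instantiate() records.
 */
static struct inode *example_new_inode(struct super_block *sb, umode_t mode);

static int example_create(struct inode *dir, struct dentry *dentry,
			  umode_t mode)
{
	struct inode *inode = example_new_inode(dir->i_sb, mode);

	if (!inode)
		return -ENOSPC;
	d_instantiate(dentry, inode);	/* dentry goes from negative to positive */
	return 0;
}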
1909
1910/*
1911 * This should be equivalent to d_instantiate() + unlock_new_inode(),
1912 * with lockdep-related part of unlock_new_inode() done before
1913 * anything else.  Use that instead of open-coding d_instantiate()/
1914 * unlock_new_inode() combinations.
1915 */
1916void d_instantiate_new(struct dentry *entry, struct inode *inode)
1917{
1918	BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
1919	BUG_ON(!inode);
1920	lockdep_annotate_inode_mutex_key(inode);
1921	security_d_instantiate(entry, inode);
1922	spin_lock(&inode->i_lock);
1923	__d_instantiate(entry, inode);
1924	WARN_ON(!(inode->i_state & I_NEW));
1925	inode->i_state &= ~I_NEW & ~I_CREATING;
1926	/*
1927	 * Pairs with the barrier in prepare_to_wait_event() to make sure
1928	 * ___wait_var_event() either sees the bit cleared or
1929	 * waitqueue_active() check in wake_up_var() sees the waiter.
1930	 */
1931	smp_mb();
1932	inode_wake_up_bit(inode, __I_NEW);
1933	spin_unlock(&inode->i_lock);
1934}
1935EXPORT_SYMBOL(d_instantiate_new);
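/*
 * Example (illustrative sketch): a creation path that allocated its inode
 * with new_inode() and published it with insert_inode_locked() -- so the
 * inode is still marked I_NEW -- should finish with
 *
 *	d_instantiate_new(dentry, inode);
 *
 * rather than open-coding d_instantiate() followed by unlock_new_inode().
 */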
1936
1937struct dentry *d_make_root(struct inode *root_inode)
1938{
1939	struct dentry *res = NULL;
1940
1941	if (root_inode) {
1942		res = d_alloc_anon(root_inode->i_sb);
1943		if (res)
1944			d_instantiate(res, root_inode);
1945		else
1946			iput(root_inode);
1947	}
1948	return res;
1949}
1950EXPORT_SYMBOL(d_make_root);
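/*
 * Example (illustrative sketch, not upstream code): typical fill_super()
 * usage.  d_make_root() consumes the inode reference even on failure, so no
 * iput() is needed in the error path.  example_get_root_inode() is a
 * hypothetical helper.
 */
static struct inode *example_get_root_inode(struct super_block *sb);

static int example_fill_super(struct super_block *sb, void *data, int silent)
{
	sb->s_root = d_make_root(example_get_root_inode(sb));
	if (!sb->s_root)
		return -ENOMEM;
	return 0;
}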
1951
1952static struct dentry *__d_obtain_alias(struct inode *inode, bool disconnected)
1953{
1954	struct super_block *sb;
1955	struct dentry *new, *res;
1956
1957	if (!inode)
1958		return ERR_PTR(-ESTALE);
1959	if (IS_ERR(inode))
1960		return ERR_CAST(inode);
1961
1962	sb = inode->i_sb;
1963
1964	res = d_find_any_alias(inode); /* existing alias? */
1965	if (res)
1966		goto out;
1967
1968	new = d_alloc_anon(sb);
1969	if (!new) {
1970		res = ERR_PTR(-ENOMEM);
1971		goto out;
1972	}
1973
1974	security_d_instantiate(new, inode);
1975	spin_lock(&inode->i_lock);
1976	res = __d_find_any_alias(inode); /* recheck under lock */
1977	if (likely(!res)) { /* still no alias, attach a disconnected dentry */
1978		unsigned add_flags = d_flags_for_inode(inode);
1979
1980		if (disconnected)
1981			add_flags |= DCACHE_DISCONNECTED;
1982
1983		spin_lock(&new->d_lock);
1984		__d_set_inode_and_type(new, inode, add_flags);
1985		hlist_add_head(&new->d_u.d_alias, &inode->i_dentry);
1986		if (!disconnected) {
1987			hlist_bl_lock(&sb->s_roots);
1988			hlist_bl_add_head(&new->d_hash, &sb->s_roots);
1989			hlist_bl_unlock(&sb->s_roots);
1990		}
1991		spin_unlock(&new->d_lock);
1992		spin_unlock(&inode->i_lock);
1993		inode = NULL; /* consumed by new->d_inode */
1994		res = new;
1995	} else {
1996		spin_unlock(&inode->i_lock);
1997		dput(new);
1998	}
1999
2000 out:
2001	iput(inode);
2002	return res;
2003}
2004
2005/**
2006 * d_obtain_alias - find or allocate a DISCONNECTED dentry for a given inode
2007 * @inode: inode to allocate the dentry for
2008 *
2009 * Obtain a dentry for an inode resulting from NFS filehandle conversion or
2010 * similar open by handle operations.  The returned dentry may be anonymous,
2011 * or may have a full name (if the inode was already in the cache).
2012 *
2013 * When called on a directory inode, we must ensure that the inode only ever
2014 * has one dentry.  If a dentry is found, that is returned instead of
2015 * allocating a new one.
2016 *
2017 * On successful return, the reference to the inode has been transferred
2018 * to the dentry.  In case of an error the reference on the inode is released.
2019 * To make it easier to use in export operations a %NULL or IS_ERR inode may
2020 * be passed in and the error will be propagated to the return value,
2021 * with a %NULL @inode replaced by ERR_PTR(-ESTALE).
2022 */
2023struct dentry *d_obtain_alias(struct inode *inode)
2024{
2025	return __d_obtain_alias(inode, true);
2026}
2027EXPORT_SYMBOL(d_obtain_alias);
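/*
 * Example (illustrative sketch, not upstream code): the classic caller is an
 * export_operations ->fh_to_dentry() method.  Because d_obtain_alias() copes
 * with NULL and IS_ERR inodes, the helper's result can be passed straight
 * through.  myfs_iget() is hypothetical; struct fid comes from
 * <linux/exportfs.h>.
 */
static struct inode *myfs_iget(struct super_block *sb, u32 ino, u32 gen);

static struct dentry *example_fh_to_dentry(struct super_block *sb,
					    struct fid *fid,
					    int fh_len, int fh_type)
{
	if (fh_len < 2)
		return NULL;
	return d_obtain_alias(myfs_iget(sb, fid->i32.ino, fid->i32.gen));
}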
2028
2029/**
2030 * d_obtain_root - find or allocate a dentry for a given inode
2031 * @inode: inode to allocate the dentry for
2032 *
2033 * Obtain an IS_ROOT dentry for the root of a filesystem.
2034 *
2035 * We must ensure that directory inodes only ever have one dentry.  If a
2036 * dentry is found, that is returned instead of allocating a new one.
2037 *
2038 * On successful return, the reference to the inode has been transferred
2039 * to the dentry.  In case of an error the reference on the inode is
2040 * released.  A %NULL or IS_ERR inode may be passed in and the error
2041 * will be propagated to the return value, with a %NULL @inode
2042 * replaced by ERR_PTR(-ESTALE).
2043 */
2044struct dentry *d_obtain_root(struct inode *inode)
2045{
2046	return __d_obtain_alias(inode, false);
2047}
2048EXPORT_SYMBOL(d_obtain_root);
2049
2050/**
2051 * d_add_ci - lookup or allocate new dentry with case-exact name
2052 * @dentry: the negative dentry that was passed to the parent's lookup func
2053 * @inode:  the inode case-insensitive lookup has found
2054 * @name:   the case-exact name to be associated with the returned dentry
2055 *
2056 * This is to avoid filling the dcache with case-insensitive names to the
2057 * same inode; only the actual correct case is stored in the dcache for
2058 * case-insensitive filesystems.
2059 *
2060 * For a case-insensitive lookup match and if the case-exact dentry
2061 * already exists in the dcache, use it and return it.
2062 *
2063 * If no entry exists with the exact case name, allocate new dentry with
2064 * the exact case, and return the spliced entry.
2065 */
2066struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
2067			struct qstr *name)
2068{
2069	struct dentry *found, *res;
2070
2071	/*
2072	 * First check if a dentry matching the name already exists;
2073	 * if not, go ahead and create it now.
2074	 */
2075	found = d_hash_and_lookup(dentry->d_parent, name);
2076	if (found) {
2077		iput(inode);
2078		return found;
2079	}
2080	if (d_in_lookup(dentry)) {
2081		found = d_alloc_parallel(dentry->d_parent, name,
2082					dentry->d_wait);
2083		if (IS_ERR(found) || !d_in_lookup(found)) {
2084			iput(inode);
2085			return found;
2086		}
2087	} else {
2088		found = d_alloc(dentry->d_parent, name);
2089		if (!found) {
2090			iput(inode);
2091			return ERR_PTR(-ENOMEM);
2092		} 
2093	}
2094	res = d_splice_alias(inode, found);
2095	if (res) {
2096		d_lookup_done(found);
2097		dput(found);
2098		return res;
2099	}
2100	return found;
2101}
2102EXPORT_SYMBOL(d_add_ci);
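/*
 * Example (illustrative sketch): inside the ->lookup() method of a
 * case-insensitive filesystem, once the directory scan has found the inode
 * and the on-disk, case-exact name (held in a hypothetical qstr
 * "exact_name"), finish with
 *
 *	return d_add_ci(dentry, inode, &exact_name);
 *
 * instead of d_add(), so that only the exact-case dentry gets hashed.
 */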
2103
2104/**
2105 * d_same_name - compare dentry name with case-exact name
2106 * @dentry: the negative dentry that was passed to the parent's lookup func
2107 * @parent: parent dentry
2108 * @name:   the case-exact name to be associated with the returned dentry
2109 *
2110 * Return: true if the names are the same, false otherwise
2111 */
2112bool d_same_name(const struct dentry *dentry, const struct dentry *parent,
2113		 const struct qstr *name)
2114{
2115	if (likely(!(parent->d_flags & DCACHE_OP_COMPARE))) {
2116		if (dentry->d_name.len != name->len)
2117			return false;
2118		return dentry_cmp(dentry, name->name, name->len) == 0;
2119	}
2120	return parent->d_op->d_compare(dentry,
2121				       dentry->d_name.len, dentry->d_name.name,
2122				       name) == 0;
2123}
2124EXPORT_SYMBOL_GPL(d_same_name);
2125
2126/*
2127 * This is __d_lookup_rcu() when the parent dentry has
2128 * DCACHE_OP_COMPARE, which makes things much nastier.
2129 */
2130static noinline struct dentry *__d_lookup_rcu_op_compare(
2131	const struct dentry *parent,
2132	const struct qstr *name,
2133	unsigned *seqp)
2134{
2135	u64 hashlen = name->hash_len;
2136	struct hlist_bl_head *b = d_hash(hashlen);
2137	struct hlist_bl_node *node;
2138	struct dentry *dentry;
2139
2140	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
2141		int tlen;
2142		const char *tname;
2143		unsigned seq;
2144
2145seqretry:
2146		seq = raw_seqcount_begin(&dentry->d_seq);
2147		if (dentry->d_parent != parent)
2148			continue;
2149		if (d_unhashed(dentry))
2150			continue;
2151		if (dentry->d_name.hash != hashlen_hash(hashlen))
2152			continue;
2153		tlen = dentry->d_name.len;
2154		tname = dentry->d_name.name;
2155		/* we want a consistent (name,len) pair */
2156		if (read_seqcount_retry(&dentry->d_seq, seq)) {
2157			cpu_relax();
2158			goto seqretry;
2159		}
2160		if (parent->d_op->d_compare(dentry, tlen, tname, name) != 0)
2161			continue;
2162		*seqp = seq;
2163		return dentry;
2164	}
2165	return NULL;
2166}
2167
2168/**
2169 * __d_lookup_rcu - search for a dentry (racy, store-free)
2170 * @parent: parent dentry
2171 * @name: qstr of name we wish to find
2172 * @seqp: returns d_seq value at the point where the dentry was found
2173 * Returns: dentry, or NULL
2174 *
2175 * __d_lookup_rcu is the dcache lookup function for rcu-walk name
2176 * resolution (store-free path walking) design described in
2177 * Documentation/filesystems/path-lookup.txt.
2178 *
2179 * This is not to be used outside core vfs.
2180 *
2181 * __d_lookup_rcu must only be used in rcu-walk mode, ie. with vfsmount lock
2182 * held, and rcu_read_lock held. The returned dentry must not be stored
2183 * (or used beyond the rcu-walk) without taking d_lock and checking the
2184 * d_seq sequence count against the value returned in @seqp.
2185 *
2186 * Alternatively, __d_lookup_rcu may be called again to look up the child of
2187 * the returned dentry, so long as its parent's seqlock is checked after the
2188 * child is looked up. Thus, an interlocking stepping of sequence lock checks
2189 * is formed, giving integrity down the path walk.
2190 *
2191 * NOTE! The caller *has* to check the resulting dentry against the sequence
2192 * number we've returned before using any of the resulting dentry state!
2193 */
2194struct dentry *__d_lookup_rcu(const struct dentry *parent,
2195				const struct qstr *name,
2196				unsigned *seqp)
2197{
2198	u64 hashlen = name->hash_len;
2199	const unsigned char *str = name->name;
2200	struct hlist_bl_head *b = d_hash(hashlen);
2201	struct hlist_bl_node *node;
2202	struct dentry *dentry;
2203
2204	/*
2205	 * Note: There is significant duplication with __d_lookup which is
2206	 * required to prevent single-threaded performance regressions,
2207	 * especially on architectures where smp_rmb (in seqcounts) is costly.
2208	 * Keep the two functions in sync.
2209	 */
2210
2211	if (unlikely(parent->d_flags & DCACHE_OP_COMPARE))
2212		return __d_lookup_rcu_op_compare(parent, name, seqp);
2213
2214	/*
2215	 * The hash list is protected using RCU.
2216	 *
2217	 * Carefully use d_seq when comparing a candidate dentry, to avoid
2218	 * races with d_move().
2219	 *
2220	 * It is possible that concurrent renames can mess up our list
2221	 * walk here and result in missing our dentry, producing a
2222	 * false-negative result. d_lookup() protects against concurrent
2223	 * renames using rename_lock seqlock.
2224	 *
2225	 * See Documentation/filesystems/path-lookup.txt for more details.
2226	 */
2227	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
2228		unsigned seq;
2229
2230		/*
2231		 * The dentry sequence count protects us from concurrent
2232		 * renames, and thus protects parent and name fields.
2233		 *
2234		 * The caller must perform a seqcount check in order
2235		 * to do anything useful with the returned dentry.
2236		 *
2237		 * NOTE! We do a "raw" seqcount_begin here. That means that
2238		 * we don't wait for the sequence count to stabilize if it
2239		 * is in the middle of a sequence change. If we do the slow
2240		 * dentry compare, we will do seqretries until it is stable,
2241		 * and if we end up with a successful lookup, we actually
2242		 * want to exit RCU lookup anyway.
2243		 *
2244		 * Note that raw_seqcount_begin still *does* smp_rmb(), so
2245		 * we are still guaranteed NUL-termination of ->d_name.name.
2246		 */
2247		seq = raw_seqcount_begin(&dentry->d_seq);
2248		if (dentry->d_parent != parent)
2249			continue;
2250		if (d_unhashed(dentry))
2251			continue;
2252		if (dentry->d_name.hash_len != hashlen)
2253			continue;
2254		if (dentry_cmp(dentry, str, hashlen_len(hashlen)) != 0)
2255			continue;
2256		*seqp = seq;
2257		return dentry;
2258	}
2259	return NULL;
2260}
2261
2262/**
2263 * d_lookup - search for a dentry
2264 * @parent: parent dentry
2265 * @name: qstr of name we wish to find
2266 * Returns: dentry, or NULL
2267 *
2268 * d_lookup searches the children of the parent dentry for the name in
2269 * question. If the dentry is found its reference count is incremented and the
2270 * dentry is returned. The caller must use dput to free the entry when it has
2271 * finished using it. %NULL is returned if the dentry does not exist.
2272 */
2273struct dentry *d_lookup(const struct dentry *parent, const struct qstr *name)
2274{
2275	struct dentry *dentry;
2276	unsigned seq;
2277
2278	do {
2279		seq = read_seqbegin(&rename_lock);
2280		dentry = __d_lookup(parent, name);
2281		if (dentry)
2282			break;
2283	} while (read_seqretry(&rename_lock, seq));
2284	return dentry;
2285}
2286EXPORT_SYMBOL(d_lookup);
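/*
 * Example (illustrative sketch, not upstream code): probing the dcache
 * without touching the filesystem.  d_lookup() returns a referenced dentry,
 * so a successful probe must be balanced with dput().  The qstr must already
 * carry a valid hash; d_hash_and_lookup() below computes it from scratch.
 */
static bool example_child_is_cached(struct dentry *parent,
				    const struct qstr *name)
{
	struct dentry *child = d_lookup(parent, name);

	if (!child)
		return false;
	dput(child);
	return true;
}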
2287
2288/**
2289 * __d_lookup - search for a dentry (racy)
2290 * @parent: parent dentry
2291 * @name: qstr of name we wish to find
2292 * Returns: dentry, or NULL
2293 *
2294 * __d_lookup is like d_lookup, however it may (rarely) return a
2295 * false-negative result due to unrelated rename activity.
2296 *
2297 * __d_lookup is slightly faster by avoiding rename_lock read seqlock,
2298 * however it must be used carefully, eg. with a following d_lookup in
2299 * the case of failure.
2300 *
2301 * __d_lookup callers must be commented.
2302 */
2303struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
2304{
2305	unsigned int hash = name->hash;
2306	struct hlist_bl_head *b = d_hash(hash);
2307	struct hlist_bl_node *node;
2308	struct dentry *found = NULL;
2309	struct dentry *dentry;
2310
2311	/*
2312	 * Note: There is significant duplication with __d_lookup_rcu which is
2313	 * required to prevent single-threaded performance regressions,
2314	 * especially on architectures where smp_rmb (in seqcounts) is costly.
2315	 * Keep the two functions in sync.
2316	 */
2317
2318	/*
2319	 * The hash list is protected using RCU.
2320	 *
2321	 * Take d_lock when comparing a candidate dentry, to avoid races
2322	 * with d_move().
2323	 *
2324	 * It is possible that concurrent renames can mess up our list
2325	 * walk here and result in missing our dentry, producing a
2326	 * false-negative result. d_lookup() protects against concurrent
2327	 * renames using rename_lock seqlock.
2328	 *
2329	 * See Documentation/filesystems/path-lookup.txt for more details.
2330	 */
2331	rcu_read_lock();
2332	
2333	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
2334
2335		if (dentry->d_name.hash != hash)
2336			continue;
2337
2338		spin_lock(&dentry->d_lock);
2339		if (dentry->d_parent != parent)
2340			goto next;
2341		if (d_unhashed(dentry))
2342			goto next;
2343
2344		if (!d_same_name(dentry, parent, name))
2345			goto next;
2346
2347		dentry->d_lockref.count++;
2348		found = dentry;
2349		spin_unlock(&dentry->d_lock);
2350		break;
2351next:
2352		spin_unlock(&dentry->d_lock);
2353 	}
2354 	rcu_read_unlock();
2355
2356 	return found;
2357}
2358
2359/**
2360 * d_hash_and_lookup - hash the qstr then search for a dentry
2361 * @dir: Directory to search in
2362 * @name: qstr of name we wish to find
2363 *
2364 * On lookup failure NULL is returned; on a bad name, ERR_PTR(-error)
2365 */
2366struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
2367{
2368	/*
2369	 * Check for a fs-specific hash function. Note that we must
2370	 * calculate the standard hash first, as the d_op->d_hash()
2371	 * routine may choose to leave the hash value unchanged.
2372	 */
2373	name->hash = full_name_hash(dir, name->name, name->len);
2374	if (dir->d_flags & DCACHE_OP_HASH) {
2375		int err = dir->d_op->d_hash(dir, name);
2376		if (unlikely(err < 0))
2377			return ERR_PTR(err);
2378	}
2379	return d_lookup(dir, name);
2380}
2381EXPORT_SYMBOL(d_hash_and_lookup);
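/*
 * Example (illustrative sketch): because this helper computes the hash
 * itself (and lets a DCACHE_OP_HASH filesystem adjust it), a plain string
 * suffices:
 *
 *	struct qstr q = QSTR_INIT(name, strlen(name));
 *	struct dentry *child = d_hash_and_lookup(dir, &q);
 *
 * The result may be NULL (not found) or an ERR_PTR from ->d_hash().
 */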
2382
2383/*
2384 * When a file is deleted, we have two options:
2385 * - turn this dentry into a negative dentry
2386 * - unhash this dentry and free it.
2387 *
2388 * Usually, we want to just turn this into
2389 * a negative dentry, but if anybody else is
2390 * currently using the dentry or the inode
2391 * we can't do that and we fall back on removing
2392 * it from the hash queues and waiting for
2393 * it to be deleted later when it has no users
2394 */
2395 
2396/**
2397 * d_delete - delete a dentry
2398 * @dentry: The dentry to delete
2399 *
2400 * Turn the dentry into a negative dentry if possible, otherwise
2401 * remove it from the hash queues so it can be deleted later
2402 */
2403 
2404void d_delete(struct dentry * dentry)
2405{
2406	struct inode *inode = dentry->d_inode;
2407
2408	spin_lock(&inode->i_lock);
2409	spin_lock(&dentry->d_lock);
2410	/*
2411	 * Are we the only user?
2412	 */
2413	if (dentry->d_lockref.count == 1) {
2414		if (dentry_negative_policy)
2415			__d_drop(dentry);
2416		dentry->d_flags &= ~DCACHE_CANT_MOUNT;
2417		dentry_unlink_inode(dentry);
2418	} else {
2419		__d_drop(dentry);
2420		spin_unlock(&dentry->d_lock);
2421		spin_unlock(&inode->i_lock);
2422	}
2423}
2424EXPORT_SYMBOL(d_delete);
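/*
 * Example (illustrative sketch, not upstream code): d_delete() is normally
 * called by the VFS itself -- vfs_unlink() invokes it once the filesystem's
 * ->unlink() method has succeeded.  A simplified version of that pattern,
 * with the on-disk removal hidden behind a hypothetical helper:
 */
static int example_remove_on_disk(struct inode *dir, struct dentry *dentry);

static int example_unlink(struct inode *dir, struct dentry *dentry)
{
	int err = example_remove_on_disk(dir, dentry);

	if (!err)
		d_delete(dentry);	/* make negative, or unhash if still in use */
	return err;
}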
2425
2426static void __d_rehash(struct dentry *entry)
2427{
2428	struct hlist_bl_head *b = d_hash(entry->d_name.hash);
2429
2430	hlist_bl_lock(b);
2431	hlist_bl_add_head_rcu(&entry->d_hash, b);
2432	hlist_bl_unlock(b);
2433}
2434
2435/**
2436 * d_rehash	- add an entry back to the hash
2437 * @entry: dentry to add to the hash
2438 *
2439 * Adds a dentry to the hash according to its name.
2440 */
2441 
2442void d_rehash(struct dentry * entry)
2443{
2444	spin_lock(&entry->d_lock);
2445	__d_rehash(entry);
2446	spin_unlock(&entry->d_lock);
2447}
2448EXPORT_SYMBOL(d_rehash);
2449
2450static inline unsigned start_dir_add(struct inode *dir)
2451{
2452	preempt_disable_nested();
2453	for (;;) {
2454		unsigned n = dir->i_dir_seq;
2455		if (!(n & 1) && cmpxchg(&dir->i_dir_seq, n, n + 1) == n)
2456			return n;
2457		cpu_relax();
2458	}
2459}
2460
2461static inline void end_dir_add(struct inode *dir, unsigned int n,
2462			       wait_queue_head_t *d_wait)
2463{
2464	smp_store_release(&dir->i_dir_seq, n + 2);
2465	preempt_enable_nested();
2466	wake_up_all(d_wait);
2467}
2468
2469static void d_wait_lookup(struct dentry *dentry)
2470{
2471	if (d_in_lookup(dentry)) {
2472		DECLARE_WAITQUEUE(wait, current);
2473		add_wait_queue(dentry->d_wait, &wait);
2474		do {
2475			set_current_state(TASK_UNINTERRUPTIBLE);
2476			spin_unlock(&dentry->d_lock);
2477			schedule();
2478			spin_lock(&dentry->d_lock);
2479		} while (d_in_lookup(dentry));
2480	}
2481}
2482
2483struct dentry *d_alloc_parallel(struct dentry *parent,
2484				const struct qstr *name,
2485				wait_queue_head_t *wq)
2486{
2487	unsigned int hash = name->hash;
2488	struct hlist_bl_head *b = in_lookup_hash(parent, hash);
2489	struct hlist_bl_node *node;
2490	struct dentry *new = d_alloc(parent, name);
2491	struct dentry *dentry;
2492	unsigned seq, r_seq, d_seq;
2493
2494	if (unlikely(!new))
2495		return ERR_PTR(-ENOMEM);
2496
2497retry:
2498	rcu_read_lock();
2499	seq = smp_load_acquire(&parent->d_inode->i_dir_seq);
2500	r_seq = read_seqbegin(&rename_lock);
2501	dentry = __d_lookup_rcu(parent, name, &d_seq);
2502	if (unlikely(dentry)) {
2503		if (!lockref_get_not_dead(&dentry->d_lockref)) {
2504			rcu_read_unlock();
2505			goto retry;
2506		}
2507		if (read_seqcount_retry(&dentry->d_seq, d_seq)) {
2508			rcu_read_unlock();
2509			dput(dentry);
2510			goto retry;
2511		}
2512		rcu_read_unlock();
2513		dput(new);
2514		return dentry;
2515	}
2516	if (unlikely(read_seqretry(&rename_lock, r_seq))) {
2517		rcu_read_unlock();
2518		goto retry;
2519	}
2520
2521	if (unlikely(seq & 1)) {
2522		rcu_read_unlock();
2523		goto retry;
2524	}
2525
2526	hlist_bl_lock(b);
2527	if (unlikely(READ_ONCE(parent->d_inode->i_dir_seq) != seq)) {
2528		hlist_bl_unlock(b);
2529		rcu_read_unlock();
2530		goto retry;
2531	}
2532	/*
2533	 * No changes for the parent since the beginning of d_lookup().
2534	 * Since all removals from the chain happen with hlist_bl_lock(),
2535	 * any potential in-lookup matches are going to stay here until
2536	 * we unlock the chain.  All fields are stable in everything
2537	 * we encounter.
2538	 */
2539	hlist_bl_for_each_entry(dentry, node, b, d_u.d_in_lookup_hash) {
2540		if (dentry->d_name.hash != hash)
2541			continue;
2542		if (dentry->d_parent != parent)
2543			continue;
2544		if (!d_same_name(dentry, parent, name))
2545			continue;
2546		hlist_bl_unlock(b);
2547		/* now we can try to grab a reference */
2548		if (!lockref_get_not_dead(&dentry->d_lockref)) {
2549			rcu_read_unlock();
2550			goto retry;
2551		}
2552
2553		rcu_read_unlock();
2554		/*
2555		 * somebody is likely to be still doing lookup for it;
2556		 * wait for them to finish
2557		 */
2558		spin_lock(&dentry->d_lock);
2559		d_wait_lookup(dentry);
2560		/*
2561		 * it's not in-lookup anymore; in principle we should repeat
2562		 * everything from dcache lookup, but it's likely to be what
2563		 * d_lookup() would've found anyway.  If it is, just return it;
2564		 * otherwise we really have to repeat the whole thing.
2565		 */
2566		if (unlikely(dentry->d_name.hash != hash))
2567			goto mismatch;
2568		if (unlikely(dentry->d_parent != parent))
2569			goto mismatch;
2570		if (unlikely(d_unhashed(dentry)))
2571			goto mismatch;
2572		if (unlikely(!d_same_name(dentry, parent, name)))
2573			goto mismatch;
2574		/* OK, it *is* a hashed match; return it */
2575		spin_unlock(&dentry->d_lock);
2576		dput(new);
2577		return dentry;
2578	}
2579	rcu_read_unlock();
2580	/* we can't take ->d_lock here; it's OK, though. */
2581	new->d_flags |= DCACHE_PAR_LOOKUP;
2582	new->d_wait = wq;
2583	hlist_bl_add_head(&new->d_u.d_in_lookup_hash, b);
2584	hlist_bl_unlock(b);
2585	return new;
2586mismatch:
2587	spin_unlock(&dentry->d_lock);
2588	dput(dentry);
2589	goto retry;
2590}
2591EXPORT_SYMBOL(d_alloc_parallel);
2592
2593/*
2594 * - Unhash the dentry
2595 * - Retrieve and clear the waitqueue head in dentry
2596 * - Return the waitqueue head
2597 */
2598static wait_queue_head_t *__d_lookup_unhash(struct dentry *dentry)
2599{
2600	wait_queue_head_t *d_wait;
2601	struct hlist_bl_head *b;
2602
2603	lockdep_assert_held(&dentry->d_lock);
2604
2605	b = in_lookup_hash(dentry->d_parent, dentry->d_name.hash);
2606	hlist_bl_lock(b);
2607	dentry->d_flags &= ~DCACHE_PAR_LOOKUP;
2608	__hlist_bl_del(&dentry->d_u.d_in_lookup_hash);
2609	d_wait = dentry->d_wait;
2610	dentry->d_wait = NULL;
2611	hlist_bl_unlock(b);
2612	INIT_HLIST_NODE(&dentry->d_u.d_alias);
2613	INIT_LIST_HEAD(&dentry->d_lru);
2614	return d_wait;
2615}
2616
2617void __d_lookup_unhash_wake(struct dentry *dentry)
2618{
2619	spin_lock(&dentry->d_lock);
2620	wake_up_all(__d_lookup_unhash(dentry));
2621	spin_unlock(&dentry->d_lock);
2622}
2623EXPORT_SYMBOL(__d_lookup_unhash_wake);
2624
2625/* inode->i_lock held if inode is non-NULL */
2626
2627static inline void __d_add(struct dentry *dentry, struct inode *inode)
2628{
2629	wait_queue_head_t *d_wait;
2630	struct inode *dir = NULL;
2631	unsigned n;
2632	spin_lock(&dentry->d_lock);
2633	if (unlikely(d_in_lookup(dentry))) {
2634		dir = dentry->d_parent->d_inode;
2635		n = start_dir_add(dir);
2636		d_wait = __d_lookup_unhash(dentry);
2637	}
2638	if (inode) {
2639		unsigned add_flags = d_flags_for_inode(inode);
2640		hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
2641		raw_write_seqcount_begin(&dentry->d_seq);
2642		__d_set_inode_and_type(dentry, inode, add_flags);
2643		raw_write_seqcount_end(&dentry->d_seq);
2644		fsnotify_update_flags(dentry);
2645	}
2646	__d_rehash(dentry);
2647	if (dir)
2648		end_dir_add(dir, n, d_wait);
2649	spin_unlock(&dentry->d_lock);
2650	if (inode)
2651		spin_unlock(&inode->i_lock);
2652}
2653
2654/**
2655 * d_add - add dentry to hash queues
2656 * @entry: dentry to add
2657 * @inode: The inode to attach to this dentry
2658 *
2659 * This adds the entry to the hash queues and attaches @inode to it.
2660 * The entry was actually filled in earlier during d_alloc().
2661 */
2662
2663void d_add(struct dentry *entry, struct inode *inode)
2664{
2665	if (inode) {
2666		security_d_instantiate(entry, inode);
2667		spin_lock(&inode->i_lock);
2668	}
2669	__d_add(entry, inode);
2670}
2671EXPORT_SYMBOL(d_add);
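/*
 * Example (illustrative sketch): the tail of a simple ->lookup() method on a
 * filesystem that is not exportable:
 *
 *	d_add(dentry, inode);	(a NULL inode creates a negative dentry)
 *	return NULL;		(the passed-in dentry was used as-is)
 *
 * Exportable filesystems should prefer d_splice_alias(), further below.
 */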
2672
2673/**
2674 * d_exact_alias - find and hash an exact unhashed alias
2675 * @entry: dentry to add
2676 * @inode: The inode to go with this dentry
2677 *
2678 * If an unhashed dentry with the same name/parent and desired
2679 * inode already exists, hash and return it.  Otherwise, return
2680 * NULL.
2681 *
2682 * Parent directory should be locked.
2683 */
2684struct dentry *d_exact_alias(struct dentry *entry, struct inode *inode)
2685{
2686	struct dentry *alias;
2687	unsigned int hash = entry->d_name.hash;
2688
2689	spin_lock(&inode->i_lock);
2690	hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
2691		/*
2692		 * Don't need alias->d_lock here, because aliases with
2693		 * d_parent == entry->d_parent are not subject to name or
2694		 * parent changes, because the parent inode i_mutex is held.
2695		 */
2696		if (alias->d_name.hash != hash)
2697			continue;
2698		if (alias->d_parent != entry->d_parent)
2699			continue;
2700		if (!d_same_name(alias, entry->d_parent, &entry->d_name))
2701			continue;
2702		spin_lock(&alias->d_lock);
2703		if (!d_unhashed(alias)) {
2704			spin_unlock(&alias->d_lock);
2705			alias = NULL;
2706		} else {
2707			dget_dlock(alias);
2708			__d_rehash(alias);
2709			spin_unlock(&alias->d_lock);
2710		}
2711		spin_unlock(&inode->i_lock);
2712		return alias;
2713	}
2714	spin_unlock(&inode->i_lock);
2715	return NULL;
2716}
2717EXPORT_SYMBOL(d_exact_alias);
2718
2719static void swap_names(struct dentry *dentry, struct dentry *target)
2720{
2721	if (unlikely(dname_external(target))) {
2722		if (unlikely(dname_external(dentry))) {
2723			/*
2724			 * Both external: swap the pointers
2725			 */
2726			swap(target->d_name.name, dentry->d_name.name);
2727		} else {
2728			/*
2729			 * dentry:internal, target:external.  Steal target's
2730			 * storage and make target internal.
2731			 */
2732			memcpy(target->d_iname, dentry->d_name.name,
2733					dentry->d_name.len + 1);
2734			dentry->d_name.name = target->d_name.name;
2735			target->d_name.name = target->d_iname;
2736		}
2737	} else {
2738		if (unlikely(dname_external(dentry))) {
2739			/*
2740			 * dentry:external, target:internal.  Give dentry's
2741			 * storage to target and make dentry internal
2742			 */
2743			memcpy(dentry->d_iname, target->d_name.name,
2744					target->d_name.len + 1);
2745			target->d_name.name = dentry->d_name.name;
2746			dentry->d_name.name = dentry->d_iname;
2747		} else {
2748			/*
2749			 * Both are internal.
2750			 */
2751			unsigned int i;
2752			BUILD_BUG_ON(!IS_ALIGNED(DNAME_INLINE_LEN, sizeof(long)));
2753			for (i = 0; i < DNAME_INLINE_LEN / sizeof(long); i++) {
2754				swap(((long *) &dentry->d_iname)[i],
2755				     ((long *) &target->d_iname)[i]);
2756			}
2757		}
2758	}
2759	swap(dentry->d_name.hash_len, target->d_name.hash_len);
2760}
2761
2762static void copy_name(struct dentry *dentry, struct dentry *target)
2763{
2764	struct external_name *old_name = NULL;
2765	if (unlikely(dname_external(dentry)))
2766		old_name = external_name(dentry);
2767	if (unlikely(dname_external(target))) {
2768		atomic_inc(&external_name(target)->u.count);
2769		dentry->d_name = target->d_name;
2770	} else {
2771		memcpy(dentry->d_iname, target->d_name.name,
2772				target->d_name.len + 1);
2773		dentry->d_name.name = dentry->d_iname;
2774		dentry->d_name.hash_len = target->d_name.hash_len;
2775	}
2776	if (old_name && likely(atomic_dec_and_test(&old_name->u.count)))
2777		kfree_rcu(old_name, u.head);
2778}
2779
2780/*
2781 * __d_move - move a dentry
2782 * @dentry: entry to move
2783 * @target: new dentry
2784 * @exchange: exchange the two dentries
2785 *
2786 * Update the dcache to reflect the move of a file name. Negative
2787 * dcache entries should not be moved in this way. Caller must hold
2788 * rename_lock, the i_rwsem of the source and target directories,
2789 * and the sb->s_vfs_rename_mutex if they differ. See lock_rename().
2790 */
2791static void __d_move(struct dentry *dentry, struct dentry *target,
2792		     bool exchange)
2793{
2794	struct dentry *old_parent, *p;
2795	wait_queue_head_t *d_wait;
2796	struct inode *dir = NULL;
2797	unsigned n;
2798
2799	WARN_ON(!dentry->d_inode);
2800	if (WARN_ON(dentry == target))
2801		return;
2802
2803	BUG_ON(d_ancestor(target, dentry));
2804	old_parent = dentry->d_parent;
2805	p = d_ancestor(old_parent, target);
2806	if (IS_ROOT(dentry)) {
2807		BUG_ON(p);
2808		spin_lock(&target->d_parent->d_lock);
2809	} else if (!p) {
2810		/* target is not a descendant of dentry->d_parent */
2811		spin_lock(&target->d_parent->d_lock);
2812		spin_lock_nested(&old_parent->d_lock, DENTRY_D_LOCK_NESTED);
2813	} else {
2814		BUG_ON(p == dentry);
2815		spin_lock(&old_parent->d_lock);
2816		if (p != target)
2817			spin_lock_nested(&target->d_parent->d_lock,
2818					DENTRY_D_LOCK_NESTED);
2819	}
2820	spin_lock_nested(&dentry->d_lock, 2);
2821	spin_lock_nested(&target->d_lock, 3);
2822
2823	if (unlikely(d_in_lookup(target))) {
2824		dir = target->d_parent->d_inode;
2825		n = start_dir_add(dir);
2826		d_wait = __d_lookup_unhash(target);
2827	}
2828
2829	write_seqcount_begin(&dentry->d_seq);
2830	write_seqcount_begin_nested(&target->d_seq, DENTRY_D_LOCK_NESTED);
2831
2832	/* unhash both */
2833	if (!d_unhashed(dentry))
2834		___d_drop(dentry);
2835	if (!d_unhashed(target))
2836		___d_drop(target);
2837
2838	/* ... and switch them in the tree */
2839	dentry->d_parent = target->d_parent;
2840	if (!exchange) {
2841		copy_name(dentry, target);
2842		target->d_hash.pprev = NULL;
2843		dentry->d_parent->d_lockref.count++;
2844		if (dentry != old_parent) /* wasn't IS_ROOT */
2845			WARN_ON(!--old_parent->d_lockref.count);
2846	} else {
2847		target->d_parent = old_parent;
2848		swap_names(dentry, target);
2849		if (!hlist_unhashed(&target->d_sib))
2850			__hlist_del(&target->d_sib);
2851		hlist_add_head(&target->d_sib, &target->d_parent->d_children);
2852		__d_rehash(target);
2853		fsnotify_update_flags(target);
2854	}
2855	if (!hlist_unhashed(&dentry->d_sib))
2856		__hlist_del(&dentry->d_sib);
2857	hlist_add_head(&dentry->d_sib, &dentry->d_parent->d_children);
2858	__d_rehash(dentry);
2859	fsnotify_update_flags(dentry);
2860	fscrypt_handle_d_move(dentry);
2861
2862	write_seqcount_end(&target->d_seq);
2863	write_seqcount_end(&dentry->d_seq);
2864
2865	if (dir)
2866		end_dir_add(dir, n, d_wait);
2867
2868	if (dentry->d_parent != old_parent)
2869		spin_unlock(&dentry->d_parent->d_lock);
2870	if (dentry != old_parent)
2871		spin_unlock(&old_parent->d_lock);
2872	spin_unlock(&target->d_lock);
2873	spin_unlock(&dentry->d_lock);
2874}
2875
2876/*
2877 * d_move - move a dentry
2878 * @dentry: entry to move
2879 * @target: new dentry
2880 *
2881 * Update the dcache to reflect the move of a file name. Negative
2882 * dcache entries should not be moved in this way. See the locking
2883 * requirements for __d_move.
2884 */
2885void d_move(struct dentry *dentry, struct dentry *target)
2886{
2887	write_seqlock(&rename_lock);
2888	__d_move(dentry, target, false);
2889	write_sequnlock(&rename_lock);
2890}
2891EXPORT_SYMBOL(d_move);
2892
2893/*
2894 * d_exchange - exchange two dentries
2895 * @dentry1: first dentry
2896 * @dentry2: second dentry
2897 */
2898void d_exchange(struct dentry *dentry1, struct dentry *dentry2)
2899{
2900	write_seqlock(&rename_lock);
2901
2902	WARN_ON(!dentry1->d_inode);
2903	WARN_ON(!dentry2->d_inode);
2904	WARN_ON(IS_ROOT(dentry1));
2905	WARN_ON(IS_ROOT(dentry2));
2906
2907	__d_move(dentry1, dentry2, true);
2908
2909	write_sequnlock(&rename_lock);
2910}
2911
2912/**
2913 * d_ancestor - search for an ancestor
2914 * @p1: ancestor dentry
2915 * @p2: child dentry
2916 *
2917 * Returns the ancestor dentry of p2 which is a child of p1, if p1 is
2918 * an ancestor of p2, else NULL.
2919 */
2920struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2)
2921{
2922	struct dentry *p;
2923
2924	for (p = p2; !IS_ROOT(p); p = p->d_parent) {
2925		if (p->d_parent == p1)
2926			return p;
2927	}
2928	return NULL;
2929}
2930
2931/*
2932 * This helper attempts to cope with remotely renamed directories
2933 *
2934 * It assumes that the caller is already holding
2935 * dentry->d_parent->d_inode->i_rwsem, and rename_lock
2936 *
2937 * Note: If ever the locking in lock_rename() changes, then please
2938 * remember to update this too...
2939 */
2940static int __d_unalias(struct dentry *dentry, struct dentry *alias)
2941{
2942	struct mutex *m1 = NULL;
2943	struct rw_semaphore *m2 = NULL;
2944	int ret = -ESTALE;
2945
2946	/* If alias and dentry share a parent, then no extra locks required */
2947	if (alias->d_parent == dentry->d_parent)
2948		goto out_unalias;
2949
2950	/* See lock_rename() */
2951	if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex))
2952		goto out_err;
2953	m1 = &dentry->d_sb->s_vfs_rename_mutex;
2954	if (!inode_trylock_shared(alias->d_parent->d_inode))
2955		goto out_err;
2956	m2 = &alias->d_parent->d_inode->i_rwsem;
2957out_unalias:
2958	__d_move(alias, dentry, false);
2959	ret = 0;
2960out_err:
2961	if (m2)
2962		up_read(m2);
2963	if (m1)
2964		mutex_unlock(m1);
2965	return ret;
2966}
2967
2968/**
2969 * d_splice_alias - splice a disconnected dentry into the tree if one exists
2970 * @inode:  the inode which may have a disconnected dentry
2971 * @dentry: a negative dentry which we want to point to the inode.
2972 *
2973 * If inode is a directory and has an IS_ROOT alias, then d_move that in
2974 * place of the given dentry and return it, else simply d_add the inode
2975 * to the dentry and return NULL.
2976 *
2977 * If a non-IS_ROOT directory is found, the filesystem is corrupt, and
2978 * we should error out: directories can't have multiple aliases.
2979 *
2980 * This is needed in the lookup routine of any filesystem that is exportable
2981 * (via knfsd) so that we can build dcache paths to directories effectively.
2982 *
2983 * If a dentry was found and moved, then it is returned.  Otherwise NULL
2984 * is returned.  This matches the expected return value of ->lookup.
2985 *
2986 * Cluster filesystems may call this function with a negative, hashed dentry.
2987 * In that case, we know that the inode will be a regular file, and also this
2988 * will only occur during atomic_open. So we need to check for the dentry
2989 * being already hashed only in the final case.
2990 */
2991struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
2992{
2993	if (IS_ERR(inode))
2994		return ERR_CAST(inode);
2995
2996	BUG_ON(!d_unhashed(dentry));
2997
2998	if (!inode)
2999		goto out;
3000
3001	security_d_instantiate(dentry, inode);
3002	spin_lock(&inode->i_lock);
3003	if (S_ISDIR(inode->i_mode)) {
3004		struct dentry *new = __d_find_any_alias(inode);
3005		if (unlikely(new)) {
3006			/* The reference to new ensures it remains an alias */
3007			spin_unlock(&inode->i_lock);
3008			write_seqlock(&rename_lock);
3009			if (unlikely(d_ancestor(new, dentry))) {
3010				write_sequnlock(&rename_lock);
3011				dput(new);
3012				new = ERR_PTR(-ELOOP);
3013				pr_warn_ratelimited(
3014					"VFS: Lookup of '%s' in %s %s"
3015					" would have caused loop\n",
3016					dentry->d_name.name,
3017					inode->i_sb->s_type->name,
3018					inode->i_sb->s_id);
3019			} else if (!IS_ROOT(new)) {
3020				struct dentry *old_parent = dget(new->d_parent);
3021				int err = __d_unalias(dentry, new);
3022				write_sequnlock(&rename_lock);
3023				if (err) {
3024					dput(new);
3025					new = ERR_PTR(err);
3026				}
3027				dput(old_parent);
3028			} else {
3029				__d_move(new, dentry, false);
3030				write_sequnlock(&rename_lock);
3031			}
3032			iput(inode);
3033			return new;
3034		}
3035	}
3036out:
3037	__d_add(dentry, inode);
3038	return NULL;
3039}
3040EXPORT_SYMBOL(d_splice_alias);
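/*
 * Example (illustrative sketch, not upstream code): the ->lookup() shape
 * used by ext2-like, exportable filesystems.  myfs_inode_by_name() and
 * myfs_iget2() are hypothetical helpers; the latter may return an ERR_PTR,
 * which d_splice_alias() propagates.
 */
static ino_t myfs_inode_by_name(struct inode *dir, const struct qstr *name);
static struct inode *myfs_iget2(struct super_block *sb, ino_t ino);

static struct dentry *example_lookup(struct inode *dir, struct dentry *dentry,
				     unsigned int flags)
{
	struct inode *inode = NULL;
	ino_t ino = myfs_inode_by_name(dir, &dentry->d_name);

	if (ino)
		inode = myfs_iget2(dir->i_sb, ino);
	return d_splice_alias(inode, dentry);	/* handles NULL and ERR_PTR */
}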
3041
3042/*
3043 * Test whether new_dentry is a subdirectory of old_dentry.
3044 *
3045 * Trivially implemented using the dcache structure
3046 */
3047
3048/**
3049 * is_subdir - is new dentry a subdirectory of old_dentry
3050 * @new_dentry: new dentry
3051 * @old_dentry: old dentry
3052 *
3053 * Returns true if new_dentry is a subdirectory of the parent (at any depth).
3054 * Returns false otherwise.
3055 * Caller must ensure that "new_dentry" is pinned before calling is_subdir().
3056 */
3057  
3058bool is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
3059{
3060	bool subdir;
3061	unsigned seq;
3062
3063	if (new_dentry == old_dentry)
3064		return true;
3065
3066	/* Access d_parent under rcu as d_move() may change it. */
3067	rcu_read_lock();
3068	seq = read_seqbegin(&rename_lock);
3069	subdir = d_ancestor(old_dentry, new_dentry);
3070	 /* Try lockless once... */
3071	if (read_seqretry(&rename_lock, seq)) {
3072		/* ...else acquire lock for progress even on deep chains. */
3073		read_seqlock_excl(&rename_lock);
3074		subdir = d_ancestor(old_dentry, new_dentry);
3075		read_sequnlock_excl(&rename_lock);
3076	}
3077	rcu_read_unlock();
3078	return subdir;
3079}
3080EXPORT_SYMBOL(is_subdir);
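/*
 * Example (illustrative sketch): a typical containment check, e.g. asking
 * whether a dentry lives somewhere under a given mount's root:
 *
 *	if (is_subdir(dentry, mnt->mnt_root))
 *		(dentry is mnt->mnt_root itself or one of its descendants)
 */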
3081
3082static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
3083{
3084	struct dentry *root = data;
3085	if (dentry != root) {
3086		if (d_unhashed(dentry) || !dentry->d_inode)
3087			return D_WALK_SKIP;
3088
3089		if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
3090			dentry->d_flags |= DCACHE_GENOCIDE;
3091			dentry->d_lockref.count--;
3092		}
3093	}
3094	return D_WALK_CONTINUE;
3095}
3096
3097void d_genocide(struct dentry *parent)
3098{
3099	d_walk(parent, parent, d_genocide_kill);
3100}
3101
3102void d_mark_tmpfile(struct file *file, struct inode *inode)
3103{
3104	struct dentry *dentry = file->f_path.dentry;
3105
3106	BUG_ON(dentry->d_name.name != dentry->d_iname ||
3107		!hlist_unhashed(&dentry->d_u.d_alias) ||
3108		!d_unlinked(dentry));
3109	spin_lock(&dentry->d_parent->d_lock);
3110	spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
3111	dentry->d_name.len = sprintf(dentry->d_iname, "#%llu",
3112				(unsigned long long)inode->i_ino);
3113	spin_unlock(&dentry->d_lock);
3114	spin_unlock(&dentry->d_parent->d_lock);
3115}
3116EXPORT_SYMBOL(d_mark_tmpfile);
3117
3118void d_tmpfile(struct file *file, struct inode *inode)
3119{
3120	struct dentry *dentry = file->f_path.dentry;
3121
3122	inode_dec_link_count(inode);
3123	d_mark_tmpfile(file, inode);
3124	d_instantiate(dentry, inode);
3125}
3126EXPORT_SYMBOL(d_tmpfile);
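/*
 * Example (illustrative sketch, not upstream code): the tail of a
 * ->tmpfile() method, following the modern convention where the unlinked
 * inode is attached to the open file and finish_open_simple() completes the
 * open.  example_new_inode() is the hypothetical helper from the
 * d_instantiate() example above.
 */
static int example_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
			   struct file *file, umode_t mode)
{
	struct inode *inode = example_new_inode(dir->i_sb, mode);

	if (!inode)
		return -ENOSPC;
	d_tmpfile(file, inode);	/* drops i_nlink and instantiates the dentry */
	return finish_open_simple(file, 0);
}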
3127
3128/*
3129 * Obtain inode number of the parent dentry.
3130 */
3131ino_t d_parent_ino(struct dentry *dentry)
3132{
3133	struct dentry *parent;
3134	struct inode *iparent;
3135	unsigned seq;
3136	ino_t ret;
3137
3138	scoped_guard(rcu) {
3139		seq = raw_seqcount_begin(&dentry->d_seq);
3140		parent = READ_ONCE(dentry->d_parent);
3141		iparent = d_inode_rcu(parent);
3142		if (likely(iparent)) {
3143			ret = iparent->i_ino;
3144			if (!read_seqcount_retry(&dentry->d_seq, seq))
3145				return ret;
3146		}
3147	}
3148
3149	spin_lock(&dentry->d_lock);
3150	ret = dentry->d_parent->d_inode->i_ino;
3151	spin_unlock(&dentry->d_lock);
3152	return ret;
3153}
3154EXPORT_SYMBOL(d_parent_ino);
3155
3156static __initdata unsigned long dhash_entries;
3157static int __init set_dhash_entries(char *str)
3158{
3159	if (!str)
3160		return 0;
3161	dhash_entries = simple_strtoul(str, &str, 0);
3162	return 1;
3163}
3164__setup("dhash_entries=", set_dhash_entries);
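/*
 * Example: booting with "dhash_entries=1048576" on the kernel command line
 * sizes the dentry hash table from that value instead of letting
 * alloc_large_system_hash() scale it from the amount of memory.
 */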
3165
3166static void __init dcache_init_early(void)
3167{
3168	/* If hashes are distributed across NUMA nodes, defer
3169	 * hash allocation until vmalloc space is available.
3170	 */
3171	if (hashdist)
3172		return;
3173
3174	dentry_hashtable =
3175		alloc_large_system_hash("Dentry cache",
3176					sizeof(struct hlist_bl_head),
3177					dhash_entries,
3178					13,
3179					HASH_EARLY | HASH_ZERO,
3180					&d_hash_shift,
3181					NULL,
3182					0,
3183					0);
3184	d_hash_shift = 32 - d_hash_shift;
3185
3186	runtime_const_init(shift, d_hash_shift);
3187	runtime_const_init(ptr, dentry_hashtable);
3188}
3189
3190static void __init dcache_init(void)
3191{
3192	/*
3193	 * A constructor could be added for stable state like the lists,
3194	 * but it is probably not worth it because of the cache nature
3195	 * of the dcache.
3196	 */
3197	dentry_cache = KMEM_CACHE_USERCOPY(dentry,
3198		SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_ACCOUNT,
3199		d_iname);
3200
3201	/* Hash may have been set up in dcache_init_early */
3202	if (!hashdist)
3203		return;
3204
3205	dentry_hashtable =
3206		alloc_large_system_hash("Dentry cache",
3207					sizeof(struct hlist_bl_head),
3208					dhash_entries,
3209					13,
3210					HASH_ZERO,
3211					&d_hash_shift,
3212					NULL,
3213					0,
3214					0);
3215	d_hash_shift = 32 - d_hash_shift;
3216
3217	runtime_const_init(shift, d_hash_shift);
3218	runtime_const_init(ptr, dentry_hashtable);
3219}
3220
3221/* SLAB cache for __getname() consumers */
3222struct kmem_cache *names_cachep __ro_after_init;
3223EXPORT_SYMBOL(names_cachep);
3224
3225void __init vfs_caches_init_early(void)
3226{
3227	int i;
3228
3229	for (i = 0; i < ARRAY_SIZE(in_lookup_hashtable); i++)
3230		INIT_HLIST_BL_HEAD(&in_lookup_hashtable[i]);
3231
3232	dcache_init_early();
3233	inode_init_early();
3234}
3235
3236void __init vfs_caches_init(void)
3237{
3238	names_cachep = kmem_cache_create_usercopy("names_cache", PATH_MAX, 0,
3239			SLAB_HWCACHE_ALIGN|SLAB_PANIC, 0, PATH_MAX, NULL);
3240
3241	dcache_init();
3242	inode_init();
3243	files_init();
3244	files_maxfiles_init();
3245	mnt_init();
3246	bdev_cache_init();
3247	chrdev_init();
3248}
v4.10.11
 
   1/*
   2 * fs/dcache.c
   3 *
   4 * Complete reimplementation
   5 * (C) 1997 Thomas Schoebel-Theuer,
   6 * with heavy changes by Linus Torvalds
   7 */
   8
   9/*
  10 * Notes on the allocation strategy:
  11 *
  12 * The dcache is a master of the icache - whenever a dcache entry
  13 * exists, the inode will always exist. "iput()" is done either when
  14 * the dcache entry is deleted or garbage collected.
  15 */
  16
  17#include <linux/syscalls.h>
  18#include <linux/string.h>
  19#include <linux/mm.h>
  20#include <linux/fs.h>
 
  21#include <linux/fsnotify.h>
  22#include <linux/slab.h>
  23#include <linux/init.h>
  24#include <linux/hash.h>
  25#include <linux/cache.h>
  26#include <linux/export.h>
  27#include <linux/mount.h>
  28#include <linux/file.h>
  29#include <linux/uaccess.h>
  30#include <linux/security.h>
  31#include <linux/seqlock.h>
  32#include <linux/swap.h>
  33#include <linux/bootmem.h>
  34#include <linux/fs_struct.h>
  35#include <linux/hardirq.h>
  36#include <linux/bit_spinlock.h>
  37#include <linux/rculist_bl.h>
  38#include <linux/prefetch.h>
  39#include <linux/ratelimit.h>
  40#include <linux/list_lru.h>
  41#include <linux/kasan.h>
  42
  43#include "internal.h"
  44#include "mount.h"
  45
 
 
  46/*
  47 * Usage:
  48 * dcache->d_inode->i_lock protects:
  49 *   - i_dentry, d_u.d_alias, d_inode of aliases
  50 * dcache_hash_bucket lock protects:
  51 *   - the dcache hash table
  52 * s_anon bl list spinlock protects:
  53 *   - the s_anon list (see __d_drop)
  54 * dentry->d_sb->s_dentry_lru_lock protects:
  55 *   - the dcache lru lists and counters
  56 * d_lock protects:
  57 *   - d_flags
  58 *   - d_name
  59 *   - d_lru
  60 *   - d_count
  61 *   - d_unhashed()
  62 *   - d_parent and d_subdirs
  63 *   - childrens' d_child and d_parent
  64 *   - d_u.d_alias, d_inode
  65 *
  66 * Ordering:
  67 * dentry->d_inode->i_lock
  68 *   dentry->d_lock
  69 *     dentry->d_sb->s_dentry_lru_lock
  70 *     dcache_hash_bucket lock
  71 *     s_anon lock
  72 *
  73 * If there is an ancestor relationship:
  74 * dentry->d_parent->...->d_parent->d_lock
  75 *   ...
  76 *     dentry->d_parent->d_lock
  77 *       dentry->d_lock
  78 *
  79 * If no ancestor relationship:
  80 * if (dentry1 < dentry2)
  81 *   dentry1->d_lock
  82 *     dentry2->d_lock
  83 */
  84int sysctl_vfs_cache_pressure __read_mostly = 100;
  85EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);
  86
  87__cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);
  88
  89EXPORT_SYMBOL(rename_lock);
  90
  91static struct kmem_cache *dentry_cache __read_mostly;
 
 
 
 
 
 
 
  92
  93/*
  94 * This is the single most critical data structure when it comes
  95 * to the dcache: the hashtable for lookups. Somebody should try
  96 * to make this good - I've just made it work.
  97 *
  98 * This hash-function tries to avoid losing too many bits of hash
  99 * information, yet avoid using a prime hash-size or similar.
 
 
 
 
 
 100 */
 101
 102static unsigned int d_hash_mask __read_mostly;
 103static unsigned int d_hash_shift __read_mostly;
 104
 105static struct hlist_bl_head *dentry_hashtable __read_mostly;
 106
 107static inline struct hlist_bl_head *d_hash(unsigned int hash)
 108{
 109	return dentry_hashtable + (hash >> (32 - d_hash_shift));
 
 110}
 111
 112#define IN_LOOKUP_SHIFT 10
 113static struct hlist_bl_head in_lookup_hashtable[1 << IN_LOOKUP_SHIFT];
 114
 115static inline struct hlist_bl_head *in_lookup_hash(const struct dentry *parent,
 116					unsigned int hash)
 117{
 118	hash += (unsigned long) parent / L1_CACHE_BYTES;
 119	return in_lookup_hashtable + hash_32(hash, IN_LOOKUP_SHIFT);
 120}
 121
 122
 123/* Statistics gathering. */
 124struct dentry_stat_t dentry_stat = {
 125	.age_limit = 45,
 
 
 
 126};
 127
 128static DEFINE_PER_CPU(long, nr_dentry);
 129static DEFINE_PER_CPU(long, nr_dentry_unused);
 
 
 130
 131#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
 
 
 
 
 132
 133/*
 134 * Here we resort to our own counters instead of using generic per-cpu counters
 135 * for consistency with what the vfs inode code does. We are expected to harvest
 136 * better code and performance by having our own specialized counters.
 137 *
 138 * Please note that the loop is done over all possible CPUs, not over all online
 139 * CPUs. The reason for this is that we don't want to play games with CPUs going
 140 * on and off. If one of them goes off, we will just keep their counters.
 141 *
 142 * glommer: See cffbc8a for details, and if you ever intend to change this,
 143 * please update all vfs counters to match.
 144 */
 145static long get_nr_dentry(void)
 146{
 147	int i;
 148	long sum = 0;
 149	for_each_possible_cpu(i)
 150		sum += per_cpu(nr_dentry, i);
 151	return sum < 0 ? 0 : sum;
 152}
 153
 154static long get_nr_dentry_unused(void)
 155{
 156	int i;
 157	long sum = 0;
 158	for_each_possible_cpu(i)
 159		sum += per_cpu(nr_dentry_unused, i);
 160	return sum < 0 ? 0 : sum;
 161}
 162
 163int proc_nr_dentry(struct ctl_table *table, int write, void __user *buffer,
 164		   size_t *lenp, loff_t *ppos)
 
 
 
 
 
 
 
 
 
 
 165{
 166	dentry_stat.nr_dentry = get_nr_dentry();
 167	dentry_stat.nr_unused = get_nr_dentry_unused();
 
 168	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
 169}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 170#endif
 171
 172/*
 173 * Compare 2 name strings, return 0 if they match, otherwise non-zero.
 174 * The strings are both count bytes long, and count is non-zero.
 175 */
 176#ifdef CONFIG_DCACHE_WORD_ACCESS
 177
 178#include <asm/word-at-a-time.h>
 179/*
 180 * NOTE! 'cs' and 'scount' come from a dentry, so it has a
 181 * aligned allocation for this particular component. We don't
 182 * strictly need the load_unaligned_zeropad() safety, but it
 183 * doesn't hurt either.
 184 *
 185 * In contrast, 'ct' and 'tcount' can be from a pathname, and do
 186 * need the careful unaligned handling.
 187 */
 188static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
 189{
 190	unsigned long a,b,mask;
 191
 192	for (;;) {
 193		a = *(unsigned long *)cs;
 194		b = load_unaligned_zeropad(ct);
 195		if (tcount < sizeof(unsigned long))
 196			break;
 197		if (unlikely(a != b))
 198			return 1;
 199		cs += sizeof(unsigned long);
 200		ct += sizeof(unsigned long);
 201		tcount -= sizeof(unsigned long);
 202		if (!tcount)
 203			return 0;
 204	}
 205	mask = bytemask_from_count(tcount);
 206	return unlikely(!!((a ^ b) & mask));
 207}
 208
 209#else
 210
 211static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
 212{
 213	do {
 214		if (*cs != *ct)
 215			return 1;
 216		cs++;
 217		ct++;
 218		tcount--;
 219	} while (tcount);
 220	return 0;
 221}
 222
 223#endif
 224
 225static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *ct, unsigned tcount)
 226{
 227	/*
 228	 * Be careful about RCU walk racing with rename:
 229	 * use 'lockless_dereference' to fetch the name pointer.
 230	 *
 231	 * NOTE! Even if a rename will mean that the length
 232	 * was not loaded atomically, we don't care. The
 233	 * RCU walk will check the sequence count eventually,
 234	 * and catch it. And we won't overrun the buffer,
 235	 * because we're reading the name pointer atomically,
 236	 * and a dentry name is guaranteed to be properly
 237	 * terminated with a NUL byte.
 238	 *
 239	 * End result: even if 'len' is wrong, we'll exit
 240	 * early because the data cannot match (there can
 241	 * be no NUL in the ct/tcount data)
 242	 */
 243	const unsigned char *cs = lockless_dereference(dentry->d_name.name);
 244
 245	return dentry_string_cmp(cs, ct, tcount);
 246}
 247
 248struct external_name {
 249	union {
 250		atomic_t count;
 251		struct rcu_head head;
 252	} u;
 253	unsigned char name[];
 254};
 255
 256static inline struct external_name *external_name(struct dentry *dentry)
 257{
 258	return container_of(dentry->d_name.name, struct external_name, name[0]);
 259}
 260
 261static void __d_free(struct rcu_head *head)
 262{
 263	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
 264
 265	kmem_cache_free(dentry_cache, dentry); 
 266}
 267
 268static void __d_free_external(struct rcu_head *head)
 269{
 270	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
 271	kfree(external_name(dentry));
 272	kmem_cache_free(dentry_cache, dentry); 
 273}
 274
 275static inline int dname_external(const struct dentry *dentry)
 276{
 277	return dentry->d_name.name != dentry->d_iname;
 278}
 279
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 280static inline void __d_set_inode_and_type(struct dentry *dentry,
 281					  struct inode *inode,
 282					  unsigned type_flags)
 283{
 284	unsigned flags;
 285
 286	dentry->d_inode = inode;
 287	flags = READ_ONCE(dentry->d_flags);
 288	flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
 289	flags |= type_flags;
 290	WRITE_ONCE(dentry->d_flags, flags);
 291}
 292
 293static inline void __d_clear_type_and_inode(struct dentry *dentry)
 294{
 295	unsigned flags = READ_ONCE(dentry->d_flags);
 296
 297	flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
 298	WRITE_ONCE(dentry->d_flags, flags);
 299	dentry->d_inode = NULL;
 
 
 
 
 
 
 300}
 301
 302static void dentry_free(struct dentry *dentry)
 303{
 304	WARN_ON(!hlist_unhashed(&dentry->d_u.d_alias));
 305	if (unlikely(dname_external(dentry))) {
 306		struct external_name *p = external_name(dentry);
 307		if (likely(atomic_dec_and_test(&p->u.count))) {
 308			call_rcu(&dentry->d_u.d_rcu, __d_free_external);
 309			return;
 310		}
 311	}
 312	/* if dentry was never visible to RCU, immediate free is OK */
 313	if (!(dentry->d_flags & DCACHE_RCUACCESS))
 314		__d_free(&dentry->d_u.d_rcu);
 315	else
 316		call_rcu(&dentry->d_u.d_rcu, __d_free);
 317}
 318
 319/*
 320 * Release the dentry's inode, using the filesystem
 321 * d_iput() operation if defined.
 322 */
 323static void dentry_unlink_inode(struct dentry * dentry)
 324	__releases(dentry->d_lock)
 325	__releases(dentry->d_inode->i_lock)
 326{
 327	struct inode *inode = dentry->d_inode;
 328	bool hashed = !d_unhashed(dentry);
 329
 330	if (hashed)
 331		raw_write_seqcount_begin(&dentry->d_seq);
 332	__d_clear_type_and_inode(dentry);
 333	hlist_del_init(&dentry->d_u.d_alias);
 334	if (hashed)
 335		raw_write_seqcount_end(&dentry->d_seq);
 336	spin_unlock(&dentry->d_lock);
 337	spin_unlock(&inode->i_lock);
 338	if (!inode->i_nlink)
 339		fsnotify_inoderemove(inode);
 340	if (dentry->d_op && dentry->d_op->d_iput)
 341		dentry->d_op->d_iput(dentry, inode);
 342	else
 343		iput(inode);
 344}
 345
 346/*
 347 * The DCACHE_LRU_LIST bit is set whenever the 'd_lru' entry
 348 * is in use - which includes both the "real" per-superblock
 349 * LRU list _and_ the DCACHE_SHRINK_LIST use.
 350 *
 351 * The DCACHE_SHRINK_LIST bit is set whenever the dentry is
 352 * on the shrink list (ie not on the superblock LRU list).
 353 *
 354 * The per-cpu "nr_dentry_unused" counters are updated with
 355 * the DCACHE_LRU_LIST bit.
 356 *
 
 
 
 
 
 357 * These helper functions make sure we always follow the
 358 * rules. d_lock must be held by the caller.
 359 */
 360#define D_FLAG_VERIFY(dentry,x) WARN_ON_ONCE(((dentry)->d_flags & (DCACHE_LRU_LIST | DCACHE_SHRINK_LIST)) != (x))
 361static void d_lru_add(struct dentry *dentry)
 362{
 363	D_FLAG_VERIFY(dentry, 0);
 364	dentry->d_flags |= DCACHE_LRU_LIST;
 365	this_cpu_inc(nr_dentry_unused);
 366	WARN_ON_ONCE(!list_lru_add(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
 367}
 368
 369static void d_lru_del(struct dentry *dentry)
 370{
 371	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
 372	dentry->d_flags &= ~DCACHE_LRU_LIST;
 373	this_cpu_dec(nr_dentry_unused);
 374	WARN_ON_ONCE(!list_lru_del(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
 375}
 376
 377static void d_shrink_del(struct dentry *dentry)
 378{
 379	D_FLAG_VERIFY(dentry, DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
 380	list_del_init(&dentry->d_lru);
 381	dentry->d_flags &= ~(DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
 382	this_cpu_dec(nr_dentry_unused);
 383}
 384
 385static void d_shrink_add(struct dentry *dentry, struct list_head *list)
 386{
 387	D_FLAG_VERIFY(dentry, 0);
 388	list_add(&dentry->d_lru, list);
 389	dentry->d_flags |= DCACHE_SHRINK_LIST | DCACHE_LRU_LIST;
 390	this_cpu_inc(nr_dentry_unused);
 391}
 392
 393/*
 394 * These can only be called under the global LRU lock, ie during the
 395 * callback for freeing the LRU list. "isolate" removes it from the
 396 * LRU lists entirely, while shrink_move moves it to the indicated
 397 * private list.
 398 */
 399static void d_lru_isolate(struct list_lru_one *lru, struct dentry *dentry)
 400{
 401	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
 402	dentry->d_flags &= ~DCACHE_LRU_LIST;
 403	this_cpu_dec(nr_dentry_unused);
 404	list_lru_isolate(lru, &dentry->d_lru);
 405}
 406
 407static void d_lru_shrink_move(struct list_lru_one *lru, struct dentry *dentry,
 408			      struct list_head *list)
 409{
 410	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
 411	dentry->d_flags |= DCACHE_SHRINK_LIST;
 412	list_lru_isolate_move(lru, &dentry->d_lru, list);
 413}
 414
 415/*
 416 * dentry_lru_(add|del)_list) must be called with d_lock held.
 417 */
 418static void dentry_lru_add(struct dentry *dentry)
 419{
 420	if (unlikely(!(dentry->d_flags & DCACHE_LRU_LIST)))
 421		d_lru_add(dentry);
 422}
 423
 424/**
 425 * d_drop - drop a dentry
 426 * @dentry: dentry to drop
 427 *
 428 * d_drop() unhashes the entry from the parent dentry hashes, so that it won't
 429 * be found through a VFS lookup any more. Note that this is different from
 430 * deleting the dentry - d_delete will try to mark the dentry negative if
 431 * possible, giving a successful _negative_ lookup, while d_drop will
 432 * just make the cache lookup fail.
 433 *
 434 * d_drop() is used mainly for stuff that wants to invalidate a dentry for some
 435 * reason (NFS timeouts or autofs deletes).
 436 *
 437 * __d_drop requires dentry->d_lock.
 438 */
 439void __d_drop(struct dentry *dentry)
 440{
 441	if (!d_unhashed(dentry)) {
 442		struct hlist_bl_head *b;
 443		/*
 444		 * Hashed dentries are normally on the dentry hashtable,
 445		 * with the exception of those newly allocated by
 446		 * d_obtain_alias, which are always IS_ROOT:
 447		 */
 448		if (unlikely(IS_ROOT(dentry)))
 449			b = &dentry->d_sb->s_anon;
 450		else
 451			b = d_hash(dentry->d_name.hash);
 452
 453		hlist_bl_lock(b);
 454		__hlist_bl_del(&dentry->d_hash);
 455		dentry->d_hash.pprev = NULL;
 456		hlist_bl_unlock(b);
 457		/* After this call, in-progress rcu-walk path lookup will fail. */
 458		write_seqcount_invalidate(&dentry->d_seq);
 459	}
 460}
 461EXPORT_SYMBOL(__d_drop);
 462
 463void d_drop(struct dentry *dentry)
 464{
 465	spin_lock(&dentry->d_lock);
 466	__d_drop(dentry);
 467	spin_unlock(&dentry->d_lock);
 468}
 469EXPORT_SYMBOL(d_drop);
 470
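/*
 * Editor's sketch: the typical d_drop() call site shape for the NFS/autofs
 * style invalidation described above.  The dentry stays allocated and
 * refcounted; it merely becomes unfindable, so the next lookup misses the
 * dcache and calls the filesystem's ->lookup().  example_forget_name() is
 * a hypothetical helper.
 */
static void example_forget_name(struct dentry *dentry)
{
	/* unhash only: this is not d_delete(), no negative entry remains */
	d_drop(dentry);
}
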
 471static inline void dentry_unlist(struct dentry *dentry, struct dentry *parent)
 472{
 473	struct dentry *next;
 474	/*
 475	 * Inform d_walk() and shrink_dentry_list() that we are no longer
 476	 * attached to the dentry tree
 477	 */
 478	dentry->d_flags |= DCACHE_DENTRY_KILLED;
 479	if (unlikely(list_empty(&dentry->d_child)))
 480		return;
 481	__list_del_entry(&dentry->d_child);
 482	/*
 483	 * Cursors can move around the list of children.  While we'd been
 484	 * a normal list member, it didn't matter - ->d_child.next would've
 485	 * been updated.  However, from now on it won't be and for the
 486	 * things like d_walk() it might end up with a nasty surprise.
 487	 * Normally d_walk() doesn't care about cursors moving around -
 488	 * ->d_lock on parent prevents that and since a cursor has no children
 489	 * of its own, we get through it without ever unlocking the parent.
 490	 * There is one exception, though - if we ascend from a child that
 491	 * gets killed as soon as we unlock it, the next sibling is found
 492	 * using the value left in its ->d_child.next.  And if _that_
 493	 * pointed to a cursor, and cursor got moved (e.g. by lseek())
 494	 * before d_walk() regains parent->d_lock, we'll end up skipping
 495	 * everything the cursor had been moved past.
 496	 *
 497	 * Solution: make sure that the pointer left behind in ->d_child.next
 498	 * points to something that won't be moving around.  I.e. skip the
 499	 * cursors.
 500	 */
 501	while (dentry->d_child.next != &parent->d_subdirs) {
 502		next = list_entry(dentry->d_child.next, struct dentry, d_child);
 503		if (likely(!(next->d_flags & DCACHE_DENTRY_CURSOR)))
 504			break;
 505		dentry->d_child.next = next->d_child.next;
 506	}
 507}
 508
 509static void __dentry_kill(struct dentry *dentry)
 510{
 511	struct dentry *parent = NULL;
 512	bool can_free = true;
 513	if (!IS_ROOT(dentry))
 514		parent = dentry->d_parent;
 515
 516	/*
 517	 * The dentry is now unrecoverably dead to the world.
 518	 */
 519	lockref_mark_dead(&dentry->d_lockref);
 520
 521	/*
 522	 * inform the fs via d_prune that this dentry is about to be
 523	 * unhashed and destroyed.
 524	 */
 525	if (dentry->d_flags & DCACHE_OP_PRUNE)
 526		dentry->d_op->d_prune(dentry);
 527
 528	if (dentry->d_flags & DCACHE_LRU_LIST) {
 529		if (!(dentry->d_flags & DCACHE_SHRINK_LIST))
 530			d_lru_del(dentry);
 531	}
 532	/* if it was on the hash then remove it */
 533	__d_drop(dentry);
 534	dentry_unlist(dentry, parent);
 535	if (parent)
 536		spin_unlock(&parent->d_lock);
 537	if (dentry->d_inode)
 538		dentry_unlink_inode(dentry);
 539	else
 540		spin_unlock(&dentry->d_lock);
 541	this_cpu_dec(nr_dentry);
 542	if (dentry->d_op && dentry->d_op->d_release)
 543		dentry->d_op->d_release(dentry);
 544
 545	spin_lock(&dentry->d_lock);
 546	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
 547		dentry->d_flags |= DCACHE_MAY_FREE;
 548		can_free = false;
 549	}
 550	spin_unlock(&dentry->d_lock);
 551	if (likely(can_free))
 552		dentry_free(dentry);
 553}
 554
 555/*
 556 * Finish off a dentry we've decided to kill.
 557 * dentry->d_lock must be held, returns with it unlocked.
 558 * If ref is non-zero, then decrement the refcount too.
 559 * Returns dentry requiring refcount drop, or NULL if we're done.
 560 */
 561static struct dentry *dentry_kill(struct dentry *dentry)
 562	__releases(dentry->d_lock)
 563{
 564	struct inode *inode = dentry->d_inode;
 565	struct dentry *parent = NULL;
 566
 567	if (inode && unlikely(!spin_trylock(&inode->i_lock)))
 568		goto failed;
 569
 570	if (!IS_ROOT(dentry)) {
 571		parent = dentry->d_parent;
 572		if (unlikely(!spin_trylock(&parent->d_lock))) {
 573			if (inode)
 574				spin_unlock(&inode->i_lock);
 575			goto failed;
 576		}
 577	}
 578
 579	__dentry_kill(dentry);
 580	return parent;
 581
 582failed:
 583	spin_unlock(&dentry->d_lock);
 584	return dentry; /* try again with same dentry */
 585}
 586
 587static inline struct dentry *lock_parent(struct dentry *dentry)
 588{
 589	struct dentry *parent = dentry->d_parent;
 590	if (IS_ROOT(dentry))
 591		return NULL;
 592	if (unlikely(dentry->d_lockref.count < 0))
 593		return NULL;
 594	if (likely(spin_trylock(&parent->d_lock)))
 595		return parent;
 596	rcu_read_lock();
 597	spin_unlock(&dentry->d_lock);
 598again:
 599	parent = ACCESS_ONCE(dentry->d_parent);
 600	spin_lock(&parent->d_lock);
 601	/*
 602	 * We can't blindly lock dentry until we are sure
 603	 * that we won't violate the locking order.
 604	 * Any changes of dentry->d_parent must have
 605	 * been done with parent->d_lock held, so
 606	 * spin_lock() above is enough of a barrier
 607	 * for checking if it's still our child.
 608	 */
 609	if (unlikely(parent != dentry->d_parent)) {
 610		spin_unlock(&parent->d_lock);
 611		goto again;
 612	}
 613	rcu_read_unlock();
 614	if (parent != dentry)
 615		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
 616	else
 617		parent = NULL;
 618	return parent;
 619}
 620
 621/*
 622 * Try to do a lockless dput(), and return whether that was successful.
 623 *
 624 * If unsuccessful, we return false, having already taken the dentry lock.
 625 *
 626 * The caller needs to hold the RCU read lock, so that the dentry is
 627 * guaranteed to stay around even if the refcount goes down to zero!
 628 */
 629static inline bool fast_dput(struct dentry *dentry)
 630{
 631	int ret;
 632	unsigned int d_flags;
 633
 634	/*
 635	 * If we have a d_op->d_delete() operation, we should not
 636	 * let the dentry count go to zero, so use "put_or_lock".
 637	 */
 638	if (unlikely(dentry->d_flags & DCACHE_OP_DELETE))
 639		return lockref_put_or_lock(&dentry->d_lockref);
 640
 641	/*
 642	 * .. otherwise, we can try to just decrement the
 643	 * lockref optimistically.
 644	 */
 645	ret = lockref_put_return(&dentry->d_lockref);
 646
 647	/*
 648	 * If the lockref_put_return() failed due to the lock being held
 649	 * by somebody else, the fast path has failed. We will need to
 650	 * get the lock, and then check the count again.
 651	 */
 652	if (unlikely(ret < 0)) {
 653		spin_lock(&dentry->d_lock);
 654		if (dentry->d_lockref.count > 1) {
 655			dentry->d_lockref.count--;
 656			spin_unlock(&dentry->d_lock);
 657			return 1;
 658		}
 659		return 0;
 660	}
 661
 662	/*
 663	 * If we weren't the last ref, we're done.
 664	 */
 665	if (ret)
 666		return 1;
 667
 668	/*
 669	 * Careful, careful. The reference count went down
 670	 * to zero, but we don't hold the dentry lock, so
 671	 * somebody else could get it again, and do another
 672	 * dput(), and we need to not race with that.
 673	 *
 674	 * However, there is a very special and common case
 675	 * where we don't care, because there is nothing to
 676	 * do: the dentry is still hashed, it does not have
 677	 * a 'delete' op, and it's referenced and already on
 678	 * the LRU list.
 679	 *
 680	 * NOTE! Since we aren't locked, these values are
 681	 * not "stable". However, it is sufficient that at
 682	 * some point after we dropped the reference the
 683	 * dentry was hashed and the flags had the proper
 684	 * value. Other dentry users may have re-gotten
 685	 * a reference to the dentry and change that, but
 686	 * our work is done - we can leave the dentry
 687	 * around with a zero refcount.
 688	 */
 689	smp_rmb();
 690	d_flags = ACCESS_ONCE(dentry->d_flags);
 691	d_flags &= DCACHE_REFERENCED | DCACHE_LRU_LIST | DCACHE_DISCONNECTED;
 692
 693	/* Nothing to do? Dropping the reference was all we needed? */
 694	if (d_flags == (DCACHE_REFERENCED | DCACHE_LRU_LIST) && !d_unhashed(dentry))
 695		return 1;
 696
 697	/*
 698	 * Not the fast normal case? Get the lock. We've already decremented
 699	 * the refcount, but we'll need to re-check the situation after
 700	 * getting the lock.
 701	 */
 702	spin_lock(&dentry->d_lock);
 703
 704	/*
 705	 * Did somebody else grab a reference to it in the meantime, and
 706	 * we're no longer the last user after all? Alternatively, somebody
 707	 * else could have killed it and marked it dead. Either way, we
 708	 * don't need to do anything else.
 709	 */
 710	if (dentry->d_lockref.count) {
 711		spin_unlock(&dentry->d_lock);
 712		return 1;
 713	}
 714
 715	/*
 716	 * Re-get the reference we optimistically dropped. We hold the
 717	 * lock, and we just tested that it was zero, so we can just
 718	 * set it to 1.
 719	 */
 720	dentry->d_lockref.count = 1;
 721	return 0;
 722}
 723
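/*
 * Editor's sketch: the lockref primitive that fast_dput() builds on.  A
 * lockref packs a spinlock and a count so the common put can be a single
 * cmpxchg loop, falling back to the spinlock under contention - which is
 * why lockref_put_return() may "fail" with a negative value above.
 * Teardown at count zero is omitted for brevity; names are illustrative.
 */
struct example_obj {
	struct lockref ref;
};

static void example_put(struct example_obj *o)
{
	if (lockref_put_return(&o->ref) >= 0)
		return;			/* lockless fast path succeeded */

	spin_lock(&o->ref.lock);	/* contended: fall back to the lock */
	o->ref.count--;
	spin_unlock(&o->ref.lock);
}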
 724
 725/* 
 726 * This is dput
 727 *
 728 * This is complicated by the fact that we do not want to put
 729 * dentries that are no longer on any hash chain on the unused
 730 * list: we'd much rather just get rid of them immediately.
 731 *
 732 * However, that implies that we have to traverse the dentry
 733 * tree upwards to the parents which might _also_ now be
 734 * scheduled for deletion (it may have been only waiting for
 735 * its last child to go away).
 736 *
 737 * This tail recursion is done by hand as we don't want to depend
 738 * on the compiler to always get this right (gcc generally doesn't).
 739 * Real recursion would eat up our stack space.
 740 */
 741
 742/*
 743 * dput - release a dentry
 744 * @dentry: dentry to release 
 745 *
 746 * Release a dentry. This will drop the usage count and if appropriate
 747 * call the dentry unlink method as well as removing it from the queues and
 748 * releasing its resources. If the parent dentries were scheduled for release
 749 * they too may now get deleted.
 750 */
 751void dput(struct dentry *dentry)
 752{
 753	if (unlikely(!dentry))
 754		return;
 755
 756repeat:
 757	might_sleep();
 758
 759	rcu_read_lock();
 760	if (likely(fast_dput(dentry))) {
 761		rcu_read_unlock();
 762		return;
 763	}
 764
 765	/* Slow case: now with the dentry lock held */
 766	rcu_read_unlock();
 767
 768	WARN_ON(d_in_lookup(dentry));
 769
 770	/* Unreachable? Get rid of it */
 771	if (unlikely(d_unhashed(dentry)))
 772		goto kill_it;
 773
 774	if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED))
 775		goto kill_it;
 776
 777	if (unlikely(dentry->d_flags & DCACHE_OP_DELETE)) {
 778		if (dentry->d_op->d_delete(dentry))
 779			goto kill_it;
 780	}
 781
 782	if (!(dentry->d_flags & DCACHE_REFERENCED))
 783		dentry->d_flags |= DCACHE_REFERENCED;
 784	dentry_lru_add(dentry);
 785
 786	dentry->d_lockref.count--;
 787	spin_unlock(&dentry->d_lock);
 788	return;
 789
 790kill_it:
 791	dentry = dentry_kill(dentry);
 792	if (dentry) {
 793		cond_resched();
 794		goto repeat;
 795	}
 796}
 797EXPORT_SYMBOL(dput);
 798
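/*
 * Editor's sketch: the canonical pairing for the reference that dput()
 * releases.  Whatever took the reference (dget(), d_lookup(), dget_parent(),
 * ...) must hand it back exactly once.  example_hold() is hypothetical.
 */
static void example_hold(struct dentry *dentry)
{
	struct dentry *d = dget(dentry);	/* count++ */

	/* ... d is pinned and cannot be freed under us ... */
	dput(d);				/* count--, may start teardown */
}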
 799
 800/* This must be called with d_lock held */
 801static inline void __dget_dlock(struct dentry *dentry)
 802{
 803	dentry->d_lockref.count++;
 804}
 805
 806static inline void __dget(struct dentry *dentry)
 807{
 808	lockref_get(&dentry->d_lockref);
 809}
 810
 811struct dentry *dget_parent(struct dentry *dentry)
 812{
 813	int gotref;
 814	struct dentry *ret;
 815
 816	/*
 817	 * Do optimistic parent lookup without any
 818	 * locking.
 819	 */
 820	rcu_read_lock();
 821	ret = ACCESS_ONCE(dentry->d_parent);
 822	gotref = lockref_get_not_zero(&ret->d_lockref);
 823	rcu_read_unlock();
 824	if (likely(gotref)) {
 825		if (likely(ret == ACCESS_ONCE(dentry->d_parent)))
 826			return ret;
 827		dput(ret);
 828	}
 829
 830repeat:
 831	/*
 832	 * Don't need rcu_dereference because we re-check it was correct under
 833	 * the lock.
 834	 */
 835	rcu_read_lock();
 836	ret = dentry->d_parent;
 837	spin_lock(&ret->d_lock);
 838	if (unlikely(ret != dentry->d_parent)) {
 839		spin_unlock(&ret->d_lock);
 840		rcu_read_unlock();
 841		goto repeat;
 842	}
 843	rcu_read_unlock();
 844	BUG_ON(!ret->d_lockref.count);
 845	ret->d_lockref.count++;
 846	spin_unlock(&ret->d_lock);
 847	return ret;
 848}
 849EXPORT_SYMBOL(dget_parent);
 850
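/*
 * Editor's sketch: why dget_parent() exists.  Reading dentry->d_parent
 * directly can race with rename; dget_parent() returns a pinned parent
 * that is safe to use without further locking.  Hypothetical helper.
 */
static unsigned long example_parent_ino(struct dentry *dentry)
{
	struct dentry *parent = dget_parent(dentry);
	unsigned long ino = parent->d_inode ? parent->d_inode->i_ino : 0;

	dput(parent);
	return ino;
}
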
 851/**
 852 * d_find_alias - grab a hashed alias of inode
 853 * @inode: inode in question
 854 *
 855 * If inode has a hashed alias, or is a directory and has any alias,
 856 * acquire the reference to alias and return it. Otherwise return NULL.
 857 * Notice that if inode is a directory there can be only one alias and
 858 * it can be unhashed only if it has no children, or if it is the root
 859 * of a filesystem, or if the directory was renamed and d_revalidate
 860 * was the first vfs operation to notice.
 861 *
 862 * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer
 863 * any other hashed alias over that one.
 864 */
 865static struct dentry *__d_find_alias(struct inode *inode)
 866{
 867	struct dentry *alias, *discon_alias;
 868
 869again:
 870	discon_alias = NULL;
 871	hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
 872		spin_lock(&alias->d_lock);
 873		if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
 874			if (IS_ROOT(alias) &&
 875			    (alias->d_flags & DCACHE_DISCONNECTED)) {
 876				discon_alias = alias;
 877			} else {
 878				__dget_dlock(alias);
 879				spin_unlock(&alias->d_lock);
 880				return alias;
 881			}
 882		}
 883		spin_unlock(&alias->d_lock);
 884	}
 885	if (discon_alias) {
 886		alias = discon_alias;
 887		spin_lock(&alias->d_lock);
 888		if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
 889			__dget_dlock(alias);
 890			spin_unlock(&alias->d_lock);
 891			return alias;
 892		}
 893		spin_unlock(&alias->d_lock);
 894		goto again;
 895	}
 896	return NULL;
 897}
 898
 899struct dentry *d_find_alias(struct inode *inode)
 900{
 901	struct dentry *de = NULL;
 902
 903	if (!hlist_empty(&inode->i_dentry)) {
 904		spin_lock(&inode->i_lock);
 905		de = __d_find_alias(inode);
 906		spin_unlock(&inode->i_lock);
 907	}
 908	return de;
 909}
 910EXPORT_SYMBOL(d_find_alias);
 911
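/*
 * Editor's sketch: typical d_find_alias() use when only the inode is at
 * hand and a human-readable name is wanted.  The returned alias carries
 * a reference and must be dput().  example_log_inode() is hypothetical.
 */
static void example_log_inode(struct inode *inode)
{
	struct dentry *alias = d_find_alias(inode);

	if (alias) {
		pr_info("inode %lu has alias %pd\n", inode->i_ino, alias);
		dput(alias);
	}
}
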
 912/*
 913 *	Try to kill dentries associated with this inode.
 914 * WARNING: you must own a reference to inode.
 915 */
 916void d_prune_aliases(struct inode *inode)
 917{
 918	struct dentry *dentry;
 919restart:
 920	spin_lock(&inode->i_lock);
 921	hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
 922		spin_lock(&dentry->d_lock);
 923		if (!dentry->d_lockref.count) {
 924			struct dentry *parent = lock_parent(dentry);
 925			if (likely(!dentry->d_lockref.count)) {
 926				__dentry_kill(dentry);
 927				dput(parent);
 928				goto restart;
 929			}
 930			if (parent)
 931				spin_unlock(&parent->d_lock);
 932		}
 933		spin_unlock(&dentry->d_lock);
 934	}
 935	spin_unlock(&inode->i_lock);
 936}
 937EXPORT_SYMBOL(d_prune_aliases);
 938
 939static void shrink_dentry_list(struct list_head *list)
 940{
 941	struct dentry *dentry, *parent;
 942
 943	while (!list_empty(list)) {
 944		struct inode *inode;
 945		dentry = list_entry(list->prev, struct dentry, d_lru);
 946		spin_lock(&dentry->d_lock);
 947		parent = lock_parent(dentry);
 948
 949		/*
 950		 * The dispose list is isolated and dentries are not accounted
 951		 * to the LRU here, so we can simply remove it from the list
 952		 * here regardless of whether it is referenced or not.
 953		 */
 954		d_shrink_del(dentry);
 955
 956		/*
 957		 * We found an inuse dentry which was not removed from
 958		 * the LRU because of laziness during lookup. Do not free it.
 959		 */
 960		if (dentry->d_lockref.count > 0) {
 961			spin_unlock(&dentry->d_lock);
 962			if (parent)
 963				spin_unlock(&parent->d_lock);
 964			continue;
 965		}
 966
 967
 968		if (unlikely(dentry->d_flags & DCACHE_DENTRY_KILLED)) {
 969			bool can_free = dentry->d_flags & DCACHE_MAY_FREE;
 970			spin_unlock(&dentry->d_lock);
 971			if (parent)
 972				spin_unlock(&parent->d_lock);
 973			if (can_free)
 974				dentry_free(dentry);
 975			continue;
 976		}
 977
 978		inode = dentry->d_inode;
 979		if (inode && unlikely(!spin_trylock(&inode->i_lock))) {
 980			d_shrink_add(dentry, list);
 981			spin_unlock(&dentry->d_lock);
 982			if (parent)
 983				spin_unlock(&parent->d_lock);
 984			continue;
 985		}
 986
 987		__dentry_kill(dentry);
 988
 989		/*
 990		 * We need to prune ancestors too. This is necessary to prevent
 991		 * quadratic behavior of shrink_dcache_parent(), but is also
 992		 * expected to be beneficial in reducing dentry cache
 993		 * fragmentation.
 994		 */
 995		dentry = parent;
 996		while (dentry && !lockref_put_or_lock(&dentry->d_lockref)) {
 997			parent = lock_parent(dentry);
 998			if (dentry->d_lockref.count != 1) {
 999				dentry->d_lockref.count--;
1000				spin_unlock(&dentry->d_lock);
1001				if (parent)
1002					spin_unlock(&parent->d_lock);
1003				break;
1004			}
1005			inode = dentry->d_inode;	/* can't be NULL */
1006			if (unlikely(!spin_trylock(&inode->i_lock))) {
1007				spin_unlock(&dentry->d_lock);
1008				if (parent)
1009					spin_unlock(&parent->d_lock);
1010				cpu_relax();
1011				continue;
1012			}
1013			__dentry_kill(dentry);
1014			dentry = parent;
1015		}
1016	}
1017}
1018
1019static enum lru_status dentry_lru_isolate(struct list_head *item,
1020		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
1021{
1022	struct list_head *freeable = arg;
1023	struct dentry	*dentry = container_of(item, struct dentry, d_lru);
1024
1025
1026	/*
1027	 * we are inverting the lru lock/dentry->d_lock here,
1028	 * so use a trylock. If we fail to get the lock, just skip
1029	 * it
1030	 */
1031	if (!spin_trylock(&dentry->d_lock))
1032		return LRU_SKIP;
1033
1034	/*
1035	 * Referenced dentries are still in use. If they have active
1036	 * counts, just remove them from the LRU. Otherwise give them
1037	 * another pass through the LRU.
1038	 */
1039	if (dentry->d_lockref.count) {
1040		d_lru_isolate(lru, dentry);
1041		spin_unlock(&dentry->d_lock);
1042		return LRU_REMOVED;
1043	}
1044
1045	if (dentry->d_flags & DCACHE_REFERENCED) {
1046		dentry->d_flags &= ~DCACHE_REFERENCED;
1047		spin_unlock(&dentry->d_lock);
1048
1049		/*
1050		 * The list move itself will be made by the common LRU code. At
1051		 * this point, we've dropped the dentry->d_lock but keep the
1052		 * lru lock. This is safe to do, since every list movement is
1053		 * protected by the lru lock even if both locks are held.
1054		 *
1055		 * This is guaranteed by the fact that all LRU management
1056		 * functions are intermediated by the LRU API calls like
1057		 * list_lru_add and list_lru_del. List movement in this file
1058		 * only ever occurs through these functions or through callbacks
1059		 * like this one, that are called from the LRU API.
1060		 *
1061		 * The only exceptions to this are functions like
1062		 * shrink_dentry_list, and code that first checks for the
1063		 * DCACHE_SHRINK_LIST flag.  Those are guaranteed to be
1064		 * operating only with stack provided lists after they are
1065		 * properly isolated from the main list.  It is thus always a
1066		 * local access.
1067		 */
1068		return LRU_ROTATE;
1069	}
1070
1071	d_lru_shrink_move(lru, dentry, freeable);
1072	spin_unlock(&dentry->d_lock);
1073
1074	return LRU_REMOVED;
1075}
1076
1077/**
1078 * prune_dcache_sb - shrink the dcache
1079 * @sb: superblock
1080 * @sc: shrink control, passed to list_lru_shrink_walk()
1081 *
1082 * Attempt to shrink the superblock dcache LRU by @sc->nr_to_scan entries. This
1083 * is done when we need more memory and called from the superblock shrinker
1084 * function.
1085 *
1086 * This function may fail to free any resources if all the dentries are in
1087 * use.
1088 */
1089long prune_dcache_sb(struct super_block *sb, struct shrink_control *sc)
1090{
1091	LIST_HEAD(dispose);
1092	long freed;
1093
1094	freed = list_lru_shrink_walk(&sb->s_dentry_lru, sc,
1095				     dentry_lru_isolate, &dispose);
1096	shrink_dentry_list(&dispose);
1097	return freed;
1098}
1099
1100static enum lru_status dentry_lru_isolate_shrink(struct list_head *item,
1101		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
1102{
1103	struct list_head *freeable = arg;
1104	struct dentry	*dentry = container_of(item, struct dentry, d_lru);
1105
1106	/*
1107	 * we are inverting the lru lock/dentry->d_lock here,
1108	 * so use a trylock. If we fail to get the lock, just skip
1109	 * it
1110	 */
1111	if (!spin_trylock(&dentry->d_lock))
1112		return LRU_SKIP;
1113
1114	d_lru_shrink_move(lru, dentry, freeable);
1115	spin_unlock(&dentry->d_lock);
1116
1117	return LRU_REMOVED;
1118}
1119
1120
1121/**
1122 * shrink_dcache_sb - shrink dcache for a superblock
1123 * @sb: superblock
1124 *
1125 * Shrink the dcache for the specified super block. This is used to free
1126 * the dcache before unmounting a file system.
1127 */
1128void shrink_dcache_sb(struct super_block *sb)
1129{
1130	long freed;
1131
1132	do {
1133		LIST_HEAD(dispose);
1134
1135		freed = list_lru_walk(&sb->s_dentry_lru,
1136			dentry_lru_isolate_shrink, &dispose, UINT_MAX);
1137
1138		this_cpu_sub(nr_dentry_unused, freed);
1139		shrink_dentry_list(&dispose);
1140	} while (freed > 0);
1141}
1142EXPORT_SYMBOL(shrink_dcache_sb);
1143
1144/**
1145 * enum d_walk_ret - action to take during tree walk
1146 * @D_WALK_CONTINUE:	continue walk
1147 * @D_WALK_QUIT:	quit walk
1148 * @D_WALK_NORETRY:	quit when retry is needed
1149 * @D_WALK_SKIP:	skip this dentry and its children
1150 */
1151enum d_walk_ret {
1152	D_WALK_CONTINUE,
1153	D_WALK_QUIT,
1154	D_WALK_NORETRY,
1155	D_WALK_SKIP,
1156};
1157
1158/**
1159 * d_walk - walk the dentry tree
1160 * @parent:	start of walk
1161 * @data:	data passed to @enter() and @finish()
1162 * @enter:	callback when first entering the dentry
1163 * @finish:	callback when successfully finished the walk
1164 *
1165 * The @enter() and @finish() callbacks are called with d_lock held.
1166 */
1167static void d_walk(struct dentry *parent, void *data,
1168		   enum d_walk_ret (*enter)(void *, struct dentry *),
1169		   void (*finish)(void *))
1170{
1171	struct dentry *this_parent;
1172	struct list_head *next;
1173	unsigned seq = 0;
1174	enum d_walk_ret ret;
1175	bool retry = true;
1176
1177again:
1178	read_seqbegin_or_lock(&rename_lock, &seq);
1179	this_parent = parent;
1180	spin_lock(&this_parent->d_lock);
1181
1182	ret = enter(data, this_parent);
1183	switch (ret) {
1184	case D_WALK_CONTINUE:
1185		break;
1186	case D_WALK_QUIT:
1187	case D_WALK_SKIP:
1188		goto out_unlock;
1189	case D_WALK_NORETRY:
1190		retry = false;
1191		break;
1192	}
1193repeat:
1194	next = this_parent->d_subdirs.next;
1195resume:
1196	while (next != &this_parent->d_subdirs) {
1197		struct list_head *tmp = next;
1198		struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
1199		next = tmp->next;
1200
1201		if (unlikely(dentry->d_flags & DCACHE_DENTRY_CURSOR))
1202			continue;
1203
1204		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
1205
1206		ret = enter(data, dentry);
1207		switch (ret) {
1208		case D_WALK_CONTINUE:
1209			break;
1210		case D_WALK_QUIT:
1211			spin_unlock(&dentry->d_lock);
1212			goto out_unlock;
1213		case D_WALK_NORETRY:
1214			retry = false;
1215			break;
1216		case D_WALK_SKIP:
1217			spin_unlock(&dentry->d_lock);
1218			continue;
1219		}
1220
1221		if (!list_empty(&dentry->d_subdirs)) {
1222			spin_unlock(&this_parent->d_lock);
1223			spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
1224			this_parent = dentry;
1225			spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
1226			goto repeat;
1227		}
1228		spin_unlock(&dentry->d_lock);
1229	}
1230	/*
1231	 * All done at this level ... ascend and resume the search.
1232	 */
1233	rcu_read_lock();
1234ascend:
1235	if (this_parent != parent) {
1236		struct dentry *child = this_parent;
1237		this_parent = child->d_parent;
1238
1239		spin_unlock(&child->d_lock);
1240		spin_lock(&this_parent->d_lock);
1241
1242		/* might go back up the wrong parent if we have had a rename. */
1243		if (need_seqretry(&rename_lock, seq))
1244			goto rename_retry;
1245		/* go into the first sibling still alive */
1246		do {
1247			next = child->d_child.next;
1248			if (next == &this_parent->d_subdirs)
1249				goto ascend;
1250			child = list_entry(next, struct dentry, d_child);
1251		} while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED));
1252		rcu_read_unlock();
1253		goto resume;
1254	}
1255	if (need_seqretry(&rename_lock, seq))
1256		goto rename_retry;
1257	rcu_read_unlock();
1258	if (finish)
1259		finish(data);
1260
1261out_unlock:
1262	spin_unlock(&this_parent->d_lock);
1263	done_seqretry(&rename_lock, seq);
1264	return;
1265
1266rename_retry:
1267	spin_unlock(&this_parent->d_lock);
1268	rcu_read_unlock();
1269	BUG_ON(seq & 1);
1270	if (!retry)
1271		return;
1272	seq = 1;
1273	goto again;
1274}
1275
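/*
 * Editor's sketch: the shape of a d_walk() client.  The @enter callback
 * runs under the dentry's d_lock, must not sleep, and steers the walk via
 * its return value.  d_walk() is static, so only code in this file can
 * use it; the counter below is purely illustrative.
 */
struct example_counter {
	unsigned long seen;
};

static enum d_walk_ret example_count_one(void *data, struct dentry *dentry)
{
	struct example_counter *c = data;

	c->seen++;			/* d_lock held: keep this cheap */
	return D_WALK_CONTINUE;
}

static unsigned long example_count_tree(struct dentry *root)
{
	struct example_counter c = { 0 };

	d_walk(root, &c, example_count_one, NULL);
	return c.seen;
}
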
1276struct check_mount {
1277	struct vfsmount *mnt;
1278	unsigned int mounted;
1279};
1280
1281static enum d_walk_ret path_check_mount(void *data, struct dentry *dentry)
1282{
1283	struct check_mount *info = data;
1284	struct path path = { .mnt = info->mnt, .dentry = dentry };
1285
1286	if (likely(!d_mountpoint(dentry)))
1287		return D_WALK_CONTINUE;
1288	if (__path_is_mountpoint(&path)) {
1289		info->mounted = 1;
1290		return D_WALK_QUIT;
1291	}
1292	return D_WALK_CONTINUE;
1293}
1294
1295/**
1296 * path_has_submounts - check for mounts over a dentry in the
1297 *                      current namespace.
1298 * @parent: path to check.
1299 *
1300 * Return true if the parent or its subdirectories contain
1301 * a mount point in the current namespace.
1302 */
1303int path_has_submounts(const struct path *parent)
1304{
1305	struct check_mount data = { .mnt = parent->mnt, .mounted = 0 };
1306
1307	read_seqlock_excl(&mount_lock);
1308	d_walk(parent->dentry, &data, path_check_mount, NULL);
1309	read_sequnlock_excl(&mount_lock);
1310
1311	return data.mounted;
1312}
1313EXPORT_SYMBOL(path_has_submounts);
1314
1315/*
1316 * Called by mount code to set a mountpoint and check if the mountpoint is
1317 * reachable (e.g. NFS can unhash a directory dentry and then the complete
1318 * subtree can become unreachable).
1319 *
1320 * Only one of d_invalidate() and d_set_mounted() must succeed.  For
1321 * this reason take rename_lock and d_lock on dentry and ancestors.
1322 */
1323int d_set_mounted(struct dentry *dentry)
1324{
1325	struct dentry *p;
1326	int ret = -ENOENT;
1327	write_seqlock(&rename_lock);
1328	for (p = dentry->d_parent; !IS_ROOT(p); p = p->d_parent) {
1329		/* Need exclusion wrt. d_invalidate() */
1330		spin_lock(&p->d_lock);
1331		if (unlikely(d_unhashed(p))) {
1332			spin_unlock(&p->d_lock);
1333			goto out;
1334		}
1335		spin_unlock(&p->d_lock);
1336	}
1337	spin_lock(&dentry->d_lock);
1338	if (!d_unlinked(dentry)) {
1339		ret = -EBUSY;
1340		if (!d_mountpoint(dentry)) {
1341			dentry->d_flags |= DCACHE_MOUNTED;
1342			ret = 0;
1343		}
1344	}
1345	spin_unlock(&dentry->d_lock);
1346out:
1347	write_sequnlock(&rename_lock);
1348	return ret;
1349}
1350
1351/*
1352 * Search the dentry child list of the specified parent,
1353 * and move any unused dentries to the end of the unused
1354 * list for prune_dcache(). We descend to the next level
1355 * whenever the d_subdirs list is non-empty and continue
1356 * searching.
1357 *
1358 * It returns zero iff there are no unused children,
1359 * otherwise it returns the number of children moved to
1360 * the end of the unused list. This may not be the total
1361 * number of unused children, because select_parent can
1362 * drop the lock and return early due to latency
1363 * constraints.
1364 */
1365
1366struct select_data {
1367	struct dentry *start;
1368	struct list_head dispose;
1369	int found;
1370};
1371
1372static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
1373{
1374	struct select_data *data = _data;
1375	enum d_walk_ret ret = D_WALK_CONTINUE;
1376
1377	if (data->start == dentry)
1378		goto out;
1379
1380	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
1381		data->found++;
1382	} else {
1383		if (dentry->d_flags & DCACHE_LRU_LIST)
1384			d_lru_del(dentry);
1385		if (!dentry->d_lockref.count) {
1386			d_shrink_add(dentry, &data->dispose);
1387			data->found++;
1388		}
1389	}
1390	/*
1391	 * We can return to the caller if we have found some (this
1392	 * ensures forward progress). We'll be coming back to find
1393	 * the rest.
1394	 */
1395	if (!list_empty(&data->dispose))
1396		ret = need_resched() ? D_WALK_QUIT : D_WALK_NORETRY;
1397out:
1398	return ret;
1399}
1400
1401/**
1402 * shrink_dcache_parent - prune dcache
1403 * @parent: parent of entries to prune
1404 *
1405 * Prune the dcache to remove unused children of the parent dentry.
1406 */
1407void shrink_dcache_parent(struct dentry *parent)
1408{
1409	for (;;) {
1410		struct select_data data;
1411
1412		INIT_LIST_HEAD(&data.dispose);
1413		data.start = parent;
1414		data.found = 0;
1415
1416		d_walk(parent, &data, select_collect, NULL);
1417		if (!data.found)
1418			break;
1419
1420		shrink_dentry_list(&data.dispose);
1421		cond_resched();
1422	}
1423}
1424EXPORT_SYMBOL(shrink_dcache_parent);
1425
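/*
 * Editor's sketch: the common caller pattern - prune a directory's unused
 * descendants before deciding whether the subtree is busy, as the umount
 * and d_invalidate() paths below do.  Hypothetical helper name.
 */
static void example_prune_subtree(struct dentry *dir)
{
	shrink_dcache_parent(dir);
	/* only in-use dentries (and dir itself) remain cached now */
}
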
1426static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
1427{
1428	/* it has busy descendants; complain about those instead */
1429	if (!list_empty(&dentry->d_subdirs))
1430		return D_WALK_CONTINUE;
1431
1432	/* root with refcount 1 is fine */
1433	if (dentry == _data && dentry->d_lockref.count == 1)
1434		return D_WALK_CONTINUE;
1435
1436	printk(KERN_ERR "BUG: Dentry %p{i=%lx,n=%pd}"
1437			" still in use (%d) [unmount of %s %s]\n",
1438		       dentry,
1439		       dentry->d_inode ?
1440		       dentry->d_inode->i_ino : 0UL,
1441		       dentry,
1442		       dentry->d_lockref.count,
1443		       dentry->d_sb->s_type->name,
1444		       dentry->d_sb->s_id);
1445	WARN_ON(1);
1446	return D_WALK_CONTINUE;
1447}
1448
1449static void do_one_tree(struct dentry *dentry)
1450{
1451	shrink_dcache_parent(dentry);
1452	d_walk(dentry, dentry, umount_check, NULL);
1453	d_drop(dentry);
1454	dput(dentry);
1455}
1456
1457/*
1458 * destroy the dentries attached to a superblock on unmounting
1459 */
1460void shrink_dcache_for_umount(struct super_block *sb)
1461{
1462	struct dentry *dentry;
1463
1464	WARN(down_read_trylock(&sb->s_umount), "s_umount should've been locked");
1465
1466	dentry = sb->s_root;
1467	sb->s_root = NULL;
1468	do_one_tree(dentry);
1469
1470	while (!hlist_bl_empty(&sb->s_anon)) {
1471		dentry = dget(hlist_bl_entry(hlist_bl_first(&sb->s_anon), struct dentry, d_hash));
1472		do_one_tree(dentry);
1473	}
1474}
1475
1476struct detach_data {
1477	struct select_data select;
1478	struct dentry *mountpoint;
1479};
1480static enum d_walk_ret detach_and_collect(void *_data, struct dentry *dentry)
1481{
1482	struct detach_data *data = _data;
1483
1484	if (d_mountpoint(dentry)) {
1485		__dget_dlock(dentry);
1486		data->mountpoint = dentry;
1487		return D_WALK_QUIT;
1488	}
1489
1490	return select_collect(&data->select, dentry);
1491}
1492
1493static void check_and_drop(void *_data)
1494{
1495	struct detach_data *data = _data;
1496
1497	if (!data->mountpoint && !data->select.found)
1498		__d_drop(data->select.start);
1499}
1500
1501/**
1502 * d_invalidate - detach submounts, prune dcache, and drop
1503 * @dentry: dentry to invalidate (aka detach, prune and drop)
1504 *
1505 * no dcache lock.
1506 *
1507 * The final d_drop is done as an atomic operation relative to
1508 * rename_lock ensuring there are no races with d_set_mounted.  This
1509 * ensures there are no unhashed dentries on the path to a mountpoint.
1510 */
1511void d_invalidate(struct dentry *dentry)
1512{
1513	/*
1514	 * If it's already been dropped, return OK.
1515	 */
1516	spin_lock(&dentry->d_lock);
1517	if (d_unhashed(dentry)) {
1518		spin_unlock(&dentry->d_lock);
1519		return;
1520	}
1521	spin_unlock(&dentry->d_lock);
1522
1523	/* Negative dentries can be dropped without further checks */
1524	if (!dentry->d_inode) {
1525		d_drop(dentry);
1526		return;
1527	}
1528
1529	for (;;) {
1530		struct detach_data data;
1531
1532		data.mountpoint = NULL;
1533		INIT_LIST_HEAD(&data.select.dispose);
1534		data.select.start = dentry;
1535		data.select.found = 0;
1536
1537		d_walk(dentry, &data, detach_and_collect, check_and_drop);
1538
1539		if (data.select.found)
1540			shrink_dentry_list(&data.select.dispose);
1541
1542		if (data.mountpoint) {
1543			detach_mounts(data.mountpoint);
1544			dput(data.mountpoint);
1545		}
1546
1547		if (!data.mountpoint && !data.select.found)
1548			break;
1549
1550		cond_resched();
1551	}
1552}
1553EXPORT_SYMBOL(d_invalidate);
1554
1555/**
1556 * __d_alloc	-	allocate a dcache entry
1557 * @sb: filesystem it will belong to
1558 * @name: qstr of the name
1559 *
1560 * Allocates a dentry. It returns %NULL if there is insufficient memory
1561 * available. On a success the dentry is returned. The name passed in is
1562 * copied and the copy passed in may be reused after this call.
1563 */
1564 
1565struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
1566{
1567	struct dentry *dentry;
1568	char *dname;
1569	int err;
1570
1571	dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
1572	if (!dentry)
1573		return NULL;
1574
1575	/*
1576	 * We guarantee that the inline name is always NUL-terminated.
1577	 * This way the memcpy() done by the name switching in rename
1578	 * will still always have a NUL at the end, even if we might
1579	 * be overwriting an internal NUL character
1580	 */
1581	dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
1582	if (unlikely(!name)) {
1583		static const struct qstr anon = QSTR_INIT("/", 1);
1584		name = &anon;
1585		dname = dentry->d_iname;
1586	} else if (name->len > DNAME_INLINE_LEN-1) {
1587		size_t size = offsetof(struct external_name, name[1]);
1588		struct external_name *p = kmalloc(size + name->len,
1589						  GFP_KERNEL_ACCOUNT);
1590		if (!p) {
1591			kmem_cache_free(dentry_cache, dentry); 
1592			return NULL;
1593		}
1594		atomic_set(&p->u.count, 1);
1595		dname = p->name;
1596		if (IS_ENABLED(CONFIG_DCACHE_WORD_ACCESS))
1597			kasan_unpoison_shadow(dname,
1598				round_up(name->len + 1,	sizeof(unsigned long)));
1599	} else  {
1600		dname = dentry->d_iname;
1601	}	
1602
1603	dentry->d_name.len = name->len;
1604	dentry->d_name.hash = name->hash;
1605	memcpy(dname, name->name, name->len);
1606	dname[name->len] = 0;
1607
1608	/* Make sure we always see the terminating NUL character */
1609	smp_wmb();
1610	dentry->d_name.name = dname;
1611
1612	dentry->d_lockref.count = 1;
1613	dentry->d_flags = 0;
1614	spin_lock_init(&dentry->d_lock);
1615	seqcount_init(&dentry->d_seq);
1616	dentry->d_inode = NULL;
1617	dentry->d_parent = dentry;
1618	dentry->d_sb = sb;
1619	dentry->d_op = NULL;
1620	dentry->d_fsdata = NULL;
1621	INIT_HLIST_BL_NODE(&dentry->d_hash);
1622	INIT_LIST_HEAD(&dentry->d_lru);
1623	INIT_LIST_HEAD(&dentry->d_subdirs);
1624	INIT_HLIST_NODE(&dentry->d_u.d_alias);
1625	INIT_LIST_HEAD(&dentry->d_child);
1626	d_set_d_op(dentry, dentry->d_sb->s_d_op);
1627
1628	if (dentry->d_op && dentry->d_op->d_init) {
1629		err = dentry->d_op->d_init(dentry);
1630		if (err) {
1631			if (dname_external(dentry))
1632				kfree(external_name(dentry));
1633			kmem_cache_free(dentry_cache, dentry);
1634			return NULL;
1635		}
1636	}
1637
1638	this_cpu_inc(nr_dentry);
1639
1640	return dentry;
1641}
1642
1643/**
1644 * d_alloc	-	allocate a dcache entry
1645 * @parent: parent of entry to allocate
1646 * @name: qstr of the name
1647 *
1648 * Allocates a dentry. It returns %NULL if there is insufficient memory
1649 * available. On a success the dentry is returned. The name passed in is
1650 * copied and the copy passed in may be reused after this call.
1651 */
1652struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
1653{
1654	struct dentry *dentry = __d_alloc(parent->d_sb, name);
1655	if (!dentry)
1656		return NULL;
1657	dentry->d_flags |= DCACHE_RCUACCESS;
1658	spin_lock(&parent->d_lock);
1659	/*
1660	 * don't need child lock because it is not subject
1661	 * to concurrency here
1662	 */
1663	__dget_dlock(parent);
1664	dentry->d_parent = parent;
1665	list_add(&dentry->d_child, &parent->d_subdirs);
1666	spin_unlock(&parent->d_lock);
1667
1668	return dentry;
1669}
1670EXPORT_SYMBOL(d_alloc);
1671
1672struct dentry *d_alloc_cursor(struct dentry * parent)
1673{
1674	struct dentry *dentry = __d_alloc(parent->d_sb, NULL);
1675	if (dentry) {
1676		dentry->d_flags |= DCACHE_RCUACCESS | DCACHE_DENTRY_CURSOR;
1677		dentry->d_parent = dget(parent);
1678	}
1679	return dentry;
1680}
1681
1682/**
1683 * d_alloc_pseudo - allocate a dentry (for lookup-less filesystems)
1684 * @sb: the superblock
1685 * @name: qstr of the name
1686 *
1687 * For a filesystem that just pins its dentries in memory and never
1688 * performs lookups at all, return an unhashed IS_ROOT dentry.
1689 */
1690struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name)
1691{
1692	return __d_alloc(sb, name);
1693}
1694EXPORT_SYMBOL(d_alloc_pseudo);
1695
1696struct dentry *d_alloc_name(struct dentry *parent, const char *name)
1697{
1698	struct qstr q;
1699
1700	q.name = name;
1701	q.hash_len = hashlen_string(parent, name);
1702	return d_alloc(parent, &q);
1703}
1704EXPORT_SYMBOL(d_alloc_name);
1705
1706void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
1707{
1708	WARN_ON_ONCE(dentry->d_op);
1709	WARN_ON_ONCE(dentry->d_flags & (DCACHE_OP_HASH	|
1710				DCACHE_OP_COMPARE	|
1711				DCACHE_OP_REVALIDATE	|
1712				DCACHE_OP_WEAK_REVALIDATE	|
1713				DCACHE_OP_DELETE	|
1714				DCACHE_OP_REAL));
1715	dentry->d_op = op;
1716	if (!op)
1717		return;
1718	if (op->d_hash)
1719		dentry->d_flags |= DCACHE_OP_HASH;
1720	if (op->d_compare)
1721		dentry->d_flags |= DCACHE_OP_COMPARE;
1722	if (op->d_revalidate)
1723		dentry->d_flags |= DCACHE_OP_REVALIDATE;
1724	if (op->d_weak_revalidate)
1725		dentry->d_flags |= DCACHE_OP_WEAK_REVALIDATE;
1726	if (op->d_delete)
1727		dentry->d_flags |= DCACHE_OP_DELETE;
1728	if (op->d_prune)
1729		dentry->d_flags |= DCACHE_OP_PRUNE;
1730	if (op->d_real)
1731		dentry->d_flags |= DCACHE_OP_REAL;
1732
1733}
1734EXPORT_SYMBOL(d_set_d_op);
1735
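/*
 * Editor's sketch: filesystems rarely call d_set_d_op() by hand; they
 * usually publish a dentry_operations table via sb->s_d_op and let
 * __d_alloc() apply it (see above).  A minimal table with a placeholder
 * callback; all names here are illustrative.
 */
static int example_d_revalidate(struct dentry *dentry, unsigned int flags)
{
	return 1;		/* placeholder: always treat as valid */
}

static const struct dentry_operations example_dentry_ops = {
	.d_revalidate	= example_d_revalidate,
};

/* in the filesystem's fill_super():  sb->s_d_op = &example_dentry_ops; */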
1736
1737/*
1738 * d_set_fallthru - Mark a dentry as falling through to a lower layer
1739 * @dentry - The dentry to mark
1740 *
1741 * Mark a dentry as falling through to the lower layer (as set with
1742 * d_pin_lower()).  This flag may be recorded on the medium.
1743 */
1744void d_set_fallthru(struct dentry *dentry)
1745{
1746	spin_lock(&dentry->d_lock);
1747	dentry->d_flags |= DCACHE_FALLTHRU;
1748	spin_unlock(&dentry->d_lock);
1749}
1750EXPORT_SYMBOL(d_set_fallthru);
1751
1752static unsigned d_flags_for_inode(struct inode *inode)
1753{
1754	unsigned add_flags = DCACHE_REGULAR_TYPE;
1755
1756	if (!inode)
1757		return DCACHE_MISS_TYPE;
1758
1759	if (S_ISDIR(inode->i_mode)) {
1760		add_flags = DCACHE_DIRECTORY_TYPE;
1761		if (unlikely(!(inode->i_opflags & IOP_LOOKUP))) {
1762			if (unlikely(!inode->i_op->lookup))
1763				add_flags = DCACHE_AUTODIR_TYPE;
1764			else
1765				inode->i_opflags |= IOP_LOOKUP;
1766		}
1767		goto type_determined;
1768	}
1769
1770	if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) {
1771		if (unlikely(inode->i_op->get_link)) {
1772			add_flags = DCACHE_SYMLINK_TYPE;
1773			goto type_determined;
1774		}
1775		inode->i_opflags |= IOP_NOFOLLOW;
1776	}
1777
1778	if (unlikely(!S_ISREG(inode->i_mode)))
1779		add_flags = DCACHE_SPECIAL_TYPE;
1780
1781type_determined:
1782	if (unlikely(IS_AUTOMOUNT(inode)))
1783		add_flags |= DCACHE_NEED_AUTOMOUNT;
1784	return add_flags;
1785}
1786
1787static void __d_instantiate(struct dentry *dentry, struct inode *inode)
1788{
1789	unsigned add_flags = d_flags_for_inode(inode);
1790	WARN_ON(d_in_lookup(dentry));
1791
1792	spin_lock(&dentry->d_lock);
1793	hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
1794	raw_write_seqcount_begin(&dentry->d_seq);
1795	__d_set_inode_and_type(dentry, inode, add_flags);
1796	raw_write_seqcount_end(&dentry->d_seq);
1797	fsnotify_update_flags(dentry);
1798	spin_unlock(&dentry->d_lock);
1799}
1800
1801/**
1802 * d_instantiate - fill in inode information for a dentry
1803 * @entry: dentry to complete
1804 * @inode: inode to attach to this dentry
1805 *
1806 * Fill in inode information in the entry.
1807 *
1808 * This turns negative dentries into productive full members
1809 * of society.
1810 *
1811 * NOTE! This assumes that the inode count has been incremented
1812 * (or otherwise set) by the caller to indicate that it is now
1813 * in use by the dcache.
1814 */
1815 
1816void d_instantiate(struct dentry *entry, struct inode * inode)
1817{
1818	BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
1819	if (inode) {
1820		security_d_instantiate(entry, inode);
1821		spin_lock(&inode->i_lock);
1822		__d_instantiate(entry, inode);
1823		spin_unlock(&inode->i_lock);
1824	}
1825}
1826EXPORT_SYMBOL(d_instantiate);
1827
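/*
 * Editor's sketch: the classic ->create()-style sequence the note above
 * describes - make an inode, then turn the negative dentry positive.  The
 * inode reference is handed over to the dcache by d_instantiate().
 * example_new_inode() is a hypothetical inode constructor.
 */
static int example_create(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = example_new_inode(dir->i_sb);

	if (!inode)
		return -ENOMEM;
	d_instantiate(dentry, inode);	/* dentry now owns the inode ref */
	return 0;
}
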
1828/**
1829 * d_instantiate_no_diralias - instantiate a non-aliased dentry
1830 * @entry: dentry to complete
1831 * @inode: inode to attach to this dentry
1832 *
1833 * Fill in inode information in the entry.  If a directory alias is found, then
1834 * return an error (and drop inode).  Together with d_materialise_unique() this
1835 * guarantees that a directory inode may never have more than one alias.
1836 */
1837int d_instantiate_no_diralias(struct dentry *entry, struct inode *inode)
1838{
1839	BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
1840
1841	security_d_instantiate(entry, inode);
1842	spin_lock(&inode->i_lock);
1843	if (S_ISDIR(inode->i_mode) && !hlist_empty(&inode->i_dentry)) {
1844		spin_unlock(&inode->i_lock);
1845		iput(inode);
1846		return -EBUSY;
1847	}
1848	__d_instantiate(entry, inode);
1849	spin_unlock(&inode->i_lock);
1850
1851	return 0;
1852}
1853EXPORT_SYMBOL(d_instantiate_no_diralias);
1854
1855struct dentry *d_make_root(struct inode *root_inode)
1856{
1857	struct dentry *res = NULL;
1858
1859	if (root_inode) {
1860		res = __d_alloc(root_inode->i_sb, NULL);
1861		if (res)
1862			d_instantiate(res, root_inode);
1863		else
1864			iput(root_inode);
1865	}
1866	return res;
1867}
1868EXPORT_SYMBOL(d_make_root);
1869
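/*
 * Editor's sketch: the standard fill_super tail served by d_make_root().
 * The helper consumes the inode reference even on failure (it calls iput()
 * itself), so the caller only needs a NULL check.  The inode constructor
 * here is hypothetical.
 */
static int example_fill_super(struct super_block *sb)
{
	struct inode *root = example_make_root_inode(sb);	/* hypothetical */

	sb->s_root = d_make_root(root);
	if (!sb->s_root)
		return -ENOMEM;
	return 0;
}
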
1870static struct dentry * __d_find_any_alias(struct inode *inode)
1871{
1872	struct dentry *alias;
1873
1874	if (hlist_empty(&inode->i_dentry))
1875		return NULL;
1876	alias = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
1877	__dget(alias);
1878	return alias;
1879}
1880
1881/**
1882 * d_find_any_alias - find any alias for a given inode
1883 * @inode: inode to find an alias for
1884 *
1885 * If any aliases exist for the given inode, take and return a
1886 * reference for one of them.  If no aliases exist, return %NULL.
1887 */
1888struct dentry *d_find_any_alias(struct inode *inode)
1889{
1890	struct dentry *de;
1891
1892	spin_lock(&inode->i_lock);
1893	de = __d_find_any_alias(inode);
1894	spin_unlock(&inode->i_lock);
1895	return de;
1896}
1897EXPORT_SYMBOL(d_find_any_alias);
1898
1899static struct dentry *__d_obtain_alias(struct inode *inode, int disconnected)
1900{
1901	struct dentry *tmp;
1902	struct dentry *res;
1903	unsigned add_flags;
1904
1905	if (!inode)
1906		return ERR_PTR(-ESTALE);
1907	if (IS_ERR(inode))
1908		return ERR_CAST(inode);
1909
1910	res = d_find_any_alias(inode);
1911	if (res)
1912		goto out_iput;
1913
1914	tmp = __d_alloc(inode->i_sb, NULL);
1915	if (!tmp) {
1916		res = ERR_PTR(-ENOMEM);
1917		goto out_iput;
1918	}
1919
1920	security_d_instantiate(tmp, inode);
1921	spin_lock(&inode->i_lock);
1922	res = __d_find_any_alias(inode);
1923	if (res) {
1924		spin_unlock(&inode->i_lock);
1925		dput(tmp);
1926		goto out_iput;
1927	}
1928
1929	/* attach a disconnected dentry */
1930	add_flags = d_flags_for_inode(inode);
1931
1932	if (disconnected)
1933		add_flags |= DCACHE_DISCONNECTED;
1934
1935	spin_lock(&tmp->d_lock);
1936	__d_set_inode_and_type(tmp, inode, add_flags);
1937	hlist_add_head(&tmp->d_u.d_alias, &inode->i_dentry);
1938	hlist_bl_lock(&tmp->d_sb->s_anon);
1939	hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon);
1940	hlist_bl_unlock(&tmp->d_sb->s_anon);
1941	spin_unlock(&tmp->d_lock);
1942	spin_unlock(&inode->i_lock);
1943
1944	return tmp;
1945
1946 out_iput:
1947	iput(inode);
1948	return res;
1949}
1950
1951/**
1952 * d_obtain_alias - find or allocate a DISCONNECTED dentry for a given inode
1953 * @inode: inode to allocate the dentry for
1954 *
1955 * Obtain a dentry for an inode resulting from NFS filehandle conversion or
1956 * similar open by handle operations.  The returned dentry may be anonymous,
1957 * or may have a full name (if the inode was already in the cache).
1958 *
1959 * When called on a directory inode, we must ensure that the inode only ever
1960 * has one dentry.  If a dentry is found, that is returned instead of
1961 * allocating a new one.
1962 *
1963 * On successful return, the reference to the inode has been transferred
1964 * to the dentry.  In case of an error the reference on the inode is released.
1965 * To make it easier to use in export operations a %NULL or IS_ERR inode may
1966 * be passed in and the error will be propagated to the return value,
1967 * with a %NULL @inode replaced by ERR_PTR(-ESTALE).
1968 */
1969struct dentry *d_obtain_alias(struct inode *inode)
1970{
1971	return __d_obtain_alias(inode, 1);
1972}
1973EXPORT_SYMBOL(d_obtain_alias);
1974
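/*
 * Editor's sketch: the export_operations-style caller this helper serves.
 * example_iget() stands in for a filehandle-to-inode lookup and is
 * hypothetical.  Note the error plumbing: a NULL inode becomes
 * ERR_PTR(-ESTALE) and an IS_ERR inode is passed straight through,
 * exactly as documented above.
 */
static struct dentry *example_fh_to_dentry(struct super_block *sb, u64 ino)
{
	struct inode *inode = example_iget(sb, ino);	/* hypothetical */

	return d_obtain_alias(inode);
}
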
1975/**
1976 * d_obtain_root - find or allocate a dentry for a given inode
1977 * @inode: inode to allocate the dentry for
1978 *
1979 * Obtain an IS_ROOT dentry for the root of a filesystem.
1980 *
1981 * We must ensure that directory inodes only ever have one dentry.  If a
1982 * dentry is found, that is returned instead of allocating a new one.
1983 *
1984 * On successful return, the reference to the inode has been transferred
1985 * to the dentry.  In case of an error the reference on the inode is
1986 * released.  A %NULL or IS_ERR inode may be passed in and the error
1987 * will be propagated to the return value, with a %NULL @inode
1988 * replaced by ERR_PTR(-ESTALE).
1989 */
1990struct dentry *d_obtain_root(struct inode *inode)
1991{
1992	return __d_obtain_alias(inode, 0);
1993}
1994EXPORT_SYMBOL(d_obtain_root);
1995
1996/**
1997 * d_add_ci - lookup or allocate new dentry with case-exact name
1998 * @inode:  the inode case-insensitive lookup has found
1999 * @dentry: the negative dentry that was passed to the parent's lookup func
2000 * @name:   the case-exact name to be associated with the returned dentry
2001 *
2002 * This is to avoid filling the dcache with case-insensitive names to the
2003 * same inode, only the actual correct case is stored in the dcache for
2004 * case-insensitive filesystems.
2005 *
2006 * For a case-insensitive lookup match, if the case-exact dentry
2007 * already exists in the dcache, use it and return it.
2008 *
2009 * If no entry exists with the exact case name, allocate new dentry with
2010 * the exact case, and return the spliced entry.
2011 */
2012struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
2013			struct qstr *name)
2014{
2015	struct dentry *found, *res;
2016
2017	/*
2018	 * First check if a dentry matching the name already exists,
2019	 * if not go ahead and create it now.
2020	 */
2021	found = d_hash_and_lookup(dentry->d_parent, name);
2022	if (found) {
2023		iput(inode);
2024		return found;
2025	}
2026	if (d_in_lookup(dentry)) {
2027		found = d_alloc_parallel(dentry->d_parent, name,
2028					dentry->d_wait);
2029		if (IS_ERR(found) || !d_in_lookup(found)) {
2030			iput(inode);
2031			return found;
2032		}
2033	} else {
2034		found = d_alloc(dentry->d_parent, name);
2035		if (!found) {
2036			iput(inode);
2037			return ERR_PTR(-ENOMEM);
2038		} 
2039	}
2040	res = d_splice_alias(inode, found);
2041	if (res) {
2042		dput(found);
2043		return res;
2044	}
2045	return found;
2046}
2047EXPORT_SYMBOL(d_add_ci);
2048
2049
2050static inline bool d_same_name(const struct dentry *dentry,
2051				const struct dentry *parent,
2052				const struct qstr *name)
2053{
2054	if (likely(!(parent->d_flags & DCACHE_OP_COMPARE))) {
2055		if (dentry->d_name.len != name->len)
2056			return false;
2057		return dentry_cmp(dentry, name->name, name->len) == 0;
2058	}
2059	return parent->d_op->d_compare(dentry,
2060				       dentry->d_name.len, dentry->d_name.name,
2061				       name) == 0;
2062}
2063
2064/**
2065 * __d_lookup_rcu - search for a dentry (racy, store-free)
2066 * @parent: parent dentry
2067 * @name: qstr of name we wish to find
2068 * @seqp: returns d_seq value at the point where the dentry was found
2069 * Returns: dentry, or NULL
2070 *
2071 * __d_lookup_rcu is the dcache lookup function for rcu-walk name
2072 * resolution (store-free path walking) design described in
2073 * Documentation/filesystems/path-lookup.txt.
2074 *
2075 * This is not to be used outside core vfs.
2076 *
2077 * __d_lookup_rcu must only be used in rcu-walk mode, ie. with vfsmount lock
2078 * held, and rcu_read_lock held. The returned dentry must not be stored into
2079 * without taking d_lock and checking d_seq sequence count against @seq
2080 * returned here.
2081 *
2082 * A refcount may be taken on the found dentry with the d_rcu_to_refcount
2083 * function.
2084 *
2085 * Alternatively, __d_lookup_rcu may be called again to look up the child of
2086 * the returned dentry, so long as its parent's seqlock is checked after the
2087 * child is looked up. Thus, an interlocking stepping of sequence lock checks
2088 * is formed, giving integrity down the path walk.
2089 *
2090 * NOTE! The caller *has* to check the resulting dentry against the sequence
2091 * number we've returned before using any of the resulting dentry state!
2092 */
2093struct dentry *__d_lookup_rcu(const struct dentry *parent,
2094				const struct qstr *name,
2095				unsigned *seqp)
2096{
2097	u64 hashlen = name->hash_len;
2098	const unsigned char *str = name->name;
2099	struct hlist_bl_head *b = d_hash(hashlen_hash(hashlen));
2100	struct hlist_bl_node *node;
2101	struct dentry *dentry;
2102
2103	/*
2104	 * Note: There is significant duplication with __d_lookup_rcu which is
2105	 * required to prevent single threaded performance regressions
2106	 * especially on architectures where smp_rmb (in seqcounts) are costly.
2107	 * Keep the two functions in sync.
2108	 */
2109
2110	/*
2111	 * The hash list is protected using RCU.
2112	 *
2113	 * Carefully use d_seq when comparing a candidate dentry, to avoid
2114	 * races with d_move().
2115	 *
2116	 * It is possible that concurrent renames can mess up our list
2117	 * walk here and result in missing our dentry, resulting in the
2118	 * false-negative result. d_lookup() protects against concurrent
2119	 * renames using rename_lock seqlock.
2120	 *
2121	 * See Documentation/filesystems/path-lookup.txt for more details.
2122	 */
2123	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
2124		unsigned seq;
2125
2126seqretry:
2127		/*
2128		 * The dentry sequence count protects us from concurrent
2129		 * renames, and thus protects parent and name fields.
2130		 *
2131		 * The caller must perform a seqcount check in order
2132		 * to do anything useful with the returned dentry.
2133		 *
2134		 * NOTE! We do a "raw" seqcount_begin here. That means that
2135		 * we don't wait for the sequence count to stabilize if it
2136		 * is in the middle of a sequence change. If we do the slow
2137		 * dentry compare, we will do seqretries until it is stable,
2138		 * and if we end up with a successful lookup, we actually
2139		 * want to exit RCU lookup anyway.
2140		 *
2141		 * Note that raw_seqcount_begin still *does* smp_rmb(), so
2142		 * we are still guaranteed NUL-termination of ->d_name.name.
2143		 */
2144		seq = raw_seqcount_begin(&dentry->d_seq);
2145		if (dentry->d_parent != parent)
2146			continue;
2147		if (d_unhashed(dentry))
2148			continue;
2149
2150		if (unlikely(parent->d_flags & DCACHE_OP_COMPARE)) {
2151			int tlen;
2152			const char *tname;
2153			if (dentry->d_name.hash != hashlen_hash(hashlen))
2154				continue;
2155			tlen = dentry->d_name.len;
2156			tname = dentry->d_name.name;
2157			/* we want a consistent (name,len) pair */
2158			if (read_seqcount_retry(&dentry->d_seq, seq)) {
2159				cpu_relax();
2160				goto seqretry;
2161			}
2162			if (parent->d_op->d_compare(dentry,
2163						    tlen, tname, name) != 0)
2164				continue;
2165		} else {
2166			if (dentry->d_name.hash_len != hashlen)
2167				continue;
2168			if (dentry_cmp(dentry, str, hashlen_len(hashlen)) != 0)
2169				continue;
2170		}
2171		*seqp = seq;
2172		return dentry;
2173	}
2174	return NULL;
2175}
2176
2177/**
2178 * d_lookup - search for a dentry
2179 * @parent: parent dentry
2180 * @name: qstr of name we wish to find
2181 * Returns: dentry, or NULL
2182 *
2183 * d_lookup searches the children of the parent dentry for the name in
2184 * question. If the dentry is found its reference count is incremented and the
2185 * dentry is returned. The caller must use dput to free the entry when it has
2186 * finished using it. %NULL is returned if the dentry does not exist.
2187 */
2188struct dentry *d_lookup(const struct dentry *parent, const struct qstr *name)
2189{
2190	struct dentry *dentry;
2191	unsigned seq;
2192
2193	do {
2194		seq = read_seqbegin(&rename_lock);
2195		dentry = __d_lookup(parent, name);
2196		if (dentry)
2197			break;
2198	} while (read_seqretry(&rename_lock, seq));
2199	return dentry;
2200}
2201EXPORT_SYMBOL(d_lookup);
2202
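d_lookup() hands back a counted reference, so every hit must eventually be
balanced with dput(). A hedged usage sketch:

	/* Hypothetical: does @parent have a cached child called @name? */
	static bool has_cached_child(struct dentry *parent, const struct qstr *name)
	{
		struct dentry *child = d_lookup(parent, name);

		if (!child)
			return false;
		dput(child);		/* balance the reference d_lookup() took */
		return true;
	}
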
2203/**
2204 * __d_lookup - search for a dentry (racy)
2205 * @parent: parent dentry
2206 * @name: qstr of name we wish to find
2207 * Returns: dentry, or NULL
2208 *
2209 * __d_lookup is like d_lookup, however it may (rarely) return a
2210 * false-negative result due to unrelated rename activity.
2211 *
2212 * __d_lookup is slightly faster by avoiding rename_lock read seqlock,
2213 * however it must be used carefully, eg. with a following d_lookup in
2214 * the case of failure.
2215 *
2216 * __d_lookup callers must be commented.
2217 */
2218struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
2219{
2220	unsigned int hash = name->hash;
2221	struct hlist_bl_head *b = d_hash(hash);
2222	struct hlist_bl_node *node;
2223	struct dentry *found = NULL;
2224	struct dentry *dentry;
2225
2226	/*
2227	 * Note: There is significant duplication with __d_lookup_rcu which is
2228	 * required to prevent single threaded performance regressions
2229	 * especially on architectures where smp_rmb (in seqcounts) are costly.
2230	 * Keep the two functions in sync.
2231	 */
2232
2233	/*
2234	 * The hash list is protected using RCU.
2235	 *
2236	 * Take d_lock when comparing a candidate dentry, to avoid races
2237	 * with d_move().
2238	 *
2239	 * It is possible that concurrent renames can mess up our list
2240	 * walk here and result in missing our dentry, resulting in the
2241	 * false-negative result. d_lookup() protects against concurrent
2242	 * renames using rename_lock seqlock.
2243	 *
2244	 * See Documentation/filesystems/path-lookup.txt for more details.
2245	 */
2246	rcu_read_lock();
2247	
2248	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
2249
2250		if (dentry->d_name.hash != hash)
2251			continue;
2252
2253		spin_lock(&dentry->d_lock);
2254		if (dentry->d_parent != parent)
2255			goto next;
2256		if (d_unhashed(dentry))
2257			goto next;
2258
2259		if (!d_same_name(dentry, parent, name))
2260			goto next;
2261
2262		dentry->d_lockref.count++;
2263		found = dentry;
2264		spin_unlock(&dentry->d_lock);
2265		break;
2266next:
2267		spin_unlock(&dentry->d_lock);
2268	}
2269	rcu_read_unlock();
2270
2271	return found;
2272}
2273
2274/**
2275 * d_hash_and_lookup - hash the qstr then search for a dentry
2276 * @dir: Directory to search in
2277 * @name: qstr of name we wish to find
2278 *
2279 * On lookup failure NULL is returned; on bad name - ERR_PTR(-error)
2280 */
2281struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
2282{
2283	/*
2284	 * Check for a fs-specific hash function. Note that we must
2285	 * calculate the standard hash first, as the d_op->d_hash()
2286	 * routine may choose to leave the hash value unchanged.
2287	 */
2288	name->hash = full_name_hash(dir, name->name, name->len);
2289	if (dir->d_flags & DCACHE_OP_HASH) {
2290		int err = dir->d_op->d_hash(dir, name);
2291		if (unlikely(err < 0))
2292			return ERR_PTR(err);
2293	}
2294	return d_lookup(dir, name);
2295}
2296EXPORT_SYMBOL(d_hash_and_lookup);
2297
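The three-way result (dentry, NULL, or ERR_PTR) is easy to mishandle. A
hedged sketch of a careful caller, built on the QSTR_INIT() initializer used
near the top of this file:

	/* Hypothetical: probe the dcache for child @s of @dir. */
	static struct dentry *probe_child(struct dentry *dir, const char *s)
	{
		struct qstr q = QSTR_INIT(s, strlen(s));
		struct dentry *d = d_hash_and_lookup(dir, &q);

		if (IS_ERR(d))
			return d;	/* ->d_hash() rejected the name */
		if (!d)
			return NULL;	/* simply not in the dcache */
		return d;		/* counted reference; caller must dput() */
	}
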
2298/*
2299 * When a file is deleted, we have two options:
2300 * - turn this dentry into a negative dentry
2301 * - unhash this dentry and free it.
2302 *
2303 * Usually, we want to just turn this into
2304 * a negative dentry, but if anybody else is
2305 * currently using the dentry or the inode
2306 * we can't do that and we fall back on removing
2307 * it from the hash queues and waiting for
2308 * it to be deleted later when it has no users
2309 */
2310 
2311/**
2312 * d_delete - delete a dentry
2313 * @dentry: The dentry to delete
2314 *
2315 * Turn the dentry into a negative dentry if possible, otherwise
2316 * remove it from the hash queues so it can be deleted later
2317 */
2318 
2319void d_delete(struct dentry * dentry)
2320{
2321	struct inode *inode;
2322	int isdir = 0;
2323	/*
2324	 * Are we the only user?
2325	 */
2326again:
2327	spin_lock(&dentry->d_lock);
2328	inode = dentry->d_inode;
2329	isdir = S_ISDIR(inode->i_mode);
2330	if (dentry->d_lockref.count == 1) {
2331		if (!spin_trylock(&inode->i_lock)) {
2332			spin_unlock(&dentry->d_lock);
2333			cpu_relax();
2334			goto again;
2335		}
2336		dentry->d_flags &= ~DCACHE_CANT_MOUNT;
2337		dentry_unlink_inode(dentry);
2338		fsnotify_nameremove(dentry, isdir);
2339		return;
2340	}
2341
2342	if (!d_unhashed(dentry))
2343		__d_drop(dentry);
2344
2345	spin_unlock(&dentry->d_lock);
2346
2347	fsnotify_nameremove(dentry, isdir);
2348}
2349EXPORT_SYMBOL(d_delete);
2350
2351static void __d_rehash(struct dentry *entry)
2352{
2353	struct hlist_bl_head *b = d_hash(entry->d_name.hash);
2354	BUG_ON(!d_unhashed(entry));
2355	hlist_bl_lock(b);
2356	hlist_bl_add_head_rcu(&entry->d_hash, b);
2357	hlist_bl_unlock(b);
2358}
2359
2360/**
2361 * d_rehash	- add an entry back to the hash
2362 * @entry: dentry to add to the hash
2363 *
2364 * Adds a dentry to the hash according to its name.
2365 */
2366 
2367void d_rehash(struct dentry * entry)
2368{
2369	spin_lock(&entry->d_lock);
2370	__d_rehash(entry);
2371	spin_unlock(&entry->d_lock);
2372}
2373EXPORT_SYMBOL(d_rehash);
2374
2375static inline unsigned start_dir_add(struct inode *dir)
2376{
2377
2378	for (;;) {
2379		unsigned n = dir->i_dir_seq;
2380		if (!(n & 1) && cmpxchg(&dir->i_dir_seq, n, n + 1) == n)
2381			return n;
2382		cpu_relax();
2383	}
2384}
2385
2386static inline void end_dir_add(struct inode *dir, unsigned n)
2387{
2388	smp_store_release(&dir->i_dir_seq, n + 2);
2389}
2390
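Together these two helpers form a tiny open-coded sequence counter on
i_dir_seq: start_dir_add() spins until the count is even and claims it by
cmpxchg()ing it to odd, and end_dir_add() publishes the next even value. A
hedged sketch of how a writer brackets an insertion (the real user is
__d_add() below):

	/* Hypothetical writer side: bracket an addition to @dir. */
	unsigned n = start_dir_add(dir);	/* i_dir_seq is now odd */
	/* ... hash the new dentry ... */
	end_dir_add(dir, n);			/* store n + 2: even again */

Lockless readers such as d_alloc_parallel() sample the count with
smp_load_acquire(), mask off the low bit, and re-check it after their walk.
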
2391static void d_wait_lookup(struct dentry *dentry)
2392{
2393	if (d_in_lookup(dentry)) {
2394		DECLARE_WAITQUEUE(wait, current);
2395		add_wait_queue(dentry->d_wait, &wait);
2396		do {
2397			set_current_state(TASK_UNINTERRUPTIBLE);
2398			spin_unlock(&dentry->d_lock);
2399			schedule();
2400			spin_lock(&dentry->d_lock);
2401		} while (d_in_lookup(dentry));
2402	}
2403}
2404
2405struct dentry *d_alloc_parallel(struct dentry *parent,
2406				const struct qstr *name,
2407				wait_queue_head_t *wq)
2408{
2409	unsigned int hash = name->hash;
2410	struct hlist_bl_head *b = in_lookup_hash(parent, hash);
2411	struct hlist_bl_node *node;
2412	struct dentry *new = d_alloc(parent, name);
2413	struct dentry *dentry;
2414	unsigned seq, r_seq, d_seq;
2415
2416	if (unlikely(!new))
2417		return ERR_PTR(-ENOMEM);
2418
2419retry:
2420	rcu_read_lock();
2421	seq = smp_load_acquire(&parent->d_inode->i_dir_seq) & ~1;
2422	r_seq = read_seqbegin(&rename_lock);
2423	dentry = __d_lookup_rcu(parent, name, &d_seq);
2424	if (unlikely(dentry)) {
2425		if (!lockref_get_not_dead(&dentry->d_lockref)) {
2426			rcu_read_unlock();
2427			goto retry;
2428		}
2429		if (read_seqcount_retry(&dentry->d_seq, d_seq)) {
2430			rcu_read_unlock();
2431			dput(dentry);
2432			goto retry;
2433		}
2434		rcu_read_unlock();
2435		dput(new);
2436		return dentry;
2437	}
2438	if (unlikely(read_seqretry(&rename_lock, r_seq))) {
2439		rcu_read_unlock();
2440		goto retry;
2441	}
2442	hlist_bl_lock(b);
2443	if (unlikely(parent->d_inode->i_dir_seq != seq)) {
2444		hlist_bl_unlock(b);
2445		rcu_read_unlock();
2446		goto retry;
2447	}
2448	/*
2449	 * No changes for the parent since the beginning of d_lookup().
2450	 * Since all removals from the chain happen with hlist_bl_lock(),
2451	 * any potential in-lookup matches are going to stay here until
2452	 * we unlock the chain.  All fields are stable in everything
2453	 * we encounter.
2454	 */
2455	hlist_bl_for_each_entry(dentry, node, b, d_u.d_in_lookup_hash) {
2456		if (dentry->d_name.hash != hash)
2457			continue;
2458		if (dentry->d_parent != parent)
2459			continue;
2460		if (!d_same_name(dentry, parent, name))
2461			continue;
2462		hlist_bl_unlock(b);
2463		/* now we can try to grab a reference */
2464		if (!lockref_get_not_dead(&dentry->d_lockref)) {
2465			rcu_read_unlock();
2466			goto retry;
2467		}
2468
2469		rcu_read_unlock();
2470		/*
2471		 * somebody is likely to be still doing lookup for it;
2472		 * wait for them to finish
2473		 */
2474		spin_lock(&dentry->d_lock);
2475		d_wait_lookup(dentry);
2476		/*
2477		 * it's not in-lookup anymore; in principle we should repeat
2478		 * everything from dcache lookup, but it's likely to be what
2479		 * d_lookup() would've found anyway.  If it is, just return it;
2480		 * otherwise we really have to repeat the whole thing.
2481		 */
2482		if (unlikely(dentry->d_name.hash != hash))
2483			goto mismatch;
2484		if (unlikely(dentry->d_parent != parent))
2485			goto mismatch;
2486		if (unlikely(d_unhashed(dentry)))
2487			goto mismatch;
2488		if (unlikely(!d_same_name(dentry, parent, name)))
2489			goto mismatch;
2490		/* OK, it *is* a hashed match; return it */
2491		spin_unlock(&dentry->d_lock);
2492		dput(new);
2493		return dentry;
2494	}
2495	rcu_read_unlock();
2496	/* we can't take ->d_lock here; it's OK, though. */
2497	new->d_flags |= DCACHE_PAR_LOOKUP;
2498	new->d_wait = wq;
2499	hlist_bl_add_head_rcu(&new->d_u.d_in_lookup_hash, b);
2500	hlist_bl_unlock(b);
2501	return new;
2502mismatch:
2503	spin_unlock(&dentry->d_lock);
2504	dput(dentry);
2505	goto retry;
2506}
2507EXPORT_SYMBOL(d_alloc_parallel);
2508
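Callers must check d_in_lookup() on the result: if it is still set, the
caller won the race, owns the in-lookup state, and has to end it, e.g. via
d_add() or d_lookup_done(). A hedged sketch in the style of the readdir-plus
paths of nfs/fuse; foo_instantiate() is hypothetical and reference/error
handling is trimmed:

	/* Hypothetical: instantiate @name under @parent with a known inode. */
	static void foo_instantiate(struct dentry *parent,
				    const struct qstr *name,
				    struct inode *inode)
	{
		DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
		struct dentry *dentry = d_alloc_parallel(parent, name, &wq);

		if (IS_ERR(dentry))
			return;
		if (d_in_lookup(dentry))	/* we won the race: finish it */
			d_add(dentry, inode);	/* also ends the in-lookup state */
		/* else: someone else instantiated it; we still own @inode */
		dput(dentry);			/* drop d_alloc_parallel()'s ref */
	}
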
2509void __d_lookup_done(struct dentry *dentry)
2510{
2511	struct hlist_bl_head *b = in_lookup_hash(dentry->d_parent,
2512						 dentry->d_name.hash);
2513	hlist_bl_lock(b);
2514	dentry->d_flags &= ~DCACHE_PAR_LOOKUP;
2515	__hlist_bl_del(&dentry->d_u.d_in_lookup_hash);
2516	wake_up_all(dentry->d_wait);
2517	dentry->d_wait = NULL;
2518	hlist_bl_unlock(b);
2519	INIT_HLIST_NODE(&dentry->d_u.d_alias);
2520	INIT_LIST_HEAD(&dentry->d_lru);
2521}
2522EXPORT_SYMBOL(__d_lookup_done);
2523
2524/* inode->i_lock held if inode is non-NULL */
2525
2526static inline void __d_add(struct dentry *dentry, struct inode *inode)
2527{
2528	struct inode *dir = NULL;
2529	unsigned n;
2530	spin_lock(&dentry->d_lock);
2531	if (unlikely(d_in_lookup(dentry))) {
2532		dir = dentry->d_parent->d_inode;
2533		n = start_dir_add(dir);
2534		__d_lookup_done(dentry);
2535	}
2536	if (inode) {
2537		unsigned add_flags = d_flags_for_inode(inode);
2538		hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
2539		raw_write_seqcount_begin(&dentry->d_seq);
2540		__d_set_inode_and_type(dentry, inode, add_flags);
2541		raw_write_seqcount_end(&dentry->d_seq);
2542		fsnotify_update_flags(dentry);
2543	}
2544	__d_rehash(dentry);
2545	if (dir)
2546		end_dir_add(dir, n);
2547	spin_unlock(&dentry->d_lock);
2548	if (inode)
2549		spin_unlock(&inode->i_lock);
2550}
2551
2552/**
2553 * d_add - add dentry to hash queues
2554 * @entry: dentry to add
2555 * @inode: The inode to attach to this dentry
2556 *
2557 * This adds the entry to the hash queues and initializes @inode.
2558 * The entry was actually filled in earlier during d_alloc().
2559 */
2560
2561void d_add(struct dentry *entry, struct inode *inode)
2562{
2563	if (inode) {
2564		security_d_instantiate(entry, inode);
2565		spin_lock(&inode->i_lock);
2566	}
2567	__d_add(entry, inode);
2568}
2569EXPORT_SYMBOL(d_add);
2570
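The simplest user of this shape is libfs's simple_lookup(), which passes a
NULL inode to install a negative dentry. A hedged sketch of that pattern:

	/* Sketch of a ->lookup() that only ever caches negative entries. */
	static struct dentry *noent_lookup(struct inode *dir, struct dentry *dentry,
					   unsigned int flags)
	{
		if (dentry->d_name.len > NAME_MAX)
			return ERR_PTR(-ENAMETOOLONG);
		d_add(dentry, NULL);	/* hash @dentry as a negative entry */
		return NULL;		/* the VFS keeps using @dentry itself */
	}
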
2571/**
2572 * d_exact_alias - find and hash an exact unhashed alias
2573 * @entry: dentry to add
2574 * @inode: The inode to go with this dentry
2575 *
2576 * If an unhashed dentry with the same name/parent and desired
2577 * inode already exists, hash and return it.  Otherwise, return
2578 * NULL.
2579 *
2580 * Parent directory should be locked.
2581 */
2582struct dentry *d_exact_alias(struct dentry *entry, struct inode *inode)
2583{
2584	struct dentry *alias;
2585	unsigned int hash = entry->d_name.hash;
2586
2587	spin_lock(&inode->i_lock);
2588	hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
2589		/*
2590		 * Don't need alias->d_lock here, because aliases with
2591		 * d_parent == entry->d_parent are not subject to name or
2592		 * parent changes, because the parent inode i_mutex is held.
2593		 */
2594		if (alias->d_name.hash != hash)
2595			continue;
2596		if (alias->d_parent != entry->d_parent)
2597			continue;
2598		if (!d_same_name(alias, entry->d_parent, &entry->d_name))
2599			continue;
2600		spin_lock(&alias->d_lock);
2601		if (!d_unhashed(alias)) {
2602			spin_unlock(&alias->d_lock);
2603			alias = NULL;
2604		} else {
2605			__dget_dlock(alias);
2606			__d_rehash(alias);
2607			spin_unlock(&alias->d_lock);
2608		}
2609		spin_unlock(&inode->i_lock);
2610		return alias;
2611	}
2612	spin_unlock(&inode->i_lock);
2613	return NULL;
2614}
2615EXPORT_SYMBOL(d_exact_alias);
2616
2617/**
2618 * dentry_update_name_case - update case insensitive dentry with a new name
2619 * @dentry: dentry to be updated
2620 * @name: new name
2621 *
2622 * Update a case insensitive dentry with new case of name.
2623 *
2624 * dentry must have been returned by d_lookup with name @name. Old and new
2625 * name lengths must match (ie. no d_compare which allows mismatched name
2626 * lengths).
2627 *
2628 * Parent inode i_mutex must be held over d_lookup and into this call (to
2629 * keep renames and concurrent inserts, and readdir(2) away).
2630 */
2631void dentry_update_name_case(struct dentry *dentry, const struct qstr *name)
2632{
2633	BUG_ON(!inode_is_locked(dentry->d_parent->d_inode));
2634	BUG_ON(dentry->d_name.len != name->len); /* d_lookup gives this */
2635
2636	spin_lock(&dentry->d_lock);
2637	write_seqcount_begin(&dentry->d_seq);
2638	memcpy((unsigned char *)dentry->d_name.name, name->name, name->len);
2639	write_seqcount_end(&dentry->d_seq);
2640	spin_unlock(&dentry->d_lock);
2641}
2642EXPORT_SYMBOL(dentry_update_name_case);
2643
2644static void swap_names(struct dentry *dentry, struct dentry *target)
2645{
2646	if (unlikely(dname_external(target))) {
2647		if (unlikely(dname_external(dentry))) {
2648			/*
2649			 * Both external: swap the pointers
2650			 */
2651			swap(target->d_name.name, dentry->d_name.name);
2652		} else {
2653			/*
2654			 * dentry:internal, target:external.  Steal target's
2655			 * storage and make target internal.
2656			 */
2657			memcpy(target->d_iname, dentry->d_name.name,
2658					dentry->d_name.len + 1);
2659			dentry->d_name.name = target->d_name.name;
2660			target->d_name.name = target->d_iname;
2661		}
2662	} else {
2663		if (unlikely(dname_external(dentry))) {
2664			/*
2665			 * dentry:external, target:internal.  Give dentry's
2666			 * storage to target and make dentry internal
2667			 */
2668			memcpy(dentry->d_iname, target->d_name.name,
2669					target->d_name.len + 1);
2670			target->d_name.name = dentry->d_name.name;
2671			dentry->d_name.name = dentry->d_iname;
2672		} else {
2673			/*
2674			 * Both are internal.
2675			 */
2676			unsigned int i;
2677			BUILD_BUG_ON(!IS_ALIGNED(DNAME_INLINE_LEN, sizeof(long)));
2678			kmemcheck_mark_initialized(dentry->d_iname, DNAME_INLINE_LEN);
2679			kmemcheck_mark_initialized(target->d_iname, DNAME_INLINE_LEN);
2680			for (i = 0; i < DNAME_INLINE_LEN / sizeof(long); i++) {
2681				swap(((long *) &dentry->d_iname)[i],
2682				     ((long *) &target->d_iname)[i]);
2683			}
2684		}
2685	}
2686	swap(dentry->d_name.hash_len, target->d_name.hash_len);
2687}
2688
2689static void copy_name(struct dentry *dentry, struct dentry *target)
2690{
2691	struct external_name *old_name = NULL;
2692	if (unlikely(dname_external(dentry)))
2693		old_name = external_name(dentry);
2694	if (unlikely(dname_external(target))) {
2695		atomic_inc(&external_name(target)->u.count);
2696		dentry->d_name = target->d_name;
2697	} else {
2698		memcpy(dentry->d_iname, target->d_name.name,
2699				target->d_name.len + 1);
2700		dentry->d_name.name = dentry->d_iname;
2701		dentry->d_name.hash_len = target->d_name.hash_len;
2702	}
2703	if (old_name && likely(atomic_dec_and_test(&old_name->u.count)))
2704		kfree_rcu(old_name, u.head);
2705}
2706
2707static void dentry_lock_for_move(struct dentry *dentry, struct dentry *target)
2708{
2709	/*
2710	 * XXXX: do we really need to take target->d_lock?
2711	 */
2712	if (IS_ROOT(dentry) || dentry->d_parent == target->d_parent)
2713		spin_lock(&target->d_parent->d_lock);
2714	else {
2715		if (d_ancestor(dentry->d_parent, target->d_parent)) {
2716			spin_lock(&dentry->d_parent->d_lock);
2717			spin_lock_nested(&target->d_parent->d_lock,
2718						DENTRY_D_LOCK_NESTED);
2719		} else {
2720			spin_lock(&target->d_parent->d_lock);
2721			spin_lock_nested(&dentry->d_parent->d_lock,
2722						DENTRY_D_LOCK_NESTED);
2723		}
2724	}
2725	if (target < dentry) {
2726		spin_lock_nested(&target->d_lock, 2);
2727		spin_lock_nested(&dentry->d_lock, 3);
2728	} else {
2729		spin_lock_nested(&dentry->d_lock, 2);
2730		spin_lock_nested(&target->d_lock, 3);
2731	}
2732}
2733
2734static void dentry_unlock_for_move(struct dentry *dentry, struct dentry *target)
2735{
2736	if (target->d_parent != dentry->d_parent)
2737		spin_unlock(&dentry->d_parent->d_lock);
2738	if (target->d_parent != target)
2739		spin_unlock(&target->d_parent->d_lock);
2740	spin_unlock(&target->d_lock);
2741	spin_unlock(&dentry->d_lock);
2742}
2743
2744/*
2745 * When switching names, the actual string doesn't strictly have to
2746 * be preserved in the target - because we're dropping the target
2747 * anyway. As such, we can just do a simple memcpy() to copy over
2748 * the new name before we switch, unless we are going to rehash
2749 * it.  Note that if we *do* unhash the target, we are not allowed
2750 * to rehash it without giving it a new name/hash key - whether
2751 * we swap or overwrite the names here, resulting name won't match
2752 * the reality in filesystem; it's only there for d_path() purposes.
2753 * Note that all of this is happening under rename_lock, so the
2754 * any hash lookup seeing it in the middle of manipulations will
2755 * be discarded anyway.  So we do not care what happens to the hash
2756 * key in that case.
2757 */
2758/*
2759 * __d_move - move a dentry
2760 * @dentry: entry to move
2761 * @target: new dentry
2762 * @exchange: exchange the two dentries
2763 *
2764 * Update the dcache to reflect the move of a file name. Negative
2765 * dcache entries should not be moved in this way. Caller must hold
2766 * rename_lock, the i_mutex of the source and target directories,
2767 * and the sb->s_vfs_rename_mutex if they differ. See lock_rename().
2768 */
2769static void __d_move(struct dentry *dentry, struct dentry *target,
2770		     bool exchange)
2771{
2772	struct inode *dir = NULL;
2773	unsigned n;
2774	if (!dentry->d_inode)
2775		printk(KERN_WARNING "VFS: moving negative dcache entry\n");
2776
2777	BUG_ON(d_ancestor(dentry, target));
2778	BUG_ON(d_ancestor(target, dentry));
2779
2780	dentry_lock_for_move(dentry, target);
2781	if (unlikely(d_in_lookup(target))) {
2782		dir = target->d_parent->d_inode;
2783		n = start_dir_add(dir);
2784		__d_lookup_done(target);
2785	}
2786
2787	write_seqcount_begin(&dentry->d_seq);
2788	write_seqcount_begin_nested(&target->d_seq, DENTRY_D_LOCK_NESTED);
2789
2790	/* unhash both */
2791	/* __d_drop does write_seqcount_barrier, but they're OK to nest. */
2792	__d_drop(dentry);
2793	__d_drop(target);
2794
2795	/* Switch the names.. */
2796	if (exchange)
2797		swap_names(dentry, target);
2798	else
2799		copy_name(dentry, target);
2800
2801	/* rehash in new place(s) */
2802	__d_rehash(dentry);
2803	if (exchange)
2804		__d_rehash(target);
2805
2806	/* ... and switch them in the tree */
2807	if (IS_ROOT(dentry)) {
2808		/* splicing a tree */
2809		dentry->d_flags |= DCACHE_RCUACCESS;
2810		dentry->d_parent = target->d_parent;
2811		target->d_parent = target;
2812		list_del_init(&target->d_child);
2813		list_move(&dentry->d_child, &dentry->d_parent->d_subdirs);
2814	} else {
2815		/* swapping two dentries */
2816		swap(dentry->d_parent, target->d_parent);
2817		list_move(&target->d_child, &target->d_parent->d_subdirs);
2818		list_move(&dentry->d_child, &dentry->d_parent->d_subdirs);
2819		if (exchange)
2820			fsnotify_update_flags(target);
2821		fsnotify_update_flags(dentry);
2822	}
2823
2824	write_seqcount_end(&target->d_seq);
2825	write_seqcount_end(&dentry->d_seq);
2826
2827	if (dir)
2828		end_dir_add(dir, n);
2829	dentry_unlock_for_move(dentry, target);
2830}
2831
2832/*
2833 * d_move - move a dentry
2834 * @dentry: entry to move
2835 * @target: new dentry
2836 *
2837 * Update the dcache to reflect the move of a file name. Negative
2838 * dcache entries should not be moved in this way. See the locking
2839 * requirements for __d_move.
2840 */
2841void d_move(struct dentry *dentry, struct dentry *target)
2842{
2843	write_seqlock(&rename_lock);
2844	__d_move(dentry, target, false);
2845	write_sequnlock(&rename_lock);
2846}
2847EXPORT_SYMBOL(d_move);
2848
2849/*
2850 * d_exchange - exchange two dentries
2851 * @dentry1: first dentry
2852 * @dentry2: second dentry
2853 */
2854void d_exchange(struct dentry *dentry1, struct dentry *dentry2)
2855{
2856	write_seqlock(&rename_lock);
2857
2858	WARN_ON(!dentry1->d_inode);
2859	WARN_ON(!dentry2->d_inode);
2860	WARN_ON(IS_ROOT(dentry1));
2861	WARN_ON(IS_ROOT(dentry2));
2862
2863	__d_move(dentry1, dentry2, true);
2864
2865	write_sequnlock(&rename_lock);
2866}
2867
2868/**
2869 * d_ancestor - search for an ancestor
2870 * @p1: ancestor dentry
2871 * @p2: child dentry
2872 *
2873 * Returns the ancestor dentry of p2 which is a child of p1, if p1 is
2874 * an ancestor of p2, else NULL.
2875 */
2876struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2)
2877{
2878	struct dentry *p;
2879
2880	for (p = p2; !IS_ROOT(p); p = p->d_parent) {
2881		if (p->d_parent == p1)
2882			return p;
2883	}
2884	return NULL;
2885}
2886
2887/*
2888 * This helper attempts to cope with remotely renamed directories
2889 *
2890 * It assumes that the caller is already holding
2891 * dentry->d_parent->d_inode->i_mutex, and rename_lock
2892 *
2893 * Note: If ever the locking in lock_rename() changes, then please
2894 * remember to update this too...
2895 */
2896static int __d_unalias(struct inode *inode,
2897		struct dentry *dentry, struct dentry *alias)
2898{
2899	struct mutex *m1 = NULL;
2900	struct rw_semaphore *m2 = NULL;
2901	int ret = -ESTALE;
2902
2903	/* If alias and dentry share a parent, then no extra locks required */
2904	if (alias->d_parent == dentry->d_parent)
2905		goto out_unalias;
2906
2907	/* See lock_rename() */
2908	if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex))
2909		goto out_err;
2910	m1 = &dentry->d_sb->s_vfs_rename_mutex;
2911	if (!inode_trylock_shared(alias->d_parent->d_inode))
2912		goto out_err;
2913	m2 = &alias->d_parent->d_inode->i_rwsem;
2914out_unalias:
2915	__d_move(alias, dentry, false);
2916	ret = 0;
2917out_err:
2918	if (m2)
2919		up_read(m2);
2920	if (m1)
2921		mutex_unlock(m1);
2922	return ret;
2923}
2924
2925/**
2926 * d_splice_alias - splice a disconnected dentry into the tree if one exists
2927 * @inode:  the inode which may have a disconnected dentry
2928 * @dentry: a negative dentry which we want to point to the inode.
2929 *
2930 * If inode is a directory and has an IS_ROOT alias, then d_move that in
2931 * place of the given dentry and return it, else simply d_add the inode
2932 * to the dentry and return NULL.
2933 *
2934 * If a non-IS_ROOT directory is found, the filesystem is corrupt, and
2935 * we should error out: directories can't have multiple aliases.
2936 *
2937 * This is needed in the lookup routine of any filesystem that is exportable
2938 * (via knfsd) so that we can build dcache paths to directories effectively.
2939 *
2940 * If a dentry was found and moved, then it is returned.  Otherwise NULL
2941 * is returned.  This matches the expected return value of ->lookup.
2942 *
2943 * Cluster filesystems may call this function with a negative, hashed dentry.
2944 * In that case, we know that the inode will be a regular file, and also this
2945 * will only occur during atomic_open. So we need to check for the dentry
2946 * being already hashed only in the final case.
2947 */
2948struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
2949{
2950	if (IS_ERR(inode))
2951		return ERR_CAST(inode);
2952
2953	BUG_ON(!d_unhashed(dentry));
2954
2955	if (!inode)
2956		goto out;
2957
2958	security_d_instantiate(dentry, inode);
2959	spin_lock(&inode->i_lock);
2960	if (S_ISDIR(inode->i_mode)) {
2961		struct dentry *new = __d_find_any_alias(inode);
2962		if (unlikely(new)) {
2963			/* The reference to new ensures it remains an alias */
2964			spin_unlock(&inode->i_lock);
2965			write_seqlock(&rename_lock);
2966			if (unlikely(d_ancestor(new, dentry))) {
2967				write_sequnlock(&rename_lock);
2968				dput(new);
2969				new = ERR_PTR(-ELOOP);
2970				pr_warn_ratelimited(
2971					"VFS: Lookup of '%s' in %s %s"
2972					" would have caused loop\n",
2973					dentry->d_name.name,
2974					inode->i_sb->s_type->name,
2975					inode->i_sb->s_id);
2976			} else if (!IS_ROOT(new)) {
2977				int err = __d_unalias(inode, dentry, new);
2978				write_sequnlock(&rename_lock);
2979				if (err) {
2980					dput(new);
2981					new = ERR_PTR(err);
2982				}
2983			} else {
2984				__d_move(new, dentry, false);
2985				write_sequnlock(&rename_lock);
2986			}
2987			iput(inode);
2988			return new;
2989		}
2990	}
2991out:
2992	__d_add(dentry, inode);
2993	return NULL;
2994}
2995EXPORT_SYMBOL(d_splice_alias);
2996
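For a typical exportable filesystem, the whole ->lookup() method reduces to
fetching the inode and handing it to d_splice_alias(), which copes with NULL
and ERR_PTR inodes by itself. A hedged sketch; foo_iget_by_name() is a
hypothetical stand-in for the filesystem's inode lookup:

	static struct dentry *foo_lookup(struct inode *dir, struct dentry *dentry,
					 unsigned int flags)
	{
		/* NULL on ENOENT, ERR_PTR on other failures (hypothetical) */
		struct inode *inode = foo_iget_by_name(dir, &dentry->d_name);

		return d_splice_alias(inode, dentry);
	}
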
2997static int prepend(char **buffer, int *buflen, const char *str, int namelen)
2998{
2999	*buflen -= namelen;
3000	if (*buflen < 0)
3001		return -ENAMETOOLONG;
3002	*buffer -= namelen;
3003	memcpy(*buffer, str, namelen);
3004	return 0;
3005}
3006
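prepend() fills the buffer from the end towards the front, which is why all
of the path builders below start at buf + buflen and return a pointer that
lands somewhere inside the buffer. A small worked example (values
illustrative):

	char buf[8];
	char *p = buf + sizeof(buf);
	int len = sizeof(buf);

	prepend(&p, &len, "\0", 1);	/* tail of buf:      "\0" */
	prepend(&p, &len, "/b", 2);	/* tail of buf:    "/b\0" */
	prepend(&p, &len, "/a", 2);	/* p -> "/a/b\0", len == 3 */
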
3007/**
3008 * prepend_name - prepend a pathname in front of current buffer pointer
3009 * @buffer: buffer pointer
3010 * @buflen: allocated length of the buffer
3011 * @name:   name string and length qstr structure
3012 *
3013 * With RCU path tracing, it may race with d_move(). Use ACCESS_ONCE() to
3014 * make sure that either the old or the new name pointer and length are
3015 * fetched. However, there may be mismatch between length and pointer.
3016 * The length cannot be trusted, we need to copy it byte-by-byte until
3017 * the length is reached or a null byte is found. It also prepends "/" at
3018 * the beginning of the name. The sequence number check at the caller will
3019 * retry it again when a d_move() does happen. So any garbage in the buffer
3020 * due to mismatched pointer and length will be discarded.
3021 *
3022 * Data dependency barrier is needed to make sure that we see that terminating
3023 * NUL.  Alpha strikes again, film at 11...
3024 */
3025static int prepend_name(char **buffer, int *buflen, const struct qstr *name)
3026{
3027	const char *dname = ACCESS_ONCE(name->name);
3028	u32 dlen = ACCESS_ONCE(name->len);
3029	char *p;
3030
3031	smp_read_barrier_depends();
3032
3033	*buflen -= dlen + 1;
3034	if (*buflen < 0)
3035		return -ENAMETOOLONG;
3036	p = *buffer -= dlen + 1;
3037	*p++ = '/';
3038	while (dlen--) {
3039		char c = *dname++;
3040		if (!c)
3041			break;
3042		*p++ = c;
3043	}
3044	return 0;
3045}
3046
3047/**
3048 * prepend_path - Prepend path string to a buffer
3049 * @path: the dentry/vfsmount to report
3050 * @root: root vfsmnt/dentry
3051 * @buffer: pointer to the end of the buffer
3052 * @buflen: pointer to buffer length
3053 *
3054 * The function will first try to write out the pathname without taking any
3055 * lock other than the RCU read lock to make sure that dentries won't go away.
3056 * It only checks the sequence number of the global rename_lock as any change
3057 * in the dentry's d_seq will be preceded by changes in the rename_lock
3058 * sequence number. If the sequence number had been changed, it will restart
3059 * the whole pathname back-tracing sequence again by taking the rename_lock.
3060 * In this case, there is no need to take the RCU read lock as the recursive
3061 * parent pointer references will keep the dentry chain alive as long as no
3062 * rename operation is performed.
3063 */
3064static int prepend_path(const struct path *path,
3065			const struct path *root,
3066			char **buffer, int *buflen)
3067{
3068	struct dentry *dentry;
3069	struct vfsmount *vfsmnt;
3070	struct mount *mnt;
3071	int error = 0;
3072	unsigned seq, m_seq = 0;
3073	char *bptr;
3074	int blen;
3075
3076	rcu_read_lock();
3077restart_mnt:
3078	read_seqbegin_or_lock(&mount_lock, &m_seq);
3079	seq = 0;
3080	rcu_read_lock();
3081restart:
3082	bptr = *buffer;
3083	blen = *buflen;
3084	error = 0;
3085	dentry = path->dentry;
3086	vfsmnt = path->mnt;
3087	mnt = real_mount(vfsmnt);
3088	read_seqbegin_or_lock(&rename_lock, &seq);
3089	while (dentry != root->dentry || vfsmnt != root->mnt) {
3090		struct dentry * parent;
3091
3092		if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
3093			struct mount *parent = ACCESS_ONCE(mnt->mnt_parent);
3094			/* Escaped? */
3095			if (dentry != vfsmnt->mnt_root) {
3096				bptr = *buffer;
3097				blen = *buflen;
3098				error = 3;
3099				break;
3100			}
3101			/* Global root? */
3102			if (mnt != parent) {
3103				dentry = ACCESS_ONCE(mnt->mnt_mountpoint);
3104				mnt = parent;
3105				vfsmnt = &mnt->mnt;
3106				continue;
3107			}
3108			if (!error)
3109				error = is_mounted(vfsmnt) ? 1 : 2;
3110			break;
3111		}
3112		parent = dentry->d_parent;
3113		prefetch(parent);
3114		error = prepend_name(&bptr, &blen, &dentry->d_name);
3115		if (error)
3116			break;
3117
3118		dentry = parent;
3119	}
3120	if (!(seq & 1))
3121		rcu_read_unlock();
3122	if (need_seqretry(&rename_lock, seq)) {
3123		seq = 1;
3124		goto restart;
3125	}
3126	done_seqretry(&rename_lock, seq);
3127
3128	if (!(m_seq & 1))
3129		rcu_read_unlock();
3130	if (need_seqretry(&mount_lock, m_seq)) {
3131		m_seq = 1;
3132		goto restart_mnt;
3133	}
3134	done_seqretry(&mount_lock, m_seq);
3135
3136	if (error >= 0 && bptr == *buffer) {
3137		if (--blen < 0)
3138			error = -ENAMETOOLONG;
3139		else
3140			*--bptr = '/';
3141	}
3142	*buffer = bptr;
3143	*buflen = blen;
3144	return error;
3145}
3146
3147/**
3148 * __d_path - return the path of a dentry
3149 * @path: the dentry/vfsmount to report
3150 * @root: root vfsmnt/dentry
3151 * @buf: buffer to return value in
3152 * @buflen: buffer length
3153 *
3154 * Convert a dentry into an ASCII path name.
3155 *
3156 * Returns a pointer into the buffer or an error code if the
3157 * path was too long.
3158 *
3159 * "buflen" should be positive.
3160 *
3161 * If the path is not reachable from the supplied root, return %NULL.
3162 */
3163char *__d_path(const struct path *path,
3164	       const struct path *root,
3165	       char *buf, int buflen)
3166{
3167	char *res = buf + buflen;
3168	int error;
3169
3170	prepend(&res, &buflen, "\0", 1);
3171	error = prepend_path(path, root, &res, &buflen);
3172
3173	if (error < 0)
3174		return ERR_PTR(error);
3175	if (error > 0)
3176		return NULL;
3177	return res;
3178}
3179
3180char *d_absolute_path(const struct path *path,
3181	       char *buf, int buflen)
3182{
3183	struct path root = {};
3184	char *res = buf + buflen;
3185	int error;
3186
3187	prepend(&res, &buflen, "\0", 1);
3188	error = prepend_path(path, &root, &res, &buflen);
3189
3190	if (error > 1)
3191		error = -EINVAL;
3192	if (error < 0)
3193		return ERR_PTR(error);
3194	return res;
3195}
3196
3197/*
3198 * same as __d_path but appends "(deleted)" for unlinked files.
3199 */
3200static int path_with_deleted(const struct path *path,
3201			     const struct path *root,
3202			     char **buf, int *buflen)
3203{
3204	prepend(buf, buflen, "\0", 1);
3205	if (d_unlinked(path->dentry)) {
3206		int error = prepend(buf, buflen, " (deleted)", 10);
3207		if (error)
3208			return error;
3209	}
3210
3211	return prepend_path(path, root, buf, buflen);
3212}
3213
3214static int prepend_unreachable(char **buffer, int *buflen)
3215{
3216	return prepend(buffer, buflen, "(unreachable)", 13);
3217}
3218
3219static void get_fs_root_rcu(struct fs_struct *fs, struct path *root)
3220{
3221	unsigned seq;
3222
3223	do {
3224		seq = read_seqcount_begin(&fs->seq);
3225		*root = fs->root;
3226	} while (read_seqcount_retry(&fs->seq, seq));
3227}
3228
3229/**
3230 * d_path - return the path of a dentry
3231 * @path: path to report
3232 * @buf: buffer to return value in
3233 * @buflen: buffer length
3234 *
3235 * Convert a dentry into an ASCII path name. If the entry has been deleted
3236 * the string " (deleted)" is appended. Note that this is ambiguous.
3237 *
3238 * Returns a pointer into the buffer or an error code if the path was
3239 * too long. Note: Callers should use the returned pointer, not the passed
3240 * in buffer, to use the name! The implementation often starts at an offset
3241 * into the buffer, and may leave 0 bytes at the start.
3242 *
3243 * "buflen" should be positive.
3244 */
3245char *d_path(const struct path *path, char *buf, int buflen)
3246{
3247	char *res = buf + buflen;
3248	struct path root;
3249	int error;
3250
3251	/*
3252	 * We have various synthetic filesystems that never get mounted.  On
3253	 * these filesystems dentries are never used for lookup purposes, and
3254	 * thus don't need to be hashed.  They also don't need a name until a
3255	 * user wants to identify the object in /proc/pid/fd/.  The little hack
3256	 * below allows us to generate a name for these objects on demand:
3257	 *
3258	 * Some pseudo inodes are mountable.  When they are mounted
3259	 * path->dentry == path->mnt->mnt_root.  In that case don't call d_dname
3260	 * and instead have d_path return the mounted path.
3261	 */
3262	if (path->dentry->d_op && path->dentry->d_op->d_dname &&
3263	    (!IS_ROOT(path->dentry) || path->dentry != path->mnt->mnt_root))
3264		return path->dentry->d_op->d_dname(path->dentry, buf, buflen);
3265
3266	rcu_read_lock();
3267	get_fs_root_rcu(current->fs, &root);
3268	error = path_with_deleted(path, &root, &res, &buflen);
3269	rcu_read_unlock();
3270
3271	if (error < 0)
3272		res = ERR_PTR(error);
3273	return res;
3274}
3275EXPORT_SYMBOL(d_path);
3276
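Because the name is built tail-first, the returned pointer normally lies in
the middle of the buffer, not at its start. A hedged sketch of correct use:

	/* Hypothetical: log the path of an open file. */
	static void log_file_path(struct file *file)
	{
		char *buf = kmalloc(PATH_MAX, GFP_KERNEL);
		char *p;

		if (!buf)
			return;
		p = d_path(&file->f_path, buf, PATH_MAX);
		if (!IS_ERR(p))
			pr_info("path: %s\n", p);	/* use p, never buf */
		kfree(buf);
	}
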
3277/*
3278 * Helper function for dentry_operations.d_dname() members
3279 */
3280char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen,
3281			const char *fmt, ...)
3282{
3283	va_list args;
3284	char temp[64];
3285	int sz;
3286
3287	va_start(args, fmt);
3288	sz = vsnprintf(temp, sizeof(temp), fmt, args) + 1;
3289	va_end(args);
3290
3291	if (sz > sizeof(temp) || sz > buflen)
3292		return ERR_PTR(-ENAMETOOLONG);
3293
3294	buffer += buflen - sz;
3295	return memcpy(buffer, temp, sz);
3296}
3297
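pipefs is the classic consumer: its ->d_dname() synthesizes names such as
"pipe:[1234]" on demand. A representative sketch for a hypothetical "foo"
filesystem:

	static char *foo_dname(struct dentry *dentry, char *buffer, int buflen)
	{
		return dynamic_dname(dentry, buffer, buflen, "foo:[%lu]",
				     d_inode(dentry)->i_ino);
	}
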
3298char *simple_dname(struct dentry *dentry, char *buffer, int buflen)
3299{
3300	char *end = buffer + buflen;
3301	/* these dentries are never renamed, so d_lock is not needed */
3302	if (prepend(&end, &buflen, " (deleted)", 11) ||
3303	    prepend(&end, &buflen, dentry->d_name.name, dentry->d_name.len) ||
3304	    prepend(&end, &buflen, "/", 1))
3305		end = ERR_PTR(-ENAMETOOLONG);
3306	return end;
3307}
3308EXPORT_SYMBOL(simple_dname);
3309
3310/*
3311 * Write full pathname from the root of the filesystem into the buffer.
3312 */
3313static char *__dentry_path(struct dentry *d, char *buf, int buflen)
3314{
3315	struct dentry *dentry;
3316	char *end, *retval;
3317	int len, seq = 0;
3318	int error = 0;
3319
3320	if (buflen < 2)
3321		goto Elong;
3322
3323	rcu_read_lock();
3324restart:
3325	dentry = d;
3326	end = buf + buflen;
3327	len = buflen;
3328	prepend(&end, &len, "\0", 1);
3329	/* Get '/' right */
3330	retval = end-1;
3331	*retval = '/';
3332	read_seqbegin_or_lock(&rename_lock, &seq);
3333	while (!IS_ROOT(dentry)) {
3334		struct dentry *parent = dentry->d_parent;
3335
3336		prefetch(parent);
3337		error = prepend_name(&end, &len, &dentry->d_name);
3338		if (error)
3339			break;
3340
3341		retval = end;
3342		dentry = parent;
3343	}
3344	if (!(seq & 1))
3345		rcu_read_unlock();
3346	if (need_seqretry(&rename_lock, seq)) {
3347		seq = 1;
3348		goto restart;
3349	}
3350	done_seqretry(&rename_lock, seq);
3351	if (error)
3352		goto Elong;
3353	return retval;
3354Elong:
3355	return ERR_PTR(-ENAMETOOLONG);
3356}
3357
3358char *dentry_path_raw(struct dentry *dentry, char *buf, int buflen)
3359{
3360	return __dentry_path(dentry, buf, buflen);
3361}
3362EXPORT_SYMBOL(dentry_path_raw);
3363
3364char *dentry_path(struct dentry *dentry, char *buf, int buflen)
3365{
3366	char *p = NULL;
3367	char *retval;
3368
3369	if (d_unlinked(dentry)) {
3370		p = buf + buflen;
3371		if (prepend(&p, &buflen, "//deleted", 10) != 0)
3372			goto Elong;
3373		buflen++;
3374	}
3375	retval = __dentry_path(dentry, buf, buflen);
3376	if (!IS_ERR(retval) && p)
3377		*p = '/';	/* restore '/' overridden with '\0' */
3378	return retval;
3379Elong:
3380	return ERR_PTR(-ENAMETOOLONG);
3381}
3382
3383static void get_fs_root_and_pwd_rcu(struct fs_struct *fs, struct path *root,
3384				    struct path *pwd)
3385{
3386	unsigned seq;
3387
3388	do {
3389		seq = read_seqcount_begin(&fs->seq);
3390		*root = fs->root;
3391		*pwd = fs->pwd;
3392	} while (read_seqcount_retry(&fs->seq, seq));
3393}
3394
3395/*
3396 * NOTE! The user-level library version returns a
3397 * character pointer. The kernel system call just
3398 * returns the length of the buffer filled (which
3399 * includes the ending '\0' character), or a negative
3400 * error value. So libc would do something like
3401 *
3402 *	char *getcwd(char * buf, size_t size)
3403 *	{
3404 *		int retval;
3405 *
3406 *		retval = sys_getcwd(buf, size);
3407 *		if (retval >= 0)
3408 *			return buf;
3409 *		errno = -retval;
3410 *		return NULL;
3411 *	}
3412 */
3413SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
3414{
3415	int error;
3416	struct path pwd, root;
3417	char *page = __getname();
3418
3419	if (!page)
3420		return -ENOMEM;
3421
3422	rcu_read_lock();
3423	get_fs_root_and_pwd_rcu(current->fs, &root, &pwd);
3424
3425	error = -ENOENT;
3426	if (!d_unlinked(pwd.dentry)) {
3427		unsigned long len;
3428		char *cwd = page + PATH_MAX;
3429		int buflen = PATH_MAX;
3430
3431		prepend(&cwd, &buflen, "\0", 1);
3432		error = prepend_path(&pwd, &root, &cwd, &buflen);
3433		rcu_read_unlock();
3434
3435		if (error < 0)
3436			goto out;
3437
3438		/* Unreachable from current root */
3439		if (error > 0) {
3440			error = prepend_unreachable(&cwd, &buflen);
3441			if (error)
3442				goto out;
3443		}
3444
3445		error = -ERANGE;
3446		len = PATH_MAX + page - cwd;
3447		if (len <= size) {
3448			error = len;
3449			if (copy_to_user(buf, cwd, len))
3450				error = -EFAULT;
3451		}
3452	} else {
3453		rcu_read_unlock();
3454	}
3455
3456out:
3457	__putname(page);
3458	return error;
3459}
3460
3461/*
3462 * Test whether new_dentry is a subdirectory of old_dentry.
3463 *
3464 * Trivially implemented using the dcache structure
3465 */
3466
3467/**
3468 * is_subdir - is new dentry a subdirectory of old_dentry
3469 * @new_dentry: new dentry
3470 * @old_dentry: old dentry
3471 *
3472 * Returns true if new_dentry is a subdirectory of the parent (at any depth).
3473 * Returns false otherwise.
3474 * Caller must ensure that "new_dentry" is pinned before calling is_subdir()
3475 */
3476  
3477bool is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
3478{
3479	bool result;
3480	unsigned seq;
3481
3482	if (new_dentry == old_dentry)
3483		return true;
3484
3485	do {
3486		/* for restarting inner loop in case of seq retry */
3487		seq = read_seqbegin(&rename_lock);
3488		/*
3489		 * Need rcu_read_lock() to protect against d_parent being
3490		 * reassigned by a concurrent d_move()
3491		 */
3492		rcu_read_lock();
3493		if (d_ancestor(old_dentry, new_dentry))
3494			result = true;
3495		else
3496			result = false;
3497		rcu_read_unlock();
3498	} while (read_seqretry(&rename_lock, seq));
3499
3500	return result;
3501}
3502
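A hedged sketch of the kind of containment check callers typically want:

	/* Hypothetical: is @file somewhere below @root (at any depth)? */
	static bool file_below(struct file *file, struct dentry *root)
	{
		return is_subdir(file->f_path.dentry, root);
	}
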
3503static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
3504{
3505	struct dentry *root = data;
3506	if (dentry != root) {
3507		if (d_unhashed(dentry) || !dentry->d_inode)
3508			return D_WALK_SKIP;
3509
3510		if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
3511			dentry->d_flags |= DCACHE_GENOCIDE;
3512			dentry->d_lockref.count--;
3513		}
3514	}
3515	return D_WALK_CONTINUE;
3516}
3517
3518void d_genocide(struct dentry *parent)
3519{
3520	d_walk(parent, parent, d_genocide_kill, NULL);
3521}
3522
3523void d_tmpfile(struct dentry *dentry, struct inode *inode)
3524{
3525	inode_dec_link_count(inode);
3526	BUG_ON(dentry->d_name.name != dentry->d_iname ||
3527		!hlist_unhashed(&dentry->d_u.d_alias) ||
3528		!d_unlinked(dentry));
3529	spin_lock(&dentry->d_parent->d_lock);
3530	spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
3531	dentry->d_name.len = sprintf(dentry->d_iname, "#%llu",
3532				(unsigned long long)inode->i_ino);
3533	spin_unlock(&dentry->d_lock);
3534	spin_unlock(&dentry->d_parent->d_lock);
3535	d_instantiate(dentry, inode);
3536}
3537EXPORT_SYMBOL(d_tmpfile);
3538
3539static __initdata unsigned long dhash_entries;
3540static int __init set_dhash_entries(char *str)
3541{
3542	if (!str)
3543		return 0;
3544	dhash_entries = simple_strtoul(str, &str, 0);
3545	return 1;
3546}
3547__setup("dhash_entries=", set_dhash_entries);
3548
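This lets the hash table size be pinned at boot rather than scaled from
available memory, e.g. by appending something like the following (value
purely illustrative) to the kernel command line:

	dhash_entries=1048576
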
3549static void __init dcache_init_early(void)
3550{
3551	unsigned int loop;
3552
3553	/* If hashes are distributed across NUMA nodes, defer
3554	 * hash allocation until vmalloc space is available.
3555	 */
3556	if (hashdist)
3557		return;
3558
3559	dentry_hashtable =
3560		alloc_large_system_hash("Dentry cache",
3561					sizeof(struct hlist_bl_head),
3562					dhash_entries,
3563					13,
3564					HASH_EARLY,
3565					&d_hash_shift,
3566					NULL,
3567					0,
3568					0);
3569
3570	for (loop = 0; loop < (1U << d_hash_shift); loop++)
3571		INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
3572}
3573
3574static void __init dcache_init(void)
3575{
3576	unsigned int loop;
3577
3578	/* 
3579	 * A constructor could be added for stable state like the lists,
3580	 * but it is probably not worth it because of the cache nature
3581	 * of the dcache. 
3582	 */
3583	dentry_cache = KMEM_CACHE(dentry,
3584		SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD|SLAB_ACCOUNT);
3585
3586	/* Hash may have been set up in dcache_init_early */
3587	if (!hashdist)
3588		return;
3589
3590	dentry_hashtable =
3591		alloc_large_system_hash("Dentry cache",
3592					sizeof(struct hlist_bl_head),
3593					dhash_entries,
3594					13,
3595					0,
3596					&d_hash_shift,
3597					NULL,
3598					0,
3599					0);
3600
3601	for (loop = 0; loop < (1U << d_hash_shift); loop++)
3602		INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
3603}
3604
3605/* SLAB cache for __getname() consumers */
3606struct kmem_cache *names_cachep __read_mostly;
3607EXPORT_SYMBOL(names_cachep);
3608
3609EXPORT_SYMBOL(d_genocide);
3610
3611void __init vfs_caches_init_early(void)
3612{
3613	dcache_init_early();
3614	inode_init_early();
3615}
3616
3617void __init vfs_caches_init(void)
3618{
3619	names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
3620			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
3621
3622	dcache_init();
3623	inode_init();
3624	files_init();
3625	files_maxfiles_init();
3626	mnt_init();
3627	bdev_cache_init();
3628	chrdev_init();
3629}