   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * fs/dcache.c
   4 *
   5 * Complete reimplementation
   6 * (C) 1997 Thomas Schoebel-Theuer,
   7 * with heavy changes by Linus Torvalds
   8 */
   9
  10/*
  11 * Notes on the allocation strategy:
  12 *
  13 * The dcache is a master of the icache - whenever a dcache entry
  14 * exists, the inode will always exist. "iput()" is done either when
  15 * the dcache entry is deleted or garbage collected.
  16 */
  17
  18#include <linux/ratelimit.h>
  19#include <linux/string.h>
  20#include <linux/mm.h>
  21#include <linux/fs.h>
  22#include <linux/fscrypt.h>
  23#include <linux/fsnotify.h>
  24#include <linux/slab.h>
  25#include <linux/init.h>
  26#include <linux/hash.h>
  27#include <linux/cache.h>
  28#include <linux/export.h>
  29#include <linux/security.h>
  30#include <linux/seqlock.h>
  31#include <linux/memblock.h>
  32#include <linux/bit_spinlock.h>
  33#include <linux/rculist_bl.h>
  34#include <linux/list_lru.h>
  35#include "internal.h"
  36#include "mount.h"
  37
  38#include <asm/runtime-const.h>
  39
  40/*
  41 * Usage:
  42 * dcache->d_inode->i_lock protects:
  43 *   - i_dentry, d_u.d_alias, d_inode of aliases
  44 * dcache_hash_bucket lock protects:
  45 *   - the dcache hash table
  46 * s_roots bl list spinlock protects:
  47 *   - the s_roots list (see __d_drop)
  48 * dentry->d_sb->s_dentry_lru_lock protects:
  49 *   - the dcache lru lists and counters
  50 * d_lock protects:
  51 *   - d_flags
  52 *   - d_name
  53 *   - d_lru
  54 *   - d_count
  55 *   - d_unhashed()
   56 *   - d_parent and d_children
   57 *   - children's d_sib and d_parent
  58 *   - d_u.d_alias, d_inode
  59 *
  60 * Ordering:
  61 * dentry->d_inode->i_lock
  62 *   dentry->d_lock
  63 *     dentry->d_sb->s_dentry_lru_lock
  64 *     dcache_hash_bucket lock
  65 *     s_roots lock
  66 *
  67 * If there is an ancestor relationship:
  68 * dentry->d_parent->...->d_parent->d_lock
  69 *   ...
  70 *     dentry->d_parent->d_lock
  71 *       dentry->d_lock
  72 *
  73 * If no ancestor relationship:
  74 * arbitrary, since it's serialized on rename_lock
  75 */
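/*
 * Editor's illustrative sketch (not part of fs/dcache.c): taking the
 * locks in the order documented above for a parent/child pair.
 * Parent first, then the child with the nested-lock annotation.
 */
static inline void lock_parent_child_example(struct dentry *parent,
					     struct dentry *child)
{
	spin_lock(&parent->d_lock);
	spin_lock_nested(&child->d_lock, DENTRY_D_LOCK_NESTED);
	/* child->d_name, d_flags, d_parent etc. are stable here */
	spin_unlock(&child->d_lock);
	spin_unlock(&parent->d_lock);
}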
  76int sysctl_vfs_cache_pressure __read_mostly = 100;
  77EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);
  78
  79__cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);
  80
  81EXPORT_SYMBOL(rename_lock);
  82
  83static struct kmem_cache *dentry_cache __ro_after_init;
  84
  85const struct qstr empty_name = QSTR_INIT("", 0);
  86EXPORT_SYMBOL(empty_name);
  87const struct qstr slash_name = QSTR_INIT("/", 1);
  88EXPORT_SYMBOL(slash_name);
  89const struct qstr dotdot_name = QSTR_INIT("..", 2);
  90EXPORT_SYMBOL(dotdot_name);
  91
  92/*
  93 * This is the single most critical data structure when it comes
  94 * to the dcache: the hashtable for lookups. Somebody should try
  95 * to make this good - I've just made it work.
  96 *
  97 * This hash-function tries to avoid losing too many bits of hash
  98 * information, yet avoid using a prime hash-size or similar.
  99 *
 100 * Marking the variables "used" ensures that the compiler doesn't
 101 * optimize them away completely on architectures with runtime
  102 * constant infrastructure; this allows debuggers to see their
 103 * values. But updating these values has no effect on those arches.
 104 */
 105
 106static unsigned int d_hash_shift __ro_after_init __used;
 107
 108static struct hlist_bl_head *dentry_hashtable __ro_after_init __used;
 109
 110static inline struct hlist_bl_head *d_hash(unsigned long hashlen)
 111{
 112	return runtime_const_ptr(dentry_hashtable) +
 113		runtime_const_shift_right_32(hashlen, d_hash_shift);
 114}
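/*
 * Editor's illustrative sketch (not part of fs/dcache.c): how a name
 * maps to a hash chain.  hashlen_string() packs length and hash into
 * one word, salted with the parent pointer, and d_hash() picks the
 * bucket - mirroring what the lookup paths do with ->hash_len.
 */
static inline struct hlist_bl_head *bucket_for_example(const struct dentry *parent,
						       const char *name)
{
	return d_hash(hashlen_string(parent, name));
}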
 115
 116#define IN_LOOKUP_SHIFT 10
 117static struct hlist_bl_head in_lookup_hashtable[1 << IN_LOOKUP_SHIFT];
 118
 119static inline struct hlist_bl_head *in_lookup_hash(const struct dentry *parent,
 120					unsigned int hash)
 121{
 122	hash += (unsigned long) parent / L1_CACHE_BYTES;
 123	return in_lookup_hashtable + hash_32(hash, IN_LOOKUP_SHIFT);
 124}
 125
 126struct dentry_stat_t {
 127	long nr_dentry;
 128	long nr_unused;
 129	long age_limit;		/* age in seconds */
 130	long want_pages;	/* pages requested by system */
 131	long nr_negative;	/* # of unused negative dentries */
 132	long dummy;		/* Reserved for future use */
 133};
 134
 135static DEFINE_PER_CPU(long, nr_dentry);
 136static DEFINE_PER_CPU(long, nr_dentry_unused);
 137static DEFINE_PER_CPU(long, nr_dentry_negative);
 138static int dentry_negative_policy;
 139
 140#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
 141/* Statistics gathering. */
 142static struct dentry_stat_t dentry_stat = {
 143	.age_limit = 45,
 144};
 145
 146/*
 147 * Here we resort to our own counters instead of using generic per-cpu counters
  148 * for consistency with what the vfs inode code does. We expect to get
  149 * better code and performance by having our own specialized counters.
 150 *
 151 * Please note that the loop is done over all possible CPUs, not over all online
 152 * CPUs. The reason for this is that we don't want to play games with CPUs going
 153 * on and off. If one of them goes off, we will just keep their counters.
 154 *
 155 * glommer: See cffbc8a for details, and if you ever intend to change this,
 156 * please update all vfs counters to match.
 157 */
 158static long get_nr_dentry(void)
 159{
 160	int i;
 161	long sum = 0;
 162	for_each_possible_cpu(i)
 163		sum += per_cpu(nr_dentry, i);
 164	return sum < 0 ? 0 : sum;
 165}
 166
 167static long get_nr_dentry_unused(void)
 168{
 169	int i;
 170	long sum = 0;
 171	for_each_possible_cpu(i)
 172		sum += per_cpu(nr_dentry_unused, i);
 173	return sum < 0 ? 0 : sum;
 174}
 175
 176static long get_nr_dentry_negative(void)
 177{
 178	int i;
 179	long sum = 0;
 180
 181	for_each_possible_cpu(i)
 182		sum += per_cpu(nr_dentry_negative, i);
 183	return sum < 0 ? 0 : sum;
 184}
 185
 186static int proc_nr_dentry(const struct ctl_table *table, int write, void *buffer,
 187			  size_t *lenp, loff_t *ppos)
 188{
 189	dentry_stat.nr_dentry = get_nr_dentry();
 190	dentry_stat.nr_unused = get_nr_dentry_unused();
 191	dentry_stat.nr_negative = get_nr_dentry_negative();
 192	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
 193}
 194
 195static struct ctl_table fs_dcache_sysctls[] = {
 196	{
 197		.procname	= "dentry-state",
 198		.data		= &dentry_stat,
 199		.maxlen		= 6*sizeof(long),
 200		.mode		= 0444,
 201		.proc_handler	= proc_nr_dentry,
 202	},
 203	{
 204		.procname	= "dentry-negative",
 205		.data		= &dentry_negative_policy,
 206		.maxlen		= sizeof(dentry_negative_policy),
 207		.mode		= 0644,
 208		.proc_handler	= proc_dointvec_minmax,
 209		.extra1		= SYSCTL_ZERO,
 210		.extra2		= SYSCTL_ONE,
 211	},
 212};
 213
 214static int __init init_fs_dcache_sysctls(void)
 215{
 216	register_sysctl_init("fs", fs_dcache_sysctls);
 217	return 0;
 218}
 219fs_initcall(init_fs_dcache_sysctls);
 220#endif
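/*
 * Editor's note: the sysctl table above surfaces these counters as
 * /proc/sys/fs/dentry-state - six longs in struct dentry_stat_t
 * order: nr_dentry, nr_unused, age_limit, want_pages, nr_negative
 * and the reserved dummy field.  Only nr_dentry, nr_unused and
 * nr_negative are recomputed on each read (see proc_nr_dentry()).
 */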
 221
 222/*
 223 * Compare 2 name strings, return 0 if they match, otherwise non-zero.
 224 * The strings are both count bytes long, and count is non-zero.
 225 */
 226#ifdef CONFIG_DCACHE_WORD_ACCESS
 227
 228#include <asm/word-at-a-time.h>
 229/*
  230 * NOTE! 'cs' and 'scount' come from a dentry, so it has an
 231 * aligned allocation for this particular component. We don't
 232 * strictly need the load_unaligned_zeropad() safety, but it
 233 * doesn't hurt either.
 234 *
 235 * In contrast, 'ct' and 'tcount' can be from a pathname, and do
 236 * need the careful unaligned handling.
 237 */
 238static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
 239{
 240	unsigned long a,b,mask;
 241
 242	for (;;) {
 243		a = read_word_at_a_time(cs);
 244		b = load_unaligned_zeropad(ct);
 245		if (tcount < sizeof(unsigned long))
 246			break;
 247		if (unlikely(a != b))
 248			return 1;
 249		cs += sizeof(unsigned long);
 250		ct += sizeof(unsigned long);
 251		tcount -= sizeof(unsigned long);
 252		if (!tcount)
 253			return 0;
 254	}
 255	mask = bytemask_from_count(tcount);
 256	return unlikely(!!((a ^ b) & mask));
 257}
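/*
 * Editor's worked example: on a 64-bit little-endian machine a tail
 * of tcount == 3 bytes yields
 *
 *	mask = bytemask_from_count(3) = 0x0000000000ffffff
 *
 * so the final word-wide comparison above ignores whatever bytes the
 * wide loads pulled in beyond the end of the name.
 */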
 258
 259#else
 260
 261static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
 262{
 263	do {
 264		if (*cs != *ct)
 265			return 1;
 266		cs++;
 267		ct++;
 268		tcount--;
 269	} while (tcount);
 270	return 0;
 271}
 272
 273#endif
 274
 275static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *ct, unsigned tcount)
 276{
 277	/*
 278	 * Be careful about RCU walk racing with rename:
 279	 * use 'READ_ONCE' to fetch the name pointer.
 280	 *
 281	 * NOTE! Even if a rename will mean that the length
 282	 * was not loaded atomically, we don't care. The
 283	 * RCU walk will check the sequence count eventually,
 284	 * and catch it. And we won't overrun the buffer,
 285	 * because we're reading the name pointer atomically,
 286	 * and a dentry name is guaranteed to be properly
 287	 * terminated with a NUL byte.
 288	 *
 289	 * End result: even if 'len' is wrong, we'll exit
 290	 * early because the data cannot match (there can
 291	 * be no NUL in the ct/tcount data)
 292	 */
 293	const unsigned char *cs = READ_ONCE(dentry->d_name.name);
 294
 295	return dentry_string_cmp(cs, ct, tcount);
 296}
 297
 298struct external_name {
 299	union {
 300		atomic_t count;
 301		struct rcu_head head;
 302	} u;
 303	unsigned char name[];
 304};
 305
 306static inline struct external_name *external_name(struct dentry *dentry)
 307{
 308	return container_of(dentry->d_name.name, struct external_name, name[0]);
 309}
 310
 311static void __d_free(struct rcu_head *head)
 312{
 313	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
 314
 315	kmem_cache_free(dentry_cache, dentry); 
 316}
 317
 318static void __d_free_external(struct rcu_head *head)
 319{
 320	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
 321	kfree(external_name(dentry));
 322	kmem_cache_free(dentry_cache, dentry);
 323}
 324
 325static inline int dname_external(const struct dentry *dentry)
 326{
 327	return dentry->d_name.name != dentry->d_iname;
 328}
 329
 330void take_dentry_name_snapshot(struct name_snapshot *name, struct dentry *dentry)
 331{
 332	spin_lock(&dentry->d_lock);
 333	name->name = dentry->d_name;
 334	if (unlikely(dname_external(dentry))) {
 335		atomic_inc(&external_name(dentry)->u.count);
 336	} else {
 337		memcpy(name->inline_name, dentry->d_iname,
 338		       dentry->d_name.len + 1);
 339		name->name.name = name->inline_name;
 340	}
 341	spin_unlock(&dentry->d_lock);
 342}
 343EXPORT_SYMBOL(take_dentry_name_snapshot);
 344
 345void release_dentry_name_snapshot(struct name_snapshot *name)
 346{
 347	if (unlikely(name->name.name != name->inline_name)) {
 348		struct external_name *p;
 349		p = container_of(name->name.name, struct external_name, name[0]);
 350		if (unlikely(atomic_dec_and_test(&p->u.count)))
 351			kfree_rcu(p, u.head);
 352	}
 353}
 354EXPORT_SYMBOL(release_dentry_name_snapshot);
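/*
 * Editor's illustrative sketch (not part of fs/dcache.c): the usual
 * pairing.  A snapshot keeps the name valid and NUL-terminated even
 * if the dentry is renamed (or freed) after we drop our locks.
 */
static inline void name_snapshot_example(struct dentry *dentry)
{
	struct name_snapshot snap;

	take_dentry_name_snapshot(&snap, dentry);
	pr_debug("stable name: %s\n", snap.name.name);
	release_dentry_name_snapshot(&snap);
}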
 355
 356static inline void __d_set_inode_and_type(struct dentry *dentry,
 357					  struct inode *inode,
 358					  unsigned type_flags)
 359{
 360	unsigned flags;
 361
 362	dentry->d_inode = inode;
 363	flags = READ_ONCE(dentry->d_flags);
 364	flags &= ~DCACHE_ENTRY_TYPE;
 365	flags |= type_flags;
 366	smp_store_release(&dentry->d_flags, flags);
 367}
 368
 369static inline void __d_clear_type_and_inode(struct dentry *dentry)
 370{
 371	unsigned flags = READ_ONCE(dentry->d_flags);
 372
 373	flags &= ~DCACHE_ENTRY_TYPE;
 374	WRITE_ONCE(dentry->d_flags, flags);
 375	dentry->d_inode = NULL;
 376	/*
 377	 * The negative counter only tracks dentries on the LRU. Don't inc if
 378	 * d_lru is on another list.
 379	 */
 380	if ((flags & (DCACHE_LRU_LIST|DCACHE_SHRINK_LIST)) == DCACHE_LRU_LIST)
 381		this_cpu_inc(nr_dentry_negative);
 382}
 383
 384static void dentry_free(struct dentry *dentry)
 385{
 386	WARN_ON(!hlist_unhashed(&dentry->d_u.d_alias));
 387	if (unlikely(dname_external(dentry))) {
 388		struct external_name *p = external_name(dentry);
 389		if (likely(atomic_dec_and_test(&p->u.count))) {
 390			call_rcu(&dentry->d_u.d_rcu, __d_free_external);
 391			return;
 392		}
 393	}
 394	/* if dentry was never visible to RCU, immediate free is OK */
 395	if (dentry->d_flags & DCACHE_NORCU)
 396		__d_free(&dentry->d_u.d_rcu);
 397	else
 398		call_rcu(&dentry->d_u.d_rcu, __d_free);
 399}
 400
 401/*
 402 * Release the dentry's inode, using the filesystem
 403 * d_iput() operation if defined.
 404 */
 405static void dentry_unlink_inode(struct dentry * dentry)
 406	__releases(dentry->d_lock)
 407	__releases(dentry->d_inode->i_lock)
 408{
 409	struct inode *inode = dentry->d_inode;
 410
 411	raw_write_seqcount_begin(&dentry->d_seq);
 412	__d_clear_type_and_inode(dentry);
 413	hlist_del_init(&dentry->d_u.d_alias);
 414	raw_write_seqcount_end(&dentry->d_seq);
 415	spin_unlock(&dentry->d_lock);
 416	spin_unlock(&inode->i_lock);
 417	if (!inode->i_nlink)
 418		fsnotify_inoderemove(inode);
 419	if (dentry->d_op && dentry->d_op->d_iput)
 420		dentry->d_op->d_iput(dentry, inode);
 421	else
 422		iput(inode);
 423}
 424
 425/*
 426 * The DCACHE_LRU_LIST bit is set whenever the 'd_lru' entry
 427 * is in use - which includes both the "real" per-superblock
 428 * LRU list _and_ the DCACHE_SHRINK_LIST use.
 429 *
 430 * The DCACHE_SHRINK_LIST bit is set whenever the dentry is
 431 * on the shrink list (ie not on the superblock LRU list).
 432 *
 433 * The per-cpu "nr_dentry_unused" counters are updated with
 434 * the DCACHE_LRU_LIST bit.
 435 *
 436 * The per-cpu "nr_dentry_negative" counters are only updated
 437 * when deleted from or added to the per-superblock LRU list, not
 438 * from/to the shrink list. That is to avoid an unneeded dec/inc
 439 * pair when moving from LRU to shrink list in select_collect().
 440 *
 441 * These helper functions make sure we always follow the
 442 * rules. d_lock must be held by the caller.
 443 */
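/*
 * Editor's summary of the states the helpers below move between
 * (same rules as the comment above):
 *
 *	d_flags (LRU bits)			->d_lru is on
 *	0					nothing
 *	DCACHE_LRU_LIST				sb->s_dentry_lru
 *	DCACHE_LRU_LIST|DCACHE_SHRINK_LIST	a private shrink list
 */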
 444#define D_FLAG_VERIFY(dentry,x) WARN_ON_ONCE(((dentry)->d_flags & (DCACHE_LRU_LIST | DCACHE_SHRINK_LIST)) != (x))
 445static void d_lru_add(struct dentry *dentry)
 446{
 447	D_FLAG_VERIFY(dentry, 0);
 448	dentry->d_flags |= DCACHE_LRU_LIST;
 449	this_cpu_inc(nr_dentry_unused);
 450	if (d_is_negative(dentry))
 451		this_cpu_inc(nr_dentry_negative);
 452	WARN_ON_ONCE(!list_lru_add_obj(
 453			&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
 454}
 455
 456static void d_lru_del(struct dentry *dentry)
 457{
 458	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
 459	dentry->d_flags &= ~DCACHE_LRU_LIST;
 460	this_cpu_dec(nr_dentry_unused);
 461	if (d_is_negative(dentry))
 462		this_cpu_dec(nr_dentry_negative);
 463	WARN_ON_ONCE(!list_lru_del_obj(
 464			&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
 465}
 466
 467static void d_shrink_del(struct dentry *dentry)
 468{
 469	D_FLAG_VERIFY(dentry, DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
 470	list_del_init(&dentry->d_lru);
 471	dentry->d_flags &= ~(DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
 472	this_cpu_dec(nr_dentry_unused);
 473}
 474
 475static void d_shrink_add(struct dentry *dentry, struct list_head *list)
 476{
 477	D_FLAG_VERIFY(dentry, 0);
 478	list_add(&dentry->d_lru, list);
 479	dentry->d_flags |= DCACHE_SHRINK_LIST | DCACHE_LRU_LIST;
 480	this_cpu_inc(nr_dentry_unused);
 481}
 482
 483/*
 484 * These can only be called under the global LRU lock, ie during the
 485 * callback for freeing the LRU list. "isolate" removes it from the
 486 * LRU lists entirely, while shrink_move moves it to the indicated
 487 * private list.
 488 */
 489static void d_lru_isolate(struct list_lru_one *lru, struct dentry *dentry)
 490{
 491	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
 492	dentry->d_flags &= ~DCACHE_LRU_LIST;
 493	this_cpu_dec(nr_dentry_unused);
 494	if (d_is_negative(dentry))
 495		this_cpu_dec(nr_dentry_negative);
 496	list_lru_isolate(lru, &dentry->d_lru);
 497}
 498
 499static void d_lru_shrink_move(struct list_lru_one *lru, struct dentry *dentry,
 500			      struct list_head *list)
 501{
 502	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
 503	dentry->d_flags |= DCACHE_SHRINK_LIST;
 504	if (d_is_negative(dentry))
 505		this_cpu_dec(nr_dentry_negative);
 506	list_lru_isolate_move(lru, &dentry->d_lru, list);
 507}
 508
 509static void ___d_drop(struct dentry *dentry)
 510{
 511	struct hlist_bl_head *b;
 512	/*
 513	 * Hashed dentries are normally on the dentry hashtable,
 514	 * with the exception of those newly allocated by
 515	 * d_obtain_root, which are always IS_ROOT:
 516	 */
 517	if (unlikely(IS_ROOT(dentry)))
 518		b = &dentry->d_sb->s_roots;
 519	else
 520		b = d_hash(dentry->d_name.hash);
 521
 522	hlist_bl_lock(b);
 523	__hlist_bl_del(&dentry->d_hash);
 524	hlist_bl_unlock(b);
 525}
 526
 527void __d_drop(struct dentry *dentry)
 528{
 529	if (!d_unhashed(dentry)) {
 530		___d_drop(dentry);
 531		dentry->d_hash.pprev = NULL;
 532		write_seqcount_invalidate(&dentry->d_seq);
 533	}
 534}
 535EXPORT_SYMBOL(__d_drop);
 536
 537/**
 538 * d_drop - drop a dentry
 539 * @dentry: dentry to drop
 540 *
 541 * d_drop() unhashes the entry from the parent dentry hashes, so that it won't
 542 * be found through a VFS lookup any more. Note that this is different from
 543 * deleting the dentry - d_delete will try to mark the dentry negative if
 544 * possible, giving a successful _negative_ lookup, while d_drop will
 545 * just make the cache lookup fail.
 546 *
 547 * d_drop() is used mainly for stuff that wants to invalidate a dentry for some
 548 * reason (NFS timeouts or autofs deletes).
 549 *
 550 * __d_drop requires dentry->d_lock
 551 *
 552 * ___d_drop doesn't mark dentry as "unhashed"
 553 * (dentry->d_hash.pprev will be LIST_POISON2, not NULL).
 554 */
 555void d_drop(struct dentry *dentry)
 556{
 557	spin_lock(&dentry->d_lock);
 558	__d_drop(dentry);
 559	spin_unlock(&dentry->d_lock);
 560}
 561EXPORT_SYMBOL(d_drop);
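/*
 * Editor's illustrative sketch (not part of fs/dcache.c): e.g. a
 * network filesystem deciding a cached entry has timed out.  After
 * d_drop() the next lookup misses the dcache and goes back to the
 * filesystem; the dentry itself stays allocated until its refcount
 * drops.
 */
static inline void drop_if_expired_example(struct dentry *dentry, bool expired)
{
	if (expired)
		d_drop(dentry);
}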
 562
 563static inline void dentry_unlist(struct dentry *dentry)
 564{
 565	struct dentry *next;
 566	/*
 567	 * Inform d_walk() and shrink_dentry_list() that we are no longer
 568	 * attached to the dentry tree
 569	 */
 570	dentry->d_flags |= DCACHE_DENTRY_KILLED;
 571	if (unlikely(hlist_unhashed(&dentry->d_sib)))
 572		return;
 573	__hlist_del(&dentry->d_sib);
 574	/*
 575	 * Cursors can move around the list of children.  While we'd been
 576	 * a normal list member, it didn't matter - ->d_sib.next would've
 577	 * been updated.  However, from now on it won't be and for the
 578	 * things like d_walk() it might end up with a nasty surprise.
 579	 * Normally d_walk() doesn't care about cursors moving around -
 580	 * ->d_lock on parent prevents that and since a cursor has no children
 581	 * of its own, we get through it without ever unlocking the parent.
 582	 * There is one exception, though - if we ascend from a child that
 583	 * gets killed as soon as we unlock it, the next sibling is found
 584	 * using the value left in its ->d_sib.next.  And if _that_
 585	 * pointed to a cursor, and cursor got moved (e.g. by lseek())
 586	 * before d_walk() regains parent->d_lock, we'll end up skipping
 587	 * everything the cursor had been moved past.
 588	 *
 589	 * Solution: make sure that the pointer left behind in ->d_sib.next
 590	 * points to something that won't be moving around.  I.e. skip the
 591	 * cursors.
 592	 */
 593	while (dentry->d_sib.next) {
 594		next = hlist_entry(dentry->d_sib.next, struct dentry, d_sib);
 595		if (likely(!(next->d_flags & DCACHE_DENTRY_CURSOR)))
 596			break;
 597		dentry->d_sib.next = next->d_sib.next;
 598	}
 599}
 600
 601static struct dentry *__dentry_kill(struct dentry *dentry)
 602{
 603	struct dentry *parent = NULL;
 604	bool can_free = true;
 605
 606	/*
 607	 * The dentry is now unrecoverably dead to the world.
 608	 */
 609	lockref_mark_dead(&dentry->d_lockref);
 610
 611	/*
 612	 * inform the fs via d_prune that this dentry is about to be
 613	 * unhashed and destroyed.
 614	 */
 615	if (dentry->d_flags & DCACHE_OP_PRUNE)
 616		dentry->d_op->d_prune(dentry);
 617
 618	if (dentry->d_flags & DCACHE_LRU_LIST) {
 619		if (!(dentry->d_flags & DCACHE_SHRINK_LIST))
 620			d_lru_del(dentry);
 621	}
 622	/* if it was on the hash then remove it */
 623	__d_drop(dentry);
 624	if (dentry->d_inode)
 625		dentry_unlink_inode(dentry);
 626	else
 627		spin_unlock(&dentry->d_lock);
 628	this_cpu_dec(nr_dentry);
 629	if (dentry->d_op && dentry->d_op->d_release)
 630		dentry->d_op->d_release(dentry);
 631
 632	cond_resched();
 633	/* now that it's negative, ->d_parent is stable */
 634	if (!IS_ROOT(dentry)) {
 635		parent = dentry->d_parent;
 636		spin_lock(&parent->d_lock);
 637	}
 638	spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
 639	dentry_unlist(dentry);
 640	if (dentry->d_flags & DCACHE_SHRINK_LIST)
 641		can_free = false;
 642	spin_unlock(&dentry->d_lock);
 643	if (likely(can_free))
 644		dentry_free(dentry);
 645	if (parent && --parent->d_lockref.count) {
 646		spin_unlock(&parent->d_lock);
 647		return NULL;
 648	}
 649	return parent;
 650}
 651
 652/*
 653 * Lock a dentry for feeding it to __dentry_kill().
 654 * Called under rcu_read_lock() and dentry->d_lock; the former
 655 * guarantees that nothing we access will be freed under us.
 656 * Note that dentry is *not* protected from concurrent dentry_kill(),
 657 * d_delete(), etc.
 658 *
 659 * Return false if dentry is busy.  Otherwise, return true and have
 660 * that dentry's inode locked.
 661 */
 662
 663static bool lock_for_kill(struct dentry *dentry)
 664{
 665	struct inode *inode = dentry->d_inode;
 666
 667	if (unlikely(dentry->d_lockref.count))
 668		return false;
 669
 670	if (!inode || likely(spin_trylock(&inode->i_lock)))
 671		return true;
 672
 673	do {
 674		spin_unlock(&dentry->d_lock);
 675		spin_lock(&inode->i_lock);
 676		spin_lock(&dentry->d_lock);
 677		if (likely(inode == dentry->d_inode))
 678			break;
 679		spin_unlock(&inode->i_lock);
 680		inode = dentry->d_inode;
 681	} while (inode);
 682	if (likely(!dentry->d_lockref.count))
 683		return true;
 684	if (inode)
 685		spin_unlock(&inode->i_lock);
 686	return false;
 687}
 688
 689/*
 690 * Decide if dentry is worth retaining.  Usually this is called with dentry
 691 * locked; if not locked, we are more limited and might not be able to tell
 692 * without a lock.  False in this case means "punt to locked path and recheck".
 693 *
 694 * In case we aren't locked, these predicates are not "stable". However, it is
 695 * sufficient that at some point after we dropped the reference the dentry was
 696 * hashed and the flags had the proper value. Other dentry users may have
 697 * re-gotten a reference to the dentry and change that, but our work is done -
 698 * we can leave the dentry around with a zero refcount.
 699 */
 700static inline bool retain_dentry(struct dentry *dentry, bool locked)
 701{
 702	unsigned int d_flags;
 703
 704	smp_rmb();
 705	d_flags = READ_ONCE(dentry->d_flags);
 706
 707	// Unreachable? Nobody would be able to look it up, no point retaining
 708	if (unlikely(d_unhashed(dentry)))
 709		return false;
 710
 711	// Same if it's disconnected
 712	if (unlikely(d_flags & DCACHE_DISCONNECTED))
 713		return false;
 714
 715	// ->d_delete() might tell us not to bother, but that requires
 716	// ->d_lock; can't decide without it
 717	if (unlikely(d_flags & DCACHE_OP_DELETE)) {
 718		if (!locked || dentry->d_op->d_delete(dentry))
 719			return false;
 720	}
 721
 722	// Explicitly told not to bother
 723	if (unlikely(d_flags & DCACHE_DONTCACHE))
 724		return false;
 725
 726	// At this point it looks like we ought to keep it.  We also might
 727	// need to do something - put it on LRU if it wasn't there already
 728	// and mark it referenced if it was on LRU, but not marked yet.
 729	// Unfortunately, both actions require ->d_lock, so in lockless
 730	// case we'd have to punt rather than doing those.
 731	if (unlikely(!(d_flags & DCACHE_LRU_LIST))) {
 732		if (!locked)
 733			return false;
 734		d_lru_add(dentry);
 735	} else if (unlikely(!(d_flags & DCACHE_REFERENCED))) {
 736		if (!locked)
 737			return false;
 738		dentry->d_flags |= DCACHE_REFERENCED;
 739	}
 740	return true;
 741}
 742
 743void d_mark_dontcache(struct inode *inode)
 744{
 745	struct dentry *de;
 746
 747	spin_lock(&inode->i_lock);
 748	hlist_for_each_entry(de, &inode->i_dentry, d_u.d_alias) {
 749		spin_lock(&de->d_lock);
 750		de->d_flags |= DCACHE_DONTCACHE;
 751		spin_unlock(&de->d_lock);
 752	}
 753	inode->i_state |= I_DONTCACHE;
 754	spin_unlock(&inode->i_lock);
 755}
 756EXPORT_SYMBOL(d_mark_dontcache);
 757
 758/*
 759 * Try to do a lockless dput(), and return whether that was successful.
 760 *
 761 * If unsuccessful, we return false, having already taken the dentry lock.
 762 * In that case refcount is guaranteed to be zero and we have already
 763 * decided that it's not worth keeping around.
 764 *
 765 * The caller needs to hold the RCU read lock, so that the dentry is
 766 * guaranteed to stay around even if the refcount goes down to zero!
 767 */
 768static inline bool fast_dput(struct dentry *dentry)
 769{
 770	int ret;
 771
 772	/*
 773	 * try to decrement the lockref optimistically.
 774	 */
 775	ret = lockref_put_return(&dentry->d_lockref);
 776
 777	/*
 778	 * If the lockref_put_return() failed due to the lock being held
 779	 * by somebody else, the fast path has failed. We will need to
 780	 * get the lock, and then check the count again.
 781	 */
 782	if (unlikely(ret < 0)) {
 783		spin_lock(&dentry->d_lock);
 784		if (WARN_ON_ONCE(dentry->d_lockref.count <= 0)) {
 785			spin_unlock(&dentry->d_lock);
 786			return true;
 787		}
 788		dentry->d_lockref.count--;
 789		goto locked;
 790	}
 791
 792	/*
 793	 * If we weren't the last ref, we're done.
 794	 */
 795	if (ret)
 796		return true;
 797
 798	/*
 799	 * Can we decide that decrement of refcount is all we needed without
 800	 * taking the lock?  There's a very common case when it's all we need -
 801	 * dentry looks like it ought to be retained and there's nothing else
 802	 * to do.
 803	 */
 804	if (retain_dentry(dentry, false))
 805		return true;
 806
 807	/*
 808	 * Either not worth retaining or we can't tell without the lock.
 809	 * Get the lock, then.  We've already decremented the refcount to 0,
 810	 * but we'll need to re-check the situation after getting the lock.
 811	 */
 812	spin_lock(&dentry->d_lock);
 813
 814	/*
 815	 * Did somebody else grab a reference to it in the meantime, and
 816	 * we're no longer the last user after all? Alternatively, somebody
 817	 * else could have killed it and marked it dead. Either way, we
 818	 * don't need to do anything else.
 819	 */
 820locked:
 821	if (dentry->d_lockref.count || retain_dentry(dentry, true)) {
 822		spin_unlock(&dentry->d_lock);
 823		return true;
 824	}
 825	return false;
 826}
 827
 828
 829/* 
 830 * This is dput
 831 *
 832 * This is complicated by the fact that we do not want to put
 833 * dentries that are no longer on any hash chain on the unused
 834 * list: we'd much rather just get rid of them immediately.
 835 *
 836 * However, that implies that we have to traverse the dentry
 837 * tree upwards to the parents which might _also_ now be
 838 * scheduled for deletion (it may have been only waiting for
 839 * its last child to go away).
 840 *
 841 * This tail recursion is done by hand as we don't want to depend
 842 * on the compiler to always get this right (gcc generally doesn't).
 843 * Real recursion would eat up our stack space.
 844 */
 845
 846/*
 847 * dput - release a dentry
 848 * @dentry: dentry to release 
 849 *
 850 * Release a dentry. This will drop the usage count and if appropriate
 851 * call the dentry unlink method as well as removing it from the queues and
 852 * releasing its resources. If the parent dentries were scheduled for release
 853 * they too may now get deleted.
 854 */
 855void dput(struct dentry *dentry)
 856{
 857	if (!dentry)
 858		return;
 859	might_sleep();
 860	rcu_read_lock();
 861	if (likely(fast_dput(dentry))) {
 862		rcu_read_unlock();
 863		return;
 864	}
 865	while (lock_for_kill(dentry)) {
 866		rcu_read_unlock();
 867		dentry = __dentry_kill(dentry);
 868		if (!dentry)
 869			return;
 870		if (retain_dentry(dentry, true)) {
 871			spin_unlock(&dentry->d_lock);
 872			return;
 873		}
 874		rcu_read_lock();
 875	}
 876	rcu_read_unlock();
 877	spin_unlock(&dentry->d_lock);
 878}
 879EXPORT_SYMBOL(dput);
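/*
 * Editor's illustrative sketch (not part of fs/dcache.c): the
 * canonical dget()/dput() pairing.  Anything that stashes a dentry
 * pointer must hold a reference for as long as it uses the pointer,
 * and dput() may sleep, so release from process context.
 */
static inline void dentry_ref_example(struct dentry *dentry)
{
	struct dentry *d = dget(dentry);	/* take a reference */

	pr_debug("using %pd\n", d);		/* d cannot go away here */
	dput(d);				/* release; may free it */
}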
 880
 881static void to_shrink_list(struct dentry *dentry, struct list_head *list)
 882__must_hold(&dentry->d_lock)
 883{
 884	if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) {
 885		if (dentry->d_flags & DCACHE_LRU_LIST)
 886			d_lru_del(dentry);
 887		d_shrink_add(dentry, list);
 888	}
 889}
 890
 891void dput_to_list(struct dentry *dentry, struct list_head *list)
 892{
 893	rcu_read_lock();
 894	if (likely(fast_dput(dentry))) {
 895		rcu_read_unlock();
 896		return;
 897	}
 898	rcu_read_unlock();
 899	to_shrink_list(dentry, list);
 900	spin_unlock(&dentry->d_lock);
 901}
 902
 903struct dentry *dget_parent(struct dentry *dentry)
 904{
 905	int gotref;
 906	struct dentry *ret;
 907	unsigned seq;
 908
 909	/*
 910	 * Do optimistic parent lookup without any
 911	 * locking.
 912	 */
 913	rcu_read_lock();
 914	seq = raw_seqcount_begin(&dentry->d_seq);
 915	ret = READ_ONCE(dentry->d_parent);
 916	gotref = lockref_get_not_zero(&ret->d_lockref);
 917	rcu_read_unlock();
 918	if (likely(gotref)) {
 919		if (!read_seqcount_retry(&dentry->d_seq, seq))
 920			return ret;
 921		dput(ret);
 922	}
 923
 924repeat:
 925	/*
 926	 * Don't need rcu_dereference because we re-check it was correct under
 927	 * the lock.
 928	 */
 929	rcu_read_lock();
 930	ret = dentry->d_parent;
 931	spin_lock(&ret->d_lock);
 932	if (unlikely(ret != dentry->d_parent)) {
 933		spin_unlock(&ret->d_lock);
 934		rcu_read_unlock();
 935		goto repeat;
 936	}
 937	rcu_read_unlock();
 938	BUG_ON(!ret->d_lockref.count);
 939	ret->d_lockref.count++;
 940	spin_unlock(&ret->d_lock);
 941	return ret;
 942}
 943EXPORT_SYMBOL(dget_parent);
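/*
 * Editor's illustrative sketch (not part of fs/dcache.c): walking one
 * level up safely.  dget_parent() returns a referenced parent even if
 * a concurrent rename is moving the child around.
 */
static inline void parent_example(struct dentry *dentry)
{
	struct dentry *parent = dget_parent(dentry);

	pr_debug("%pd/%pd\n", parent, dentry);
	dput(parent);
}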
 944
 945static struct dentry * __d_find_any_alias(struct inode *inode)
 946{
 947	struct dentry *alias;
 948
 949	if (hlist_empty(&inode->i_dentry))
 950		return NULL;
 951	alias = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
 952	lockref_get(&alias->d_lockref);
 953	return alias;
 954}
 955
 956/**
 957 * d_find_any_alias - find any alias for a given inode
 958 * @inode: inode to find an alias for
 959 *
 960 * If any aliases exist for the given inode, take and return a
 961 * reference for one of them.  If no aliases exist, return %NULL.
 962 */
 963struct dentry *d_find_any_alias(struct inode *inode)
 964{
 965	struct dentry *de;
 966
 967	spin_lock(&inode->i_lock);
 968	de = __d_find_any_alias(inode);
 969	spin_unlock(&inode->i_lock);
 970	return de;
 971}
 972EXPORT_SYMBOL(d_find_any_alias);
 973
 974static struct dentry *__d_find_alias(struct inode *inode)
 975{
 976	struct dentry *alias;
 977
 978	if (S_ISDIR(inode->i_mode))
 979		return __d_find_any_alias(inode);
 980
 981	hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
 982		spin_lock(&alias->d_lock);
 983 		if (!d_unhashed(alias)) {
 984			dget_dlock(alias);
 985			spin_unlock(&alias->d_lock);
 986			return alias;
 987		}
 988		spin_unlock(&alias->d_lock);
 989	}
 990	return NULL;
 991}
 992
 993/**
 994 * d_find_alias - grab a hashed alias of inode
 995 * @inode: inode in question
 996 *
 997 * If inode has a hashed alias, or is a directory and has any alias,
 998 * acquire the reference to alias and return it. Otherwise return NULL.
 999 * Notice that if inode is a directory there can be only one alias and
1000 * it can be unhashed only if it has no children, or if it is the root
1001 * of a filesystem, or if the directory was renamed and d_revalidate
1002 * was the first vfs operation to notice.
1003 *
1004 * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer
1005 * any other hashed alias over that one.
1006 */
1007struct dentry *d_find_alias(struct inode *inode)
1008{
1009	struct dentry *de = NULL;
1010
1011	if (!hlist_empty(&inode->i_dentry)) {
1012		spin_lock(&inode->i_lock);
1013		de = __d_find_alias(inode);
1014		spin_unlock(&inode->i_lock);
1015	}
1016	return de;
1017}
1018EXPORT_SYMBOL(d_find_alias);
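/*
 * Editor's illustrative sketch (not part of fs/dcache.c): finding a
 * cached name for an inode, e.g. for diagnostics.  The returned alias
 * is referenced and must be dput(); NULL means no hashed alias exists.
 */
static inline void alias_example(struct inode *inode)
{
	struct dentry *alias = d_find_alias(inode);

	if (alias) {
		pr_debug("inode %lu cached as %pd\n", inode->i_ino, alias);
		dput(alias);
	}
}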
1019
1020/*
1021 *  Caller MUST be holding rcu_read_lock() and be guaranteed
1022 *  that inode won't get freed until rcu_read_unlock().
1023 */
1024struct dentry *d_find_alias_rcu(struct inode *inode)
1025{
1026	struct hlist_head *l = &inode->i_dentry;
1027	struct dentry *de = NULL;
1028
1029	spin_lock(&inode->i_lock);
1030	// ->i_dentry and ->i_rcu are colocated, but the latter won't be
1031	// used without having I_FREEING set, which means no aliases left
1032	if (likely(!(inode->i_state & I_FREEING) && !hlist_empty(l))) {
1033		if (S_ISDIR(inode->i_mode)) {
1034			de = hlist_entry(l->first, struct dentry, d_u.d_alias);
1035		} else {
1036			hlist_for_each_entry(de, l, d_u.d_alias)
1037				if (!d_unhashed(de))
1038					break;
1039		}
1040	}
1041	spin_unlock(&inode->i_lock);
1042	return de;
1043}
1044
1045/*
1046 *	Try to kill dentries associated with this inode.
1047 * WARNING: you must own a reference to inode.
1048 */
1049void d_prune_aliases(struct inode *inode)
1050{
1051	LIST_HEAD(dispose);
1052	struct dentry *dentry;
1053
1054	spin_lock(&inode->i_lock);
1055	hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
1056		spin_lock(&dentry->d_lock);
1057		if (!dentry->d_lockref.count)
1058			to_shrink_list(dentry, &dispose);
1059		spin_unlock(&dentry->d_lock);
1060	}
1061	spin_unlock(&inode->i_lock);
1062	shrink_dentry_list(&dispose);
1063}
1064EXPORT_SYMBOL(d_prune_aliases);
1065
1066static inline void shrink_kill(struct dentry *victim)
1067{
1068	do {
1069		rcu_read_unlock();
1070		victim = __dentry_kill(victim);
1071		rcu_read_lock();
1072	} while (victim && lock_for_kill(victim));
1073	rcu_read_unlock();
1074	if (victim)
1075		spin_unlock(&victim->d_lock);
1076}
1077
1078void shrink_dentry_list(struct list_head *list)
1079{
1080	while (!list_empty(list)) {
1081		struct dentry *dentry;
1082
1083		dentry = list_entry(list->prev, struct dentry, d_lru);
1084		spin_lock(&dentry->d_lock);
1085		rcu_read_lock();
1086		if (!lock_for_kill(dentry)) {
1087			bool can_free;
1088			rcu_read_unlock();
1089			d_shrink_del(dentry);
1090			can_free = dentry->d_flags & DCACHE_DENTRY_KILLED;
1091			spin_unlock(&dentry->d_lock);
1092			if (can_free)
1093				dentry_free(dentry);
1094			continue;
1095		}
1096		d_shrink_del(dentry);
1097		shrink_kill(dentry);
1098	}
1099}
1100
1101static enum lru_status dentry_lru_isolate(struct list_head *item,
1102		struct list_lru_one *lru, void *arg)
1103{
1104	struct list_head *freeable = arg;
1105	struct dentry	*dentry = container_of(item, struct dentry, d_lru);
1106
1107
1108	/*
1109	 * we are inverting the lru lock/dentry->d_lock here,
1110	 * so use a trylock. If we fail to get the lock, just skip
1111	 * it
1112	 */
1113	if (!spin_trylock(&dentry->d_lock))
1114		return LRU_SKIP;
1115
1116	/*
1117	 * Referenced dentries are still in use. If they have active
1118	 * counts, just remove them from the LRU. Otherwise give them
1119	 * another pass through the LRU.
1120	 */
1121	if (dentry->d_lockref.count) {
1122		d_lru_isolate(lru, dentry);
1123		spin_unlock(&dentry->d_lock);
1124		return LRU_REMOVED;
1125	}
1126
1127	if (dentry->d_flags & DCACHE_REFERENCED) {
1128		dentry->d_flags &= ~DCACHE_REFERENCED;
1129		spin_unlock(&dentry->d_lock);
1130
1131		/*
1132		 * The list move itself will be made by the common LRU code. At
1133		 * this point, we've dropped the dentry->d_lock but keep the
1134		 * lru lock. This is safe to do, since every list movement is
1135		 * protected by the lru lock even if both locks are held.
1136		 *
1137		 * This is guaranteed by the fact that all LRU management
1138		 * functions are intermediated by the LRU API calls like
 1139		 * list_lru_add_obj and list_lru_del_obj. List movements in this
 1140		 * file only ever occur through these functions or through
 1141		 * callbacks like this one, which are called from the LRU API.
 1142		 *
 1143		 * The only exceptions to this are functions like
 1144		 * shrink_dentry_list, and code that first checks for the
 1145		 * DCACHE_SHRINK_LIST flag.  Those are guaranteed to be
 1146		 * operating only with stack-provided lists after they are
 1147		 * properly isolated from the main list.  It is thus always a
 1148		 * local access.
1149		 */
1150		return LRU_ROTATE;
1151	}
1152
1153	d_lru_shrink_move(lru, dentry, freeable);
1154	spin_unlock(&dentry->d_lock);
1155
1156	return LRU_REMOVED;
1157}
1158
1159/**
1160 * prune_dcache_sb - shrink the dcache
1161 * @sb: superblock
1162 * @sc: shrink control, passed to list_lru_shrink_walk()
1163 *
1164 * Attempt to shrink the superblock dcache LRU by @sc->nr_to_scan entries. This
1165 * is done when we need more memory and called from the superblock shrinker
1166 * function.
1167 *
1168 * This function may fail to free any resources if all the dentries are in
1169 * use.
1170 */
1171long prune_dcache_sb(struct super_block *sb, struct shrink_control *sc)
1172{
1173	LIST_HEAD(dispose);
1174	long freed;
1175
1176	freed = list_lru_shrink_walk(&sb->s_dentry_lru, sc,
1177				     dentry_lru_isolate, &dispose);
1178	shrink_dentry_list(&dispose);
1179	return freed;
1180}
1181
1182static enum lru_status dentry_lru_isolate_shrink(struct list_head *item,
1183		struct list_lru_one *lru, void *arg)
1184{
1185	struct list_head *freeable = arg;
1186	struct dentry	*dentry = container_of(item, struct dentry, d_lru);
1187
1188	/*
1189	 * we are inverting the lru lock/dentry->d_lock here,
1190	 * so use a trylock. If we fail to get the lock, just skip
1191	 * it
1192	 */
1193	if (!spin_trylock(&dentry->d_lock))
1194		return LRU_SKIP;
1195
1196	d_lru_shrink_move(lru, dentry, freeable);
1197	spin_unlock(&dentry->d_lock);
1198
1199	return LRU_REMOVED;
1200}
1201
1202
1203/**
1204 * shrink_dcache_sb - shrink dcache for a superblock
1205 * @sb: superblock
1206 *
1207 * Shrink the dcache for the specified super block. This is used to free
1208 * the dcache before unmounting a file system.
1209 */
1210void shrink_dcache_sb(struct super_block *sb)
1211{
1212	do {
1213		LIST_HEAD(dispose);
1214
1215		list_lru_walk(&sb->s_dentry_lru,
1216			dentry_lru_isolate_shrink, &dispose, 1024);
1217		shrink_dentry_list(&dispose);
1218	} while (list_lru_count(&sb->s_dentry_lru) > 0);
1219}
1220EXPORT_SYMBOL(shrink_dcache_sb);
1221
1222/**
 1223 * enum d_walk_ret - action to take during tree walk
 1224 * @D_WALK_CONTINUE:	continue walk
1225 * @D_WALK_QUIT:	quit walk
1226 * @D_WALK_NORETRY:	quit when retry is needed
1227 * @D_WALK_SKIP:	skip this dentry and its children
1228 */
1229enum d_walk_ret {
1230	D_WALK_CONTINUE,
1231	D_WALK_QUIT,
1232	D_WALK_NORETRY,
1233	D_WALK_SKIP,
1234};
1235
1236/**
1237 * d_walk - walk the dentry tree
1238 * @parent:	start of walk
 1239 * @data:	data passed to @enter()
1240 * @enter:	callback when first entering the dentry
1241 *
1242 * The @enter() callbacks are called with d_lock held.
1243 */
1244static void d_walk(struct dentry *parent, void *data,
1245		   enum d_walk_ret (*enter)(void *, struct dentry *))
1246{
1247	struct dentry *this_parent, *dentry;
1248	unsigned seq = 0;
1249	enum d_walk_ret ret;
1250	bool retry = true;
1251
1252again:
1253	read_seqbegin_or_lock(&rename_lock, &seq);
1254	this_parent = parent;
1255	spin_lock(&this_parent->d_lock);
1256
1257	ret = enter(data, this_parent);
1258	switch (ret) {
1259	case D_WALK_CONTINUE:
1260		break;
1261	case D_WALK_QUIT:
1262	case D_WALK_SKIP:
1263		goto out_unlock;
1264	case D_WALK_NORETRY:
1265		retry = false;
1266		break;
1267	}
1268repeat:
1269	dentry = d_first_child(this_parent);
1270resume:
1271	hlist_for_each_entry_from(dentry, d_sib) {
1272		if (unlikely(dentry->d_flags & DCACHE_DENTRY_CURSOR))
1273			continue;
1274
1275		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
1276
1277		ret = enter(data, dentry);
1278		switch (ret) {
1279		case D_WALK_CONTINUE:
1280			break;
1281		case D_WALK_QUIT:
1282			spin_unlock(&dentry->d_lock);
1283			goto out_unlock;
1284		case D_WALK_NORETRY:
1285			retry = false;
1286			break;
1287		case D_WALK_SKIP:
1288			spin_unlock(&dentry->d_lock);
1289			continue;
1290		}
1291
1292		if (!hlist_empty(&dentry->d_children)) {
1293			spin_unlock(&this_parent->d_lock);
1294			spin_release(&dentry->d_lock.dep_map, _RET_IP_);
1295			this_parent = dentry;
1296			spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
1297			goto repeat;
1298		}
1299		spin_unlock(&dentry->d_lock);
1300	}
1301	/*
1302	 * All done at this level ... ascend and resume the search.
1303	 */
1304	rcu_read_lock();
1305ascend:
1306	if (this_parent != parent) {
1307		dentry = this_parent;
1308		this_parent = dentry->d_parent;
1309
1310		spin_unlock(&dentry->d_lock);
1311		spin_lock(&this_parent->d_lock);
1312
1313		/* might go back up the wrong parent if we have had a rename. */
1314		if (need_seqretry(&rename_lock, seq))
1315			goto rename_retry;
1316		/* go into the first sibling still alive */
1317		hlist_for_each_entry_continue(dentry, d_sib) {
1318			if (likely(!(dentry->d_flags & DCACHE_DENTRY_KILLED))) {
1319				rcu_read_unlock();
1320				goto resume;
1321			}
1322		}
1323		goto ascend;
1324	}
1325	if (need_seqretry(&rename_lock, seq))
1326		goto rename_retry;
1327	rcu_read_unlock();
1328
1329out_unlock:
1330	spin_unlock(&this_parent->d_lock);
1331	done_seqretry(&rename_lock, seq);
1332	return;
1333
1334rename_retry:
1335	spin_unlock(&this_parent->d_lock);
1336	rcu_read_unlock();
1337	BUG_ON(seq & 1);
1338	if (!retry)
1339		return;
1340	seq = 1;
1341	goto again;
1342}
1343
1344struct check_mount {
1345	struct vfsmount *mnt;
1346	unsigned int mounted;
1347};
1348
1349static enum d_walk_ret path_check_mount(void *data, struct dentry *dentry)
1350{
1351	struct check_mount *info = data;
1352	struct path path = { .mnt = info->mnt, .dentry = dentry };
1353
1354	if (likely(!d_mountpoint(dentry)))
1355		return D_WALK_CONTINUE;
1356	if (__path_is_mountpoint(&path)) {
1357		info->mounted = 1;
1358		return D_WALK_QUIT;
1359	}
1360	return D_WALK_CONTINUE;
1361}
1362
1363/**
1364 * path_has_submounts - check for mounts over a dentry in the
1365 *                      current namespace.
1366 * @parent: path to check.
1367 *
1368 * Return true if the parent or its subdirectories contain
1369 * a mount point in the current namespace.
1370 */
1371int path_has_submounts(const struct path *parent)
1372{
1373	struct check_mount data = { .mnt = parent->mnt, .mounted = 0 };
1374
1375	read_seqlock_excl(&mount_lock);
1376	d_walk(parent->dentry, &data, path_check_mount);
1377	read_sequnlock_excl(&mount_lock);
1378
1379	return data.mounted;
1380}
1381EXPORT_SYMBOL(path_has_submounts);
1382
1383/*
1384 * Called by mount code to set a mountpoint and check if the mountpoint is
1385 * reachable (e.g. NFS can unhash a directory dentry and then the complete
1386 * subtree can become unreachable).
1387 *
1388 * Only one of d_invalidate() and d_set_mounted() must succeed.  For
1389 * this reason take rename_lock and d_lock on dentry and ancestors.
1390 */
1391int d_set_mounted(struct dentry *dentry)
1392{
1393	struct dentry *p;
1394	int ret = -ENOENT;
1395	write_seqlock(&rename_lock);
1396	for (p = dentry->d_parent; !IS_ROOT(p); p = p->d_parent) {
1397		/* Need exclusion wrt. d_invalidate() */
1398		spin_lock(&p->d_lock);
1399		if (unlikely(d_unhashed(p))) {
1400			spin_unlock(&p->d_lock);
1401			goto out;
1402		}
1403		spin_unlock(&p->d_lock);
1404	}
1405	spin_lock(&dentry->d_lock);
1406	if (!d_unlinked(dentry)) {
1407		ret = -EBUSY;
1408		if (!d_mountpoint(dentry)) {
1409			dentry->d_flags |= DCACHE_MOUNTED;
1410			ret = 0;
1411		}
1412	}
1413 	spin_unlock(&dentry->d_lock);
1414out:
1415	write_sequnlock(&rename_lock);
1416	return ret;
1417}
1418
1419/*
1420 * Search the dentry child list of the specified parent,
1421 * and move any unused dentries to the end of the unused
1422 * list for prune_dcache(). We descend to the next level
1423 * whenever the d_children list is non-empty and continue
1424 * searching.
1425 *
1426 * It returns zero iff there are no unused children,
 1427 * otherwise it returns the number of children moved to
 1428 * the end of the unused list. This may not be the total
 1429 * number of unused children, because select_collect() can
1430 * drop the lock and return early due to latency
1431 * constraints.
1432 */
1433
1434struct select_data {
1435	struct dentry *start;
1436	union {
1437		long found;
1438		struct dentry *victim;
1439	};
1440	struct list_head dispose;
1441};
1442
1443static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
1444{
1445	struct select_data *data = _data;
1446	enum d_walk_ret ret = D_WALK_CONTINUE;
1447
1448	if (data->start == dentry)
1449		goto out;
1450
1451	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
1452		data->found++;
1453	} else if (!dentry->d_lockref.count) {
1454		to_shrink_list(dentry, &data->dispose);
1455		data->found++;
1456	} else if (dentry->d_lockref.count < 0) {
1457		data->found++;
1458	}
1459	/*
1460	 * We can return to the caller if we have found some (this
1461	 * ensures forward progress). We'll be coming back to find
1462	 * the rest.
1463	 */
1464	if (!list_empty(&data->dispose))
1465		ret = need_resched() ? D_WALK_QUIT : D_WALK_NORETRY;
1466out:
1467	return ret;
1468}
1469
1470static enum d_walk_ret select_collect2(void *_data, struct dentry *dentry)
1471{
1472	struct select_data *data = _data;
1473	enum d_walk_ret ret = D_WALK_CONTINUE;
1474
1475	if (data->start == dentry)
1476		goto out;
1477
1478	if (!dentry->d_lockref.count) {
1479		if (dentry->d_flags & DCACHE_SHRINK_LIST) {
1480			rcu_read_lock();
1481			data->victim = dentry;
1482			return D_WALK_QUIT;
1483		}
1484		to_shrink_list(dentry, &data->dispose);
1485	}
1486	/*
1487	 * We can return to the caller if we have found some (this
1488	 * ensures forward progress). We'll be coming back to find
1489	 * the rest.
1490	 */
1491	if (!list_empty(&data->dispose))
1492		ret = need_resched() ? D_WALK_QUIT : D_WALK_NORETRY;
1493out:
1494	return ret;
1495}
1496
1497/**
1498 * shrink_dcache_parent - prune dcache
1499 * @parent: parent of entries to prune
1500 *
1501 * Prune the dcache to remove unused children of the parent dentry.
1502 */
1503void shrink_dcache_parent(struct dentry *parent)
1504{
1505	for (;;) {
1506		struct select_data data = {.start = parent};
1507
1508		INIT_LIST_HEAD(&data.dispose);
1509		d_walk(parent, &data, select_collect);
1510
1511		if (!list_empty(&data.dispose)) {
1512			shrink_dentry_list(&data.dispose);
1513			continue;
1514		}
1515
1516		cond_resched();
1517		if (!data.found)
1518			break;
1519		data.victim = NULL;
1520		d_walk(parent, &data, select_collect2);
1521		if (data.victim) {
1522			spin_lock(&data.victim->d_lock);
1523			if (!lock_for_kill(data.victim)) {
1524				spin_unlock(&data.victim->d_lock);
1525				rcu_read_unlock();
1526			} else {
1527				shrink_kill(data.victim);
1528			}
1529		}
1530		if (!list_empty(&data.dispose))
1531			shrink_dentry_list(&data.dispose);
1532	}
1533}
1534EXPORT_SYMBOL(shrink_dcache_parent);
1535
1536static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
1537{
 1538	/* it has busy descendants; complain about those instead */
1539	if (!hlist_empty(&dentry->d_children))
1540		return D_WALK_CONTINUE;
1541
1542	/* root with refcount 1 is fine */
1543	if (dentry == _data && dentry->d_lockref.count == 1)
1544		return D_WALK_CONTINUE;
1545
1546	WARN(1, "BUG: Dentry %p{i=%lx,n=%pd} "
 1547			"still in use (%d) [unmount of %s %s]\n",
1548		       dentry,
1549		       dentry->d_inode ?
1550		       dentry->d_inode->i_ino : 0UL,
1551		       dentry,
1552		       dentry->d_lockref.count,
1553		       dentry->d_sb->s_type->name,
1554		       dentry->d_sb->s_id);
1555	return D_WALK_CONTINUE;
1556}
1557
1558static void do_one_tree(struct dentry *dentry)
1559{
1560	shrink_dcache_parent(dentry);
1561	d_walk(dentry, dentry, umount_check);
1562	d_drop(dentry);
1563	dput(dentry);
1564}
1565
1566/*
1567 * destroy the dentries attached to a superblock on unmounting
1568 */
1569void shrink_dcache_for_umount(struct super_block *sb)
1570{
1571	struct dentry *dentry;
1572
1573	rwsem_assert_held_write(&sb->s_umount);
1574
1575	dentry = sb->s_root;
1576	sb->s_root = NULL;
1577	do_one_tree(dentry);
1578
1579	while (!hlist_bl_empty(&sb->s_roots)) {
1580		dentry = dget(hlist_bl_entry(hlist_bl_first(&sb->s_roots), struct dentry, d_hash));
1581		do_one_tree(dentry);
1582	}
1583}
1584
1585static enum d_walk_ret find_submount(void *_data, struct dentry *dentry)
1586{
1587	struct dentry **victim = _data;
1588	if (d_mountpoint(dentry)) {
1589		*victim = dget_dlock(dentry);
1590		return D_WALK_QUIT;
1591	}
1592	return D_WALK_CONTINUE;
1593}
1594
1595/**
1596 * d_invalidate - detach submounts, prune dcache, and drop
1597 * @dentry: dentry to invalidate (aka detach, prune and drop)
1598 */
1599void d_invalidate(struct dentry *dentry)
1600{
1601	bool had_submounts = false;
1602	spin_lock(&dentry->d_lock);
1603	if (d_unhashed(dentry)) {
1604		spin_unlock(&dentry->d_lock);
1605		return;
1606	}
1607	__d_drop(dentry);
1608	spin_unlock(&dentry->d_lock);
1609
1610	/* Negative dentries can be dropped without further checks */
1611	if (!dentry->d_inode)
1612		return;
1613
1614	shrink_dcache_parent(dentry);
1615	for (;;) {
1616		struct dentry *victim = NULL;
1617		d_walk(dentry, &victim, find_submount);
1618		if (!victim) {
1619			if (had_submounts)
1620				shrink_dcache_parent(dentry);
1621			return;
1622		}
1623		had_submounts = true;
1624		detach_mounts(victim);
1625		dput(victim);
1626	}
1627}
1628EXPORT_SYMBOL(d_invalidate);
1629
1630/**
1631 * __d_alloc	-	allocate a dcache entry
1632 * @sb: filesystem it will belong to
1633 * @name: qstr of the name
1634 *
1635 * Allocates a dentry. It returns %NULL if there is insufficient memory
1636 * available. On a success the dentry is returned. The name passed in is
1637 * copied and the copy passed in may be reused after this call.
1638 */
1639 
1640static struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
1641{
1642	struct dentry *dentry;
1643	char *dname;
1644	int err;
1645
1646	dentry = kmem_cache_alloc_lru(dentry_cache, &sb->s_dentry_lru,
1647				      GFP_KERNEL);
1648	if (!dentry)
1649		return NULL;
1650
1651	/*
1652	 * We guarantee that the inline name is always NUL-terminated.
1653	 * This way the memcpy() done by the name switching in rename
1654	 * will still always have a NUL at the end, even if we might
1655	 * be overwriting an internal NUL character
1656	 */
1657	dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
1658	if (unlikely(!name)) {
1659		name = &slash_name;
1660		dname = dentry->d_iname;
1661	} else if (name->len > DNAME_INLINE_LEN-1) {
1662		size_t size = offsetof(struct external_name, name[1]);
1663		struct external_name *p = kmalloc(size + name->len,
1664						  GFP_KERNEL_ACCOUNT |
1665						  __GFP_RECLAIMABLE);
1666		if (!p) {
1667			kmem_cache_free(dentry_cache, dentry); 
1668			return NULL;
1669		}
1670		atomic_set(&p->u.count, 1);
1671		dname = p->name;
1672	} else  {
1673		dname = dentry->d_iname;
1674	}	
1675
1676	dentry->d_name.len = name->len;
1677	dentry->d_name.hash = name->hash;
1678	memcpy(dname, name->name, name->len);
1679	dname[name->len] = 0;
1680
1681	/* Make sure we always see the terminating NUL character */
1682	smp_store_release(&dentry->d_name.name, dname); /* ^^^ */
1683
1684	dentry->d_lockref.count = 1;
1685	dentry->d_flags = 0;
1686	spin_lock_init(&dentry->d_lock);
1687	seqcount_spinlock_init(&dentry->d_seq, &dentry->d_lock);
1688	dentry->d_inode = NULL;
1689	dentry->d_parent = dentry;
1690	dentry->d_sb = sb;
1691	dentry->d_op = NULL;
1692	dentry->d_fsdata = NULL;
1693	INIT_HLIST_BL_NODE(&dentry->d_hash);
1694	INIT_LIST_HEAD(&dentry->d_lru);
1695	INIT_HLIST_HEAD(&dentry->d_children);
1696	INIT_HLIST_NODE(&dentry->d_u.d_alias);
1697	INIT_HLIST_NODE(&dentry->d_sib);
1698	d_set_d_op(dentry, dentry->d_sb->s_d_op);
1699
1700	if (dentry->d_op && dentry->d_op->d_init) {
1701		err = dentry->d_op->d_init(dentry);
1702		if (err) {
1703			if (dname_external(dentry))
1704				kfree(external_name(dentry));
1705			kmem_cache_free(dentry_cache, dentry);
1706			return NULL;
1707		}
1708	}
1709
1710	this_cpu_inc(nr_dentry);
1711
1712	return dentry;
1713}
1714
1715/**
1716 * d_alloc	-	allocate a dcache entry
1717 * @parent: parent of entry to allocate
1718 * @name: qstr of the name
1719 *
1720 * Allocates a dentry. It returns %NULL if there is insufficient memory
1721 * available. On a success the dentry is returned. The name passed in is
1722 * copied and the copy passed in may be reused after this call.
1723 */
1724struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
1725{
1726	struct dentry *dentry = __d_alloc(parent->d_sb, name);
1727	if (!dentry)
1728		return NULL;
1729	spin_lock(&parent->d_lock);
1730	/*
1731	 * don't need child lock because it is not subject
1732	 * to concurrency here
1733	 */
1734	dentry->d_parent = dget_dlock(parent);
1735	hlist_add_head(&dentry->d_sib, &parent->d_children);
1736	spin_unlock(&parent->d_lock);
1737
1738	return dentry;
1739}
1740EXPORT_SYMBOL(d_alloc);
1741
1742struct dentry *d_alloc_anon(struct super_block *sb)
1743{
1744	return __d_alloc(sb, NULL);
1745}
1746EXPORT_SYMBOL(d_alloc_anon);
1747
1748struct dentry *d_alloc_cursor(struct dentry * parent)
1749{
1750	struct dentry *dentry = d_alloc_anon(parent->d_sb);
1751	if (dentry) {
1752		dentry->d_flags |= DCACHE_DENTRY_CURSOR;
1753		dentry->d_parent = dget(parent);
1754	}
1755	return dentry;
1756}
1757
1758/**
1759 * d_alloc_pseudo - allocate a dentry (for lookup-less filesystems)
1760 * @sb: the superblock
1761 * @name: qstr of the name
1762 *
1763 * For a filesystem that just pins its dentries in memory and never
1764 * performs lookups at all, return an unhashed IS_ROOT dentry.
 1765 * This is used for pipes, sockets et al. - the stuff that should
1766 * never be anyone's children or parents.  Unlike all other
1767 * dentries, these will not have RCU delay between dropping the
1768 * last reference and freeing them.
1769 *
1770 * The only user is alloc_file_pseudo() and that's what should
1771 * be considered a public interface.  Don't use directly.
1772 */
1773struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name)
1774{
1775	static const struct dentry_operations anon_ops = {
1776		.d_dname = simple_dname
1777	};
1778	struct dentry *dentry = __d_alloc(sb, name);
1779	if (likely(dentry)) {
1780		dentry->d_flags |= DCACHE_NORCU;
1781		if (!sb->s_d_op)
1782			d_set_d_op(dentry, &anon_ops);
1783	}
1784	return dentry;
1785}
1786
1787struct dentry *d_alloc_name(struct dentry *parent, const char *name)
1788{
1789	struct qstr q;
1790
1791	q.name = name;
1792	q.hash_len = hashlen_string(parent, name);
1793	return d_alloc(parent, &q);
1794}
1795EXPORT_SYMBOL(d_alloc_name);
1796
1797void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
1798{
1799	WARN_ON_ONCE(dentry->d_op);
1800	WARN_ON_ONCE(dentry->d_flags & (DCACHE_OP_HASH	|
1801				DCACHE_OP_COMPARE	|
1802				DCACHE_OP_REVALIDATE	|
1803				DCACHE_OP_WEAK_REVALIDATE	|
1804				DCACHE_OP_DELETE	|
1805				DCACHE_OP_REAL));
1806	dentry->d_op = op;
1807	if (!op)
1808		return;
1809	if (op->d_hash)
1810		dentry->d_flags |= DCACHE_OP_HASH;
1811	if (op->d_compare)
1812		dentry->d_flags |= DCACHE_OP_COMPARE;
1813	if (op->d_revalidate)
1814		dentry->d_flags |= DCACHE_OP_REVALIDATE;
1815	if (op->d_weak_revalidate)
1816		dentry->d_flags |= DCACHE_OP_WEAK_REVALIDATE;
1817	if (op->d_delete)
1818		dentry->d_flags |= DCACHE_OP_DELETE;
1819	if (op->d_prune)
1820		dentry->d_flags |= DCACHE_OP_PRUNE;
1821	if (op->d_real)
1822		dentry->d_flags |= DCACHE_OP_REAL;
1823
1824}
1825EXPORT_SYMBOL(d_set_d_op);
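/*
 * Editor's illustrative sketch (not part of fs/dcache.c): filesystems
 * normally don't call d_set_d_op() by hand - they set sb->s_d_op once
 * at mount time and __d_alloc() applies it to every new dentry.  The
 * ops struct here is a made-up example using always_delete_dentry()
 * from libfs.
 */
static const struct dentry_operations example_dops __maybe_unused = {
	.d_delete	= always_delete_dentry,
};

static inline void fill_super_example(struct super_block *sb)
{
	sb->s_d_op = &example_dops;	/* picked up by __d_alloc() */
}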
1826
1827static unsigned d_flags_for_inode(struct inode *inode)
1828{
1829	unsigned add_flags = DCACHE_REGULAR_TYPE;
1830
1831	if (!inode)
1832		return DCACHE_MISS_TYPE;
1833
1834	if (S_ISDIR(inode->i_mode)) {
1835		add_flags = DCACHE_DIRECTORY_TYPE;
1836		if (unlikely(!(inode->i_opflags & IOP_LOOKUP))) {
1837			if (unlikely(!inode->i_op->lookup))
1838				add_flags = DCACHE_AUTODIR_TYPE;
1839			else
1840				inode->i_opflags |= IOP_LOOKUP;
1841		}
1842		goto type_determined;
1843	}
1844
1845	if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) {
1846		if (unlikely(inode->i_op->get_link)) {
1847			add_flags = DCACHE_SYMLINK_TYPE;
1848			goto type_determined;
1849		}
1850		inode->i_opflags |= IOP_NOFOLLOW;
1851	}
1852
1853	if (unlikely(!S_ISREG(inode->i_mode)))
1854		add_flags = DCACHE_SPECIAL_TYPE;
1855
1856type_determined:
1857	if (unlikely(IS_AUTOMOUNT(inode)))
1858		add_flags |= DCACHE_NEED_AUTOMOUNT;
1859	return add_flags;
1860}
1861
1862static void __d_instantiate(struct dentry *dentry, struct inode *inode)
1863{
1864	unsigned add_flags = d_flags_for_inode(inode);
1865	WARN_ON(d_in_lookup(dentry));
1866
1867	spin_lock(&dentry->d_lock);
1868	/*
1869	 * The negative counter only tracks dentries on the LRU. Don't dec if
1870	 * d_lru is on another list.
1871	 */
1872	if ((dentry->d_flags &
1873	     (DCACHE_LRU_LIST|DCACHE_SHRINK_LIST)) == DCACHE_LRU_LIST)
1874		this_cpu_dec(nr_dentry_negative);
1875	hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
1876	raw_write_seqcount_begin(&dentry->d_seq);
1877	__d_set_inode_and_type(dentry, inode, add_flags);
1878	raw_write_seqcount_end(&dentry->d_seq);
1879	fsnotify_update_flags(dentry);
1880	spin_unlock(&dentry->d_lock);
1881}
1882
1883/**
1884 * d_instantiate - fill in inode information for a dentry
1885 * @entry: dentry to complete
1886 * @inode: inode to attach to this dentry
1887 *
1888 * Fill in inode information in the entry.
1889 *
1890 * This turns negative dentries into productive full members
1891 * of society.
1892 *
1893 * NOTE! This assumes that the inode count has been incremented
1894 * (or otherwise set) by the caller to indicate that it is now
1895 * in use by the dcache.
1896 */
1897 
1898void d_instantiate(struct dentry *entry, struct inode * inode)
1899{
1900	BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
1901	if (inode) {
1902		security_d_instantiate(entry, inode);
1903		spin_lock(&inode->i_lock);
1904		__d_instantiate(entry, inode);
1905		spin_unlock(&inode->i_lock);
1906	}
1907}
1908EXPORT_SYMBOL(d_instantiate);
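/*
 * Example (a sketch of a typical in-core ->create(), modeled on
 * ramfs): allocate and initialize the inode, then make the negative
 * dentry positive; the extra dget() pins the entry the way ramfs
 * does for filesystems whose dentries are never evicted:
 *
 *	struct inode *inode = new_inode(dir->i_sb);
 *	if (!inode)
 *		return -ENOSPC;
 *	// ... initialize inode ...
 *	d_instantiate(dentry, inode);
 *	dget(dentry);
 */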
1909
1910/*
1911 * This should be equivalent to d_instantiate() + unlock_new_inode(),
1912 * with lockdep-related part of unlock_new_inode() done before
1913 * anything else.  Use that instead of open-coding d_instantiate()/
1914 * unlock_new_inode() combinations.
1915 */
1916void d_instantiate_new(struct dentry *entry, struct inode *inode)
1917{
1918	BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
1919	BUG_ON(!inode);
1920	lockdep_annotate_inode_mutex_key(inode);
1921	security_d_instantiate(entry, inode);
1922	spin_lock(&inode->i_lock);
1923	__d_instantiate(entry, inode);
1924	WARN_ON(!(inode->i_state & I_NEW));
1925	inode->i_state &= ~I_NEW & ~I_CREATING;
1926	/*
1927	 * Pairs with the barrier in prepare_to_wait_event() to make sure
1928	 * ___wait_var_event() either sees the bit cleared or
1929	 * waitqueue_active() check in wake_up_var() sees the waiter.
1930	 */
1931	smp_mb();
1932	inode_wake_up_bit(inode, __I_NEW);
1933	spin_unlock(&inode->i_lock);
1934}
1935EXPORT_SYMBOL(d_instantiate_new);
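/*
 * Example (a sketch following the ext4-style pattern; demo_add_link()
 * is a hypothetical directory-entry insert): a ->create() that
 * allocated its inode with I_NEW set finishes with
 * d_instantiate_new() instead of open-coding the combination:
 *
 *	err = demo_add_link(dentry, inode);
 *	if (!err) {
 *		d_instantiate_new(dentry, inode);
 *		return 0;
 *	}
 */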
1936
1937struct dentry *d_make_root(struct inode *root_inode)
1938{
1939	struct dentry *res = NULL;
1940
1941	if (root_inode) {
1942		res = d_alloc_anon(root_inode->i_sb);
1943		if (res)
1944			d_instantiate(res, root_inode);
1945		else
1946			iput(root_inode);
1947	}
1948	return res;
1949}
1950EXPORT_SYMBOL(d_make_root);
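/*
 * Example (a sketch of a fill_super() tail; demo_root_inode() is
 * hypothetical): because d_make_root() consumes the inode reference
 * on failure as well, no iput() is needed on the error path:
 *
 *	inode = demo_root_inode(sb);
 *	sb->s_root = d_make_root(inode);
 *	if (!sb->s_root)
 *		return -ENOMEM;
 */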
1951
1952static struct dentry *__d_obtain_alias(struct inode *inode, bool disconnected)
1953{
1954	struct super_block *sb;
1955	struct dentry *new, *res;
1956
1957	if (!inode)
1958		return ERR_PTR(-ESTALE);
1959	if (IS_ERR(inode))
1960		return ERR_CAST(inode);
1961
1962	sb = inode->i_sb;
1963
1964	res = d_find_any_alias(inode); /* existing alias? */
1965	if (res)
1966		goto out;
1967
1968	new = d_alloc_anon(sb);
1969	if (!new) {
1970		res = ERR_PTR(-ENOMEM);
1971		goto out;
1972	}
1973
1974	security_d_instantiate(new, inode);
1975	spin_lock(&inode->i_lock);
1976	res = __d_find_any_alias(inode); /* recheck under lock */
1977	if (likely(!res)) { /* still no alias, attach a disconnected dentry */
1978		unsigned add_flags = d_flags_for_inode(inode);
1979
1980		if (disconnected)
1981			add_flags |= DCACHE_DISCONNECTED;
1982
1983		spin_lock(&new->d_lock);
1984		__d_set_inode_and_type(new, inode, add_flags);
1985		hlist_add_head(&new->d_u.d_alias, &inode->i_dentry);
1986		if (!disconnected) {
1987			hlist_bl_lock(&sb->s_roots);
1988			hlist_bl_add_head(&new->d_hash, &sb->s_roots);
1989			hlist_bl_unlock(&sb->s_roots);
1990		}
1991		spin_unlock(&new->d_lock);
1992		spin_unlock(&inode->i_lock);
1993		inode = NULL; /* consumed by new->d_inode */
1994		res = new;
1995	} else {
1996		spin_unlock(&inode->i_lock);
1997		dput(new);
1998	}
1999
2000 out:
2001	iput(inode);
2002	return res;
2003}
2004
2005/**
2006 * d_obtain_alias - find or allocate a DISCONNECTED dentry for a given inode
2007 * @inode: inode to allocate the dentry for
2008 *
2009 * Obtain a dentry for an inode resulting from NFS filehandle conversion or
2010 * similar open by handle operations.  The returned dentry may be anonymous,
2011 * or may have a full name (if the inode was already in the cache).
2012 *
2013 * When called on a directory inode, we must ensure that the inode only ever
2014 * has one dentry.  If a dentry is found, that is returned instead of
2015 * allocating a new one.
2016 *
2017 * On successful return, the reference to the inode has been transferred
2018 * to the dentry.  In case of an error the reference on the inode is released.
2019 * To make it easier to use in export operations a %NULL or IS_ERR inode may
2020 * be passed in and the error will be propagated to the return value,
2021 * with a %NULL @inode replaced by ERR_PTR(-ESTALE).
2022 */
2023struct dentry *d_obtain_alias(struct inode *inode)
2024{
2025	return __d_obtain_alias(inode, true);
2026}
2027EXPORT_SYMBOL(d_obtain_alias);
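/*
 * Example (a sketch of an export_operations ->fh_to_dentry(), with a
 * hypothetical demo_iget()): the inode reference is handed over in
 * all cases, including NULL and ERR_PTR inodes:
 *
 *	struct inode *inode = demo_iget(sb, ino, generation);
 *	return d_obtain_alias(inode);
 */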
2028
2029/**
2030 * d_obtain_root - find or allocate a dentry for a given inode
2031 * @inode: inode to allocate the dentry for
2032 *
2033 * Obtain an IS_ROOT dentry for the root of a filesystem.
2034 *
2035 * We must ensure that directory inodes only ever have one dentry.  If a
2036 * dentry is found, that is returned instead of allocating a new one.
2037 *
2038 * On successful return, the reference to the inode has been transferred
2039 * to the dentry.  In case of an error the reference on the inode is
2040 * released.  A %NULL or IS_ERR inode may be passed in and the error
2041 * will be propagated to the return value, with a %NULL @inode
2042 * replaced by ERR_PTR(-ESTALE).
2043 */
2044struct dentry *d_obtain_root(struct inode *inode)
2045{
2046	return __d_obtain_alias(inode, false);
2047}
2048EXPORT_SYMBOL(d_obtain_root);
2049
2050/**
2051 * d_add_ci - lookup or allocate new dentry with case-exact name
2052 * @dentry: the negative dentry that was passed to the parent's lookup func
2053 * @inode:  the inode case-insensitive lookup has found
2054 * @name:   the case-exact name to be associated with the returned dentry
2055 *
2056 * This is to avoid filling the dcache with case-insensitive names to the
2057 * same inode: only the actual correct case is stored in the dcache for
2058 * case-insensitive filesystems.
2059 *
2060 * For a case-insensitive lookup match and if the case-exact dentry
2061 * already exists in the dcache, use it and return it.
2062 *
2063 * If no entry exists with the exact case name, allocate new dentry with
2064 * the exact case, and return the spliced entry.
2065 */
2066struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
2067			struct qstr *name)
2068{
2069	struct dentry *found, *res;
2070
2071	/*
2072	 * First check if a dentry matching the name already exists,
2073	 * if not go ahead and create it now.
2074	 */
2075	found = d_hash_and_lookup(dentry->d_parent, name);
2076	if (found) {
2077		iput(inode);
2078		return found;
2079	}
2080	if (d_in_lookup(dentry)) {
2081		found = d_alloc_parallel(dentry->d_parent, name,
2082					dentry->d_wait);
2083		if (IS_ERR(found) || !d_in_lookup(found)) {
2084			iput(inode);
2085			return found;
2086		}
2087	} else {
2088		found = d_alloc(dentry->d_parent, name);
2089		if (!found) {
2090			iput(inode);
2091			return ERR_PTR(-ENOMEM);
2092		} 
2093	}
2094	res = d_splice_alias(inode, found);
2095	if (res) {
2096		d_lookup_done(found);
2097		dput(found);
2098		return res;
2099	}
2100	return found;
2101}
2102EXPORT_SYMBOL(d_add_ci);
2103
2104/**
2105 * d_same_name - compare dentry name with case-exact name
2106 * @dentry: dentry whose name is to be compared
2107 * @parent: parent dentry, consulted for a possible ->d_compare()
2108 * @name:   the case-exact name to compare against
2109 *
2110 * Return: true if the names are the same, false otherwise
2111 */
2112bool d_same_name(const struct dentry *dentry, const struct dentry *parent,
2113		 const struct qstr *name)
2114{
2115	if (likely(!(parent->d_flags & DCACHE_OP_COMPARE))) {
2116		if (dentry->d_name.len != name->len)
2117			return false;
2118		return dentry_cmp(dentry, name->name, name->len) == 0;
2119	}
2120	return parent->d_op->d_compare(dentry,
2121				       dentry->d_name.len, dentry->d_name.name,
2122				       name) == 0;
2123}
2124EXPORT_SYMBOL_GPL(d_same_name);
2125
2126/*
2127 * This is __d_lookup_rcu() when the parent dentry has
2128 * DCACHE_OP_COMPARE, which makes things much nastier.
2129 */
2130static noinline struct dentry *__d_lookup_rcu_op_compare(
2131	const struct dentry *parent,
2132	const struct qstr *name,
2133	unsigned *seqp)
2134{
2135	u64 hashlen = name->hash_len;
2136	struct hlist_bl_head *b = d_hash(hashlen);
2137	struct hlist_bl_node *node;
2138	struct dentry *dentry;
2139
2140	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
2141		int tlen;
2142		const char *tname;
2143		unsigned seq;
2144
2145seqretry:
2146		seq = raw_seqcount_begin(&dentry->d_seq);
2147		if (dentry->d_parent != parent)
2148			continue;
2149		if (d_unhashed(dentry))
2150			continue;
2151		if (dentry->d_name.hash != hashlen_hash(hashlen))
2152			continue;
2153		tlen = dentry->d_name.len;
2154		tname = dentry->d_name.name;
2155		/* we want a consistent (name,len) pair */
2156		if (read_seqcount_retry(&dentry->d_seq, seq)) {
2157			cpu_relax();
2158			goto seqretry;
2159		}
2160		if (parent->d_op->d_compare(dentry, tlen, tname, name) != 0)
2161			continue;
2162		*seqp = seq;
2163		return dentry;
2164	}
2165	return NULL;
2166}
2167
2168/**
2169 * __d_lookup_rcu - search for a dentry (racy, store-free)
2170 * @parent: parent dentry
2171 * @name: qstr of name we wish to find
2172 * @seqp: returns d_seq value at the point where the dentry was found
2173 * Returns: dentry, or NULL
2174 *
2175 * __d_lookup_rcu is the dcache lookup function for rcu-walk name
2176 * resolution (store-free path walking) design described in
2177 * Documentation/filesystems/path-lookup.txt.
2178 *
2179 * This is not to be used outside core vfs.
2180 *
2181 * __d_lookup_rcu must only be used in rcu-walk mode, i.e. with vfsmount lock
2182 * held, and rcu_read_lock held. The returned dentry must not be stored
2183 * anywhere without taking d_lock and checking the d_seq sequence count
2184 * against the value returned here in @seqp.
2185 *
2186 * Alternatively, __d_lookup_rcu may be called again to look up the child of
2187 * the returned dentry, so long as its parent's seqlock is checked after the
2188 * child is looked up. Thus, an interlocking stepping of sequence lock checks
2189 * is formed, giving integrity down the path walk.
2190 *
2191 * NOTE! The caller *has* to check the resulting dentry against the sequence
2192 * number we've returned before using any of the resulting dentry state!
2193 */
2194struct dentry *__d_lookup_rcu(const struct dentry *parent,
2195				const struct qstr *name,
2196				unsigned *seqp)
2197{
2198	u64 hashlen = name->hash_len;
2199	const unsigned char *str = name->name;
2200	struct hlist_bl_head *b = d_hash(hashlen);
2201	struct hlist_bl_node *node;
2202	struct dentry *dentry;
2203
2204	/*
2205	 * Note: There is significant duplication with __d_lookup which is
2206	 * required to prevent single threaded performance regressions
2207	 * especially on architectures where smp_rmb (in seqcounts) are costly.
2208	 * Keep the two functions in sync.
2209	 */
2210
2211	if (unlikely(parent->d_flags & DCACHE_OP_COMPARE))
2212		return __d_lookup_rcu_op_compare(parent, name, seqp);
2213
2214	/*
2215	 * The hash list is protected using RCU.
2216	 *
2217	 * Carefully use d_seq when comparing a candidate dentry, to avoid
2218	 * races with d_move().
2219	 *
2220	 * It is possible that concurrent renames can mess up our list
2221	 * walk here and result in missing our dentry, resulting in the
2222	 * false-negative result. d_lookup() protects against concurrent
2223	 * renames using rename_lock seqlock.
2224	 *
2225	 * See Documentation/filesystems/path-lookup.txt for more details.
2226	 */
2227	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
2228		unsigned seq;
2229
2230		/*
2231		 * The dentry sequence count protects us from concurrent
2232		 * renames, and thus protects parent and name fields.
2233		 *
2234		 * The caller must perform a seqcount check in order
2235		 * to do anything useful with the returned dentry.
2236		 *
2237		 * NOTE! We do a "raw" seqcount_begin here. That means that
2238		 * we don't wait for the sequence count to stabilize if it
2239		 * is in the middle of a sequence change. If we do the slow
2240		 * dentry compare, we will do seqretries until it is stable,
2241		 * and if we end up with a successful lookup, we actually
2242		 * want to exit RCU lookup anyway.
2243		 *
2244		 * Note that raw_seqcount_begin still *does* smp_rmb(), so
2245		 * we are still guaranteed NUL-termination of ->d_name.name.
2246		 */
2247		seq = raw_seqcount_begin(&dentry->d_seq);
2248		if (dentry->d_parent != parent)
2249			continue;
2250		if (d_unhashed(dentry))
2251			continue;
2252		if (dentry->d_name.hash_len != hashlen)
2253			continue;
2254		if (dentry_cmp(dentry, str, hashlen_len(hashlen)) != 0)
2255			continue;
2256		*seqp = seq;
2257		return dentry;
2258	}
2259	return NULL;
2260}
2261
2262/**
2263 * d_lookup - search for a dentry
2264 * @parent: parent dentry
2265 * @name: qstr of name we wish to find
2266 * Returns: dentry, or NULL
2267 *
2268 * d_lookup searches the children of the parent dentry for the name in
2269 * question. If the dentry is found its reference count is incremented and the
2270 * dentry is returned. The caller must use dput to free the entry when it has
2271 * finished using it. %NULL is returned if the dentry does not exist.
2272 */
2273struct dentry *d_lookup(const struct dentry *parent, const struct qstr *name)
2274{
2275	struct dentry *dentry;
2276	unsigned seq;
2277
2278	do {
2279		seq = read_seqbegin(&rename_lock);
2280		dentry = __d_lookup(parent, name);
2281		if (dentry)
2282			break;
2283	} while (read_seqretry(&rename_lock, seq));
2284	return dentry;
2285}
2286EXPORT_SYMBOL(d_lookup);
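/*
 * Example (a sketch): probing the dcache for a child whose qstr is
 * already hashed; the reference returned by d_lookup() must be
 * balanced with dput():
 *
 *	struct dentry *child = d_lookup(parent, name);
 *	if (child) {
 *		// ... use child ...
 *		dput(child);
 *	}
 */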
2287
2288/**
2289 * __d_lookup - search for a dentry (racy)
2290 * @parent: parent dentry
2291 * @name: qstr of name we wish to find
2292 * Returns: dentry, or NULL
2293 *
2294 * __d_lookup is like d_lookup, however it may (rarely) return a
2295 * false-negative result due to unrelated rename activity.
2296 *
2297 * __d_lookup is slightly faster by avoiding rename_lock read seqlock,
2298 * however it must be used carefully, e.g. with a following d_lookup in
2299 * the case of failure.
2300 *
2301 * __d_lookup callers must be commented.
2302 */
2303struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
2304{
2305	unsigned int hash = name->hash;
2306	struct hlist_bl_head *b = d_hash(hash);
2307	struct hlist_bl_node *node;
2308	struct dentry *found = NULL;
2309	struct dentry *dentry;
2310
2311	/*
2312	 * Note: There is significant duplication with __d_lookup_rcu which is
2313	 * required to prevent single threaded performance regressions
2314	 * especially on architectures where smp_rmb (in seqcounts) are costly.
2315	 * Keep the two functions in sync.
2316	 */
2317
2318	/*
2319	 * The hash list is protected using RCU.
2320	 *
2321	 * Take d_lock when comparing a candidate dentry, to avoid races
2322	 * with d_move().
2323	 *
2324	 * It is possible that concurrent renames can mess up our list
2325	 * walk here and result in missing our dentry, resulting in the
2326	 * false-negative result. d_lookup() protects against concurrent
2327	 * renames using rename_lock seqlock.
2328	 *
2329	 * See Documentation/filesystems/path-lookup.txt for more details.
2330	 */
2331	rcu_read_lock();
2332	
2333	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
2334
2335		if (dentry->d_name.hash != hash)
2336			continue;
2337
2338		spin_lock(&dentry->d_lock);
2339		if (dentry->d_parent != parent)
2340			goto next;
2341		if (d_unhashed(dentry))
2342			goto next;
2343
2344		if (!d_same_name(dentry, parent, name))
2345			goto next;
2346
2347		dentry->d_lockref.count++;
2348		found = dentry;
2349		spin_unlock(&dentry->d_lock);
2350		break;
2351next:
2352		spin_unlock(&dentry->d_lock);
2353 	}
2354 	rcu_read_unlock();
2355
2356 	return found;
2357}
2358
2359/**
2360 * d_hash_and_lookup - hash the qstr then search for a dentry
2361 * @dir: Directory to search in
2362 * @name: qstr of name we wish to find
2363 *
2364 * On lookup failure NULL is returned; on a bad name, ERR_PTR(-error).
2365 */
2366struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
2367{
2368	/*
2369	 * Check for a fs-specific hash function. Note that we must
2370	 * calculate the standard hash first, as the d_op->d_hash()
2371	 * routine may choose to leave the hash value unchanged.
2372	 */
2373	name->hash = full_name_hash(dir, name->name, name->len);
2374	if (dir->d_flags & DCACHE_OP_HASH) {
2375		int err = dir->d_op->d_hash(dir, name);
2376		if (unlikely(err < 0))
2377			return ERR_PTR(err);
2378	}
2379	return d_lookup(dir, name);
2380}
2381EXPORT_SYMBOL(d_hash_and_lookup);
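/*
 * Example (a sketch): probing the dcache with a raw string, letting
 * d_hash_and_lookup() compute ->hash and honour a possible
 * ->d_hash() of @dir:
 *
 *	struct qstr q = QSTR_INIT("demo", 4);
 *	struct dentry *child = d_hash_and_lookup(dir, &q);
 *	if (IS_ERR(child))
 *		return PTR_ERR(child);	// ->d_hash() rejected the name
 *	if (child)
 *		dput(child);
 */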
2382
2383/*
2384 * When a file is deleted, we have two options:
2385 * - turn this dentry into a negative dentry
2386 * - unhash this dentry and free it.
2387 *
2388 * Usually, we want to just turn this into
2389 * a negative dentry, but if anybody else is
2390 * currently using the dentry or the inode
2391 * we can't do that and we fall back on removing
2392 * it from the hash queues and waiting for
2393 * it to be deleted later when it has no users.
2394 */
2395 
2396/**
2397 * d_delete - delete a dentry
2398 * @dentry: The dentry to delete
2399 *
2400 * Turn the dentry into a negative dentry if possible, otherwise
2401 * remove it from the hash queues so it can be deleted later
2402 */
2403 
2404void d_delete(struct dentry * dentry)
2405{
2406	struct inode *inode = dentry->d_inode;
2407
2408	spin_lock(&inode->i_lock);
2409	spin_lock(&dentry->d_lock);
2410	/*
2411	 * Are we the only user?
2412	 */
2413	if (dentry->d_lockref.count == 1) {
2414		if (dentry_negative_policy)
2415			__d_drop(dentry);
2416		dentry->d_flags &= ~DCACHE_CANT_MOUNT;
2417		dentry_unlink_inode(dentry);
2418	} else {
2419		__d_drop(dentry);
2420		spin_unlock(&dentry->d_lock);
2421		spin_unlock(&inode->i_lock);
2422	}
2423}
2424EXPORT_SYMBOL(d_delete);
2425
2426static void __d_rehash(struct dentry *entry)
2427{
2428	struct hlist_bl_head *b = d_hash(entry->d_name.hash);
2429
2430	hlist_bl_lock(b);
2431	hlist_bl_add_head_rcu(&entry->d_hash, b);
2432	hlist_bl_unlock(b);
2433}
2434
2435/**
2436 * d_rehash	- add an entry back to the hash
2437 * @entry: dentry to add to the hash
2438 *
2439 * Adds a dentry to the hash according to its name.
2440 */
2441 
2442void d_rehash(struct dentry * entry)
2443{
2444	spin_lock(&entry->d_lock);
2445	__d_rehash(entry);
2446	spin_unlock(&entry->d_lock);
2447}
2448EXPORT_SYMBOL(d_rehash);
2449
2450static inline unsigned start_dir_add(struct inode *dir)
2451{
2452	preempt_disable_nested();
2453	for (;;) {
2454		unsigned n = dir->i_dir_seq;
2455		if (!(n & 1) && cmpxchg(&dir->i_dir_seq, n, n + 1) == n)
2456			return n;
2457		cpu_relax();
2458	}
2459}
2460
2461static inline void end_dir_add(struct inode *dir, unsigned int n,
2462			       wait_queue_head_t *d_wait)
2463{
2464	smp_store_release(&dir->i_dir_seq, n + 2);
2465	preempt_enable_nested();
2466	wake_up_all(d_wait);
2467}
2468
2469static void d_wait_lookup(struct dentry *dentry)
2470{
2471	if (d_in_lookup(dentry)) {
2472		DECLARE_WAITQUEUE(wait, current);
2473		add_wait_queue(dentry->d_wait, &wait);
2474		do {
2475			set_current_state(TASK_UNINTERRUPTIBLE);
2476			spin_unlock(&dentry->d_lock);
2477			schedule();
2478			spin_lock(&dentry->d_lock);
2479		} while (d_in_lookup(dentry));
2480	}
2481}
2482
2483struct dentry *d_alloc_parallel(struct dentry *parent,
2484				const struct qstr *name,
2485				wait_queue_head_t *wq)
2486{
2487	unsigned int hash = name->hash;
2488	struct hlist_bl_head *b = in_lookup_hash(parent, hash);
2489	struct hlist_bl_node *node;
2490	struct dentry *new = d_alloc(parent, name);
2491	struct dentry *dentry;
2492	unsigned seq, r_seq, d_seq;
2493
2494	if (unlikely(!new))
2495		return ERR_PTR(-ENOMEM);
2496
2497retry:
2498	rcu_read_lock();
2499	seq = smp_load_acquire(&parent->d_inode->i_dir_seq);
2500	r_seq = read_seqbegin(&rename_lock);
2501	dentry = __d_lookup_rcu(parent, name, &d_seq);
2502	if (unlikely(dentry)) {
2503		if (!lockref_get_not_dead(&dentry->d_lockref)) {
2504			rcu_read_unlock();
2505			goto retry;
2506		}
2507		if (read_seqcount_retry(&dentry->d_seq, d_seq)) {
2508			rcu_read_unlock();
2509			dput(dentry);
2510			goto retry;
2511		}
2512		rcu_read_unlock();
2513		dput(new);
2514		return dentry;
2515	}
2516	if (unlikely(read_seqretry(&rename_lock, r_seq))) {
2517		rcu_read_unlock();
2518		goto retry;
2519	}
2520
2521	if (unlikely(seq & 1)) {
2522		rcu_read_unlock();
2523		goto retry;
2524	}
2525
2526	hlist_bl_lock(b);
2527	if (unlikely(READ_ONCE(parent->d_inode->i_dir_seq) != seq)) {
2528		hlist_bl_unlock(b);
2529		rcu_read_unlock();
2530		goto retry;
2531	}
2532	/*
2533	 * No changes for the parent since the beginning of d_lookup().
2534	 * Since all removals from the chain happen with hlist_bl_lock(),
2535	 * any potential in-lookup matches are going to stay here until
2536	 * we unlock the chain.  All fields are stable in everything
2537	 * we encounter.
2538	 */
2539	hlist_bl_for_each_entry(dentry, node, b, d_u.d_in_lookup_hash) {
2540		if (dentry->d_name.hash != hash)
2541			continue;
2542		if (dentry->d_parent != parent)
2543			continue;
2544		if (!d_same_name(dentry, parent, name))
2545			continue;
2546		hlist_bl_unlock(b);
2547		/* now we can try to grab a reference */
2548		if (!lockref_get_not_dead(&dentry->d_lockref)) {
2549			rcu_read_unlock();
2550			goto retry;
2551		}
2552
2553		rcu_read_unlock();
2554		/*
2555		 * somebody is likely to be still doing lookup for it;
2556		 * wait for them to finish
2557		 */
2558		spin_lock(&dentry->d_lock);
2559		d_wait_lookup(dentry);
2560		/*
2561		 * it's not in-lookup anymore; in principle we should repeat
2562		 * everything from dcache lookup, but it's likely to be what
2563		 * d_lookup() would've found anyway.  If it is, just return it;
2564		 * otherwise we really have to repeat the whole thing.
2565		 */
2566		if (unlikely(dentry->d_name.hash != hash))
2567			goto mismatch;
2568		if (unlikely(dentry->d_parent != parent))
2569			goto mismatch;
2570		if (unlikely(d_unhashed(dentry)))
2571			goto mismatch;
2572		if (unlikely(!d_same_name(dentry, parent, name)))
2573			goto mismatch;
2574		/* OK, it *is* a hashed match; return it */
2575		spin_unlock(&dentry->d_lock);
2576		dput(new);
2577		return dentry;
2578	}
2579	rcu_read_unlock();
2580	/* we can't take ->d_lock here; it's OK, though. */
2581	new->d_flags |= DCACHE_PAR_LOOKUP;
2582	new->d_wait = wq;
2583	hlist_bl_add_head(&new->d_u.d_in_lookup_hash, b);
2584	hlist_bl_unlock(b);
2585	return new;
2586mismatch:
2587	spin_unlock(&dentry->d_lock);
2588	dput(dentry);
2589	goto retry;
2590}
2591EXPORT_SYMBOL(d_alloc_parallel);
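/*
 * Example (a sketch of the readdir-plus style use seen in fuse/proc):
 * instantiating an entry from directory data goes through
 * d_alloc_parallel() so that concurrent lookups of the same name
 * serialize on the in-lookup hash instead of racing:
 *
 *	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
 *	dentry = d_alloc_parallel(parent, &name, &wq);
 *	if (IS_ERR(dentry))
 *		return PTR_ERR(dentry);
 *	if (d_in_lookup(dentry)) {
 *		// we own the lookup: set up the inode, then
 *		// d_splice_alias(inode, dentry) and d_lookup_done(dentry)
 *	}
 */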
2592
2593/*
2594 * - Unhash the dentry
2595 * - Retrieve and clear the waitqueue head in dentry
2596 * - Return the waitqueue head
2597 */
2598static wait_queue_head_t *__d_lookup_unhash(struct dentry *dentry)
2599{
2600	wait_queue_head_t *d_wait;
2601	struct hlist_bl_head *b;
2602
2603	lockdep_assert_held(&dentry->d_lock);
2604
2605	b = in_lookup_hash(dentry->d_parent, dentry->d_name.hash);
2606	hlist_bl_lock(b);
2607	dentry->d_flags &= ~DCACHE_PAR_LOOKUP;
2608	__hlist_bl_del(&dentry->d_u.d_in_lookup_hash);
2609	d_wait = dentry->d_wait;
2610	dentry->d_wait = NULL;
2611	hlist_bl_unlock(b);
2612	INIT_HLIST_NODE(&dentry->d_u.d_alias);
2613	INIT_LIST_HEAD(&dentry->d_lru);
2614	return d_wait;
2615}
2616
2617void __d_lookup_unhash_wake(struct dentry *dentry)
2618{
2619	spin_lock(&dentry->d_lock);
2620	wake_up_all(__d_lookup_unhash(dentry));
2621	spin_unlock(&dentry->d_lock);
2622}
2623EXPORT_SYMBOL(__d_lookup_unhash_wake);
2624
2625/* inode->i_lock held if inode is non-NULL */
2626
2627static inline void __d_add(struct dentry *dentry, struct inode *inode)
2628{
2629	wait_queue_head_t *d_wait;
2630	struct inode *dir = NULL;
2631	unsigned n;
2632	spin_lock(&dentry->d_lock);
2633	if (unlikely(d_in_lookup(dentry))) {
2634		dir = dentry->d_parent->d_inode;
2635		n = start_dir_add(dir);
2636		d_wait = __d_lookup_unhash(dentry);
2637	}
2638	if (inode) {
2639		unsigned add_flags = d_flags_for_inode(inode);
2640		hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
2641		raw_write_seqcount_begin(&dentry->d_seq);
2642		__d_set_inode_and_type(dentry, inode, add_flags);
2643		raw_write_seqcount_end(&dentry->d_seq);
2644		fsnotify_update_flags(dentry);
2645	}
2646	__d_rehash(dentry);
2647	if (dir)
2648		end_dir_add(dir, n, d_wait);
2649	spin_unlock(&dentry->d_lock);
2650	if (inode)
2651		spin_unlock(&inode->i_lock);
2652}
2653
2654/**
2655 * d_add - add dentry to hash queues
2656 * @entry: dentry to add
2657 * @inode: The inode to attach to this dentry
2658 *
2659 * This adds the entry to the hash queues and attaches @inode to it.
2660 * The name itself was already filled in during d_alloc().
2661 */
2662
2663void d_add(struct dentry *entry, struct inode *inode)
2664{
2665	if (inode) {
2666		security_d_instantiate(entry, inode);
2667		spin_lock(&inode->i_lock);
2668	}
2669	__d_add(entry, inode);
2670}
2671EXPORT_SYMBOL(d_add);
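/*
 * Example (a sketch; this is essentially what simple_lookup() in
 * libfs does): a filesystem with no backing store answers ->lookup()
 * by hashing a negative dentry, so repeated misses become dcache
 * hits:
 *
 *	d_add(dentry, NULL);	// hashed, negative
 *	return NULL;
 */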
2672
2673/**
2674 * d_exact_alias - find and hash an exact unhashed alias
2675 * @entry: dentry to add
2676 * @inode: The inode to go with this dentry
2677 *
2678 * If an unhashed dentry with the same name/parent and desired
2679 * inode already exists, hash and return it.  Otherwise, return
2680 * NULL.
2681 *
2682 * Parent directory should be locked.
2683 */
2684struct dentry *d_exact_alias(struct dentry *entry, struct inode *inode)
2685{
2686	struct dentry *alias;
2687	unsigned int hash = entry->d_name.hash;
2688
2689	spin_lock(&inode->i_lock);
2690	hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
2691		/*
2692		 * Don't need alias->d_lock here, because aliases with
2693		 * d_parent == entry->d_parent are not subject to name or
2694		 * parent changes, because the parent inode i_mutex is held.
2695		 */
2696		if (alias->d_name.hash != hash)
2697			continue;
2698		if (alias->d_parent != entry->d_parent)
2699			continue;
2700		if (!d_same_name(alias, entry->d_parent, &entry->d_name))
2701			continue;
2702		spin_lock(&alias->d_lock);
2703		if (!d_unhashed(alias)) {
2704			spin_unlock(&alias->d_lock);
2705			alias = NULL;
2706		} else {
2707			dget_dlock(alias);
2708			__d_rehash(alias);
2709			spin_unlock(&alias->d_lock);
2710		}
2711		spin_unlock(&inode->i_lock);
2712		return alias;
2713	}
2714	spin_unlock(&inode->i_lock);
2715	return NULL;
2716}
2717EXPORT_SYMBOL(d_exact_alias);
2718
2719static void swap_names(struct dentry *dentry, struct dentry *target)
2720{
2721	if (unlikely(dname_external(target))) {
2722		if (unlikely(dname_external(dentry))) {
2723			/*
2724			 * Both external: swap the pointers
2725			 */
2726			swap(target->d_name.name, dentry->d_name.name);
2727		} else {
2728			/*
2729			 * dentry:internal, target:external.  Steal target's
2730			 * storage and make target internal.
2731			 */
2732			memcpy(target->d_iname, dentry->d_name.name,
2733					dentry->d_name.len + 1);
2734			dentry->d_name.name = target->d_name.name;
2735			target->d_name.name = target->d_iname;
2736		}
2737	} else {
2738		if (unlikely(dname_external(dentry))) {
2739			/*
2740			 * dentry:external, target:internal.  Give dentry's
2741			 * storage to target and make dentry internal
2742			 */
2743			memcpy(dentry->d_iname, target->d_name.name,
2744					target->d_name.len + 1);
2745			target->d_name.name = dentry->d_name.name;
2746			dentry->d_name.name = dentry->d_iname;
2747		} else {
2748			/*
2749			 * Both are internal.
2750			 */
2751			unsigned int i;
2752			BUILD_BUG_ON(!IS_ALIGNED(DNAME_INLINE_LEN, sizeof(long)));
2753			for (i = 0; i < DNAME_INLINE_LEN / sizeof(long); i++) {
2754				swap(((long *) &dentry->d_iname)[i],
2755				     ((long *) &target->d_iname)[i]);
2756			}
2757		}
2758	}
2759	swap(dentry->d_name.hash_len, target->d_name.hash_len);
2760}
2761
2762static void copy_name(struct dentry *dentry, struct dentry *target)
2763{
2764	struct external_name *old_name = NULL;
2765	if (unlikely(dname_external(dentry)))
2766		old_name = external_name(dentry);
2767	if (unlikely(dname_external(target))) {
2768		atomic_inc(&external_name(target)->u.count);
2769		dentry->d_name = target->d_name;
2770	} else {
2771		memcpy(dentry->d_iname, target->d_name.name,
2772				target->d_name.len + 1);
2773		dentry->d_name.name = dentry->d_iname;
2774		dentry->d_name.hash_len = target->d_name.hash_len;
2775	}
2776	if (old_name && likely(atomic_dec_and_test(&old_name->u.count)))
2777		kfree_rcu(old_name, u.head);
2778}
2779
2780/*
2781 * __d_move - move a dentry
2782 * @dentry: entry to move
2783 * @target: new dentry
2784 * @exchange: exchange the two dentries
2785 *
2786 * Update the dcache to reflect the move of a file name. Negative
2787 * dcache entries should not be moved in this way. Caller must hold
2788 * rename_lock, the i_mutex of the source and target directories,
2789 * and the sb->s_vfs_rename_mutex if they differ. See lock_rename().
2790 */
2791static void __d_move(struct dentry *dentry, struct dentry *target,
2792		     bool exchange)
2793{
2794	struct dentry *old_parent, *p;
2795	wait_queue_head_t *d_wait;
2796	struct inode *dir = NULL;
2797	unsigned n;
2798
2799	WARN_ON(!dentry->d_inode);
2800	if (WARN_ON(dentry == target))
2801		return;
2802
2803	BUG_ON(d_ancestor(target, dentry));
2804	old_parent = dentry->d_parent;
2805	p = d_ancestor(old_parent, target);
2806	if (IS_ROOT(dentry)) {
2807		BUG_ON(p);
2808		spin_lock(&target->d_parent->d_lock);
2809	} else if (!p) {
2810		/* target is not a descendant of dentry->d_parent */
2811		spin_lock(&target->d_parent->d_lock);
2812		spin_lock_nested(&old_parent->d_lock, DENTRY_D_LOCK_NESTED);
2813	} else {
2814		BUG_ON(p == dentry);
2815		spin_lock(&old_parent->d_lock);
2816		if (p != target)
2817			spin_lock_nested(&target->d_parent->d_lock,
2818					DENTRY_D_LOCK_NESTED);
2819	}
2820	spin_lock_nested(&dentry->d_lock, 2);
2821	spin_lock_nested(&target->d_lock, 3);
2822
2823	if (unlikely(d_in_lookup(target))) {
2824		dir = target->d_parent->d_inode;
2825		n = start_dir_add(dir);
2826		d_wait = __d_lookup_unhash(target);
2827	}
2828
2829	write_seqcount_begin(&dentry->d_seq);
2830	write_seqcount_begin_nested(&target->d_seq, DENTRY_D_LOCK_NESTED);
2831
2832	/* unhash both */
2833	if (!d_unhashed(dentry))
2834		___d_drop(dentry);
2835	if (!d_unhashed(target))
2836		___d_drop(target);
2837
2838	/* ... and switch them in the tree */
2839	dentry->d_parent = target->d_parent;
2840	if (!exchange) {
2841		copy_name(dentry, target);
2842		target->d_hash.pprev = NULL;
2843		dentry->d_parent->d_lockref.count++;
2844		if (dentry != old_parent) /* wasn't IS_ROOT */
2845			WARN_ON(!--old_parent->d_lockref.count);
2846	} else {
2847		target->d_parent = old_parent;
2848		swap_names(dentry, target);
2849		if (!hlist_unhashed(&target->d_sib))
2850			__hlist_del(&target->d_sib);
2851		hlist_add_head(&target->d_sib, &target->d_parent->d_children);
2852		__d_rehash(target);
2853		fsnotify_update_flags(target);
2854	}
2855	if (!hlist_unhashed(&dentry->d_sib))
2856		__hlist_del(&dentry->d_sib);
2857	hlist_add_head(&dentry->d_sib, &dentry->d_parent->d_children);
2858	__d_rehash(dentry);
2859	fsnotify_update_flags(dentry);
2860	fscrypt_handle_d_move(dentry);
2861
2862	write_seqcount_end(&target->d_seq);
2863	write_seqcount_end(&dentry->d_seq);
2864
2865	if (dir)
2866		end_dir_add(dir, n, d_wait);
2867
2868	if (dentry->d_parent != old_parent)
2869		spin_unlock(&dentry->d_parent->d_lock);
2870	if (dentry != old_parent)
2871		spin_unlock(&old_parent->d_lock);
2872	spin_unlock(&target->d_lock);
2873	spin_unlock(&dentry->d_lock);
2874}
2875
2876/*
2877 * d_move - move a dentry
2878 * @dentry: entry to move
2879 * @target: new dentry
2880 *
2881 * Update the dcache to reflect the move of a file name. Negative
2882 * dcache entries should not be moved in this way. See the locking
2883 * requirements for __d_move.
2884 */
2885void d_move(struct dentry *dentry, struct dentry *target)
2886{
2887	write_seqlock(&rename_lock);
2888	__d_move(dentry, target, false);
2889	write_sequnlock(&rename_lock);
2890}
2891EXPORT_SYMBOL(d_move);
2892
2893/*
2894 * d_exchange - exchange two dentries
2895 * @dentry1: first dentry
2896 * @dentry2: second dentry
2897 */
2898void d_exchange(struct dentry *dentry1, struct dentry *dentry2)
2899{
2900	write_seqlock(&rename_lock);
2901
2902	WARN_ON(!dentry1->d_inode);
2903	WARN_ON(!dentry2->d_inode);
2904	WARN_ON(IS_ROOT(dentry1));
2905	WARN_ON(IS_ROOT(dentry2));
2906
2907	__d_move(dentry1, dentry2, true);
2908
2909	write_sequnlock(&rename_lock);
2910}
2911
2912/**
2913 * d_ancestor - search for an ancestor
2914 * @p1: ancestor dentry
2915 * @p2: child dentry
2916 *
2917 * Returns the ancestor dentry of p2 which is a child of p1, if p1 is
2918 * an ancestor of p2, else NULL.
2919 */
2920struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2)
2921{
2922	struct dentry *p;
2923
2924	for (p = p2; !IS_ROOT(p); p = p->d_parent) {
2925		if (p->d_parent == p1)
2926			return p;
2927	}
2928	return NULL;
2929}
2930
2931/*
2932 * This helper attempts to cope with remotely renamed directories
2933 *
2934 * It assumes that the caller is already holding
2935 * dentry->d_parent->d_inode->i_mutex, and rename_lock
2936 *
2937 * Note: If ever the locking in lock_rename() changes, then please
2938 * remember to update this too...
2939 */
2940static int __d_unalias(struct dentry *dentry, struct dentry *alias)
2941{
2942	struct mutex *m1 = NULL;
2943	struct rw_semaphore *m2 = NULL;
2944	int ret = -ESTALE;
2945
2946	/* If alias and dentry share a parent, then no extra locks required */
2947	if (alias->d_parent == dentry->d_parent)
2948		goto out_unalias;
2949
2950	/* See lock_rename() */
2951	if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex))
2952		goto out_err;
2953	m1 = &dentry->d_sb->s_vfs_rename_mutex;
2954	if (!inode_trylock_shared(alias->d_parent->d_inode))
2955		goto out_err;
2956	m2 = &alias->d_parent->d_inode->i_rwsem;
2957out_unalias:
2958	__d_move(alias, dentry, false);
2959	ret = 0;
2960out_err:
2961	if (m2)
2962		up_read(m2);
2963	if (m1)
2964		mutex_unlock(m1);
2965	return ret;
2966}
2967
2968/**
2969 * d_splice_alias - splice a disconnected dentry into the tree if one exists
2970 * @inode:  the inode which may have a disconnected dentry
2971 * @dentry: a negative dentry which we want to point to the inode.
2972 *
2973 * If inode is a directory and has an IS_ROOT alias, then d_move that in
2974 * place of the given dentry and return it, else simply d_add the inode
2975 * to the dentry and return NULL.
2976 *
2977 * If a non-IS_ROOT directory is found, the filesystem is corrupt, and
2978 * we should error out: directories can't have multiple aliases.
2979 *
2980 * This is needed in the lookup routine of any filesystem that is exportable
2981 * (via knfsd) so that we can build dcache paths to directories effectively.
2982 *
2983 * If a dentry was found and moved, then it is returned.  Otherwise NULL
2984 * is returned.  This matches the expected return value of ->lookup.
2985 *
2986 * Cluster filesystems may call this function with a negative, hashed dentry.
2987 * In that case, we know that the inode will be a regular file, and also this
2988 * will only occur during atomic_open. So we need to check for the dentry
2989 * being already hashed only in the final case.
2990 */
2991struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
2992{
2993	if (IS_ERR(inode))
2994		return ERR_CAST(inode);
2995
2996	BUG_ON(!d_unhashed(dentry));
2997
2998	if (!inode)
2999		goto out;
3000
3001	security_d_instantiate(dentry, inode);
3002	spin_lock(&inode->i_lock);
3003	if (S_ISDIR(inode->i_mode)) {
3004		struct dentry *new = __d_find_any_alias(inode);
3005		if (unlikely(new)) {
3006			/* The reference to new ensures it remains an alias */
3007			spin_unlock(&inode->i_lock);
3008			write_seqlock(&rename_lock);
3009			if (unlikely(d_ancestor(new, dentry))) {
3010				write_sequnlock(&rename_lock);
3011				dput(new);
3012				new = ERR_PTR(-ELOOP);
3013				pr_warn_ratelimited(
3014					"VFS: Lookup of '%s' in %s %s"
3015					" would have caused loop\n",
3016					dentry->d_name.name,
3017					inode->i_sb->s_type->name,
3018					inode->i_sb->s_id);
3019			} else if (!IS_ROOT(new)) {
3020				struct dentry *old_parent = dget(new->d_parent);
3021				int err = __d_unalias(dentry, new);
3022				write_sequnlock(&rename_lock);
3023				if (err) {
3024					dput(new);
3025					new = ERR_PTR(err);
3026				}
3027				dput(old_parent);
3028			} else {
3029				__d_move(new, dentry, false);
3030				write_sequnlock(&rename_lock);
3031			}
3032			iput(inode);
3033			return new;
3034		}
3035	}
3036out:
3037	__d_add(dentry, inode);
3038	return NULL;
3039}
3040EXPORT_SYMBOL(d_splice_alias);
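/*
 * Example (a sketch of the ->lookup() tail used by exportable
 * filesystems; demo_lookup_ino() is hypothetical): d_splice_alias()
 * consumes the inode reference, and its result is exactly what
 * ->lookup() should return, whether NULL, a spliced alias or an
 * ERR_PTR:
 *
 *	inode = demo_lookup_ino(dir, &dentry->d_name);
 *	return d_splice_alias(inode, dentry);
 */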
3041
3042/*
3043 * Test whether new_dentry is a subdirectory of old_dentry.
3044 *
3045 * Trivially implemented using the dcache structure
3046 */
3047
3048/**
3049 * is_subdir - is new dentry a subdirectory of old_dentry
3050 * @new_dentry: new dentry
3051 * @old_dentry: old dentry
3052 *
3053 * Returns true if new_dentry is a subdirectory of old_dentry (at any
3054 * depth), false otherwise.
3055 * Caller must ensure that "new_dentry" is pinned before calling is_subdir().
3056 */
3057  
3058bool is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
3059{
3060	bool subdir;
3061	unsigned seq;
3062
3063	if (new_dentry == old_dentry)
3064		return true;
3065
3066	/* Access d_parent under rcu as d_move() may change it. */
3067	rcu_read_lock();
3068	seq = read_seqbegin(&rename_lock);
3069	subdir = d_ancestor(old_dentry, new_dentry);
3070	 /* Try lockless once... */
3071	if (read_seqretry(&rename_lock, seq)) {
3072		/* ...else acquire lock for progress even on deep chains. */
3073		read_seqlock_excl(&rename_lock);
3074		subdir = d_ancestor(old_dentry, new_dentry);
3075		read_sequnlock_excl(&rename_lock);
3076	}
3077	rcu_read_unlock();
3078	return subdir;
3079}
3080EXPORT_SYMBOL(is_subdir);
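/*
 * Example (a sketch): the typical use is validating that a cached
 * dentry still lives below some pinned root before trusting a path
 * built from it; "root_dentry" is assumed held by the caller:
 *
 *	if (!is_subdir(dentry, root_dentry))
 *		return -ENOENT;	// renamed away from under us
 */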
3081
3082static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
3083{
3084	struct dentry *root = data;
3085	if (dentry != root) {
3086		if (d_unhashed(dentry) || !dentry->d_inode)
3087			return D_WALK_SKIP;
3088
3089		if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
3090			dentry->d_flags |= DCACHE_GENOCIDE;
3091			dentry->d_lockref.count--;
3092		}
3093	}
3094	return D_WALK_CONTINUE;
3095}
3096
3097void d_genocide(struct dentry *parent)
3098{
3099	d_walk(parent, parent, d_genocide_kill);
3100}
3101
3102void d_mark_tmpfile(struct file *file, struct inode *inode)
3103{
3104	struct dentry *dentry = file->f_path.dentry;
3105
3106	BUG_ON(dentry->d_name.name != dentry->d_iname ||
3107		!hlist_unhashed(&dentry->d_u.d_alias) ||
3108		!d_unlinked(dentry));
3109	spin_lock(&dentry->d_parent->d_lock);
3110	spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
3111	dentry->d_name.len = sprintf(dentry->d_iname, "#%llu",
3112				(unsigned long long)inode->i_ino);
3113	spin_unlock(&dentry->d_lock);
3114	spin_unlock(&dentry->d_parent->d_lock);
3115}
3116EXPORT_SYMBOL(d_mark_tmpfile);
3117
3118void d_tmpfile(struct file *file, struct inode *inode)
3119{
3120	struct dentry *dentry = file->f_path.dentry;
3121
3122	inode_dec_link_count(inode);
3123	d_mark_tmpfile(file, inode);
3124	d_instantiate(dentry, inode);
3125}
3126EXPORT_SYMBOL(d_tmpfile);
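/*
 * Example (a sketch of an ->tmpfile() tail): the inode is created
 * with i_nlink == 1, and d_tmpfile() drops that link while giving the
 * dentry its "#<ino>" name; finish_open_simple() then completes the
 * open:
 *
 *	// ... allocate inode with i_nlink == 1 ...
 *	d_tmpfile(file, inode);
 *	return finish_open_simple(file, 0);
 */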
3127
3128/*
3129 * Obtain inode number of the parent dentry.
3130 */
3131ino_t d_parent_ino(struct dentry *dentry)
3132{
3133	struct dentry *parent;
3134	struct inode *iparent;
3135	unsigned seq;
3136	ino_t ret;
3137
3138	scoped_guard(rcu) {
3139		seq = raw_seqcount_begin(&dentry->d_seq);
3140		parent = READ_ONCE(dentry->d_parent);
3141		iparent = d_inode_rcu(parent);
3142		if (likely(iparent)) {
3143			ret = iparent->i_ino;
3144			if (!read_seqcount_retry(&dentry->d_seq, seq))
3145				return ret;
3146		}
3147	}
3148
3149	spin_lock(&dentry->d_lock);
3150	ret = dentry->d_parent->d_inode->i_ino;
3151	spin_unlock(&dentry->d_lock);
3152	return ret;
3153}
3154EXPORT_SYMBOL(d_parent_ino);
3155
3156static __initdata unsigned long dhash_entries;
3157static int __init set_dhash_entries(char *str)
3158{
3159	if (!str)
3160		return 0;
3161	dhash_entries = simple_strtoul(str, &str, 0);
3162	return 1;
3163}
3164__setup("dhash_entries=", set_dhash_entries);
3165
3166static void __init dcache_init_early(void)
3167{
3168	/* If hashes are distributed across NUMA nodes, defer
3169	 * hash allocation until vmalloc space is available.
3170	 */
3171	if (hashdist)
3172		return;
3173
3174	dentry_hashtable =
3175		alloc_large_system_hash("Dentry cache",
3176					sizeof(struct hlist_bl_head),
3177					dhash_entries,
3178					13,
3179					HASH_EARLY | HASH_ZERO,
3180					&d_hash_shift,
3181					NULL,
3182					0,
3183					0);
3184	d_hash_shift = 32 - d_hash_shift;
3185
3186	runtime_const_init(shift, d_hash_shift);
3187	runtime_const_init(ptr, dentry_hashtable);
3188}
3189
3190static void __init dcache_init(void)
3191{
3192	/*
3193	 * A constructor could be added for stable state like the lists,
3194	 * but it is probably not worth it because of the cache nature
3195	 * of the dcache.
3196	 */
3197	dentry_cache = KMEM_CACHE_USERCOPY(dentry,
3198		SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_ACCOUNT,
3199		d_iname);
3200
3201	/* Hash may have been set up in dcache_init_early */
3202	if (!hashdist)
3203		return;
3204
3205	dentry_hashtable =
3206		alloc_large_system_hash("Dentry cache",
3207					sizeof(struct hlist_bl_head),
3208					dhash_entries,
3209					13,
3210					HASH_ZERO,
3211					&d_hash_shift,
3212					NULL,
3213					0,
3214					0);
3215	d_hash_shift = 32 - d_hash_shift;
3216
3217	runtime_const_init(shift, d_hash_shift);
3218	runtime_const_init(ptr, dentry_hashtable);
3219}
3220
3221/* SLAB cache for __getname() consumers */
3222struct kmem_cache *names_cachep __ro_after_init;
3223EXPORT_SYMBOL(names_cachep);
3224
3225void __init vfs_caches_init_early(void)
3226{
3227	int i;
3228
3229	for (i = 0; i < ARRAY_SIZE(in_lookup_hashtable); i++)
3230		INIT_HLIST_BL_HEAD(&in_lookup_hashtable[i]);
3231
3232	dcache_init_early();
3233	inode_init_early();
3234}
3235
3236void __init vfs_caches_init(void)
3237{
3238	names_cachep = kmem_cache_create_usercopy("names_cache", PATH_MAX, 0,
3239			SLAB_HWCACHE_ALIGN|SLAB_PANIC, 0, PATH_MAX, NULL);
3240
3241	dcache_init();
3242	inode_init();
3243	files_init();
3244	files_maxfiles_init();
3245	mnt_init();
3246	bdev_cache_init();
3247	chrdev_init();
3248}
v4.6
 
   1/*
   2 * fs/dcache.c
   3 *
   4 * Complete reimplementation
   5 * (C) 1997 Thomas Schoebel-Theuer,
   6 * with heavy changes by Linus Torvalds
   7 */
   8
   9/*
  10 * Notes on the allocation strategy:
  11 *
  12 * The dcache is a master of the icache - whenever a dcache entry
  13 * exists, the inode will always exist. "iput()" is done either when
  14 * the dcache entry is deleted or garbage collected.
  15 */
  16
  17#include <linux/syscalls.h>
  18#include <linux/string.h>
  19#include <linux/mm.h>
  20#include <linux/fs.h>
 
  21#include <linux/fsnotify.h>
  22#include <linux/slab.h>
  23#include <linux/init.h>
  24#include <linux/hash.h>
  25#include <linux/cache.h>
  26#include <linux/export.h>
  27#include <linux/mount.h>
  28#include <linux/file.h>
  29#include <asm/uaccess.h>
  30#include <linux/security.h>
  31#include <linux/seqlock.h>
  32#include <linux/swap.h>
  33#include <linux/bootmem.h>
  34#include <linux/fs_struct.h>
  35#include <linux/hardirq.h>
  36#include <linux/bit_spinlock.h>
  37#include <linux/rculist_bl.h>
  38#include <linux/prefetch.h>
  39#include <linux/ratelimit.h>
  40#include <linux/list_lru.h>
  41#include <linux/kasan.h>
  42
  43#include "internal.h"
  44#include "mount.h"
  45
 
 
  46/*
  47 * Usage:
  48 * dcache->d_inode->i_lock protects:
  49 *   - i_dentry, d_u.d_alias, d_inode of aliases
  50 * dcache_hash_bucket lock protects:
  51 *   - the dcache hash table
  52 * s_anon bl list spinlock protects:
  53 *   - the s_anon list (see __d_drop)
  54 * dentry->d_sb->s_dentry_lru_lock protects:
  55 *   - the dcache lru lists and counters
  56 * d_lock protects:
  57 *   - d_flags
  58 *   - d_name
  59 *   - d_lru
  60 *   - d_count
  61 *   - d_unhashed()
  62 *   - d_parent and d_subdirs
  63 *   - childrens' d_child and d_parent
  64 *   - d_u.d_alias, d_inode
  65 *
  66 * Ordering:
  67 * dentry->d_inode->i_lock
  68 *   dentry->d_lock
  69 *     dentry->d_sb->s_dentry_lru_lock
  70 *     dcache_hash_bucket lock
  71 *     s_anon lock
  72 *
  73 * If there is an ancestor relationship:
  74 * dentry->d_parent->...->d_parent->d_lock
  75 *   ...
  76 *     dentry->d_parent->d_lock
  77 *       dentry->d_lock
  78 *
  79 * If no ancestor relationship:
  80 * if (dentry1 < dentry2)
  81 *   dentry1->d_lock
  82 *     dentry2->d_lock
  83 */
  84int sysctl_vfs_cache_pressure __read_mostly = 100;
  85EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);
  86
  87__cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);
  88
  89EXPORT_SYMBOL(rename_lock);
  90
  91static struct kmem_cache *dentry_cache __read_mostly;
 
 
 
 
 
 
 
  92
  93/*
  94 * This is the single most critical data structure when it comes
  95 * to the dcache: the hashtable for lookups. Somebody should try
  96 * to make this good - I've just made it work.
  97 *
  98 * This hash-function tries to avoid losing too many bits of hash
  99 * information, yet avoid using a prime hash-size or similar.
 
 
 
 
 
 100 */
 101
 102static unsigned int d_hash_mask __read_mostly;
 103static unsigned int d_hash_shift __read_mostly;
 104
 105static struct hlist_bl_head *dentry_hashtable __read_mostly;
 106
 107static inline struct hlist_bl_head *d_hash(const struct dentry *parent,
 
 
 
 
 
 
 
 
 
 108					unsigned int hash)
 109{
 110	hash += (unsigned long) parent / L1_CACHE_BYTES;
 111	return dentry_hashtable + hash_32(hash, d_hash_shift);
 112}
 113
 114/* Statistics gathering. */
 115struct dentry_stat_t dentry_stat = {
 116	.age_limit = 45,
 
 
 
 
 117};
 118
 119static DEFINE_PER_CPU(long, nr_dentry);
 120static DEFINE_PER_CPU(long, nr_dentry_unused);
 
 
 121
 122#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
 
 
 
 
 123
 124/*
 125 * Here we resort to our own counters instead of using generic per-cpu counters
 126 * for consistency with what the vfs inode code does. We are expected to harvest
 127 * better code and performance by having our own specialized counters.
 128 *
 129 * Please note that the loop is done over all possible CPUs, not over all online
 130 * CPUs. The reason for this is that we don't want to play games with CPUs going
 131 * on and off. If one of them goes off, we will just keep their counters.
 132 *
 133 * glommer: See cffbc8a for details, and if you ever intend to change this,
 134 * please update all vfs counters to match.
 135 */
 136static long get_nr_dentry(void)
 137{
 138	int i;
 139	long sum = 0;
 140	for_each_possible_cpu(i)
 141		sum += per_cpu(nr_dentry, i);
 142	return sum < 0 ? 0 : sum;
 143}
 144
 145static long get_nr_dentry_unused(void)
 146{
 147	int i;
 148	long sum = 0;
 149	for_each_possible_cpu(i)
 150		sum += per_cpu(nr_dentry_unused, i);
 151	return sum < 0 ? 0 : sum;
 152}
 153
 154int proc_nr_dentry(struct ctl_table *table, int write, void __user *buffer,
 155		   size_t *lenp, loff_t *ppos)
 
 
 
 
 
 
 
 
 
 
 156{
 157	dentry_stat.nr_dentry = get_nr_dentry();
 158	dentry_stat.nr_unused = get_nr_dentry_unused();
 
 159	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
 160}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 161#endif
 162
 163/*
 164 * Compare 2 name strings, return 0 if they match, otherwise non-zero.
 165 * The strings are both count bytes long, and count is non-zero.
 166 */
 167#ifdef CONFIG_DCACHE_WORD_ACCESS
 168
 169#include <asm/word-at-a-time.h>
 170/*
 171 * NOTE! 'cs' and 'scount' come from a dentry, so it has a
 172 * aligned allocation for this particular component. We don't
 173 * strictly need the load_unaligned_zeropad() safety, but it
 174 * doesn't hurt either.
 175 *
 176 * In contrast, 'ct' and 'tcount' can be from a pathname, and do
 177 * need the careful unaligned handling.
 178 */
 179static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
 180{
 181	unsigned long a,b,mask;
 182
 183	for (;;) {
 184		a = *(unsigned long *)cs;
 185		b = load_unaligned_zeropad(ct);
 186		if (tcount < sizeof(unsigned long))
 187			break;
 188		if (unlikely(a != b))
 189			return 1;
 190		cs += sizeof(unsigned long);
 191		ct += sizeof(unsigned long);
 192		tcount -= sizeof(unsigned long);
 193		if (!tcount)
 194			return 0;
 195	}
 196	mask = bytemask_from_count(tcount);
 197	return unlikely(!!((a ^ b) & mask));
 198}
 199
 200#else
 201
 202static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
 203{
 204	do {
 205		if (*cs != *ct)
 206			return 1;
 207		cs++;
 208		ct++;
 209		tcount--;
 210	} while (tcount);
 211	return 0;
 212}
 213
 214#endif
 215
 216static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *ct, unsigned tcount)
 217{
 218	const unsigned char *cs;
 219	/*
 220	 * Be careful about RCU walk racing with rename:
 221	 * use ACCESS_ONCE to fetch the name pointer.
 222	 *
 223	 * NOTE! Even if a rename will mean that the length
 224	 * was not loaded atomically, we don't care. The
 225	 * RCU walk will check the sequence count eventually,
 226	 * and catch it. And we won't overrun the buffer,
 227	 * because we're reading the name pointer atomically,
 228	 * and a dentry name is guaranteed to be properly
 229	 * terminated with a NUL byte.
 230	 *
 231	 * End result: even if 'len' is wrong, we'll exit
 232	 * early because the data cannot match (there can
 233	 * be no NUL in the ct/tcount data)
 234	 */
 235	cs = ACCESS_ONCE(dentry->d_name.name);
 236	smp_read_barrier_depends();
 237	return dentry_string_cmp(cs, ct, tcount);
 238}
 239
 240struct external_name {
 241	union {
 242		atomic_t count;
 243		struct rcu_head head;
 244	} u;
 245	unsigned char name[];
 246};
 247
 248static inline struct external_name *external_name(struct dentry *dentry)
 249{
 250	return container_of(dentry->d_name.name, struct external_name, name[0]);
 251}
 252
 253static void __d_free(struct rcu_head *head)
 254{
 255	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
 256
 257	kmem_cache_free(dentry_cache, dentry); 
 258}
 259
 260static void __d_free_external(struct rcu_head *head)
 261{
 262	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
 263	kfree(external_name(dentry));
 264	kmem_cache_free(dentry_cache, dentry); 
 265}
 266
 267static inline int dname_external(const struct dentry *dentry)
 268{
 269	return dentry->d_name.name != dentry->d_iname;
 270}
 271
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 272static inline void __d_set_inode_and_type(struct dentry *dentry,
 273					  struct inode *inode,
 274					  unsigned type_flags)
 275{
 276	unsigned flags;
 277
 278	dentry->d_inode = inode;
 279	flags = READ_ONCE(dentry->d_flags);
 280	flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
 281	flags |= type_flags;
 282	WRITE_ONCE(dentry->d_flags, flags);
 283}
 284
 285static inline void __d_clear_type_and_inode(struct dentry *dentry)
 286{
 287	unsigned flags = READ_ONCE(dentry->d_flags);
 288
 289	flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
 290	WRITE_ONCE(dentry->d_flags, flags);
 291	dentry->d_inode = NULL;
 
 
 
 
 
 
 292}
 293
 294static void dentry_free(struct dentry *dentry)
 295{
 296	WARN_ON(!hlist_unhashed(&dentry->d_u.d_alias));
 297	if (unlikely(dname_external(dentry))) {
 298		struct external_name *p = external_name(dentry);
 299		if (likely(atomic_dec_and_test(&p->u.count))) {
 300			call_rcu(&dentry->d_u.d_rcu, __d_free_external);
 301			return;
 302		}
 303	}
 304	/* if dentry was never visible to RCU, immediate free is OK */
 305	if (!(dentry->d_flags & DCACHE_RCUACCESS))
 306		__d_free(&dentry->d_u.d_rcu);
 307	else
 308		call_rcu(&dentry->d_u.d_rcu, __d_free);
 309}
 310
 311/**
 312 * dentry_rcuwalk_invalidate - invalidate in-progress rcu-walk lookups
 313 * @dentry: the target dentry
 314 * After this call, in-progress rcu-walk path lookup will fail. This
 315 * should be called after unhashing, and after changing d_inode (if
 316 * the dentry has not already been unhashed).
 317 */
 318static inline void dentry_rcuwalk_invalidate(struct dentry *dentry)
 319{
 320	lockdep_assert_held(&dentry->d_lock);
 321	/* Go through am invalidation barrier */
 322	write_seqcount_invalidate(&dentry->d_seq);
 323}
 324
 325/*
 326 * Release the dentry's inode, using the filesystem
 327 * d_iput() operation if defined. Dentry has no refcount
 328 * and is unhashed.
 329 */
 330static void dentry_iput(struct dentry * dentry)
 331	__releases(dentry->d_lock)
 332	__releases(dentry->d_inode->i_lock)
 333{
 334	struct inode *inode = dentry->d_inode;
 335	if (inode) {
 336		__d_clear_type_and_inode(dentry);
 337		hlist_del_init(&dentry->d_u.d_alias);
 338		spin_unlock(&dentry->d_lock);
 339		spin_unlock(&inode->i_lock);
 340		if (!inode->i_nlink)
 341			fsnotify_inoderemove(inode);
 342		if (dentry->d_op && dentry->d_op->d_iput)
 343			dentry->d_op->d_iput(dentry, inode);
 344		else
 345			iput(inode);
 346	} else {
 347		spin_unlock(&dentry->d_lock);
 348	}
 349}
 350
 351/*
 352 * Release the dentry's inode, using the filesystem
 353 * d_iput() operation if defined. dentry remains in-use.
 354 */
 355static void dentry_unlink_inode(struct dentry * dentry)
 356	__releases(dentry->d_lock)
 357	__releases(dentry->d_inode->i_lock)
 358{
 359	struct inode *inode = dentry->d_inode;
 360
 361	raw_write_seqcount_begin(&dentry->d_seq);
 362	__d_clear_type_and_inode(dentry);
 363	hlist_del_init(&dentry->d_u.d_alias);
 364	raw_write_seqcount_end(&dentry->d_seq);
 365	spin_unlock(&dentry->d_lock);
 366	spin_unlock(&inode->i_lock);
 367	if (!inode->i_nlink)
 368		fsnotify_inoderemove(inode);
 369	if (dentry->d_op && dentry->d_op->d_iput)
 370		dentry->d_op->d_iput(dentry, inode);
 371	else
 372		iput(inode);
 373}
 374
 375/*
 376 * The DCACHE_LRU_LIST bit is set whenever the 'd_lru' entry
 377 * is in use - which includes both the "real" per-superblock
 378 * LRU list _and_ the DCACHE_SHRINK_LIST use.
 379 *
 380 * The DCACHE_SHRINK_LIST bit is set whenever the dentry is
 381 * on the shrink list (ie not on the superblock LRU list).
 382 *
 383 * The per-cpu "nr_dentry_unused" counters are updated with
 384 * the DCACHE_LRU_LIST bit.
 385 *
 386 * These helper functions make sure we always follow the
 387 * rules. d_lock must be held by the caller.
 388 */
 389#define D_FLAG_VERIFY(dentry,x) WARN_ON_ONCE(((dentry)->d_flags & (DCACHE_LRU_LIST | DCACHE_SHRINK_LIST)) != (x))
 390static void d_lru_add(struct dentry *dentry)
 391{
 392	D_FLAG_VERIFY(dentry, 0);
 393	dentry->d_flags |= DCACHE_LRU_LIST;
 394	this_cpu_inc(nr_dentry_unused);
 395	WARN_ON_ONCE(!list_lru_add(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
 396}
 397
 398static void d_lru_del(struct dentry *dentry)
 399{
 400	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
 401	dentry->d_flags &= ~DCACHE_LRU_LIST;
 402	this_cpu_dec(nr_dentry_unused);
 403	WARN_ON_ONCE(!list_lru_del(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
 404}
 405
 406static void d_shrink_del(struct dentry *dentry)
 407{
 408	D_FLAG_VERIFY(dentry, DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
 409	list_del_init(&dentry->d_lru);
 410	dentry->d_flags &= ~(DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
 411	this_cpu_dec(nr_dentry_unused);
 412}
 413
 414static void d_shrink_add(struct dentry *dentry, struct list_head *list)
 415{
 416	D_FLAG_VERIFY(dentry, 0);
 417	list_add(&dentry->d_lru, list);
 418	dentry->d_flags |= DCACHE_SHRINK_LIST | DCACHE_LRU_LIST;
 419	this_cpu_inc(nr_dentry_unused);
 420}
 421
 422/*
 423 * These can only be called under the global LRU lock, ie during the
 424 * callback for freeing the LRU list. "isolate" removes it from the
 425 * LRU lists entirely, while shrink_move moves it to the indicated
 426 * private list.
 427 */
 428static void d_lru_isolate(struct list_lru_one *lru, struct dentry *dentry)
 429{
 430	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
 431	dentry->d_flags &= ~DCACHE_LRU_LIST;
 432	this_cpu_dec(nr_dentry_unused);
 433	list_lru_isolate(lru, &dentry->d_lru);
 434}
 435
 436static void d_lru_shrink_move(struct list_lru_one *lru, struct dentry *dentry,
 437			      struct list_head *list)
 438{
 439	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
 440	dentry->d_flags |= DCACHE_SHRINK_LIST;
 441	list_lru_isolate_move(lru, &dentry->d_lru, list);
 442}
 443
 444/*
 445 * dentry_lru_(add|del)_list must be called with d_lock held.
 446 */
 447static void dentry_lru_add(struct dentry *dentry)
 448{
 449	if (unlikely(!(dentry->d_flags & DCACHE_LRU_LIST)))
 450		d_lru_add(dentry);
 451}
 452
 453/**
 454 * d_drop - drop a dentry
 455 * @dentry: dentry to drop
 456 *
 457 * d_drop() unhashes the entry from the parent dentry hashes, so that it won't
 458 * be found through a VFS lookup any more. Note that this is different from
 459 * deleting the dentry - d_delete will try to mark the dentry negative if
 460 * possible, giving a successful _negative_ lookup, while d_drop will
 461 * just make the cache lookup fail.
 462 *
 463 * d_drop() is used mainly for stuff that wants to invalidate a dentry for some
 464 * reason (NFS timeouts or autofs deletes).
 465 *
 466 * __d_drop requires dentry->d_lock.
 467 */
 468void __d_drop(struct dentry *dentry)
 469{
 470	if (!d_unhashed(dentry)) {
 471		struct hlist_bl_head *b;
 472		/*
 473		 * Hashed dentries are normally on the dentry hashtable,
 474		 * with the exception of those newly allocated by
 475		 * d_obtain_alias, which are always IS_ROOT:
 476		 */
 477		if (unlikely(IS_ROOT(dentry)))
 478			b = &dentry->d_sb->s_anon;
 479		else
 480			b = d_hash(dentry->d_parent, dentry->d_name.hash);
 481
 482		hlist_bl_lock(b);
 483		__hlist_bl_del(&dentry->d_hash);
 484		dentry->d_hash.pprev = NULL;
 485		hlist_bl_unlock(b);
 486		dentry_rcuwalk_invalidate(dentry);
 487	}
 488}
 489EXPORT_SYMBOL(__d_drop);
 490
 491void d_drop(struct dentry *dentry)
 492{
 493	spin_lock(&dentry->d_lock);
 494	__d_drop(dentry);
 495	spin_unlock(&dentry->d_lock);
 496}
 497EXPORT_SYMBOL(d_drop);
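/*
 * Example (illustrative sketch, not part of this file): a network
 * filesystem dropping a dentry it has found to be stale, forcing the
 * next lookup back to the server.  example_is_stale() is a
 * hypothetical helper standing in for the filesystem's own test.
 */
static bool example_is_stale(struct dentry *dentry);	/* hypothetical */

static void example_invalidate_if_stale(struct dentry *dentry)
{
	if (example_is_stale(dentry))
		d_drop(dentry);	/* unhash: cache lookups now miss */
}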
 498
 499static void __dentry_kill(struct dentry *dentry)
 500{
 501	struct dentry *parent = NULL;
 502	bool can_free = true;
 503	if (!IS_ROOT(dentry))
 504		parent = dentry->d_parent;
 505
 506	/*
 507	 * The dentry is now unrecoverably dead to the world.
 508	 */
 509	lockref_mark_dead(&dentry->d_lockref);
 510
 511	/*
 512	 * inform the fs via d_prune that this dentry is about to be
 513	 * unhashed and destroyed.
 514	 */
 515	if (dentry->d_flags & DCACHE_OP_PRUNE)
 516		dentry->d_op->d_prune(dentry);
 517
 518	if (dentry->d_flags & DCACHE_LRU_LIST) {
 519		if (!(dentry->d_flags & DCACHE_SHRINK_LIST))
 520			d_lru_del(dentry);
 521	}
 522	/* if it was on the hash then remove it */
 523	__d_drop(dentry);
 524	__list_del_entry(&dentry->d_child);
 525	/*
 526	 * Inform d_walk() that we are no longer attached to the
 527	 * dentry tree
 528	 */
 529	dentry->d_flags |= DCACHE_DENTRY_KILLED;
 530	if (parent)
 531		spin_unlock(&parent->d_lock);
 532	dentry_iput(dentry);
 533	/*
 534	 * dentry_iput drops the locks, at which point nobody (except
 535	 * transient RCU lookups) can reach this dentry.
 536	 */
 537	BUG_ON(dentry->d_lockref.count > 0);
 538	this_cpu_dec(nr_dentry);
 539	if (dentry->d_op && dentry->d_op->d_release)
 540		dentry->d_op->d_release(dentry);
 541
 542	spin_lock(&dentry->d_lock);
 543	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
 544		dentry->d_flags |= DCACHE_MAY_FREE;
 545		can_free = false;
 546	}
 547	spin_unlock(&dentry->d_lock);
 548	if (likely(can_free))
 549		dentry_free(dentry);
 550}
 551
 552/*
 553 * Finish off a dentry we've decided to kill.
 554 * dentry->d_lock must be held, returns with it unlocked.
 555 * The caller's reference is consumed in the process.
 556 * Returns dentry requiring refcount drop, or NULL if we're done.
 557 */
 558static struct dentry *dentry_kill(struct dentry *dentry)
 559	__releases(dentry->d_lock)
 560{
 561	struct inode *inode = dentry->d_inode;
 562	struct dentry *parent = NULL;
 563
 564	if (inode && unlikely(!spin_trylock(&inode->i_lock)))
 565		goto failed;
 566
 567	if (!IS_ROOT(dentry)) {
 568		parent = dentry->d_parent;
 569		if (unlikely(!spin_trylock(&parent->d_lock))) {
 570			if (inode)
 571				spin_unlock(&inode->i_lock);
 572			goto failed;
 573		}
 574	}
 575
 576	__dentry_kill(dentry);
 577	return parent;
 578
 579failed:
 580	spin_unlock(&dentry->d_lock);
 581	cpu_relax();
 582	return dentry; /* try again with same dentry */
 583}
 584
 585static inline struct dentry *lock_parent(struct dentry *dentry)
 586{
 587	struct dentry *parent = dentry->d_parent;
 588	if (IS_ROOT(dentry))
 589		return NULL;
 590	if (unlikely(dentry->d_lockref.count < 0))
 591		return NULL;
 592	if (likely(spin_trylock(&parent->d_lock)))
 593		return parent;
 594	rcu_read_lock();
 595	spin_unlock(&dentry->d_lock);
 596again:
 597	parent = ACCESS_ONCE(dentry->d_parent);
 598	spin_lock(&parent->d_lock);
 599	/*
 600	 * We can't blindly lock dentry until we are sure
 601	 * that we won't violate the locking order.
 602	 * Any changes of dentry->d_parent must have
 603	 * been done with parent->d_lock held, so
 604	 * spin_lock() above is enough of a barrier
 605	 * for checking if it's still our child.
 606	 */
 607	if (unlikely(parent != dentry->d_parent)) {
 608		spin_unlock(&parent->d_lock);
 609		goto again;
 610	}
 611	rcu_read_unlock();
 612	if (parent != dentry)
 613		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
 614	else
 615		parent = NULL;
 616	return parent;
 617}
 618
 619/*
 620 * Try to do a lockless dput(), and return whether that was successful.
 621 *
 622 * If unsuccessful, we return false, having already taken the dentry lock.
 623 *
 624 * The caller needs to hold the RCU read lock, so that the dentry is
 625 * guaranteed to stay around even if the refcount goes down to zero!
 626 */
 627static inline bool fast_dput(struct dentry *dentry)
 628{
 629	int ret;
 630	unsigned int d_flags;
 631
 632	/*
 633	 * If we have a d_op->d_delete() operation, we should not
 634	 * let the dentry count go to zero, so use "put_or_lock".
 635	 */
 636	if (unlikely(dentry->d_flags & DCACHE_OP_DELETE))
 637		return lockref_put_or_lock(&dentry->d_lockref);
 638
 639	/*
 640	 * .. otherwise, we can try to just decrement the
 641	 * lockref optimistically.
 642	 */
 643	ret = lockref_put_return(&dentry->d_lockref);
 644
 645	/*
 646	 * If the lockref_put_return() failed due to the lock being held
 647	 * by somebody else, the fast path has failed. We will need to
 648	 * get the lock, and then check the count again.
 649	 */
 650	if (unlikely(ret < 0)) {
 651		spin_lock(&dentry->d_lock);
 652		if (dentry->d_lockref.count > 1) {
 653			dentry->d_lockref.count--;
 654			spin_unlock(&dentry->d_lock);
 655			return true;
 656		}
 657		return false;
 658	}
 659
 660	/*
 661	 * If we weren't the last ref, we're done.
 662	 */
 663	if (ret)
 664		return true;
 665
 666	/*
 667	 * Careful, careful. The reference count went down
 668	 * to zero, but we don't hold the dentry lock, so
 669	 * somebody else could get it again, and do another
 670	 * dput(), and we need to not race with that.
 671	 *
 672	 * However, there is a very special and common case
 673	 * where we don't care, because there is nothing to
 674	 * do: the dentry is still hashed, it does not have
 675	 * a 'delete' op, and it's referenced and already on
 676	 * the LRU list.
 677	 *
 678	 * NOTE! Since we aren't locked, these values are
 679	 * not "stable". However, it is sufficient that at
 680	 * some point after we dropped the reference the
 681	 * dentry was hashed and the flags had the proper
 682	 * value. Other dentry users may have re-gotten
 683	 * a reference to the dentry and change that, but
 684	 * our work is done - we can leave the dentry
 685	 * around with a zero refcount.
 686	 */
 687	smp_rmb();
 688	d_flags = ACCESS_ONCE(dentry->d_flags);
 689	d_flags &= DCACHE_REFERENCED | DCACHE_LRU_LIST | DCACHE_DISCONNECTED;
 690
 691	/* Nothing to do? Dropping the reference was all we needed? */
 692	if (d_flags == (DCACHE_REFERENCED | DCACHE_LRU_LIST) && !d_unhashed(dentry))
 693		return true;
 694
 695	/*
 696	 * Not the fast normal case? Get the lock. We've already decremented
 697	 * the refcount, but we'll need to re-check the situation after
 698	 * getting the lock.
 699	 */
 700	spin_lock(&dentry->d_lock);
 701
 702	/*
 703	 * Did somebody else grab a reference to it in the meantime, and
 704	 * we're no longer the last user after all? Alternatively, somebody
 705	 * else could have killed it and marked it dead. Either way, we
 706	 * don't need to do anything else.
 707	 */
 708	if (dentry->d_lockref.count) {
 709		spin_unlock(&dentry->d_lock);
 710		return true;
 711	}
 712
 713	/*
 714	 * Re-get the reference we optimistically dropped. We hold the
 715	 * lock, and we just tested that it was zero, so we can just
 716	 * set it to 1.
 717	 */
 718	dentry->d_lockref.count = 1;
 719	return false;
 720}
 721
 722
 723/* 
 724 * This is dput
 725 *
 726 * This is complicated by the fact that we do not want to put
 727 * dentries that are no longer on any hash chain on the unused
 728 * list: we'd much rather just get rid of them immediately.
 729 *
 730 * However, that implies that we have to traverse the dentry
 731 * tree upwards to the parents which might _also_ now be
 732 * scheduled for deletion (it may have been only waiting for
 733 * its last child to go away).
 734 *
 735 * This tail recursion is done by hand as we don't want to depend
 736 * on the compiler to always get this right (gcc generally doesn't).
 737 * Real recursion would eat up our stack space.
 738 */
 739
 740/*
 741 * dput - release a dentry
 742 * @dentry: dentry to release 
 743 *
 744 * Release a dentry. This will drop the usage count and if appropriate
 745 * call the dentry unlink method as well as removing it from the queues and
 746 * releasing its resources. If the parent dentries were scheduled for release
 747 * they too may now get deleted.
 748 */
 749void dput(struct dentry *dentry)
 750{
 751	if (unlikely(!dentry))
 752		return;
 753
 754repeat:
 755	rcu_read_lock();
 756	if (likely(fast_dput(dentry))) {
 757		rcu_read_unlock();
 758		return;
 759	}
 760
 761	/* Slow case: now with the dentry lock held */
 762	rcu_read_unlock();
 763
 764	/* Unreachable? Get rid of it */
 765	if (unlikely(d_unhashed(dentry)))
 766		goto kill_it;
 767
 768	if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED))
 769		goto kill_it;
 770
 771	if (unlikely(dentry->d_flags & DCACHE_OP_DELETE)) {
 772		if (dentry->d_op->d_delete(dentry))
 773			goto kill_it;
 774	}
 775
 776	if (!(dentry->d_flags & DCACHE_REFERENCED))
 777		dentry->d_flags |= DCACHE_REFERENCED;
 778	dentry_lru_add(dentry);
 779
 780	dentry->d_lockref.count--;
 781	spin_unlock(&dentry->d_lock);
 782	return;
 783
 784kill_it:
 785	dentry = dentry_kill(dentry);
 786	if (dentry)
 787		goto repeat;
 788}
 789EXPORT_SYMBOL(dput);
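/*
 * Example (illustrative sketch, not part of this file): the canonical
 * reference pairing.  Code that keeps a dentry pointer past the scope
 * in which it obtained it must pin it with dget() and balance that
 * with dput() when done.
 */
static void example_pin_and_use(struct dentry *dentry)
{
	struct dentry *d = dget(dentry);	/* count++ under lockref */

	pr_info("in use: %pd\n", d);
	dput(d);		/* may turn it negative or free it */
}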
 790
 791
 792/* This must be called with d_lock held */
 793static inline void __dget_dlock(struct dentry *dentry)
 794{
 795	dentry->d_lockref.count++;
 796}
 797
 798static inline void __dget(struct dentry *dentry)
 799{
 800	lockref_get(&dentry->d_lockref);
 801}
 802
 803struct dentry *dget_parent(struct dentry *dentry)
 804{
 805	int gotref;
 806	struct dentry *ret;
 807
 808	/*
 809	 * Do optimistic parent lookup without any
 810	 * locking.
 811	 */
 812	rcu_read_lock();
 813	ret = ACCESS_ONCE(dentry->d_parent);
 814	gotref = lockref_get_not_zero(&ret->d_lockref);
 815	rcu_read_unlock();
 816	if (likely(gotref)) {
 817		if (likely(ret == ACCESS_ONCE(dentry->d_parent)))
 818			return ret;
 819		dput(ret);
 820	}
 821
 822repeat:
 823	/*
 824	 * Don't need rcu_dereference because we re-check it was correct under
 825	 * the lock.
 826	 */
 827	rcu_read_lock();
 828	ret = dentry->d_parent;
 829	spin_lock(&ret->d_lock);
 830	if (unlikely(ret != dentry->d_parent)) {
 831		spin_unlock(&ret->d_lock);
 832		rcu_read_unlock();
 833		goto repeat;
 834	}
 835	rcu_read_unlock();
 836	BUG_ON(!ret->d_lockref.count);
 837	ret->d_lockref.count++;
 838	spin_unlock(&ret->d_lock);
 839	return ret;
 840}
 841EXPORT_SYMBOL(dget_parent);
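/*
 * Example (illustrative sketch, not part of this file): d_parent can
 * change under a concurrent rename, so a raw dereference is unsafe
 * without d_lock; dget_parent() returns a pinned, stable parent.
 */
static void example_show_parentage(struct dentry *dentry)
{
	struct dentry *parent = dget_parent(dentry);

	pr_info("%pd is a child of %pd\n", dentry, parent);
	dput(parent);
}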
 842
 843/**
 844 * d_find_alias - grab a hashed alias of inode
 845 * @inode: inode in question
 846 *
 847 * If inode has a hashed alias, or is a directory and has any alias,
 848 * acquire the reference to alias and return it. Otherwise return NULL.
 849 * Notice that if inode is a directory there can be only one alias and
 850 * it can be unhashed only if it has no children, or if it is the root
 851 * of a filesystem, or if the directory was renamed and d_revalidate
 852 * was the first vfs operation to notice.
 853 *
 854 * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer
 855 * any other hashed alias over that one.
 856 */
 857static struct dentry *__d_find_alias(struct inode *inode)
 858{
 859	struct dentry *alias, *discon_alias;
 860
 861again:
 862	discon_alias = NULL;
 863	hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
 864		spin_lock(&alias->d_lock);
 865 		if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
 866			if (IS_ROOT(alias) &&
 867			    (alias->d_flags & DCACHE_DISCONNECTED)) {
 868				discon_alias = alias;
 869			} else {
 870				__dget_dlock(alias);
 871				spin_unlock(&alias->d_lock);
 872				return alias;
 873			}
 874		}
 875		spin_unlock(&alias->d_lock);
 876	}
 877	if (discon_alias) {
 878		alias = discon_alias;
 879		spin_lock(&alias->d_lock);
 880		if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
 881			__dget_dlock(alias);
 882			spin_unlock(&alias->d_lock);
 883			return alias;
 884		}
 885		spin_unlock(&alias->d_lock);
 886		goto again;
 887	}
 888	return NULL;
 889}
 890
 891struct dentry *d_find_alias(struct inode *inode)
 892{
 893	struct dentry *de = NULL;
 894
 895	if (!hlist_empty(&inode->i_dentry)) {
 896		spin_lock(&inode->i_lock);
 897		de = __d_find_alias(inode);
 898		spin_unlock(&inode->i_lock);
 899	}
 900	return de;
 901}
 902EXPORT_SYMBOL(d_find_alias);
 903
 904/*
 905 *	Try to kill dentries associated with this inode.
 906 * WARNING: you must own a reference to inode.
 907 */
 908void d_prune_aliases(struct inode *inode)
 909{
 910	struct dentry *dentry;
 911restart:
 912	spin_lock(&inode->i_lock);
 913	hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
 914		spin_lock(&dentry->d_lock);
 915		if (!dentry->d_lockref.count) {
 916			struct dentry *parent = lock_parent(dentry);
 917			if (likely(!dentry->d_lockref.count)) {
 918				__dentry_kill(dentry);
 919				dput(parent);
 920				goto restart;
 921			}
 922			if (parent)
 923				spin_unlock(&parent->d_lock);
 924		}
 925		spin_unlock(&dentry->d_lock);
 926	}
 927	spin_unlock(&inode->i_lock);
 928}
 929EXPORT_SYMBOL(d_prune_aliases);
 930
 931static void shrink_dentry_list(struct list_head *list)
 932{
 933	struct dentry *dentry, *parent;
 934
 935	while (!list_empty(list)) {
 936		struct inode *inode;
 937		dentry = list_entry(list->prev, struct dentry, d_lru);
 938		spin_lock(&dentry->d_lock);
 939		parent = lock_parent(dentry);
 940
 941		/*
 942		 * The dispose list is isolated and dentries are not accounted
 943		 * to the LRU here, so we can simply remove it from the list
 944		 * here regardless of whether it is referenced or not.
 945		 */
 946		d_shrink_del(dentry);
 947
 948		/*
 949		 * We found an inuse dentry which was not removed from
 950		 * the LRU because of laziness during lookup. Do not free it.
 951		 */
 952		if (dentry->d_lockref.count > 0) {
 953			spin_unlock(&dentry->d_lock);
 954			if (parent)
 955				spin_unlock(&parent->d_lock);
 956			continue;
 957		}
 958
 959
 960		if (unlikely(dentry->d_flags & DCACHE_DENTRY_KILLED)) {
 961			bool can_free = dentry->d_flags & DCACHE_MAY_FREE;
 962			spin_unlock(&dentry->d_lock);
 963			if (parent)
 964				spin_unlock(&parent->d_lock);
 965			if (can_free)
 966				dentry_free(dentry);
 967			continue;
 968		}
 969
 970		inode = dentry->d_inode;
 971		if (inode && unlikely(!spin_trylock(&inode->i_lock))) {
 972			d_shrink_add(dentry, list);
 973			spin_unlock(&dentry->d_lock);
 974			if (parent)
 975				spin_unlock(&parent->d_lock);
 976			continue;
 977		}
 978
 979		__dentry_kill(dentry);
 980
 981		/*
 982		 * We need to prune ancestors too. This is necessary to prevent
 983		 * quadratic behavior of shrink_dcache_parent(), but is also
 984		 * expected to be beneficial in reducing dentry cache
 985		 * fragmentation.
 986		 */
 987		dentry = parent;
 988		while (dentry && !lockref_put_or_lock(&dentry->d_lockref)) {
 989			parent = lock_parent(dentry);
 990			if (dentry->d_lockref.count != 1) {
 991				dentry->d_lockref.count--;
 992				spin_unlock(&dentry->d_lock);
 993				if (parent)
 994					spin_unlock(&parent->d_lock);
 995				break;
 996			}
 997			inode = dentry->d_inode;	/* can't be NULL */
 998			if (unlikely(!spin_trylock(&inode->i_lock))) {
 999				spin_unlock(&dentry->d_lock);
1000				if (parent)
1001					spin_unlock(&parent->d_lock);
1002				cpu_relax();
1003				continue;
1004			}
1005			__dentry_kill(dentry);
1006			dentry = parent;
1007		}
1008	}
1009}
1010
1011static enum lru_status dentry_lru_isolate(struct list_head *item,
1012		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
1013{
1014	struct list_head *freeable = arg;
1015	struct dentry	*dentry = container_of(item, struct dentry, d_lru);
1016
1017
1018	/*
1019	 * we are inverting the lru lock/dentry->d_lock here,
1020	 * so use a trylock. If we fail to get the lock, just skip
1021	 * it
1022	 */
1023	if (!spin_trylock(&dentry->d_lock))
1024		return LRU_SKIP;
1025
1026	/*
1027	 * Referenced dentries are still in use. If they have active
1028	 * counts, just remove them from the LRU. Otherwise give them
1029	 * another pass through the LRU.
1030	 */
1031	if (dentry->d_lockref.count) {
1032		d_lru_isolate(lru, dentry);
1033		spin_unlock(&dentry->d_lock);
1034		return LRU_REMOVED;
1035	}
1036
1037	if (dentry->d_flags & DCACHE_REFERENCED) {
1038		dentry->d_flags &= ~DCACHE_REFERENCED;
1039		spin_unlock(&dentry->d_lock);
1040
1041		/*
1042		 * The list move itself will be made by the common LRU code. At
1043		 * this point, we've dropped the dentry->d_lock but keep the
1044		 * lru lock. This is safe to do, since every list movement is
1045		 * protected by the lru lock even if both locks are held.
1046		 *
1047		 * This is guaranteed by the fact that all LRU management
1048		 * functions are intermediated by the LRU API calls like
1049		 * list_lru_add and list_lru_del. List movement in this file
1050		 * only ever occurs through these functions or through callbacks
1051		 * like this one, that are called from the LRU API.
1052		 *
1053		 * The only exceptions to this are functions like
1054		 * shrink_dentry_list, and code that first checks for the
1055		 * DCACHE_SHRINK_LIST flag.  Those are guaranteed to be
1056		 * operating only with stack provided lists after they are
1057		 * properly isolated from the main list.  Access is thus always
1058		 * local.
1059		 */
1060		return LRU_ROTATE;
1061	}
1062
1063	d_lru_shrink_move(lru, dentry, freeable);
1064	spin_unlock(&dentry->d_lock);
1065
1066	return LRU_REMOVED;
1067}
1068
1069/**
1070 * prune_dcache_sb - shrink the dcache
1071 * @sb: superblock
1072 * @sc: shrink control, passed to list_lru_shrink_walk()
1073 *
1074 * Attempt to shrink the superblock dcache LRU by @sc->nr_to_scan entries. This
1075 * is done when we need more memory and called from the superblock shrinker
1076 * function.
1077 *
1078 * This function may fail to free any resources if all the dentries are in
1079 * use.
1080 */
1081long prune_dcache_sb(struct super_block *sb, struct shrink_control *sc)
1082{
1083	LIST_HEAD(dispose);
1084	long freed;
1085
1086	freed = list_lru_shrink_walk(&sb->s_dentry_lru, sc,
1087				     dentry_lru_isolate, &dispose);
1088	shrink_dentry_list(&dispose);
1089	return freed;
1090}
1091
1092static enum lru_status dentry_lru_isolate_shrink(struct list_head *item,
1093		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
1094{
1095	struct list_head *freeable = arg;
1096	struct dentry	*dentry = container_of(item, struct dentry, d_lru);
1097
1098	/*
1099	 * we are inverting the lru lock/dentry->d_lock here,
1100	 * so use a trylock. If we fail to get the lock, just skip
1101	 * it
1102	 */
1103	if (!spin_trylock(&dentry->d_lock))
1104		return LRU_SKIP;
1105
1106	d_lru_shrink_move(lru, dentry, freeable);
1107	spin_unlock(&dentry->d_lock);
1108
1109	return LRU_REMOVED;
1110}
1111
1112
1113/**
1114 * shrink_dcache_sb - shrink dcache for a superblock
1115 * @sb: superblock
1116 *
1117 * Shrink the dcache for the specified super block. This is used to free
1118 * the dcache before unmounting a file system.
1119 */
1120void shrink_dcache_sb(struct super_block *sb)
1121{
1122	long freed;
1123
1124	do {
1125		LIST_HEAD(dispose);
1126
1127		freed = list_lru_walk(&sb->s_dentry_lru,
1128			dentry_lru_isolate_shrink, &dispose, UINT_MAX);
1129
1130		this_cpu_sub(nr_dentry_unused, freed);
1131		shrink_dentry_list(&dispose);
1132	} while (freed > 0);
1133}
1134EXPORT_SYMBOL(shrink_dcache_sb);
1135
1136/**
1137 * enum d_walk_ret - action to take during tree walk
1138 * @D_WALK_CONTINUE:	continue walk
1139 * @D_WALK_QUIT:	quit walk
1140 * @D_WALK_NORETRY:	quit when retry is needed
1141 * @D_WALK_SKIP:	skip this dentry and its children
1142 */
1143enum d_walk_ret {
1144	D_WALK_CONTINUE,
1145	D_WALK_QUIT,
1146	D_WALK_NORETRY,
1147	D_WALK_SKIP,
1148};
1149
1150/**
1151 * d_walk - walk the dentry tree
1152 * @parent:	start of walk
1153 * @data:	data passed to @enter() and @finish()
1154 * @enter:	callback when first entering the dentry
1155 * @finish:	callback when successfully finished the walk
1156 *
1157 * The @enter() and @finish() callbacks are called with d_lock held.
1158 */
1159static void d_walk(struct dentry *parent, void *data,
1160		   enum d_walk_ret (*enter)(void *, struct dentry *),
1161		   void (*finish)(void *))
1162{
1163	struct dentry *this_parent;
1164	struct list_head *next;
1165	unsigned seq = 0;
1166	enum d_walk_ret ret;
1167	bool retry = true;
1168
1169again:
1170	read_seqbegin_or_lock(&rename_lock, &seq);
1171	this_parent = parent;
1172	spin_lock(&this_parent->d_lock);
1173
1174	ret = enter(data, this_parent);
1175	switch (ret) {
1176	case D_WALK_CONTINUE:
1177		break;
1178	case D_WALK_QUIT:
1179	case D_WALK_SKIP:
1180		goto out_unlock;
1181	case D_WALK_NORETRY:
1182		retry = false;
1183		break;
1184	}
1185repeat:
1186	next = this_parent->d_subdirs.next;
1187resume:
1188	while (next != &this_parent->d_subdirs) {
1189		struct list_head *tmp = next;
1190		struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
1191		next = tmp->next;
1192
1193		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
1194
1195		ret = enter(data, dentry);
1196		switch (ret) {
1197		case D_WALK_CONTINUE:
1198			break;
1199		case D_WALK_QUIT:
1200			spin_unlock(&dentry->d_lock);
1201			goto out_unlock;
1202		case D_WALK_NORETRY:
1203			retry = false;
1204			break;
1205		case D_WALK_SKIP:
1206			spin_unlock(&dentry->d_lock);
1207			continue;
1208		}
1209
1210		if (!list_empty(&dentry->d_subdirs)) {
1211			spin_unlock(&this_parent->d_lock);
1212			spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
1213			this_parent = dentry;
1214			spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
1215			goto repeat;
1216		}
1217		spin_unlock(&dentry->d_lock);
1218	}
1219	/*
1220	 * All done at this level ... ascend and resume the search.
1221	 */
1222	rcu_read_lock();
1223ascend:
1224	if (this_parent != parent) {
1225		struct dentry *child = this_parent;
1226		this_parent = child->d_parent;
1227
1228		spin_unlock(&child->d_lock);
1229		spin_lock(&this_parent->d_lock);
1230
1231		/* might go back up the wrong parent if we have had a rename. */
1232		if (need_seqretry(&rename_lock, seq))
1233			goto rename_retry;
1234		/* go into the first sibling still alive */
1235		do {
1236			next = child->d_child.next;
1237			if (next == &this_parent->d_subdirs)
1238				goto ascend;
1239			child = list_entry(next, struct dentry, d_child);
1240		} while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED));
1241		rcu_read_unlock();
1242		goto resume;
1243	}
1244	if (need_seqretry(&rename_lock, seq))
1245		goto rename_retry;
1246	rcu_read_unlock();
1247	if (finish)
1248		finish(data);
1249
1250out_unlock:
1251	spin_unlock(&this_parent->d_lock);
1252	done_seqretry(&rename_lock, seq);
1253	return;
1254
1255rename_retry:
1256	spin_unlock(&this_parent->d_lock);
1257	rcu_read_unlock();
1258	BUG_ON(seq & 1);
1259	if (!retry)
1260		return;
1261	seq = 1;
1262	goto again;
1263}
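/*
 * Example (illustrative sketch, not part of this file): a minimal
 * d_walk() callback pair counting the dentries under a subtree.  The
 * enter() callback runs with d_lock held and steers the walk through
 * its enum d_walk_ret return value.
 */
static enum d_walk_ret example_count_one(void *data, struct dentry *dentry)
{
	unsigned long *count = data;

	(*count)++;
	return D_WALK_CONTINUE;
}

static unsigned long example_count_subtree(struct dentry *parent)
{
	unsigned long count = 0;

	d_walk(parent, &count, example_count_one, NULL);
	return count;
}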
1264
1265/*
1266 * Search for at least 1 mount point in the dentry's subdirs.
1267 * We descend to the next level whenever the d_subdirs
1268 * list is non-empty and continue searching.
1269 */
1270
1271static enum d_walk_ret check_mount(void *data, struct dentry *dentry)
1272{
1273	int *ret = data;
1274	if (d_mountpoint(dentry)) {
1275		*ret = 1;
1276		return D_WALK_QUIT;
1277	}
1278	return D_WALK_CONTINUE;
1279}
1280
1281/**
1282 * have_submounts - check for mounts over a dentry
1283 * @parent: dentry to check.
1284 *
1285 * Return true if the parent or its subdirectories contain
1286 * a mount point
1287 */
1288int have_submounts(struct dentry *parent)
1289{
1290	int ret = 0;
1291
1292	d_walk(parent, &ret, check_mount, NULL);
1293
1294	return ret;
1295}
1296EXPORT_SYMBOL(have_submounts);
1297
1298/*
1299 * Called by mount code to set a mountpoint and check if the mountpoint is
1300 * reachable (e.g. NFS can unhash a directory dentry and then the complete
1301 * subtree can become unreachable).
1302 *
1303 * Only one of d_invalidate() and d_set_mounted() must succeed.  For
1304 * this reason take rename_lock and d_lock on dentry and ancestors.
1305 */
1306int d_set_mounted(struct dentry *dentry)
1307{
1308	struct dentry *p;
1309	int ret = -ENOENT;
1310	write_seqlock(&rename_lock);
1311	for (p = dentry->d_parent; !IS_ROOT(p); p = p->d_parent) {
1312		/* Need exclusion wrt. d_invalidate() */
1313		spin_lock(&p->d_lock);
1314		if (unlikely(d_unhashed(p))) {
1315			spin_unlock(&p->d_lock);
1316			goto out;
1317		}
1318		spin_unlock(&p->d_lock);
1319	}
1320	spin_lock(&dentry->d_lock);
1321	if (!d_unlinked(dentry)) {
1322		dentry->d_flags |= DCACHE_MOUNTED;
1323		ret = 0;
1324	}
1325 	spin_unlock(&dentry->d_lock);
1326out:
1327	write_sequnlock(&rename_lock);
1328	return ret;
1329}
1330
1331/*
1332 * Search the dentry child list of the specified parent,
1333 * and move any unused dentries to the end of the unused
1334 * list for shrink_dentry_list(). We descend to the next level
1335 * whenever the d_subdirs list is non-empty and continue
1336 * searching.
1337 *
1338 * It returns zero iff there are no unused children,
1339 * otherwise it returns the number of children moved to
1340 * the end of the unused list. This may not be the total
1341 * number of unused children, because select_collect() can
1342 * drop the lock and return early due to latency
1343 * constraints.
1344 */
1345
1346struct select_data {
1347	struct dentry *start;
1348	struct list_head dispose;
1349	int found;
1350};
1351
1352static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
1353{
1354	struct select_data *data = _data;
1355	enum d_walk_ret ret = D_WALK_CONTINUE;
1356
1357	if (data->start == dentry)
1358		goto out;
1359
1360	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
1361		data->found++;
1362	} else {
1363		if (dentry->d_flags & DCACHE_LRU_LIST)
1364			d_lru_del(dentry);
1365		if (!dentry->d_lockref.count) {
1366			d_shrink_add(dentry, &data->dispose);
1367			data->found++;
1368		}
1369	}
1370	/*
1371	 * We can return to the caller if we have found some (this
1372	 * ensures forward progress). We'll be coming back to find
1373	 * the rest.
1374	 */
1375	if (!list_empty(&data->dispose))
1376		ret = need_resched() ? D_WALK_QUIT : D_WALK_NORETRY;
1377out:
1378	return ret;
1379}
1380
1381/**
1382 * shrink_dcache_parent - prune dcache
1383 * @parent: parent of entries to prune
1384 *
1385 * Prune the dcache to remove unused children of the parent dentry.
1386 */
1387void shrink_dcache_parent(struct dentry *parent)
1388{
1389	for (;;) {
1390		struct select_data data;
1391
1392		INIT_LIST_HEAD(&data.dispose);
1393		data.start = parent;
1394		data.found = 0;
1395
1396		d_walk(parent, &data, select_collect, NULL);
1397		if (!data.found)
1398			break;
1399
1400		shrink_dentry_list(&data.dispose);
1401		cond_resched();
1402	}
1403}
1404EXPORT_SYMBOL(shrink_dcache_parent);
1405
1406static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
1407{
1408	/* it has busy descendants; complain about those instead */
1409	if (!list_empty(&dentry->d_subdirs))
1410		return D_WALK_CONTINUE;
1411
1412	/* root with refcount 1 is fine */
1413	if (dentry == _data && dentry->d_lockref.count == 1)
1414		return D_WALK_CONTINUE;
1415
1416	printk(KERN_ERR "BUG: Dentry %p{i=%lx,n=%pd} "
1417			"still in use (%d) [unmount of %s %s]\n",
1418		       dentry,
1419		       dentry->d_inode ?
1420		       dentry->d_inode->i_ino : 0UL,
1421		       dentry,
1422		       dentry->d_lockref.count,
1423		       dentry->d_sb->s_type->name,
1424		       dentry->d_sb->s_id);
1425	WARN_ON(1);
1426	return D_WALK_CONTINUE;
1427}
1428
1429static void do_one_tree(struct dentry *dentry)
1430{
1431	shrink_dcache_parent(dentry);
1432	d_walk(dentry, dentry, umount_check, NULL);
1433	d_drop(dentry);
1434	dput(dentry);
1435}
1436
1437/*
1438 * destroy the dentries attached to a superblock on unmounting
1439 */
1440void shrink_dcache_for_umount(struct super_block *sb)
1441{
1442	struct dentry *dentry;
1443
1444	WARN(down_read_trylock(&sb->s_umount), "s_umount should've been locked");
1445
1446	dentry = sb->s_root;
1447	sb->s_root = NULL;
1448	do_one_tree(dentry);
1449
1450	while (!hlist_bl_empty(&sb->s_anon)) {
1451		dentry = dget(hlist_bl_entry(hlist_bl_first(&sb->s_anon), struct dentry, d_hash));
1452		do_one_tree(dentry);
1453	}
1454}
1455
1456struct detach_data {
1457	struct select_data select;
1458	struct dentry *mountpoint;
1459};
1460static enum d_walk_ret detach_and_collect(void *_data, struct dentry *dentry)
1461{
1462	struct detach_data *data = _data;
1463
1464	if (d_mountpoint(dentry)) {
1465		__dget_dlock(dentry);
1466		data->mountpoint = dentry;
1467		return D_WALK_QUIT;
1468	}
1469
1470	return select_collect(&data->select, dentry);
1471}
1472
1473static void check_and_drop(void *_data)
1474{
1475	struct detach_data *data = _data;
1476
1477	if (!data->mountpoint && !data->select.found)
1478		__d_drop(data->select.start);
1479}
1480
1481/**
1482 * d_invalidate - detach submounts, prune dcache, and drop
1483 * @dentry: dentry to invalidate (aka detach, prune and drop)
1484 *
1485 * no dcache lock.
1486 *
1487 * The final d_drop is done as an atomic operation relative to
1488 * rename_lock ensuring there are no races with d_set_mounted.  This
1489 * ensures there are no unhashed dentries on the path to a mountpoint.
1490 */
1491void d_invalidate(struct dentry *dentry)
1492{
1493	/*
1494	 * If it's already been dropped, return OK.
1495	 */
1496	spin_lock(&dentry->d_lock);
1497	if (d_unhashed(dentry)) {
1498		spin_unlock(&dentry->d_lock);
1499		return;
1500	}
1501	spin_unlock(&dentry->d_lock);
1502
1503	/* Negative dentries can be dropped without further checks */
1504	if (!dentry->d_inode) {
1505		d_drop(dentry);
1506		return;
1507	}
1508
1509	for (;;) {
1510		struct detach_data data;
1511
1512		data.mountpoint = NULL;
1513		INIT_LIST_HEAD(&data.select.dispose);
1514		data.select.start = dentry;
1515		data.select.found = 0;
1516
1517		d_walk(dentry, &data, detach_and_collect, check_and_drop);
1518
1519		if (data.select.found)
1520			shrink_dentry_list(&data.select.dispose);
1521
1522		if (data.mountpoint) {
1523			detach_mounts(data.mountpoint);
1524			dput(data.mountpoint);
1525		}
1526
1527		if (!data.mountpoint && !data.select.found)
1528			break;
1529
1530		cond_resched();
1531	}
1532}
1533EXPORT_SYMBOL(d_invalidate);
1534
1535/**
1536 * __d_alloc	-	allocate a dcache entry
1537 * @sb: filesystem it will belong to
1538 * @name: qstr of the name
1539 *
1540 * Allocates a dentry. It returns %NULL if there is insufficient memory
1541 * available. On success the dentry is returned. The name passed in is
1542 * copied, so the caller's copy may be reused after this call.
1543 */
1544 
1545struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
1546{
1547	struct dentry *dentry;
1548	char *dname;
1549
1550	dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
1551	if (!dentry)
1552		return NULL;
1553
1554	/*
1555	 * We guarantee that the inline name is always NUL-terminated.
1556	 * This way the memcpy() done by the name switching in rename
1557	 * will still always have a NUL at the end, even if we might
1558	 * be overwriting an internal NUL character
1559	 */
1560	dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
1561	if (name->len > DNAME_INLINE_LEN-1) {
1562		size_t size = offsetof(struct external_name, name[1]);
1563		struct external_name *p = kmalloc(size + name->len,
1564						  GFP_KERNEL_ACCOUNT);
1565		if (!p) {
1566			kmem_cache_free(dentry_cache, dentry); 
1567			return NULL;
1568		}
1569		atomic_set(&p->u.count, 1);
1570		dname = p->name;
1571		if (IS_ENABLED(CONFIG_DCACHE_WORD_ACCESS))
1572			kasan_unpoison_shadow(dname,
1573				round_up(name->len + 1,	sizeof(unsigned long)));
1574	} else  {
1575		dname = dentry->d_iname;
1576	}	
1577
1578	dentry->d_name.len = name->len;
1579	dentry->d_name.hash = name->hash;
1580	memcpy(dname, name->name, name->len);
1581	dname[name->len] = 0;
1582
1583	/* Make sure we always see the terminating NUL character */
1584	smp_wmb();
1585	dentry->d_name.name = dname;
1586
1587	dentry->d_lockref.count = 1;
1588	dentry->d_flags = 0;
1589	spin_lock_init(&dentry->d_lock);
1590	seqcount_init(&dentry->d_seq);
1591	dentry->d_inode = NULL;
1592	dentry->d_parent = dentry;
1593	dentry->d_sb = sb;
1594	dentry->d_op = NULL;
1595	dentry->d_fsdata = NULL;
1596	INIT_HLIST_BL_NODE(&dentry->d_hash);
1597	INIT_LIST_HEAD(&dentry->d_lru);
1598	INIT_LIST_HEAD(&dentry->d_subdirs);
1599	INIT_HLIST_NODE(&dentry->d_u.d_alias);
1600	INIT_LIST_HEAD(&dentry->d_child);
1601	d_set_d_op(dentry, dentry->d_sb->s_d_op);
1602
1603	this_cpu_inc(nr_dentry);
1604
1605	return dentry;
1606}
1607
1608/**
1609 * d_alloc	-	allocate a dcache entry
1610 * @parent: parent of entry to allocate
1611 * @name: qstr of the name
1612 *
1613 * Allocates a dentry. It returns %NULL if there is insufficient memory
1614 * available. On success the dentry is returned. The name passed in is
1615 * copied, so the caller's copy may be reused after this call.
1616 */
1617struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
1618{
1619	struct dentry *dentry = __d_alloc(parent->d_sb, name);
1620	if (!dentry)
1621		return NULL;
1622
1623	spin_lock(&parent->d_lock);
1624	/*
1625	 * don't need child lock because it is not subject
1626	 * to concurrency here
1627	 */
1628	__dget_dlock(parent);
1629	dentry->d_parent = parent;
1630	list_add(&dentry->d_child, &parent->d_subdirs);
1631	spin_unlock(&parent->d_lock);
1632
1633	return dentry;
1634}
1635EXPORT_SYMBOL(d_alloc);
1636
1637/**
1638 * d_alloc_pseudo - allocate a dentry (for lookup-less filesystems)
1639 * @sb: the superblock
1640 * @name: qstr of the name
1641 *
1642 * For a filesystem that just pins its dentries in memory and never
1643 * performs lookups at all, return an unhashed IS_ROOT dentry.
1644 */
1645struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name)
1646{
1647	return __d_alloc(sb, name);
1648}
1649EXPORT_SYMBOL(d_alloc_pseudo);
1650
1651struct dentry *d_alloc_name(struct dentry *parent, const char *name)
1652{
1653	struct qstr q;
1654
1655	q.name = name;
1656	q.len = strlen(name);
1657	q.hash = full_name_hash(q.name, q.len);
1658	return d_alloc(parent, &q);
1659}
1660EXPORT_SYMBOL(d_alloc_name);
1661
1662void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
1663{
1664	WARN_ON_ONCE(dentry->d_op);
1665	WARN_ON_ONCE(dentry->d_flags & (DCACHE_OP_HASH	|
1666				DCACHE_OP_COMPARE	|
1667				DCACHE_OP_REVALIDATE	|
1668				DCACHE_OP_WEAK_REVALIDATE	|
1669				DCACHE_OP_DELETE	|
1670				DCACHE_OP_SELECT_INODE	|
1671				DCACHE_OP_REAL));
1672	dentry->d_op = op;
1673	if (!op)
1674		return;
1675	if (op->d_hash)
1676		dentry->d_flags |= DCACHE_OP_HASH;
1677	if (op->d_compare)
1678		dentry->d_flags |= DCACHE_OP_COMPARE;
1679	if (op->d_revalidate)
1680		dentry->d_flags |= DCACHE_OP_REVALIDATE;
1681	if (op->d_weak_revalidate)
1682		dentry->d_flags |= DCACHE_OP_WEAK_REVALIDATE;
1683	if (op->d_delete)
1684		dentry->d_flags |= DCACHE_OP_DELETE;
1685	if (op->d_prune)
1686		dentry->d_flags |= DCACHE_OP_PRUNE;
1687	if (op->d_select_inode)
1688		dentry->d_flags |= DCACHE_OP_SELECT_INODE;
1689	if (op->d_real)
1690		dentry->d_flags |= DCACHE_OP_REAL;
1691
1692}
1693EXPORT_SYMBOL(d_set_d_op);
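/*
 * Example (illustrative sketch, not part of this file): filesystems
 * normally do not call d_set_d_op() directly; they set sb->s_d_op at
 * mount time and __d_alloc() applies it to every new dentry.
 * always_delete_dentry() is the stock helper for filesystems that do
 * not want unused dentries cached.
 */
static const struct dentry_operations example_dentry_ops = {
	.d_delete = always_delete_dentry,
};

static void example_init_super(struct super_block *sb)
{
	sb->s_d_op = &example_dentry_ops;
}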
1694
1695
1696/*
1697 * d_set_fallthru - Mark a dentry as falling through to a lower layer
1698 * @dentry - The dentry to mark
1699 *
1700 * Mark a dentry as falling through to the lower layer (as set with
1701 * d_pin_lower()).  This flag may be recorded on the medium.
1702 */
1703void d_set_fallthru(struct dentry *dentry)
1704{
1705	spin_lock(&dentry->d_lock);
1706	dentry->d_flags |= DCACHE_FALLTHRU;
1707	spin_unlock(&dentry->d_lock);
1708}
1709EXPORT_SYMBOL(d_set_fallthru);
1710
1711static unsigned d_flags_for_inode(struct inode *inode)
1712{
1713	unsigned add_flags = DCACHE_REGULAR_TYPE;
1714
1715	if (!inode)
1716		return DCACHE_MISS_TYPE;
1717
1718	if (S_ISDIR(inode->i_mode)) {
1719		add_flags = DCACHE_DIRECTORY_TYPE;
1720		if (unlikely(!(inode->i_opflags & IOP_LOOKUP))) {
1721			if (unlikely(!inode->i_op->lookup))
1722				add_flags = DCACHE_AUTODIR_TYPE;
1723			else
1724				inode->i_opflags |= IOP_LOOKUP;
1725		}
1726		goto type_determined;
1727	}
1728
1729	if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) {
1730		if (unlikely(inode->i_op->get_link)) {
1731			add_flags = DCACHE_SYMLINK_TYPE;
1732			goto type_determined;
1733		}
1734		inode->i_opflags |= IOP_NOFOLLOW;
1735	}
1736
1737	if (unlikely(!S_ISREG(inode->i_mode)))
1738		add_flags = DCACHE_SPECIAL_TYPE;
1739
1740type_determined:
1741	if (unlikely(IS_AUTOMOUNT(inode)))
1742		add_flags |= DCACHE_NEED_AUTOMOUNT;
1743	return add_flags;
1744}
1745
1746static void __d_instantiate(struct dentry *dentry, struct inode *inode)
1747{
1748	unsigned add_flags = d_flags_for_inode(inode);
1749
1750	spin_lock(&dentry->d_lock);
1751	hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
1752	raw_write_seqcount_begin(&dentry->d_seq);
1753	__d_set_inode_and_type(dentry, inode, add_flags);
1754	raw_write_seqcount_end(&dentry->d_seq);
1755	__fsnotify_d_instantiate(dentry);
1756	spin_unlock(&dentry->d_lock);
1757}
1758
1759/**
1760 * d_instantiate - fill in inode information for a dentry
1761 * @entry: dentry to complete
1762 * @inode: inode to attach to this dentry
1763 *
1764 * Fill in inode information in the entry.
1765 *
1766 * This turns negative dentries into productive full members
1767 * of society.
1768 *
1769 * NOTE! This assumes that the inode count has been incremented
1770 * (or otherwise set) by the caller to indicate that it is now
1771 * in use by the dcache.
1772 */
1773 
1774void d_instantiate(struct dentry *entry, struct inode * inode)
1775{
1776	BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
1777	if (inode) {
1778		spin_lock(&inode->i_lock);
1779		__d_instantiate(entry, inode);
1780		spin_unlock(&inode->i_lock);
1781	}
1782	security_d_instantiate(entry, inode);
1783}
1784EXPORT_SYMBOL(d_instantiate);
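/*
 * Example (illustrative sketch, not part of this file): the usual
 * allocate-then-instantiate sequence in a filesystem's create path.
 * example_new_inode() is hypothetical and stands in for the
 * filesystem's own inode allocation; d_instantiate() consumes the
 * inode reference it is handed.
 */
static struct inode *example_new_inode(struct super_block *sb); /* hypothetical */

static struct dentry *example_create_child(struct dentry *dir,
					   const struct qstr *name)
{
	struct dentry *dentry = d_alloc(dir, name);
	struct inode *inode;

	if (!dentry)
		return ERR_PTR(-ENOMEM);
	inode = example_new_inode(dir->d_sb);
	if (!inode) {
		dput(dentry);
		return ERR_PTR(-ENOMEM);
	}
	d_instantiate(dentry, inode);
	return dentry;
}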
1785
1786/**
1787 * d_instantiate_no_diralias - instantiate a non-aliased dentry
1788 * @entry: dentry to complete
1789 * @inode: inode to attach to this dentry
1790 *
1791 * Fill in inode information in the entry.  If a directory alias is found, then
1792 * return an error (and drop inode).  Together with d_materialise_unique() this
1793 * guarantees that a directory inode may never have more than one alias.
1794 */
1795int d_instantiate_no_diralias(struct dentry *entry, struct inode *inode)
1796{
1797	BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
1798
1799	spin_lock(&inode->i_lock);
1800	if (S_ISDIR(inode->i_mode) && !hlist_empty(&inode->i_dentry)) {
1801		spin_unlock(&inode->i_lock);
1802		iput(inode);
1803		return -EBUSY;
1804	}
1805	__d_instantiate(entry, inode);
1806	spin_unlock(&inode->i_lock);
1807	security_d_instantiate(entry, inode);
1808
1809	return 0;
1810}
1811EXPORT_SYMBOL(d_instantiate_no_diralias);
1812
1813struct dentry *d_make_root(struct inode *root_inode)
1814{
1815	struct dentry *res = NULL;
1816
1817	if (root_inode) {
1818		static const struct qstr name = QSTR_INIT("/", 1);
1819
1820		res = __d_alloc(root_inode->i_sb, &name);
1821		if (res)
1822			d_instantiate(res, root_inode);
1823		else
1824			iput(root_inode);
1825	}
1826	return res;
1827}
1828EXPORT_SYMBOL(d_make_root);
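/*
 * Example (illustrative sketch, not part of this file): typical
 * d_make_root() use at the end of a fill_super implementation.  On
 * failure the root inode reference has already been dropped, so the
 * caller only reports -ENOMEM.
 */
static int example_fill_super(struct super_block *sb)
{
	struct inode *root = new_inode(sb);

	if (!root)
		return -ENOMEM;
	root->i_mode = S_IFDIR | 0755;
	root->i_op = &simple_dir_inode_operations;
	root->i_fop = &simple_dir_operations;
	sb->s_root = d_make_root(root);
	return sb->s_root ? 0 : -ENOMEM;
}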
1829
1830static struct dentry * __d_find_any_alias(struct inode *inode)
1831{
1832	struct dentry *alias;
1833
1834	if (hlist_empty(&inode->i_dentry))
1835		return NULL;
1836	alias = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
1837	__dget(alias);
1838	return alias;
1839}
1840
1841/**
1842 * d_find_any_alias - find any alias for a given inode
1843 * @inode: inode to find an alias for
1844 *
1845 * If any aliases exist for the given inode, take and return a
1846 * reference for one of them.  If no aliases exist, return %NULL.
1847 */
1848struct dentry *d_find_any_alias(struct inode *inode)
1849{
1850	struct dentry *de;
1851
1852	spin_lock(&inode->i_lock);
1853	de = __d_find_any_alias(inode);
1854	spin_unlock(&inode->i_lock);
1855	return de;
1856}
1857EXPORT_SYMBOL(d_find_any_alias);
1858
1859static struct dentry *__d_obtain_alias(struct inode *inode, int disconnected)
1860{
1861	static const struct qstr anonstring = QSTR_INIT("/", 1);
1862	struct dentry *tmp;
1863	struct dentry *res;
1864	unsigned add_flags;
1865
1866	if (!inode)
1867		return ERR_PTR(-ESTALE);
1868	if (IS_ERR(inode))
1869		return ERR_CAST(inode);
1870
1871	res = d_find_any_alias(inode);
1872	if (res)
1873		goto out_iput;
1874
1875	tmp = __d_alloc(inode->i_sb, &anonstring);
1876	if (!tmp) {
1877		res = ERR_PTR(-ENOMEM);
1878		goto out_iput;
1879	}
1880
1881	spin_lock(&inode->i_lock);
1882	res = __d_find_any_alias(inode);
1883	if (res) {
1884		spin_unlock(&inode->i_lock);
1885		dput(tmp);
1886		goto out_iput;
1887	}
1888
1889	/* attach a disconnected dentry */
1890	add_flags = d_flags_for_inode(inode);
1891
1892	if (disconnected)
1893		add_flags |= DCACHE_DISCONNECTED;
1894
1895	spin_lock(&tmp->d_lock);
1896	__d_set_inode_and_type(tmp, inode, add_flags);
1897	hlist_add_head(&tmp->d_u.d_alias, &inode->i_dentry);
1898	hlist_bl_lock(&tmp->d_sb->s_anon);
1899	hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon);
1900	hlist_bl_unlock(&tmp->d_sb->s_anon);
1901	spin_unlock(&tmp->d_lock);
1902	spin_unlock(&inode->i_lock);
1903	security_d_instantiate(tmp, inode);
1904
1905	return tmp;
1906
1907 out_iput:
1908	if (res && !IS_ERR(res))
1909		security_d_instantiate(res, inode);
1910	iput(inode);
1911	return res;
1912}
1913
1914/**
1915 * d_obtain_alias - find or allocate a DISCONNECTED dentry for a given inode
1916 * @inode: inode to allocate the dentry for
1917 *
1918 * Obtain a dentry for an inode resulting from NFS filehandle conversion or
1919 * similar open by handle operations.  The returned dentry may be anonymous,
1920 * or may have a full name (if the inode was already in the cache).
1921 *
1922 * When called on a directory inode, we must ensure that the inode only ever
1923 * has one dentry.  If a dentry is found, that is returned instead of
1924 * allocating a new one.
1925 *
1926 * On successful return, the reference to the inode has been transferred
1927 * to the dentry.  In case of an error the reference on the inode is released.
1928 * To make it easier to use in export operations a %NULL or IS_ERR inode may
1929 * be passed in and the error will be propagated to the return value,
1930 * with a %NULL @inode replaced by ERR_PTR(-ESTALE).
1931 */
1932struct dentry *d_obtain_alias(struct inode *inode)
1933{
1934	return __d_obtain_alias(inode, 1);
1935}
1936EXPORT_SYMBOL(d_obtain_alias);
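/*
 * Example (illustrative sketch, not part of this file): the classic
 * export_operations pattern when decoding an NFS file handle.
 * example_iget() is hypothetical and stands in for the filesystem's
 * inode lookup; d_obtain_alias() copes with NULL and IS_ERR inodes.
 */
static struct inode *example_iget(struct super_block *sb,
				  unsigned long ino);	/* hypothetical */

static struct dentry *example_fh_to_dentry(struct super_block *sb,
					   unsigned long ino)
{
	struct inode *inode = example_iget(sb, ino);

	return d_obtain_alias(inode);
}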
1937
1938/**
1939 * d_obtain_root - find or allocate a dentry for a given inode
1940 * @inode: inode to allocate the dentry for
1941 *
1942 * Obtain an IS_ROOT dentry for the root of a filesystem.
1943 *
1944 * We must ensure that directory inodes only ever have one dentry.  If a
1945 * dentry is found, that is returned instead of allocating a new one.
1946 *
1947 * On successful return, the reference to the inode has been transferred
1948 * to the dentry.  In case of an error the reference on the inode is
1949 * released.  A %NULL or IS_ERR inode may be passed in and the error
1950 * will be propagated to the return value, with a %NULL @inode
1951 * replaced by ERR_PTR(-ESTALE).
1952 */
1953struct dentry *d_obtain_root(struct inode *inode)
1954{
1955	return __d_obtain_alias(inode, 0);
1956}
1957EXPORT_SYMBOL(d_obtain_root);
1958
1959/**
1960 * d_add_ci - lookup or allocate new dentry with case-exact name
1961 * @inode:  the inode case-insensitive lookup has found
1962 * @dentry: the negative dentry that was passed to the parent's lookup func
1963 * @name:   the case-exact name to be associated with the returned dentry
1964 *
1965 * This is to avoid filling the dcache with case-insensitive names to the
1966 * same inode; only the actual correct case is stored in the dcache for
1967 * case-insensitive filesystems.
1968 *
1969 * If a case-insensitive lookup matches and the case-exact dentry
1970 * already exists in the dcache, use it and return it.
1971 *
1972 * If no entry exists with the exact case name, allocate new dentry with
1973 * the exact case, and return the spliced entry.
1974 */
1975struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
1976			struct qstr *name)
1977{
1978	struct dentry *found;
1979	struct dentry *new;
1980
1981	/*
1982	 * First check if a dentry matching the name already exists,
1983	 * if not go ahead and create it now.
1984	 */
1985	found = d_hash_and_lookup(dentry->d_parent, name);
1986	if (!found) {
1987		new = d_alloc(dentry->d_parent, name);
1988		if (!new) {
1989			found = ERR_PTR(-ENOMEM);
1990		} else {
1991			found = d_splice_alias(inode, new);
1992			if (found) {
1993				dput(new);
1994				return found;
1995			}
1996			return new;
1997		}
1998	}
1999	iput(inode);
2000	return found;
2001}
2002EXPORT_SYMBOL(d_add_ci);
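/*
 * Example (illustrative sketch, not part of this file): a
 * case-insensitive filesystem's ->lookup using d_add_ci() once the
 * on-disk, case-exact name has been found.  example_find_exact() is
 * hypothetical; d_add_ci() consumes the inode reference.
 */
static struct inode *example_find_exact(struct inode *dir,
					const struct qstr *name,
					struct qstr *exact);	/* hypothetical */

static struct dentry *example_ci_lookup(struct inode *dir,
					struct dentry *dentry)
{
	struct qstr exact;
	struct inode *inode;

	inode = example_find_exact(dir, &dentry->d_name, &exact);
	if (!inode)
		return NULL;	/* VFS will hash a negative dentry */
	return d_add_ci(dentry, inode, &exact);
}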
2003
2004/*
2005 * Do the slow-case of the dentry name compare.
2006 *
2007 * Unlike the dentry_cmp() function, we need to atomically
2008 * load the name and length information, so that the
2009 * filesystem can rely on them, and can use the 'name' and
2010 * 'len' information without worrying about walking off the
2011 * end of memory etc.
2012 *
2013 * Thus the read_seqcount_retry() and the "duplicate" info
2014 * in arguments (the low-level filesystem should not look
2015 * at the dentry inode or name contents directly, since
2016 * rename can change them while we're in RCU mode).
2017 */
2018enum slow_d_compare {
2019	D_COMP_OK,
2020	D_COMP_NOMATCH,
2021	D_COMP_SEQRETRY,
2022};
2023
2024static noinline enum slow_d_compare slow_dentry_cmp(
2025		const struct dentry *parent,
2026		struct dentry *dentry,
2027		unsigned int seq,
2028		const struct qstr *name)
2029{
2030	int tlen = dentry->d_name.len;
2031	const char *tname = dentry->d_name.name;
2032
2033	if (read_seqcount_retry(&dentry->d_seq, seq)) {
2034		cpu_relax();
2035		return D_COMP_SEQRETRY;
2036	}
2037	if (parent->d_op->d_compare(parent, dentry, tlen, tname, name))
2038		return D_COMP_NOMATCH;
2039	return D_COMP_OK;
2040}
2041
2042/**
2043 * __d_lookup_rcu - search for a dentry (racy, store-free)
2044 * @parent: parent dentry
2045 * @name: qstr of name we wish to find
2046 * @seqp: returns d_seq value at the point where the dentry was found
2047 * Returns: dentry, or NULL
2048 *
2049 * __d_lookup_rcu is the dcache lookup function for rcu-walk name
2050 * resolution (store-free path walking) design described in
2051 * Documentation/filesystems/path-lookup.txt.
2052 *
2053 * This is not to be used outside core vfs.
2054 *
2055 * __d_lookup_rcu must only be used in rcu-walk mode, ie. with vfsmount lock
2056 * held, and rcu_read_lock held. The returned dentry must not be stored into
2057 * without taking d_lock and checking d_seq sequence count against @seq
2058 * returned here.
2059 *
2060 * A refcount may be taken on the found dentry by taking d_lock and
2061 * rechecking d_seq against the value returned here.
2062 *
2063 * Alternatively, __d_lookup_rcu may be called again to look up the child of
2064 * the returned dentry, so long as its parent's seqlock is checked after the
2065 * child is looked up. Thus, an interlocking stepping of sequence lock checks
2066 * is formed, giving integrity down the path walk.
2067 *
2068 * NOTE! The caller *has* to check the resulting dentry against the sequence
2069 * number we've returned before using any of the resulting dentry state!
2070 */
2071struct dentry *__d_lookup_rcu(const struct dentry *parent,
2072				const struct qstr *name,
2073				unsigned *seqp)
2074{
2075	u64 hashlen = name->hash_len;
2076	const unsigned char *str = name->name;
2077	struct hlist_bl_head *b = d_hash(parent, hashlen_hash(hashlen));
2078	struct hlist_bl_node *node;
2079	struct dentry *dentry;
2080
2081	/*
2082	 * Note: There is significant duplication with __d_lookup which is
2083	 * required to prevent single threaded performance regressions
2084	 * especially on architectures where smp_rmb (in seqcounts) are costly.
2085	 * Keep the two functions in sync.
2086	 */
2087
2088	/*
2089	 * The hash list is protected using RCU.
2090	 *
2091	 * Carefully use d_seq when comparing a candidate dentry, to avoid
2092	 * races with d_move().
2093	 *
2094	 * It is possible that concurrent renames can mess up our list
2095	 * walk here and result in missing our dentry, resulting in the
2096	 * false-negative result. d_lookup() protects against concurrent
2097	 * renames using rename_lock seqlock.
2098	 *
2099	 * See Documentation/filesystems/path-lookup.txt for more details.
2100	 */
2101	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
2102		unsigned seq;
2103
2104seqretry:
2105		/*
2106		 * The dentry sequence count protects us from concurrent
2107		 * renames, and thus protects parent and name fields.
2108		 *
2109		 * The caller must perform a seqcount check in order
2110		 * to do anything useful with the returned dentry.
2111		 *
2112		 * NOTE! We do a "raw" seqcount_begin here. That means that
2113		 * we don't wait for the sequence count to stabilize if it
2114		 * is in the middle of a sequence change. If we do the slow
2115		 * dentry compare, we will do seqretries until it is stable,
2116		 * and if we end up with a successful lookup, we actually
2117		 * want to exit RCU lookup anyway.
2118		 */
2119		seq = raw_seqcount_begin(&dentry->d_seq);
2120		if (dentry->d_parent != parent)
2121			continue;
2122		if (d_unhashed(dentry))
2123			continue;
2124
2125		if (unlikely(parent->d_flags & DCACHE_OP_COMPARE)) {
2126			if (dentry->d_name.hash != hashlen_hash(hashlen))
2127				continue;
2128			*seqp = seq;
2129			switch (slow_dentry_cmp(parent, dentry, seq, name)) {
2130			case D_COMP_OK:
2131				return dentry;
2132			case D_COMP_NOMATCH:
2133				continue;
2134			default:
2135				goto seqretry;
2136			}
2137		}
2138
2139		if (dentry->d_name.hash_len != hashlen)
2140			continue;
2141		*seqp = seq;
2142		if (!dentry_cmp(dentry, str, hashlen_len(hashlen)))
2143			return dentry;
2144	}
2145	return NULL;
2146}
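/*
 * Example (illustrative sketch, not part of this file): legitimizing
 * an rcu-walk result.  The d_seq snapshot returned via @seqp must be
 * re-verified under d_lock before the dentry may be pinned, exactly
 * as the comment above requires; since __d_drop() also invalidates
 * d_seq, the recheck catches a concurrent kill as well as a rename.
 */
static struct dentry *example_lookup_and_pin(struct dentry *parent,
					     const struct qstr *name)
{
	struct dentry *dentry;
	unsigned seq;

	rcu_read_lock();
	dentry = __d_lookup_rcu(parent, name, &seq);
	if (dentry) {
		spin_lock(&dentry->d_lock);
		if (read_seqcount_retry(&dentry->d_seq, seq)) {
			spin_unlock(&dentry->d_lock);
			dentry = NULL;	/* raced: caller falls back */
		} else {
			dentry->d_lockref.count++;	/* pinned */
			spin_unlock(&dentry->d_lock);
		}
	}
	rcu_read_unlock();
	return dentry;
}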
2147
2148/**
2149 * d_lookup - search for a dentry
2150 * @parent: parent dentry
2151 * @name: qstr of name we wish to find
2152 * Returns: dentry, or NULL
2153 *
2154 * d_lookup searches the children of the parent dentry for the name in
2155 * question. If the dentry is found its reference count is incremented and the
2156 * dentry is returned. The caller must use dput to free the entry when it has
2157 * finished using it. %NULL is returned if the dentry does not exist.
2158 */
2159struct dentry *d_lookup(const struct dentry *parent, const struct qstr *name)
2160{
2161	struct dentry *dentry;
2162	unsigned seq;
2163
2164	do {
2165		seq = read_seqbegin(&rename_lock);
2166		dentry = __d_lookup(parent, name);
2167		if (dentry)
2168			break;
2169	} while (read_seqretry(&rename_lock, seq));
2170	return dentry;
2171}
2172EXPORT_SYMBOL(d_lookup);
2173
2174/**
2175 * __d_lookup - search for a dentry (racy)
2176 * @parent: parent dentry
2177 * @name: qstr of name we wish to find
2178 * Returns: dentry, or NULL
2179 *
2180 * __d_lookup is like d_lookup, however it may (rarely) return a
2181 * false-negative result due to unrelated rename activity.
2182 *
2183 * __d_lookup is slightly faster by avoiding rename_lock read seqlock,
2184 * however it must be used carefully, eg. with a following d_lookup in
2185 * the case of failure.
2186 *
2187 * __d_lookup callers must be commented.
2188 */
2189struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
2190{
2191	unsigned int len = name->len;
2192	unsigned int hash = name->hash;
2193	const unsigned char *str = name->name;
2194	struct hlist_bl_head *b = d_hash(parent, hash);
2195	struct hlist_bl_node *node;
2196	struct dentry *found = NULL;
2197	struct dentry *dentry;
2198
2199	/*
2200	 * Note: There is significant duplication with __d_lookup_rcu which is
2201	 * required to prevent single threaded performance regressions
2202	 * especially on architectures where smp_rmb (in seqcounts) is costly.
2203	 * Keep the two functions in sync.
2204	 */
2205
2206	/*
2207	 * The hash list is protected using RCU.
2208	 *
2209	 * Take d_lock when comparing a candidate dentry, to avoid races
2210	 * with d_move().
2211	 *
2212	 * It is possible that concurrent renames can mess up our list
2213	 * walk here and cause us to miss our dentry, resulting in a
2214	 * false-negative result. d_lookup() protects against concurrent
2215	 * renames using rename_lock seqlock.
2216	 *
2217	 * See Documentation/filesystems/path-lookup.txt for more details.
2218	 */
2219	rcu_read_lock();
2220	
2221	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
2222
2223		if (dentry->d_name.hash != hash)
2224			continue;
2225
2226		spin_lock(&dentry->d_lock);
2227		if (dentry->d_parent != parent)
2228			goto next;
2229		if (d_unhashed(dentry))
2230			goto next;
2231
2232		/*
2233		 * It is safe to compare names since d_move() cannot
2234		 * change the qstr (protected by d_lock).
2235		 */
2236		if (parent->d_flags & DCACHE_OP_COMPARE) {
2237			int tlen = dentry->d_name.len;
2238			const char *tname = dentry->d_name.name;
2239			if (parent->d_op->d_compare(parent, dentry, tlen, tname, name))
2240				goto next;
2241		} else {
2242			if (dentry->d_name.len != len)
2243				goto next;
2244			if (dentry_cmp(dentry, str, len))
2245				goto next;
2246		}
2247
2248		dentry->d_lockref.count++;
2249		found = dentry;
2250		spin_unlock(&dentry->d_lock);
2251		break;
2252next:
2253		spin_unlock(&dentry->d_lock);
2254	}
2255	rcu_read_unlock();
2256
2257	return found;
2258}
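
/*
 * Illustrative sketch of the pattern the kernel-doc above describes
 * (a hypothetical caller, not part of this file): tolerate the rare
 * false negative from __d_lookup() by falling back to d_lookup().
 */
static struct dentry *example_lookup_fast_slow(struct dentry *parent,
					       const struct qstr *name)
{
	struct dentry *dentry;

	/* Racy fast path: may miss under concurrent rename activity. */
	dentry = __d_lookup(parent, name);
	if (!dentry)
		/* Slow path closes the race via rename_lock. */
		dentry = d_lookup(parent, name);
	return dentry;
}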
2259
2260/**
2261 * d_hash_and_lookup - hash the qstr then search for a dentry
2262 * @dir: Directory to search in
2263 * @name: qstr of name we wish to find
2264 *
2265 * On lookup failure NULL is returned; on a bad name, ERR_PTR(-error) is returned.
2266 */
2267struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
2268{
2269	/*
2270	 * Check for a fs-specific hash function. Note that we must
2271	 * calculate the standard hash first, as the d_op->d_hash()
2272	 * routine may choose to leave the hash value unchanged.
2273	 */
2274	name->hash = full_name_hash(name->name, name->len);
2275	if (dir->d_flags & DCACHE_OP_HASH) {
2276		int err = dir->d_op->d_hash(dir, name);
2277		if (unlikely(err < 0))
2278			return ERR_PTR(err);
2279	}
2280	return d_lookup(dir, name);
2281}
2282EXPORT_SYMBOL(d_hash_and_lookup);
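
/*
 * Illustrative sketch (hypothetical helper, not part of this file):
 * look up by a plain C string, letting d_hash_and_lookup() apply any
 * fs-specific ->d_hash() before searching.
 */
static struct dentry *example_lookup_name(struct dentry *dir, const char *s)
{
	struct qstr q = QSTR_INIT(s, strlen(s));

	return d_hash_and_lookup(dir, &q);
}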
2283
2284/*
2285 * When a file is deleted, we have two options:
2286 * - turn this dentry into a negative dentry
2287 * - unhash this dentry and free it.
2288 *
2289 * Usually, we want to just turn this into
2290 * a negative dentry, but if anybody else is
2291 * currently using the dentry or the inode
2292 * we can't do that and we fall back on removing
2293 * it from the hash queues and waiting for
2294 * it to be deleted later when it has no users.
2295 */
2296 
2297/**
2298 * d_delete - delete a dentry
2299 * @dentry: The dentry to delete
2300 *
2301 * Turn the dentry into a negative dentry if possible, otherwise
2302 * remove it from the hash queues so it can be deleted later
2303 */
2304 
2305void d_delete(struct dentry * dentry)
2306{
2307	struct inode *inode;
2308	int isdir = 0;
2309	/*
2310	 * Are we the only user?
2311	 */
2312again:
2313	spin_lock(&dentry->d_lock);
2314	inode = dentry->d_inode;
2315	isdir = S_ISDIR(inode->i_mode);
2316	if (dentry->d_lockref.count == 1) {
2317		if (!spin_trylock(&inode->i_lock)) {
2318			spin_unlock(&dentry->d_lock);
2319			cpu_relax();
2320			goto again;
2321		}
2322		dentry->d_flags &= ~DCACHE_CANT_MOUNT;
2323		dentry_unlink_inode(dentry);
2324		fsnotify_nameremove(dentry, isdir);
2325		return;
2326	}
2327
2328	if (!d_unhashed(dentry))
2329		__d_drop(dentry);
2330
2331	spin_unlock(&dentry->d_lock);
2332
2333	fsnotify_nameremove(dentry, isdir);
2334}
2335EXPORT_SYMBOL(d_delete);
2336
2337static void __d_rehash(struct dentry * entry, struct hlist_bl_head *b)
2338{
2339	BUG_ON(!d_unhashed(entry));
2340	hlist_bl_lock(b);
2341	entry->d_flags |= DCACHE_RCUACCESS;
2342	hlist_bl_add_head_rcu(&entry->d_hash, b);
2343	hlist_bl_unlock(b);
2344}
2345
2346static void _d_rehash(struct dentry * entry)
2347{
2348	__d_rehash(entry, d_hash(entry->d_parent, entry->d_name.hash));
2349}
2350
2351/**
2352 * d_rehash	- add an entry back to the hash
2353 * @entry: dentry to add to the hash
2354 *
2355 * Adds a dentry to the hash according to its name.
2356 */
2357 
2358void d_rehash(struct dentry * entry)
2359{
2360	spin_lock(&entry->d_lock);
2361	_d_rehash(entry);
2362	spin_unlock(&entry->d_lock);
2363}
2364EXPORT_SYMBOL(d_rehash);
2365
2366
2367/* inode->i_lock held if inode is non-NULL */
2368
2369static inline void __d_add(struct dentry *dentry, struct inode *inode)
2370{
2371	if (inode) {
2372		__d_instantiate(dentry, inode);
2373		spin_unlock(&inode->i_lock);
2374	}
2375	security_d_instantiate(dentry, inode);
2376	d_rehash(dentry);
2377}
2378
2379/**
2380 * d_add - add dentry to hash queues
2381 * @entry: dentry to add
2382 * @inode: The inode to attach to this dentry
2383 *
2384 * This adds the entry to the hash queues and initializes @inode.
2385 * The entry was actually filled in earlier during d_alloc().
2386 */
2387
2388void d_add(struct dentry *entry, struct inode *inode)
2389{
2390	if (inode)
2391		spin_lock(&inode->i_lock);
2392	__d_add(entry, inode);
2393}
2394EXPORT_SYMBOL(d_add);
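
/*
 * Illustrative ->lookup sketch using d_add() (hypothetical filesystem
 * code, not part of this file): instantiate and hash the dentry (a
 * NULL inode creates a negative entry) and return NULL, the usual
 * ->lookup convention when the passed-in dentry is used as-is.
 */
static struct dentry *example_lookup(struct inode *dir, struct dentry *dentry,
				     unsigned int flags)
{
	struct inode *inode = NULL;	/* fs-specific inode lookup goes here */

	d_add(dentry, inode);
	return NULL;
}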
2395
2396/**
2397 * d_exact_alias - find and hash an exact unhashed alias
2398 * @entry: dentry to add
2399 * @inode: The inode to go with this dentry
2400 *
2401 * If an unhashed dentry with the same name/parent and desired
2402 * inode already exists, hash and return it.  Otherwise, return
2403 * NULL.
2404 *
2405 * Parent directory should be locked.
2406 */
2407struct dentry *d_exact_alias(struct dentry *entry, struct inode *inode)
2408{
2409	struct dentry *alias;
2410	int len = entry->d_name.len;
2411	const char *name = entry->d_name.name;
2412	unsigned int hash = entry->d_name.hash;
2413
2414	spin_lock(&inode->i_lock);
2415	hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
2416		/*
2417		 * Don't need alias->d_lock here, because aliases with
2418		 * d_parent == entry->d_parent are not subject to name or
2419		 * parent changes, because the parent inode i_mutex is held.
2420		 */
2421		if (alias->d_name.hash != hash)
2422			continue;
2423		if (alias->d_parent != entry->d_parent)
2424			continue;
2425		if (alias->d_name.len != len)
2426			continue;
2427		if (dentry_cmp(alias, name, len))
2428			continue;
2429		spin_lock(&alias->d_lock);
2430		if (!d_unhashed(alias)) {
2431			spin_unlock(&alias->d_lock);
2432			alias = NULL;
2433		} else {
2434			__dget_dlock(alias);
2435			_d_rehash(alias);
2436			spin_unlock(&alias->d_lock);
2437		}
2438		spin_unlock(&inode->i_lock);
2439		return alias;
2440	}
2441	spin_unlock(&inode->i_lock);
2442	return NULL;
2443}
2444EXPORT_SYMBOL(d_exact_alias);
2445
2446/**
2447 * dentry_update_name_case - update case insensitive dentry with a new name
2448 * @dentry: dentry to be updated
2449 * @name: new name
2450 *
2451 * Update a case insensitive dentry with new case of name.
2452 *
2453 * dentry must have been returned by d_lookup with name @name. Old and new
2454 * name lengths must match (ie. no d_compare which allows mismatched name
2455 * lengths).
2456 *
2457 * Parent inode i_mutex must be held over d_lookup and into this call (to
2458 * keep renames and concurrent inserts, and readdir(2) away).
2459 */
2460void dentry_update_name_case(struct dentry *dentry, struct qstr *name)
2461{
2462	BUG_ON(!inode_is_locked(dentry->d_parent->d_inode));
2463	BUG_ON(dentry->d_name.len != name->len); /* d_lookup gives this */
2464
2465	spin_lock(&dentry->d_lock);
2466	write_seqcount_begin(&dentry->d_seq);
2467	memcpy((unsigned char *)dentry->d_name.name, name->name, name->len);
2468	write_seqcount_end(&dentry->d_seq);
2469	spin_unlock(&dentry->d_lock);
2470}
2471EXPORT_SYMBOL(dentry_update_name_case);
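
/*
 * Illustrative sketch (hypothetical, not part of this file): a
 * case-insensitive filesystem that found @found via d_lookup() with
 * the user-supplied @name can update the stored case, with the parent
 * i_mutex held as the kernel-doc above requires.
 */
static void example_fixup_case(struct dentry *found, struct qstr *name)
{
	/* Lengths are equal per the d_lookup contract; compare bytes. */
	if (memcmp(found->d_name.name, name->name, name->len) != 0)
		dentry_update_name_case(found, name);
}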
2472
2473static void swap_names(struct dentry *dentry, struct dentry *target)
2474{
2475	if (unlikely(dname_external(target))) {
2476		if (unlikely(dname_external(dentry))) {
2477			/*
2478			 * Both external: swap the pointers
2479			 */
2480			swap(target->d_name.name, dentry->d_name.name);
2481		} else {
2482			/*
2483			 * dentry:internal, target:external.  Steal target's
2484			 * storage and make target internal.
2485			 */
2486			memcpy(target->d_iname, dentry->d_name.name,
2487					dentry->d_name.len + 1);
2488			dentry->d_name.name = target->d_name.name;
2489			target->d_name.name = target->d_iname;
2490		}
2491	} else {
2492		if (unlikely(dname_external(dentry))) {
2493			/*
2494			 * dentry:external, target:internal.  Give dentry's
2495			 * storage to target and make dentry internal
2496			 */
2497			memcpy(dentry->d_iname, target->d_name.name,
2498					target->d_name.len + 1);
2499			target->d_name.name = dentry->d_name.name;
2500			dentry->d_name.name = dentry->d_iname;
2501		} else {
2502			/*
2503			 * Both are internal.
2504			 */
2505			unsigned int i;
2506			BUILD_BUG_ON(!IS_ALIGNED(DNAME_INLINE_LEN, sizeof(long)));
2507			kmemcheck_mark_initialized(dentry->d_iname, DNAME_INLINE_LEN);
2508			kmemcheck_mark_initialized(target->d_iname, DNAME_INLINE_LEN);
2509			for (i = 0; i < DNAME_INLINE_LEN / sizeof(long); i++) {
2510				swap(((long *) &dentry->d_iname)[i],
2511				     ((long *) &target->d_iname)[i]);
2512			}
2513		}
2514	}
2515	swap(dentry->d_name.hash_len, target->d_name.hash_len);
2516}
2517
2518static void copy_name(struct dentry *dentry, struct dentry *target)
2519{
2520	struct external_name *old_name = NULL;
2521	if (unlikely(dname_external(dentry)))
2522		old_name = external_name(dentry);
2523	if (unlikely(dname_external(target))) {
2524		atomic_inc(&external_name(target)->u.count);
2525		dentry->d_name = target->d_name;
2526	} else {
2527		memcpy(dentry->d_iname, target->d_name.name,
2528				target->d_name.len + 1);
2529		dentry->d_name.name = dentry->d_iname;
2530		dentry->d_name.hash_len = target->d_name.hash_len;
2531	}
2532	if (old_name && likely(atomic_dec_and_test(&old_name->u.count)))
2533		kfree_rcu(old_name, u.head);
2534}
2535
2536static void dentry_lock_for_move(struct dentry *dentry, struct dentry *target)
2537{
2538	/*
2539	 * XXXX: do we really need to take target->d_lock?
2540	 */
2541	if (IS_ROOT(dentry) || dentry->d_parent == target->d_parent)
2542		spin_lock(&target->d_parent->d_lock);
2543	else {
2544		if (d_ancestor(dentry->d_parent, target->d_parent)) {
2545			spin_lock(&dentry->d_parent->d_lock);
2546			spin_lock_nested(&target->d_parent->d_lock,
2547						DENTRY_D_LOCK_NESTED);
2548		} else {
2549			spin_lock(&target->d_parent->d_lock);
2550			spin_lock_nested(&dentry->d_parent->d_lock,
2551						DENTRY_D_LOCK_NESTED);
2552		}
2553	}
2554	if (target < dentry) {
2555		spin_lock_nested(&target->d_lock, 2);
2556		spin_lock_nested(&dentry->d_lock, 3);
2557	} else {
2558		spin_lock_nested(&dentry->d_lock, 2);
2559		spin_lock_nested(&target->d_lock, 3);
2560	}
2561}
2562
2563static void dentry_unlock_for_move(struct dentry *dentry, struct dentry *target)
2564{
2565	if (target->d_parent != dentry->d_parent)
2566		spin_unlock(&dentry->d_parent->d_lock);
2567	if (target->d_parent != target)
2568		spin_unlock(&target->d_parent->d_lock);
2569	spin_unlock(&target->d_lock);
2570	spin_unlock(&dentry->d_lock);
2571}
2572
2573/*
2574 * When switching names, the actual string doesn't strictly have to
2575 * be preserved in the target - because we're dropping the target
2576 * anyway. As such, we can just do a simple memcpy() to copy over
2577 * the new name before we switch, unless we are going to rehash
2578 * it.  Note that if we *do* unhash the target, we are not allowed
2579 * to rehash it without giving it a new name/hash key - whether
2580 * we swap or overwrite the names here, the resulting name won't match
2581 * the reality in the filesystem; it's only there for d_path() purposes.
2582 * Note that all of this is happening under rename_lock, so any
2583 * hash lookup seeing it in the middle of manipulations will
2584 * be discarded anyway.  So we do not care what happens to the hash
2585 * key in that case.
2586 */
2587/*
2588 * __d_move - move a dentry
2589 * @dentry: entry to move
2590 * @target: new dentry
2591 * @exchange: exchange the two dentries
2592 *
2593 * Update the dcache to reflect the move of a file name. Negative
2594 * dcache entries should not be moved in this way. Caller must hold
2595 * rename_lock, the i_mutex of the source and target directories,
2596 * and the sb->s_vfs_rename_mutex if they differ. See lock_rename().
2597 */
2598static void __d_move(struct dentry *dentry, struct dentry *target,
2599		     bool exchange)
2600{
2601	if (!dentry->d_inode)
2602		printk(KERN_WARNING "VFS: moving negative dcache entry\n");
2603
2604	BUG_ON(d_ancestor(dentry, target));
2605	BUG_ON(d_ancestor(target, dentry));
2606
2607	dentry_lock_for_move(dentry, target);
2608
2609	write_seqcount_begin(&dentry->d_seq);
2610	write_seqcount_begin_nested(&target->d_seq, DENTRY_D_LOCK_NESTED);
2611
2612	/* __d_drop does write_seqcount_barrier, but they're OK to nest. */
2613
2614	/*
2615	 * Move the dentry to the target hash queue. Don't bother checking
2616	 * for the same hash queue because of how unlikely it is.
2617	 */
2618	__d_drop(dentry);
2619	__d_rehash(dentry, d_hash(target->d_parent, target->d_name.hash));
2620
2621	/*
2622	 * Unhash the target (d_delete() is not usable here).  If exchanging
2623	 * the two dentries, then rehash onto the other's hash queue.
2624	 */
2625	__d_drop(target);
2626	if (exchange) {
2627		__d_rehash(target,
2628			   d_hash(dentry->d_parent, dentry->d_name.hash));
2629	}
2630
2631	/* Switch the names.. */
2632	if (exchange)
2633		swap_names(dentry, target);
2634	else
2635		copy_name(dentry, target);
2636
2637	/* ... and switch them in the tree */
2638	if (IS_ROOT(dentry)) {
2639		/* splicing a tree */
2640		dentry->d_parent = target->d_parent;
2641		target->d_parent = target;
2642		list_del_init(&target->d_child);
2643		list_move(&dentry->d_child, &dentry->d_parent->d_subdirs);
2644	} else {
2645		/* swapping two dentries */
2646		swap(dentry->d_parent, target->d_parent);
2647		list_move(&target->d_child, &target->d_parent->d_subdirs);
2648		list_move(&dentry->d_child, &dentry->d_parent->d_subdirs);
2649		if (exchange)
2650			fsnotify_d_move(target);
2651		fsnotify_d_move(dentry);
2652	}
2653
2654	write_seqcount_end(&target->d_seq);
2655	write_seqcount_end(&dentry->d_seq);
2656
2657	dentry_unlock_for_move(dentry, target);
2658}
2659
2660/**
2661 * d_move - move a dentry
2662 * @dentry: entry to move
2663 * @target: new dentry
2664 *
2665 * Update the dcache to reflect the move of a file name. Negative
2666 * dcache entries should not be moved in this way. See the locking
2667 * requirements for __d_move.
2668 */
2669void d_move(struct dentry *dentry, struct dentry *target)
2670{
2671	write_seqlock(&rename_lock);
2672	__d_move(dentry, target, false);
2673	write_sequnlock(&rename_lock);
2674}
2675EXPORT_SYMBOL(d_move);
2676
2677/**
2678 * d_exchange - exchange two dentries
2679 * @dentry1: first dentry
2680 * @dentry2: second dentry
2681 */
2682void d_exchange(struct dentry *dentry1, struct dentry *dentry2)
2683{
2684	write_seqlock(&rename_lock);
2685
2686	WARN_ON(!dentry1->d_inode);
2687	WARN_ON(!dentry2->d_inode);
2688	WARN_ON(IS_ROOT(dentry1));
2689	WARN_ON(IS_ROOT(dentry2));
2690
2691	__d_move(dentry1, dentry2, true);
2692
2693	write_sequnlock(&rename_lock);
2694}
2695
2696/**
2697 * d_ancestor - search for an ancestor
2698 * @p1: ancestor dentry
2699 * @p2: child dentry
2700 *
2701 * Returns the ancestor dentry of p2 which is a child of p1, if p1 is
2702 * an ancestor of p2, else NULL.
2703 */
2704struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2)
2705{
2706	struct dentry *p;
2707
2708	for (p = p2; !IS_ROOT(p); p = p->d_parent) {
2709		if (p->d_parent == p1)
2710			return p;
2711	}
2712	return NULL;
2713}
2714
2715/*
2716 * This helper attempts to cope with remotely renamed directories
2717 *
2718 * It assumes that the caller is already holding
2719 * dentry->d_parent->d_inode->i_mutex, and rename_lock
2720 *
2721 * Note: If ever the locking in lock_rename() changes, then please
2722 * remember to update this too...
2723 */
2724static int __d_unalias(struct inode *inode,
2725		struct dentry *dentry, struct dentry *alias)
2726{
2727	struct mutex *m1 = NULL, *m2 = NULL;
2728	int ret = -ESTALE;
2729
2730	/* If alias and dentry share a parent, then no extra locks required */
2731	if (alias->d_parent == dentry->d_parent)
2732		goto out_unalias;
2733
2734	/* See lock_rename() */
2735	if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex))
2736		goto out_err;
2737	m1 = &dentry->d_sb->s_vfs_rename_mutex;
2738	if (!inode_trylock(alias->d_parent->d_inode))
2739		goto out_err;
2740	m2 = &alias->d_parent->d_inode->i_mutex;
2741out_unalias:
2742	__d_move(alias, dentry, false);
2743	ret = 0;
2744out_err:
2745	if (m2)
2746		mutex_unlock(m2);
2747	if (m1)
2748		mutex_unlock(m1);
2749	return ret;
2750}
2751
2752/**
2753 * d_splice_alias - splice a disconnected dentry into the tree if one exists
2754 * @inode:  the inode which may have a disconnected dentry
2755 * @dentry: a negative dentry which we want to point to the inode.
2756 *
2757 * If inode is a directory and has an IS_ROOT alias, then d_move that in
2758 * place of the given dentry and return it, else simply d_add the inode
2759 * to the dentry and return NULL.
2760 *
2761 * If a non-IS_ROOT directory is found, the filesystem is corrupt, and
2762 * we should error out: directories can't have multiple aliases.
2763 *
2764 * This is needed in the lookup routine of any filesystem that is exportable
2765 * (via knfsd) so that we can build dcache paths to directories effectively.
2766 *
2767 * If a dentry was found and moved, then it is returned.  Otherwise NULL
2768 * is returned.  This matches the expected return value of ->lookup.
2769 *
2770 * Cluster filesystems may call this function with a negative, hashed dentry.
2771 * In that case, we know that the inode will be a regular file, and also this
2772 * will only occur during atomic_open. So we need to check for the dentry
2773 * being already hashed only in the final case.
2774 */
2775struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
2776{
2777	if (IS_ERR(inode))
2778		return ERR_CAST(inode);
2779
2780	BUG_ON(!d_unhashed(dentry));
2781
2782	if (!inode)
2783		goto out;
2784
2785	spin_lock(&inode->i_lock);
2786	if (S_ISDIR(inode->i_mode)) {
2787		struct dentry *new = __d_find_any_alias(inode);
2788		if (unlikely(new)) {
2789			/* The reference to new ensures it remains an alias */
2790			spin_unlock(&inode->i_lock);
2791			write_seqlock(&rename_lock);
2792			if (unlikely(d_ancestor(new, dentry))) {
2793				write_sequnlock(&rename_lock);
2794				dput(new);
2795				new = ERR_PTR(-ELOOP);
2796				pr_warn_ratelimited(
2797					"VFS: Lookup of '%s' in %s %s"
2798					" would have caused loop\n",
2799					dentry->d_name.name,
2800					inode->i_sb->s_type->name,
2801					inode->i_sb->s_id);
2802			} else if (!IS_ROOT(new)) {
2803				int err = __d_unalias(inode, dentry, new);
2804				write_sequnlock(&rename_lock);
2805				if (err) {
2806					dput(new);
2807					new = ERR_PTR(err);
2808				}
2809			} else {
2810				__d_move(new, dentry, false);
2811				write_sequnlock(&rename_lock);
2812				security_d_instantiate(new, inode);
2813			}
2814			iput(inode);
2815			return new;
2816		}
2817	}
2818out:
2819	__d_add(dentry, inode);
2820	return NULL;
2821}
2822EXPORT_SYMBOL(d_splice_alias);
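
/*
 * Illustrative ->lookup sketch for an exportable filesystem
 * (hypothetical code, not part of this file): hand d_splice_alias()
 * the inode (which may be NULL or an ERR_PTR) and return its result
 * directly, matching the ->lookup convention described above.
 */
static struct dentry *example_nfs_style_lookup(struct inode *dir,
					       struct dentry *dentry,
					       unsigned int flags)
{
	struct inode *inode = NULL;	/* fs-specific inode lookup goes here */

	return d_splice_alias(inode, dentry);
}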
2823
2824static int prepend(char **buffer, int *buflen, const char *str, int namelen)
2825{
2826	*buflen -= namelen;
2827	if (*buflen < 0)
2828		return -ENAMETOOLONG;
2829	*buffer -= namelen;
2830	memcpy(*buffer, str, namelen);
2831	return 0;
2832}
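
/*
 * Illustrative sketch (hypothetical helper, not part of this file):
 * prepend() fills the buffer from the end, so callers reserve the NUL
 * first and the finished string starts at the final buffer pointer,
 * not at buf[0].
 */
static char *example_build_path(char *buf, int buflen)
{
	char *p = buf + buflen;

	if (prepend(&p, &buflen, "\0", 1) ||
	    prepend(&p, &buflen, "bar", 3) ||
	    prepend(&p, &buflen, "/", 1) ||
	    prepend(&p, &buflen, "foo", 3) ||
	    prepend(&p, &buflen, "/", 1))
		return ERR_PTR(-ENAMETOOLONG);
	return p;			/* now points at "/foo/bar" */
}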
2833
2834/**
2835 * prepend_name - prepend a pathname in front of current buffer pointer
2836 * @buffer: buffer pointer
2837 * @buflen: allocated length of the buffer
2838 * @name:   name string and length qstr structure
2839 *
2840 * With RCU path tracing, it may race with d_move(). Use ACCESS_ONCE() to
2841 * make sure that either the old or the new name pointer and length are
2842 * fetched. However, there may be a mismatch between length and pointer.
2843 * The length cannot be trusted; we need to copy it byte by byte until
2844 * the length is reached or a null byte is found. It also prepends "/" at
2845 * the beginning of the name. The sequence number check at the caller will
2846 * retry it when a d_move() does happen, so any garbage in the buffer
2847 * due to mismatched pointer and length will be discarded.
2848 *
2849 * A data dependency barrier is needed to make sure that we see the
2850 * terminating NUL.  Alpha strikes again, film at 11...
2851 */
2852static int prepend_name(char **buffer, int *buflen, struct qstr *name)
2853{
2854	const char *dname = ACCESS_ONCE(name->name);
2855	u32 dlen = ACCESS_ONCE(name->len);
2856	char *p;
2857
2858	smp_read_barrier_depends();
2859
2860	*buflen -= dlen + 1;
2861	if (*buflen < 0)
2862		return -ENAMETOOLONG;
2863	p = *buffer -= dlen + 1;
2864	*p++ = '/';
2865	while (dlen--) {
2866		char c = *dname++;
2867		if (!c)
2868			break;
2869		*p++ = c;
2870	}
2871	return 0;
2872}
2873
2874/**
2875 * prepend_path - Prepend path string to a buffer
2876 * @path: the dentry/vfsmount to report
2877 * @root: root vfsmnt/dentry
2878 * @buffer: pointer to the end of the buffer
2879 * @buflen: pointer to buffer length
2880 *
2881 * The function will first try to write out the pathname without taking any
2882 * lock other than the RCU read lock to make sure that dentries won't go away.
2883 * It only checks the sequence number of the global rename_lock as any change
2884 * in the dentry's d_seq will be preceded by changes in the rename_lock
2885 * sequence number. If the sequence number has changed, it will restart
2886 * the whole pathname back-tracing sequence by taking the rename_lock.
2887 * In this case, there is no need to take the RCU read lock as the recursive
2888 * parent pointer references will keep the dentry chain alive as long as no
2889 * rename operation is performed.
2890 */
2891static int prepend_path(const struct path *path,
2892			const struct path *root,
2893			char **buffer, int *buflen)
2894{
2895	struct dentry *dentry;
2896	struct vfsmount *vfsmnt;
2897	struct mount *mnt;
2898	int error = 0;
2899	unsigned seq, m_seq = 0;
2900	char *bptr;
2901	int blen;
2902
2903	rcu_read_lock();
2904restart_mnt:
2905	read_seqbegin_or_lock(&mount_lock, &m_seq);
2906	seq = 0;
2907	rcu_read_lock();
2908restart:
2909	bptr = *buffer;
2910	blen = *buflen;
2911	error = 0;
2912	dentry = path->dentry;
2913	vfsmnt = path->mnt;
2914	mnt = real_mount(vfsmnt);
2915	read_seqbegin_or_lock(&rename_lock, &seq);
2916	while (dentry != root->dentry || vfsmnt != root->mnt) {
2917		struct dentry * parent;
2918
2919		if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
2920			struct mount *parent = ACCESS_ONCE(mnt->mnt_parent);
2921			/* Escaped? */
2922			if (dentry != vfsmnt->mnt_root) {
2923				bptr = *buffer;
2924				blen = *buflen;
2925				error = 3;
2926				break;
2927			}
2928			/* Global root? */
2929			if (mnt != parent) {
2930				dentry = ACCESS_ONCE(mnt->mnt_mountpoint);
2931				mnt = parent;
2932				vfsmnt = &mnt->mnt;
2933				continue;
2934			}
2935			if (!error)
2936				error = is_mounted(vfsmnt) ? 1 : 2;
2937			break;
2938		}
2939		parent = dentry->d_parent;
2940		prefetch(parent);
2941		error = prepend_name(&bptr, &blen, &dentry->d_name);
2942		if (error)
2943			break;
2944
2945		dentry = parent;
2946	}
2947	if (!(seq & 1))
2948		rcu_read_unlock();
2949	if (need_seqretry(&rename_lock, seq)) {
2950		seq = 1;
2951		goto restart;
2952	}
2953	done_seqretry(&rename_lock, seq);
2954
2955	if (!(m_seq & 1))
2956		rcu_read_unlock();
2957	if (need_seqretry(&mount_lock, m_seq)) {
2958		m_seq = 1;
2959		goto restart_mnt;
2960	}
2961	done_seqretry(&mount_lock, m_seq);
2962
2963	if (error >= 0 && bptr == *buffer) {
2964		if (--blen < 0)
2965			error = -ENAMETOOLONG;
2966		else
2967			*--bptr = '/';
2968	}
2969	*buffer = bptr;
2970	*buflen = blen;
2971	return error;
2972}
2973
2974/**
2975 * __d_path - return the path of a dentry
2976 * @path: the dentry/vfsmount to report
2977 * @root: root vfsmnt/dentry
2978 * @buf: buffer to return value in
2979 * @buflen: buffer length
2980 *
2981 * Convert a dentry into an ASCII path name.
2982 *
2983 * Returns a pointer into the buffer or an error code if the
2984 * path was too long.
2985 *
2986 * "buflen" should be positive.
2987 *
2988 * If the path is not reachable from the supplied root, return %NULL.
2989 */
2990char *__d_path(const struct path *path,
2991	       const struct path *root,
2992	       char *buf, int buflen)
2993{
2994	char *res = buf + buflen;
2995	int error;
2996
2997	prepend(&res, &buflen, "\0", 1);
2998	error = prepend_path(path, root, &res, &buflen);
2999
3000	if (error < 0)
3001		return ERR_PTR(error);
3002	if (error > 0)
3003		return NULL;
3004	return res;
3005}
3006
3007char *d_absolute_path(const struct path *path,
3008	       char *buf, int buflen)
3009{
3010	struct path root = {};
3011	char *res = buf + buflen;
3012	int error;
3013
3014	prepend(&res, &buflen, "\0", 1);
3015	error = prepend_path(path, &root, &res, &buflen);
3016
3017	if (error > 1)
3018		error = -EINVAL;
3019	if (error < 0)
3020		return ERR_PTR(error);
3021	return res;
3022}
3023
3024/*
3025 * Same as __d_path but appends " (deleted)" for unlinked files.
3026 */
3027static int path_with_deleted(const struct path *path,
3028			     const struct path *root,
3029			     char **buf, int *buflen)
3030{
3031	prepend(buf, buflen, "\0", 1);
3032	if (d_unlinked(path->dentry)) {
3033		int error = prepend(buf, buflen, " (deleted)", 10);
3034		if (error)
3035			return error;
3036	}
3037
3038	return prepend_path(path, root, buf, buflen);
3039}
3040
3041static int prepend_unreachable(char **buffer, int *buflen)
3042{
3043	return prepend(buffer, buflen, "(unreachable)", 13);
3044}
3045
3046static void get_fs_root_rcu(struct fs_struct *fs, struct path *root)
3047{
3048	unsigned seq;
3049
3050	do {
3051		seq = read_seqcount_begin(&fs->seq);
3052		*root = fs->root;
3053	} while (read_seqcount_retry(&fs->seq, seq));
3054}
3055
3056/**
3057 * d_path - return the path of a dentry
3058 * @path: path to report
3059 * @buf: buffer to return value in
3060 * @buflen: buffer length
3061 *
3062 * Convert a dentry into an ASCII path name. If the entry has been deleted
3063 * the string " (deleted)" is appended. Note that this is ambiguous.
3064 *
3065 * Returns a pointer into the buffer or an error code if the path was
3066 * too long. Note: Callers should use the returned pointer, not the passed
3067 * in buffer, to use the name! The implementation often starts at an offset
3068 * into the buffer, and may leave 0 bytes at the start.
3069 *
3070 * "buflen" should be positive.
3071 */
3072char *d_path(const struct path *path, char *buf, int buflen)
3073{
3074	char *res = buf + buflen;
3075	struct path root;
3076	int error;
3077
3078	/*
3079	 * We have various synthetic filesystems that never get mounted.  On
3080	 * these filesystems dentries are never used for lookup purposes, and
3081	 * thus don't need to be hashed.  They also don't need a name until a
3082	 * user wants to identify the object in /proc/pid/fd/.  The little hack
3083	 * below allows us to generate a name for these objects on demand:
3084	 *
3085	 * Some pseudo inodes are mountable.  When they are mounted
3086	 * path->dentry == path->mnt->mnt_root.  In that case don't call d_dname
3087	 * and instead have d_path return the mounted path.
3088	 */
3089	if (path->dentry->d_op && path->dentry->d_op->d_dname &&
3090	    (!IS_ROOT(path->dentry) || path->dentry != path->mnt->mnt_root))
3091		return path->dentry->d_op->d_dname(path->dentry, buf, buflen);
3092
3093	rcu_read_lock();
3094	get_fs_root_rcu(current->fs, &root);
3095	error = path_with_deleted(path, &root, &res, &buflen);
3096	rcu_read_unlock();
3097
3098	if (error < 0)
3099		res = ERR_PTR(error);
3100	return res;
3101}
3102EXPORT_SYMBOL(d_path);
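
/*
 * Illustrative caller sketch (hypothetical, not part of this file):
 * d_path() returns a pointer *into* the buffer, so use the returned
 * pointer before freeing the buffer, never the buffer start.
 */
static void example_log_path(const struct path *path)
{
	char *buf = __getname();
	char *p;

	if (!buf)
		return;
	p = d_path(path, buf, PATH_MAX);
	if (!IS_ERR(p))
		pr_info("path: %s\n", p);
	__putname(buf);
}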
3103
3104/*
3105 * Helper function for dentry_operations.d_dname() members
3106 */
3107char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen,
3108			const char *fmt, ...)
3109{
3110	va_list args;
3111	char temp[64];
3112	int sz;
3113
3114	va_start(args, fmt);
3115	sz = vsnprintf(temp, sizeof(temp), fmt, args) + 1;
3116	va_end(args);
3117
3118	if (sz > sizeof(temp) || sz > buflen)
3119		return ERR_PTR(-ENAMETOOLONG);
3120
3121	buffer += buflen - sz;
3122	return memcpy(buffer, temp, sz);
3123}
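
/*
 * Illustrative d_dname sketch built on dynamic_dname() (hypothetical,
 * modelled on what pipefs/sockfs-style filesystems do; the
 * "example:[%lu]" format is an assumption for illustration only):
 */
static char *example_dname(struct dentry *dentry, char *buffer, int buflen)
{
	return dynamic_dname(dentry, buffer, buflen, "example:[%lu]",
				dentry->d_inode->i_ino);
}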
3124
3125char *simple_dname(struct dentry *dentry, char *buffer, int buflen)
3126{
3127	char *end = buffer + buflen;
3128	/* these dentries are never renamed, so d_lock is not needed */
3129	if (prepend(&end, &buflen, " (deleted)", 11) ||
3130	    prepend(&end, &buflen, dentry->d_name.name, dentry->d_name.len) ||
3131	    prepend(&end, &buflen, "/", 1))  
3132		end = ERR_PTR(-ENAMETOOLONG);
3133	return end;
3134}
3135EXPORT_SYMBOL(simple_dname);
3136
3137/*
3138 * Write full pathname from the root of the filesystem into the buffer.
3139 */
3140static char *__dentry_path(struct dentry *d, char *buf, int buflen)
3141{
3142	struct dentry *dentry;
3143	char *end, *retval;
3144	int len, seq = 0;
3145	int error = 0;
3146
3147	if (buflen < 2)
3148		goto Elong;
3149
3150	rcu_read_lock();
3151restart:
3152	dentry = d;
3153	end = buf + buflen;
3154	len = buflen;
3155	prepend(&end, &len, "\0", 1);
3156	/* Get '/' right */
3157	retval = end-1;
3158	*retval = '/';
3159	read_seqbegin_or_lock(&rename_lock, &seq);
3160	while (!IS_ROOT(dentry)) {
3161		struct dentry *parent = dentry->d_parent;
3162
3163		prefetch(parent);
3164		error = prepend_name(&end, &len, &dentry->d_name);
3165		if (error)
3166			break;
3167
3168		retval = end;
3169		dentry = parent;
3170	}
3171	if (!(seq & 1))
3172		rcu_read_unlock();
3173	if (need_seqretry(&rename_lock, seq)) {
3174		seq = 1;
3175		goto restart;
3176	}
3177	done_seqretry(&rename_lock, seq);
3178	if (error)
3179		goto Elong;
3180	return retval;
3181Elong:
3182	return ERR_PTR(-ENAMETOOLONG);
3183}
3184
3185char *dentry_path_raw(struct dentry *dentry, char *buf, int buflen)
3186{
3187	return __dentry_path(dentry, buf, buflen);
3188}
3189EXPORT_SYMBOL(dentry_path_raw);
3190
3191char *dentry_path(struct dentry *dentry, char *buf, int buflen)
3192{
3193	char *p = NULL;
3194	char *retval;
3195
3196	if (d_unlinked(dentry)) {
3197		p = buf + buflen;
3198		if (prepend(&p, &buflen, "//deleted", 10) != 0)
3199			goto Elong;
3200		buflen++;
3201	}
3202	retval = __dentry_path(dentry, buf, buflen);
3203	if (!IS_ERR(retval) && p)
3204		*p = '/';	/* restore '/' overriden with '\0' */
3205	return retval;
3206Elong:
3207	return ERR_PTR(-ENAMETOOLONG);
3208}
3209
3210static void get_fs_root_and_pwd_rcu(struct fs_struct *fs, struct path *root,
3211				    struct path *pwd)
3212{
3213	unsigned seq;
3214
3215	do {
3216		seq = read_seqcount_begin(&fs->seq);
3217		*root = fs->root;
3218		*pwd = fs->pwd;
3219	} while (read_seqcount_retry(&fs->seq, seq));
3220}
3221
3222/*
3223 * NOTE! The user-level library version returns a
3224 * character pointer. The kernel system call just
3225 * returns the length of the buffer filled (which
3226 * includes the ending '\0' character), or a negative
3227 * error value. So libc would do something like
3228 *
3229 *	char *getcwd(char * buf, size_t size)
3230 *	{
3231 *		int retval;
3232 *
3233 *		retval = sys_getcwd(buf, size);
3234 *		if (retval >= 0)
3235 *			return buf;
3236 *		errno = -retval;
3237 *		return NULL;
3238 *	}
3239 */
3240SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
3241{
3242	int error;
3243	struct path pwd, root;
3244	char *page = __getname();
3245
3246	if (!page)
3247		return -ENOMEM;
3248
3249	rcu_read_lock();
3250	get_fs_root_and_pwd_rcu(current->fs, &root, &pwd);
3251
3252	error = -ENOENT;
3253	if (!d_unlinked(pwd.dentry)) {
3254		unsigned long len;
3255		char *cwd = page + PATH_MAX;
3256		int buflen = PATH_MAX;
3257
3258		prepend(&cwd, &buflen, "\0", 1);
3259		error = prepend_path(&pwd, &root, &cwd, &buflen);
3260		rcu_read_unlock();
3261
3262		if (error < 0)
3263			goto out;
3264
3265		/* Unreachable from current root */
3266		if (error > 0) {
3267			error = prepend_unreachable(&cwd, &buflen);
3268			if (error)
3269				goto out;
3270		}
3271
3272		error = -ERANGE;
3273		len = PATH_MAX + page - cwd;
3274		if (len <= size) {
3275			error = len;
3276			if (copy_to_user(buf, cwd, len))
3277				error = -EFAULT;
3278		}
3279	} else {
3280		rcu_read_unlock();
3281	}
3282
3283out:
3284	__putname(page);
3285	return error;
3286}
3287
3288/*
3289 * Test whether new_dentry is a subdirectory of old_dentry.
3290 *
3291 * Trivially implemented using the dcache structure
3292 */
3293
3294/**
3295 * is_subdir - is new dentry a subdirectory of old_dentry
3296 * @new_dentry: new dentry
3297 * @old_dentry: old dentry
3298 *
3299 * Returns true if new_dentry is a subdirectory of the parent (at any depth).
3300 * Returns false otherwise.
3301 * Caller must ensure that "new_dentry" is pinned before calling is_subdir()
3302 */
3303  
3304bool is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
3305{
3306	bool result;
3307	unsigned seq;
3308
3309	if (new_dentry == old_dentry)
3310		return true;
3311
3312	do {
3313		/* for restarting inner loop in case of seq retry */
3314		seq = read_seqbegin(&rename_lock);
3315		/*
3316		 * Need rcu_read_lock() to protect against d_parent being
3317		 * changed by a concurrent d_move()
3318		 */
3319		rcu_read_lock();
3320		if (d_ancestor(old_dentry, new_dentry))
3321			result = true;
3322		else
3323			result = false;
3324		rcu_read_unlock();
3325	} while (read_seqretry(&rename_lock, seq));
3326
3327	return result;
3328}
3329
3330static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
3331{
3332	struct dentry *root = data;
3333	if (dentry != root) {
3334		if (d_unhashed(dentry) || !dentry->d_inode)
3335			return D_WALK_SKIP;
3336
3337		if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
3338			dentry->d_flags |= DCACHE_GENOCIDE;
3339			dentry->d_lockref.count--;
3340		}
3341	}
3342	return D_WALK_CONTINUE;
3343}
3344
3345void d_genocide(struct dentry *parent)
3346{
3347	d_walk(parent, parent, d_genocide_kill, NULL);
3348}
3349
3350void d_tmpfile(struct dentry *dentry, struct inode *inode)
3351{
3352	inode_dec_link_count(inode);
3353	BUG_ON(dentry->d_name.name != dentry->d_iname ||
3354		!hlist_unhashed(&dentry->d_u.d_alias) ||
3355		!d_unlinked(dentry));
3356	spin_lock(&dentry->d_parent->d_lock);
3357	spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
3358	dentry->d_name.len = sprintf(dentry->d_iname, "#%llu",
3359				(unsigned long long)inode->i_ino);
3360	spin_unlock(&dentry->d_lock);
3361	spin_unlock(&dentry->d_parent->d_lock);
3362	d_instantiate(dentry, inode);
3363}
3364EXPORT_SYMBOL(d_tmpfile);
3365
3366static __initdata unsigned long dhash_entries;
3367static int __init set_dhash_entries(char *str)
3368{
3369	if (!str)
3370		return 0;
3371	dhash_entries = simple_strtoul(str, &str, 0);
3372	return 1;
3373}
3374__setup("dhash_entries=", set_dhash_entries);
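
/*
 * Example (hypothetical value): booting with "dhash_entries=1048576"
 * sizes the dentry hash table explicitly instead of letting
 * alloc_large_system_hash() scale it from available memory.
 */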
3375
3376static void __init dcache_init_early(void)
3377{
3378	unsigned int loop;
3379
3380	/* If hashes are distributed across NUMA nodes, defer
3381	 * hash allocation until vmalloc space is available.
3382	 */
3383	if (hashdist)
3384		return;
3385
3386	dentry_hashtable =
3387		alloc_large_system_hash("Dentry cache",
3388					sizeof(struct hlist_bl_head),
3389					dhash_entries,
3390					13,
3391					HASH_EARLY,
3392					&d_hash_shift,
3393					&d_hash_mask,
3394					0,
3395					0);
3396
3397	for (loop = 0; loop < (1U << d_hash_shift); loop++)
3398		INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
3399}
3400
3401static void __init dcache_init(void)
3402{
3403	unsigned int loop;
3404
3405	/* 
3406	 * A constructor could be added for stable state like the lists,
3407	 * but it is probably not worth it because of the cache nature
3408	 * of the dcache. 
3409	 */
3410	dentry_cache = KMEM_CACHE(dentry,
3411		SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD|SLAB_ACCOUNT);
3412
3413	/* Hash may have been set up in dcache_init_early */
3414	if (!hashdist)
3415		return;
3416
3417	dentry_hashtable =
3418		alloc_large_system_hash("Dentry cache",
3419					sizeof(struct hlist_bl_head),
3420					dhash_entries,
3421					13,
3422					0,
3423					&d_hash_shift,
3424					&d_hash_mask,
3425					0,
3426					0);
3427
3428	for (loop = 0; loop < (1U << d_hash_shift); loop++)
3429		INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
3430}
3431
3432/* SLAB cache for __getname() consumers */
3433struct kmem_cache *names_cachep __read_mostly;
3434EXPORT_SYMBOL(names_cachep);
3435
3436EXPORT_SYMBOL(d_genocide);
3437
3438void __init vfs_caches_init_early(void)
3439{
3440	dcache_init_early();
3441	inode_init_early();
3442}
3443
3444void __init vfs_caches_init(void)
3445{
3446	names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
3447			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
3448
3449	dcache_init();
3450	inode_init();
3451	files_init();
3452	files_maxfiles_init();
3453	mnt_init();
3454	bdev_cache_init();
3455	chrdev_init();
3456}