   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * fs/dcache.c
   4 *
   5 * Complete reimplementation
   6 * (C) 1997 Thomas Schoebel-Theuer,
   7 * with heavy changes by Linus Torvalds
   8 */
   9
  10/*
  11 * Notes on the allocation strategy:
  12 *
  13 * The dcache is a master of the icache - whenever a dcache entry
  14 * exists, the inode will always exist. "iput()" is done either when
  15 * the dcache entry is deleted or garbage collected.
  16 */
  17
  18#include <linux/ratelimit.h>
  19#include <linux/string.h>
  20#include <linux/mm.h>
  21#include <linux/fs.h>
  22#include <linux/fscrypt.h>
  23#include <linux/fsnotify.h>
  24#include <linux/slab.h>
  25#include <linux/init.h>
  26#include <linux/hash.h>
  27#include <linux/cache.h>
  28#include <linux/export.h>
  29#include <linux/security.h>
  30#include <linux/seqlock.h>
  31#include <linux/memblock.h>
  32#include <linux/bit_spinlock.h>
  33#include <linux/rculist_bl.h>
  34#include <linux/list_lru.h>
  35#include "internal.h"
  36#include "mount.h"
  37
  38#include <asm/runtime-const.h>
  39
  40/*
  41 * Usage:
  42 * dcache->d_inode->i_lock protects:
  43 *   - i_dentry, d_u.d_alias, d_inode of aliases
  44 * dcache_hash_bucket lock protects:
  45 *   - the dcache hash table
  46 * s_roots bl list spinlock protects:
  47 *   - the s_roots list (see __d_drop)
  48 * dentry->d_sb->s_dentry_lru_lock protects:
  49 *   - the dcache lru lists and counters
  50 * d_lock protects:
  51 *   - d_flags
  52 *   - d_name
  53 *   - d_lru
  54 *   - d_count
  55 *   - d_unhashed()
   56 *   - d_parent and d_children
   57 *   - children's d_sib and d_parent
  58 *   - d_u.d_alias, d_inode
  59 *
  60 * Ordering:
  61 * dentry->d_inode->i_lock
  62 *   dentry->d_lock
  63 *     dentry->d_sb->s_dentry_lru_lock
  64 *     dcache_hash_bucket lock
  65 *     s_roots lock
  66 *
  67 * If there is an ancestor relationship:
  68 * dentry->d_parent->...->d_parent->d_lock
  69 *   ...
  70 *     dentry->d_parent->d_lock
  71 *       dentry->d_lock
  72 *
  73 * If no ancestor relationship:
  74 * arbitrary, since it's serialized on rename_lock
  75 */
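/*
 * Illustration of the ordering above: a path that needs both the inode
 * and the dentry lock must take them outside-in, e.g.:
 *
 *	spin_lock(&inode->i_lock);
 *	spin_lock(&dentry->d_lock);
 *	...
 *	spin_unlock(&dentry->d_lock);
 *	spin_unlock(&inode->i_lock);
 *
 * which is how dentry_unlink_inode() below expects to be entered.
 */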
  76int sysctl_vfs_cache_pressure __read_mostly = 100;
  77EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);
  78
  79__cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);
  80
  81EXPORT_SYMBOL(rename_lock);
  82
  83static struct kmem_cache *dentry_cache __ro_after_init;
  84
  85const struct qstr empty_name = QSTR_INIT("", 0);
  86EXPORT_SYMBOL(empty_name);
  87const struct qstr slash_name = QSTR_INIT("/", 1);
  88EXPORT_SYMBOL(slash_name);
  89const struct qstr dotdot_name = QSTR_INIT("..", 2);
  90EXPORT_SYMBOL(dotdot_name);
  91
  92/*
  93 * This is the single most critical data structure when it comes
  94 * to the dcache: the hashtable for lookups. Somebody should try
  95 * to make this good - I've just made it work.
  96 *
  97 * This hash-function tries to avoid losing too many bits of hash
  98 * information, yet avoid using a prime hash-size or similar.
  99 *
 100 * Marking the variables "used" ensures that the compiler doesn't
 101 * optimize them away completely on architectures with runtime
  102 * constant infrastructure; this allows debuggers to see their
 103 * values. But updating these values has no effect on those arches.
 104 */
 105
 106static unsigned int d_hash_shift __ro_after_init __used;
 107
 108static struct hlist_bl_head *dentry_hashtable __ro_after_init __used;
 109
 110static inline struct hlist_bl_head *d_hash(unsigned long hashlen)
 111{
 112	return runtime_const_ptr(dentry_hashtable) +
 113		runtime_const_shift_right_32(hashlen, d_hash_shift);
 114}
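/*
 * A sketch of the arithmetic: dcache_init() sizes the table to a power
 * of two and stores 32 minus its order in d_hash_shift, so the bucket
 * index is just the top bits of the 32-bit hash.  With 2^20 buckets:
 *
 *	index = (u32)hashlen >> 12;		(hashlen >> d_hash_shift)
 *
 * The runtime_const_*() wrappers let an architecture patch the table
 * base and shift count into the code as immediates once known at boot.
 */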
 115
 116#define IN_LOOKUP_SHIFT 10
 117static struct hlist_bl_head in_lookup_hashtable[1 << IN_LOOKUP_SHIFT];
 118
 119static inline struct hlist_bl_head *in_lookup_hash(const struct dentry *parent,
 120					unsigned int hash)
 121{
 122	hash += (unsigned long) parent / L1_CACHE_BYTES;
 123	return in_lookup_hashtable + hash_32(hash, IN_LOOKUP_SHIFT);
 124}
 125
 126struct dentry_stat_t {
 127	long nr_dentry;
 128	long nr_unused;
 129	long age_limit;		/* age in seconds */
 130	long want_pages;	/* pages requested by system */
 131	long nr_negative;	/* # of unused negative dentries */
 132	long dummy;		/* Reserved for future use */
 133};
 134
 135static DEFINE_PER_CPU(long, nr_dentry);
 136static DEFINE_PER_CPU(long, nr_dentry_unused);
 137static DEFINE_PER_CPU(long, nr_dentry_negative);
 138static int dentry_negative_policy;
 139
 140#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
 141/* Statistics gathering. */
 142static struct dentry_stat_t dentry_stat = {
 143	.age_limit = 45,
 144};
 145
 146/*
 147 * Here we resort to our own counters instead of using generic per-cpu counters
  148 * for consistency with what the vfs inode code does. We expect to get
  149 * better code and performance from having our own specialized counters.
 150 *
 151 * Please note that the loop is done over all possible CPUs, not over all online
 152 * CPUs. The reason for this is that we don't want to play games with CPUs going
 153 * on and off. If one of them goes off, we will just keep their counters.
 154 *
 155 * glommer: See cffbc8a for details, and if you ever intend to change this,
 156 * please update all vfs counters to match.
 157 */
 158static long get_nr_dentry(void)
 159{
 160	int i;
 161	long sum = 0;
 162	for_each_possible_cpu(i)
 163		sum += per_cpu(nr_dentry, i);
 164	return sum < 0 ? 0 : sum;
 165}
 166
 167static long get_nr_dentry_unused(void)
 168{
 169	int i;
 170	long sum = 0;
 171	for_each_possible_cpu(i)
 172		sum += per_cpu(nr_dentry_unused, i);
 173	return sum < 0 ? 0 : sum;
 174}
 175
 176static long get_nr_dentry_negative(void)
 177{
 178	int i;
 179	long sum = 0;
 180
 181	for_each_possible_cpu(i)
 182		sum += per_cpu(nr_dentry_negative, i);
 183	return sum < 0 ? 0 : sum;
 184}
 185
 186static int proc_nr_dentry(const struct ctl_table *table, int write, void *buffer,
 187			  size_t *lenp, loff_t *ppos)
 188{
 189	dentry_stat.nr_dentry = get_nr_dentry();
 190	dentry_stat.nr_unused = get_nr_dentry_unused();
 191	dentry_stat.nr_negative = get_nr_dentry_negative();
 192	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
 193}
 194
 195static struct ctl_table fs_dcache_sysctls[] = {
 196	{
 197		.procname	= "dentry-state",
 198		.data		= &dentry_stat,
 199		.maxlen		= 6*sizeof(long),
 200		.mode		= 0444,
 201		.proc_handler	= proc_nr_dentry,
 202	},
 203	{
 204		.procname	= "dentry-negative",
 205		.data		= &dentry_negative_policy,
 206		.maxlen		= sizeof(dentry_negative_policy),
 207		.mode		= 0644,
 208		.proc_handler	= proc_dointvec_minmax,
 209		.extra1		= SYSCTL_ZERO,
 210		.extra2		= SYSCTL_ONE,
 211	},
 212};
 213
 214static int __init init_fs_dcache_sysctls(void)
 215{
 216	register_sysctl_init("fs", fs_dcache_sysctls);
 217	return 0;
 218}
 219fs_initcall(init_fs_dcache_sysctls);
 220#endif
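/*
 * For reference: the six longs of dentry_stat are exposed, in order, as
 * /proc/sys/fs/dentry-state:
 *
 *	nr_dentry  nr_unused  age_limit  want_pages  nr_negative  dummy
 *
 * Only nr_dentry, nr_unused and nr_negative are recomputed on each read
 * (see proc_nr_dentry() above); the remaining fields are static.
 */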
 221
 222/*
 223 * Compare 2 name strings, return 0 if they match, otherwise non-zero.
 224 * The strings are both count bytes long, and count is non-zero.
 225 */
 226#ifdef CONFIG_DCACHE_WORD_ACCESS
 227
 228#include <asm/word-at-a-time.h>
 229/*
  230 * NOTE! 'cs' and 'scount' come from a dentry, so it has an
 231 * aligned allocation for this particular component. We don't
 232 * strictly need the load_unaligned_zeropad() safety, but it
 233 * doesn't hurt either.
 234 *
 235 * In contrast, 'ct' and 'tcount' can be from a pathname, and do
 236 * need the careful unaligned handling.
 237 */
 238static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
 239{
 240	unsigned long a,b,mask;
 241
 242	for (;;) {
 243		a = read_word_at_a_time(cs);
 244		b = load_unaligned_zeropad(ct);
 245		if (tcount < sizeof(unsigned long))
 246			break;
 247		if (unlikely(a != b))
 248			return 1;
 249		cs += sizeof(unsigned long);
 250		ct += sizeof(unsigned long);
 251		tcount -= sizeof(unsigned long);
 252		if (!tcount)
 253			return 0;
 254	}
 255	mask = bytemask_from_count(tcount);
 256	return unlikely(!!((a ^ b) & mask));
 257}
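/*
 * Worked example, assuming a 64-bit little-endian machine: for a 3-byte
 * tail, bytemask_from_count(3) is 0x0000000000ffffff, so
 *
 *	(a ^ b) & mask
 *
 * is non-zero iff the names differ within the 3 bytes still belonging
 * to the string; any bytes loaded from beyond it are masked off.
 */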
 258
 259#else
 260
 261static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
 262{
 263	do {
 264		if (*cs != *ct)
 265			return 1;
 266		cs++;
 267		ct++;
 268		tcount--;
 269	} while (tcount);
 270	return 0;
 271}
 272
 273#endif
 274
 275static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *ct, unsigned tcount)
 276{
 277	/*
 278	 * Be careful about RCU walk racing with rename:
 279	 * use 'READ_ONCE' to fetch the name pointer.
 280	 *
 281	 * NOTE! Even if a rename will mean that the length
 282	 * was not loaded atomically, we don't care. The
 283	 * RCU walk will check the sequence count eventually,
 284	 * and catch it. And we won't overrun the buffer,
 285	 * because we're reading the name pointer atomically,
 286	 * and a dentry name is guaranteed to be properly
 287	 * terminated with a NUL byte.
 288	 *
 289	 * End result: even if 'len' is wrong, we'll exit
 290	 * early because the data cannot match (there can
 291	 * be no NUL in the ct/tcount data)
 292	 */
 293	const unsigned char *cs = READ_ONCE(dentry->d_name.name);
 294
 295	return dentry_string_cmp(cs, ct, tcount);
 296}
 297
 298struct external_name {
 299	union {
 300		atomic_t count;
 301		struct rcu_head head;
 302	} u;
 303	unsigned char name[];
 304};
 305
 306static inline struct external_name *external_name(struct dentry *dentry)
 307{
 308	return container_of(dentry->d_name.name, struct external_name, name[0]);
 309}
 310
 311static void __d_free(struct rcu_head *head)
 312{
 313	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
 314
 315	kmem_cache_free(dentry_cache, dentry); 
 316}
 317
 318static void __d_free_external(struct rcu_head *head)
 319{
 320	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
 321	kfree(external_name(dentry));
 322	kmem_cache_free(dentry_cache, dentry);
 323}
 324
 325static inline int dname_external(const struct dentry *dentry)
 326{
 327	return dentry->d_name.name != dentry->d_iname;
 328}
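/*
 * In other words: short names live in the d_iname array embedded in
 * struct dentry, long ones in a separately allocated, refcounted
 * struct external_name, and the test above is simply "does d_name.name
 * point outside the dentry?":
 *
 *	d_name.name == dentry->d_iname		inline
 *	d_name.name == &ext->name[0]		external (ext->u.count)
 */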
 329
 330void take_dentry_name_snapshot(struct name_snapshot *name, struct dentry *dentry)
 331{
 332	spin_lock(&dentry->d_lock);
 333	name->name = dentry->d_name;
 334	if (unlikely(dname_external(dentry))) {
 335		atomic_inc(&external_name(dentry)->u.count);
 336	} else {
 337		memcpy(name->inline_name, dentry->d_iname,
 338		       dentry->d_name.len + 1);
 339		name->name.name = name->inline_name;
 340	}
 341	spin_unlock(&dentry->d_lock);
 342}
 343EXPORT_SYMBOL(take_dentry_name_snapshot);
 344
 345void release_dentry_name_snapshot(struct name_snapshot *name)
 346{
 347	if (unlikely(name->name.name != name->inline_name)) {
 348		struct external_name *p;
 349		p = container_of(name->name.name, struct external_name, name[0]);
 350		if (unlikely(atomic_dec_and_test(&p->u.count)))
 351			kfree_rcu(p, u.head);
 352	}
 353}
 354EXPORT_SYMBOL(release_dentry_name_snapshot);
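/*
 * Typical usage (a sketch of what the fsnotify code does): snapshot the
 * name so a concurrent rename cannot change it under the caller:
 *
 *	struct name_snapshot snap;
 *
 *	take_dentry_name_snapshot(&snap, dentry);
 *	... use snap.name without holding d_lock ...
 *	release_dentry_name_snapshot(&snap);
 */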
 355
 356static inline void __d_set_inode_and_type(struct dentry *dentry,
 357					  struct inode *inode,
 358					  unsigned type_flags)
 359{
 360	unsigned flags;
 361
 362	dentry->d_inode = inode;
 363	flags = READ_ONCE(dentry->d_flags);
 364	flags &= ~DCACHE_ENTRY_TYPE;
 365	flags |= type_flags;
 366	smp_store_release(&dentry->d_flags, flags);
 367}
 368
 369static inline void __d_clear_type_and_inode(struct dentry *dentry)
 370{
 371	unsigned flags = READ_ONCE(dentry->d_flags);
 372
 373	flags &= ~DCACHE_ENTRY_TYPE;
 374	WRITE_ONCE(dentry->d_flags, flags);
 375	dentry->d_inode = NULL;
 376	/*
 377	 * The negative counter only tracks dentries on the LRU. Don't inc if
 378	 * d_lru is on another list.
 379	 */
 380	if ((flags & (DCACHE_LRU_LIST|DCACHE_SHRINK_LIST)) == DCACHE_LRU_LIST)
 381		this_cpu_inc(nr_dentry_negative);
 382}
 383
 384static void dentry_free(struct dentry *dentry)
 385{
 386	WARN_ON(!hlist_unhashed(&dentry->d_u.d_alias));
 387	if (unlikely(dname_external(dentry))) {
 388		struct external_name *p = external_name(dentry);
 389		if (likely(atomic_dec_and_test(&p->u.count))) {
 390			call_rcu(&dentry->d_u.d_rcu, __d_free_external);
 391			return;
 392		}
 393	}
 394	/* if dentry was never visible to RCU, immediate free is OK */
 395	if (dentry->d_flags & DCACHE_NORCU)
 396		__d_free(&dentry->d_u.d_rcu);
 397	else
 398		call_rcu(&dentry->d_u.d_rcu, __d_free);
 399}
 400
 401/*
 402 * Release the dentry's inode, using the filesystem
 403 * d_iput() operation if defined.
 404 */
 405static void dentry_unlink_inode(struct dentry * dentry)
 406	__releases(dentry->d_lock)
 407	__releases(dentry->d_inode->i_lock)
 408{
 409	struct inode *inode = dentry->d_inode;
 410
 411	raw_write_seqcount_begin(&dentry->d_seq);
 412	__d_clear_type_and_inode(dentry);
 413	hlist_del_init(&dentry->d_u.d_alias);
 414	raw_write_seqcount_end(&dentry->d_seq);
 415	spin_unlock(&dentry->d_lock);
 416	spin_unlock(&inode->i_lock);
 417	if (!inode->i_nlink)
 418		fsnotify_inoderemove(inode);
 419	if (dentry->d_op && dentry->d_op->d_iput)
 420		dentry->d_op->d_iput(dentry, inode);
 421	else
 422		iput(inode);
 423}
 424
 425/*
 426 * The DCACHE_LRU_LIST bit is set whenever the 'd_lru' entry
 427 * is in use - which includes both the "real" per-superblock
 428 * LRU list _and_ the DCACHE_SHRINK_LIST use.
 429 *
 430 * The DCACHE_SHRINK_LIST bit is set whenever the dentry is
 431 * on the shrink list (ie not on the superblock LRU list).
 432 *
 433 * The per-cpu "nr_dentry_unused" counters are updated with
 434 * the DCACHE_LRU_LIST bit.
 435 *
 436 * The per-cpu "nr_dentry_negative" counters are only updated
 437 * when deleted from or added to the per-superblock LRU list, not
 438 * from/to the shrink list. That is to avoid an unneeded dec/inc
 439 * pair when moving from LRU to shrink list in select_collect().
 440 *
 441 * These helper functions make sure we always follow the
 442 * rules. d_lock must be held by the caller.
 443 */
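/*
 * Summary of the valid flag states maintained below:
 *
 *	(neither flag)				not on any list
 *	DCACHE_LRU_LIST				on the superblock LRU
 *	DCACHE_LRU_LIST|DCACHE_SHRINK_LIST	on a private shrink list
 *
 * DCACHE_SHRINK_LIST alone never occurs; D_FLAG_VERIFY() checks the
 * expected state on entry to each helper.
 */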
 444#define D_FLAG_VERIFY(dentry,x) WARN_ON_ONCE(((dentry)->d_flags & (DCACHE_LRU_LIST | DCACHE_SHRINK_LIST)) != (x))
 445static void d_lru_add(struct dentry *dentry)
 446{
 447	D_FLAG_VERIFY(dentry, 0);
 448	dentry->d_flags |= DCACHE_LRU_LIST;
 449	this_cpu_inc(nr_dentry_unused);
 450	if (d_is_negative(dentry))
 451		this_cpu_inc(nr_dentry_negative);
 452	WARN_ON_ONCE(!list_lru_add_obj(
 453			&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
 454}
 455
 456static void d_lru_del(struct dentry *dentry)
 457{
 458	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
 459	dentry->d_flags &= ~DCACHE_LRU_LIST;
 460	this_cpu_dec(nr_dentry_unused);
 461	if (d_is_negative(dentry))
 462		this_cpu_dec(nr_dentry_negative);
 463	WARN_ON_ONCE(!list_lru_del_obj(
 464			&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
 465}
 466
 467static void d_shrink_del(struct dentry *dentry)
 468{
 469	D_FLAG_VERIFY(dentry, DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
 470	list_del_init(&dentry->d_lru);
 471	dentry->d_flags &= ~(DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
 472	this_cpu_dec(nr_dentry_unused);
 473}
 474
 475static void d_shrink_add(struct dentry *dentry, struct list_head *list)
 476{
 477	D_FLAG_VERIFY(dentry, 0);
 478	list_add(&dentry->d_lru, list);
 479	dentry->d_flags |= DCACHE_SHRINK_LIST | DCACHE_LRU_LIST;
 480	this_cpu_inc(nr_dentry_unused);
 481}
 482
 483/*
 484 * These can only be called under the global LRU lock, ie during the
 485 * callback for freeing the LRU list. "isolate" removes it from the
 486 * LRU lists entirely, while shrink_move moves it to the indicated
 487 * private list.
 488 */
 489static void d_lru_isolate(struct list_lru_one *lru, struct dentry *dentry)
 490{
 491	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
 492	dentry->d_flags &= ~DCACHE_LRU_LIST;
 493	this_cpu_dec(nr_dentry_unused);
 494	if (d_is_negative(dentry))
 495		this_cpu_dec(nr_dentry_negative);
 496	list_lru_isolate(lru, &dentry->d_lru);
 497}
 498
 499static void d_lru_shrink_move(struct list_lru_one *lru, struct dentry *dentry,
 500			      struct list_head *list)
 501{
 502	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
 503	dentry->d_flags |= DCACHE_SHRINK_LIST;
 504	if (d_is_negative(dentry))
 505		this_cpu_dec(nr_dentry_negative);
 506	list_lru_isolate_move(lru, &dentry->d_lru, list);
 507}
 508
 509static void ___d_drop(struct dentry *dentry)
 510{
 511	struct hlist_bl_head *b;
 512	/*
 513	 * Hashed dentries are normally on the dentry hashtable,
 514	 * with the exception of those newly allocated by
 515	 * d_obtain_root, which are always IS_ROOT:
 516	 */
 517	if (unlikely(IS_ROOT(dentry)))
 518		b = &dentry->d_sb->s_roots;
 519	else
 520		b = d_hash(dentry->d_name.hash);
 521
 522	hlist_bl_lock(b);
 523	__hlist_bl_del(&dentry->d_hash);
 524	hlist_bl_unlock(b);
 525}
 526
 527void __d_drop(struct dentry *dentry)
 528{
 529	if (!d_unhashed(dentry)) {
 530		___d_drop(dentry);
 531		dentry->d_hash.pprev = NULL;
 532		write_seqcount_invalidate(&dentry->d_seq);
 533	}
 534}
 535EXPORT_SYMBOL(__d_drop);
 536
 537/**
 538 * d_drop - drop a dentry
 539 * @dentry: dentry to drop
 540 *
 541 * d_drop() unhashes the entry from the parent dentry hashes, so that it won't
 542 * be found through a VFS lookup any more. Note that this is different from
 543 * deleting the dentry - d_delete will try to mark the dentry negative if
 544 * possible, giving a successful _negative_ lookup, while d_drop will
 545 * just make the cache lookup fail.
 546 *
 547 * d_drop() is used mainly for stuff that wants to invalidate a dentry for some
 548 * reason (NFS timeouts or autofs deletes).
 549 *
 550 * __d_drop requires dentry->d_lock
 551 *
 552 * ___d_drop doesn't mark dentry as "unhashed"
 553 * (dentry->d_hash.pprev will be LIST_POISON2, not NULL).
 554 */
 555void d_drop(struct dentry *dentry)
 556{
 557	spin_lock(&dentry->d_lock);
 558	__d_drop(dentry);
 559	spin_unlock(&dentry->d_lock);
 560}
 561EXPORT_SYMBOL(d_drop);
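/*
 * Usage sketch: a filesystem that finds a cached name can no longer be
 * trusted (an NFS revalidation timeout, say) just does
 *
 *	d_drop(dentry);
 *
 * after which hash lookups miss and the next lookup goes back to the
 * filesystem; the dentry itself is freed once its users go away.
 */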
 562
 563static inline void dentry_unlist(struct dentry *dentry)
 564{
 565	struct dentry *next;
 566	/*
 567	 * Inform d_walk() and shrink_dentry_list() that we are no longer
 568	 * attached to the dentry tree
 569	 */
 570	dentry->d_flags |= DCACHE_DENTRY_KILLED;
 571	if (unlikely(hlist_unhashed(&dentry->d_sib)))
 572		return;
 573	__hlist_del(&dentry->d_sib);
 574	/*
 575	 * Cursors can move around the list of children.  While we'd been
 576	 * a normal list member, it didn't matter - ->d_sib.next would've
 577	 * been updated.  However, from now on it won't be and for the
 578	 * things like d_walk() it might end up with a nasty surprise.
 579	 * Normally d_walk() doesn't care about cursors moving around -
 580	 * ->d_lock on parent prevents that and since a cursor has no children
 581	 * of its own, we get through it without ever unlocking the parent.
 582	 * There is one exception, though - if we ascend from a child that
 583	 * gets killed as soon as we unlock it, the next sibling is found
 584	 * using the value left in its ->d_sib.next.  And if _that_
 585	 * pointed to a cursor, and cursor got moved (e.g. by lseek())
 586	 * before d_walk() regains parent->d_lock, we'll end up skipping
 587	 * everything the cursor had been moved past.
 588	 *
 589	 * Solution: make sure that the pointer left behind in ->d_sib.next
 590	 * points to something that won't be moving around.  I.e. skip the
 591	 * cursors.
 592	 */
 593	while (dentry->d_sib.next) {
 594		next = hlist_entry(dentry->d_sib.next, struct dentry, d_sib);
 595		if (likely(!(next->d_flags & DCACHE_DENTRY_CURSOR)))
 596			break;
 597		dentry->d_sib.next = next->d_sib.next;
 598	}
 599}
 600
 601static struct dentry *__dentry_kill(struct dentry *dentry)
 602{
 603	struct dentry *parent = NULL;
 604	bool can_free = true;
 605
 606	/*
 607	 * The dentry is now unrecoverably dead to the world.
 608	 */
 609	lockref_mark_dead(&dentry->d_lockref);
 610
 611	/*
 612	 * inform the fs via d_prune that this dentry is about to be
 613	 * unhashed and destroyed.
 614	 */
 615	if (dentry->d_flags & DCACHE_OP_PRUNE)
 616		dentry->d_op->d_prune(dentry);
 617
 618	if (dentry->d_flags & DCACHE_LRU_LIST) {
 619		if (!(dentry->d_flags & DCACHE_SHRINK_LIST))
 620			d_lru_del(dentry);
 621	}
 622	/* if it was on the hash then remove it */
 623	__d_drop(dentry);
 624	if (dentry->d_inode)
 625		dentry_unlink_inode(dentry);
 626	else
 627		spin_unlock(&dentry->d_lock);
 628	this_cpu_dec(nr_dentry);
 629	if (dentry->d_op && dentry->d_op->d_release)
 630		dentry->d_op->d_release(dentry);
 631
 632	cond_resched();
 633	/* now that it's negative, ->d_parent is stable */
 634	if (!IS_ROOT(dentry)) {
 635		parent = dentry->d_parent;
 636		spin_lock(&parent->d_lock);
 637	}
 638	spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
 639	dentry_unlist(dentry);
 640	if (dentry->d_flags & DCACHE_SHRINK_LIST)
 641		can_free = false;
 642	spin_unlock(&dentry->d_lock);
 643	if (likely(can_free))
 644		dentry_free(dentry);
 645	if (parent && --parent->d_lockref.count) {
 646		spin_unlock(&parent->d_lock);
 647		return NULL;
 648	}
 649	return parent;
 650}
 651
 652/*
 653 * Lock a dentry for feeding it to __dentry_kill().
 654 * Called under rcu_read_lock() and dentry->d_lock; the former
 655 * guarantees that nothing we access will be freed under us.
 656 * Note that dentry is *not* protected from concurrent dentry_kill(),
 657 * d_delete(), etc.
 658 *
 659 * Return false if dentry is busy.  Otherwise, return true and have
 660 * that dentry's inode locked.
 661 */
 662
 663static bool lock_for_kill(struct dentry *dentry)
 664{
 665	struct inode *inode = dentry->d_inode;
 666
 667	if (unlikely(dentry->d_lockref.count))
 668		return false;
 669
 670	if (!inode || likely(spin_trylock(&inode->i_lock)))
 671		return true;
 672
 673	do {
 674		spin_unlock(&dentry->d_lock);
 675		spin_lock(&inode->i_lock);
 676		spin_lock(&dentry->d_lock);
 677		if (likely(inode == dentry->d_inode))
 678			break;
 679		spin_unlock(&inode->i_lock);
 680		inode = dentry->d_inode;
 681	} while (inode);
 682	if (likely(!dentry->d_lockref.count))
 683		return true;
 684	if (inode)
 685		spin_unlock(&inode->i_lock);
 686	return false;
 687}
 688
 689/*
 690 * Decide if dentry is worth retaining.  Usually this is called with dentry
 691 * locked; if not locked, we are more limited and might not be able to tell
 692 * without a lock.  False in this case means "punt to locked path and recheck".
 693 *
 694 * In case we aren't locked, these predicates are not "stable". However, it is
 695 * sufficient that at some point after we dropped the reference the dentry was
 696 * hashed and the flags had the proper value. Other dentry users may have
 697 * re-gotten a reference to the dentry and change that, but our work is done -
 698 * we can leave the dentry around with a zero refcount.
 699 */
 700static inline bool retain_dentry(struct dentry *dentry, bool locked)
 701{
 702	unsigned int d_flags;
 703
 704	smp_rmb();
 705	d_flags = READ_ONCE(dentry->d_flags);
 706
 707	// Unreachable? Nobody would be able to look it up, no point retaining
 708	if (unlikely(d_unhashed(dentry)))
 709		return false;
 710
 711	// Same if it's disconnected
 712	if (unlikely(d_flags & DCACHE_DISCONNECTED))
 713		return false;
 714
 715	// ->d_delete() might tell us not to bother, but that requires
 716	// ->d_lock; can't decide without it
 717	if (unlikely(d_flags & DCACHE_OP_DELETE)) {
 718		if (!locked || dentry->d_op->d_delete(dentry))
 719			return false;
 720	}
 721
 722	// Explicitly told not to bother
 723	if (unlikely(d_flags & DCACHE_DONTCACHE))
 724		return false;
 725
 726	// At this point it looks like we ought to keep it.  We also might
 727	// need to do something - put it on LRU if it wasn't there already
 728	// and mark it referenced if it was on LRU, but not marked yet.
 729	// Unfortunately, both actions require ->d_lock, so in lockless
 730	// case we'd have to punt rather than doing those.
 731	if (unlikely(!(d_flags & DCACHE_LRU_LIST))) {
 732		if (!locked)
 733			return false;
 734		d_lru_add(dentry);
 735	} else if (unlikely(!(d_flags & DCACHE_REFERENCED))) {
 736		if (!locked)
 737			return false;
 738		dentry->d_flags |= DCACHE_REFERENCED;
 739	}
 740	return true;
 741}
 742
 743void d_mark_dontcache(struct inode *inode)
 744{
 745	struct dentry *de;
 746
 747	spin_lock(&inode->i_lock);
 748	hlist_for_each_entry(de, &inode->i_dentry, d_u.d_alias) {
 749		spin_lock(&de->d_lock);
 750		de->d_flags |= DCACHE_DONTCACHE;
 751		spin_unlock(&de->d_lock);
 752	}
 753	inode->i_state |= I_DONTCACHE;
 754	spin_unlock(&inode->i_lock);
 755}
 756EXPORT_SYMBOL(d_mark_dontcache);
 757
 758/*
 759 * Try to do a lockless dput(), and return whether that was successful.
 760 *
 761 * If unsuccessful, we return false, having already taken the dentry lock.
 762 * In that case refcount is guaranteed to be zero and we have already
 763 * decided that it's not worth keeping around.
 764 *
 765 * The caller needs to hold the RCU read lock, so that the dentry is
 766 * guaranteed to stay around even if the refcount goes down to zero!
 767 */
 768static inline bool fast_dput(struct dentry *dentry)
 769{
 770	int ret;
 771
 772	/*
 773	 * try to decrement the lockref optimistically.
 774	 */
 775	ret = lockref_put_return(&dentry->d_lockref);
 776
 777	/*
 778	 * If the lockref_put_return() failed due to the lock being held
 779	 * by somebody else, the fast path has failed. We will need to
 780	 * get the lock, and then check the count again.
 781	 */
 782	if (unlikely(ret < 0)) {
 783		spin_lock(&dentry->d_lock);
 784		if (WARN_ON_ONCE(dentry->d_lockref.count <= 0)) {
 785			spin_unlock(&dentry->d_lock);
 786			return true;
 787		}
 788		dentry->d_lockref.count--;
 789		goto locked;
 790	}
 791
 792	/*
 793	 * If we weren't the last ref, we're done.
 794	 */
 795	if (ret)
 796		return true;
 797
 798	/*
 799	 * Can we decide that decrement of refcount is all we needed without
 800	 * taking the lock?  There's a very common case when it's all we need -
 801	 * dentry looks like it ought to be retained and there's nothing else
 802	 * to do.
 803	 */
 804	if (retain_dentry(dentry, false))
 805		return true;
 806
 807	/*
 808	 * Either not worth retaining or we can't tell without the lock.
 809	 * Get the lock, then.  We've already decremented the refcount to 0,
 810	 * but we'll need to re-check the situation after getting the lock.
 811	 */
 812	spin_lock(&dentry->d_lock);
 813
 814	/*
 815	 * Did somebody else grab a reference to it in the meantime, and
 816	 * we're no longer the last user after all? Alternatively, somebody
 817	 * else could have killed it and marked it dead. Either way, we
 818	 * don't need to do anything else.
 819	 */
 820locked:
 821	if (dentry->d_lockref.count || retain_dentry(dentry, true)) {
 822		spin_unlock(&dentry->d_lock);
 823		return true;
 824	}
 825	return false;
 826}
 827
 828
 829/* 
 830 * This is dput
 831 *
 832 * This is complicated by the fact that we do not want to put
 833 * dentries that are no longer on any hash chain on the unused
 834 * list: we'd much rather just get rid of them immediately.
 835 *
 836 * However, that implies that we have to traverse the dentry
 837 * tree upwards to the parents which might _also_ now be
 838 * scheduled for deletion (it may have been only waiting for
 839 * its last child to go away).
 840 *
 841 * This tail recursion is done by hand as we don't want to depend
 842 * on the compiler to always get this right (gcc generally doesn't).
 843 * Real recursion would eat up our stack space.
 844 */
 845
 846/*
 847 * dput - release a dentry
 848 * @dentry: dentry to release 
 849 *
 850 * Release a dentry. This will drop the usage count and if appropriate
 851 * call the dentry unlink method as well as removing it from the queues and
 852 * releasing its resources. If the parent dentries were scheduled for release
 853 * they too may now get deleted.
 854 */
 855void dput(struct dentry *dentry)
 856{
 857	if (!dentry)
 858		return;
 859	might_sleep();
 860	rcu_read_lock();
 861	if (likely(fast_dput(dentry))) {
 862		rcu_read_unlock();
 863		return;
 864	}
 865	while (lock_for_kill(dentry)) {
 866		rcu_read_unlock();
 867		dentry = __dentry_kill(dentry);
 868		if (!dentry)
 869			return;
 870		if (retain_dentry(dentry, true)) {
 871			spin_unlock(&dentry->d_lock);
 872			return;
 873		}
 874		rcu_read_lock();
 875	}
 876	rcu_read_unlock();
 877	spin_unlock(&dentry->d_lock);
 878}
 879EXPORT_SYMBOL(dput);
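/*
 * Usage note: every counted reference - dget(), dget_parent(), a
 * successful lookup - is balanced by exactly one dput().  dput(NULL)
 * is a no-op, which keeps cleanup paths simple:
 *
 *	struct dentry *parent = dget_parent(dentry);
 *
 *	... work with parent ...
 *	dput(parent);
 */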
 880
 881static void to_shrink_list(struct dentry *dentry, struct list_head *list)
 882__must_hold(&dentry->d_lock)
 883{
 884	if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) {
 885		if (dentry->d_flags & DCACHE_LRU_LIST)
 886			d_lru_del(dentry);
 887		d_shrink_add(dentry, list);
 888	}
 889}
 890
 891void dput_to_list(struct dentry *dentry, struct list_head *list)
 892{
 893	rcu_read_lock();
 894	if (likely(fast_dput(dentry))) {
 895		rcu_read_unlock();
 896		return;
 897	}
 898	rcu_read_unlock();
 899	to_shrink_list(dentry, list);
 900	spin_unlock(&dentry->d_lock);
 901}
 902
 903struct dentry *dget_parent(struct dentry *dentry)
 904{
 905	int gotref;
 906	struct dentry *ret;
 907	unsigned seq;
 908
 909	/*
 910	 * Do optimistic parent lookup without any
 911	 * locking.
 912	 */
 913	rcu_read_lock();
 914	seq = raw_seqcount_begin(&dentry->d_seq);
 915	ret = READ_ONCE(dentry->d_parent);
 916	gotref = lockref_get_not_zero(&ret->d_lockref);
 917	rcu_read_unlock();
 918	if (likely(gotref)) {
 919		if (!read_seqcount_retry(&dentry->d_seq, seq))
 920			return ret;
 921		dput(ret);
 922	}
 923
 924repeat:
 925	/*
 926	 * Don't need rcu_dereference because we re-check it was correct under
 927	 * the lock.
 928	 */
 929	rcu_read_lock();
 930	ret = dentry->d_parent;
 931	spin_lock(&ret->d_lock);
 932	if (unlikely(ret != dentry->d_parent)) {
 933		spin_unlock(&ret->d_lock);
 934		rcu_read_unlock();
 935		goto repeat;
 936	}
 937	rcu_read_unlock();
 938	BUG_ON(!ret->d_lockref.count);
 939	ret->d_lockref.count++;
 940	spin_unlock(&ret->d_lock);
 941	return ret;
 942}
 943EXPORT_SYMBOL(dget_parent);
 944
 945static struct dentry * __d_find_any_alias(struct inode *inode)
 946{
 947	struct dentry *alias;
 948
 949	if (hlist_empty(&inode->i_dentry))
 950		return NULL;
 951	alias = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
 952	lockref_get(&alias->d_lockref);
 953	return alias;
 954}
 955
 956/**
 957 * d_find_any_alias - find any alias for a given inode
 958 * @inode: inode to find an alias for
 959 *
 960 * If any aliases exist for the given inode, take and return a
 961 * reference for one of them.  If no aliases exist, return %NULL.
 962 */
 963struct dentry *d_find_any_alias(struct inode *inode)
 964{
 965	struct dentry *de;
 966
 967	spin_lock(&inode->i_lock);
 968	de = __d_find_any_alias(inode);
 969	spin_unlock(&inode->i_lock);
 970	return de;
 971}
 972EXPORT_SYMBOL(d_find_any_alias);
 973
 974static struct dentry *__d_find_alias(struct inode *inode)
 975{
 976	struct dentry *alias;
 977
 978	if (S_ISDIR(inode->i_mode))
 979		return __d_find_any_alias(inode);
 980
 981	hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
 982		spin_lock(&alias->d_lock);
 983 		if (!d_unhashed(alias)) {
 984			dget_dlock(alias);
 985			spin_unlock(&alias->d_lock);
 986			return alias;
 987		}
 988		spin_unlock(&alias->d_lock);
 989	}
 990	return NULL;
 991}
 992
 993/**
 994 * d_find_alias - grab a hashed alias of inode
 995 * @inode: inode in question
 996 *
 997 * If inode has a hashed alias, or is a directory and has any alias,
 998 * acquire the reference to alias and return it. Otherwise return NULL.
 999 * Notice that if inode is a directory there can be only one alias and
1000 * it can be unhashed only if it has no children, or if it is the root
1001 * of a filesystem, or if the directory was renamed and d_revalidate
1002 * was the first vfs operation to notice.
1003 *
1004 * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer
1005 * any other hashed alias over that one.
1006 */
1007struct dentry *d_find_alias(struct inode *inode)
1008{
1009	struct dentry *de = NULL;
1010
1011	if (!hlist_empty(&inode->i_dentry)) {
1012		spin_lock(&inode->i_lock);
1013		de = __d_find_alias(inode);
1014		spin_unlock(&inode->i_lock);
1015	}
1016	return de;
1017}
1018EXPORT_SYMBOL(d_find_alias);
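/*
 * Usage note: the caller owns the returned reference and must drop it:
 *
 *	struct dentry *alias = d_find_alias(inode);
 *
 *	if (alias) {
 *		... use alias ...
 *		dput(alias);
 *	}
 */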
1019
1020/*
1021 *  Caller MUST be holding rcu_read_lock() and be guaranteed
1022 *  that inode won't get freed until rcu_read_unlock().
1023 */
1024struct dentry *d_find_alias_rcu(struct inode *inode)
1025{
1026	struct hlist_head *l = &inode->i_dentry;
1027	struct dentry *de = NULL;
1028
1029	spin_lock(&inode->i_lock);
1030	// ->i_dentry and ->i_rcu are colocated, but the latter won't be
1031	// used without having I_FREEING set, which means no aliases left
1032	if (likely(!(inode->i_state & I_FREEING) && !hlist_empty(l))) {
1033		if (S_ISDIR(inode->i_mode)) {
1034			de = hlist_entry(l->first, struct dentry, d_u.d_alias);
1035		} else {
1036			hlist_for_each_entry(de, l, d_u.d_alias)
1037				if (!d_unhashed(de))
1038					break;
1039		}
1040	}
1041	spin_unlock(&inode->i_lock);
1042	return de;
1043}
1044
1045/*
1046 *	Try to kill dentries associated with this inode.
1047 * WARNING: you must own a reference to inode.
1048 */
1049void d_prune_aliases(struct inode *inode)
1050{
1051	LIST_HEAD(dispose);
1052	struct dentry *dentry;
1053
1054	spin_lock(&inode->i_lock);
1055	hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
1056		spin_lock(&dentry->d_lock);
1057		if (!dentry->d_lockref.count)
1058			to_shrink_list(dentry, &dispose);
1059		spin_unlock(&dentry->d_lock);
1060	}
1061	spin_unlock(&inode->i_lock);
1062	shrink_dentry_list(&dispose);
1063}
1064EXPORT_SYMBOL(d_prune_aliases);
1065
1066static inline void shrink_kill(struct dentry *victim)
1067{
1068	do {
1069		rcu_read_unlock();
1070		victim = __dentry_kill(victim);
1071		rcu_read_lock();
1072	} while (victim && lock_for_kill(victim));
1073	rcu_read_unlock();
1074	if (victim)
1075		spin_unlock(&victim->d_lock);
1076}
1077
1078void shrink_dentry_list(struct list_head *list)
1079{
1080	while (!list_empty(list)) {
1081		struct dentry *dentry;
1082
1083		dentry = list_entry(list->prev, struct dentry, d_lru);
1084		spin_lock(&dentry->d_lock);
1085		rcu_read_lock();
1086		if (!lock_for_kill(dentry)) {
1087			bool can_free;
1088			rcu_read_unlock();
1089			d_shrink_del(dentry);
1090			can_free = dentry->d_flags & DCACHE_DENTRY_KILLED;
1091			spin_unlock(&dentry->d_lock);
1092			if (can_free)
1093				dentry_free(dentry);
1094			continue;
1095		}
1096		d_shrink_del(dentry);
1097		shrink_kill(dentry);
1098	}
1099}
1100
1101static enum lru_status dentry_lru_isolate(struct list_head *item,
1102		struct list_lru_one *lru, void *arg)
1103{
1104	struct list_head *freeable = arg;
1105	struct dentry	*dentry = container_of(item, struct dentry, d_lru);
1106
1107
1108	/*
1109	 * we are inverting the lru lock/dentry->d_lock here,
1110	 * so use a trylock. If we fail to get the lock, just skip
1111	 * it
1112	 */
1113	if (!spin_trylock(&dentry->d_lock))
1114		return LRU_SKIP;
1115
1116	/*
1117	 * Referenced dentries are still in use. If they have active
1118	 * counts, just remove them from the LRU. Otherwise give them
1119	 * another pass through the LRU.
1120	 */
1121	if (dentry->d_lockref.count) {
1122		d_lru_isolate(lru, dentry);
1123		spin_unlock(&dentry->d_lock);
1124		return LRU_REMOVED;
1125	}
1126
1127	if (dentry->d_flags & DCACHE_REFERENCED) {
1128		dentry->d_flags &= ~DCACHE_REFERENCED;
1129		spin_unlock(&dentry->d_lock);
1130
1131		/*
1132		 * The list move itself will be made by the common LRU code. At
1133		 * this point, we've dropped the dentry->d_lock but keep the
1134		 * lru lock. This is safe to do, since every list movement is
1135		 * protected by the lru lock even if both locks are held.
1136		 *
1137		 * This is guaranteed by the fact that all LRU management
1138		 * functions are intermediated by the LRU API calls like
1139		 * list_lru_add_obj and list_lru_del_obj. List movement in this file
 1140		 * only ever occurs through these functions or through callbacks
 1141		 * like this one, which are called from the LRU API.
1142		 *
1143		 * The only exceptions to this are functions like
1144		 * shrink_dentry_list, and code that first checks for the
1145		 * DCACHE_SHRINK_LIST flag.  Those are guaranteed to be
1146		 * operating only with stack provided lists after they are
 1147		 * properly isolated from the main list.  It is thus always a
1148		 * local access.
1149		 */
1150		return LRU_ROTATE;
1151	}
1152
1153	d_lru_shrink_move(lru, dentry, freeable);
1154	spin_unlock(&dentry->d_lock);
1155
1156	return LRU_REMOVED;
1157}
1158
1159/**
1160 * prune_dcache_sb - shrink the dcache
1161 * @sb: superblock
1162 * @sc: shrink control, passed to list_lru_shrink_walk()
1163 *
1164 * Attempt to shrink the superblock dcache LRU by @sc->nr_to_scan entries. This
1165 * is done when we need more memory and called from the superblock shrinker
1166 * function.
1167 *
1168 * This function may fail to free any resources if all the dentries are in
1169 * use.
1170 */
1171long prune_dcache_sb(struct super_block *sb, struct shrink_control *sc)
1172{
1173	LIST_HEAD(dispose);
1174	long freed;
1175
1176	freed = list_lru_shrink_walk(&sb->s_dentry_lru, sc,
1177				     dentry_lru_isolate, &dispose);
1178	shrink_dentry_list(&dispose);
1179	return freed;
1180}
1181
1182static enum lru_status dentry_lru_isolate_shrink(struct list_head *item,
1183		struct list_lru_one *lru, void *arg)
1184{
1185	struct list_head *freeable = arg;
1186	struct dentry	*dentry = container_of(item, struct dentry, d_lru);
1187
1188	/*
1189	 * we are inverting the lru lock/dentry->d_lock here,
1190	 * so use a trylock. If we fail to get the lock, just skip
1191	 * it
1192	 */
1193	if (!spin_trylock(&dentry->d_lock))
1194		return LRU_SKIP;
1195
1196	d_lru_shrink_move(lru, dentry, freeable);
1197	spin_unlock(&dentry->d_lock);
1198
1199	return LRU_REMOVED;
1200}
1201
1202
1203/**
1204 * shrink_dcache_sb - shrink dcache for a superblock
1205 * @sb: superblock
1206 *
1207 * Shrink the dcache for the specified super block. This is used to free
1208 * the dcache before unmounting a file system.
1209 */
1210void shrink_dcache_sb(struct super_block *sb)
1211{
1212	do {
1213		LIST_HEAD(dispose);
1214
1215		list_lru_walk(&sb->s_dentry_lru,
1216			dentry_lru_isolate_shrink, &dispose, 1024);
1217		shrink_dentry_list(&dispose);
1218	} while (list_lru_count(&sb->s_dentry_lru) > 0);
1219}
1220EXPORT_SYMBOL(shrink_dcache_sb);
1221
1222/**
 1223 * enum d_walk_ret - action to take during tree walk
 1224 * @D_WALK_CONTINUE:	continue walk
1225 * @D_WALK_QUIT:	quit walk
1226 * @D_WALK_NORETRY:	quit when retry is needed
1227 * @D_WALK_SKIP:	skip this dentry and its children
1228 */
1229enum d_walk_ret {
1230	D_WALK_CONTINUE,
1231	D_WALK_QUIT,
1232	D_WALK_NORETRY,
1233	D_WALK_SKIP,
1234};
1235
1236/**
1237 * d_walk - walk the dentry tree
1238 * @parent:	start of walk
 1239 * @data:	data passed to @enter()
1240 * @enter:	callback when first entering the dentry
1241 *
1242 * The @enter() callbacks are called with d_lock held.
1243 */
1244static void d_walk(struct dentry *parent, void *data,
1245		   enum d_walk_ret (*enter)(void *, struct dentry *))
1246{
1247	struct dentry *this_parent, *dentry;
1248	unsigned seq = 0;
1249	enum d_walk_ret ret;
1250	bool retry = true;
1251
1252again:
1253	read_seqbegin_or_lock(&rename_lock, &seq);
1254	this_parent = parent;
1255	spin_lock(&this_parent->d_lock);
1256
1257	ret = enter(data, this_parent);
1258	switch (ret) {
1259	case D_WALK_CONTINUE:
1260		break;
1261	case D_WALK_QUIT:
1262	case D_WALK_SKIP:
1263		goto out_unlock;
1264	case D_WALK_NORETRY:
1265		retry = false;
1266		break;
1267	}
1268repeat:
1269	dentry = d_first_child(this_parent);
1270resume:
1271	hlist_for_each_entry_from(dentry, d_sib) {
1272		if (unlikely(dentry->d_flags & DCACHE_DENTRY_CURSOR))
1273			continue;
1274
1275		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
1276
1277		ret = enter(data, dentry);
1278		switch (ret) {
1279		case D_WALK_CONTINUE:
1280			break;
1281		case D_WALK_QUIT:
1282			spin_unlock(&dentry->d_lock);
1283			goto out_unlock;
1284		case D_WALK_NORETRY:
1285			retry = false;
1286			break;
1287		case D_WALK_SKIP:
1288			spin_unlock(&dentry->d_lock);
1289			continue;
1290		}
1291
1292		if (!hlist_empty(&dentry->d_children)) {
1293			spin_unlock(&this_parent->d_lock);
1294			spin_release(&dentry->d_lock.dep_map, _RET_IP_);
1295			this_parent = dentry;
1296			spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
1297			goto repeat;
1298		}
1299		spin_unlock(&dentry->d_lock);
1300	}
1301	/*
1302	 * All done at this level ... ascend and resume the search.
1303	 */
1304	rcu_read_lock();
1305ascend:
1306	if (this_parent != parent) {
1307		dentry = this_parent;
1308		this_parent = dentry->d_parent;
1309
1310		spin_unlock(&dentry->d_lock);
1311		spin_lock(&this_parent->d_lock);
1312
1313		/* might go back up the wrong parent if we have had a rename. */
1314		if (need_seqretry(&rename_lock, seq))
1315			goto rename_retry;
1316		/* go into the first sibling still alive */
1317		hlist_for_each_entry_continue(dentry, d_sib) {
1318			if (likely(!(dentry->d_flags & DCACHE_DENTRY_KILLED))) {
1319				rcu_read_unlock();
1320				goto resume;
1321			}
1322		}
1323		goto ascend;
1324	}
1325	if (need_seqretry(&rename_lock, seq))
1326		goto rename_retry;
1327	rcu_read_unlock();
1328
1329out_unlock:
1330	spin_unlock(&this_parent->d_lock);
1331	done_seqretry(&rename_lock, seq);
1332	return;
1333
1334rename_retry:
1335	spin_unlock(&this_parent->d_lock);
1336	rcu_read_unlock();
1337	BUG_ON(seq & 1);
1338	if (!retry)
1339		return;
1340	seq = 1;
1341	goto again;
1342}
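/*
 * For illustration, a minimal (hypothetical) callback that counts the
 * dentries in a subtree; @enter runs under d_lock, so it must not
 * sleep or take conflicting locks:
 *
 *	static enum d_walk_ret count_one(void *data, struct dentry *d)
 *	{
 *		(*(long *)data)++;
 *		return D_WALK_CONTINUE;
 *	}
 *
 *	long n = 0;
 *	d_walk(root, &n, count_one);
 */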
1343
1344struct check_mount {
1345	struct vfsmount *mnt;
1346	unsigned int mounted;
1347};
1348
1349static enum d_walk_ret path_check_mount(void *data, struct dentry *dentry)
1350{
1351	struct check_mount *info = data;
1352	struct path path = { .mnt = info->mnt, .dentry = dentry };
1353
1354	if (likely(!d_mountpoint(dentry)))
1355		return D_WALK_CONTINUE;
1356	if (__path_is_mountpoint(&path)) {
1357		info->mounted = 1;
1358		return D_WALK_QUIT;
1359	}
1360	return D_WALK_CONTINUE;
1361}
1362
1363/**
1364 * path_has_submounts - check for mounts over a dentry in the
1365 *                      current namespace.
1366 * @parent: path to check.
1367 *
1368 * Return true if the parent or its subdirectories contain
1369 * a mount point in the current namespace.
1370 */
1371int path_has_submounts(const struct path *parent)
1372{
1373	struct check_mount data = { .mnt = parent->mnt, .mounted = 0 };
1374
1375	read_seqlock_excl(&mount_lock);
1376	d_walk(parent->dentry, &data, path_check_mount);
1377	read_sequnlock_excl(&mount_lock);
1378
1379	return data.mounted;
1380}
1381EXPORT_SYMBOL(path_has_submounts);
1382
1383/*
1384 * Called by mount code to set a mountpoint and check if the mountpoint is
1385 * reachable (e.g. NFS can unhash a directory dentry and then the complete
1386 * subtree can become unreachable).
1387 *
1388 * Only one of d_invalidate() and d_set_mounted() must succeed.  For
1389 * this reason take rename_lock and d_lock on dentry and ancestors.
1390 */
1391int d_set_mounted(struct dentry *dentry)
1392{
1393	struct dentry *p;
1394	int ret = -ENOENT;
1395	write_seqlock(&rename_lock);
1396	for (p = dentry->d_parent; !IS_ROOT(p); p = p->d_parent) {
1397		/* Need exclusion wrt. d_invalidate() */
1398		spin_lock(&p->d_lock);
1399		if (unlikely(d_unhashed(p))) {
1400			spin_unlock(&p->d_lock);
1401			goto out;
1402		}
1403		spin_unlock(&p->d_lock);
1404	}
1405	spin_lock(&dentry->d_lock);
1406	if (!d_unlinked(dentry)) {
1407		ret = -EBUSY;
1408		if (!d_mountpoint(dentry)) {
1409			dentry->d_flags |= DCACHE_MOUNTED;
1410			ret = 0;
1411		}
1412	}
1413 	spin_unlock(&dentry->d_lock);
1414out:
1415	write_sequnlock(&rename_lock);
1416	return ret;
1417}
1418
1419/*
1420 * Search the dentry child list of the specified parent,
1421 * and move any unused dentries to the end of the unused
1422 * list for prune_dcache(). We descend to the next level
1423 * whenever the d_children list is non-empty and continue
1424 * searching.
1425 *
1426 * It returns zero iff there are no unused children,
 1427 * otherwise it returns the number of children moved to
1428 * the end of the unused list. This may not be the total
1429 * number of unused children, because select_parent can
1430 * drop the lock and return early due to latency
1431 * constraints.
1432 */
1433
1434struct select_data {
1435	struct dentry *start;
1436	union {
1437		long found;
1438		struct dentry *victim;
1439	};
1440	struct list_head dispose;
1441};
1442
1443static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
1444{
1445	struct select_data *data = _data;
1446	enum d_walk_ret ret = D_WALK_CONTINUE;
1447
1448	if (data->start == dentry)
1449		goto out;
1450
1451	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
1452		data->found++;
1453	} else if (!dentry->d_lockref.count) {
1454		to_shrink_list(dentry, &data->dispose);
1455		data->found++;
1456	} else if (dentry->d_lockref.count < 0) {
1457		data->found++;
1458	}
1459	/*
1460	 * We can return to the caller if we have found some (this
1461	 * ensures forward progress). We'll be coming back to find
1462	 * the rest.
1463	 */
1464	if (!list_empty(&data->dispose))
1465		ret = need_resched() ? D_WALK_QUIT : D_WALK_NORETRY;
1466out:
1467	return ret;
1468}
1469
1470static enum d_walk_ret select_collect2(void *_data, struct dentry *dentry)
1471{
1472	struct select_data *data = _data;
1473	enum d_walk_ret ret = D_WALK_CONTINUE;
1474
1475	if (data->start == dentry)
1476		goto out;
1477
1478	if (!dentry->d_lockref.count) {
1479		if (dentry->d_flags & DCACHE_SHRINK_LIST) {
1480			rcu_read_lock();
1481			data->victim = dentry;
1482			return D_WALK_QUIT;
1483		}
1484		to_shrink_list(dentry, &data->dispose);
1485	}
1486	/*
1487	 * We can return to the caller if we have found some (this
1488	 * ensures forward progress). We'll be coming back to find
1489	 * the rest.
1490	 */
1491	if (!list_empty(&data->dispose))
1492		ret = need_resched() ? D_WALK_QUIT : D_WALK_NORETRY;
1493out:
1494	return ret;
1495}
1496
1497/**
1498 * shrink_dcache_parent - prune dcache
1499 * @parent: parent of entries to prune
1500 *
1501 * Prune the dcache to remove unused children of the parent dentry.
1502 */
1503void shrink_dcache_parent(struct dentry *parent)
1504{
1505	for (;;) {
1506		struct select_data data = {.start = parent};
1507
1508		INIT_LIST_HEAD(&data.dispose);
1509		d_walk(parent, &data, select_collect);
1510
1511		if (!list_empty(&data.dispose)) {
1512			shrink_dentry_list(&data.dispose);
1513			continue;
1514		}
1515
1516		cond_resched();
1517		if (!data.found)
1518			break;
1519		data.victim = NULL;
1520		d_walk(parent, &data, select_collect2);
1521		if (data.victim) {
1522			spin_lock(&data.victim->d_lock);
1523			if (!lock_for_kill(data.victim)) {
1524				spin_unlock(&data.victim->d_lock);
1525				rcu_read_unlock();
1526			} else {
1527				shrink_kill(data.victim);
1528			}
1529		}
1530		if (!list_empty(&data.dispose))
1531			shrink_dentry_list(&data.dispose);
1532	}
1533}
1534EXPORT_SYMBOL(shrink_dcache_parent);
1535
1536static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
1537{
 1538	/* it has busy descendants; complain about those instead */
1539	if (!hlist_empty(&dentry->d_children))
1540		return D_WALK_CONTINUE;
1541
1542	/* root with refcount 1 is fine */
1543	if (dentry == _data && dentry->d_lockref.count == 1)
1544		return D_WALK_CONTINUE;
1545
1546	WARN(1, "BUG: Dentry %p{i=%lx,n=%pd} "
1547			" still in use (%d) [unmount of %s %s]\n",
1548		       dentry,
1549		       dentry->d_inode ?
1550		       dentry->d_inode->i_ino : 0UL,
1551		       dentry,
1552		       dentry->d_lockref.count,
1553		       dentry->d_sb->s_type->name,
1554		       dentry->d_sb->s_id);
1555	return D_WALK_CONTINUE;
1556}
1557
1558static void do_one_tree(struct dentry *dentry)
1559{
1560	shrink_dcache_parent(dentry);
1561	d_walk(dentry, dentry, umount_check);
1562	d_drop(dentry);
1563	dput(dentry);
1564}
1565
1566/*
1567 * destroy the dentries attached to a superblock on unmounting
1568 */
1569void shrink_dcache_for_umount(struct super_block *sb)
1570{
1571	struct dentry *dentry;
1572
1573	rwsem_assert_held_write(&sb->s_umount);
1574
1575	dentry = sb->s_root;
1576	sb->s_root = NULL;
1577	do_one_tree(dentry);
1578
1579	while (!hlist_bl_empty(&sb->s_roots)) {
1580		dentry = dget(hlist_bl_entry(hlist_bl_first(&sb->s_roots), struct dentry, d_hash));
1581		do_one_tree(dentry);
1582	}
1583}
1584
1585static enum d_walk_ret find_submount(void *_data, struct dentry *dentry)
1586{
1587	struct dentry **victim = _data;
1588	if (d_mountpoint(dentry)) {
1589		*victim = dget_dlock(dentry);
1590		return D_WALK_QUIT;
1591	}
1592	return D_WALK_CONTINUE;
1593}
1594
1595/**
1596 * d_invalidate - detach submounts, prune dcache, and drop
1597 * @dentry: dentry to invalidate (aka detach, prune and drop)
1598 */
1599void d_invalidate(struct dentry *dentry)
1600{
1601	bool had_submounts = false;
1602	spin_lock(&dentry->d_lock);
1603	if (d_unhashed(dentry)) {
1604		spin_unlock(&dentry->d_lock);
1605		return;
1606	}
1607	__d_drop(dentry);
1608	spin_unlock(&dentry->d_lock);
1609
1610	/* Negative dentries can be dropped without further checks */
1611	if (!dentry->d_inode)
1612		return;
1613
1614	shrink_dcache_parent(dentry);
1615	for (;;) {
1616		struct dentry *victim = NULL;
1617		d_walk(dentry, &victim, find_submount);
1618		if (!victim) {
1619			if (had_submounts)
1620				shrink_dcache_parent(dentry);
1621			return;
1622		}
1623		had_submounts = true;
1624		detach_mounts(victim);
1625		dput(victim);
1626	}
1627}
1628EXPORT_SYMBOL(d_invalidate);
1629
1630/**
1631 * __d_alloc	-	allocate a dcache entry
1632 * @sb: filesystem it will belong to
1633 * @name: qstr of the name
1634 *
1635 * Allocates a dentry. It returns %NULL if there is insufficient memory
1636 * available. On a success the dentry is returned. The name passed in is
1637 * copied and the copy passed in may be reused after this call.
1638 */
1639 
1640static struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
1641{
1642	struct dentry *dentry;
1643	char *dname;
1644	int err;
1645
1646	dentry = kmem_cache_alloc_lru(dentry_cache, &sb->s_dentry_lru,
1647				      GFP_KERNEL);
1648	if (!dentry)
1649		return NULL;
1650
1651	/*
1652	 * We guarantee that the inline name is always NUL-terminated.
1653	 * This way the memcpy() done by the name switching in rename
1654	 * will still always have a NUL at the end, even if we might
1655	 * be overwriting an internal NUL character
1656	 */
1657	dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
1658	if (unlikely(!name)) {
1659		name = &slash_name;
1660		dname = dentry->d_iname;
1661	} else if (name->len > DNAME_INLINE_LEN-1) {
1662		size_t size = offsetof(struct external_name, name[1]);
1663		struct external_name *p = kmalloc(size + name->len,
1664						  GFP_KERNEL_ACCOUNT |
1665						  __GFP_RECLAIMABLE);
1666		if (!p) {
1667			kmem_cache_free(dentry_cache, dentry); 
1668			return NULL;
1669		}
1670		atomic_set(&p->u.count, 1);
1671		dname = p->name;
1672	} else  {
1673		dname = dentry->d_iname;
1674	}	
1675
1676	dentry->d_name.len = name->len;
1677	dentry->d_name.hash = name->hash;
1678	memcpy(dname, name->name, name->len);
1679	dname[name->len] = 0;
1680
1681	/* Make sure we always see the terminating NUL character */
1682	smp_store_release(&dentry->d_name.name, dname); /* ^^^ */
1683
1684	dentry->d_lockref.count = 1;
1685	dentry->d_flags = 0;
1686	spin_lock_init(&dentry->d_lock);
1687	seqcount_spinlock_init(&dentry->d_seq, &dentry->d_lock);
1688	dentry->d_inode = NULL;
1689	dentry->d_parent = dentry;
1690	dentry->d_sb = sb;
1691	dentry->d_op = NULL;
1692	dentry->d_fsdata = NULL;
1693	INIT_HLIST_BL_NODE(&dentry->d_hash);
1694	INIT_LIST_HEAD(&dentry->d_lru);
1695	INIT_HLIST_HEAD(&dentry->d_children);
1696	INIT_HLIST_NODE(&dentry->d_u.d_alias);
1697	INIT_HLIST_NODE(&dentry->d_sib);
1698	d_set_d_op(dentry, dentry->d_sb->s_d_op);
1699
1700	if (dentry->d_op && dentry->d_op->d_init) {
1701		err = dentry->d_op->d_init(dentry);
1702		if (err) {
1703			if (dname_external(dentry))
1704				kfree(external_name(dentry));
1705			kmem_cache_free(dentry_cache, dentry);
1706			return NULL;
1707		}
1708	}
1709
1710	this_cpu_inc(nr_dentry);
1711
1712	return dentry;
1713}
1714
1715/**
1716 * d_alloc	-	allocate a dcache entry
1717 * @parent: parent of entry to allocate
1718 * @name: qstr of the name
1719 *
1720 * Allocates a dentry. It returns %NULL if there is insufficient memory
1721 * available. On a success the dentry is returned. The name passed in is
1722 * copied and the copy passed in may be reused after this call.
1723 */
1724struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
1725{
1726	struct dentry *dentry = __d_alloc(parent->d_sb, name);
1727	if (!dentry)
1728		return NULL;
1729	spin_lock(&parent->d_lock);
1730	/*
1731	 * don't need child lock because it is not subject
1732	 * to concurrency here
1733	 */
1734	dentry->d_parent = dget_dlock(parent);
1735	hlist_add_head(&dentry->d_sib, &parent->d_children);
1736	spin_unlock(&parent->d_lock);
1737
1738	return dentry;
1739}
1740EXPORT_SYMBOL(d_alloc);
1741
1742struct dentry *d_alloc_anon(struct super_block *sb)
1743{
1744	return __d_alloc(sb, NULL);
1745}
1746EXPORT_SYMBOL(d_alloc_anon);
1747
1748struct dentry *d_alloc_cursor(struct dentry * parent)
1749{
1750	struct dentry *dentry = d_alloc_anon(parent->d_sb);
1751	if (dentry) {
1752		dentry->d_flags |= DCACHE_DENTRY_CURSOR;
1753		dentry->d_parent = dget(parent);
1754	}
1755	return dentry;
1756}
1757
1758/**
1759 * d_alloc_pseudo - allocate a dentry (for lookup-less filesystems)
1760 * @sb: the superblock
1761 * @name: qstr of the name
1762 *
1763 * For a filesystem that just pins its dentries in memory and never
1764 * performs lookups at all, return an unhashed IS_ROOT dentry.
 1765 * This is used for pipes, sockets et al. - the stuff that should
1766 * never be anyone's children or parents.  Unlike all other
1767 * dentries, these will not have RCU delay between dropping the
1768 * last reference and freeing them.
1769 *
1770 * The only user is alloc_file_pseudo() and that's what should
1771 * be considered a public interface.  Don't use directly.
1772 */
1773struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name)
1774{
1775	static const struct dentry_operations anon_ops = {
1776		.d_dname = simple_dname
1777	};
1778	struct dentry *dentry = __d_alloc(sb, name);
1779	if (likely(dentry)) {
1780		dentry->d_flags |= DCACHE_NORCU;
1781		if (!sb->s_d_op)
1782			d_set_d_op(dentry, &anon_ops);
1783	}
1784	return dentry;
1785}
1786
1787struct dentry *d_alloc_name(struct dentry *parent, const char *name)
1788{
1789	struct qstr q;
1790
1791	q.name = name;
1792	q.hash_len = hashlen_string(parent, name);
1793	return d_alloc(parent, &q);
1794}
1795EXPORT_SYMBOL(d_alloc_name);
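/*
 * Usage sketch: d_alloc_name() is the front end for plain C-string
 * names, as pseudo-filesystems use when populating a directory:
 *
 *	struct dentry *child = d_alloc_name(parent, "stats");
 *
 *	if (!child)
 *		return -ENOMEM;
 *	d_add(child, inode);		hashes it and attaches the inode
 *
 * ("stats" and inode stand in for the caller's own name and inode.)
 */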
1796
1797void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
1798{
1799	WARN_ON_ONCE(dentry->d_op);
1800	WARN_ON_ONCE(dentry->d_flags & (DCACHE_OP_HASH	|
1801				DCACHE_OP_COMPARE	|
1802				DCACHE_OP_REVALIDATE	|
1803				DCACHE_OP_WEAK_REVALIDATE	|
1804				DCACHE_OP_DELETE	|
1805				DCACHE_OP_REAL));
1806	dentry->d_op = op;
1807	if (!op)
1808		return;
1809	if (op->d_hash)
1810		dentry->d_flags |= DCACHE_OP_HASH;
1811	if (op->d_compare)
1812		dentry->d_flags |= DCACHE_OP_COMPARE;
1813	if (op->d_revalidate)
1814		dentry->d_flags |= DCACHE_OP_REVALIDATE;
1815	if (op->d_weak_revalidate)
1816		dentry->d_flags |= DCACHE_OP_WEAK_REVALIDATE;
1817	if (op->d_delete)
1818		dentry->d_flags |= DCACHE_OP_DELETE;
1819	if (op->d_prune)
1820		dentry->d_flags |= DCACHE_OP_PRUNE;
1821	if (op->d_real)
1822		dentry->d_flags |= DCACHE_OP_REAL;
1823
1824}
1825EXPORT_SYMBOL(d_set_d_op);
1826
1827static unsigned d_flags_for_inode(struct inode *inode)
1828{
1829	unsigned add_flags = DCACHE_REGULAR_TYPE;
1830
1831	if (!inode)
1832		return DCACHE_MISS_TYPE;
1833
1834	if (S_ISDIR(inode->i_mode)) {
1835		add_flags = DCACHE_DIRECTORY_TYPE;
1836		if (unlikely(!(inode->i_opflags & IOP_LOOKUP))) {
1837			if (unlikely(!inode->i_op->lookup))
1838				add_flags = DCACHE_AUTODIR_TYPE;
1839			else
1840				inode->i_opflags |= IOP_LOOKUP;
1841		}
1842		goto type_determined;
1843	}
1844
1845	if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) {
1846		if (unlikely(inode->i_op->get_link)) {
1847			add_flags = DCACHE_SYMLINK_TYPE;
1848			goto type_determined;
1849		}
1850		inode->i_opflags |= IOP_NOFOLLOW;
1851	}
1852
1853	if (unlikely(!S_ISREG(inode->i_mode)))
1854		add_flags = DCACHE_SPECIAL_TYPE;
1855
1856type_determined:
1857	if (unlikely(IS_AUTOMOUNT(inode)))
1858		add_flags |= DCACHE_NEED_AUTOMOUNT;
1859	return add_flags;
1860}
1861
1862static void __d_instantiate(struct dentry *dentry, struct inode *inode)
1863{
1864	unsigned add_flags = d_flags_for_inode(inode);
1865	WARN_ON(d_in_lookup(dentry));
1866
1867	spin_lock(&dentry->d_lock);
1868	/*
1869	 * The negative counter only tracks dentries on the LRU. Don't dec if
1870	 * d_lru is on another list.
1871	 */
1872	if ((dentry->d_flags &
1873	     (DCACHE_LRU_LIST|DCACHE_SHRINK_LIST)) == DCACHE_LRU_LIST)
1874		this_cpu_dec(nr_dentry_negative);
1875	hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
1876	raw_write_seqcount_begin(&dentry->d_seq);
1877	__d_set_inode_and_type(dentry, inode, add_flags);
1878	raw_write_seqcount_end(&dentry->d_seq);
1879	fsnotify_update_flags(dentry);
1880	spin_unlock(&dentry->d_lock);
1881}
1882
1883/**
1884 * d_instantiate - fill in inode information for a dentry
1885 * @entry: dentry to complete
1886 * @inode: inode to attach to this dentry
1887 *
1888 * Fill in inode information in the entry.
1889 *
1890 * This turns negative dentries into productive full members
1891 * of society.
1892 *
1893 * NOTE! This assumes that the inode count has been incremented
1894 * (or otherwise set) by the caller to indicate that it is now
1895 * in use by the dcache.
1896 */
1897 
1898void d_instantiate(struct dentry *entry, struct inode * inode)
1899{
1900	BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
1901	if (inode) {
1902		security_d_instantiate(entry, inode);
1903		spin_lock(&inode->i_lock);
1904		__d_instantiate(entry, inode);
1905		spin_unlock(&inode->i_lock);
1906	}
1907}
1908EXPORT_SYMBOL(d_instantiate);
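/*
 * Usage sketch (illustrative): a typical ->create() method allocates
 * the inode and then binds it to the dentry the VFS passed in;
 * "myfs_new_inode" is a hypothetical helper returning a pinned inode.
 *
 *	static int myfs_create(struct mnt_idmap *idmap, struct inode *dir,
 *			       struct dentry *dentry, umode_t mode, bool excl)
 *	{
 *		struct inode *inode = myfs_new_inode(dir->i_sb, mode);
 *
 *		if (!inode)
 *			return -ENOSPC;
 *		d_instantiate(dentry, inode);	// consumes the inode ref
 *		return 0;
 *	}
 */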
1909
1910/*
1911 * This should be equivalent to d_instantiate() + unlock_new_inode(),
1912 * with lockdep-related part of unlock_new_inode() done before
1913 * anything else.  Use that instead of open-coding d_instantiate()/
1914 * unlock_new_inode() combinations.
1915 */
1916void d_instantiate_new(struct dentry *entry, struct inode *inode)
1917{
1918	BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
1919	BUG_ON(!inode);
1920	lockdep_annotate_inode_mutex_key(inode);
1921	security_d_instantiate(entry, inode);
1922	spin_lock(&inode->i_lock);
1923	__d_instantiate(entry, inode);
1924	WARN_ON(!(inode->i_state & I_NEW));
1925	inode->i_state &= ~I_NEW & ~I_CREATING;
1926	/*
1927	 * Pairs with the barrier in prepare_to_wait_event() to make sure
1928	 * ___wait_var_event() either sees the bit cleared or
1929	 * waitqueue_active() check in wake_up_var() sees the waiter.
1930	 */
1931	smp_mb();
1932	inode_wake_up_bit(inode, __I_NEW);
1933	spin_unlock(&inode->i_lock);
1934}
1935EXPORT_SYMBOL(d_instantiate_new);
1936
1937struct dentry *d_make_root(struct inode *root_inode)
1938{
1939	struct dentry *res = NULL;
1940
1941	if (root_inode) {
1942		res = d_alloc_anon(root_inode->i_sb);
1943		if (res)
1944			d_instantiate(res, root_inode);
1945		else
1946			iput(root_inode);
1947	}
1948	return res;
1949}
1950EXPORT_SYMBOL(d_make_root);
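/*
 * Usage sketch (illustrative): fill_super() implementations hand their
 * pinned root inode straight to d_make_root(); on failure the inode
 * reference has already been dropped, so only -ENOMEM is left to
 * report:
 *
 *	sb->s_root = d_make_root(root_inode);
 *	if (!sb->s_root)
 *		return -ENOMEM;
 */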
1951
1952static struct dentry *__d_obtain_alias(struct inode *inode, bool disconnected)
1953{
1954	struct super_block *sb;
1955	struct dentry *new, *res;
1956
1957	if (!inode)
1958		return ERR_PTR(-ESTALE);
1959	if (IS_ERR(inode))
1960		return ERR_CAST(inode);
1961
1962	sb = inode->i_sb;
1963
1964	res = d_find_any_alias(inode); /* existing alias? */
1965	if (res)
1966		goto out;
1967
1968	new = d_alloc_anon(sb);
1969	if (!new) {
1970		res = ERR_PTR(-ENOMEM);
1971		goto out;
1972	}
1973
1974	security_d_instantiate(new, inode);
1975	spin_lock(&inode->i_lock);
1976	res = __d_find_any_alias(inode); /* recheck under lock */
1977	if (likely(!res)) { /* still no alias, attach a disconnected dentry */
1978		unsigned add_flags = d_flags_for_inode(inode);
1979
1980		if (disconnected)
1981			add_flags |= DCACHE_DISCONNECTED;
1982
1983		spin_lock(&new->d_lock);
1984		__d_set_inode_and_type(new, inode, add_flags);
1985		hlist_add_head(&new->d_u.d_alias, &inode->i_dentry);
1986		if (!disconnected) {
1987			hlist_bl_lock(&sb->s_roots);
1988			hlist_bl_add_head(&new->d_hash, &sb->s_roots);
1989			hlist_bl_unlock(&sb->s_roots);
1990		}
1991		spin_unlock(&new->d_lock);
1992		spin_unlock(&inode->i_lock);
1993		inode = NULL; /* consumed by new->d_inode */
1994		res = new;
1995	} else {
1996		spin_unlock(&inode->i_lock);
1997		dput(new);
1998	}
1999
2000 out:
2001	iput(inode);
2002	return res;
2003}
2004
2005/**
2006 * d_obtain_alias - find or allocate a DISCONNECTED dentry for a given inode
2007 * @inode: inode to allocate the dentry for
2008 *
2009 * Obtain a dentry for an inode resulting from NFS filehandle conversion or
2010 * similar open by handle operations.  The returned dentry may be anonymous,
2011 * or may have a full name (if the inode was already in the cache).
2012 *
2013 * When called on a directory inode, we must ensure that the inode only ever
2014 * has one dentry.  If a dentry is found, that is returned instead of
2015 * allocating a new one.
2016 *
2017 * On successful return, the reference to the inode has been transferred
2018 * to the dentry.  In case of an error the reference on the inode is released.
2019 * To make it easier to use in export operations a %NULL or IS_ERR inode may
2020 * be passed in and the error will be propagated to the return value,
2021 * with a %NULL @inode replaced by ERR_PTR(-ESTALE).
2022 */
2023struct dentry *d_obtain_alias(struct inode *inode)
2024{
2025	return __d_obtain_alias(inode, true);
2026}
2027EXPORT_SYMBOL(d_obtain_alias);
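/*
 * Usage sketch (illustrative): the classic caller is an exportfs
 * ->fh_to_dentry() implementation; "myfs_iget" is a hypothetical
 * helper that looks the inode up by the number stored in the handle.
 *
 *	static struct dentry *myfs_fh_to_dentry(struct super_block *sb,
 *			struct fid *fid, int fh_len, int fh_type)
 *	{
 *		struct inode *inode = myfs_iget(sb, fid->i32.ino);
 *
 *		return d_obtain_alias(inode);	// copes with NULL/IS_ERR too
 *	}
 */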
2028
2029/**
2030 * d_obtain_root - find or allocate a dentry for a given inode
2031 * @inode: inode to allocate the dentry for
2032 *
2033 * Obtain an IS_ROOT dentry for the root of a filesystem.
2034 *
2035 * We must ensure that directory inodes only ever have one dentry.  If a
2036 * dentry is found, that is returned instead of allocating a new one.
2037 *
2038 * On successful return, the reference to the inode has been transferred
2039 * to the dentry.  In case of an error the reference on the inode is
2040 * released.  A %NULL or IS_ERR inode may be passed in and the
2041 * error will be propagated to the return value, with a %NULL @inode
2042 * replaced by ERR_PTR(-ESTALE).
2043 */
2044struct dentry *d_obtain_root(struct inode *inode)
2045{
2046	return __d_obtain_alias(inode, false);
2047}
2048EXPORT_SYMBOL(d_obtain_root);
2049
2050/**
2051 * d_add_ci - lookup or allocate new dentry with case-exact name
2052 * @dentry: the negative dentry that was passed to the parent's lookup func
2053 * @inode:  the inode case-insensitive lookup has found
2054 * @name:   the case-exact name to be associated with the returned dentry
2055 *
2056 * This is to avoid filling the dcache with case-insensitive names to the
2057 * same inode; only the actual correct case is stored in the dcache for
2058 * case-insensitive filesystems.
2059 *
2060 * If the lookup matched case-insensitively and the case-exact dentry
2061 * already exists in the dcache, use and return it.
2062 *
2063 * If no entry exists with the exact case name, allocate new dentry with
2064 * the exact case, and return the spliced entry.
2065 */
2066struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
2067			struct qstr *name)
2068{
2069	struct dentry *found, *res;
2070
2071	/*
2072	 * First check if a dentry matching the name already exists,
2073	 * if not go ahead and create it now.
2074	 */
2075	found = d_hash_and_lookup(dentry->d_parent, name);
2076	if (found) {
2077		iput(inode);
2078		return found;
2079	}
2080	if (d_in_lookup(dentry)) {
2081		found = d_alloc_parallel(dentry->d_parent, name,
2082					dentry->d_wait);
2083		if (IS_ERR(found) || !d_in_lookup(found)) {
2084			iput(inode);
2085			return found;
2086		}
2087	} else {
2088		found = d_alloc(dentry->d_parent, name);
2089		if (!found) {
2090			iput(inode);
2091			return ERR_PTR(-ENOMEM);
2092		} 
2093	}
2094	res = d_splice_alias(inode, found);
2095	if (res) {
2096		d_lookup_done(found);
2097		dput(found);
2098		return res;
2099	}
2100	return found;
2101}
2102EXPORT_SYMBOL(d_add_ci);
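/*
 * Usage sketch (illustrative): a case-insensitive ->lookup() that has
 * found the inode and recovered the on-disk spelling of the name would
 * finish with d_add_ci(); "exact_name" is assumed to be a qstr built
 * from the directory entry that actually matched:
 *
 *	return d_add_ci(dentry, inode, &exact_name);
 */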
2103
2104/**
2105 * d_same_name - compare dentry name with case-exact name
2106 * @dentry: dentry whose name is to be compared
2107 * @parent: parent dentry
2108 * @name:   name to compare against
2109 *
2110 * Return: true if the names are the same, false otherwise
2111 */
2112bool d_same_name(const struct dentry *dentry, const struct dentry *parent,
2113		 const struct qstr *name)
2114{
2115	if (likely(!(parent->d_flags & DCACHE_OP_COMPARE))) {
2116		if (dentry->d_name.len != name->len)
2117			return false;
2118		return dentry_cmp(dentry, name->name, name->len) == 0;
2119	}
2120	return parent->d_op->d_compare(dentry,
2121				       dentry->d_name.len, dentry->d_name.name,
2122				       name) == 0;
2123}
2124EXPORT_SYMBOL_GPL(d_same_name);
2125
2126/*
2127 * This is __d_lookup_rcu() when the parent dentry has
2128 * DCACHE_OP_COMPARE, which makes things much nastier.
2129 */
2130static noinline struct dentry *__d_lookup_rcu_op_compare(
2131	const struct dentry *parent,
2132	const struct qstr *name,
2133	unsigned *seqp)
2134{
2135	u64 hashlen = name->hash_len;
2136	struct hlist_bl_head *b = d_hash(hashlen);
2137	struct hlist_bl_node *node;
2138	struct dentry *dentry;
2139
2140	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
2141		int tlen;
2142		const char *tname;
2143		unsigned seq;
2144
2145seqretry:
2146		seq = raw_seqcount_begin(&dentry->d_seq);
2147		if (dentry->d_parent != parent)
2148			continue;
2149		if (d_unhashed(dentry))
2150			continue;
2151		if (dentry->d_name.hash != hashlen_hash(hashlen))
2152			continue;
2153		tlen = dentry->d_name.len;
2154		tname = dentry->d_name.name;
2155		/* we want a consistent (name,len) pair */
2156		if (read_seqcount_retry(&dentry->d_seq, seq)) {
2157			cpu_relax();
2158			goto seqretry;
2159		}
2160		if (parent->d_op->d_compare(dentry, tlen, tname, name) != 0)
2161			continue;
2162		*seqp = seq;
2163		return dentry;
2164	}
2165	return NULL;
2166}
2167
2168/**
2169 * __d_lookup_rcu - search for a dentry (racy, store-free)
2170 * @parent: parent dentry
2171 * @name: qstr of name we wish to find
2172 * @seqp: returns d_seq value at the point where the dentry was found
2173 * Returns: dentry, or NULL
2174 *
2175 * __d_lookup_rcu is the dcache lookup function for rcu-walk name
2176 * resolution (store-free path walking) design described in
2177 * Documentation/filesystems/path-lookup.txt.
2178 *
2179 * This is not to be used outside core vfs.
2180 *
2181 * __d_lookup_rcu must only be used in rcu-walk mode, ie. with vfsmount lock
2182 * held, and rcu_read_lock held. The returned dentry must not be stored into
2183 * without taking d_lock and checking d_seq sequence count against @seq
2184 * returned here.
2185 *
2186 * Alternatively, __d_lookup_rcu may be called again to look up the child of
2187 * the returned dentry, so long as its parent's seqlock is checked after the
2188 * child is looked up. Thus, an interlocking stepping of sequence lock checks
2189 * is formed, giving integrity down the path walk.
2190 *
2191 * NOTE! The caller *has* to check the resulting dentry against the sequence
2192 * number we've returned before using any of the resulting dentry state!
2193 */
2194struct dentry *__d_lookup_rcu(const struct dentry *parent,
2195				const struct qstr *name,
2196				unsigned *seqp)
2197{
2198	u64 hashlen = name->hash_len;
2199	const unsigned char *str = name->name;
2200	struct hlist_bl_head *b = d_hash(hashlen);
2201	struct hlist_bl_node *node;
2202	struct dentry *dentry;
2203
2204	/*
2205	 * Note: There is significant duplication with __d_lookup() which is
2206	 * required to prevent single-threaded performance regressions,
2207	 * especially on architectures where smp_rmb (in seqcounts) is costly.
2208	 * Keep the two functions in sync.
2209	 */
2210
2211	if (unlikely(parent->d_flags & DCACHE_OP_COMPARE))
2212		return __d_lookup_rcu_op_compare(parent, name, seqp);
2213
2214	/*
2215	 * The hash list is protected using RCU.
2216	 *
2217	 * Carefully use d_seq when comparing a candidate dentry, to avoid
2218	 * races with d_move().
2219	 *
2220	 * It is possible that concurrent renames can mess up our list
2221	 * walk here and result in missing our dentry, resulting in the
2222	 * false-negative result. d_lookup() protects against concurrent
2223	 * renames using rename_lock seqlock.
2224	 *
2225	 * See Documentation/filesystems/path-lookup.txt for more details.
2226	 */
2227	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
2228		unsigned seq;
2229
2230		/*
2231		 * The dentry sequence count protects us from concurrent
2232		 * renames, and thus protects parent and name fields.
2233		 *
2234		 * The caller must perform a seqcount check in order
2235		 * to do anything useful with the returned dentry.
2236		 *
2237		 * NOTE! We do a "raw" seqcount_begin here. That means that
2238		 * we don't wait for the sequence count to stabilize if it
2239		 * is in the middle of a sequence change. If we do the slow
2240		 * dentry compare, we will do seqretries until it is stable,
2241		 * and if we end up with a successful lookup, we actually
2242		 * want to exit RCU lookup anyway.
2243		 *
2244		 * Note that raw_seqcount_begin still *does* smp_rmb(), so
2245		 * we are still guaranteed NUL-termination of ->d_name.name.
2246		 */
2247		seq = raw_seqcount_begin(&dentry->d_seq);
2248		if (dentry->d_parent != parent)
2249			continue;
2250		if (d_unhashed(dentry))
2251			continue;
2252		if (dentry->d_name.hash_len != hashlen)
2253			continue;
2254		if (dentry_cmp(dentry, str, hashlen_len(hashlen)) != 0)
2255			continue;
2256		*seqp = seq;
2257		return dentry;
2258	}
2259	return NULL;
2260}
2261
2262/**
2263 * d_lookup - search for a dentry
2264 * @parent: parent dentry
2265 * @name: qstr of name we wish to find
2266 * Returns: dentry, or NULL
2267 *
2268 * d_lookup searches the children of the parent dentry for the name in
2269 * question. If the dentry is found its reference count is incremented and the
2270 * dentry is returned. The caller must use dput to free the entry when it has
2271 * finished using it. %NULL is returned if the dentry does not exist.
2272 */
2273struct dentry *d_lookup(const struct dentry *parent, const struct qstr *name)
2274{
2275	struct dentry *dentry;
2276	unsigned seq;
2277
2278	do {
2279		seq = read_seqbegin(&rename_lock);
2280		dentry = __d_lookup(parent, name);
2281		if (dentry)
2282			break;
2283	} while (read_seqretry(&rename_lock, seq));
2284	return dentry;
2285}
2286EXPORT_SYMBOL(d_lookup);
2287
2288/**
2289 * __d_lookup - search for a dentry (racy)
2290 * @parent: parent dentry
2291 * @name: qstr of name we wish to find
2292 * Returns: dentry, or NULL
2293 *
2294 * __d_lookup is like d_lookup, however it may (rarely) return a
2295 * false-negative result due to unrelated rename activity.
2296 *
2297 * __d_lookup is slightly faster by avoiding rename_lock read seqlock,
2298 * however it must be used carefully, eg. with a following d_lookup in
2299 * the case of failure.
2300 *
2301 * __d_lookup callers must be commented.
2302 */
2303struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
2304{
2305	unsigned int hash = name->hash;
2306	struct hlist_bl_head *b = d_hash(hash);
2307	struct hlist_bl_node *node;
2308	struct dentry *found = NULL;
2309	struct dentry *dentry;
2310
2311	/*
2312	 * Note: There is significant duplication with __d_lookup_rcu which is
2313	 * required to prevent single-threaded performance regressions,
2314	 * especially on architectures where smp_rmb (in seqcounts) is costly.
2315	 * Keep the two functions in sync.
2316	 */
2317
2318	/*
2319	 * The hash list is protected using RCU.
2320	 *
2321	 * Take d_lock when comparing a candidate dentry, to avoid races
2322	 * with d_move().
2323	 *
2324	 * It is possible that concurrent renames can mess up our list
2325	 * walk here and result in missing our dentry, resulting in the
2326	 * false-negative result. d_lookup() protects against concurrent
2327	 * renames using rename_lock seqlock.
2328	 *
2329	 * See Documentation/filesystems/path-lookup.txt for more details.
2330	 */
2331	rcu_read_lock();
2332	
2333	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
2334
2335		if (dentry->d_name.hash != hash)
2336			continue;
2337
2338		spin_lock(&dentry->d_lock);
2339		if (dentry->d_parent != parent)
2340			goto next;
2341		if (d_unhashed(dentry))
2342			goto next;
2343
2344		if (!d_same_name(dentry, parent, name))
2345			goto next;
2346
2347		dentry->d_lockref.count++;
2348		found = dentry;
2349		spin_unlock(&dentry->d_lock);
2350		break;
2351next:
2352		spin_unlock(&dentry->d_lock);
2353 	}
2354 	rcu_read_unlock();
2355
2356 	return found;
2357}
2358
2359/**
2360 * d_hash_and_lookup - hash the qstr then search for a dentry
2361 * @dir: Directory to search in
2362 * @name: qstr of name we wish to find
2363 *
2364 * On lookup failure NULL is returned; on a bad name, ERR_PTR(-error) is returned.
2365 */
2366struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
2367{
2368	/*
2369	 * Check for a fs-specific hash function. Note that we must
2370	 * calculate the standard hash first, as the d_op->d_hash()
2371	 * routine may choose to leave the hash value unchanged.
2372	 */
2373	name->hash = full_name_hash(dir, name->name, name->len);
2374	if (dir->d_flags & DCACHE_OP_HASH) {
2375		int err = dir->d_op->d_hash(dir, name);
2376		if (unlikely(err < 0))
2377			return ERR_PTR(err);
2378	}
2379	return d_lookup(dir, name);
2380}
2381EXPORT_SYMBOL(d_hash_and_lookup);
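/*
 * Usage sketch (illustrative): callers starting from a bare string
 * build a qstr, let d_hash_and_lookup() apply any fs-specific hash,
 * and drop the reference when done:
 *
 *	struct qstr q = QSTR_INIT("victim", 6);
 *	struct dentry *child = d_hash_and_lookup(dir, &q);
 *
 *	if (!IS_ERR_OR_NULL(child)) {
 *		// ... use child ...
 *		dput(child);
 *	}
 */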
2382
2383/*
2384 * When a file is deleted, we have two options:
2385 * - turn this dentry into a negative dentry
2386 * - unhash this dentry and free it.
2387 *
2388 * Usually, we want to just turn this into
2389 * a negative dentry, but if anybody else is
2390 * currently using the dentry or the inode
2391 * we can't do that and we fall back on removing
2392 * it from the hash queues and waiting for
2393 * it to be deleted later when it has no users
2394 */
2395 
2396/**
2397 * d_delete - delete a dentry
2398 * @dentry: The dentry to delete
2399 *
2400 * Turn the dentry into a negative dentry if possible, otherwise
2401 * remove it from the hash queues so it can be deleted later
2402 */
2403 
2404void d_delete(struct dentry * dentry)
2405{
2406	struct inode *inode = dentry->d_inode;
2407
2408	spin_lock(&inode->i_lock);
2409	spin_lock(&dentry->d_lock);
2410	/*
2411	 * Are we the only user?
2412	 */
2413	if (dentry->d_lockref.count == 1) {
2414		if (dentry_negative_policy)
2415			__d_drop(dentry);
2416		dentry->d_flags &= ~DCACHE_CANT_MOUNT;
2417		dentry_unlink_inode(dentry);
2418	} else {
2419		__d_drop(dentry);
2420		spin_unlock(&dentry->d_lock);
2421		spin_unlock(&inode->i_lock);
2422	}
2423}
2424EXPORT_SYMBOL(d_delete);
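/*
 * Usage sketch (illustrative): once ->unlink() has removed the name
 * from the backing store, the VFS (roughly) finishes with:
 *
 *	error = dir->i_op->unlink(dir, dentry);
 *	if (!error)
 *		d_delete(dentry);	// negative dentry, or unhash + defer
 */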
2425
2426static void __d_rehash(struct dentry *entry)
2427{
2428	struct hlist_bl_head *b = d_hash(entry->d_name.hash);
2429
2430	hlist_bl_lock(b);
2431	hlist_bl_add_head_rcu(&entry->d_hash, b);
2432	hlist_bl_unlock(b);
2433}
2434
2435/**
2436 * d_rehash	- add an entry back to the hash
2437 * @entry: dentry to add to the hash
2438 *
2439 * Adds a dentry to the hash according to its name.
2440 */
2441 
2442void d_rehash(struct dentry * entry)
2443{
2444	spin_lock(&entry->d_lock);
2445	__d_rehash(entry);
2446	spin_unlock(&entry->d_lock);
2447}
2448EXPORT_SYMBOL(d_rehash);
2449
2450static inline unsigned start_dir_add(struct inode *dir)
2451{
2452	preempt_disable_nested();
2453	for (;;) {
2454		unsigned n = dir->i_dir_seq;
2455		if (!(n & 1) && cmpxchg(&dir->i_dir_seq, n, n + 1) == n)
2456			return n;
2457		cpu_relax();
2458	}
2459}
2460
2461static inline void end_dir_add(struct inode *dir, unsigned int n,
2462			       wait_queue_head_t *d_wait)
2463{
2464	smp_store_release(&dir->i_dir_seq, n + 2);
2465	preempt_enable_nested();
2466	wake_up_all(d_wait);
2467}
2468
2469static void d_wait_lookup(struct dentry *dentry)
2470{
2471	if (d_in_lookup(dentry)) {
2472		DECLARE_WAITQUEUE(wait, current);
2473		add_wait_queue(dentry->d_wait, &wait);
2474		do {
2475			set_current_state(TASK_UNINTERRUPTIBLE);
2476			spin_unlock(&dentry->d_lock);
2477			schedule();
2478			spin_lock(&dentry->d_lock);
2479		} while (d_in_lookup(dentry));
2480	}
2481}
2482
2483struct dentry *d_alloc_parallel(struct dentry *parent,
2484				const struct qstr *name,
2485				wait_queue_head_t *wq)
2486{
2487	unsigned int hash = name->hash;
2488	struct hlist_bl_head *b = in_lookup_hash(parent, hash);
2489	struct hlist_bl_node *node;
2490	struct dentry *new = d_alloc(parent, name);
2491	struct dentry *dentry;
2492	unsigned seq, r_seq, d_seq;
2493
2494	if (unlikely(!new))
2495		return ERR_PTR(-ENOMEM);
2496
2497retry:
2498	rcu_read_lock();
2499	seq = smp_load_acquire(&parent->d_inode->i_dir_seq);
2500	r_seq = read_seqbegin(&rename_lock);
2501	dentry = __d_lookup_rcu(parent, name, &d_seq);
2502	if (unlikely(dentry)) {
2503		if (!lockref_get_not_dead(&dentry->d_lockref)) {
2504			rcu_read_unlock();
2505			goto retry;
2506		}
2507		if (read_seqcount_retry(&dentry->d_seq, d_seq)) {
2508			rcu_read_unlock();
2509			dput(dentry);
2510			goto retry;
2511		}
2512		rcu_read_unlock();
2513		dput(new);
2514		return dentry;
2515	}
2516	if (unlikely(read_seqretry(&rename_lock, r_seq))) {
2517		rcu_read_unlock();
2518		goto retry;
2519	}
2520
2521	if (unlikely(seq & 1)) {
2522		rcu_read_unlock();
2523		goto retry;
2524	}
2525
2526	hlist_bl_lock(b);
2527	if (unlikely(READ_ONCE(parent->d_inode->i_dir_seq) != seq)) {
2528		hlist_bl_unlock(b);
2529		rcu_read_unlock();
2530		goto retry;
2531	}
2532	/*
2533	 * No changes for the parent since the beginning of d_lookup().
2534	 * Since all removals from the chain happen with hlist_bl_lock(),
2535	 * any potential in-lookup matches are going to stay here until
2536	 * we unlock the chain.  All fields are stable in everything
2537	 * we encounter.
2538	 */
2539	hlist_bl_for_each_entry(dentry, node, b, d_u.d_in_lookup_hash) {
2540		if (dentry->d_name.hash != hash)
2541			continue;
2542		if (dentry->d_parent != parent)
2543			continue;
2544		if (!d_same_name(dentry, parent, name))
2545			continue;
2546		hlist_bl_unlock(b);
2547		/* now we can try to grab a reference */
2548		if (!lockref_get_not_dead(&dentry->d_lockref)) {
2549			rcu_read_unlock();
2550			goto retry;
2551		}
2552
2553		rcu_read_unlock();
2554		/*
2555		 * somebody is likely to be still doing lookup for it;
2556		 * wait for them to finish
2557		 */
2558		spin_lock(&dentry->d_lock);
2559		d_wait_lookup(dentry);
2560		/*
2561		 * it's not in-lookup anymore; in principle we should repeat
2562		 * everything from dcache lookup, but it's likely to be what
2563		 * d_lookup() would've found anyway.  If it is, just return it;
2564		 * otherwise we really have to repeat the whole thing.
2565		 */
2566		if (unlikely(dentry->d_name.hash != hash))
2567			goto mismatch;
2568		if (unlikely(dentry->d_parent != parent))
2569			goto mismatch;
2570		if (unlikely(d_unhashed(dentry)))
2571			goto mismatch;
2572		if (unlikely(!d_same_name(dentry, parent, name)))
2573			goto mismatch;
2574		/* OK, it *is* a hashed match; return it */
2575		spin_unlock(&dentry->d_lock);
2576		dput(new);
2577		return dentry;
2578	}
2579	rcu_read_unlock();
2580	/* we can't take ->d_lock here; it's OK, though. */
2581	new->d_flags |= DCACHE_PAR_LOOKUP;
2582	new->d_wait = wq;
2583	hlist_bl_add_head(&new->d_u.d_in_lookup_hash, b);
2584	hlist_bl_unlock(b);
2585	return new;
2586mismatch:
2587	spin_unlock(&dentry->d_lock);
2588	dput(dentry);
2589	goto retry;
2590}
2591EXPORT_SYMBOL(d_alloc_parallel);
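/*
 * Usage sketch (illustrative): a filesystem instantiating dentries
 * outside ->lookup() (e.g. from a readdir-plus style reply) follows
 * this pattern; whoever ends up owning the in-lookup dentry must pair
 * it with d_lookup_done().  "name" is assumed to be a filled-in qstr.
 *
 *	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
 *	struct dentry *dentry = d_alloc_parallel(parent, &name, &wq);
 *
 *	if (IS_ERR(dentry))
 *		return PTR_ERR(dentry);
 *	if (d_in_lookup(dentry)) {
 *		// we own the lookup: attach the inode, then ...
 *		d_lookup_done(dentry);
 *	}
 *	// else somebody else's dentry won the race; just use it
 *	dput(dentry);
 */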
2592
2593/*
2594 * - Unhash the dentry
2595 * - Retrieve and clear the waitqueue head in dentry
2596 * - Return the waitqueue head
2597 */
2598static wait_queue_head_t *__d_lookup_unhash(struct dentry *dentry)
2599{
2600	wait_queue_head_t *d_wait;
2601	struct hlist_bl_head *b;
2602
2603	lockdep_assert_held(&dentry->d_lock);
2604
2605	b = in_lookup_hash(dentry->d_parent, dentry->d_name.hash);
2606	hlist_bl_lock(b);
2607	dentry->d_flags &= ~DCACHE_PAR_LOOKUP;
2608	__hlist_bl_del(&dentry->d_u.d_in_lookup_hash);
2609	d_wait = dentry->d_wait;
2610	dentry->d_wait = NULL;
2611	hlist_bl_unlock(b);
2612	INIT_HLIST_NODE(&dentry->d_u.d_alias);
2613	INIT_LIST_HEAD(&dentry->d_lru);
2614	return d_wait;
2615}
2616
2617void __d_lookup_unhash_wake(struct dentry *dentry)
2618{
2619	spin_lock(&dentry->d_lock);
2620	wake_up_all(__d_lookup_unhash(dentry));
2621	spin_unlock(&dentry->d_lock);
2622}
2623EXPORT_SYMBOL(__d_lookup_unhash_wake);
2624
2625/* inode->i_lock held if inode is non-NULL */
2626
2627static inline void __d_add(struct dentry *dentry, struct inode *inode)
2628{
2629	wait_queue_head_t *d_wait;
2630	struct inode *dir = NULL;
2631	unsigned n;
2632	spin_lock(&dentry->d_lock);
2633	if (unlikely(d_in_lookup(dentry))) {
2634		dir = dentry->d_parent->d_inode;
2635		n = start_dir_add(dir);
2636		d_wait = __d_lookup_unhash(dentry);
2637	}
2638	if (inode) {
2639		unsigned add_flags = d_flags_for_inode(inode);
2640		hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
2641		raw_write_seqcount_begin(&dentry->d_seq);
2642		__d_set_inode_and_type(dentry, inode, add_flags);
2643		raw_write_seqcount_end(&dentry->d_seq);
2644		fsnotify_update_flags(dentry);
2645	}
2646	__d_rehash(dentry);
2647	if (dir)
2648		end_dir_add(dir, n, d_wait);
2649	spin_unlock(&dentry->d_lock);
2650	if (inode)
2651		spin_unlock(&inode->i_lock);
2652}
2653
2654/**
2655 * d_add - add dentry to hash queues
2656 * @entry: dentry to add
2657 * @inode: The inode to attach to this dentry
2658 *
2659 * This adds the entry to the hash queues and initializes @inode.
2660 * The entry was actually filled in earlier during d_alloc().
2661 */
2662
2663void d_add(struct dentry *entry, struct inode *inode)
2664{
2665	if (inode) {
2666		security_d_instantiate(entry, inode);
2667		spin_lock(&inode->i_lock);
2668	}
2669	__d_add(entry, inode);
2670}
2671EXPORT_SYMBOL(d_add);
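/*
 * Usage sketch (illustrative): the simplest ->lookup() resolves the
 * name and calls d_add() unconditionally, so misses become negative
 * dentries; "myfs_find_inode" is a hypothetical helper returning a
 * pinned inode or NULL.
 *
 *	static struct dentry *myfs_lookup(struct inode *dir,
 *			struct dentry *dentry, unsigned int flags)
 *	{
 *		struct inode *inode = myfs_find_inode(dir, &dentry->d_name);
 *
 *		d_add(dentry, inode);	// NULL inode => negative dentry
 *		return NULL;
 *	}
 */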
2672
2673/**
2674 * d_exact_alias - find and hash an exact unhashed alias
2675 * @entry: dentry to add
2676 * @inode: The inode to go with this dentry
2677 *
2678 * If an unhashed dentry with the same name/parent and desired
2679 * inode already exists, hash and return it.  Otherwise, return
2680 * NULL.
2681 *
2682 * Parent directory should be locked.
2683 */
2684struct dentry *d_exact_alias(struct dentry *entry, struct inode *inode)
2685{
2686	struct dentry *alias;
2687	unsigned int hash = entry->d_name.hash;
2688
2689	spin_lock(&inode->i_lock);
2690	hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
2691		/*
2692		 * Don't need alias->d_lock here, because aliases with
2693		 * d_parent == entry->d_parent are not subject to name or
2694		 * parent changes, because the parent inode i_mutex is held.
2695		 */
2696		if (alias->d_name.hash != hash)
2697			continue;
2698		if (alias->d_parent != entry->d_parent)
2699			continue;
2700		if (!d_same_name(alias, entry->d_parent, &entry->d_name))
2701			continue;
2702		spin_lock(&alias->d_lock);
2703		if (!d_unhashed(alias)) {
2704			spin_unlock(&alias->d_lock);
2705			alias = NULL;
2706		} else {
2707			dget_dlock(alias);
2708			__d_rehash(alias);
2709			spin_unlock(&alias->d_lock);
2710		}
2711		spin_unlock(&inode->i_lock);
2712		return alias;
2713	}
2714	spin_unlock(&inode->i_lock);
2715	return NULL;
2716}
2717EXPORT_SYMBOL(d_exact_alias);
2718
2719static void swap_names(struct dentry *dentry, struct dentry *target)
2720{
2721	if (unlikely(dname_external(target))) {
2722		if (unlikely(dname_external(dentry))) {
2723			/*
2724			 * Both external: swap the pointers
2725			 */
2726			swap(target->d_name.name, dentry->d_name.name);
2727		} else {
2728			/*
2729			 * dentry:internal, target:external.  Steal target's
2730			 * storage and make target internal.
2731			 */
2732			memcpy(target->d_iname, dentry->d_name.name,
2733					dentry->d_name.len + 1);
2734			dentry->d_name.name = target->d_name.name;
2735			target->d_name.name = target->d_iname;
2736		}
2737	} else {
2738		if (unlikely(dname_external(dentry))) {
2739			/*
2740			 * dentry:external, target:internal.  Give dentry's
2741			 * storage to target and make dentry internal
2742			 */
2743			memcpy(dentry->d_iname, target->d_name.name,
2744					target->d_name.len + 1);
2745			target->d_name.name = dentry->d_name.name;
2746			dentry->d_name.name = dentry->d_iname;
2747		} else {
2748			/*
2749			 * Both are internal.
2750			 */
2751			unsigned int i;
2752			BUILD_BUG_ON(!IS_ALIGNED(DNAME_INLINE_LEN, sizeof(long)));
2753			for (i = 0; i < DNAME_INLINE_LEN / sizeof(long); i++) {
2754				swap(((long *) &dentry->d_iname)[i],
2755				     ((long *) &target->d_iname)[i]);
2756			}
2757		}
2758	}
2759	swap(dentry->d_name.hash_len, target->d_name.hash_len);
2760}
2761
2762static void copy_name(struct dentry *dentry, struct dentry *target)
2763{
2764	struct external_name *old_name = NULL;
2765	if (unlikely(dname_external(dentry)))
2766		old_name = external_name(dentry);
2767	if (unlikely(dname_external(target))) {
2768		atomic_inc(&external_name(target)->u.count);
2769		dentry->d_name = target->d_name;
2770	} else {
2771		memcpy(dentry->d_iname, target->d_name.name,
2772				target->d_name.len + 1);
2773		dentry->d_name.name = dentry->d_iname;
2774		dentry->d_name.hash_len = target->d_name.hash_len;
2775	}
2776	if (old_name && likely(atomic_dec_and_test(&old_name->u.count)))
2777		kfree_rcu(old_name, u.head);
2778}
2779
2780/*
2781 * __d_move - move a dentry
2782 * @dentry: entry to move
2783 * @target: new dentry
2784 * @exchange: exchange the two dentries
2785 *
2786 * Update the dcache to reflect the move of a file name. Negative
2787 * dcache entries should not be moved in this way. Caller must hold
2788 * rename_lock, the i_mutex of the source and target directories,
2789 * and the sb->s_vfs_rename_mutex if they differ. See lock_rename().
2790 */
2791static void __d_move(struct dentry *dentry, struct dentry *target,
2792		     bool exchange)
2793{
2794	struct dentry *old_parent, *p;
2795	wait_queue_head_t *d_wait;
2796	struct inode *dir = NULL;
2797	unsigned n;
2798
2799	WARN_ON(!dentry->d_inode);
2800	if (WARN_ON(dentry == target))
2801		return;
2802
2803	BUG_ON(d_ancestor(target, dentry));
2804	old_parent = dentry->d_parent;
2805	p = d_ancestor(old_parent, target);
2806	if (IS_ROOT(dentry)) {
2807		BUG_ON(p);
2808		spin_lock(&target->d_parent->d_lock);
2809	} else if (!p) {
2810		/* target is not a descendent of dentry->d_parent */
2811		spin_lock(&target->d_parent->d_lock);
2812		spin_lock_nested(&old_parent->d_lock, DENTRY_D_LOCK_NESTED);
2813	} else {
2814		BUG_ON(p == dentry);
2815		spin_lock(&old_parent->d_lock);
2816		if (p != target)
2817			spin_lock_nested(&target->d_parent->d_lock,
2818					DENTRY_D_LOCK_NESTED);
2819	}
2820	spin_lock_nested(&dentry->d_lock, 2);
2821	spin_lock_nested(&target->d_lock, 3);
2822
2823	if (unlikely(d_in_lookup(target))) {
2824		dir = target->d_parent->d_inode;
2825		n = start_dir_add(dir);
2826		d_wait = __d_lookup_unhash(target);
2827	}
2828
2829	write_seqcount_begin(&dentry->d_seq);
2830	write_seqcount_begin_nested(&target->d_seq, DENTRY_D_LOCK_NESTED);
2831
2832	/* unhash both */
2833	if (!d_unhashed(dentry))
2834		___d_drop(dentry);
2835	if (!d_unhashed(target))
2836		___d_drop(target);
2837
2838	/* ... and switch them in the tree */
2839	dentry->d_parent = target->d_parent;
2840	if (!exchange) {
2841		copy_name(dentry, target);
2842		target->d_hash.pprev = NULL;
2843		dentry->d_parent->d_lockref.count++;
2844		if (dentry != old_parent) /* wasn't IS_ROOT */
2845			WARN_ON(!--old_parent->d_lockref.count);
2846	} else {
2847		target->d_parent = old_parent;
2848		swap_names(dentry, target);
2849		if (!hlist_unhashed(&target->d_sib))
2850			__hlist_del(&target->d_sib);
2851		hlist_add_head(&target->d_sib, &target->d_parent->d_children);
2852		__d_rehash(target);
2853		fsnotify_update_flags(target);
2854	}
2855	if (!hlist_unhashed(&dentry->d_sib))
2856		__hlist_del(&dentry->d_sib);
2857	hlist_add_head(&dentry->d_sib, &dentry->d_parent->d_children);
2858	__d_rehash(dentry);
2859	fsnotify_update_flags(dentry);
2860	fscrypt_handle_d_move(dentry);
2861
2862	write_seqcount_end(&target->d_seq);
2863	write_seqcount_end(&dentry->d_seq);
2864
2865	if (dir)
2866		end_dir_add(dir, n, d_wait);
2867
2868	if (dentry->d_parent != old_parent)
2869		spin_unlock(&dentry->d_parent->d_lock);
2870	if (dentry != old_parent)
2871		spin_unlock(&old_parent->d_lock);
2872	spin_unlock(&target->d_lock);
2873	spin_unlock(&dentry->d_lock);
2874}
2875
2876/*
2877 * d_move - move a dentry
2878 * @dentry: entry to move
2879 * @target: new dentry
2880 *
2881 * Update the dcache to reflect the move of a file name. Negative
2882 * dcache entries should not be moved in this way. See the locking
2883 * requirements for __d_move.
2884 */
2885void d_move(struct dentry *dentry, struct dentry *target)
2886{
2887	write_seqlock(&rename_lock);
2888	__d_move(dentry, target, false);
2889	write_sequnlock(&rename_lock);
2890}
2891EXPORT_SYMBOL(d_move);
2892
2893/*
2894 * d_exchange - exchange two dentries
2895 * @dentry1: first dentry
2896 * @dentry2: second dentry
2897 */
2898void d_exchange(struct dentry *dentry1, struct dentry *dentry2)
2899{
2900	write_seqlock(&rename_lock);
2901
2902	WARN_ON(!dentry1->d_inode);
2903	WARN_ON(!dentry2->d_inode);
2904	WARN_ON(IS_ROOT(dentry1));
2905	WARN_ON(IS_ROOT(dentry2));
2906
2907	__d_move(dentry1, dentry2, true);
2908
2909	write_sequnlock(&rename_lock);
2910}
2911
2912/**
2913 * d_ancestor - search for an ancestor
2914 * @p1: ancestor dentry
2915 * @p2: child dentry
2916 *
2917 * Returns the ancestor dentry of p2 which is a child of p1, if p1 is
2918 * an ancestor of p2, else NULL.
2919 */
2920struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2)
2921{
2922	struct dentry *p;
2923
2924	for (p = p2; !IS_ROOT(p); p = p->d_parent) {
2925		if (p->d_parent == p1)
2926			return p;
2927	}
2928	return NULL;
2929}
2930
2931/*
2932 * This helper attempts to cope with remotely renamed directories
2933 *
2934 * It assumes that the caller is already holding
2935 * dentry->d_parent->d_inode->i_mutex, and rename_lock
2936 *
2937 * Note: If ever the locking in lock_rename() changes, then please
2938 * remember to update this too...
2939 */
2940static int __d_unalias(struct dentry *dentry, struct dentry *alias)
2941{
2942	struct mutex *m1 = NULL;
2943	struct rw_semaphore *m2 = NULL;
2944	int ret = -ESTALE;
2945
2946	/* If alias and dentry share a parent, then no extra locks required */
2947	if (alias->d_parent == dentry->d_parent)
2948		goto out_unalias;
2949
2950	/* See lock_rename() */
2951	if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex))
2952		goto out_err;
2953	m1 = &dentry->d_sb->s_vfs_rename_mutex;
2954	if (!inode_trylock_shared(alias->d_parent->d_inode))
2955		goto out_err;
2956	m2 = &alias->d_parent->d_inode->i_rwsem;
2957out_unalias:
2958	__d_move(alias, dentry, false);
2959	ret = 0;
2960out_err:
2961	if (m2)
2962		up_read(m2);
2963	if (m1)
2964		mutex_unlock(m1);
2965	return ret;
2966}
2967
2968/**
2969 * d_splice_alias - splice a disconnected dentry into the tree if one exists
2970 * @inode:  the inode which may have a disconnected dentry
2971 * @dentry: a negative dentry which we want to point to the inode.
2972 *
2973 * If inode is a directory and has an IS_ROOT alias, then d_move that in
2974 * place of the given dentry and return it, else simply d_add the inode
2975 * to the dentry and return NULL.
2976 *
2977 * If a non-IS_ROOT directory is found, the filesystem is corrupt, and
2978 * we should error out: directories can't have multiple aliases.
2979 *
2980 * This is needed in the lookup routine of any filesystem that is exportable
2981 * (via knfsd) so that we can build dcache paths to directories effectively.
2982 *
2983 * If a dentry was found and moved, then it is returned.  Otherwise NULL
2984 * is returned.  This matches the expected return value of ->lookup.
2985 *
2986 * Cluster filesystems may call this function with a negative, hashed dentry.
2987 * In that case, we know that the inode will be a regular file, and also this
2988 * will only occur during atomic_open. So we need to check for the dentry
2989 * being already hashed only in the final case.
2990 */
2991struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
2992{
2993	if (IS_ERR(inode))
2994		return ERR_CAST(inode);
2995
2996	BUG_ON(!d_unhashed(dentry));
2997
2998	if (!inode)
2999		goto out;
3000
3001	security_d_instantiate(dentry, inode);
3002	spin_lock(&inode->i_lock);
3003	if (S_ISDIR(inode->i_mode)) {
3004		struct dentry *new = __d_find_any_alias(inode);
3005		if (unlikely(new)) {
3006			/* The reference to new ensures it remains an alias */
3007			spin_unlock(&inode->i_lock);
3008			write_seqlock(&rename_lock);
3009			if (unlikely(d_ancestor(new, dentry))) {
3010				write_sequnlock(&rename_lock);
3011				dput(new);
3012				new = ERR_PTR(-ELOOP);
3013				pr_warn_ratelimited(
3014					"VFS: Lookup of '%s' in %s %s"
3015					" would have caused loop\n",
3016					dentry->d_name.name,
3017					inode->i_sb->s_type->name,
3018					inode->i_sb->s_id);
3019			} else if (!IS_ROOT(new)) {
3020				struct dentry *old_parent = dget(new->d_parent);
3021				int err = __d_unalias(dentry, new);
3022				write_sequnlock(&rename_lock);
3023				if (err) {
3024					dput(new);
3025					new = ERR_PTR(err);
3026				}
3027				dput(old_parent);
3028			} else {
3029				__d_move(new, dentry, false);
3030				write_sequnlock(&rename_lock);
3031			}
3032			iput(inode);
3033			return new;
3034		}
3035	}
3036out:
3037	__d_add(dentry, inode);
3038	return NULL;
3039}
3040EXPORT_SYMBOL(d_splice_alias);
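/*
 * Usage sketch (illustrative): exportable filesystems end ->lookup()
 * with d_splice_alias() rather than d_add(), so that a preexisting
 * disconnected directory alias gets reconnected; "myfs_iget_by_name"
 * is a hypothetical helper returning a pinned inode, NULL or an
 * ERR_PTR().
 *
 *	static struct dentry *myfs_lookup(struct inode *dir,
 *			struct dentry *dentry, unsigned int flags)
 *	{
 *		struct inode *inode = myfs_iget_by_name(dir, &dentry->d_name);
 *
 *		return d_splice_alias(inode, dentry);
 *	}
 */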
3041
3042/*
3043 * Test whether new_dentry is a subdirectory of old_dentry.
3044 *
3045 * Trivially implemented using the dcache structure
3046 */
3047
3048/**
3049 * is_subdir - is new dentry a subdirectory of old_dentry
3050 * @new_dentry: new dentry
3051 * @old_dentry: old dentry
3052 *
3053 * Returns true if new_dentry is a subdirectory of the parent (at any depth).
3054 * Returns false otherwise.
3055 * Caller must ensure that "new_dentry" is pinned before calling is_subdir()
3056 */
3057  
3058bool is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
3059{
3060	bool subdir;
3061	unsigned seq;
3062
3063	if (new_dentry == old_dentry)
3064		return true;
3065
3066	/* Access d_parent under rcu as d_move() may change it. */
3067	rcu_read_lock();
3068	seq = read_seqbegin(&rename_lock);
3069	subdir = d_ancestor(old_dentry, new_dentry);
3070	 /* Try lockless once... */
3071	if (read_seqretry(&rename_lock, seq)) {
3072		/* ...else acquire lock for progress even on deep chains. */
3073		read_seqlock_excl(&rename_lock);
3074		subdir = d_ancestor(old_dentry, new_dentry);
3075		read_sequnlock_excl(&rename_lock);
3076	}
3077	rcu_read_unlock();
3078	return subdir;
3079}
3080EXPORT_SYMBOL(is_subdir);
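/*
 * Usage sketch (illustrative): with both dentries pinned, a caller can
 * refuse to operate on anything outside a given subtree:
 *
 *	if (!is_subdir(dentry, subtree_root))	// subtree_root: caller's pin
 *		return -EXDEV;
 */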
3081
3082static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
3083{
3084	struct dentry *root = data;
3085	if (dentry != root) {
3086		if (d_unhashed(dentry) || !dentry->d_inode)
3087			return D_WALK_SKIP;
3088
3089		if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
3090			dentry->d_flags |= DCACHE_GENOCIDE;
3091			dentry->d_lockref.count--;
3092		}
3093	}
3094	return D_WALK_CONTINUE;
3095}
3096
3097void d_genocide(struct dentry *parent)
3098{
3099	d_walk(parent, parent, d_genocide_kill);
3100}
3101
3102void d_mark_tmpfile(struct file *file, struct inode *inode)
3103{
3104	struct dentry *dentry = file->f_path.dentry;
3105
3106	BUG_ON(dentry->d_name.name != dentry->d_iname ||
3107		!hlist_unhashed(&dentry->d_u.d_alias) ||
3108		!d_unlinked(dentry));
3109	spin_lock(&dentry->d_parent->d_lock);
3110	spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
3111	dentry->d_name.len = sprintf(dentry->d_iname, "#%llu",
3112				(unsigned long long)inode->i_ino);
3113	spin_unlock(&dentry->d_lock);
3114	spin_unlock(&dentry->d_parent->d_lock);
3115}
3116EXPORT_SYMBOL(d_mark_tmpfile);
3117
3118void d_tmpfile(struct file *file, struct inode *inode)
3119{
3120	struct dentry *dentry = file->f_path.dentry;
3121
3122	inode_dec_link_count(inode);
3123	d_mark_tmpfile(file, inode);
3124	d_instantiate(dentry, inode);
3125}
3126EXPORT_SYMBOL(d_tmpfile);
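/*
 * Usage sketch (illustrative): a ->tmpfile() implementation allocates
 * the inode, marks the dentry and opens the file; "myfs_new_inode" is
 * a hypothetical helper returning a pinned inode.
 *
 *	static int myfs_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
 *				struct file *file, umode_t mode)
 *	{
 *		struct inode *inode = myfs_new_inode(dir->i_sb, mode);
 *
 *		if (!inode)
 *			return -ENOSPC;
 *		d_tmpfile(file, inode);
 *		return finish_open_simple(file, 0);
 *	}
 */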
3127
3128/*
3129 * Obtain inode number of the parent dentry.
3130 */
3131ino_t d_parent_ino(struct dentry *dentry)
3132{
3133	struct dentry *parent;
3134	struct inode *iparent;
3135	unsigned seq;
3136	ino_t ret;
3137
3138	scoped_guard(rcu) {
3139		seq = raw_seqcount_begin(&dentry->d_seq);
3140		parent = READ_ONCE(dentry->d_parent);
3141		iparent = d_inode_rcu(parent);
3142		if (likely(iparent)) {
3143			ret = iparent->i_ino;
3144			if (!read_seqcount_retry(&dentry->d_seq, seq))
3145				return ret;
3146		}
3147	}
3148
3149	spin_lock(&dentry->d_lock);
3150	ret = dentry->d_parent->d_inode->i_ino;
3151	spin_unlock(&dentry->d_lock);
3152	return ret;
3153}
3154EXPORT_SYMBOL(d_parent_ino);
3155
3156static __initdata unsigned long dhash_entries;
3157static int __init set_dhash_entries(char *str)
3158{
3159	if (!str)
3160		return 0;
3161	dhash_entries = simple_strtoul(str, &str, 0);
3162	return 1;
3163}
3164__setup("dhash_entries=", set_dhash_entries);
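/*
 * Illustrative: the table size can thus be forced from the kernel
 * command line, e.g. booting with
 *
 *	dhash_entries=2097152
 *
 * requests roughly two million buckets (rounded by the hash allocator)
 * instead of the memory-scaled default.
 */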
3165
3166static void __init dcache_init_early(void)
3167{
3168	/* If hashes are distributed across NUMA nodes, defer
3169	 * hash allocation until vmalloc space is available.
3170	 */
3171	if (hashdist)
3172		return;
3173
3174	dentry_hashtable =
3175		alloc_large_system_hash("Dentry cache",
3176					sizeof(struct hlist_bl_head),
3177					dhash_entries,
3178					13,
3179					HASH_EARLY | HASH_ZERO,
3180					&d_hash_shift,
3181					NULL,
3182					0,
3183					0);
3184	d_hash_shift = 32 - d_hash_shift;
3185
3186	runtime_const_init(shift, d_hash_shift);
3187	runtime_const_init(ptr, dentry_hashtable);
3188}
3189
3190static void __init dcache_init(void)
3191{
3192	/*
3193	 * A constructor could be added for stable state like the lists,
3194	 * but it is probably not worth it because of the cache nature
3195	 * of the dcache.
3196	 */
3197	dentry_cache = KMEM_CACHE_USERCOPY(dentry,
3198		SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_ACCOUNT,
3199		d_iname);
3200
3201	/* Hash may have been set up in dcache_init_early */
3202	if (!hashdist)
3203		return;
3204
3205	dentry_hashtable =
3206		alloc_large_system_hash("Dentry cache",
3207					sizeof(struct hlist_bl_head),
3208					dhash_entries,
3209					13,
3210					HASH_ZERO,
3211					&d_hash_shift,
3212					NULL,
3213					0,
3214					0);
3215	d_hash_shift = 32 - d_hash_shift;
3216
3217	runtime_const_init(shift, d_hash_shift);
3218	runtime_const_init(ptr, dentry_hashtable);
3219}
3220
3221/* SLAB cache for __getname() consumers */
3222struct kmem_cache *names_cachep __ro_after_init;
3223EXPORT_SYMBOL(names_cachep);
3224
3225void __init vfs_caches_init_early(void)
3226{
3227	int i;
3228
3229	for (i = 0; i < ARRAY_SIZE(in_lookup_hashtable); i++)
3230		INIT_HLIST_BL_HEAD(&in_lookup_hashtable[i]);
3231
3232	dcache_init_early();
3233	inode_init_early();
3234}
3235
3236void __init vfs_caches_init(void)
3237{
3238	names_cachep = kmem_cache_create_usercopy("names_cache", PATH_MAX, 0,
3239			SLAB_HWCACHE_ALIGN|SLAB_PANIC, 0, PATH_MAX, NULL);
3240
3241	dcache_init();
3242	inode_init();
3243	files_init();
3244	files_maxfiles_init();
3245	mnt_init();
3246	bdev_cache_init();
3247	chrdev_init();
3248}
v3.15
 
   1/*
   2 * fs/dcache.c
   3 *
   4 * Complete reimplementation
   5 * (C) 1997 Thomas Schoebel-Theuer,
   6 * with heavy changes by Linus Torvalds
   7 */
   8
   9/*
  10 * Notes on the allocation strategy:
  11 *
  12 * The dcache is a master of the icache - whenever a dcache entry
  13 * exists, the inode will always exist. "iput()" is done either when
  14 * the dcache entry is deleted or garbage collected.
  15 */
  16
  17#include <linux/syscalls.h>
  18#include <linux/string.h>
  19#include <linux/mm.h>
  20#include <linux/fs.h>
 
  21#include <linux/fsnotify.h>
  22#include <linux/slab.h>
  23#include <linux/init.h>
  24#include <linux/hash.h>
  25#include <linux/cache.h>
  26#include <linux/export.h>
  27#include <linux/mount.h>
  28#include <linux/file.h>
  29#include <asm/uaccess.h>
  30#include <linux/security.h>
  31#include <linux/seqlock.h>
  32#include <linux/swap.h>
  33#include <linux/bootmem.h>
  34#include <linux/fs_struct.h>
  35#include <linux/hardirq.h>
  36#include <linux/bit_spinlock.h>
  37#include <linux/rculist_bl.h>
  38#include <linux/prefetch.h>
  39#include <linux/ratelimit.h>
  40#include <linux/list_lru.h>
  41#include "internal.h"
  42#include "mount.h"
  43
 
 
  44/*
  45 * Usage:
  46 * dcache->d_inode->i_lock protects:
  47 *   - i_dentry, d_alias, d_inode of aliases
  48 * dcache_hash_bucket lock protects:
  49 *   - the dcache hash table
  50 * s_anon bl list spinlock protects:
  51 *   - the s_anon list (see __d_drop)
  52 * dentry->d_sb->s_dentry_lru_lock protects:
  53 *   - the dcache lru lists and counters
  54 * d_lock protects:
  55 *   - d_flags
  56 *   - d_name
  57 *   - d_lru
  58 *   - d_count
  59 *   - d_unhashed()
  60 *   - d_parent and d_subdirs
  61 *   - childrens' d_child and d_parent
  62 *   - d_alias, d_inode
  63 *
  64 * Ordering:
  65 * dentry->d_inode->i_lock
  66 *   dentry->d_lock
  67 *     dentry->d_sb->s_dentry_lru_lock
  68 *     dcache_hash_bucket lock
  69 *     s_anon lock
  70 *
  71 * If there is an ancestor relationship:
  72 * dentry->d_parent->...->d_parent->d_lock
  73 *   ...
  74 *     dentry->d_parent->d_lock
  75 *       dentry->d_lock
  76 *
  77 * If no ancestor relationship:
  78 * if (dentry1 < dentry2)
  79 *   dentry1->d_lock
  80 *     dentry2->d_lock
  81 */
  82int sysctl_vfs_cache_pressure __read_mostly = 100;
  83EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);
  84
  85__cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);
  86
  87EXPORT_SYMBOL(rename_lock);
  88
  89static struct kmem_cache *dentry_cache __read_mostly;
 
 
 
 
 
 
 
  90
  91/*
  92 * This is the single most critical data structure when it comes
  93 * to the dcache: the hashtable for lookups. Somebody should try
  94 * to make this good - I've just made it work.
  95 *
  96 * This hash-function tries to avoid losing too many bits of hash
  97 * information, yet avoid using a prime hash-size or similar.
 
 
 
 
 
  98 */
  99
 100static unsigned int d_hash_mask __read_mostly;
 101static unsigned int d_hash_shift __read_mostly;
 102
 103static struct hlist_bl_head *dentry_hashtable __read_mostly;
 104
 105static inline struct hlist_bl_head *d_hash(const struct dentry *parent,
 
 
 
 
 
 
 
 
 
 106					unsigned int hash)
 107{
 108	hash += (unsigned long) parent / L1_CACHE_BYTES;
 109	hash = hash + (hash >> d_hash_shift);
 110	return dentry_hashtable + (hash & d_hash_mask);
 111}
 112
 113/* Statistics gathering. */
 114struct dentry_stat_t dentry_stat = {
 115	.age_limit = 45,
 
 
 
 
 116};
 117
 118static DEFINE_PER_CPU(long, nr_dentry);
 119static DEFINE_PER_CPU(long, nr_dentry_unused);
 
 
 120
 121#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
 
 
 
 
 122
 123/*
 124 * Here we resort to our own counters instead of using generic per-cpu counters
 125 * for consistency with what the vfs inode code does. We are expected to harvest
 126 * better code and performance by having our own specialized counters.
 127 *
 128 * Please note that the loop is done over all possible CPUs, not over all online
 129 * CPUs. The reason for this is that we don't want to play games with CPUs going
 130 * on and off. If one of them goes off, we will just keep their counters.
 131 *
 132 * glommer: See cffbc8a for details, and if you ever intend to change this,
 133 * please update all vfs counters to match.
 134 */
 135static long get_nr_dentry(void)
 136{
 137	int i;
 138	long sum = 0;
 139	for_each_possible_cpu(i)
 140		sum += per_cpu(nr_dentry, i);
 141	return sum < 0 ? 0 : sum;
 142}
 143
 144static long get_nr_dentry_unused(void)
 145{
 146	int i;
 147	long sum = 0;
 148	for_each_possible_cpu(i)
 149		sum += per_cpu(nr_dentry_unused, i);
 150	return sum < 0 ? 0 : sum;
 151}
 152
 153int proc_nr_dentry(ctl_table *table, int write, void __user *buffer,
 154		   size_t *lenp, loff_t *ppos)
 
 
 
 
 
 
 
 
 
 
 155{
 156	dentry_stat.nr_dentry = get_nr_dentry();
 157	dentry_stat.nr_unused = get_nr_dentry_unused();
 
 158	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
 159}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 160#endif
 161
 162/*
 163 * Compare 2 name strings, return 0 if they match, otherwise non-zero.
 164 * The strings are both count bytes long, and count is non-zero.
 165 */
 166#ifdef CONFIG_DCACHE_WORD_ACCESS
 167
 168#include <asm/word-at-a-time.h>
 169/*
 170 * NOTE! 'cs' and 'scount' come from a dentry, so it has a
 171 * aligned allocation for this particular component. We don't
 172 * strictly need the load_unaligned_zeropad() safety, but it
 173 * doesn't hurt either.
 174 *
 175 * In contrast, 'ct' and 'tcount' can be from a pathname, and do
 176 * need the careful unaligned handling.
 177 */
 178static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
 179{
 180	unsigned long a,b,mask;
 181
 182	for (;;) {
 183		a = *(unsigned long *)cs;
 184		b = load_unaligned_zeropad(ct);
 185		if (tcount < sizeof(unsigned long))
 186			break;
 187		if (unlikely(a != b))
 188			return 1;
 189		cs += sizeof(unsigned long);
 190		ct += sizeof(unsigned long);
 191		tcount -= sizeof(unsigned long);
 192		if (!tcount)
 193			return 0;
 194	}
 195	mask = bytemask_from_count(tcount);
 196	return unlikely(!!((a ^ b) & mask));
 197}
 198
 199#else
 200
 201static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
 202{
 203	do {
 204		if (*cs != *ct)
 205			return 1;
 206		cs++;
 207		ct++;
 208		tcount--;
 209	} while (tcount);
 210	return 0;
 211}
 212
 213#endif
 214
 215static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *ct, unsigned tcount)
 216{
 217	const unsigned char *cs;
 218	/*
 219	 * Be careful about RCU walk racing with rename:
 220	 * use ACCESS_ONCE to fetch the name pointer.
 221	 *
 222	 * NOTE! Even if a rename will mean that the length
 223	 * was not loaded atomically, we don't care. The
 224	 * RCU walk will check the sequence count eventually,
 225	 * and catch it. And we won't overrun the buffer,
 226	 * because we're reading the name pointer atomically,
 227	 * and a dentry name is guaranteed to be properly
 228	 * terminated with a NUL byte.
 229	 *
 230	 * End result: even if 'len' is wrong, we'll exit
 231	 * early because the data cannot match (there can
 232	 * be no NUL in the ct/tcount data)
 233	 */
 234	cs = ACCESS_ONCE(dentry->d_name.name);
 235	smp_read_barrier_depends();
 236	return dentry_string_cmp(cs, ct, tcount);
 237}
 238
 
 
 
 
 
 
 
 
 
 
 
 
 
 239static void __d_free(struct rcu_head *head)
 240{
 241	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
 242
 243	WARN_ON(!hlist_unhashed(&dentry->d_alias));
 244	if (dname_external(dentry))
 245		kfree(dentry->d_name.name);
 246	kmem_cache_free(dentry_cache, dentry); 
 247}
 248
 249static void dentry_free(struct dentry *dentry)
 250{
 251	/* if dentry was never visible to RCU, immediate free is OK */
 252	if (!(dentry->d_flags & DCACHE_RCUACCESS))
 253		__d_free(&dentry->d_u.d_rcu);
 254	else
 255		call_rcu(&dentry->d_u.d_rcu, __d_free);
 256}
 257
 258/**
 259 * dentry_rcuwalk_barrier - invalidate in-progress rcu-walk lookups
 260 * @dentry: the target dentry
 261 * After this call, in-progress rcu-walk path lookup will fail. This
 262 * should be called after unhashing, and after changing d_inode (if
 263 * the dentry has not already been unhashed).
 264 */
 265static inline void dentry_rcuwalk_barrier(struct dentry *dentry)
 266{
 267	assert_spin_locked(&dentry->d_lock);
 268	/* Go through a barrier */
 269	write_seqcount_barrier(&dentry->d_seq);
 270}
 271
 272/*
 273 * Release the dentry's inode, using the filesystem
 274 * d_iput() operation if defined. Dentry has no refcount
 275 * and is unhashed.
 276 */
 277static void dentry_iput(struct dentry * dentry)
 278	__releases(dentry->d_lock)
 279	__releases(dentry->d_inode->i_lock)
 280{
 281	struct inode *inode = dentry->d_inode;
 282	if (inode) {
 283		dentry->d_inode = NULL;
 284		hlist_del_init(&dentry->d_alias);
 285		spin_unlock(&dentry->d_lock);
 286		spin_unlock(&inode->i_lock);
 287		if (!inode->i_nlink)
 288			fsnotify_inoderemove(inode);
 289		if (dentry->d_op && dentry->d_op->d_iput)
 290			dentry->d_op->d_iput(dentry, inode);
 291		else
 292			iput(inode);
 293	} else {
 294		spin_unlock(&dentry->d_lock);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 295	}
 
 
 
 
 
 296}
 297
 298/*
 299 * Release the dentry's inode, using the filesystem
 300 * d_iput() operation if defined. dentry remains in-use.
 301 */
 302static void dentry_unlink_inode(struct dentry * dentry)
 303	__releases(dentry->d_lock)
 304	__releases(dentry->d_inode->i_lock)
 305{
 306	struct inode *inode = dentry->d_inode;
 307	__d_clear_type(dentry);
 308	dentry->d_inode = NULL;
 309	hlist_del_init(&dentry->d_alias);
 310	dentry_rcuwalk_barrier(dentry);
 
 311	spin_unlock(&dentry->d_lock);
 312	spin_unlock(&inode->i_lock);
 313	if (!inode->i_nlink)
 314		fsnotify_inoderemove(inode);
 315	if (dentry->d_op && dentry->d_op->d_iput)
 316		dentry->d_op->d_iput(dentry, inode);
 317	else
 318		iput(inode);
 319}
 320
 321/*
 322 * The DCACHE_LRU_LIST bit is set whenever the 'd_lru' entry
 323 * is in use - which includes both the "real" per-superblock
 324 * LRU list _and_ the DCACHE_SHRINK_LIST use.
 325 *
 326 * The DCACHE_SHRINK_LIST bit is set whenever the dentry is
 327 * on the shrink list (ie not on the superblock LRU list).
 328 *
 329 * The per-cpu "nr_dentry_unused" counters are updated with
 330 * the DCACHE_LRU_LIST bit.
 331 *
 
 
 
 
 
 332 * These helper functions make sure we always follow the
 333 * rules. d_lock must be held by the caller.
 334 */
 335#define D_FLAG_VERIFY(dentry,x) WARN_ON_ONCE(((dentry)->d_flags & (DCACHE_LRU_LIST | DCACHE_SHRINK_LIST)) != (x))
 336static void d_lru_add(struct dentry *dentry)
 337{
 338	D_FLAG_VERIFY(dentry, 0);
 339	dentry->d_flags |= DCACHE_LRU_LIST;
 340	this_cpu_inc(nr_dentry_unused);
 341	WARN_ON_ONCE(!list_lru_add(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
 342}
 343
 344static void d_lru_del(struct dentry *dentry)
 345{
 346	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
 347	dentry->d_flags &= ~DCACHE_LRU_LIST;
 348	this_cpu_dec(nr_dentry_unused);
 349	WARN_ON_ONCE(!list_lru_del(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
 350}
 351
 352static void d_shrink_del(struct dentry *dentry)
 353{
 354	D_FLAG_VERIFY(dentry, DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
 355	list_del_init(&dentry->d_lru);
 356	dentry->d_flags &= ~(DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
 357	this_cpu_dec(nr_dentry_unused);
 358}
 359
 360static void d_shrink_add(struct dentry *dentry, struct list_head *list)
 361{
 362	D_FLAG_VERIFY(dentry, 0);
 363	list_add(&dentry->d_lru, list);
 364	dentry->d_flags |= DCACHE_SHRINK_LIST | DCACHE_LRU_LIST;
 365	this_cpu_inc(nr_dentry_unused);
 366}
 367
 368/*
 369 * These can only be called under the global LRU lock, ie during the
 370 * callback for freeing the LRU list. "isolate" removes it from the
 371 * LRU lists entirely, while shrink_move moves it to the indicated
 372 * private list.
 373 */
 374static void d_lru_isolate(struct dentry *dentry)
 375{
 376	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
 377	dentry->d_flags &= ~DCACHE_LRU_LIST;
 378	this_cpu_dec(nr_dentry_unused);
 379	list_del_init(&dentry->d_lru);
 380}
 381
 382static void d_lru_shrink_move(struct dentry *dentry, struct list_head *list)
 383{
 384	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
 385	dentry->d_flags |= DCACHE_SHRINK_LIST;
 386	list_move_tail(&dentry->d_lru, list);
 387}
 388
 389/*
 390 * dentry_lru_add() must be called with d_lock held.
 391 */
 392static void dentry_lru_add(struct dentry *dentry)
 393{
 394	if (unlikely(!(dentry->d_flags & DCACHE_LRU_LIST)))
 395		d_lru_add(dentry);
 396}
 397
 398/**
 399 * d_drop - drop a dentry
 400 * @dentry: dentry to drop
 401 *
 402 * d_drop() unhashes the entry from the parent dentry hashes, so that it won't
 403 * be found through a VFS lookup any more. Note that this is different from
 404 * deleting the dentry - d_delete will try to mark the dentry negative if
 405 * possible, giving a successful _negative_ lookup, while d_drop will
 406 * just make the cache lookup fail.
 407 *
 408 * d_drop() is used mainly for stuff that wants to invalidate a dentry for some
 409 * reason (NFS timeouts or autofs deletes).
 410 *
 411 * __d_drop requires dentry->d_lock.
 412 */
 413void __d_drop(struct dentry *dentry)
 414{
 415	if (!d_unhashed(dentry)) {
 416		struct hlist_bl_head *b;
 417		/*
 418		 * Hashed dentries are normally on the dentry hashtable,
 419		 * with the exception of those newly allocated by
 420		 * d_obtain_alias, which are always IS_ROOT:
 421		 */
 422		if (unlikely(IS_ROOT(dentry)))
 423			b = &dentry->d_sb->s_anon;
 424		else
 425			b = d_hash(dentry->d_parent, dentry->d_name.hash);
 426
 427		hlist_bl_lock(b);
 428		__hlist_bl_del(&dentry->d_hash);
 429		dentry->d_hash.pprev = NULL;
 430		hlist_bl_unlock(b);
 431		dentry_rcuwalk_barrier(dentry);
 432	}
 433}
 434EXPORT_SYMBOL(__d_drop);
 435
 436void d_drop(struct dentry *dentry)
 437{
 438	spin_lock(&dentry->d_lock);
 439	__d_drop(dentry);
 440	spin_unlock(&dentry->d_lock);
 441}
 442EXPORT_SYMBOL(d_drop);
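/*
 * Illustrative sketch, not part of dcache.c: the typical d_drop() caller
 * is a ->d_revalidate() method that has decided its cached dentry is
 * stale. example_is_stale() is a hypothetical filesystem check. Dropping
 * the dentry makes future cache lookups miss, forcing a fresh ->lookup(),
 * without waiting for the last reference to go away.
 */
static int example_d_revalidate(struct dentry *dentry, unsigned int flags)
{
	if (flags & LOOKUP_RCU)
		return -ECHILD;			/* no spinlocks in rcu-walk */
	if (example_is_stale(dentry)) {		/* hypothetical staleness test */
		d_drop(dentry);			/* unhash: lookups now miss */
		return 0;			/* invalid: VFS redoes ->lookup() */
	}
	return 1;				/* still valid */
}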
 443
 444static void __dentry_kill(struct dentry *dentry)
 445{
 446	struct dentry *parent = NULL;
 447	bool can_free = true;
 448	if (!IS_ROOT(dentry))
 449		parent = dentry->d_parent;
 450
 451	/*
 452	 * The dentry is now unrecoverably dead to the world.
 453	 */
 454	lockref_mark_dead(&dentry->d_lockref);
 455
 456	/*
 457	 * inform the fs via d_prune that this dentry is about to be
 458	 * unhashed and destroyed.
 459	 */
 460	if ((dentry->d_flags & DCACHE_OP_PRUNE) && !d_unhashed(dentry))
 461		dentry->d_op->d_prune(dentry);
 462
 463	if (dentry->d_flags & DCACHE_LRU_LIST) {
 464		if (!(dentry->d_flags & DCACHE_SHRINK_LIST))
 465			d_lru_del(dentry);
 466	}
 467	/* if it was on the hash then remove it */
 468	__d_drop(dentry);
 469	list_del(&dentry->d_u.d_child);
 470	/*
 471	 * Inform d_walk() that we are no longer attached to the
 472	 * dentry tree
 473	 */
 474	dentry->d_flags |= DCACHE_DENTRY_KILLED;
 475	if (parent)
 476		spin_unlock(&parent->d_lock);
 477	dentry_iput(dentry);
 478	/*
 479	 * dentry_iput drops the locks, at which point nobody (except
 480	 * transient RCU lookups) can reach this dentry.
 481	 */
 482	BUG_ON((int)dentry->d_lockref.count > 0);
 483	this_cpu_dec(nr_dentry);
 484	if (dentry->d_op && dentry->d_op->d_release)
 485		dentry->d_op->d_release(dentry);
 486
 487	spin_lock(&dentry->d_lock);
 488	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
 489		dentry->d_flags |= DCACHE_MAY_FREE;
 490		can_free = false;
 491	}
 492	spin_unlock(&dentry->d_lock);
 493	if (likely(can_free))
 494		dentry_free(dentry);
 495}
 496
 497/*
 498 * Finish off a dentry we've decided to kill.
 499 * dentry->d_lock must be held, returns with it unlocked.
 501 * Returns dentry requiring refcount drop, or NULL if we're done.
 502 */
 503static struct dentry *dentry_kill(struct dentry *dentry)
 504	__releases(dentry->d_lock)
 505{
 506	struct inode *inode = dentry->d_inode;
 507	struct dentry *parent = NULL;
 508
 509	if (inode && unlikely(!spin_trylock(&inode->i_lock)))
 510		goto failed;
 511
 512	if (!IS_ROOT(dentry)) {
 513		parent = dentry->d_parent;
 514		if (unlikely(!spin_trylock(&parent->d_lock))) {
 515			if (inode)
 516				spin_unlock(&inode->i_lock);
 517			goto failed;
 518		}
 519	}
 520
 521	__dentry_kill(dentry);
 522	return parent;
 523
 524failed:
 525	spin_unlock(&dentry->d_lock);
 526	cpu_relax();
 527	return dentry; /* try again with same dentry */
 528}
 529
 530static inline struct dentry *lock_parent(struct dentry *dentry)
 531{
 532	struct dentry *parent = dentry->d_parent;
 533	if (IS_ROOT(dentry))
 534		return NULL;
 535	if (likely(spin_trylock(&parent->d_lock)))
 536		return parent;
 537	spin_unlock(&dentry->d_lock);
 538	rcu_read_lock();
 539again:
 540	parent = ACCESS_ONCE(dentry->d_parent);
 541	spin_lock(&parent->d_lock);
 542	/*
 543	 * We can't blindly lock dentry until we are sure
 544	 * that we won't violate the locking order.
 545	 * Any changes of dentry->d_parent must have
 546	 * been done with parent->d_lock held, so
 547	 * spin_lock() above is enough of a barrier
 548	 * for checking if it's still our child.
 549	 */
 550	if (unlikely(parent != dentry->d_parent)) {
 551		spin_unlock(&parent->d_lock);
 552		goto again;
 553	}
 554	rcu_read_unlock();
 555	if (parent != dentry)
 556		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
 557	else
 558		parent = NULL;
 559	return parent;
 560}
 561
 562/*
 563 * This is dput
 564 *
 565 * This is complicated by the fact that we do not want to put
 566 * dentries that are no longer on any hash chain on the unused
 567 * list: we'd much rather just get rid of them immediately.
 568 *
 569 * However, that implies that we have to traverse the dentry
 570 * tree upwards to the parents which might _also_ now be
 571 * scheduled for deletion (it may have been only waiting for
 572 * its last child to go away).
 573 *
 574 * This tail recursion is done by hand as we don't want to depend
 575 * on the compiler to always get this right (gcc generally doesn't).
 576 * Real recursion would eat up our stack space.
 577 */
 578
 579/*
 580 * dput - release a dentry
 581 * @dentry: dentry to release 
 582 *
 583 * Release a dentry. This will drop the usage count and if appropriate
 584 * call the dentry unlink method as well as removing it from the queues and
 585 * releasing its resources. If the parent dentries were scheduled for release
 586 * they too may now get deleted.
 587 */
 588void dput(struct dentry *dentry)
 589{
 590	if (unlikely(!dentry))
 591		return;
 592
 593repeat:
 594	if (lockref_put_or_lock(&dentry->d_lockref))
 595		return;
 596
 597	/* Unreachable? Get rid of it */
 598	if (unlikely(d_unhashed(dentry)))
 599		goto kill_it;
 600
 601	if (unlikely(dentry->d_flags & DCACHE_OP_DELETE)) {
 602		if (dentry->d_op->d_delete(dentry))
 603			goto kill_it;
 604	}
 605
 606	if (!(dentry->d_flags & DCACHE_REFERENCED))
 607		dentry->d_flags |= DCACHE_REFERENCED;
 608	dentry_lru_add(dentry);
 609
 610	dentry->d_lockref.count--;
 611	spin_unlock(&dentry->d_lock);
 612	return;
 613
 614kill_it:
 615	dentry = dentry_kill(dentry);
 616	if (dentry)
 617		goto repeat;
 618}
 619EXPORT_SYMBOL(dput);
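/*
 * Illustrative sketch, not part of dcache.c: dget()/dput() pairing. The
 * reference pins the dentry so the kill logic above never runs while we
 * are looking at it; the final dput() of an unhashed dentry is what
 * triggers dentry_kill().
 */
static void example_use(struct dentry *dentry)
{
	struct dentry *d = dget(dentry);	/* take a reference */

	pr_info("visiting %pd\n", d);		/* name is safe to print */
	dput(d);				/* balance the dget() */
}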
 620
 621/**
 622 * d_invalidate - invalidate a dentry
 623 * @dentry: dentry to invalidate
 624 *
 625 * Try to invalidate the dentry if it turns out to be
 626 * possible. If there are other dentries that can be
 627 * reached through this one we can't delete it and we
 628 * return -EBUSY. On success we return 0.
 629 *
 630 * no dcache lock.
 631 */
 632 
 633int d_invalidate(struct dentry * dentry)
 634{
 635	/*
 636	 * If it's already been dropped, return OK.
 637	 */
 638	spin_lock(&dentry->d_lock);
 639	if (d_unhashed(dentry)) {
 640		spin_unlock(&dentry->d_lock);
 641		return 0;
 642	}
 643	/*
 644	 * Check whether to do a partial shrink_dcache
 645	 * to get rid of unused child entries.
 646	 */
 647	if (!list_empty(&dentry->d_subdirs)) {
 648		spin_unlock(&dentry->d_lock);
 649		shrink_dcache_parent(dentry);
 650		spin_lock(&dentry->d_lock);
 651	}
 652
 653	/*
 654	 * Somebody else still using it?
 655	 *
 656	 * If it's a directory, we can't drop it
 657	 * for fear of somebody re-populating it
 658	 * with children (even though dropping it
 659	 * would make it unreachable from the root,
 660	 * we might still populate it if it was a
 661	 * working directory or similar).
 662	 * We also need to leave mountpoints alone,
 663	 * directory or not.
 664	 */
 665	if (dentry->d_lockref.count > 1 && dentry->d_inode) {
 666		if (S_ISDIR(dentry->d_inode->i_mode) || d_mountpoint(dentry)) {
 667			spin_unlock(&dentry->d_lock);
 668			return -EBUSY;
 669		}
 670	}
 671
 672	__d_drop(dentry);
 673	spin_unlock(&dentry->d_lock);
 674	return 0;
 675}
 676EXPORT_SYMBOL(d_invalidate);
 677
 678/* This must be called with d_lock held */
 679static inline void __dget_dlock(struct dentry *dentry)
 680{
 681	dentry->d_lockref.count++;
 682}
 683
 684static inline void __dget(struct dentry *dentry)
 685{
 686	lockref_get(&dentry->d_lockref);
 687}
 688
 689struct dentry *dget_parent(struct dentry *dentry)
 690{
 691	int gotref;
 692	struct dentry *ret;
 693
 694	/*
 695	 * Do optimistic parent lookup without any
 696	 * locking.
 697	 */
 698	rcu_read_lock();
 699	ret = ACCESS_ONCE(dentry->d_parent);
 700	gotref = lockref_get_not_zero(&ret->d_lockref);
 701	rcu_read_unlock();
 702	if (likely(gotref)) {
 703		if (likely(ret == ACCESS_ONCE(dentry->d_parent)))
 704			return ret;
 705		dput(ret);
 706	}
 707
 708repeat:
 709	/*
 710	 * Don't need rcu_dereference because we re-check it was correct under
 711	 * the lock.
 712	 */
 713	rcu_read_lock();
 714	ret = dentry->d_parent;
 715	spin_lock(&ret->d_lock);
 716	if (unlikely(ret != dentry->d_parent)) {
 717		spin_unlock(&ret->d_lock);
 718		rcu_read_unlock();
 719		goto repeat;
 720	}
 721	rcu_read_unlock();
 722	BUG_ON(!ret->d_lockref.count);
 723	ret->d_lockref.count++;
 724	spin_unlock(&ret->d_lock);
 725	return ret;
 726}
 727EXPORT_SYMBOL(dget_parent);
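/*
 * Illustrative sketch, not part of dcache.c: reading dentry->d_parent
 * directly only gives a snapshot that a concurrent rename can invalidate;
 * dget_parent() returns a parent that is pinned and therefore stable.
 */
static void example_with_parent(struct dentry *dentry)
{
	struct dentry *parent = dget_parent(dentry);	/* pinned parent */

	pr_info("%pd is a child of %pd\n", dentry, parent);
	dput(parent);					/* balance dget_parent() */
}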
 728
 729/**
 730 * d_find_alias - grab a hashed alias of inode
 731 * @inode: inode in question
 732 * @want_discon:  flag, used by d_splice_alias, to request
 733 *          that only a DISCONNECTED alias be returned.
 734 *
 735 * If inode has a hashed alias, or is a directory and has any alias,
 736 * acquire the reference to alias and return it. Otherwise return NULL.
 737 * Notice that if inode is a directory there can be only one alias and
 738 * it can be unhashed only if it has no children, or if it is the root
 739 * of a filesystem.
 740 *
 741 * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer
 742 * any other hashed alias over that one unless @want_discon is set,
 743 * in which case only return an IS_ROOT, DCACHE_DISCONNECTED alias.
 744 */
 745static struct dentry *__d_find_alias(struct inode *inode, int want_discon)
 746{
 747	struct dentry *alias, *discon_alias;
 748
 749again:
 750	discon_alias = NULL;
 751	hlist_for_each_entry(alias, &inode->i_dentry, d_alias) {
 752		spin_lock(&alias->d_lock);
 753		if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
 754			if (IS_ROOT(alias) &&
 755			    (alias->d_flags & DCACHE_DISCONNECTED)) {
 756				discon_alias = alias;
 757			} else if (!want_discon) {
 758				__dget_dlock(alias);
 759				spin_unlock(&alias->d_lock);
 760				return alias;
 761			}
 762		}
 763		spin_unlock(&alias->d_lock);
 764	}
 765	if (discon_alias) {
 766		alias = discon_alias;
 767		spin_lock(&alias->d_lock);
 768		if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
 769			if (IS_ROOT(alias) &&
 770			    (alias->d_flags & DCACHE_DISCONNECTED)) {
 771				__dget_dlock(alias);
 772				spin_unlock(&alias->d_lock);
 773				return alias;
 774			}
 775		}
 776		spin_unlock(&alias->d_lock);
 777		goto again;
 778	}
 779	return NULL;
 780}
 781
 782struct dentry *d_find_alias(struct inode *inode)
 783{
 784	struct dentry *de = NULL;
 785
 786	if (!hlist_empty(&inode->i_dentry)) {
 787		spin_lock(&inode->i_lock);
 788		de = __d_find_alias(inode, 0);
 789		spin_unlock(&inode->i_lock);
 790	}
 791	return de;
 792}
 793EXPORT_SYMBOL(d_find_alias);
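/*
 * Illustrative sketch, not part of dcache.c: when a caller has only an
 * inode (e.g. in an export or notification path), d_find_alias() turns
 * it back into a referenced dentry so a name can be attached to it.
 */
static void example_name_of_inode(struct inode *inode)
{
	struct dentry *alias = d_find_alias(inode);	/* NULL if no alias */

	if (alias) {
		pr_info("inode %lu is cached as %pd\n", inode->i_ino, alias);
		dput(alias);				/* drop the reference */
	}
}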
 794
 795/*
 796 * Try to kill dentries associated with this inode.
 797 * WARNING: you must own a reference to inode.
 798 */
 799void d_prune_aliases(struct inode *inode)
 800{
 801	struct dentry *dentry;
 802restart:
 803	spin_lock(&inode->i_lock);
 804	hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
 805		spin_lock(&dentry->d_lock);
 806		if (!dentry->d_lockref.count) {
 807			/*
 808			 * inform the fs via d_prune that this dentry
 809			 * is about to be unhashed and destroyed.
 810			 */
 811			if ((dentry->d_flags & DCACHE_OP_PRUNE) &&
 812			    !d_unhashed(dentry))
 813				dentry->d_op->d_prune(dentry);
 814
 815			__dget_dlock(dentry);
 816			__d_drop(dentry);
 817			spin_unlock(&dentry->d_lock);
 818			spin_unlock(&inode->i_lock);
 819			dput(dentry);
 820			goto restart;
 821		}
 822		spin_unlock(&dentry->d_lock);
 823	}
 824	spin_unlock(&inode->i_lock);
 825}
 826EXPORT_SYMBOL(d_prune_aliases);
 827
 828static void shrink_dentry_list(struct list_head *list)
 829{
 830	struct dentry *dentry, *parent;
 831
 832	while (!list_empty(list)) {
 833		struct inode *inode;
 834		dentry = list_entry(list->prev, struct dentry, d_lru);
 835		spin_lock(&dentry->d_lock);
 836		parent = lock_parent(dentry);
 837
 838		/*
 839		 * The dispose list is isolated and dentries are not accounted
 840		 * to the LRU here, so we can simply remove it from the list
 841		 * here regardless of whether it is referenced or not.
 842		 */
 843		d_shrink_del(dentry);
 844
 845		/*
 846		 * We found an inuse dentry which was not removed from
 847		 * the LRU because of laziness during lookup. Do not free it.
 848		 */
 849		if ((int)dentry->d_lockref.count > 0) {
 850			spin_unlock(&dentry->d_lock);
 851			if (parent)
 852				spin_unlock(&parent->d_lock);
 853			continue;
 854		}
 855
 856
 857		if (unlikely(dentry->d_flags & DCACHE_DENTRY_KILLED)) {
 858			bool can_free = dentry->d_flags & DCACHE_MAY_FREE;
 859			spin_unlock(&dentry->d_lock);
 860			if (parent)
 861				spin_unlock(&parent->d_lock);
 862			if (can_free)
 863				dentry_free(dentry);
 864			continue;
 865		}
 866
 867		inode = dentry->d_inode;
 868		if (inode && unlikely(!spin_trylock(&inode->i_lock))) {
 869			d_shrink_add(dentry, list);
 870			spin_unlock(&dentry->d_lock);
 871			if (parent)
 872				spin_unlock(&parent->d_lock);
 873			continue;
 874		}
 875
 876		__dentry_kill(dentry);
 877
 878		/*
 879		 * We need to prune ancestors too. This is necessary to prevent
 880		 * quadratic behavior of shrink_dcache_parent(), but is also
 881		 * expected to be beneficial in reducing dentry cache
 882		 * fragmentation.
 883		 */
 884		dentry = parent;
 885		while (dentry && !lockref_put_or_lock(&dentry->d_lockref)) {
 886			parent = lock_parent(dentry);
 887			if (dentry->d_lockref.count != 1) {
 888				dentry->d_lockref.count--;
 889				spin_unlock(&dentry->d_lock);
 890				if (parent)
 891					spin_unlock(&parent->d_lock);
 892				break;
 893			}
 894			inode = dentry->d_inode;	/* can't be NULL */
 895			if (unlikely(!spin_trylock(&inode->i_lock))) {
 896				spin_unlock(&dentry->d_lock);
 897				if (parent)
 898					spin_unlock(&parent->d_lock);
 899				cpu_relax();
 900				continue;
 901			}
 902			__dentry_kill(dentry);
 903			dentry = parent;
 904		}
 905	}
 906}
 907
 908static enum lru_status
 909dentry_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg)
 910{
 911	struct list_head *freeable = arg;
 912	struct dentry	*dentry = container_of(item, struct dentry, d_lru);
 913
 914
 915	/*
 916	 * We are inverting the lru lock/dentry->d_lock order here,
 917	 * so use a trylock. If we fail to get the lock, just skip
 918	 * it.
 919	 */
 920	if (!spin_trylock(&dentry->d_lock))
 921		return LRU_SKIP;
 922
 923	/*
 924	 * Referenced dentries are still in use. If they have active
 925	 * counts, just remove them from the LRU. Otherwise give them
 926	 * another pass through the LRU.
 927	 */
 928	if (dentry->d_lockref.count) {
 929		d_lru_isolate(dentry);
 930		spin_unlock(&dentry->d_lock);
 931		return LRU_REMOVED;
 932	}
 933
 934	if (dentry->d_flags & DCACHE_REFERENCED) {
 935		dentry->d_flags &= ~DCACHE_REFERENCED;
 936		spin_unlock(&dentry->d_lock);
 937
 938		/*
 939		 * The list move itself will be made by the common LRU code. At
 940		 * this point, we've dropped the dentry->d_lock but keep the
 941		 * lru lock. This is safe to do, since every list movement is
 942		 * protected by the lru lock even if both locks are held.
 943		 *
 944		 * This is guaranteed by the fact that all LRU management
 945		 * functions are intermediated by the LRU API calls like
 946		 * list_lru_add and list_lru_del. List movement in this file
 947		 * only ever occurs through these functions or through callbacks
 948		 * like this one that are called from the LRU API.
 949		 *
 950		 * The only exceptions to this are functions like
 951		 * shrink_dentry_list, and code that first checks for the
 952		 * DCACHE_SHRINK_LIST flag.  Those are guaranteed to be
 953		 * operating only with stack provided lists after they are
 954		 * properly isolated from the main list.  It is thus always a
 955		 * local access.
 956		 */
 957		return LRU_ROTATE;
 958	}
 959
 960	d_lru_shrink_move(dentry, freeable);
 961	spin_unlock(&dentry->d_lock);
 962
 963	return LRU_REMOVED;
 964}
 965
 966/**
 967 * prune_dcache_sb - shrink the dcache
 968 * @sb: superblock
 969 * @nr_to_scan: number of entries to try to free
 970 * @nid: which node to scan for freeable entities
 971 *
 972 * Attempt to shrink the superblock dcache LRU by @nr_to_scan entries. This is
 973 * done when we need more memory and is called from the superblock shrinker
 974 * function.
 975 *
 976 * This function may fail to free any resources if all the dentries are in
 977 * use.
 978 */
 979long prune_dcache_sb(struct super_block *sb, unsigned long nr_to_scan,
 980		     int nid)
 981{
 982	LIST_HEAD(dispose);
 983	long freed;
 984
 985	freed = list_lru_walk_node(&sb->s_dentry_lru, nid, dentry_lru_isolate,
 986				       &dispose, &nr_to_scan);
 987	shrink_dentry_list(&dispose);
 988	return freed;
 989}
 990
 991static enum lru_status dentry_lru_isolate_shrink(struct list_head *item,
 992						spinlock_t *lru_lock, void *arg)
 993{
 994	struct list_head *freeable = arg;
 995	struct dentry	*dentry = container_of(item, struct dentry, d_lru);
 996
 997	/*
 998	 * We are inverting the lru lock/dentry->d_lock order here,
 999	 * so use a trylock. If we fail to get the lock, just skip
1000	 * it.
1001	 */
1002	if (!spin_trylock(&dentry->d_lock))
1003		return LRU_SKIP;
1004
1005	d_lru_shrink_move(dentry, freeable);
1006	spin_unlock(&dentry->d_lock);
1007
1008	return LRU_REMOVED;
1009}
1010
1011
1012/**
1013 * shrink_dcache_sb - shrink dcache for a superblock
1014 * @sb: superblock
1015 *
1016 * Shrink the dcache for the specified super block. This is used to free
1017 * the dcache before unmounting a file system.
1018 */
1019void shrink_dcache_sb(struct super_block *sb)
1020{
1021	long freed;
1022
1023	do {
1024		LIST_HEAD(dispose);
1025
1026		freed = list_lru_walk(&sb->s_dentry_lru,
1027			dentry_lru_isolate_shrink, &dispose, UINT_MAX);
1028
1029		this_cpu_sub(nr_dentry_unused, freed);
1030		shrink_dentry_list(&dispose);
1031	} while (freed > 0);
1032}
1033EXPORT_SYMBOL(shrink_dcache_sb);
1034
1035/**
1036 * enum d_walk_ret - action to take during tree walk
1037 * @D_WALK_CONTINUE:	continue walk
1038 * @D_WALK_QUIT:	quit walk
1039 * @D_WALK_NORETRY:	quit when retry is needed
1040 * @D_WALK_SKIP:	skip this dentry and its children
1041 */
1042enum d_walk_ret {
1043	D_WALK_CONTINUE,
1044	D_WALK_QUIT,
1045	D_WALK_NORETRY,
1046	D_WALK_SKIP,
1047};
1048
1049/**
1050 * d_walk - walk the dentry tree
1051 * @parent:	start of walk
1052 * @data:	data passed to @enter() and @finish()
1053 * @enter:	callback when first entering the dentry
1054 * @finish:	callback when successfully finished the walk
1055 *
1056 * The @enter() and @finish() callbacks are called with d_lock held.
1057 */
1058static void d_walk(struct dentry *parent, void *data,
1059		   enum d_walk_ret (*enter)(void *, struct dentry *),
1060		   void (*finish)(void *))
1061{
1062	struct dentry *this_parent;
1063	struct list_head *next;
1064	unsigned seq = 0;
1065	enum d_walk_ret ret;
1066	bool retry = true;
1067
1068again:
1069	read_seqbegin_or_lock(&rename_lock, &seq);
1070	this_parent = parent;
1071	spin_lock(&this_parent->d_lock);
1072
1073	ret = enter(data, this_parent);
1074	switch (ret) {
1075	case D_WALK_CONTINUE:
1076		break;
1077	case D_WALK_QUIT:
1078	case D_WALK_SKIP:
1079		goto out_unlock;
1080	case D_WALK_NORETRY:
1081		retry = false;
1082		break;
1083	}
1084repeat:
1085	next = this_parent->d_subdirs.next;
1086resume:
1087	while (next != &this_parent->d_subdirs) {
1088		struct list_head *tmp = next;
1089		struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
1090		next = tmp->next;
1091
1092		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
1093
1094		ret = enter(data, dentry);
1095		switch (ret) {
1096		case D_WALK_CONTINUE:
1097			break;
1098		case D_WALK_QUIT:
1099			spin_unlock(&dentry->d_lock);
1100			goto out_unlock;
1101		case D_WALK_NORETRY:
1102			retry = false;
1103			break;
1104		case D_WALK_SKIP:
1105			spin_unlock(&dentry->d_lock);
1106			continue;
1107		}
1108
1109		if (!list_empty(&dentry->d_subdirs)) {
1110			spin_unlock(&this_parent->d_lock);
1111			spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
1112			this_parent = dentry;
1113			spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
1114			goto repeat;
1115		}
1116		spin_unlock(&dentry->d_lock);
1117	}
1118	/*
1119	 * All done at this level ... ascend and resume the search.
1120	 */
1121	if (this_parent != parent) {
1122		struct dentry *child = this_parent;
1123		this_parent = child->d_parent;
1124
1125		rcu_read_lock();
1126		spin_unlock(&child->d_lock);
1127		spin_lock(&this_parent->d_lock);
1128
1129		/*
1130		 * might go back up the wrong parent if we have had a rename
1131		 * or deletion
1132		 */
1133		if (this_parent != child->d_parent ||
1134			 (child->d_flags & DCACHE_DENTRY_KILLED) ||
1135			 need_seqretry(&rename_lock, seq)) {
1136			spin_unlock(&this_parent->d_lock);
1137			rcu_read_unlock();
1138			goto rename_retry;
1139		}
1140		rcu_read_unlock();
1141		next = child->d_u.d_child.next;
1142		goto resume;
1143	}
1144	if (need_seqretry(&rename_lock, seq)) {
1145		spin_unlock(&this_parent->d_lock);
1146		goto rename_retry;
1147	}
1148	if (finish)
1149		finish(data);
1150
1151out_unlock:
1152	spin_unlock(&this_parent->d_lock);
1153	done_seqretry(&rename_lock, seq);
1154	return;
1155
1156rename_retry:
1157	if (!retry)
1158		return;
1159	seq = 1;
1160	goto again;
1161}
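/*
 * Illustrative sketch, not part of dcache.c: a d_walk() callback in the
 * style of check_mount() below. The callback runs under d_lock, so it may
 * only inspect cheap dentry state and must never sleep; it steers the
 * walk through its return value.
 */
static enum d_walk_ret example_count_one(void *data, struct dentry *dentry)
{
	int *count = data;

	(*count)++;			/* d_lock held: just account and go on */
	return D_WALK_CONTINUE;		/* keep descending into children */
}
/* usage: int n = 0; d_walk(parent, &n, example_count_one, NULL); */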
1162
1163/*
1164 * Search for at least 1 mount point in the dentry's subdirs.
1165 * We descend to the next level whenever the d_subdirs
1166 * list is non-empty and continue searching.
1167 */
1168
1169static enum d_walk_ret check_mount(void *data, struct dentry *dentry)
1170{
1171	int *ret = data;
1172	if (d_mountpoint(dentry)) {
1173		*ret = 1;
1174		return D_WALK_QUIT;
1175	}
1176	return D_WALK_CONTINUE;
1177}
1178
1179/**
1180 * have_submounts - check for mounts over a dentry
1181 * @parent: dentry to check.
1182 *
1183 * Return true if the parent or its subdirectories contain
1184 * a mount point
1185 */
1186int have_submounts(struct dentry *parent)
1187{
1188	int ret = 0;
1189
1190	d_walk(parent, &ret, check_mount, NULL);
1191
1192	return ret;
1193}
1194EXPORT_SYMBOL(have_submounts);
1195
1196/*
1197 * Called by mount code to set a mountpoint and check if the mountpoint is
1198 * reachable (e.g. NFS can unhash a directory dentry and then the complete
1199 * subtree can become unreachable).
1200 *
1201 * Only one of check_submounts_and_drop() and d_set_mounted() must succeed.  For
1202 * this reason take rename_lock and d_lock on dentry and ancestors.
1203 */
1204int d_set_mounted(struct dentry *dentry)
1205{
1206	struct dentry *p;
1207	int ret = -ENOENT;
1208	write_seqlock(&rename_lock);
1209	for (p = dentry->d_parent; !IS_ROOT(p); p = p->d_parent) {
1210		/* Need exclusion wrt. check_submounts_and_drop() */
1211		spin_lock(&p->d_lock);
1212		if (unlikely(d_unhashed(p))) {
1213			spin_unlock(&p->d_lock);
1214			goto out;
1215		}
1216		spin_unlock(&p->d_lock);
1217	}
1218	spin_lock(&dentry->d_lock);
1219	if (!d_unlinked(dentry)) {
1220		dentry->d_flags |= DCACHE_MOUNTED;
1221		ret = 0;
1222	}
1223	spin_unlock(&dentry->d_lock);
1224out:
1225	write_sequnlock(&rename_lock);
1226	return ret;
1227}
1228
1229/*
1230 * Search the dentry child list of the specified parent,
1231 * and move any unused dentries to the end of the dispose
1232 * list for shrink_dentry_list(). We descend to the next level
1233 * whenever the d_subdirs list is non-empty and continue
1234 * searching.
1235 *
1236 * It returns zero iff there are no unused children,
1237 * otherwise it returns the number of children moved to
1238 * the end of the dispose list. This may not be the total
1239 * number of unused children, because select_collect() can
1240 * drop the lock and return early due to latency
1241 * constraints.
1242 */
1243
1244struct select_data {
1245	struct dentry *start;
1246	struct list_head dispose;
1247	int found;
1248};
1249
1250static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
1251{
1252	struct select_data *data = _data;
1253	enum d_walk_ret ret = D_WALK_CONTINUE;
1254
1255	if (data->start == dentry)
1256		goto out;
1257
1258	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
1259		data->found++;
1260	} else {
1261		if (dentry->d_flags & DCACHE_LRU_LIST)
1262			d_lru_del(dentry);
1263		if (!dentry->d_lockref.count) {
1264			d_shrink_add(dentry, &data->dispose);
1265			data->found++;
1266		}
1267	}
1268	/*
1269	 * We can return to the caller if we have found some (this
1270	 * ensures forward progress). We'll be coming back to find
1271	 * the rest.
1272	 */
1273	if (!list_empty(&data->dispose))
1274		ret = need_resched() ? D_WALK_QUIT : D_WALK_NORETRY;
1275out:
1276	return ret;
1277}
1278
1279/**
1280 * shrink_dcache_parent - prune dcache
1281 * @parent: parent of entries to prune
1282 *
1283 * Prune the dcache to remove unused children of the parent dentry.
1284 */
1285void shrink_dcache_parent(struct dentry *parent)
1286{
1287	for (;;) {
1288		struct select_data data;
1289
1290		INIT_LIST_HEAD(&data.dispose);
1291		data.start = parent;
1292		data.found = 0;
1293
1294		d_walk(parent, &data, select_collect, NULL);
1295		if (!data.found)
1296			break;
1297
1298		shrink_dentry_list(&data.dispose);
1299		cond_resched();
1300	}
1301}
1302EXPORT_SYMBOL(shrink_dcache_parent);
1303
1304static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
1305{
1306	/* it has busy descendants; complain about those instead */
1307	if (!list_empty(&dentry->d_subdirs))
1308		return D_WALK_CONTINUE;
1309
1310	/* root with refcount 1 is fine */
1311	if (dentry == _data && dentry->d_lockref.count == 1)
1312		return D_WALK_CONTINUE;
1313
1314	printk(KERN_ERR "BUG: Dentry %p{i=%lx,n=%pd} "
1315			"still in use (%d) [unmount of %s %s]\n",
1316		       dentry,
1317		       dentry->d_inode ?
1318		       dentry->d_inode->i_ino : 0UL,
1319		       dentry,
1320		       dentry->d_lockref.count,
1321		       dentry->d_sb->s_type->name,
1322		       dentry->d_sb->s_id);
1323	WARN_ON(1);
1324	return D_WALK_CONTINUE;
1325}
1326
1327static void do_one_tree(struct dentry *dentry)
1328{
1329	shrink_dcache_parent(dentry);
1330	d_walk(dentry, dentry, umount_check, NULL);
1331	d_drop(dentry);
1332	dput(dentry);
1333}
1334
1335/*
1336 * destroy the dentries attached to a superblock on unmounting
1337 */
1338void shrink_dcache_for_umount(struct super_block *sb)
1339{
1340	struct dentry *dentry;
1341
1342	WARN(down_read_trylock(&sb->s_umount), "s_umount should've been locked");
1343
1344	dentry = sb->s_root;
1345	sb->s_root = NULL;
1346	do_one_tree(dentry);
1347
1348	while (!hlist_bl_empty(&sb->s_anon)) {
1349		dentry = dget(hlist_bl_entry(hlist_bl_first(&sb->s_anon), struct dentry, d_hash));
1350		do_one_tree(dentry);
1351	}
1352}
1353
1354static enum d_walk_ret check_and_collect(void *_data, struct dentry *dentry)
1355{
1356	struct select_data *data = _data;
1357
1358	if (d_mountpoint(dentry)) {
1359		data->found = -EBUSY;
1360		return D_WALK_QUIT;
1361	}
1362
1363	return select_collect(_data, dentry);
1364}
1365
1366static void check_and_drop(void *_data)
1367{
1368	struct select_data *data = _data;
1369
1370	if (d_mountpoint(data->start))
1371		data->found = -EBUSY;
1372	if (!data->found)
1373		__d_drop(data->start);
1374}
1375
1376/**
1377 * check_submounts_and_drop - prune dcache, check for submounts and drop
1378 * @dentry: dentry to prune and drop
1379 *
1380 * All done as a single atomic operation relative to has_unlinked_ancestor().
1381 * Returns 0 if successfully unhashed @dentry.  If there were submounts then
1382 * return -EBUSY.
1384 */
1385int check_submounts_and_drop(struct dentry *dentry)
1386{
1387	int ret = 0;
1388
1389	/* Negative dentries can be dropped without further checks */
1390	if (!dentry->d_inode) {
1391		d_drop(dentry);
1392		goto out;
1393	}
1394
1395	for (;;) {
1396		struct select_data data;
1397
1398		INIT_LIST_HEAD(&data.dispose);
1399		data.start = dentry;
1400		data.found = 0;
1401
1402		d_walk(dentry, &data, check_and_collect, check_and_drop);
1403		ret = data.found;
1404
1405		if (!list_empty(&data.dispose))
1406			shrink_dentry_list(&data.dispose);
1407
1408		if (ret <= 0)
1409			break;
1410
1411		cond_resched();
1412	}
1413
1414out:
1415	return ret;
1416}
1417EXPORT_SYMBOL(check_submounts_and_drop);
1418
1419/**
1420 * __d_alloc	-	allocate a dcache entry
1421 * @sb: filesystem it will belong to
1422 * @name: qstr of the name
1423 *
1424 * Allocates a dentry. It returns %NULL if there is insufficient memory
1425 * available. On a success the dentry is returned. The name passed in is
1426 * copied and the copy passed in may be reused after this call.
1427 */
1428 
1429struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
1430{
1431	struct dentry *dentry;
1432	char *dname;
1433
1434	dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
1435	if (!dentry)
1436		return NULL;
1437
1438	/*
1439	 * We guarantee that the inline name is always NUL-terminated.
1440	 * This way the memcpy() done by the name switching in rename
1441	 * will still always have a NUL at the end, even if we might
1442	 * be overwriting an internal NUL character
1443	 */
1444	dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
1445	if (name->len > DNAME_INLINE_LEN-1) {
1446		dname = kmalloc(name->len + 1, GFP_KERNEL);
1447		if (!dname) {
1448			kmem_cache_free(dentry_cache, dentry); 
1449			return NULL;
1450		}
1451	} else {
1452		dname = dentry->d_iname;
1453	}
1454
1455	dentry->d_name.len = name->len;
1456	dentry->d_name.hash = name->hash;
1457	memcpy(dname, name->name, name->len);
1458	dname[name->len] = 0;
1459
1460	/* Make sure we always see the terminating NUL character */
1461	smp_wmb();
1462	dentry->d_name.name = dname;
1463
1464	dentry->d_lockref.count = 1;
1465	dentry->d_flags = 0;
1466	spin_lock_init(&dentry->d_lock);
1467	seqcount_init(&dentry->d_seq);
1468	dentry->d_inode = NULL;
1469	dentry->d_parent = dentry;
1470	dentry->d_sb = sb;
1471	dentry->d_op = NULL;
1472	dentry->d_fsdata = NULL;
1473	INIT_HLIST_BL_NODE(&dentry->d_hash);
1474	INIT_LIST_HEAD(&dentry->d_lru);
1475	INIT_LIST_HEAD(&dentry->d_subdirs);
1476	INIT_HLIST_NODE(&dentry->d_alias);
1477	INIT_LIST_HEAD(&dentry->d_u.d_child);
1478	d_set_d_op(dentry, dentry->d_sb->s_d_op);
1479
1480	this_cpu_inc(nr_dentry);
1481
1482	return dentry;
1483}
1484
1485/**
1486 * d_alloc	-	allocate a dcache entry
1487 * @parent: parent of entry to allocate
1488 * @name: qstr of the name
1489 *
1490 * Allocates a dentry. It returns %NULL if there is insufficient memory
1491 * available. On a success the dentry is returned. The name passed in is
1492 * copied and the copy passed in may be reused after this call.
1493 */
1494struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
1495{
1496	struct dentry *dentry = __d_alloc(parent->d_sb, name);
1497	if (!dentry)
1498		return NULL;
1499
1500	spin_lock(&parent->d_lock);
1501	/*
1502	 * don't need child lock because it is not subject
1503	 * to concurrency here
1504	 */
1505	__dget_dlock(parent);
1506	dentry->d_parent = parent;
1507	list_add(&dentry->d_u.d_child, &parent->d_subdirs);
1508	spin_unlock(&parent->d_lock);
1509
1510	return dentry;
1511}
1512EXPORT_SYMBOL(d_alloc);
1513
1514/**
1515 * d_alloc_pseudo - allocate a dentry (for lookup-less filesystems)
1516 * @sb: the superblock
1517 * @name: qstr of the name
1518 *
1519 * For a filesystem that just pins its dentries in memory and never
1520 * performs lookups at all, return an unhashed IS_ROOT dentry.
1521 */
1522struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name)
1523{
1524	return __d_alloc(sb, name);
1525}
1526EXPORT_SYMBOL(d_alloc_pseudo);
1527
1528struct dentry *d_alloc_name(struct dentry *parent, const char *name)
1529{
1530	struct qstr q;
1531
1532	q.name = name;
1533	q.len = strlen(name);
1534	q.hash = full_name_hash(q.name, q.len);
1535	return d_alloc(parent, &q);
1536}
1537EXPORT_SYMBOL(d_alloc_name);
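/*
 * Illustrative sketch, not part of dcache.c: the usual pseudo-filesystem
 * pattern for creating a named, hashed child. The inode is assumed to be
 * set up by the caller; d_add() (declared in dcache.h) instantiates and
 * hashes in one step.
 */
static struct dentry *example_add_child(struct dentry *parent,
					struct inode *inode)
{
	struct dentry *d = d_alloc_name(parent, "example");

	if (!d)
		return NULL;		/* out of memory */
	d_add(d, inode);		/* make positive + visible to lookup */
	return d;
}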
1538
1539void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
1540{
1541	WARN_ON_ONCE(dentry->d_op);
1542	WARN_ON_ONCE(dentry->d_flags & (DCACHE_OP_HASH	|
1543				DCACHE_OP_COMPARE	|
1544				DCACHE_OP_REVALIDATE	|
1545				DCACHE_OP_WEAK_REVALIDATE	|
1546				DCACHE_OP_DELETE ));
1547	dentry->d_op = op;
1548	if (!op)
1549		return;
1550	if (op->d_hash)
1551		dentry->d_flags |= DCACHE_OP_HASH;
1552	if (op->d_compare)
1553		dentry->d_flags |= DCACHE_OP_COMPARE;
1554	if (op->d_revalidate)
1555		dentry->d_flags |= DCACHE_OP_REVALIDATE;
1556	if (op->d_weak_revalidate)
1557		dentry->d_flags |= DCACHE_OP_WEAK_REVALIDATE;
1558	if (op->d_delete)
1559		dentry->d_flags |= DCACHE_OP_DELETE;
1560	if (op->d_prune)
1561		dentry->d_flags |= DCACHE_OP_PRUNE;
1562
1563}
1564EXPORT_SYMBOL(d_set_d_op);
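/*
 * Illustrative sketch, not part of dcache.c: filesystems rarely call
 * d_set_d_op() directly; they set sb->s_d_op at mount time and __d_alloc()
 * (above) applies it to every new dentry. example_d_revalidate and
 * example_d_delete are hypothetical methods.
 */
static const struct dentry_operations example_dops = {
	.d_revalidate	= example_d_revalidate,
	.d_delete	= example_d_delete,
};
/* in fill_super(): sb->s_d_op = &example_dops; */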
1565
1566static unsigned d_flags_for_inode(struct inode *inode)
1567{
1568	unsigned add_flags = DCACHE_FILE_TYPE;
1569
1570	if (!inode)
1571		return DCACHE_MISS_TYPE;
1572
1573	if (S_ISDIR(inode->i_mode)) {
1574		add_flags = DCACHE_DIRECTORY_TYPE;
1575		if (unlikely(!(inode->i_opflags & IOP_LOOKUP))) {
1576			if (unlikely(!inode->i_op->lookup))
1577				add_flags = DCACHE_AUTODIR_TYPE;
1578			else
1579				inode->i_opflags |= IOP_LOOKUP;
1580		}
1581	} else if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) {
1582		if (unlikely(inode->i_op->follow_link))
1583			add_flags = DCACHE_SYMLINK_TYPE;
1584		else
1585			inode->i_opflags |= IOP_NOFOLLOW;
1586	}
1587
1588	if (unlikely(IS_AUTOMOUNT(inode)))
1589		add_flags |= DCACHE_NEED_AUTOMOUNT;
1590	return add_flags;
1591}
1592
1593static void __d_instantiate(struct dentry *dentry, struct inode *inode)
1594{
1595	unsigned add_flags = d_flags_for_inode(inode);
1596
1597	spin_lock(&dentry->d_lock);
1598	__d_set_type(dentry, add_flags);
1599	if (inode)
1600		hlist_add_head(&dentry->d_alias, &inode->i_dentry);
1601	dentry->d_inode = inode;
1602	dentry_rcuwalk_barrier(dentry);
1603	spin_unlock(&dentry->d_lock);
1604	fsnotify_d_instantiate(dentry, inode);
1605}
1606
1607/**
1608 * d_instantiate - fill in inode information for a dentry
1609 * @entry: dentry to complete
1610 * @inode: inode to attach to this dentry
1611 *
1612 * Fill in inode information in the entry.
1613 *
1614 * This turns negative dentries into productive full members
1615 * of society.
1616 *
1617 * NOTE! This assumes that the inode count has been incremented
1618 * (or otherwise set) by the caller to indicate that it is now
1619 * in use by the dcache.
1620 */
1621 
1622void d_instantiate(struct dentry *entry, struct inode * inode)
1623{
1624	BUG_ON(!hlist_unhashed(&entry->d_alias));
1625	if (inode)
1626		spin_lock(&inode->i_lock);
1627	__d_instantiate(entry, inode);
1628	if (inode)
1629		spin_unlock(&inode->i_lock);
1630	security_d_instantiate(entry, inode);
1631}
1632EXPORT_SYMBOL(d_instantiate);
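/*
 * Illustrative sketch, not part of dcache.c: the classic caller is an
 * inode_operations ->create() method. The VFS passes in a negative
 * dentry; the filesystem makes the inode (example_new_inode() is
 * hypothetical) and d_instantiate() turns the dentry positive, taking
 * over the inode reference as the NOTE above describes.
 */
static int example_create(struct inode *dir, struct dentry *dentry,
			  umode_t mode, bool excl)
{
	struct inode *inode = example_new_inode(dir->i_sb, mode);

	if (!inode)
		return -ENOSPC;
	d_instantiate(dentry, inode);	/* dentry now owns the inode ref */
	return 0;
}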
1633
1634/**
1635 * d_instantiate_unique - instantiate a non-aliased dentry
1636 * @entry: dentry to instantiate
1637 * @inode: inode to attach to this dentry
1638 *
1639 * Fill in inode information in the entry. On success, it returns NULL.
1640 * If an unhashed alias of "entry" already exists, then we return the
1641 * aliased dentry instead and drop one reference to inode.
1642 *
1643 * Note that in order to avoid conflicts with rename() etc, the caller
1644 * had better be holding the parent directory semaphore.
1645 *
1646 * This also assumes that the inode count has been incremented
1647 * (or otherwise set) by the caller to indicate that it is now
1648 * in use by the dcache.
1649 */
1650static struct dentry *__d_instantiate_unique(struct dentry *entry,
1651					     struct inode *inode)
1652{
1653	struct dentry *alias;
1654	int len = entry->d_name.len;
1655	const char *name = entry->d_name.name;
1656	unsigned int hash = entry->d_name.hash;
1657
1658	if (!inode) {
1659		__d_instantiate(entry, NULL);
1660		return NULL;
1661	}
1662
1663	hlist_for_each_entry(alias, &inode->i_dentry, d_alias) {
1664		/*
1665		 * Don't need alias->d_lock here, because aliases with
1666		 * d_parent == entry->d_parent are not subject to name or
1667		 * parent changes, because the parent inode i_mutex is held.
1668		 */
1669		if (alias->d_name.hash != hash)
1670			continue;
1671		if (alias->d_parent != entry->d_parent)
1672			continue;
1673		if (alias->d_name.len != len)
1674			continue;
1675		if (dentry_cmp(alias, name, len))
1676			continue;
1677		__dget(alias);
1678		return alias;
1679	}
1680
1681	__d_instantiate(entry, inode);
1682	return NULL;
1683}
1684
1685struct dentry *d_instantiate_unique(struct dentry *entry, struct inode *inode)
1686{
1687	struct dentry *result;
1688
1689	BUG_ON(!hlist_unhashed(&entry->d_alias));
1690
1691	if (inode)
1692		spin_lock(&inode->i_lock);
1693	result = __d_instantiate_unique(entry, inode);
1694	if (inode)
1695		spin_unlock(&inode->i_lock);
1696
1697	if (!result) {
1698		security_d_instantiate(entry, inode);
1699		return NULL;
1700	}
1701
1702	BUG_ON(!d_unhashed(result));
1703	iput(inode);
1704	return result;
1705}
1706
1707EXPORT_SYMBOL(d_instantiate_unique);
1708
1709/**
1710 * d_instantiate_no_diralias - instantiate a non-aliased dentry
1711 * @entry: dentry to complete
1712 * @inode: inode to attach to this dentry
1713 *
1714 * Fill in inode information in the entry.  If a directory alias is found, then
1715 * return an error (and drop inode).  Together with d_materialise_unique() this
1716 * guarantees that a directory inode may never have more than one alias.
1717 */
1718int d_instantiate_no_diralias(struct dentry *entry, struct inode *inode)
1719{
1720	BUG_ON(!hlist_unhashed(&entry->d_alias));
1721
1722	spin_lock(&inode->i_lock);
1723	if (S_ISDIR(inode->i_mode) && !hlist_empty(&inode->i_dentry)) {
1724		spin_unlock(&inode->i_lock);
1725		iput(inode);
1726		return -EBUSY;
1727	}
1728	__d_instantiate(entry, inode);
1729	spin_unlock(&inode->i_lock);
1730	security_d_instantiate(entry, inode);
1731
1732	return 0;
1733}
1734EXPORT_SYMBOL(d_instantiate_no_diralias);
1735
1736struct dentry *d_make_root(struct inode *root_inode)
1737{
1738	struct dentry *res = NULL;
1739
1740	if (root_inode) {
1741		static const struct qstr name = QSTR_INIT("/", 1);
1742
1743		res = __d_alloc(root_inode->i_sb, &name);
1744		if (res)
1745			d_instantiate(res, root_inode);
1746		else
1747			iput(root_inode);
1748	}
1749	return res;
1750}
1751EXPORT_SYMBOL(d_make_root);
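/*
 * Illustrative sketch, not part of dcache.c: the usual fill_super() tail.
 * Note the error contract visible in the code above: on allocation
 * failure d_make_root() has already done iput(root_inode), so the caller
 * must not release the inode again.
 *
 *	sb->s_root = d_make_root(root_inode);
 *	if (!sb->s_root)
 *		return -ENOMEM;		(root_inode already released)
 */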
1752
1753static struct dentry * __d_find_any_alias(struct inode *inode)
1754{
1755	struct dentry *alias;
1756
1757	if (hlist_empty(&inode->i_dentry))
1758		return NULL;
1759	alias = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
1760	__dget(alias);
1761	return alias;
1762}
1763
1764/**
1765 * d_find_any_alias - find any alias for a given inode
1766 * @inode: inode to find an alias for
1767 *
1768 * If any aliases exist for the given inode, take and return a
1769 * reference for one of them.  If no aliases exist, return %NULL.
1770 */
1771struct dentry *d_find_any_alias(struct inode *inode)
1772{
1773	struct dentry *de;
1774
1775	spin_lock(&inode->i_lock);
1776	de = __d_find_any_alias(inode);
1777	spin_unlock(&inode->i_lock);
1778	return de;
1779}
1780EXPORT_SYMBOL(d_find_any_alias);
1781
1782/**
1783 * d_obtain_alias - find or allocate a dentry for a given inode
1784 * @inode: inode to allocate the dentry for
1785 *
1786 * Obtain a dentry for an inode resulting from NFS filehandle conversion or
1787 * similar open by handle operations.  The returned dentry may be anonymous,
1788 * or may have a full name (if the inode was already in the cache).
1789 *
1790 * When called on a directory inode, we must ensure that the inode only ever
1791 * has one dentry.  If a dentry is found, that is returned instead of
1792 * allocating a new one.
1793 *
1794 * On successful return, the reference to the inode has been transferred
1795 * to the dentry.  In case of an error the reference on the inode is released.
1796 * To make it easier to use in export operations a %NULL or IS_ERR inode may
1797 * be passed in, and the error will be propagated to the return value,
1798 * with a %NULL @inode replaced by ERR_PTR(-ESTALE).
1799 */
1800struct dentry *d_obtain_alias(struct inode *inode)
1801{
1802	static const struct qstr anonstring = QSTR_INIT("/", 1);
1803	struct dentry *tmp;
1804	struct dentry *res;
1805	unsigned add_flags;
1806
1807	if (!inode)
1808		return ERR_PTR(-ESTALE);
1809	if (IS_ERR(inode))
1810		return ERR_CAST(inode);
1811
1812	res = d_find_any_alias(inode);
1813	if (res)
1814		goto out_iput;
1815
1816	tmp = __d_alloc(inode->i_sb, &anonstring);
1817	if (!tmp) {
1818		res = ERR_PTR(-ENOMEM);
1819		goto out_iput;
1820	}
1821
1822	spin_lock(&inode->i_lock);
1823	res = __d_find_any_alias(inode);
1824	if (res) {
1825		spin_unlock(&inode->i_lock);
1826		dput(tmp);
1827		goto out_iput;
1828	}
1829
1830	/* attach a disconnected dentry */
1831	add_flags = d_flags_for_inode(inode) | DCACHE_DISCONNECTED;
1832
1833	spin_lock(&tmp->d_lock);
1834	tmp->d_inode = inode;
1835	tmp->d_flags |= add_flags;
1836	hlist_add_head(&tmp->d_alias, &inode->i_dentry);
1837	hlist_bl_lock(&tmp->d_sb->s_anon);
1838	hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon);
1839	hlist_bl_unlock(&tmp->d_sb->s_anon);
1840	spin_unlock(&tmp->d_lock);
1841	spin_unlock(&inode->i_lock);
1842	security_d_instantiate(tmp, inode);
1843
1844	return tmp;
1845
1846 out_iput:
1847	if (res && !IS_ERR(res))
1848		security_d_instantiate(res, inode);
1849	iput(inode);
1850	return res;
1851}
1852EXPORT_SYMBOL(d_obtain_alias);
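/*
 * Illustrative sketch, not part of dcache.c: the canonical consumer is an
 * export_operations ->fh_to_dentry() method. example_iget(), decoding an
 * inode from the opaque file handle, is hypothetical; note that
 * d_obtain_alias() copes with a NULL or IS_ERR() inode by itself.
 */
static struct dentry *example_fh_to_dentry(struct super_block *sb,
					   struct fid *fid,
					   int fh_len, int fh_type)
{
	struct inode *inode = example_iget(sb, fid->i32.ino);

	return d_obtain_alias(inode);	/* anonymous dentry or existing alias */
}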
1853
1854/**
1855 * d_splice_alias - splice a disconnected dentry into the tree if one exists
1856 * @inode:  the inode which may have a disconnected dentry
1857 * @dentry: a negative dentry which we want to point to the inode.
1858 *
1859 * If inode is a directory and has a 'disconnected' dentry (i.e. IS_ROOT and
1860 * DCACHE_DISCONNECTED), then d_move that in place of the given dentry
1861 * and return it, else simply d_add the inode to the dentry and return NULL.
1862 *
1863 * This is needed in the lookup routine of any filesystem that is exportable
1864 * (via knfsd) so that we can build dcache paths to directories effectively.
1865 *
1866 * If a dentry was found and moved, then it is returned.  Otherwise NULL
1867 * is returned.  This matches the expected return value of ->lookup.
1868 *
1869 * Cluster filesystems may call this function with a negative, hashed dentry.
1870 * In that case, we know that the inode will be a regular file, and also this
1871 * will only occur during atomic_open. So we need to check for the dentry
1872 * being already hashed only in the final case.
1873 */
1874struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
1875{
1876	struct dentry *new = NULL;
1877
1878	if (IS_ERR(inode))
1879		return ERR_CAST(inode);
1880
1881	if (inode && S_ISDIR(inode->i_mode)) {
1882		spin_lock(&inode->i_lock);
1883		new = __d_find_alias(inode, 1);
1884		if (new) {
1885			BUG_ON(!(new->d_flags & DCACHE_DISCONNECTED));
1886			spin_unlock(&inode->i_lock);
1887			security_d_instantiate(new, inode);
1888			d_move(new, dentry);
1889			iput(inode);
1890		} else {
1891			/* already taking inode->i_lock, so d_add() by hand */
1892			__d_instantiate(dentry, inode);
1893			spin_unlock(&inode->i_lock);
1894			security_d_instantiate(dentry, inode);
1895			d_rehash(dentry);
1896		}
1897	} else {
1898		d_instantiate(dentry, inode);
1899		if (d_unhashed(dentry))
1900			d_rehash(dentry);
1901	}
1902	return new;
1903}
1904EXPORT_SYMBOL(d_splice_alias);
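/*
 * Illustrative sketch, not part of dcache.c: the canonical ->lookup() of
 * an exportable filesystem. example_inode_by_name() is hypothetical; if
 * it finds nothing and returns NULL, d_splice_alias() simply makes the
 * dentry negative and returns NULL.
 */
static struct dentry *example_lookup(struct inode *dir, struct dentry *dentry,
				     unsigned int flags)
{
	struct inode *inode = example_inode_by_name(dir, &dentry->d_name);

	return d_splice_alias(inode, dentry);	/* reused alias or NULL */
}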
1905
1906/**
1907 * d_add_ci - lookup or allocate new dentry with case-exact name
1908 * @inode:  the inode case-insensitive lookup has found
1909 * @dentry: the negative dentry that was passed to the parent's lookup func
1910 * @name:   the case-exact name to be associated with the returned dentry
1911 *
1912 * This is to avoid filling the dcache with case-insensitive names to the
1913 * same inode, only the actual correct case is stored in the dcache for
1914 * case-insensitive filesystems.
1915 *
1916 * For a case-insensitive lookup match and if the case-exact dentry
1917 * already exists in the dcache, use it and return it.
1918 *
1919 * If no entry exists with the exact case name, allocate new dentry with
1920 * the exact case, and return the spliced entry.
1921 */
1922struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
1923			struct qstr *name)
1924{
1925	struct dentry *found;
1926	struct dentry *new;
1927
1928	/*
1929	 * First check if a dentry matching the name already exists,
1930	 * if not go ahead and create it now.
1931	 */
1932	found = d_hash_and_lookup(dentry->d_parent, name);
1933	if (unlikely(IS_ERR(found)))
1934		goto err_out;
1935	if (!found) {
1936		new = d_alloc(dentry->d_parent, name);
1937		if (!new) {
1938			found = ERR_PTR(-ENOMEM);
1939			goto err_out;
1940		}
1941
1942		found = d_splice_alias(inode, new);
1943		if (found) {
1944			dput(new);
1945			return found;
1946		}
1947		return new;
1948	}
1949
1950	/*
1951	 * If a matching dentry exists, and it's not negative use it.
1952	 *
1953	 * Decrement the reference count to balance the iget() done
1954	 * earlier on.
1955	 */
1956	if (found->d_inode) {
1957		if (unlikely(found->d_inode != inode)) {
1958			/* This can't happen because bad inodes are unhashed. */
1959			BUG_ON(!is_bad_inode(inode));
1960			BUG_ON(!is_bad_inode(found->d_inode));
1961		}
1962		iput(inode);
1963		return found;
1964	}
1965
1966	/*
1967	 * Negative dentry: instantiate it unless the inode is a directory and
1968	 * already has a dentry.
1969	 */
1970	new = d_splice_alias(inode, found);
1971	if (new) {
1972		dput(found);
1973		found = new;
1974	}
1975	return found;
1976
1977err_out:
1978	iput(inode);
1979	return found;
1980}
1981EXPORT_SYMBOL(d_add_ci);
1982
1983/*
1984 * Do the slow-case of the dentry name compare.
1985 *
1986 * Unlike the dentry_cmp() function, we need to atomically
1987 * load the name and length information, so that the
1988 * filesystem can rely on them, and can use the 'name' and
1989 * 'len' information without worrying about walking off the
1990 * end of memory etc.
1991 *
1992 * Thus the read_seqcount_retry() and the "duplicate" info
1993 * in arguments (the low-level filesystem should not look
1994 * at the dentry inode or name contents directly, since
1995 * rename can change them while we're in RCU mode).
1996 */
1997enum slow_d_compare {
1998	D_COMP_OK,
1999	D_COMP_NOMATCH,
2000	D_COMP_SEQRETRY,
2001};
2002
2003static noinline enum slow_d_compare slow_dentry_cmp(
2004		const struct dentry *parent,
2005		struct dentry *dentry,
2006		unsigned int seq,
2007		const struct qstr *name)
2008{
2009	int tlen = dentry->d_name.len;
2010	const char *tname = dentry->d_name.name;
2011
2012	if (read_seqcount_retry(&dentry->d_seq, seq)) {
2013		cpu_relax();
2014		return D_COMP_SEQRETRY;
2015	}
2016	if (parent->d_op->d_compare(parent, dentry, tlen, tname, name))
2017		return D_COMP_NOMATCH;
2018	return D_COMP_OK;
2019}
2020
2021/**
2022 * __d_lookup_rcu - search for a dentry (racy, store-free)
2023 * @parent: parent dentry
2024 * @name: qstr of name we wish to find
2025 * @seqp: returns d_seq value at the point where the dentry was found
2026 * Returns: dentry, or NULL
2027 *
2028 * __d_lookup_rcu is the dcache lookup function for rcu-walk name
2029 * resolution (store-free path walking) design described in
2030 * Documentation/filesystems/path-lookup.txt.
2031 *
2032 * This is not to be used outside core vfs.
2033 *
2034 * __d_lookup_rcu must only be used in rcu-walk mode, ie. with vfsmount lock
2035 * held, and rcu_read_lock held. The returned dentry must not be stored into
2036 * without taking d_lock and checking d_seq sequence count against @seq
2037 * returned here.
2038 *
2039 * A refcount may be taken on the found dentry with the d_rcu_to_refcount
2040 * function.
2041 *
2042 * Alternatively, __d_lookup_rcu may be called again to look up the child of
2043 * the returned dentry, so long as its parent's seqlock is checked after the
2044 * child is looked up. Thus, an interlocking stepping of sequence lock checks
2045 * is formed, giving integrity down the path walk.
2046 *
2047 * NOTE! The caller *has* to check the resulting dentry against the sequence
2048 * number we've returned before using any of the resulting dentry state!
2049 */
2050struct dentry *__d_lookup_rcu(const struct dentry *parent,
2051				const struct qstr *name,
2052				unsigned *seqp)
2053{
2054	u64 hashlen = name->hash_len;
2055	const unsigned char *str = name->name;
2056	struct hlist_bl_head *b = d_hash(parent, hashlen_hash(hashlen));
2057	struct hlist_bl_node *node;
2058	struct dentry *dentry;
2059
2060	/*
2061	 * Note: There is significant duplication with __d_lookup which is
2062	 * required to prevent single threaded performance regressions
2063	 * especially on architectures where smp_rmb (in seqcounts) are costly.
2064	 * Keep the two functions in sync.
2065	 */
2066
2067	/*
2068	 * The hash list is protected using RCU.
2069	 *
2070	 * Carefully use d_seq when comparing a candidate dentry, to avoid
2071	 * races with d_move().
2072	 *
2073	 * It is possible that concurrent renames can mess up our list
2074	 * walk here and result in missing our dentry, resulting in the
2075	 * false-negative result. d_lookup() protects against concurrent
2076	 * renames using rename_lock seqlock.
2077	 *
2078	 * See Documentation/filesystems/path-lookup.txt for more details.
2079	 */
2080	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
2081		unsigned seq;
2082
2083seqretry:
2084		/*
2085		 * The dentry sequence count protects us from concurrent
2086		 * renames, and thus protects parent and name fields.
2087		 *
2088		 * The caller must perform a seqcount check in order
2089		 * to do anything useful with the returned dentry.
2090		 *
2091		 * NOTE! We do a "raw" seqcount_begin here. That means that
2092		 * we don't wait for the sequence count to stabilize if it
2093		 * is in the middle of a sequence change. If we do the slow
2094		 * dentry compare, we will do seqretries until it is stable,
2095		 * and if we end up with a successful lookup, we actually
2096		 * want to exit RCU lookup anyway.
2097		 */
2098		seq = raw_seqcount_begin(&dentry->d_seq);
2099		if (dentry->d_parent != parent)
2100			continue;
2101		if (d_unhashed(dentry))
2102			continue;
2103
2104		if (unlikely(parent->d_flags & DCACHE_OP_COMPARE)) {
2105			if (dentry->d_name.hash != hashlen_hash(hashlen))
2106				continue;
2107			*seqp = seq;
2108			switch (slow_dentry_cmp(parent, dentry, seq, name)) {
2109			case D_COMP_OK:
2110				return dentry;
2111			case D_COMP_NOMATCH:
2112				continue;
2113			default:
2114				goto seqretry;
2115			}
2116		}
2117
2118		if (dentry->d_name.hash_len != hashlen)
2119			continue;
2120		*seqp = seq;
2121		if (!dentry_cmp(dentry, str, hashlen_len(hashlen)))
2122			return dentry;
2123	}
2124	return NULL;
2125}
2126
2127/**
2128 * d_lookup - search for a dentry
2129 * @parent: parent dentry
2130 * @name: qstr of name we wish to find
2131 * Returns: dentry, or NULL
2132 *
2133 * d_lookup searches the children of the parent dentry for the name in
2134 * question. If the dentry is found its reference count is incremented and the
2135 * dentry is returned. The caller must use dput to free the entry when it has
2136 * finished using it. %NULL is returned if the dentry does not exist.
2137 */
2138struct dentry *d_lookup(const struct dentry *parent, const struct qstr *name)
2139{
2140	struct dentry *dentry;
2141	unsigned seq;
2142
2143	do {
2144		seq = read_seqbegin(&rename_lock);
2145		dentry = __d_lookup(parent, name);
2146		if (dentry)
2147			break;
2148	} while (read_seqretry(&rename_lock, seq));
2149	return dentry;
2150}
2151EXPORT_SYMBOL(d_lookup);
2152
2153/**
2154 * __d_lookup - search for a dentry (racy)
2155 * @parent: parent dentry
2156 * @name: qstr of name we wish to find
2157 * Returns: dentry, or NULL
2158 *
2159 * __d_lookup is like d_lookup, however it may (rarely) return a
2160 * false-negative result due to unrelated rename activity.
2161 *
2162 * __d_lookup is slightly faster by avoiding rename_lock read seqlock,
2163 * however it must be used carefully, eg. with a following d_lookup in
2164 * the case of failure.
2165 *
2166 * __d_lookup callers must be commented.
2167 */
2168struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
2169{
2170	unsigned int len = name->len;
2171	unsigned int hash = name->hash;
2172	const unsigned char *str = name->name;
2173	struct hlist_bl_head *b = d_hash(parent, hash);
2174	struct hlist_bl_node *node;
2175	struct dentry *found = NULL;
2176	struct dentry *dentry;
2177
2178	/*
2179	 * Note: There is significant duplication with __d_lookup_rcu which is
2180	 * required to prevent single threaded performance regressions
2181	 * especially on architectures where smp_rmb (in seqcounts) are costly.
2182	 * Keep the two functions in sync.
2183	 */
2184
2185	/*
2186	 * The hash list is protected using RCU.
2187	 *
2188	 * Take d_lock when comparing a candidate dentry, to avoid races
2189	 * with d_move().
2190	 *
2191	 * It is possible that concurrent renames can mess up our list
2192	 * walk here and result in missing our dentry, resulting in the
2193	 * false-negative result. d_lookup() protects against concurrent
2194	 * renames using rename_lock seqlock.
2195	 *
2196	 * See Documentation/filesystems/path-lookup.txt for more details.
2197	 */
2198	rcu_read_lock();
2199
2200	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
2201
2202		if (dentry->d_name.hash != hash)
2203			continue;
2204
2205		spin_lock(&dentry->d_lock);
2206		if (dentry->d_parent != parent)
2207			goto next;
2208		if (d_unhashed(dentry))
2209			goto next;
2210
2211		/*
2212		 * It is safe to compare names since d_move() cannot
2213		 * change the qstr (protected by d_lock).
2214		 */
2215		if (parent->d_flags & DCACHE_OP_COMPARE) {
2216			int tlen = dentry->d_name.len;
2217			const char *tname = dentry->d_name.name;
2218			if (parent->d_op->d_compare(parent, dentry, tlen, tname, name))
2219				goto next;
2220		} else {
2221			if (dentry->d_name.len != len)
2222				goto next;
2223			if (dentry_cmp(dentry, str, len))
2224				goto next;
2225		}
2226
2227		dentry->d_lockref.count++;
2228		found = dentry;
2229		spin_unlock(&dentry->d_lock);
2230		break;
2231next:
2232		spin_unlock(&dentry->d_lock);
2233	}
2234	rcu_read_unlock();
2235
2236	return found;
2237}
2238
2239/**
2240 * d_hash_and_lookup - hash the qstr then search for a dentry
2241 * @dir: Directory to search in
2242 * @name: qstr of name we wish to find
2243 *
2244 * On lookup failure NULL is returned; on bad name - ERR_PTR(-error)
2245 */
2246struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
2247{
2248	/*
2249	 * Check for a fs-specific hash function. Note that we must
2250	 * calculate the standard hash first, as the d_op->d_hash()
2251	 * routine may choose to leave the hash value unchanged.
2252	 */
2253	name->hash = full_name_hash(name->name, name->len);
2254	if (dir->d_flags & DCACHE_OP_HASH) {
2255		int err = dir->d_op->d_hash(dir, name);
2256		if (unlikely(err < 0))
2257			return ERR_PTR(err);
2258	}
2259	return d_lookup(dir, name);
2260}
2261EXPORT_SYMBOL(d_hash_and_lookup);
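/*
 * Editorial sketch, not part of the original file: building the qstr for
 * d_hash_and_lookup() from a raw name and length; the helper computes the
 * hash itself, honouring any ->d_hash() op.  example_ is hypothetical.
 */
#if 0
static struct dentry *example_lookup_child(struct dentry *dir,
					   const char *name, unsigned int len)
{
	struct qstr q = QSTR_INIT(name, len);

	/* returns NULL, a pinned dentry, or ERR_PTR() on a bad name */
	return d_hash_and_lookup(dir, &q);
}
#endif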
2262
2263/**
2264 * d_validate - verify dentry provided from insecure source (deprecated)
2265 * @dentry: The dentry alleged to be valid child of @dparent
2266 * @dparent: The parent dentry (known to be valid)
2267 *
2268 * An insecure source has sent us a dentry; here we verify it and dget() it.
2269 * This is used by ncpfs in its readdir implementation.
2270 * Zero is returned if the dentry is invalid.
2271 *
2272 * This function is slow for big directories, and deprecated, do not use it.
2273 */
2274int d_validate(struct dentry *dentry, struct dentry *dparent)
2275{
2276	struct dentry *child;
2277
2278	spin_lock(&dparent->d_lock);
2279	list_for_each_entry(child, &dparent->d_subdirs, d_u.d_child) {
2280		if (dentry == child) {
2281			spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
2282			__dget_dlock(dentry);
2283			spin_unlock(&dentry->d_lock);
2284			spin_unlock(&dparent->d_lock);
2285			return 1;
2286		}
2287	}
2288	spin_unlock(&dparent->d_lock);
2289
2290	return 0;
2291}
2292EXPORT_SYMBOL(d_validate);
2293
2294/*
2295 * When a file is deleted, we have two options:
2296 * - turn this dentry into a negative dentry
2297 * - unhash this dentry and free it.
2298 *
2299 * Usually, we want to just turn this into
2300 * a negative dentry, but if anybody else is
2301 * currently using the dentry or the inode
2302 * we can't do that and we fall back on removing
2303 * it from the hash queues and waiting for
2304 * it to be deleted later when it has no users
2305 */
2306
2307/**
2308 * d_delete - delete a dentry
2309 * @dentry: The dentry to delete
2310 *
2311 * Turn the dentry into a negative dentry if possible, otherwise
2312 * remove it from the hash queues so it can be deleted later
2313 */
2314
2315void d_delete(struct dentry * dentry)
2316{
2317	struct inode *inode;
2318	int isdir = 0;
2319	/*
2320	 * Are we the only user?
2321	 */
2322again:
2323	spin_lock(&dentry->d_lock);
2324	inode = dentry->d_inode;
2325	isdir = S_ISDIR(inode->i_mode);
2326	if (dentry->d_lockref.count == 1) {
2327		if (!spin_trylock(&inode->i_lock)) {
2328			spin_unlock(&dentry->d_lock);
2329			cpu_relax();
2330			goto again;
2331		}
2332		dentry->d_flags &= ~DCACHE_CANT_MOUNT;
2333		dentry_unlink_inode(dentry);
2334		fsnotify_nameremove(dentry, isdir);
2335		return;
2336	}
2337
2338	if (!d_unhashed(dentry))
2339		__d_drop(dentry);
2340
2341	spin_unlock(&dentry->d_lock);
2342
2343	fsnotify_nameremove(dentry, isdir);
2344}
2345EXPORT_SYMBOL(d_delete);
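/*
 * Editorial sketch, not part of the original file: how code that has just
 * removed a name on the backing store uses d_delete() (the VFS does this
 * in vfs_unlink()).  example_fs_remove_on_disk() is hypothetical.
 */
#if 0
static int example_unlink(struct inode *dir, struct dentry *dentry)
{
	int err = example_fs_remove_on_disk(dir, dentry);

	if (!err)
		d_delete(dentry);	/* go negative, or unhash if busy */
	return err;
}
#endif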
2346
2347static void __d_rehash(struct dentry * entry, struct hlist_bl_head *b)
2348{
2349	BUG_ON(!d_unhashed(entry));
2350	hlist_bl_lock(b);
2351	entry->d_flags |= DCACHE_RCUACCESS;
2352	hlist_bl_add_head_rcu(&entry->d_hash, b);
2353	hlist_bl_unlock(b);
2354}
2355
2356static void _d_rehash(struct dentry * entry)
2357{
2358	__d_rehash(entry, d_hash(entry->d_parent, entry->d_name.hash));
2359}
2360
2361/**
2362 * d_rehash	- add an entry back to the hash
2363 * @entry: dentry to add to the hash
2364 *
2365 * Adds a dentry to the hash according to its name.
2366 */
2367
2368void d_rehash(struct dentry * entry)
2369{
2370	spin_lock(&entry->d_lock);
2371	_d_rehash(entry);
2372	spin_unlock(&entry->d_lock);
2373}
2374EXPORT_SYMBOL(d_rehash);
2375
2376/**
2377 * dentry_update_name_case - update case insensitive dentry with a new name
2378 * @dentry: dentry to be updated
2379 * @name: new name
2380 *
2381 * Update a case-insensitive dentry with the new case of its name.
2382 *
2383 * dentry must have been returned by d_lookup with name @name. Old and new
2384 * name lengths must match (ie. no d_compare which allows mismatched name
2385 * lengths).
2386 *
2387 * Parent inode i_mutex must be held over d_lookup and into this call (to
2388 * keep renames and concurrent inserts, and readdir(2) away).
2389 */
2390void dentry_update_name_case(struct dentry *dentry, struct qstr *name)
2391{
2392	BUG_ON(!mutex_is_locked(&dentry->d_parent->d_inode->i_mutex));
2393	BUG_ON(dentry->d_name.len != name->len); /* d_lookup gives this */
2394
2395	spin_lock(&dentry->d_lock);
2396	write_seqcount_begin(&dentry->d_seq);
2397	memcpy((unsigned char *)dentry->d_name.name, name->name, name->len);
2398	write_seqcount_end(&dentry->d_seq);
2399	spin_unlock(&dentry->d_lock);
2400}
2401EXPORT_SYMBOL(dentry_update_name_case);
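/*
 * Editorial sketch, not part of the original file: a case-insensitive
 * filesystem fixing up the cached spelling after a lookup hit.  The
 * example_ name is hypothetical; the parent's i_mutex is assumed held
 * across the preceding d_lookup() and this call, as required above.
 */
#if 0
static void example_fix_case(struct dentry *dentry, struct qstr *disk_name)
{
	/* same length guaranteed by d_lookup(); only the case differs */
	dentry_update_name_case(dentry, disk_name);
}
#endif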
2402
2403static void switch_names(struct dentry *dentry, struct dentry *target)
2404{
2405	if (dname_external(target)) {
2406		if (dname_external(dentry)) {
2407			/*
2408			 * Both external: swap the pointers
2409			 */
2410			swap(target->d_name.name, dentry->d_name.name);
2411		} else {
2412			/*
2413			 * dentry:internal, target:external.  Steal target's
2414			 * storage and make target internal.
2415			 */
2416			memcpy(target->d_iname, dentry->d_name.name,
2417					dentry->d_name.len + 1);
2418			dentry->d_name.name = target->d_name.name;
2419			target->d_name.name = target->d_iname;
2420		}
2421	} else {
2422		if (dname_external(dentry)) {
2423			/*
2424			 * dentry:external, target:internal.  Give dentry's
2425			 * storage to target and make dentry internal
2426			 */
2427			memcpy(dentry->d_iname, target->d_name.name,
2428					target->d_name.len + 1);
2429			target->d_name.name = dentry->d_name.name;
2430			dentry->d_name.name = dentry->d_iname;
2431		} else {
2432			/*
2433			 * Both are internal.
2434			 */
2435			unsigned int i;
2436			BUILD_BUG_ON(!IS_ALIGNED(DNAME_INLINE_LEN, sizeof(long)));
2437			for (i = 0; i < DNAME_INLINE_LEN / sizeof(long); i++) {
2438				swap(((long *) &dentry->d_iname)[i],
2439				     ((long *) &target->d_iname)[i]);
2440			}
2441		}
2442	}
2443	swap(dentry->d_name.len, target->d_name.len);
2444}
2445
2446static void dentry_lock_for_move(struct dentry *dentry, struct dentry *target)
2447{
2448	/*
2449	 * XXXX: do we really need to take target->d_lock?
2450	 */
2451	if (IS_ROOT(dentry) || dentry->d_parent == target->d_parent)
2452		spin_lock(&target->d_parent->d_lock);
2453	else {
2454		if (d_ancestor(dentry->d_parent, target->d_parent)) {
2455			spin_lock(&dentry->d_parent->d_lock);
2456			spin_lock_nested(&target->d_parent->d_lock,
2457						DENTRY_D_LOCK_NESTED);
2458		} else {
2459			spin_lock(&target->d_parent->d_lock);
2460			spin_lock_nested(&dentry->d_parent->d_lock,
2461						DENTRY_D_LOCK_NESTED);
2462		}
2463	}
2464	if (target < dentry) {
2465		spin_lock_nested(&target->d_lock, 2);
2466		spin_lock_nested(&dentry->d_lock, 3);
2467	} else {
2468		spin_lock_nested(&dentry->d_lock, 2);
2469		spin_lock_nested(&target->d_lock, 3);
2470	}
2471}
2472
2473static void dentry_unlock_parents_for_move(struct dentry *dentry,
2474					struct dentry *target)
2475{
2476	if (target->d_parent != dentry->d_parent)
2477		spin_unlock(&dentry->d_parent->d_lock);
2478	if (target->d_parent != target)
2479		spin_unlock(&target->d_parent->d_lock);
2480}
2481
2482/*
2483 * When switching names, the actual string doesn't strictly have to
2484 * be preserved in the target - because we're dropping the target
2485 * anyway. As such, we can just do a simple memcpy() to copy over
2486 * the new name before we switch.
2487 *
2488 * Note that we have to be a lot more careful about getting the hash
2489 * switched - we have to switch the hash value properly even if it
2490 * then no longer matches the actual (corrupted) string of the target.
2491 * The hash value has to match the hash queue that the dentry is on..
2492 */
2493/*
2494 * __d_move - move a dentry
2495 * @dentry: entry to move
2496 * @target: new dentry
2497 * @exchange: exchange the two dentries
2498 *
2499 * Update the dcache to reflect the move of a file name. Negative
2500 * dcache entries should not be moved in this way. Caller must hold
2501 * rename_lock, the i_mutex of the source and target directories,
2502 * and the sb->s_vfs_rename_mutex if they differ. See lock_rename().
2503 */
2504static void __d_move(struct dentry *dentry, struct dentry *target,
2505		     bool exchange)
2506{
2507	if (!dentry->d_inode)
2508		printk(KERN_WARNING "VFS: moving negative dcache entry\n");
2509
2510	BUG_ON(d_ancestor(dentry, target));
2511	BUG_ON(d_ancestor(target, dentry));
2512
2513	dentry_lock_for_move(dentry, target);
2514
2515	write_seqcount_begin(&dentry->d_seq);
2516	write_seqcount_begin_nested(&target->d_seq, DENTRY_D_LOCK_NESTED);
2517
2518	/* __d_drop does write_seqcount_barrier, but they're OK to nest. */
2519
2520	/*
2521	 * Move the dentry to the target hash queue. Don't bother checking
2522	 * for the same hash queue because of how unlikely it is.
2523	 */
2524	__d_drop(dentry);
2525	__d_rehash(dentry, d_hash(target->d_parent, target->d_name.hash));
2526
2527	/*
2528	 * Unhash the target (d_delete() is not usable here).  If exchanging
2529	 * the two dentries, then rehash onto the other's hash queue.
2530	 */
2531	__d_drop(target);
2532	if (exchange) {
2533		__d_rehash(target,
2534			   d_hash(dentry->d_parent, dentry->d_name.hash));
2535	}
2536
2537	list_del(&dentry->d_u.d_child);
2538	list_del(&target->d_u.d_child);
2539
2540	/* Switch the names.. */
2541	switch_names(dentry, target);
2542	swap(dentry->d_name.hash, target->d_name.hash);
2543
2544	/* ... and switch the parents */
2545	if (IS_ROOT(dentry)) {
2546		dentry->d_parent = target->d_parent;
2547		target->d_parent = target;
2548		INIT_LIST_HEAD(&target->d_u.d_child);
2549	} else {
2550		swap(dentry->d_parent, target->d_parent);
2551
2552		/* And add them back to the (new) parent lists */
2553		list_add(&target->d_u.d_child, &target->d_parent->d_subdirs);
2554	}
2555
2556	list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs);
2557
2558	write_seqcount_end(&target->d_seq);
2559	write_seqcount_end(&dentry->d_seq);
2560
2561	dentry_unlock_parents_for_move(dentry, target);
2562	if (exchange)
2563		fsnotify_d_move(target);
2564	spin_unlock(&target->d_lock);
2565	fsnotify_d_move(dentry);
2566	spin_unlock(&dentry->d_lock);
2567}
2568
2569/*
2570 * d_move - move a dentry
2571 * @dentry: entry to move
2572 * @target: new dentry
2573 *
2574 * Update the dcache to reflect the move of a file name. Negative
2575 * dcache entries should not be moved in this way. See the locking
2576 * requirements for __d_move.
2577 */
2578void d_move(struct dentry *dentry, struct dentry *target)
2579{
2580	write_seqlock(&rename_lock);
2581	__d_move(dentry, target, false);
2582	write_sequnlock(&rename_lock);
2583}
2584EXPORT_SYMBOL(d_move);
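/*
 * Editorial sketch, not part of the original file: filesystems that set
 * FS_RENAME_DOES_D_MOVE call d_move() from their ->rename() themselves,
 * once the backing store has committed the rename.  The example_ names
 * are hypothetical.
 */
#if 0
static int example_rename(struct inode *old_dir, struct dentry *old_dentry,
			  struct inode *new_dir, struct dentry *new_dentry)
{
	int err = example_fs_rename_remote(old_dir, old_dentry,
					   new_dir, new_dentry);
	if (!err)
		d_move(old_dentry, new_dentry);	/* update the dcache view */
	return err;
}
#endif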
2585
2586/*
2587 * d_exchange - exchange two dentries
2588 * @dentry1: first dentry
2589 * @dentry2: second dentry
2590 */
2591void d_exchange(struct dentry *dentry1, struct dentry *dentry2)
2592{
2593	write_seqlock(&rename_lock);
2594
2595	WARN_ON(!dentry1->d_inode);
2596	WARN_ON(!dentry2->d_inode);
2597	WARN_ON(IS_ROOT(dentry1));
2598	WARN_ON(IS_ROOT(dentry2));
2599
2600	__d_move(dentry1, dentry2, true);
2601
2602	write_sequnlock(&rename_lock);
2603}
2604
2605/**
2606 * d_ancestor - search for an ancestor
2607 * @p1: ancestor dentry
2608 * @p2: child dentry
2609 *
2610 * Returns the ancestor dentry of p2 which is a child of p1, if p1 is
2611 * an ancestor of p2, else NULL.
2612 */
2613struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2)
2614{
2615	struct dentry *p;
2616
2617	for (p = p2; !IS_ROOT(p); p = p->d_parent) {
2618		if (p->d_parent == p1)
2619			return p;
2620	}
2621	return NULL;
2622}
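/*
 * Editorial sketch, not part of the original file: the usual use of
 * d_ancestor() is loop avoidance before splicing a dentry under a new
 * parent (see the -ELOOP check in d_materialise_unique() below).
 */
#if 0
static int example_would_loop(struct dentry *dentry, struct dentry *new_parent)
{
	return d_ancestor(dentry, new_parent) ? -ELOOP : 0;
}
#endif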
2623
2624/*
2625 * This helper attempts to cope with remotely renamed directories
2626 *
2627 * It assumes that the caller is already holding
2628 * dentry->d_parent->d_inode->i_mutex, inode->i_lock and rename_lock
2629 *
2630 * Note: If ever the locking in lock_rename() changes, then please
2631 * remember to update this too...
2632 */
2633static struct dentry *__d_unalias(struct inode *inode,
2634		struct dentry *dentry, struct dentry *alias)
2635{
2636	struct mutex *m1 = NULL, *m2 = NULL;
2637	struct dentry *ret = ERR_PTR(-EBUSY);
2638
2639	/* If alias and dentry share a parent, then no extra locks required */
2640	if (alias->d_parent == dentry->d_parent)
2641		goto out_unalias;
2642
2643	/* See lock_rename() */
2644	if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex))
2645		goto out_err;
2646	m1 = &dentry->d_sb->s_vfs_rename_mutex;
2647	if (!mutex_trylock(&alias->d_parent->d_inode->i_mutex))
2648		goto out_err;
2649	m2 = &alias->d_parent->d_inode->i_mutex;
2650out_unalias:
2651	if (likely(!d_mountpoint(alias))) {
2652		__d_move(alias, dentry, false);
2653		ret = alias;
2654	}
2655out_err:
2656	spin_unlock(&inode->i_lock);
2657	if (m2)
2658		mutex_unlock(m2);
2659	if (m1)
2660		mutex_unlock(m1);
2661	return ret;
2662}
2663
2664/*
2665 * Prepare an anonymous dentry for life in the superblock's dentry tree as a
2666 * named dentry in place of the dentry to be replaced.
2667 * returns with anon->d_lock held!
2668 */
2669static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon)
2670{
2671	struct dentry *dparent;
2672
2673	dentry_lock_for_move(anon, dentry);
2674
2675	write_seqcount_begin(&dentry->d_seq);
2676	write_seqcount_begin_nested(&anon->d_seq, DENTRY_D_LOCK_NESTED);
2677
2678	dparent = dentry->d_parent;
2679
2680	switch_names(dentry, anon);
2681	swap(dentry->d_name.hash, anon->d_name.hash);
2682
2683	dentry->d_parent = dentry;
2684	list_del_init(&dentry->d_u.d_child);
2685	anon->d_parent = dparent;
2686	list_move(&anon->d_u.d_child, &dparent->d_subdirs);
2687
2688	write_seqcount_end(&dentry->d_seq);
2689	write_seqcount_end(&anon->d_seq);
2690
2691	dentry_unlock_parents_for_move(anon, dentry);
2692	spin_unlock(&dentry->d_lock);
2693
2694	/* anon->d_lock still locked, returns locked */
2695}
2696
2697/**
2698 * d_materialise_unique - introduce an inode into the tree
2699 * @dentry: candidate dentry
2700 * @inode: inode to bind to the dentry, to which aliases may be attached
2701 *
2702 * Introduces a dentry into the tree, substituting an extant disconnected
2703 * root directory alias in its place if there is one. Caller must hold the
2704 * i_mutex of the parent directory.
2705 */
2706struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode)
2707{
2708	struct dentry *actual;
2709
2710	BUG_ON(!d_unhashed(dentry));
2711
2712	if (!inode) {
2713		actual = dentry;
2714		__d_instantiate(dentry, NULL);
2715		d_rehash(actual);
2716		goto out_nolock;
2717	}
2718
2719	spin_lock(&inode->i_lock);
2720
2721	if (S_ISDIR(inode->i_mode)) {
2722		struct dentry *alias;
2723
2724		/* Does an aliased dentry already exist? */
2725		alias = __d_find_alias(inode, 0);
2726		if (alias) {
2727			actual = alias;
2728			write_seqlock(&rename_lock);
2729
2730			if (d_ancestor(alias, dentry)) {
2731				/* Check for loops */
2732				actual = ERR_PTR(-ELOOP);
2733				spin_unlock(&inode->i_lock);
2734			} else if (IS_ROOT(alias)) {
2735				/* Is this an anonymous mountpoint that we
2736				 * could splice into our tree? */
2737				__d_materialise_dentry(dentry, alias);
2738				write_sequnlock(&rename_lock);
2739				__d_drop(alias);
2740				goto found;
2741			} else {
2742				/* Nope, but we must(!) avoid directory
2743				 * aliasing. This drops inode->i_lock */
2744				actual = __d_unalias(inode, dentry, alias);
2745			}
2746			write_sequnlock(&rename_lock);
2747			if (IS_ERR(actual)) {
2748				if (PTR_ERR(actual) == -ELOOP)
2749					pr_warn_ratelimited(
2750						"VFS: Lookup of '%s' in %s %s"
2751						" would have caused loop\n",
2752						dentry->d_name.name,
2753						inode->i_sb->s_type->name,
2754						inode->i_sb->s_id);
2755				dput(alias);
2756			}
2757			goto out_nolock;
2758		}
2759	}
2760
2761	/* Add a unique reference */
2762	actual = __d_instantiate_unique(dentry, inode);
2763	if (!actual)
2764		actual = dentry;
2765	else
2766		BUG_ON(!d_unhashed(actual));
2767
2768	spin_lock(&actual->d_lock);
2769found:
2770	_d_rehash(actual);
2771	spin_unlock(&actual->d_lock);
2772	spin_unlock(&inode->i_lock);
2773out_nolock:
2774	if (actual == dentry) {
2775		security_d_instantiate(dentry, inode);
2776		return NULL;
2777	}
2778
2779	iput(inode);
2780	return actual;
2781}
2782EXPORT_SYMBOL_GPL(d_materialise_unique);
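/*
 * Editorial sketch, not part of the original file: a network/exportable
 * filesystem's ->lookup() feeding its result through
 * d_materialise_unique() so that disconnected aliases get spliced in.
 * example_fs_iget() is hypothetical.
 */
#if 0
static struct dentry *example_lookup(struct inode *dir, struct dentry *dentry,
				     unsigned int flags)
{
	struct inode *inode = example_fs_iget(dir, &dentry->d_name);

	if (IS_ERR(inode))
		return ERR_CAST(inode);
	/* a NULL inode is fine: a negative dentry is instantiated */
	return d_materialise_unique(dentry, inode);
}
#endif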
2783
2784static int prepend(char **buffer, int *buflen, const char *str, int namelen)
2785{
2786	*buflen -= namelen;
2787	if (*buflen < 0)
2788		return -ENAMETOOLONG;
2789	*buffer -= namelen;
2790	memcpy(*buffer, str, namelen);
2791	return 0;
2792}
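/*
 * Editorial sketch, not part of the original file: prepend() builds
 * strings right-to-left from the end of the buffer, which is why the
 * path helpers below all pass around "end" pointers.
 */
#if 0
static void example_prepend_usage(void)
{
	char buf[16];
	char *p = buf + sizeof(buf);
	int len = sizeof(buf);

	prepend(&p, &len, "\0", 1);	/* terminator goes in first */
	prepend(&p, &len, "bar", 3);
	prepend(&p, &len, "/", 1);
	prepend(&p, &len, "foo", 3);
	/* p now points at the string "foo/bar" */
}
#endif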
2793
2794/**
2795 * prepend_name - prepend a pathname in front of current buffer pointer
2796 * @buffer: buffer pointer
2797 * @buflen: allocated length of the buffer
2798 * @name:   name string and length qstr structure
2799 *
2800 * With RCU path tracing, it may race with d_move(). Use ACCESS_ONCE() to
2801 * make sure that either the old or the new name pointer and length are
2802 * fetched. However, there may be mismatch between length and pointer.
2803 * The length cannot be trusted, we need to copy it byte-by-byte until
2804 * the length is reached or a null byte is found. It also prepends "/" at
2805 * the beginning of the name. The sequence number check at the caller will
2806 * retry it again when a d_move() does happen. So any garbage in the buffer
2807 * due to mismatched pointer and length will be discarded.
2808 */
2809static int prepend_name(char **buffer, int *buflen, struct qstr *name)
2810{
2811	const char *dname = ACCESS_ONCE(name->name);
2812	u32 dlen = ACCESS_ONCE(name->len);
2813	char *p;
2814
2815	*buflen -= dlen + 1;
2816	if (*buflen < 0)
2817		return -ENAMETOOLONG;
2818	p = *buffer -= dlen + 1;
2819	*p++ = '/';
2820	while (dlen--) {
2821		char c = *dname++;
2822		if (!c)
2823			break;
2824		*p++ = c;
2825	}
2826	return 0;
2827}
2828
2829/**
2830 * prepend_path - Prepend path string to a buffer
2831 * @path: the dentry/vfsmount to report
2832 * @root: root vfsmnt/dentry
2833 * @buffer: pointer to the end of the buffer
2834 * @buflen: pointer to buffer length
2835 *
2836 * The function will first try to write out the pathname without taking any
2837 * lock other than the RCU read lock to make sure that dentries won't go away.
2838 * It only checks the sequence number of the global rename_lock as any change
2839 * in the dentry's d_seq will be preceded by changes in the rename_lock
2840 * sequence number. If the sequence number had been changed, it will restart
2841 * the whole pathname back-tracing sequence again by taking the rename_lock.
2842 * In this case, there is no need to take the RCU read lock as the recursive
2843 * parent pointer references will keep the dentry chain alive as long as no
2844 * rename operation is performed.
2845 */
2846static int prepend_path(const struct path *path,
2847			const struct path *root,
2848			char **buffer, int *buflen)
2849{
2850	struct dentry *dentry;
2851	struct vfsmount *vfsmnt;
2852	struct mount *mnt;
2853	int error = 0;
2854	unsigned seq, m_seq = 0;
2855	char *bptr;
2856	int blen;
2857
2858	rcu_read_lock();
2859restart_mnt:
2860	read_seqbegin_or_lock(&mount_lock, &m_seq);
2861	seq = 0;
2862	rcu_read_lock();
2863restart:
2864	bptr = *buffer;
2865	blen = *buflen;
2866	error = 0;
2867	dentry = path->dentry;
2868	vfsmnt = path->mnt;
2869	mnt = real_mount(vfsmnt);
2870	read_seqbegin_or_lock(&rename_lock, &seq);
2871	while (dentry != root->dentry || vfsmnt != root->mnt) {
2872		struct dentry * parent;
2873
2874		if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
2875			struct mount *parent = ACCESS_ONCE(mnt->mnt_parent);
2876			/* Global root? */
2877			if (mnt != parent) {
2878				dentry = ACCESS_ONCE(mnt->mnt_mountpoint);
2879				mnt = parent;
2880				vfsmnt = &mnt->mnt;
2881				continue;
2882			}
2883			/*
2884			 * Filesystems needing to implement special "root names"
2885			 * should do so with ->d_dname()
2886			 */
2887			if (IS_ROOT(dentry) &&
2888			   (dentry->d_name.len != 1 ||
2889			    dentry->d_name.name[0] != '/')) {
2890				WARN(1, "Root dentry has weird name <%.*s>\n",
2891				     (int) dentry->d_name.len,
2892				     dentry->d_name.name);
2893			}
2894			if (!error)
2895				error = is_mounted(vfsmnt) ? 1 : 2;
2896			break;
2897		}
2898		parent = dentry->d_parent;
2899		prefetch(parent);
2900		error = prepend_name(&bptr, &blen, &dentry->d_name);
2901		if (error)
2902			break;
2903
2904		dentry = parent;
2905	}
2906	if (!(seq & 1))
2907		rcu_read_unlock();
2908	if (need_seqretry(&rename_lock, seq)) {
2909		seq = 1;
2910		goto restart;
2911	}
2912	done_seqretry(&rename_lock, seq);
2913
2914	if (!(m_seq & 1))
2915		rcu_read_unlock();
2916	if (need_seqretry(&mount_lock, m_seq)) {
2917		m_seq = 1;
2918		goto restart_mnt;
2919	}
2920	done_seqretry(&mount_lock, m_seq);
2921
2922	if (error >= 0 && bptr == *buffer) {
2923		if (--blen < 0)
2924			error = -ENAMETOOLONG;
2925		else
2926			*--bptr = '/';
2927	}
2928	*buffer = bptr;
2929	*buflen = blen;
2930	return error;
2931}
2932
2933/**
2934 * __d_path - return the path of a dentry
2935 * @path: the dentry/vfsmount to report
2936 * @root: root vfsmnt/dentry
2937 * @buf: buffer to return value in
2938 * @buflen: buffer length
2939 *
2940 * Convert a dentry into an ASCII path name.
2941 *
2942 * Returns a pointer into the buffer or an error code if the
2943 * path was too long.
2944 *
2945 * "buflen" should be positive.
2946 *
2947 * If the path is not reachable from the supplied root, return %NULL.
2948 */
2949char *__d_path(const struct path *path,
2950	       const struct path *root,
2951	       char *buf, int buflen)
2952{
2953	char *res = buf + buflen;
2954	int error;
2955
2956	prepend(&res, &buflen, "\0", 1);
2957	error = prepend_path(path, root, &res, &buflen);
2958
2959	if (error < 0)
2960		return ERR_PTR(error);
2961	if (error > 0)
2962		return NULL;
2963	return res;
2964}
2965
2966char *d_absolute_path(const struct path *path,
2967	       char *buf, int buflen)
2968{
2969	struct path root = {};
2970	char *res = buf + buflen;
2971	int error;
2972
2973	prepend(&res, &buflen, "\0", 1);
2974	error = prepend_path(path, &root, &res, &buflen);
2975
2976	if (error > 1)
2977		error = -EINVAL;
2978	if (error < 0)
2979		return ERR_PTR(error);
2980	return res;
2981}
2982
2983/*
2984 * same as __d_path but appends "(deleted)" for unlinked files.
2985 */
2986static int path_with_deleted(const struct path *path,
2987			     const struct path *root,
2988			     char **buf, int *buflen)
2989{
2990	prepend(buf, buflen, "\0", 1);
2991	if (d_unlinked(path->dentry)) {
2992		int error = prepend(buf, buflen, " (deleted)", 10);
2993		if (error)
2994			return error;
2995	}
2996
2997	return prepend_path(path, root, buf, buflen);
2998}
2999
3000static int prepend_unreachable(char **buffer, int *buflen)
3001{
3002	return prepend(buffer, buflen, "(unreachable)", 13);
3003}
3004
3005static void get_fs_root_rcu(struct fs_struct *fs, struct path *root)
3006{
3007	unsigned seq;
3008
3009	do {
3010		seq = read_seqcount_begin(&fs->seq);
3011		*root = fs->root;
3012	} while (read_seqcount_retry(&fs->seq, seq));
3013}
3014
3015/**
3016 * d_path - return the path of a dentry
3017 * @path: path to report
3018 * @buf: buffer to return value in
3019 * @buflen: buffer length
3020 *
3021 * Convert a dentry into an ASCII path name. If the entry has been deleted
3022 * the string " (deleted)" is appended. Note that this is ambiguous.
3023 *
3024 * Returns a pointer into the buffer or an error code if the path was
3025 * too long. Note: Callers should use the returned pointer, not the passed
3026 * in buffer, to use the name! The implementation often starts at an offset
3027 * into the buffer, and may leave 0 bytes at the start.
3028 *
3029 * "buflen" should be positive.
3030 */
3031char *d_path(const struct path *path, char *buf, int buflen)
3032{
3033	char *res = buf + buflen;
3034	struct path root;
3035	int error;
3036
3037	/*
3038	 * We have various synthetic filesystems that never get mounted.  On
3039	 * these filesystems dentries are never used for lookup purposes, and
3040	 * thus don't need to be hashed.  They also don't need a name until a
3041	 * user wants to identify the object in /proc/pid/fd/.  The little hack
3042	 * below allows us to generate a name for these objects on demand:
3043	 *
3044	 * Some pseudo inodes are mountable.  When they are mounted
3045	 * path->dentry == path->mnt->mnt_root.  In that case don't call d_dname
3046	 * and instead have d_path return the mounted path.
3047	 */
3048	if (path->dentry->d_op && path->dentry->d_op->d_dname &&
3049	    (!IS_ROOT(path->dentry) || path->dentry != path->mnt->mnt_root))
3050		return path->dentry->d_op->d_dname(path->dentry, buf, buflen);
3051
3052	rcu_read_lock();
3053	get_fs_root_rcu(current->fs, &root);
3054	error = path_with_deleted(path, &root, &res, &buflen);
3055	rcu_read_unlock();
3056
3057	if (error < 0)
3058		res = ERR_PTR(error);
3059	return res;
3060}
3061EXPORT_SYMBOL(d_path);
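/*
 * Editorial sketch, not part of the original file: a caller printing a
 * path.  Note that the returned pointer is used, not the buffer start,
 * since d_path() fills the buffer from the end.
 */
#if 0
static void example_print_path(const struct path *path)
{
	char *buf = __getname();	/* PATH_MAX-sized buffer */
	char *name;

	if (!buf)
		return;
	name = d_path(path, buf, PATH_MAX);
	if (!IS_ERR(name))
		pr_info("path: %s\n", name);
	__putname(buf);
}
#endif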
3062
3063/*
3064 * Helper function for dentry_operations.d_dname() members
3065 */
3066char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen,
3067			const char *fmt, ...)
3068{
3069	va_list args;
3070	char temp[64];
3071	int sz;
3072
3073	va_start(args, fmt);
3074	sz = vsnprintf(temp, sizeof(temp), fmt, args) + 1;
3075	va_end(args);
3076
3077	if (sz > sizeof(temp) || sz > buflen)
3078		return ERR_PTR(-ENAMETOOLONG);
3079
3080	buffer += buflen - sz;
3081	return memcpy(buffer, temp, sz);
3082}
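/*
 * Editorial sketch, not part of the original file: pipefs, for instance,
 * names its dentries "pipe:[<ino>]" via a ->d_dname() built on
 * dynamic_dname(); the example_ names here are hypothetical.
 */
#if 0
static char *example_dname(struct dentry *dentry, char *buffer, int buflen)
{
	return dynamic_dname(dentry, buffer, buflen, "example:[%lu]",
				dentry->d_inode->i_ino);
}

static const struct dentry_operations example_dentry_ops = {
	.d_dname	= example_dname,
};
#endif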
3083
3084char *simple_dname(struct dentry *dentry, char *buffer, int buflen)
3085{
3086	char *end = buffer + buflen;
3087	/* these dentries are never renamed, so d_lock is not needed */
3088	if (prepend(&end, &buflen, " (deleted)", 11) ||
3089	    prepend(&end, &buflen, dentry->d_name.name, dentry->d_name.len) ||
3090	    prepend(&end, &buflen, "/", 1))
3091		end = ERR_PTR(-ENAMETOOLONG);
3092	return end;
3093}
3094EXPORT_SYMBOL(simple_dname);
3095
3096/*
3097 * Write full pathname from the root of the filesystem into the buffer.
3098 */
3099static char *__dentry_path(struct dentry *d, char *buf, int buflen)
3100{
3101	struct dentry *dentry;
3102	char *end, *retval;
3103	int len, seq = 0;
3104	int error = 0;
3105
3106	if (buflen < 2)
3107		goto Elong;
3108
3109	rcu_read_lock();
3110restart:
3111	dentry = d;
3112	end = buf + buflen;
3113	len = buflen;
3114	prepend(&end, &len, "\0", 1);
3115	/* Get '/' right */
3116	retval = end-1;
3117	*retval = '/';
3118	read_seqbegin_or_lock(&rename_lock, &seq);
3119	while (!IS_ROOT(dentry)) {
3120		struct dentry *parent = dentry->d_parent;
3121
3122		prefetch(parent);
3123		error = prepend_name(&end, &len, &dentry->d_name);
3124		if (error)
3125			break;
3126
3127		retval = end;
3128		dentry = parent;
3129	}
3130	if (!(seq & 1))
3131		rcu_read_unlock();
3132	if (need_seqretry(&rename_lock, seq)) {
3133		seq = 1;
3134		goto restart;
3135	}
3136	done_seqretry(&rename_lock, seq);
3137	if (error)
3138		goto Elong;
3139	return retval;
3140Elong:
3141	return ERR_PTR(-ENAMETOOLONG);
3142}
3143
3144char *dentry_path_raw(struct dentry *dentry, char *buf, int buflen)
3145{
3146	return __dentry_path(dentry, buf, buflen);
3147}
3148EXPORT_SYMBOL(dentry_path_raw);
3149
3150char *dentry_path(struct dentry *dentry, char *buf, int buflen)
3151{
3152	char *p = NULL;
3153	char *retval;
3154
3155	if (d_unlinked(dentry)) {
3156		p = buf + buflen;
3157		if (prepend(&p, &buflen, "//deleted", 10) != 0)
3158			goto Elong;
3159		buflen++;
3160	}
3161	retval = __dentry_path(dentry, buf, buflen);
3162	if (!IS_ERR(retval) && p)
3163		*p = '/';	/* restore '/' overridden with '\0' */
3164	return retval;
3165Elong:
3166	return ERR_PTR(-ENAMETOOLONG);
3167}
3168
3169static void get_fs_root_and_pwd_rcu(struct fs_struct *fs, struct path *root,
3170				    struct path *pwd)
3171{
3172	unsigned seq;
3173
3174	do {
3175		seq = read_seqcount_begin(&fs->seq);
3176		*root = fs->root;
3177		*pwd = fs->pwd;
3178	} while (read_seqcount_retry(&fs->seq, seq));
3179}
3180
3181/*
3182 * NOTE! The user-level library version returns a
3183 * character pointer. The kernel system call just
3184 * returns the length of the buffer filled (which
3185 * includes the ending '\0' character), or a negative
3186 * error value. So libc would do something like
3187 *
3188 *	char *getcwd(char * buf, size_t size)
3189 *	{
3190 *		int retval;
3191 *
3192 *		retval = sys_getcwd(buf, size);
3193 *		if (retval >= 0)
3194 *			return buf;
3195 *		errno = -retval;
3196 *		return NULL;
3197 *	}
3198 */
3199SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
3200{
3201	int error;
3202	struct path pwd, root;
3203	char *page = __getname();
3204
3205	if (!page)
3206		return -ENOMEM;
3207
3208	rcu_read_lock();
3209	get_fs_root_and_pwd_rcu(current->fs, &root, &pwd);
3210
3211	error = -ENOENT;
3212	if (!d_unlinked(pwd.dentry)) {
3213		unsigned long len;
3214		char *cwd = page + PATH_MAX;
3215		int buflen = PATH_MAX;
3216
3217		prepend(&cwd, &buflen, "\0", 1);
3218		error = prepend_path(&pwd, &root, &cwd, &buflen);
3219		rcu_read_unlock();
3220
3221		if (error < 0)
3222			goto out;
3223
3224		/* Unreachable from current root */
3225		if (error > 0) {
3226			error = prepend_unreachable(&cwd, &buflen);
3227			if (error)
3228				goto out;
3229		}
3230
3231		error = -ERANGE;
3232		len = PATH_MAX + page - cwd;
3233		if (len <= size) {
3234			error = len;
3235			if (copy_to_user(buf, cwd, len))
3236				error = -EFAULT;
3237		}
3238	} else {
3239		rcu_read_unlock();
3240	}
3241
3242out:
3243	__putname(page);
3244	return error;
3245}
3246
3247/*
3248 * Test whether new_dentry is a subdirectory of old_dentry.
3249 *
3250 * Trivially implemented using the dcache structure
3251 */
3252
3253/**
3254 * is_subdir - is new dentry a subdirectory of old_dentry
3255 * @new_dentry: new dentry
3256 * @old_dentry: old dentry
3257 *
3258 * Returns 1 if new_dentry is a subdirectory of old_dentry (at any depth).
3259 * Returns 0 otherwise.
3260 * Caller must ensure that "new_dentry" is pinned before calling is_subdir()
3261 */
3262
3263int is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
3264{
3265	int result;
3266	unsigned seq;
3267
3268	if (new_dentry == old_dentry)
3269		return 1;
3270
3271	do {
3272		/* for restarting inner loop in case of seq retry */
3273		seq = read_seqbegin(&rename_lock);
3274		/*
3275		 * Need rcu_read_lock() to protect against d_parent trashing
3276		 * due to d_move()
3277		 */
3278		rcu_read_lock();
3279		if (d_ancestor(old_dentry, new_dentry))
3280			result = 1;
3281		else
3282			result = 0;
3283		rcu_read_unlock();
3284	} while (read_seqretry(&rename_lock, seq));
3285
3286	return result;
3287}
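/*
 * Editorial sketch, not part of the original file: a typical guard that
 * an object lives below a given root before acting on it.  -EXDEV is an
 * arbitrary choice for the example.
 */
#if 0
static int example_check_beneath(struct dentry *victim, struct dentry *root)
{
	return is_subdir(victim, root) ? 0 : -EXDEV;
}
#endif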
3288
3289static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
3290{
3291	struct dentry *root = data;
3292	if (dentry != root) {
3293		if (d_unhashed(dentry) || !dentry->d_inode)
3294			return D_WALK_SKIP;
3295
3296		if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
3297			dentry->d_flags |= DCACHE_GENOCIDE;
3298			dentry->d_lockref.count--;
3299		}
3300	}
3301	return D_WALK_CONTINUE;
3302}
3303
3304void d_genocide(struct dentry *parent)
3305{
3306	d_walk(parent, parent, d_genocide_kill, NULL);
3307}
3308
3309void d_tmpfile(struct dentry *dentry, struct inode *inode)
3310{
3311	inode_dec_link_count(inode);
3312	BUG_ON(dentry->d_name.name != dentry->d_iname ||
3313		!hlist_unhashed(&dentry->d_alias) ||
3314		!d_unlinked(dentry));
3315	spin_lock(&dentry->d_parent->d_lock);
3316	spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
3317	dentry->d_name.len = sprintf(dentry->d_iname, "#%llu",
3318				(unsigned long long)inode->i_ino);
3319	spin_unlock(&dentry->d_lock);
3320	spin_unlock(&dentry->d_parent->d_lock);
3321	d_instantiate(dentry, inode);
3322}
3323EXPORT_SYMBOL(d_tmpfile);
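/*
 * Editorial sketch, not part of the original file: an ->tmpfile() method
 * handing its freshly allocated inode to d_tmpfile().
 * example_fs_new_inode() is hypothetical.
 */
#if 0
static int example_tmpfile(struct inode *dir, struct dentry *dentry,
			   umode_t mode)
{
	struct inode *inode = example_fs_new_inode(dir, mode);

	if (IS_ERR(inode))
		return PTR_ERR(inode);
	d_tmpfile(dentry, inode);	/* names it "#<ino>" and instantiates */
	return 0;
}
#endif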
3324
3325static __initdata unsigned long dhash_entries;
3326static int __init set_dhash_entries(char *str)
3327{
3328	if (!str)
3329		return 0;
3330	dhash_entries = simple_strtoul(str, &str, 0);
3331	return 1;
3332}
3333__setup("dhash_entries=", set_dhash_entries);
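/*
 * Editorial note, not part of the original file: this lets the hash table
 * size be forced at boot, e.g. "dhash_entries=1048576" on the kernel
 * command line; otherwise alloc_large_system_hash() below sizes it from
 * available memory.
 */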
3334
3335static void __init dcache_init_early(void)
3336{
3337	unsigned int loop;
3338
3339	/* If hashes are distributed across NUMA nodes, defer
3340	 * hash allocation until vmalloc space is available.
3341	 */
3342	if (hashdist)
3343		return;
3344
3345	dentry_hashtable =
3346		alloc_large_system_hash("Dentry cache",
3347					sizeof(struct hlist_bl_head),
3348					dhash_entries,
3349					13,
3350					HASH_EARLY,
3351					&d_hash_shift,
3352					&d_hash_mask,
3353					0,
3354					0);
3355
3356	for (loop = 0; loop < (1U << d_hash_shift); loop++)
3357		INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
3358}
3359
3360static void __init dcache_init(void)
3361{
3362	unsigned int loop;
3363
3364	/* 
3365	 * A constructor could be added for stable state like the lists,
3366	 * but it is probably not worth it because of the cache nature
3367	 * of the dcache. 
3368	 */
3369	dentry_cache = KMEM_CACHE(dentry,
3370		SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD);
3371
3372	/* Hash may have been set up in dcache_init_early */
3373	if (!hashdist)
3374		return;
3375
3376	dentry_hashtable =
3377		alloc_large_system_hash("Dentry cache",
3378					sizeof(struct hlist_bl_head),
3379					dhash_entries,
3380					13,
3381					0,
3382					&d_hash_shift,
3383					&d_hash_mask,
3384					0,
3385					0);
3386
3387	for (loop = 0; loop < (1U << d_hash_shift); loop++)
3388		INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
3389}
3390
3391/* SLAB cache for __getname() consumers */
3392struct kmem_cache *names_cachep __read_mostly;
3393EXPORT_SYMBOL(names_cachep);
3394
3395EXPORT_SYMBOL(d_genocide);
3396
3397void __init vfs_caches_init_early(void)
3398{
3399	dcache_init_early();
3400	inode_init_early();
3401}
3402
3403void __init vfs_caches_init(unsigned long mempages)
3404{
3405	unsigned long reserve;
3406
3407	/* Base hash sizes on available memory, with a reserve equal to
3408           150% of current kernel size */
3409
3410	reserve = min((mempages - nr_free_pages()) * 3/2, mempages - 1);
3411	mempages -= reserve;
3412
3413	names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
3414			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
3415
3416	dcache_init();
3417	inode_init();
3418	files_init(mempages);
3419	mnt_init();
3420	bdev_cache_init();
3421	chrdev_init();
3422}