v6.13.7
// SPDX-License-Identifier: GPL-2.0
/*
 * The NFSD open file cache.
 *
 * (c) 2015 - Jeff Layton <jeff.layton@primarydata.com>
 *
 * An nfsd_file object is a per-file collection of open state that binds
 * together:
 *   - a struct file *
 *   - a user credential
 *   - a network namespace
 *   - a read-ahead context
 *   - monitoring for writeback errors
 *
 * nfsd_file objects are reference-counted. Consumers acquire a new
 * object via the nfsd_file_acquire API. They manage their interest in
 * the acquired object, and hence the object's reference count, via
 * nfsd_file_get and nfsd_file_put. There are two varieties of nfsd_file
 * object:
 *
 *  * non-garbage-collected: When a consumer wants to precisely control
 *    the lifetime of a file's open state, it acquires a non-garbage-
 *    collected nfsd_file. The final nfsd_file_put releases the open
 *    state immediately.
 *
 *  * garbage-collected: When a consumer does not control the lifetime
 *    of open state, it acquires a garbage-collected nfsd_file. The
 *    final nfsd_file_put allows the open state to linger for a period
 *    during which it may be re-used.
 */
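
/*
 * A minimal consumer sketch (illustrative only, not part of the
 * original file): a request handler acquires a garbage-collected
 * nfsd_file for the duration of one RPC and puts it when done. The
 * rqstp and fhp variables are assumed to come from the caller.
 *
 *	struct nfsd_file *nf;
 *	__be32 status;
 *
 *	status = nfsd_file_acquire_gc(rqstp, fhp, NFSD_MAY_READ, &nf);
 *	if (status != nfs_ok)
 *		return status;
 *	// ... issue I/O against nf->nf_file ...
 *	nfsd_file_put(nf);
 */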

#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/list_lru.h>
#include <linux/fsnotify_backend.h>
#include <linux/fsnotify.h>
#include <linux/seq_file.h>
#include <linux/rhashtable.h>

#include "vfs.h"
#include "nfsd.h"
#include "nfsfh.h"
#include "netns.h"
#include "filecache.h"
#include "trace.h"

#define NFSD_LAUNDRETTE_DELAY		     (2 * HZ)

#define NFSD_FILE_CACHE_UP		     (0)

/* We only care about NFSD_MAY_READ/WRITE for this cache */
#define NFSD_FILE_MAY_MASK	(NFSD_MAY_READ|NFSD_MAY_WRITE|NFSD_MAY_LOCALIO)

static DEFINE_PER_CPU(unsigned long, nfsd_file_cache_hits);
static DEFINE_PER_CPU(unsigned long, nfsd_file_acquisitions);
static DEFINE_PER_CPU(unsigned long, nfsd_file_allocations);
static DEFINE_PER_CPU(unsigned long, nfsd_file_releases);
static DEFINE_PER_CPU(unsigned long, nfsd_file_total_age);
static DEFINE_PER_CPU(unsigned long, nfsd_file_evictions);

struct nfsd_fcache_disposal {
	spinlock_t lock;
	struct list_head freeme;
};

static struct kmem_cache		*nfsd_file_slab;
static struct kmem_cache		*nfsd_file_mark_slab;
static struct list_lru			nfsd_file_lru;
static unsigned long			nfsd_file_flags;
static struct fsnotify_group		*nfsd_file_fsnotify_group;
static struct delayed_work		nfsd_filecache_laundrette;
static struct rhltable			nfsd_file_rhltable
						____cacheline_aligned_in_smp;

static bool
nfsd_match_cred(const struct cred *c1, const struct cred *c2)
{
	int i;

	if (!uid_eq(c1->fsuid, c2->fsuid))
		return false;
	if (!gid_eq(c1->fsgid, c2->fsgid))
		return false;
	if (c1->group_info == NULL || c2->group_info == NULL)
		return c1->group_info == c2->group_info;
	if (c1->group_info->ngroups != c2->group_info->ngroups)
		return false;
	for (i = 0; i < c1->group_info->ngroups; i++) {
		if (!gid_eq(c1->group_info->gid[i], c2->group_info->gid[i]))
			return false;
	}
	return true;
}

static const struct rhashtable_params nfsd_file_rhash_params = {
	.key_len		= sizeof_field(struct nfsd_file, nf_inode),
	.key_offset		= offsetof(struct nfsd_file, nf_inode),
	.head_offset		= offsetof(struct nfsd_file, nf_rlist),

	/*
	 * Start with a single page hash table to reduce resizing churn
	 * on light workloads.
	 */
	.min_size		= 256,
	.automatic_shrinking	= true,
};
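
/*
 * A sketch of how these parameters are used below: the hash key is the
 * bare struct inode pointer, so every nfsd_file for a given inode lands
 * on the same rhlist chain:
 *
 *	list = rhltable_lookup(&nfsd_file_rhltable, &inode,
 *			       nfsd_file_rhash_params);
 *	rhl_for_each_entry_rcu(nf, tmp, list, nf_rlist)
 *		...
 */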

static void
nfsd_file_schedule_laundrette(void)
{
	if (test_bit(NFSD_FILE_CACHE_UP, &nfsd_file_flags))
		queue_delayed_work(system_unbound_wq, &nfsd_filecache_laundrette,
				   NFSD_LAUNDRETTE_DELAY);
}

static void
nfsd_file_slab_free(struct rcu_head *rcu)
{
	struct nfsd_file *nf = container_of(rcu, struct nfsd_file, nf_rcu);

	put_cred(nf->nf_cred);
	kmem_cache_free(nfsd_file_slab, nf);
}

static void
nfsd_file_mark_free(struct fsnotify_mark *mark)
{
	struct nfsd_file_mark *nfm = container_of(mark, struct nfsd_file_mark,
						  nfm_mark);

	kmem_cache_free(nfsd_file_mark_slab, nfm);
}

static struct nfsd_file_mark *
nfsd_file_mark_get(struct nfsd_file_mark *nfm)
{
	if (!refcount_inc_not_zero(&nfm->nfm_ref))
		return NULL;
	return nfm;
}

static void
nfsd_file_mark_put(struct nfsd_file_mark *nfm)
{
	if (refcount_dec_and_test(&nfm->nfm_ref)) {
		fsnotify_destroy_mark(&nfm->nfm_mark, nfsd_file_fsnotify_group);
		fsnotify_put_mark(&nfm->nfm_mark);
	}
}

static struct nfsd_file_mark *
nfsd_file_mark_find_or_create(struct inode *inode)
{
	int			err;
	struct fsnotify_mark	*mark;
	struct nfsd_file_mark	*nfm = NULL, *new;

	do {
		fsnotify_group_lock(nfsd_file_fsnotify_group);
		mark = fsnotify_find_inode_mark(inode,
						nfsd_file_fsnotify_group);
		if (mark) {
			nfm = nfsd_file_mark_get(container_of(mark,
						 struct nfsd_file_mark,
						 nfm_mark));
			fsnotify_group_unlock(nfsd_file_fsnotify_group);
			if (nfm) {
				fsnotify_put_mark(mark);
				break;
			}
			/* Avoid soft lockup race with nfsd_file_mark_put() */
			fsnotify_destroy_mark(mark, nfsd_file_fsnotify_group);
			fsnotify_put_mark(mark);
		} else {
			fsnotify_group_unlock(nfsd_file_fsnotify_group);
		}

		/* allocate a new nfm */
		new = kmem_cache_alloc(nfsd_file_mark_slab, GFP_KERNEL);
		if (!new)
			return NULL;
		fsnotify_init_mark(&new->nfm_mark, nfsd_file_fsnotify_group);
		new->nfm_mark.mask = FS_ATTRIB|FS_DELETE_SELF;
		refcount_set(&new->nfm_ref, 1);

		err = fsnotify_add_inode_mark(&new->nfm_mark, inode, 0);

		/*
		 * If the add was successful, then return the object.
		 * Otherwise, we need to put the reference we hold on the
		 * nfm_mark. The fsnotify code will take a reference and put
		 * it on failure, so we can't just free it directly. It's also
		 * not safe to call fsnotify_destroy_mark on it as the
		 * mark->group will be NULL. Thus, we can't let the nfm_ref
		 * counter drive the destruction at this point.
		 */
		if (likely(!err))
			nfm = new;
		else
			fsnotify_put_mark(&new->nfm_mark);
	} while (unlikely(err == -EEXIST));

	return nfm;
}

static struct nfsd_file *
nfsd_file_alloc(struct net *net, struct inode *inode, unsigned char need,
		bool want_gc)
{
	struct nfsd_file *nf;

	nf = kmem_cache_alloc(nfsd_file_slab, GFP_KERNEL);
	if (unlikely(!nf))
		return NULL;

	this_cpu_inc(nfsd_file_allocations);
	INIT_LIST_HEAD(&nf->nf_lru);
	INIT_LIST_HEAD(&nf->nf_gc);
	nf->nf_birthtime = ktime_get();
	nf->nf_file = NULL;
	nf->nf_cred = get_current_cred();
	nf->nf_net = net;
	nf->nf_flags = want_gc ?
		BIT(NFSD_FILE_HASHED) | BIT(NFSD_FILE_PENDING) | BIT(NFSD_FILE_GC) :
		BIT(NFSD_FILE_HASHED) | BIT(NFSD_FILE_PENDING);
	nf->nf_inode = inode;
	refcount_set(&nf->nf_ref, 1);
	nf->nf_may = need;
	nf->nf_mark = NULL;
	return nf;
}

/**
 * nfsd_file_check_write_error - check for writeback errors on a file
 * @nf: nfsd_file to check for writeback errors
 *
 * Check whether a nfsd_file has an unseen error. Reset the write
 * verifier if so.
 */
static void
nfsd_file_check_write_error(struct nfsd_file *nf)
{
	struct file *file = nf->nf_file;

	if ((file->f_mode & FMODE_WRITE) &&
	    filemap_check_wb_err(file->f_mapping, READ_ONCE(file->f_wb_err)))
		nfsd_reset_write_verifier(net_generic(nf->nf_net, nfsd_net_id));
}

static void
nfsd_file_hash_remove(struct nfsd_file *nf)
{
	trace_nfsd_file_unhash(nf);
	rhltable_remove(&nfsd_file_rhltable, &nf->nf_rlist,
			nfsd_file_rhash_params);
}

static bool
nfsd_file_unhash(struct nfsd_file *nf)
{
	if (test_and_clear_bit(NFSD_FILE_HASHED, &nf->nf_flags)) {
		nfsd_file_hash_remove(nf);
		return true;
	}
	return false;
}

static void
nfsd_file_free(struct nfsd_file *nf)
{
	s64 age = ktime_to_ms(ktime_sub(ktime_get(), nf->nf_birthtime));

	trace_nfsd_file_free(nf);

	this_cpu_inc(nfsd_file_releases);
	this_cpu_add(nfsd_file_total_age, age);

	nfsd_file_unhash(nf);
	if (nf->nf_mark)
		nfsd_file_mark_put(nf->nf_mark);
	if (nf->nf_file) {
		nfsd_file_check_write_error(nf);
		nfsd_filp_close(nf->nf_file);
	}

	/*
	 * If this item is still linked via nf_lru, that's a bug.
	 * WARN and leak it to preserve system stability.
	 */
	if (WARN_ON_ONCE(!list_empty(&nf->nf_lru)))
		return;

	call_rcu(&nf->nf_rcu, nfsd_file_slab_free);
}

static bool
nfsd_file_check_writeback(struct nfsd_file *nf)
{
	struct file *file = nf->nf_file;
	struct address_space *mapping;

	/* File not open for write? */
	if (!(file->f_mode & FMODE_WRITE))
		return false;

	/*
	 * Some filesystems (e.g. NFS) flush all dirty data on close.
	 * On others, there is no need to wait for writeback.
	 */
	if (!(file_inode(file)->i_sb->s_export_op->flags & EXPORT_OP_FLUSH_ON_CLOSE))
		return false;

	mapping = file->f_mapping;
	return mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) ||
		mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK);
}


static bool nfsd_file_lru_add(struct nfsd_file *nf)
{
	set_bit(NFSD_FILE_REFERENCED, &nf->nf_flags);
	if (list_lru_add_obj(&nfsd_file_lru, &nf->nf_lru)) {
		trace_nfsd_file_lru_add(nf);
		return true;
	}
	return false;
}

static bool nfsd_file_lru_remove(struct nfsd_file *nf)
{
	if (list_lru_del_obj(&nfsd_file_lru, &nf->nf_lru)) {
		trace_nfsd_file_lru_del(nf);
		return true;
	}
	return false;
}

struct nfsd_file *
nfsd_file_get(struct nfsd_file *nf)
{
	if (nf && refcount_inc_not_zero(&nf->nf_ref))
		return nf;
	return NULL;
}

/**
 * nfsd_file_put - put the reference to a nfsd_file
 * @nf: nfsd_file of which to put the reference
 *
 * Put a reference to a nfsd_file. In the non-GC case, we just put the
 * reference immediately. In the GC case, if the reference would be
 * the last one, then put it on the LRU instead to be cleaned up later.
 */
void
nfsd_file_put(struct nfsd_file *nf)
{
	might_sleep();
	trace_nfsd_file_put(nf);

	if (test_bit(NFSD_FILE_GC, &nf->nf_flags) &&
	    test_bit(NFSD_FILE_HASHED, &nf->nf_flags)) {
		/*
		 * If this is the last reference (nf_ref == 1), then try to
		 * transfer it to the LRU.
		 */
		if (refcount_dec_not_one(&nf->nf_ref))
			return;

		/* Try to add it to the LRU.  If that fails, decrement. */
		if (nfsd_file_lru_add(nf)) {
			/* If it's still hashed, we're done */
			if (test_bit(NFSD_FILE_HASHED, &nf->nf_flags)) {
				nfsd_file_schedule_laundrette();
				return;
			}

			/*
			 * We're racing with unhashing, so try to remove it from
			 * the LRU. If removal fails, then someone else already
			 * has our reference.
			 */
			if (!nfsd_file_lru_remove(nf))
				return;
		}
	}
	if (refcount_dec_and_test(&nf->nf_ref))
		nfsd_file_free(nf);
}
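
/*
 * A worked example of the GC path above (a sketch of the intended
 * transitions, assuming no concurrent unhash):
 *
 *	nf_ref == 2: refcount_dec_not_one() drops it to 1 and we return;
 *	the remaining reference belongs to another holder.
 *
 *	nf_ref == 1: refcount_dec_not_one() fails, so nfsd_file_lru_add()
 *	transfers the final reference to the LRU; the laundrette later
 *	reaps it (the REFERENCED bit lets it survive the first scan).
 */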

/**
 * nfsd_file_put_local - put nfsd_file reference and arm nfsd_serv_put in caller
 * @nf: nfsd_file of which to put the reference
 *
 * First save the associated net to return to caller, then put
 * the reference of the nfsd_file.
 */
struct net *
nfsd_file_put_local(struct nfsd_file *nf)
{
	struct net *net = nf->nf_net;

	nfsd_file_put(nf);
	return net;
}

/**
 * nfsd_file_file - get the backing file of an nfsd_file
 * @nf: nfsd_file of which to access the backing file.
 *
 * Return backing file for @nf.
 */
struct file *
nfsd_file_file(struct nfsd_file *nf)
{
	return nf->nf_file;
}

static void
nfsd_file_dispose_list(struct list_head *dispose)
{
	struct nfsd_file *nf;

	while (!list_empty(dispose)) {
		nf = list_first_entry(dispose, struct nfsd_file, nf_gc);
		list_del_init(&nf->nf_gc);
		nfsd_file_free(nf);
	}
}

/**
 * nfsd_file_dispose_list_delayed - move list of dead files to net's freeme list
 * @dispose: list of nfsd_files to be disposed
 *
 * Transfers each file to the "freeme" list for its nfsd_net, to eventually
 * be disposed of by the per-net garbage collector.
 */
static void
nfsd_file_dispose_list_delayed(struct list_head *dispose)
{
	while (!list_empty(dispose)) {
		struct nfsd_file *nf = list_first_entry(dispose,
						struct nfsd_file, nf_gc);
		struct nfsd_net *nn = net_generic(nf->nf_net, nfsd_net_id);
		struct nfsd_fcache_disposal *l = nn->fcache_disposal;
		struct svc_serv *serv;

		spin_lock(&l->lock);
		list_move_tail(&nf->nf_gc, &l->freeme);
		spin_unlock(&l->lock);

		/*
		 * The filecache laundrette is shut down after the
		 * nn->nfsd_serv pointer is cleared, but before the
		 * svc_serv is freed.
		 */
		serv = nn->nfsd_serv;
		if (serv)
			svc_wake_up(serv);
	}
}

/**
 * nfsd_file_net_dispose - deal with nfsd_files waiting to be disposed.
 * @nn: nfsd_net in which to find files to be disposed.
 *
 * When files held open for NFSv3 are removed from the filecache, whether
 * due to memory pressure or garbage collection, they are queued to
 * a per-net-ns queue.  This function completes the disposal, either
 * directly or by waking another nfsd thread to help with the work.
 */
void nfsd_file_net_dispose(struct nfsd_net *nn)
{
	struct nfsd_fcache_disposal *l = nn->fcache_disposal;

	if (!list_empty(&l->freeme)) {
		LIST_HEAD(dispose);
		int i;

		spin_lock(&l->lock);
		for (i = 0; i < 8 && !list_empty(&l->freeme); i++)
			list_move(l->freeme.next, &dispose);
		spin_unlock(&l->lock);
		if (!list_empty(&l->freeme))
			/* Wake up another thread to share the work
			 * *before* doing any actual disposing.
			 */
			svc_wake_up(nn->nfsd_serv);
		nfsd_file_dispose_list(&dispose);
	}
}

/**
 * nfsd_file_lru_cb - Examine an entry on the LRU list
 * @item: LRU entry to examine
 * @lru: controlling LRU
 * @arg: dispose list
 *
 * Return values:
 *   %LRU_REMOVED: @item was removed from the LRU
 *   %LRU_ROTATE: @item is to be moved to the LRU tail
 *   %LRU_SKIP: @item cannot be evicted
 */
static enum lru_status
nfsd_file_lru_cb(struct list_head *item, struct list_lru_one *lru,
		 void *arg)
{
	struct list_head *head = arg;
	struct nfsd_file *nf = list_entry(item, struct nfsd_file, nf_lru);

	/* We should only be dealing with GC entries here */
	WARN_ON_ONCE(!test_bit(NFSD_FILE_GC, &nf->nf_flags));

	/*
	 * Don't throw out files that are still undergoing I/O or
	 * that have uncleared errors pending.
	 */
	if (nfsd_file_check_writeback(nf)) {
		trace_nfsd_file_gc_writeback(nf);
		return LRU_SKIP;
	}

	/* If it was recently added to the list, skip it */
	if (test_and_clear_bit(NFSD_FILE_REFERENCED, &nf->nf_flags)) {
		trace_nfsd_file_gc_referenced(nf);
		return LRU_ROTATE;
	}

	/*
	 * Put the reference held on behalf of the LRU. If it wasn't the last
	 * one, then just remove it from the LRU and ignore it.
	 */
	if (!refcount_dec_and_test(&nf->nf_ref)) {
		trace_nfsd_file_gc_in_use(nf);
		list_lru_isolate(lru, &nf->nf_lru);
		return LRU_REMOVED;
	}

	/* Refcount went to zero. Unhash it and queue it to the dispose list */
	nfsd_file_unhash(nf);
	list_lru_isolate(lru, &nf->nf_lru);
	list_add(&nf->nf_gc, head);
	this_cpu_inc(nfsd_file_evictions);
	trace_nfsd_file_gc_disposed(nf);
	return LRU_REMOVED;
}

static void
nfsd_file_gc(void)
{
	LIST_HEAD(dispose);
	unsigned long ret;

	ret = list_lru_walk(&nfsd_file_lru, nfsd_file_lru_cb,
			    &dispose, list_lru_count(&nfsd_file_lru));
	trace_nfsd_file_gc_removed(ret, list_lru_count(&nfsd_file_lru));
	nfsd_file_dispose_list_delayed(&dispose);
}

static void
nfsd_file_gc_worker(struct work_struct *work)
{
	nfsd_file_gc();
	if (list_lru_count(&nfsd_file_lru))
		nfsd_file_schedule_laundrette();
}

static unsigned long
nfsd_file_lru_count(struct shrinker *s, struct shrink_control *sc)
{
	return list_lru_count(&nfsd_file_lru);
}

static unsigned long
nfsd_file_lru_scan(struct shrinker *s, struct shrink_control *sc)
{
	LIST_HEAD(dispose);
	unsigned long ret;

	ret = list_lru_shrink_walk(&nfsd_file_lru, sc,
				   nfsd_file_lru_cb, &dispose);
	trace_nfsd_file_shrinker_removed(ret, list_lru_count(&nfsd_file_lru));
	nfsd_file_dispose_list_delayed(&dispose);
	return ret;
}

static struct shrinker *nfsd_file_shrinker;

/**
 * nfsd_file_cond_queue - conditionally unhash and queue a nfsd_file
 * @nf: nfsd_file to attempt to queue
 * @dispose: private list to queue successfully-put objects
 *
 * Unhash an nfsd_file, try to get a reference to it, and then put that
 * reference. If it's the last reference, queue it to the dispose list.
 */
static void
nfsd_file_cond_queue(struct nfsd_file *nf, struct list_head *dispose)
	__must_hold(RCU)
{
	int decrement = 1;

	/* If we raced with someone else unhashing, ignore it */
	if (!nfsd_file_unhash(nf))
		return;

	/* If we can't get a reference, ignore it */
	if (!nfsd_file_get(nf))
		return;

	/* Extra decrement if we remove from the LRU */
	if (nfsd_file_lru_remove(nf))
		++decrement;

	/* If refcount goes to 0, then put on the dispose list */
	if (refcount_sub_and_test(decrement, &nf->nf_ref)) {
		list_add(&nf->nf_gc, dispose);
		trace_nfsd_file_closing(nf);
	}
}

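/*
 * A worked sketch of the refcounting in nfsd_file_cond_queue() above,
 * assuming the caller holds the RCU read lock: nfsd_file_get() takes a
 * temporary reference, and if the entry also sat on the LRU, that
 * reference and the temporary one are dropped together by
 * refcount_sub_and_test(), so an otherwise-unused file lands on
 * @dispose with nf_ref at zero.
 */
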
/**
 * nfsd_file_queue_for_close - try to close out any open nfsd_files for an inode
 * @inode:   inode on which to close out nfsd_files
 * @dispose: list on which to gather nfsd_files to close out
 *
 * An nfsd_file represents a struct file being held open on behalf of nfsd.
 * An open file however can block other activity (such as leases), or cause
 * undesirable behavior (e.g. spurious silly-renames when reexporting NFS).
 *
 * This function is intended to find open nfsd_files when this sort of
 * conflicting access occurs and then attempt to close those files out.
 *
 * Populates the dispose list with entries that have already had their
 * refcounts go to zero. The actual free of an nfsd_file can be expensive,
 * so we leave it up to the caller whether it wants to wait or not.
 */
static void
nfsd_file_queue_for_close(struct inode *inode, struct list_head *dispose)
{
	struct rhlist_head *tmp, *list;
	struct nfsd_file *nf;

	rcu_read_lock();
	list = rhltable_lookup(&nfsd_file_rhltable, &inode,
			       nfsd_file_rhash_params);
	rhl_for_each_entry_rcu(nf, tmp, list, nf_rlist) {
		if (!test_bit(NFSD_FILE_GC, &nf->nf_flags))
			continue;
		nfsd_file_cond_queue(nf, dispose);
	}
	rcu_read_unlock();
}

/**
 * nfsd_file_close_inode - attempt a delayed close of a nfsd_file
 * @inode: inode of the file to attempt to remove
 *
 * Close out any open nfsd_files that can be reaped for @inode. The
 * actual freeing is deferred to the dispose_list_delayed infrastructure.
 *
 * This is used by the fsnotify callbacks and setlease notifier.
 */
static void
nfsd_file_close_inode(struct inode *inode)
{
	LIST_HEAD(dispose);

	nfsd_file_queue_for_close(inode, &dispose);
	nfsd_file_dispose_list_delayed(&dispose);
}

/**
 * nfsd_file_close_inode_sync - attempt to forcibly close a nfsd_file
 * @inode: inode of the file to attempt to remove
 *
 * Close out any open nfsd_files that can be reaped for @inode. The
 * nfsd_files are closed out synchronously.
 *
 * This is called from nfsd_rename and nfsd_unlink to avoid silly-renames
 * when reexporting NFS.
 */
void
nfsd_file_close_inode_sync(struct inode *inode)
{
	struct nfsd_file *nf;
	LIST_HEAD(dispose);

	trace_nfsd_file_close(inode);

	nfsd_file_queue_for_close(inode, &dispose);
	while (!list_empty(&dispose)) {
		nf = list_first_entry(&dispose, struct nfsd_file, nf_gc);
		list_del_init(&nf->nf_gc);
		nfsd_file_free(nf);
	}
}

static int
nfsd_file_lease_notifier_call(struct notifier_block *nb, unsigned long arg,
			    void *data)
{
	struct file_lease *fl = data;

	/* Only close files for F_SETLEASE leases */
	if (fl->c.flc_flags & FL_LEASE)
		nfsd_file_close_inode(file_inode(fl->c.flc_file));
	return 0;
}

static struct notifier_block nfsd_file_lease_notifier = {
	.notifier_call = nfsd_file_lease_notifier_call,
};

static int
nfsd_file_fsnotify_handle_event(struct fsnotify_mark *mark, u32 mask,
				struct inode *inode, struct inode *dir,
				const struct qstr *name, u32 cookie)
{
	if (WARN_ON_ONCE(!inode))
		return 0;

	trace_nfsd_file_fsnotify_handle_event(inode, mask);

	/* Should be no marks on non-regular files */
	if (!S_ISREG(inode->i_mode)) {
		WARN_ON_ONCE(1);
		return 0;
	}

	/* don't close files if this was not the last link */
	if (mask & FS_ATTRIB) {
		if (inode->i_nlink)
			return 0;
	}

	nfsd_file_close_inode(inode);
	return 0;
}


static const struct fsnotify_ops nfsd_file_fsnotify_ops = {
	.handle_inode_event = nfsd_file_fsnotify_handle_event,
	.free_mark = nfsd_file_mark_free,
};

int
nfsd_file_cache_init(void)
{
	int ret;

	lockdep_assert_held(&nfsd_mutex);
	if (test_and_set_bit(NFSD_FILE_CACHE_UP, &nfsd_file_flags) == 1)
		return 0;

	ret = rhltable_init(&nfsd_file_rhltable, &nfsd_file_rhash_params);
	if (ret)
		goto out;

	ret = -ENOMEM;
	nfsd_file_slab = KMEM_CACHE(nfsd_file, 0);
	if (!nfsd_file_slab) {
		pr_err("nfsd: unable to create nfsd_file_slab\n");
		goto out_err;
	}

	nfsd_file_mark_slab = KMEM_CACHE(nfsd_file_mark, 0);
	if (!nfsd_file_mark_slab) {
		pr_err("nfsd: unable to create nfsd_file_mark_slab\n");
		goto out_err;
	}

	ret = list_lru_init(&nfsd_file_lru);
	if (ret) {
		pr_err("nfsd: failed to init nfsd_file_lru: %d\n", ret);
		goto out_err;
	}

	nfsd_file_shrinker = shrinker_alloc(0, "nfsd-filecache");
	if (!nfsd_file_shrinker) {
		ret = -ENOMEM;
		pr_err("nfsd: failed to allocate nfsd_file_shrinker\n");
		goto out_lru;
	}

	nfsd_file_shrinker->count_objects = nfsd_file_lru_count;
	nfsd_file_shrinker->scan_objects = nfsd_file_lru_scan;
	nfsd_file_shrinker->seeks = 1;

	shrinker_register(nfsd_file_shrinker);

	ret = lease_register_notifier(&nfsd_file_lease_notifier);
	if (ret) {
		pr_err("nfsd: unable to register lease notifier: %d\n", ret);
		goto out_shrinker;
	}

	nfsd_file_fsnotify_group = fsnotify_alloc_group(&nfsd_file_fsnotify_ops,
							0);
	if (IS_ERR(nfsd_file_fsnotify_group)) {
		pr_err("nfsd: unable to create fsnotify group: %ld\n",
			PTR_ERR(nfsd_file_fsnotify_group));
		ret = PTR_ERR(nfsd_file_fsnotify_group);
		nfsd_file_fsnotify_group = NULL;
		goto out_notifier;
	}

	INIT_DELAYED_WORK(&nfsd_filecache_laundrette, nfsd_file_gc_worker);
out:
	if (ret)
		clear_bit(NFSD_FILE_CACHE_UP, &nfsd_file_flags);
	return ret;
out_notifier:
	lease_unregister_notifier(&nfsd_file_lease_notifier);
out_shrinker:
	shrinker_free(nfsd_file_shrinker);
out_lru:
	list_lru_destroy(&nfsd_file_lru);
out_err:
	kmem_cache_destroy(nfsd_file_slab);
	nfsd_file_slab = NULL;
	kmem_cache_destroy(nfsd_file_mark_slab);
	nfsd_file_mark_slab = NULL;
	rhltable_destroy(&nfsd_file_rhltable);
	goto out;
}

/**
 * __nfsd_file_cache_purge - clean out the cache for shutdown
 * @net: net-namespace to shut down the cache (may be NULL)
 *
 * Walk the nfsd_file cache and close out any that match @net. If @net is NULL,
 * then close out everything. Called when an nfsd instance is being shut down,
 * and when the exports table is flushed.
 */
static void
__nfsd_file_cache_purge(struct net *net)
{
	struct rhashtable_iter iter;
	struct nfsd_file *nf;
	LIST_HEAD(dispose);

	rhltable_walk_enter(&nfsd_file_rhltable, &iter);
	do {
		rhashtable_walk_start(&iter);

		nf = rhashtable_walk_next(&iter);
		while (!IS_ERR_OR_NULL(nf)) {
			if (!net || nf->nf_net == net)
				nfsd_file_cond_queue(nf, &dispose);
			nf = rhashtable_walk_next(&iter);
		}

		rhashtable_walk_stop(&iter);
	} while (nf == ERR_PTR(-EAGAIN));
	rhashtable_walk_exit(&iter);

	nfsd_file_dispose_list(&dispose);
}

static struct nfsd_fcache_disposal *
nfsd_alloc_fcache_disposal(void)
{
	struct nfsd_fcache_disposal *l;

	l = kmalloc(sizeof(*l), GFP_KERNEL);
	if (!l)
		return NULL;
	spin_lock_init(&l->lock);
	INIT_LIST_HEAD(&l->freeme);
	return l;
}

static void
nfsd_free_fcache_disposal(struct nfsd_fcache_disposal *l)
{
	nfsd_file_dispose_list(&l->freeme);
	kfree(l);
}

static void
nfsd_free_fcache_disposal_net(struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	struct nfsd_fcache_disposal *l = nn->fcache_disposal;

	nfsd_free_fcache_disposal(l);
}

int
nfsd_file_cache_start_net(struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	nn->fcache_disposal = nfsd_alloc_fcache_disposal();
	return nn->fcache_disposal ? 0 : -ENOMEM;
}

/**
 * nfsd_file_cache_purge - Remove all cache items associated with @net
 * @net: target net namespace
 *
 */
void
nfsd_file_cache_purge(struct net *net)
{
	lockdep_assert_held(&nfsd_mutex);
	if (test_bit(NFSD_FILE_CACHE_UP, &nfsd_file_flags) == 1)
		__nfsd_file_cache_purge(net);
}

void
nfsd_file_cache_shutdown_net(struct net *net)
{
	nfsd_file_cache_purge(net);
	nfsd_free_fcache_disposal_net(net);
}

void
nfsd_file_cache_shutdown(void)
{
	int i;

	lockdep_assert_held(&nfsd_mutex);
	if (test_and_clear_bit(NFSD_FILE_CACHE_UP, &nfsd_file_flags) == 0)
		return;

	lease_unregister_notifier(&nfsd_file_lease_notifier);
	shrinker_free(nfsd_file_shrinker);
	/*
	 * make sure all callers of nfsd_file_lru_cb are done before
	 * calling nfsd_file_cache_purge
	 */
	cancel_delayed_work_sync(&nfsd_filecache_laundrette);
	__nfsd_file_cache_purge(NULL);
	list_lru_destroy(&nfsd_file_lru);
	rcu_barrier();
	fsnotify_put_group(nfsd_file_fsnotify_group);
	nfsd_file_fsnotify_group = NULL;
	kmem_cache_destroy(nfsd_file_slab);
	nfsd_file_slab = NULL;
	fsnotify_wait_marks_destroyed();
	kmem_cache_destroy(nfsd_file_mark_slab);
	nfsd_file_mark_slab = NULL;
	rhltable_destroy(&nfsd_file_rhltable);

	for_each_possible_cpu(i) {
		per_cpu(nfsd_file_cache_hits, i) = 0;
		per_cpu(nfsd_file_acquisitions, i) = 0;
		per_cpu(nfsd_file_allocations, i) = 0;
		per_cpu(nfsd_file_releases, i) = 0;
		per_cpu(nfsd_file_total_age, i) = 0;
		per_cpu(nfsd_file_evictions, i) = 0;
	}
}

static struct nfsd_file *
nfsd_file_lookup_locked(const struct net *net, const struct cred *cred,
			struct inode *inode, unsigned char need,
			bool want_gc)
{
	struct rhlist_head *tmp, *list;
	struct nfsd_file *nf;

	list = rhltable_lookup(&nfsd_file_rhltable, &inode,
			       nfsd_file_rhash_params);
	rhl_for_each_entry_rcu(nf, tmp, list, nf_rlist) {
		if (nf->nf_may != need)
			continue;
		if (nf->nf_net != net)
			continue;
		if (!nfsd_match_cred(nf->nf_cred, cred))
			continue;
		if (test_bit(NFSD_FILE_GC, &nf->nf_flags) != want_gc)
			continue;
		if (test_bit(NFSD_FILE_HASHED, &nf->nf_flags) == 0)
			continue;

		if (!nfsd_file_get(nf))
			continue;
		return nf;
	}
	return NULL;
}

/**
 * nfsd_file_is_cached - are there any cached open files for this inode?
 * @inode: inode to check
 *
 * The lookup matches inodes in all net namespaces and is atomic wrt
 * nfsd_file_acquire().
 *
 * Return values:
 *   %true: filecache contains at least one file matching this inode
 *   %false: filecache contains no files matching this inode
 */
bool
nfsd_file_is_cached(struct inode *inode)
{
	struct rhlist_head *tmp, *list;
	struct nfsd_file *nf;
	bool ret = false;

	rcu_read_lock();
	list = rhltable_lookup(&nfsd_file_rhltable, &inode,
			       nfsd_file_rhash_params);
	rhl_for_each_entry_rcu(nf, tmp, list, nf_rlist)
		if (test_bit(NFSD_FILE_GC, &nf->nf_flags)) {
			ret = true;
			break;
		}
	rcu_read_unlock();

	trace_nfsd_file_is_cached(inode, (int)ret);
	return ret;
}

static __be32
nfsd_file_do_acquire(struct svc_rqst *rqstp, struct net *net,
		     struct svc_cred *cred,
		     struct auth_domain *client,
		     struct svc_fh *fhp,
		     unsigned int may_flags, struct file *file,
		     struct nfsd_file **pnf, bool want_gc)
{
	unsigned char need = may_flags & NFSD_FILE_MAY_MASK;
	struct nfsd_file *new, *nf;
	bool stale_retry = true;
	bool open_retry = true;
	struct inode *inode;
	__be32 status;
	int ret;

retry:
	if (rqstp) {
		status = fh_verify(rqstp, fhp, S_IFREG,
				   may_flags|NFSD_MAY_OWNER_OVERRIDE);
	} else {
		status = fh_verify_local(net, cred, client, fhp, S_IFREG,
					 may_flags|NFSD_MAY_OWNER_OVERRIDE);
	}
	if (status != nfs_ok)
		return status;
	inode = d_inode(fhp->fh_dentry);

	rcu_read_lock();
	nf = nfsd_file_lookup_locked(net, current_cred(), inode, need, want_gc);
	rcu_read_unlock();

	if (nf) {
		/*
		 * If the nf is on the LRU then it holds an extra reference
		 * that must be put if it's removed. It had better not be
		 * the last one however, since we should hold another.
		 */
		if (nfsd_file_lru_remove(nf))
			refcount_dec(&nf->nf_ref);
		goto wait_for_construction;
	}

	new = nfsd_file_alloc(net, inode, need, want_gc);
	if (!new) {
		status = nfserr_jukebox;
		goto out;
	}

	rcu_read_lock();
	spin_lock(&inode->i_lock);
	nf = nfsd_file_lookup_locked(net, current_cred(), inode, need, want_gc);
	if (unlikely(nf)) {
		spin_unlock(&inode->i_lock);
		rcu_read_unlock();
		nfsd_file_free(new);
		goto wait_for_construction;
	}
	nf = new;
	ret = rhltable_insert(&nfsd_file_rhltable, &nf->nf_rlist,
			      nfsd_file_rhash_params);
	spin_unlock(&inode->i_lock);
	rcu_read_unlock();
	if (likely(ret == 0))
		goto open_file;

	trace_nfsd_file_insert_err(rqstp, inode, may_flags, ret);
	status = nfserr_jukebox;
	goto construction_err;

wait_for_construction:
	wait_on_bit(&nf->nf_flags, NFSD_FILE_PENDING, TASK_UNINTERRUPTIBLE);

	/* Did construction of this file fail? */
	if (!test_bit(NFSD_FILE_HASHED, &nf->nf_flags)) {
		trace_nfsd_file_cons_err(rqstp, inode, may_flags, nf);
		if (!open_retry) {
			status = nfserr_jukebox;
			goto construction_err;
		}
		nfsd_file_put(nf);
		open_retry = false;
		fh_put(fhp);
		goto retry;
	}
	this_cpu_inc(nfsd_file_cache_hits);

	status = nfserrno(nfsd_open_break_lease(file_inode(nf->nf_file), may_flags));
	if (status != nfs_ok) {
		nfsd_file_put(nf);
		nf = NULL;
	}

out:
	if (status == nfs_ok) {
		this_cpu_inc(nfsd_file_acquisitions);
		nfsd_file_check_write_error(nf);
		*pnf = nf;
	}
	trace_nfsd_file_acquire(rqstp, inode, may_flags, nf, status);
	return status;

open_file:
	trace_nfsd_file_alloc(nf);
	nf->nf_mark = nfsd_file_mark_find_or_create(inode);
	if (nf->nf_mark) {
		if (file) {
			get_file(file);
			nf->nf_file = file;
			status = nfs_ok;
			trace_nfsd_file_opened(nf, status);
		} else {
			ret = nfsd_open_verified(fhp, may_flags, &nf->nf_file);
			if (ret == -EOPENSTALE && stale_retry) {
				stale_retry = false;
				nfsd_file_unhash(nf);
				clear_and_wake_up_bit(NFSD_FILE_PENDING,
						      &nf->nf_flags);
				if (refcount_dec_and_test(&nf->nf_ref))
					nfsd_file_free(nf);
				nf = NULL;
				fh_put(fhp);
				goto retry;
			}
			status = nfserrno(ret);
			trace_nfsd_file_open(nf, status);
		}
	} else
		status = nfserr_jukebox;
	/*
	 * If construction failed, or we raced with a call to unlink()
	 * then unhash.
	 */
	if (status != nfs_ok || inode->i_nlink == 0)
		nfsd_file_unhash(nf);
	clear_and_wake_up_bit(NFSD_FILE_PENDING, &nf->nf_flags);
	if (status == nfs_ok)
		goto out;

construction_err:
	if (refcount_dec_and_test(&nf->nf_ref))
		nfsd_file_free(nf);
	nf = NULL;
	goto out;
}

/**
 * nfsd_file_acquire_gc - Get a struct nfsd_file with an open file
 * @rqstp: the RPC transaction being executed
 * @fhp: the NFS filehandle of the file to be opened
 * @may_flags: NFSD_MAY_ settings for the file
 * @pnf: OUT: new or found "struct nfsd_file" object
 *
 * The nfsd_file object returned by this API is reference-counted
 * and garbage-collected. The object is retained for a few
 * seconds after the final nfsd_file_put() in case the caller
 * wants to re-use it.
 *
 * Return values:
 *   %nfs_ok - @pnf points to an nfsd_file with its reference
 *   count boosted.
 *
 * On error, an nfsstat value in network byte order is returned.
 */
__be32
nfsd_file_acquire_gc(struct svc_rqst *rqstp, struct svc_fh *fhp,
		     unsigned int may_flags, struct nfsd_file **pnf)
{
	return nfsd_file_do_acquire(rqstp, SVC_NET(rqstp), NULL, NULL,
				    fhp, may_flags, NULL, pnf, true);
}

/**
 * nfsd_file_acquire - Get a struct nfsd_file with an open file
 * @rqstp: the RPC transaction being executed
 * @fhp: the NFS filehandle of the file to be opened
 * @may_flags: NFSD_MAY_ settings for the file
 * @pnf: OUT: new or found "struct nfsd_file" object
 *
 * The nfsd_file object returned by this API is reference-counted
 * but not garbage-collected. The object is unhashed after the
 * final nfsd_file_put().
 *
 * Return values:
 *   %nfs_ok - @pnf points to an nfsd_file with its reference
 *   count boosted.
 *
 * On error, an nfsstat value in network byte order is returned.
 */
__be32
nfsd_file_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
		  unsigned int may_flags, struct nfsd_file **pnf)
{
	return nfsd_file_do_acquire(rqstp, SVC_NET(rqstp), NULL, NULL,
				    fhp, may_flags, NULL, pnf, false);
}

/**
 * nfsd_file_acquire_local - Get a struct nfsd_file with an open file for localio
 * @net: The network namespace in which to perform a lookup
 * @cred: the user credential with which to validate access
 * @client: the auth_domain for LOCALIO lookup
 * @fhp: the NFS filehandle of the file to be opened
 * @may_flags: NFSD_MAY_ settings for the file
 * @pnf: OUT: new or found "struct nfsd_file" object
 *
 * This file lookup interface provides access to a file given the
 * filehandle and credential.  No connection-based authorisation
 * is performed and in that way it is quite different to other
 * file access mediated by nfsd.  It allows a kernel module such as the NFS
 * client to reach across network and filesystem namespaces to access
 * a file.  The security implications of this should be carefully
 * considered before use.
 *
 * The nfsd_file object returned by this API is reference-counted
 * and garbage-collected. The object is retained for a few
 * seconds after the final nfsd_file_put() in case the caller
 * wants to re-use it.
 *
 * Return values:
 *   %nfs_ok - @pnf points to an nfsd_file with its reference
 *   count boosted.
 *
 * On error, an nfsstat value in network byte order is returned.
 */
__be32
nfsd_file_acquire_local(struct net *net, struct svc_cred *cred,
			struct auth_domain *client, struct svc_fh *fhp,
			unsigned int may_flags, struct nfsd_file **pnf)
{
	/*
	 * Save creds before calling nfsd_file_do_acquire() (which calls
	 * nfsd_setuser). Important because caller (LOCALIO) is from
	 * client context.
	 */
	const struct cred *save_cred = get_current_cred();
	__be32 beres;

	beres = nfsd_file_do_acquire(NULL, net, cred, client,
				     fhp, may_flags, NULL, pnf, true);
	revert_creds(save_cred);
	return beres;
}

/**
 * nfsd_file_acquire_opened - Get a struct nfsd_file using existing open file
 * @rqstp: the RPC transaction being executed
 * @fhp: the NFS filehandle of the file just created
 * @may_flags: NFSD_MAY_ settings for the file
 * @file: cached, already-open file (may be NULL)
 * @pnf: OUT: new or found "struct nfsd_file" object
 *
 * Acquire a nfsd_file object that is not GC'ed. If one doesn't already exist,
 * and @file is non-NULL, use it to instantiate a new nfsd_file instead of
 * opening a new one.
 *
 * Return values:
 *   %nfs_ok - @pnf points to an nfsd_file with its reference
 *   count boosted.
 *
 * On error, an nfsstat value in network byte order is returned.
 */
__be32
nfsd_file_acquire_opened(struct svc_rqst *rqstp, struct svc_fh *fhp,
			 unsigned int may_flags, struct file *file,
			 struct nfsd_file **pnf)
{
	return nfsd_file_do_acquire(rqstp, SVC_NET(rqstp), NULL, NULL,
				    fhp, may_flags, file, pnf, false);
}

/*
 * Note that fields may be added, removed or reordered in the future. Programs
 * scraping this file for info should test the labels to ensure they're
 * getting the correct field.
 */
int nfsd_file_cache_stats_show(struct seq_file *m, void *v)
{
	unsigned long allocations = 0, releases = 0, evictions = 0;
	unsigned long hits = 0, acquisitions = 0;
	unsigned int i, count = 0, buckets = 0;
	unsigned long lru = 0, total_age = 0;

	/* Serialize with server shutdown */
	mutex_lock(&nfsd_mutex);
	if (test_bit(NFSD_FILE_CACHE_UP, &nfsd_file_flags) == 1) {
		struct bucket_table *tbl;
		struct rhashtable *ht;

		lru = list_lru_count(&nfsd_file_lru);

		rcu_read_lock();
		ht = &nfsd_file_rhltable.ht;
		count = atomic_read(&ht->nelems);
		tbl = rht_dereference_rcu(ht->tbl, ht);
		buckets = tbl->size;
		rcu_read_unlock();
	}
	mutex_unlock(&nfsd_mutex);

	for_each_possible_cpu(i) {
		hits += per_cpu(nfsd_file_cache_hits, i);
		acquisitions += per_cpu(nfsd_file_acquisitions, i);
		allocations += per_cpu(nfsd_file_allocations, i);
		releases += per_cpu(nfsd_file_releases, i);
		total_age += per_cpu(nfsd_file_total_age, i);
		evictions += per_cpu(nfsd_file_evictions, i);
	}

	seq_printf(m, "total inodes:  %u\n", count);
	seq_printf(m, "hash buckets:  %u\n", buckets);
	seq_printf(m, "lru entries:   %lu\n", lru);
	seq_printf(m, "cache hits:    %lu\n", hits);
	seq_printf(m, "acquisitions:  %lu\n", acquisitions);
	seq_printf(m, "allocations:   %lu\n", allocations);
	seq_printf(m, "releases:      %lu\n", releases);
	seq_printf(m, "evictions:     %lu\n", evictions);
	if (releases)
		seq_printf(m, "mean age (ms): %ld\n", total_age / releases);
	else
		seq_printf(m, "mean age (ms): -\n");
	return 0;
}
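
/*
 * Example of the text this generates (a sketch; the numbers are
 * invented, and the file is typically exposed as
 * /proc/fs/nfsd/filecache):
 *
 *	total inodes:  64
 *	hash buckets:  256
 *	lru entries:   12
 *	cache hits:    18032
 *	acquisitions:  19120
 *	allocations:   1088
 *	releases:      1024
 *	evictions:     512
 *	mean age (ms): 2287
 */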
v5.4
 
  1/*
  2 * Open file cache.
  3 *
  4 * (c) 2015 - Jeff Layton <jeff.layton@primarydata.com>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  5 */
  6
  7#include <linux/hash.h>
  8#include <linux/slab.h>
  9#include <linux/file.h>
 
 10#include <linux/sched.h>
 11#include <linux/list_lru.h>
 12#include <linux/fsnotify_backend.h>
 13#include <linux/fsnotify.h>
 14#include <linux/seq_file.h>
 
 15
 16#include "vfs.h"
 17#include "nfsd.h"
 18#include "nfsfh.h"
 19#include "netns.h"
 20#include "filecache.h"
 21#include "trace.h"
 22
 23#define NFSDDBG_FACILITY	NFSDDBG_FH
 24
 25/* FIXME: dynamically size this for the machine somehow? */
 26#define NFSD_FILE_HASH_BITS                   12
 27#define NFSD_FILE_HASH_SIZE                  (1 << NFSD_FILE_HASH_BITS)
 28#define NFSD_LAUNDRETTE_DELAY		     (2 * HZ)
 29
 30#define NFSD_FILE_LRU_RESCAN		     (0)
 31#define NFSD_FILE_SHUTDOWN		     (1)
 32#define NFSD_FILE_LRU_THRESHOLD		     (4096UL)
 33#define NFSD_FILE_LRU_LIMIT		     (NFSD_FILE_LRU_THRESHOLD << 2)
 34
 35/* We only care about NFSD_MAY_READ/WRITE for this cache */
 36#define NFSD_FILE_MAY_MASK	(NFSD_MAY_READ|NFSD_MAY_WRITE)
 37
 38struct nfsd_fcache_bucket {
 39	struct hlist_head	nfb_head;
 40	spinlock_t		nfb_lock;
 41	unsigned int		nfb_count;
 42	unsigned int		nfb_maxcount;
 
 
 
 
 
 43};
 44
 45static DEFINE_PER_CPU(unsigned long, nfsd_file_cache_hits);
 46
 47static struct kmem_cache		*nfsd_file_slab;
 48static struct kmem_cache		*nfsd_file_mark_slab;
 49static struct nfsd_fcache_bucket	*nfsd_file_hashtbl;
 50static struct list_lru			nfsd_file_lru;
 51static long				nfsd_file_lru_flags;
 52static struct fsnotify_group		*nfsd_file_fsnotify_group;
 53static atomic_long_t			nfsd_filecache_count;
 54static struct delayed_work		nfsd_filecache_laundrette;
 
 
 55
 56enum nfsd_file_laundrette_ctl {
 57	NFSD_FILE_LAUNDRETTE_NOFLUSH = 0,
 58	NFSD_FILE_LAUNDRETTE_MAY_FLUSH
 59};
 60
 61static void
 62nfsd_file_schedule_laundrette(enum nfsd_file_laundrette_ctl ctl)
 63{
 64	long count = atomic_long_read(&nfsd_filecache_count);
 65
 66	if (count == 0 || test_bit(NFSD_FILE_SHUTDOWN, &nfsd_file_lru_flags))
 67		return;
 
 
 
 
 
 
 
 
 
 
 
 
 68
 69	/* Be more aggressive about scanning if over the threshold */
 70	if (count > NFSD_FILE_LRU_THRESHOLD)
 71		mod_delayed_work(system_wq, &nfsd_filecache_laundrette, 0);
 72	else
 73		schedule_delayed_work(&nfsd_filecache_laundrette, NFSD_LAUNDRETTE_DELAY);
 74
 75	if (ctl == NFSD_FILE_LAUNDRETTE_NOFLUSH)
 76		return;
 
 
 
 
 
 77
 78	/* ...and don't delay flushing if we're out of control */
 79	if (count >= NFSD_FILE_LRU_LIMIT)
 80		flush_delayed_work(&nfsd_filecache_laundrette);
 
 
 
 81}
 82
 83static void
 84nfsd_file_slab_free(struct rcu_head *rcu)
 85{
 86	struct nfsd_file *nf = container_of(rcu, struct nfsd_file, nf_rcu);
 87
 88	put_cred(nf->nf_cred);
 89	kmem_cache_free(nfsd_file_slab, nf);
 90}
 91
 92static void
 93nfsd_file_mark_free(struct fsnotify_mark *mark)
 94{
 95	struct nfsd_file_mark *nfm = container_of(mark, struct nfsd_file_mark,
 96						  nfm_mark);
 97
 98	kmem_cache_free(nfsd_file_mark_slab, nfm);
 99}
100
101static struct nfsd_file_mark *
102nfsd_file_mark_get(struct nfsd_file_mark *nfm)
103{
104	if (!atomic_inc_not_zero(&nfm->nfm_ref))
105		return NULL;
106	return nfm;
107}
108
109static void
110nfsd_file_mark_put(struct nfsd_file_mark *nfm)
111{
112	if (atomic_dec_and_test(&nfm->nfm_ref)) {
113
114		fsnotify_destroy_mark(&nfm->nfm_mark, nfsd_file_fsnotify_group);
115		fsnotify_put_mark(&nfm->nfm_mark);
116	}
117}
118
119static struct nfsd_file_mark *
120nfsd_file_mark_find_or_create(struct nfsd_file *nf)
121{
122	int			err;
123	struct fsnotify_mark	*mark;
124	struct nfsd_file_mark	*nfm = NULL, *new;
125	struct inode *inode = nf->nf_inode;
126
127	do {
128		mutex_lock(&nfsd_file_fsnotify_group->mark_mutex);
129		mark = fsnotify_find_mark(&inode->i_fsnotify_marks,
130				nfsd_file_fsnotify_group);
131		if (mark) {
132			nfm = nfsd_file_mark_get(container_of(mark,
133						 struct nfsd_file_mark,
134						 nfm_mark));
135			mutex_unlock(&nfsd_file_fsnotify_group->mark_mutex);
 
 
 
 
 
 
136			fsnotify_put_mark(mark);
137			if (likely(nfm))
138				break;
139		} else
140			mutex_unlock(&nfsd_file_fsnotify_group->mark_mutex);
141
142		/* allocate a new nfm */
143		new = kmem_cache_alloc(nfsd_file_mark_slab, GFP_KERNEL);
144		if (!new)
145			return NULL;
146		fsnotify_init_mark(&new->nfm_mark, nfsd_file_fsnotify_group);
147		new->nfm_mark.mask = FS_ATTRIB|FS_DELETE_SELF;
148		atomic_set(&new->nfm_ref, 1);
149
150		err = fsnotify_add_inode_mark(&new->nfm_mark, inode, 0);
151
152		/*
153		 * If the add was successful, then return the object.
154		 * Otherwise, we need to put the reference we hold on the
155		 * nfm_mark. The fsnotify code will take a reference and put
156		 * it on failure, so we can't just free it directly. It's also
157		 * not safe to call fsnotify_destroy_mark on it as the
158		 * mark->group will be NULL. Thus, we can't let the nfm_ref
159		 * counter drive the destruction at this point.
160		 */
161		if (likely(!err))
162			nfm = new;
163		else
164			fsnotify_put_mark(&new->nfm_mark);
165	} while (unlikely(err == -EEXIST));
166
167	return nfm;
168}
169
170static struct nfsd_file *
171nfsd_file_alloc(struct inode *inode, unsigned int may, unsigned int hashval,
172		struct net *net)
173{
174	struct nfsd_file *nf;
175
176	nf = kmem_cache_alloc(nfsd_file_slab, GFP_KERNEL);
177	if (nf) {
178		INIT_HLIST_NODE(&nf->nf_node);
179		INIT_LIST_HEAD(&nf->nf_lru);
180		nf->nf_file = NULL;
181		nf->nf_cred = get_current_cred();
182		nf->nf_net = net;
183		nf->nf_flags = 0;
184		nf->nf_inode = inode;
185		nf->nf_hashval = hashval;
186		atomic_set(&nf->nf_ref, 1);
187		nf->nf_may = may & NFSD_FILE_MAY_MASK;
188		if (may & NFSD_MAY_NOT_BREAK_LEASE) {
189			if (may & NFSD_MAY_WRITE)
190				__set_bit(NFSD_FILE_BREAK_WRITE, &nf->nf_flags);
191			if (may & NFSD_MAY_READ)
192				__set_bit(NFSD_FILE_BREAK_READ, &nf->nf_flags);
193		}
194		nf->nf_mark = NULL;
195		trace_nfsd_file_alloc(nf);
196	}
197	return nf;
198}
199
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
200static bool
 
 
 
 
 
 
 
 
 
 
201nfsd_file_free(struct nfsd_file *nf)
202{
203	bool flush = false;
 
 
204
205	trace_nfsd_file_put_final(nf);
 
 
 
206	if (nf->nf_mark)
207		nfsd_file_mark_put(nf->nf_mark);
208	if (nf->nf_file) {
209		get_file(nf->nf_file);
210		filp_close(nf->nf_file, NULL);
211		fput(nf->nf_file);
212		flush = true;
213	}
 
 
 
 
 
 
 
 
214	call_rcu(&nf->nf_rcu, nfsd_file_slab_free);
215	return flush;
216}
217
218static bool
219nfsd_file_check_writeback(struct nfsd_file *nf)
220{
221	struct file *file = nf->nf_file;
222	struct address_space *mapping;
223
224	if (!file || !(file->f_mode & FMODE_WRITE))
 
225		return false;
 
 
 
 
 
 
 
 
226	mapping = file->f_mapping;
227	return mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) ||
228		mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK);
229}
230
231static int
232nfsd_file_check_write_error(struct nfsd_file *nf)
233{
234	struct file *file = nf->nf_file;
235
236	if (!file || !(file->f_mode & FMODE_WRITE))
237		return 0;
238	return filemap_check_wb_err(file->f_mapping, READ_ONCE(file->f_wb_err));
239}
240
241static bool
242nfsd_file_in_use(struct nfsd_file *nf)
243{
244	return nfsd_file_check_writeback(nf) ||
245			nfsd_file_check_write_error(nf);
 
 
 
 
246}
247
248static void
249nfsd_file_do_unhash(struct nfsd_file *nf)
250{
251	lockdep_assert_held(&nfsd_file_hashtbl[nf->nf_hashval].nfb_lock);
252
253	trace_nfsd_file_unhash(nf);
254
255	if (nfsd_file_check_write_error(nf))
256		nfsd_reset_boot_verifier(net_generic(nf->nf_net, nfsd_net_id));
257	--nfsd_file_hashtbl[nf->nf_hashval].nfb_count;
258	hlist_del_rcu(&nf->nf_node);
259	if (!list_empty(&nf->nf_lru))
260		list_lru_del(&nfsd_file_lru, &nf->nf_lru);
261	atomic_long_dec(&nfsd_filecache_count);
262}
263
264static bool
265nfsd_file_unhash(struct nfsd_file *nf)
266{
267	if (test_and_clear_bit(NFSD_FILE_HASHED, &nf->nf_flags)) {
268		nfsd_file_do_unhash(nf);
269		return true;
270	}
271	return false;
272}
273
274/*
275 * Return true if the file was unhashed.
276 */
277static bool
278nfsd_file_unhash_and_release_locked(struct nfsd_file *nf, struct list_head *dispose)
279{
280	lockdep_assert_held(&nfsd_file_hashtbl[nf->nf_hashval].nfb_lock);
281
282	trace_nfsd_file_unhash_and_release_locked(nf);
283	if (!nfsd_file_unhash(nf))
284		return false;
285	/* keep final reference for nfsd_file_lru_dispose */
286	if (atomic_add_unless(&nf->nf_ref, -1, 1))
287		return true;
288
289	list_add(&nf->nf_lru, dispose);
290	return true;
291}
292
293static int
294nfsd_file_put_noref(struct nfsd_file *nf)
 
 
 
 
 
 
 
 
295{
296	int count;
297	trace_nfsd_file_put(nf);
298
299	count = atomic_dec_return(&nf->nf_ref);
300	if (!count) {
301		WARN_ON(test_bit(NFSD_FILE_HASHED, &nf->nf_flags));
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
302		nfsd_file_free(nf);
303	}
304	return count;
305}
306
307void
308nfsd_file_put(struct nfsd_file *nf)
 
 
 
 
 
 
 
309{
310	bool is_hashed = test_bit(NFSD_FILE_HASHED, &nf->nf_flags) != 0;
311	bool unused = !nfsd_file_in_use(nf);
312
313	set_bit(NFSD_FILE_REFERENCED, &nf->nf_flags);
314	if (nfsd_file_put_noref(nf) == 1 && is_hashed && unused)
315		nfsd_file_schedule_laundrette(NFSD_FILE_LAUNDRETTE_MAY_FLUSH);
316}
317
318struct nfsd_file *
319nfsd_file_get(struct nfsd_file *nf)
 
 
 
 
 
 
320{
321	if (likely(atomic_inc_not_zero(&nf->nf_ref)))
322		return nf;
323	return NULL;
324}
325
326static void
327nfsd_file_dispose_list(struct list_head *dispose)
328{
329	struct nfsd_file *nf;
330
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
331	while(!list_empty(dispose)) {
332		nf = list_first_entry(dispose, struct nfsd_file, nf_lru);
333		list_del(&nf->nf_lru);
334		nfsd_file_put_noref(nf);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
335	}
336}
337
338static void
339nfsd_file_dispose_list_sync(struct list_head *dispose)
 
 
 
 
 
 
 
 
340{
341	bool flush = false;
342	struct nfsd_file *nf;
343
344	while(!list_empty(dispose)) {
345		nf = list_first_entry(dispose, struct nfsd_file, nf_lru);
346		list_del(&nf->nf_lru);
347		if (!atomic_dec_and_test(&nf->nf_ref))
348			continue;
349		if (nfsd_file_free(nf))
350			flush = true;
 
 
 
 
 
 
 
351	}
352	if (flush)
353		flush_delayed_fput();
354}
355
356/*
357 * Note this can deadlock with nfsd_file_cache_purge.
 
 
 
 
 
 
 
 
358 */
359static enum lru_status
360nfsd_file_lru_cb(struct list_head *item, struct list_lru_one *lru,
361		 spinlock_t *lock, void *arg)
362	__releases(lock)
363	__acquires(lock)
364{
365	struct list_head *head = arg;
366	struct nfsd_file *nf = list_entry(item, struct nfsd_file, nf_lru);
367
368	/*
369	 * Do a lockless refcount check. The hashtable holds one reference, so
370	 * we look to see if anything else has a reference, or if any have
371	 * been put since the shrinker last ran. Those don't get unhashed and
372	 * released.
373	 *
374	 * Note that in the put path, we set the flag and then decrement the
375	 * counter. Here we check the counter and then test and clear the flag.
376	 * That order is deliberate to ensure that we can do this locklessly.
377	 */
378	if (atomic_read(&nf->nf_ref) > 1)
379		goto out_skip;
380
381	/*
382	 * Don't throw out files that are still undergoing I/O or
383	 * that have uncleared errors pending.
384	 */
385	if (nfsd_file_check_writeback(nf))
386		goto out_skip;
 
 
387
388	if (test_and_clear_bit(NFSD_FILE_REFERENCED, &nf->nf_flags))
389		goto out_rescan;
 
 
 
390
391	if (!test_and_clear_bit(NFSD_FILE_HASHED, &nf->nf_flags))
392		goto out_skip;
 
 
 
 
 
 
 
393
394	list_lru_isolate_move(lru, &nf->nf_lru, head);
 
 
 
 
 
395	return LRU_REMOVED;
396out_rescan:
397	set_bit(NFSD_FILE_LRU_RESCAN, &nfsd_file_lru_flags);
398out_skip:
399	return LRU_SKIP;
400}
401
402static void
403nfsd_file_lru_dispose(struct list_head *head)
404{
405	while(!list_empty(head)) {
406		struct nfsd_file *nf = list_first_entry(head,
407				struct nfsd_file, nf_lru);
408		list_del_init(&nf->nf_lru);
409		spin_lock(&nfsd_file_hashtbl[nf->nf_hashval].nfb_lock);
410		nfsd_file_do_unhash(nf);
411		spin_unlock(&nfsd_file_hashtbl[nf->nf_hashval].nfb_lock);
412		nfsd_file_put_noref(nf);
413	}
 
 
 
 
 
 
414}
415
416static unsigned long
417nfsd_file_lru_count(struct shrinker *s, struct shrink_control *sc)
418{
419	return list_lru_count(&nfsd_file_lru);
420}
421
422static unsigned long
423nfsd_file_lru_scan(struct shrinker *s, struct shrink_control *sc)
424{
425	LIST_HEAD(head);
426	unsigned long ret;
427
428	ret = list_lru_shrink_walk(&nfsd_file_lru, sc, nfsd_file_lru_cb, &head);
429	nfsd_file_lru_dispose(&head);
 
 
430	return ret;
431}
432
433static struct shrinker	nfsd_file_shrinker = {
434	.scan_objects = nfsd_file_lru_scan,
435	.count_objects = nfsd_file_lru_count,
436	.seeks = 1,
437};
438
 
 
 
 
 
 
 
 
439static void
440__nfsd_file_close_inode(struct inode *inode, unsigned int hashval,
441			struct list_head *dispose)
442{
443	struct nfsd_file	*nf;
444	struct hlist_node	*tmp;
445
446	spin_lock(&nfsd_file_hashtbl[hashval].nfb_lock);
447	hlist_for_each_entry_safe(nf, tmp, &nfsd_file_hashtbl[hashval].nfb_head, nf_node) {
448		if (inode == nf->nf_inode)
449			nfsd_file_unhash_and_release_locked(nf, dispose);
 
 
 
 
 
 
 
 
 
 
 
 
450	}
451	spin_unlock(&nfsd_file_hashtbl[hashval].nfb_lock);
452}
453
454/**
455 * nfsd_file_close_inode_sync - attempt to forcibly close a nfsd_file
456 * @inode: inode of the file to attempt to remove
 
 
 
 
 
 
 
 
457 *
458 * Walk the whole hash bucket, looking for any files that correspond to "inode".
459 * If any do, then unhash them and put the hashtable reference to them and
460 * destroy any that had their last reference put. Also ensure that any of the
461 * fputs also have their final __fput done as well.
462 */
463void
464nfsd_file_close_inode_sync(struct inode *inode)
465{
466	unsigned int		hashval = (unsigned int)hash_long(inode->i_ino,
467						NFSD_FILE_HASH_BITS);
468	LIST_HEAD(dispose);
469
470	__nfsd_file_close_inode(inode, hashval, &dispose);
471	trace_nfsd_file_close_inode_sync(inode, hashval, !list_empty(&dispose));
472	nfsd_file_dispose_list_sync(&dispose);
 
 
 
 
 
 
473}
474
475/**
476 * nfsd_file_close_inode_sync - attempt to forcibly close a nfsd_file
477 * @inode: inode of the file to attempt to remove
478 *
479 * Walk the whole hash bucket, looking for any files that correspond to "inode".
480 * If any do, then unhash them and put the hashtable reference to them and
481 * destroy any that had their last reference put.
 
482 */
483static void
484nfsd_file_close_inode(struct inode *inode)
485{
486	unsigned int		hashval = (unsigned int)hash_long(inode->i_ino,
487						NFSD_FILE_HASH_BITS);
488	LIST_HEAD(dispose);
489
490	__nfsd_file_close_inode(inode, hashval, &dispose);
491	trace_nfsd_file_close_inode(inode, hashval, !list_empty(&dispose));
492	nfsd_file_dispose_list(&dispose);
493}
494
495/**
496 * nfsd_file_delayed_close - close unused nfsd_files
497 * @work: dummy
498 *
499 * Walk the LRU list and close any entries that have not been used since
500 * the last scan.
501 *
502 * Note this can deadlock with nfsd_file_cache_purge.
503 */
504static void
505nfsd_file_delayed_close(struct work_struct *work)
506{
507	LIST_HEAD(head);
508
509	list_lru_walk(&nfsd_file_lru, nfsd_file_lru_cb, &head, LONG_MAX);
510
511	if (test_and_clear_bit(NFSD_FILE_LRU_RESCAN, &nfsd_file_lru_flags))
512		nfsd_file_schedule_laundrette(NFSD_FILE_LAUNDRETTE_NOFLUSH);
513
514	if (!list_empty(&head)) {
515		nfsd_file_lru_dispose(&head);
516		flush_delayed_fput();
517	}
518}
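/*
 * Note (summary): nfsd_file_schedule_laundrette() re-arms the
 * nfsd_filecache_laundrette delayed work, so a scan that found
 * still-referenced entries causes this function to run again after
 * the laundrette delay rather than leaving those entries behind.
 */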
519
520static int
521nfsd_file_lease_notifier_call(struct notifier_block *nb, unsigned long arg,
522			    void *data)
523{
524	struct file_lock *fl = data;
525
526	/* Only close files for F_SETLEASE leases */
527	if (fl->fl_flags & FL_LEASE)
528		nfsd_file_close_inode_sync(file_inode(fl->fl_file));
529	return 0;
530}
531
532static struct notifier_block nfsd_file_lease_notifier = {
533	.notifier_call = nfsd_file_lease_notifier_call,
534};
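/*
 * Rationale (summary): a cached open holds a struct file reference
 * that looks like a conflicting open to the lease code. Closing any
 * cached files for the inode synchronously on F_SETLEASE events keeps
 * this cache from blocking lease acquisition.
 */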
535
536static int
537nfsd_file_fsnotify_handle_event(struct fsnotify_group *group,
538				struct inode *inode,
539				u32 mask, const void *data, int data_type,
540				const struct qstr *file_name, u32 cookie,
541				struct fsnotify_iter_info *iter_info)
542{
543	trace_nfsd_file_fsnotify_handle_event(inode, mask);
544
545	/* Should be no marks on non-regular files */
546	if (!S_ISREG(inode->i_mode)) {
547		WARN_ON_ONCE(1);
548		return 0;
549	}
550
551	/* don't close files if this was not the last link */
552	if (mask & FS_ATTRIB) {
553		if (inode->i_nlink)
554			return 0;
555	}
556
557	nfsd_file_close_inode(inode);
558	return 0;
559}
560
561
562static const struct fsnotify_ops nfsd_file_fsnotify_ops = {
563	.handle_event = nfsd_file_fsnotify_handle_event,
564	.free_mark = nfsd_file_mark_free,
565};
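/*
 * Rationale (summary): each cached file carries an fsnotify mark so
 * the handler above can drop cached opens once the last link to the
 * file is gone; otherwise an unlinked-but-cached file would pin its
 * disk space until the laundrette eventually aged it out.
 */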
566
567int
568nfsd_file_cache_init(void)
569{
570	int		ret = -ENOMEM;
571	unsigned int	i;
572
573	clear_bit(NFSD_FILE_SHUTDOWN, &nfsd_file_lru_flags);
574
575	if (nfsd_file_hashtbl)
576		return 0;
577
578	nfsd_file_hashtbl = kcalloc(NFSD_FILE_HASH_SIZE,
579				sizeof(*nfsd_file_hashtbl), GFP_KERNEL);
580	if (!nfsd_file_hashtbl) {
581		pr_err("nfsd: unable to allocate nfsd_file_hashtbl\n");
582		goto out_err;
583	}
584
585	nfsd_file_slab = kmem_cache_create("nfsd_file",
586				sizeof(struct nfsd_file), 0, 0, NULL);
587	if (!nfsd_file_slab) {
588		pr_err("nfsd: unable to create nfsd_file_slab\n");
589		goto out_err;
590	}
591
592	nfsd_file_mark_slab = kmem_cache_create("nfsd_file_mark",
593					sizeof(struct nfsd_file_mark), 0, 0, NULL);
594	if (!nfsd_file_mark_slab) {
595		pr_err("nfsd: unable to create nfsd_file_mark_slab\n");
596		goto out_err;
597	}
598
599
600	ret = list_lru_init(&nfsd_file_lru);
601	if (ret) {
602		pr_err("nfsd: failed to init nfsd_file_lru: %d\n", ret);
603		goto out_err;
604	}
605
606	ret = register_shrinker(&nfsd_file_shrinker);
607	if (ret) {
608		pr_err("nfsd: failed to register nfsd_file_shrinker: %d\n", ret);
609		goto out_lru;
610	}
611
612	ret = lease_register_notifier(&nfsd_file_lease_notifier);
613	if (ret) {
614		pr_err("nfsd: unable to register lease notifier: %d\n", ret);
615		goto out_shrinker;
616	}
617
618	nfsd_file_fsnotify_group = fsnotify_alloc_group(&nfsd_file_fsnotify_ops);
619	if (IS_ERR(nfsd_file_fsnotify_group)) {
620		pr_err("nfsd: unable to create fsnotify group: %ld\n",
621			PTR_ERR(nfsd_file_fsnotify_group));
622		nfsd_file_fsnotify_group = NULL;
623		goto out_notifier;
624	}
625
626	for (i = 0; i < NFSD_FILE_HASH_SIZE; i++) {
627		INIT_HLIST_HEAD(&nfsd_file_hashtbl[i].nfb_head);
628		spin_lock_init(&nfsd_file_hashtbl[i].nfb_lock);
629	}
630
631	INIT_DELAYED_WORK(&nfsd_filecache_laundrette, nfsd_file_delayed_close);
632out:
633	return ret;
634out_notifier:
635	lease_unregister_notifier(&nfsd_file_lease_notifier);
636out_shrinker:
637	unregister_shrinker(&nfsd_file_shrinker);
638out_lru:
639	list_lru_destroy(&nfsd_file_lru);
640out_err:
641	kmem_cache_destroy(nfsd_file_slab);
642	nfsd_file_slab = NULL;
643	kmem_cache_destroy(nfsd_file_mark_slab);
644	nfsd_file_mark_slab = NULL;
645	kfree(nfsd_file_hashtbl);
646	nfsd_file_hashtbl = NULL;
647	goto out;
648}
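/*
 * Note: the unwind labels above run in reverse order of setup, each
 * undoing one completed step, so a failure partway through
 * initialization leaves no shrinker, notifier, or allocation behind.
 */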
649
650/*
651 * Note this can deadlock with nfsd_file_lru_cb.
652 */
653void
654nfsd_file_cache_purge(struct net *net)
655{
656	unsigned int		i;
657	struct nfsd_file	*nf;
658	struct hlist_node	*next;
659	LIST_HEAD(dispose);
660	bool del;
661
662	if (!nfsd_file_hashtbl)
663		return;
664
665	for (i = 0; i < NFSD_FILE_HASH_SIZE; i++) {
666		struct nfsd_fcache_bucket *nfb = &nfsd_file_hashtbl[i];
667
668		spin_lock(&nfb->nfb_lock);
669		hlist_for_each_entry_safe(nf, next, &nfb->nfb_head, nf_node) {
670			if (net && nf->nf_net != net)
671				continue;
672			del = nfsd_file_unhash_and_release_locked(nf, &dispose);
673
674			/*
675			 * Deadlock detected! Something marked this entry as
676			 * unhashed, but hasn't removed it from the hash list.
677			 */
678			WARN_ON_ONCE(!del);
679		}
680		spin_unlock(&nfb->nfb_lock);
681		nfsd_file_dispose_list(&dispose);
682	}
683}
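/*
 * Note: a NULL @net purges every entry regardless of network
 * namespace (this is what nfsd_file_cache_shutdown() below relies
 * on); a non-NULL @net restricts the purge to files opened on behalf
 * of that namespace.
 */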
684
685void
686nfsd_file_cache_shutdown(void)
687{
688	LIST_HEAD(dispose);
689
690	set_bit(NFSD_FILE_SHUTDOWN, &nfsd_file_lru_flags);
691
692	lease_unregister_notifier(&nfsd_file_lease_notifier);
693	unregister_shrinker(&nfsd_file_shrinker);
694	/*
695	 * make sure all callers of nfsd_file_lru_cb are done before
696	 * calling nfsd_file_cache_purge
697	 */
698	cancel_delayed_work_sync(&nfsd_filecache_laundrette);
699	nfsd_file_cache_purge(NULL);
700	list_lru_destroy(&nfsd_file_lru);
701	rcu_barrier();
702	fsnotify_put_group(nfsd_file_fsnotify_group);
703	nfsd_file_fsnotify_group = NULL;
704	kmem_cache_destroy(nfsd_file_slab);
705	nfsd_file_slab = NULL;
706	fsnotify_wait_marks_destroyed();
707	kmem_cache_destroy(nfsd_file_mark_slab);
708	nfsd_file_mark_slab = NULL;
709	kfree(nfsd_file_hashtbl);
710	nfsd_file_hashtbl = NULL;
711}
712
713static bool
714nfsd_match_cred(const struct cred *c1, const struct cred *c2)
715{
716	int i;
717
718	if (!uid_eq(c1->fsuid, c2->fsuid))
719		return false;
720	if (!gid_eq(c1->fsgid, c2->fsgid))
721		return false;
722	if (c1->group_info == NULL || c2->group_info == NULL)
723		return c1->group_info == c2->group_info;
724	if (c1->group_info->ngroups != c2->group_info->ngroups)
725		return false;
726	for (i = 0; i < c1->group_info->ngroups; i++) {
727		if (!gid_eq(c1->group_info->gid[i], c2->group_info->gid[i]))
728			return false;
729	}
730	return true;
731}
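/*
 * Example: a request from fsuid 1000 with supplementary groups
 * {100, 27} does not match a file cached for fsuid 1000 with groups
 * {100}, because the cached struct file was opened under the original
 * credential; only an exact fsuid/fsgid/group-list match may reuse it.
 */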
732
733static struct nfsd_file *
734nfsd_file_find_locked(struct inode *inode, unsigned int may_flags,
735			unsigned int hashval, struct net *net)
736{
737	struct nfsd_file *nf;
738	unsigned char need = may_flags & NFSD_FILE_MAY_MASK;
739
740	hlist_for_each_entry_rcu(nf, &nfsd_file_hashtbl[hashval].nfb_head,
741				 nf_node) {
742		if ((need & nf->nf_may) != need)
743			continue;
744		if (nf->nf_inode != inode)
745			continue;
746		if (nf->nf_net != net)
747			continue;
748		if (!nfsd_match_cred(nf->nf_cred, current_cred()))
749			continue;
750		if (nfsd_file_get(nf) != NULL)
751			return nf;
752	}
753	return NULL;
754}
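/*
 * Note: callers must hold either rcu_read_lock() or the bucket's
 * nfb_lock across this search. nfsd_file_get() can return NULL when
 * it races with the final put of an entry, in which case the loop
 * keeps scanning for another usable match.
 */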
755
756/**
757 * nfsd_file_is_cached - are there any cached open files for this inode?
758 * @inode: inode of the file to check
759 *
760 * Scan the hashtable for open files that match this inode. Returns true if there
761 * are any, and false if not.
762 */
763bool
764nfsd_file_is_cached(struct inode *inode)
765{
766	bool			ret = false;
767	struct nfsd_file	*nf;
768	unsigned int		hashval;
769
770	hashval = (unsigned int)hash_long(inode->i_ino, NFSD_FILE_HASH_BITS);
771
772	rcu_read_lock();
773	hlist_for_each_entry_rcu(nf, &nfsd_file_hashtbl[hashval].nfb_head,
774				 nf_node) {
775		if (inode == nf->nf_inode) {
776			ret = true;
777			break;
778		}
779	}
780	rcu_read_unlock();
781	trace_nfsd_file_is_cached(inode, hashval, (int)ret);
782	return ret;
783}
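/*
 * Note: this helper takes no reference and holds only the RCU read
 * lock, so the result is inherently racy and can be stale by the time
 * the caller acts on it; treat it as a hint, not a guarantee.
 */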
784
785__be32
786nfsd_file_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
787		  unsigned int may_flags, struct nfsd_file **pnf)
788{
789	__be32	status;
790	struct net *net = SVC_NET(rqstp);
791	struct nfsd_file *nf, *new;
792	struct inode *inode;
793	unsigned int hashval;
794
795	/* FIXME: skip this if fh_dentry is already set? */
796	status = fh_verify(rqstp, fhp, S_IFREG,
797				may_flags|NFSD_MAY_OWNER_OVERRIDE);
798	if (status != nfs_ok)
799		return status;
800
801	inode = d_inode(fhp->fh_dentry);
802	hashval = (unsigned int)hash_long(inode->i_ino, NFSD_FILE_HASH_BITS);
803retry:
804	rcu_read_lock();
805	nf = nfsd_file_find_locked(inode, may_flags, hashval, net);
806	rcu_read_unlock();
807	if (nf)
808		goto wait_for_construction;
809
810	new = nfsd_file_alloc(inode, may_flags, hashval, net);
811	if (!new) {
812		trace_nfsd_file_acquire(rqstp, hashval, inode, may_flags,
813					NULL, nfserr_jukebox);
814		return nfserr_jukebox;
815	}
816
817	spin_lock(&nfsd_file_hashtbl[hashval].nfb_lock);
818	nf = nfsd_file_find_locked(inode, may_flags, hashval, net);
819	if (nf == NULL)
820		goto open_file;
821	spin_unlock(&nfsd_file_hashtbl[hashval].nfb_lock);
822	nfsd_file_slab_free(&new->nf_rcu);
823
824wait_for_construction:
825	wait_on_bit(&nf->nf_flags, NFSD_FILE_PENDING, TASK_UNINTERRUPTIBLE);
826
827	/* Did construction of this file fail? */
828	if (!test_bit(NFSD_FILE_HASHED, &nf->nf_flags)) {
829		nfsd_file_put_noref(nf);
830		goto retry;
831	}
832
833	this_cpu_inc(nfsd_file_cache_hits);
834
835	if (!(may_flags & NFSD_MAY_NOT_BREAK_LEASE)) {
836		bool write = (may_flags & NFSD_MAY_WRITE);
837
838		if (test_bit(NFSD_FILE_BREAK_READ, &nf->nf_flags) ||
839		    (test_bit(NFSD_FILE_BREAK_WRITE, &nf->nf_flags) && write)) {
840			status = nfserrno(nfsd_open_break_lease(
841					file_inode(nf->nf_file), may_flags));
842			if (status == nfs_ok) {
843				clear_bit(NFSD_FILE_BREAK_READ, &nf->nf_flags);
844				if (write)
845					clear_bit(NFSD_FILE_BREAK_WRITE,
846						  &nf->nf_flags);
847			}
848		}
849	}
850out:
851	if (status == nfs_ok) {
852		*pnf = nf;
853	} else {
854		nfsd_file_put(nf);
855		nf = NULL;
856	}
857
858	trace_nfsd_file_acquire(rqstp, hashval, inode, may_flags, nf, status);
859	return status;
860open_file:
861	nf = new;
862	/* Take reference for the hashtable */
863	atomic_inc(&nf->nf_ref);
864	__set_bit(NFSD_FILE_HASHED, &nf->nf_flags);
865	__set_bit(NFSD_FILE_PENDING, &nf->nf_flags);
866	list_lru_add(&nfsd_file_lru, &nf->nf_lru);
867	hlist_add_head_rcu(&nf->nf_node, &nfsd_file_hashtbl[hashval].nfb_head);
868	++nfsd_file_hashtbl[hashval].nfb_count;
869	nfsd_file_hashtbl[hashval].nfb_maxcount = max(nfsd_file_hashtbl[hashval].nfb_maxcount,
870			nfsd_file_hashtbl[hashval].nfb_count);
871	spin_unlock(&nfsd_file_hashtbl[hashval].nfb_lock);
872	atomic_long_inc(&nfsd_filecache_count);
873
874	nf->nf_mark = nfsd_file_mark_find_or_create(nf);
875	if (nf->nf_mark)
876		status = nfsd_open_verified(rqstp, fhp, S_IFREG,
877				may_flags, &nf->nf_file);
878	else
879		status = nfserr_jukebox;
880	/*
881	 * If construction failed, or we raced with a call to unlink()
882	 * then unhash.
883	 */
884	if (status != nfs_ok || inode->i_nlink == 0) {
885		bool do_free;
886		spin_lock(&nfsd_file_hashtbl[hashval].nfb_lock);
887		do_free = nfsd_file_unhash(nf);
888		spin_unlock(&nfsd_file_hashtbl[hashval].nfb_lock);
889		if (do_free)
890			nfsd_file_put_noref(nf);
891	}
892	clear_bit_unlock(NFSD_FILE_PENDING, &nf->nf_flags);
893	smp_mb__after_atomic();
894	wake_up_bit(&nf->nf_flags, NFSD_FILE_PENDING);
895	goto out;
896}
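/*
 * Example (hypothetical caller; the function name is invented for
 * illustration and is not part of the original source): the expected
 * acquire/put pairing around I/O against nf->nf_file.
 */
static __be32 __maybe_unused
example_read_open(struct svc_rqst *rqstp, struct svc_fh *fhp)
{
	struct nfsd_file *nf;
	__be32 status;

	status = nfsd_file_acquire(rqstp, fhp, NFSD_MAY_READ, &nf);
	if (status != nfs_ok)
		return status;

	/* ... issue reads against nf->nf_file here ... */

	nfsd_file_put(nf);
	return nfs_ok;
}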
897
898/*
899 * Note that fields may be added, removed or reordered in the future. Programs
900 * scraping this file for info should test the labels to ensure they're
901 * getting the correct field.
902 */
903static int nfsd_file_cache_stats_show(struct seq_file *m, void *v)
904{
905	unsigned int i, count = 0, longest = 0;
906	unsigned long hits = 0;
907
908	/*
909	 * No need for spinlocks here since we're not terribly interested in
910	 * accuracy. We do take the nfsd_mutex simply to ensure that we
911	 * don't end up racing with server shutdown.
912	 */
913	mutex_lock(&nfsd_mutex);
914	if (nfsd_file_hashtbl) {
915		for (i = 0; i < NFSD_FILE_HASH_SIZE; i++) {
916			count += nfsd_file_hashtbl[i].nfb_count;
917			longest = max(longest, nfsd_file_hashtbl[i].nfb_count);
918		}
919	}
920	mutex_unlock(&nfsd_mutex);
921
922	for_each_possible_cpu(i)
923		hits += per_cpu(nfsd_file_cache_hits, i);
924
925	seq_printf(m, "total entries: %u\n", count);
926	seq_printf(m, "longest chain: %u\n", longest);
927	seq_printf(m, "cache hits:    %lu\n", hits);
928	return 0;
929}
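/*
 * Sample output (values invented for illustration):
 *
 *	total entries: 4096
 *	longest chain: 3
 *	cache hits:    181202
 */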
930
931int nfsd_file_cache_stats_open(struct inode *inode, struct file *file)
932{
933	return single_open(file, nfsd_file_cache_stats_show, NULL);
934}