v5.4
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * net/sunrpc/cache.c
   4 *
   5 * Generic code for various authentication-related caches
   6 * used by sunrpc clients and servers.
   7 *
   8 * Copyright (C) 2002 Neil Brown <neilb@cse.unsw.edu.au>
   9 */
  10
  11#include <linux/types.h>
  12#include <linux/fs.h>
  13#include <linux/file.h>
  14#include <linux/slab.h>
  15#include <linux/signal.h>
  16#include <linux/sched.h>
  17#include <linux/kmod.h>
  18#include <linux/list.h>
  19#include <linux/module.h>
  20#include <linux/ctype.h>
  21#include <linux/string_helpers.h>
  22#include <linux/uaccess.h>
  23#include <linux/poll.h>
  24#include <linux/seq_file.h>
  25#include <linux/proc_fs.h>
  26#include <linux/net.h>
  27#include <linux/workqueue.h>
  28#include <linux/mutex.h>
  29#include <linux/pagemap.h>
  30#include <asm/ioctls.h>
  31#include <linux/sunrpc/types.h>
  32#include <linux/sunrpc/cache.h>
  33#include <linux/sunrpc/stats.h>
  34#include <linux/sunrpc/rpc_pipe_fs.h>
  35#include "netns.h"
  36
  37#define	 RPCDBG_FACILITY RPCDBG_CACHE
  38
  39static bool cache_defer_req(struct cache_req *req, struct cache_head *item);
  40static void cache_revisit_request(struct cache_head *item);
  41static bool cache_listeners_exist(struct cache_detail *detail);
  42
  43static void cache_init(struct cache_head *h, struct cache_detail *detail)
  44{
  45	time_t now = seconds_since_boot();
  46	INIT_HLIST_NODE(&h->cache_list);
  47	h->flags = 0;
  48	kref_init(&h->ref);
  49	h->expiry_time = now + CACHE_NEW_EXPIRY;
  50	if (now <= detail->flush_time)
  51		/* ensure it isn't already expired */
  52		now = detail->flush_time + 1;
  53	h->last_refresh = now;
  54}
  55
  56static inline int cache_is_valid(struct cache_head *h);
  57static void cache_fresh_locked(struct cache_head *head, time_t expiry,
  58				struct cache_detail *detail);
  59static void cache_fresh_unlocked(struct cache_head *head,
  60				struct cache_detail *detail);
  61
  62static struct cache_head *sunrpc_cache_find_rcu(struct cache_detail *detail,
  63						struct cache_head *key,
  64						int hash)
  65{
  66	struct hlist_head *head = &detail->hash_table[hash];
  67	struct cache_head *tmp;
  68
  69	rcu_read_lock();
  70	hlist_for_each_entry_rcu(tmp, head, cache_list) {
  71		if (detail->match(tmp, key)) {
  72			if (cache_is_expired(detail, tmp))
  73				continue;
  74			tmp = cache_get_rcu(tmp);
  75			rcu_read_unlock();
  76			return tmp;
  77		}
  78	}
  79	rcu_read_unlock();
  80	return NULL;
  81}
  82
  83static struct cache_head *sunrpc_cache_add_entry(struct cache_detail *detail,
  84						 struct cache_head *key,
  85						 int hash)
  86{
  87	struct cache_head *new, *tmp, *freeme = NULL;
  88	struct hlist_head *head = &detail->hash_table[hash];
  89
  90	new = detail->alloc();
  91	if (!new)
  92		return NULL;
  93	/* must fully initialise 'new', else
   94	 * we might get lost if we need to
  95	 * cache_put it soon.
  96	 */
  97	cache_init(new, detail);
  98	detail->init(new, key);
  99
 100	spin_lock(&detail->hash_lock);
 101
 102	/* check if entry appeared while we slept */
 103	hlist_for_each_entry_rcu(tmp, head, cache_list) {
 104		if (detail->match(tmp, key)) {
 105			if (cache_is_expired(detail, tmp)) {
 106				hlist_del_init_rcu(&tmp->cache_list);
  107			detail->entries--;
 108				if (cache_is_valid(tmp) == -EAGAIN)
 109					set_bit(CACHE_NEGATIVE, &tmp->flags);
 110				cache_fresh_locked(tmp, 0, detail);
 111				freeme = tmp;
 112				break;
 113			}
 114			cache_get(tmp);
 115			spin_unlock(&detail->hash_lock);
 116			cache_put(new, detail);
 117			return tmp;
 118		}
 119	}
 120
 121	hlist_add_head_rcu(&new->cache_list, head);
 122	detail->entries++;
 123	cache_get(new);
 124	spin_unlock(&detail->hash_lock);
 125
 126	if (freeme) {
 127		cache_fresh_unlocked(freeme, detail);
 128		cache_put(freeme, detail);
 129	}
 130	return new;
 131}
 132
 133struct cache_head *sunrpc_cache_lookup_rcu(struct cache_detail *detail,
 134					   struct cache_head *key, int hash)
 135{
 136	struct cache_head *ret;
 137
 138	ret = sunrpc_cache_find_rcu(detail, key, hash);
 139	if (ret)
 140		return ret;
 141	/* Didn't find anything, insert an empty entry */
 142	return sunrpc_cache_add_entry(detail, key, hash);
 143}
 144EXPORT_SYMBOL_GPL(sunrpc_cache_lookup_rcu);
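/*
 * A minimal usage sketch, not part of cache.c: "demo_map" is a
 * hypothetical cache type invented here to show the calling
 * convention.  A real cache embeds a cache_head in its own entry type,
 * supplies alloc/init/match/update callbacks in its cache_detail, and
 * wraps sunrpc_cache_lookup_rcu() roughly like this.
 */
#if 0	/* illustrative only */
struct demo_map {
	struct cache_head	h;
	__u32			key;
	__u32			value;
};

static int demo_match(struct cache_head *a, struct cache_head *b)
{
	return container_of(a, struct demo_map, h)->key ==
	       container_of(b, struct demo_map, h)->key;
}

static struct demo_map *demo_lookup(struct cache_detail *cd, __u32 key)
{
	struct demo_map tmp = { .key = key };
	struct cache_head *ch;
	int hash = key % cd->hash_size;	/* placeholder hash function */

	ch = sunrpc_cache_lookup_rcu(cd, &tmp.h, hash);
	if (!ch)
		return NULL;	/* allocation failed */
	/* we now hold a reference; drop it with cache_put(ch, cd) */
	return container_of(ch, struct demo_map, h);
}
#endif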
 145
 146static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch);
 147
 148static void cache_fresh_locked(struct cache_head *head, time_t expiry,
 149			       struct cache_detail *detail)
 150{
 151	time_t now = seconds_since_boot();
 152	if (now <= detail->flush_time)
 153		/* ensure it isn't immediately treated as expired */
 154		now = detail->flush_time + 1;
 155	head->expiry_time = expiry;
 156	head->last_refresh = now;
 157	smp_wmb(); /* paired with smp_rmb() in cache_is_valid() */
 158	set_bit(CACHE_VALID, &head->flags);
 159}
 160
 161static void cache_fresh_unlocked(struct cache_head *head,
 162				 struct cache_detail *detail)
 163{
 164	if (test_and_clear_bit(CACHE_PENDING, &head->flags)) {
 165		cache_revisit_request(head);
 166		cache_dequeue(detail, head);
 167	}
 168}
 169
 170struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
 171				       struct cache_head *new, struct cache_head *old, int hash)
 172{
 173	/* The 'old' entry is to be replaced by 'new'.
 174	 * If 'old' is not VALID, we update it directly,
 175	 * otherwise we need to replace it
 176	 */
 177	struct cache_head *tmp;
 178
 179	if (!test_bit(CACHE_VALID, &old->flags)) {
 180		spin_lock(&detail->hash_lock);
 181		if (!test_bit(CACHE_VALID, &old->flags)) {
 182			if (test_bit(CACHE_NEGATIVE, &new->flags))
 183				set_bit(CACHE_NEGATIVE, &old->flags);
 184			else
 185				detail->update(old, new);
 186			cache_fresh_locked(old, new->expiry_time, detail);
 187			spin_unlock(&detail->hash_lock);
 188			cache_fresh_unlocked(old, detail);
 189			return old;
 190		}
 191		spin_unlock(&detail->hash_lock);
 192	}
 193	/* We need to insert a new entry */
 194	tmp = detail->alloc();
 195	if (!tmp) {
 196		cache_put(old, detail);
 197		return NULL;
 198	}
 199	cache_init(tmp, detail);
 200	detail->init(tmp, old);
 201
 202	spin_lock(&detail->hash_lock);
 203	if (test_bit(CACHE_NEGATIVE, &new->flags))
 204		set_bit(CACHE_NEGATIVE, &tmp->flags);
 205	else
 206		detail->update(tmp, new);
 207	hlist_add_head(&tmp->cache_list, &detail->hash_table[hash]);
 208	detail->entries++;
 209	cache_get(tmp);
 210	cache_fresh_locked(tmp, new->expiry_time, detail);
 211	cache_fresh_locked(old, 0, detail);
 212	spin_unlock(&detail->hash_lock);
 213	cache_fresh_unlocked(tmp, detail);
 214	cache_fresh_unlocked(old, detail);
 215	cache_put(old, detail);
 216	return tmp;
 217}
 218EXPORT_SYMBOL_GPL(sunrpc_cache_update);
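/*
 * Continuing the hypothetical demo_map sketch above: a downcall parser
 * typically fills a temporary entry and lets sunrpc_cache_update()
 * either refresh the existing entry in place or replace it.  Either
 * way, the caller's reference on 'old' is consumed.
 */
#if 0	/* illustrative only */
static int demo_update(struct cache_detail *cd, struct demo_map *old,
		       __u32 value, time_t expiry)
{
	struct demo_map new;
	struct cache_head *ch;
	int hash = old->key % cd->hash_size;	/* placeholder hash */

	new.h.flags = 0;
	new.h.expiry_time = expiry;
	new.key = old->key;
	new.value = value;	/* copied into place by cd->update() */
	ch = sunrpc_cache_update(cd, &new.h, &old->h, hash);
	if (!ch)
		return -ENOMEM;
	cache_put(ch, cd);	/* drop the reference on the result */
	return 0;
}
#endif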
 219
 220static int cache_make_upcall(struct cache_detail *cd, struct cache_head *h)
 221{
 222	if (cd->cache_upcall)
 223		return cd->cache_upcall(cd, h);
 224	return sunrpc_cache_pipe_upcall(cd, h);
 225}
 226
 227static inline int cache_is_valid(struct cache_head *h)
 228{
 229	if (!test_bit(CACHE_VALID, &h->flags))
 230		return -EAGAIN;
 231	else {
 232		/* entry is valid */
 233		if (test_bit(CACHE_NEGATIVE, &h->flags))
 234			return -ENOENT;
 235		else {
 236			/*
 237			 * In combination with write barrier in
 238			 * sunrpc_cache_update, ensures that anyone
 239			 * using the cache entry after this sees the
 240			 * updated contents:
 241			 */
 242			smp_rmb();
 243			return 0;
 244		}
 245	}
 246}
 247
 248static int try_to_negate_entry(struct cache_detail *detail, struct cache_head *h)
 249{
 250	int rv;
 251
 252	spin_lock(&detail->hash_lock);
 253	rv = cache_is_valid(h);
 254	if (rv == -EAGAIN) {
 255		set_bit(CACHE_NEGATIVE, &h->flags);
 256		cache_fresh_locked(h, seconds_since_boot()+CACHE_NEW_EXPIRY,
 257				   detail);
 258		rv = -ENOENT;
 259	}
 260	spin_unlock(&detail->hash_lock);
 261	cache_fresh_unlocked(h, detail);
 262	return rv;
 263}
 264
 265/*
 266 * This is the generic cache management routine for all
 267 * the authentication caches.
 268 * It checks the currency of a cache item and will (later)
 269 * initiate an upcall to fill it if needed.
 270 *
 271 *
 272 * Returns 0 if the cache_head can be used, or cache_puts it and returns
 273 * -EAGAIN if upcall is pending and request has been queued
  274 * -ETIMEDOUT if upcall failed or request could not be queued or
 275 *           upcall completed but item is still invalid (implying that
 276 *           the cache item has been replaced with a newer one).
 277 * -ENOENT if cache entry was negative
 278 */
 279int cache_check(struct cache_detail *detail,
 280		    struct cache_head *h, struct cache_req *rqstp)
 281{
 282	int rv;
 283	long refresh_age, age;
 284
 285	/* First decide return status as best we can */
 286	rv = cache_is_valid(h);
 287
 288	/* now see if we want to start an upcall */
 289	refresh_age = (h->expiry_time - h->last_refresh);
 290	age = seconds_since_boot() - h->last_refresh;
 291
 292	if (rqstp == NULL) {
 293		if (rv == -EAGAIN)
 294			rv = -ENOENT;
 295	} else if (rv == -EAGAIN ||
 296		   (h->expiry_time != 0 && age > refresh_age/2)) {
 297		dprintk("RPC:       Want update, refage=%ld, age=%ld\n",
 298				refresh_age, age);
 299		if (!test_and_set_bit(CACHE_PENDING, &h->flags)) {
 300			switch (cache_make_upcall(detail, h)) {
 301			case -EINVAL:
 302				rv = try_to_negate_entry(detail, h);
 303				break;
 304			case -EAGAIN:
 305				cache_fresh_unlocked(h, detail);
 306				break;
 307			}
 308		} else if (!cache_listeners_exist(detail))
 309			rv = try_to_negate_entry(detail, h);
 310	}
 311
 312	if (rv == -EAGAIN) {
 313		if (!cache_defer_req(rqstp, h)) {
 314			/*
 315			 * Request was not deferred; handle it as best
 316			 * we can ourselves:
 317			 */
 318			rv = cache_is_valid(h);
 319			if (rv == -EAGAIN)
 320				rv = -ETIMEDOUT;
 321		}
 322	}
 323	if (rv)
 324		cache_put(h, detail);
 325	return rv;
 326}
 327EXPORT_SYMBOL_GPL(cache_check);
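/*
 * A sketch of the usual server-side calling pattern, assuming a
 * struct svc_rqst caller and the SVC_* dispositions from svcauth;
 * 'item' is the hypothetical demo_map entry from the sketches above.
 */
#if 0	/* illustrative only */
	switch (cache_check(cd, &item->h, &rqstp->rq_chandle)) {
	case 0:			/* valid: use item, cache_put() when done */
		break;
	case -EAGAIN:		/* deferred; drop it, it will be revisited */
		return SVC_DROP;
	case -ENOENT:		/* negative entry */
	case -ETIMEDOUT:	/* upcall failed or timed out */
		return SVC_DENIED;	/* our reference was already put */
	}
#endif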
 328
 329/*
 330 * caches need to be periodically cleaned.
 331 * For this we maintain a list of cache_detail and
 332 * a current pointer into that list and into the table
 333 * for that entry.
 334 *
 335 * Each time cache_clean is called it finds the next non-empty entry
 336 * in the current table and walks the list in that entry
 337 * looking for entries that can be removed.
 338 *
 339 * An entry gets removed if:
 340 * - The expiry is before current time
 341 * - The last_refresh time is before the flush_time for that cache
 342 *
 343 * later we might drop old entries with non-NEVER expiry if that table
 344 * is getting 'full' for some definition of 'full'
 345 *
 346 * The question of "how often to scan a table" is an interesting one
 347 * and is answered in part by the use of the "nextcheck" field in the
 348 * cache_detail.
 349 * When a scan of a table begins, the nextcheck field is set to a time
 350 * that is well into the future.
 351 * While scanning, if an expiry time is found that is earlier than the
 352 * current nextcheck time, nextcheck is set to that expiry time.
 353 * If the flush_time is ever set to a time earlier than the nextcheck
 354 * time, the nextcheck time is then set to that flush_time.
 355 *
 356 * A table is then only scanned if the current time is at least
 357 * the nextcheck time.
 358 *
 359 */
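/*
 * Worked example with hypothetical numbers: a scan that starts at
 * t=1000s first sets nextcheck to 1000 + 30*60 = 2800.  If the scan
 * then meets entries expiring at t=1500 and t=1200, nextcheck drops to
 * 1501 and then to 1201, so this table is not scanned again until
 * seconds_since_boot() reaches 1201, unless flush_time is set earlier
 * than that, in which case nextcheck is pulled back to the flush_time.
 */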
 360
 361static LIST_HEAD(cache_list);
 362static DEFINE_SPINLOCK(cache_list_lock);
 363static struct cache_detail *current_detail;
 364static int current_index;
 365
 366static void do_cache_clean(struct work_struct *work);
 367static struct delayed_work cache_cleaner;
 368
 369void sunrpc_init_cache_detail(struct cache_detail *cd)
 370{
 371	spin_lock_init(&cd->hash_lock);
 372	INIT_LIST_HEAD(&cd->queue);
 373	spin_lock(&cache_list_lock);
 374	cd->nextcheck = 0;
 375	cd->entries = 0;
 376	atomic_set(&cd->writers, 0);
 377	cd->last_close = 0;
 378	cd->last_warn = -1;
 379	list_add(&cd->others, &cache_list);
 380	spin_unlock(&cache_list_lock);
 381
 382	/* start the cleaning process */
 383	queue_delayed_work(system_power_efficient_wq, &cache_cleaner, 0);
 384}
 385EXPORT_SYMBOL_GPL(sunrpc_init_cache_detail);
 386
 387void sunrpc_destroy_cache_detail(struct cache_detail *cd)
 388{
 389	cache_purge(cd);
 390	spin_lock(&cache_list_lock);
 391	spin_lock(&cd->hash_lock);
 392	if (current_detail == cd)
 393		current_detail = NULL;
 394	list_del_init(&cd->others);
 395	spin_unlock(&cd->hash_lock);
 396	spin_unlock(&cache_list_lock);
 397	if (list_empty(&cache_list)) {
  398		/* module must be being unloaded so it's safe to kill the worker */
 399		cancel_delayed_work_sync(&cache_cleaner);
 400	}
 401}
 402EXPORT_SYMBOL_GPL(sunrpc_destroy_cache_detail);
 403
 404/* clean cache tries to find something to clean
 405 * and cleans it.
 406 * It returns 1 if it cleaned something,
 407 *            0 if it didn't find anything this time
 408 *           -1 if it fell off the end of the list.
 409 */
 410static int cache_clean(void)
 411{
 412	int rv = 0;
 413	struct list_head *next;
 414
 415	spin_lock(&cache_list_lock);
 416
 417	/* find a suitable table if we don't already have one */
 418	while (current_detail == NULL ||
 419	    current_index >= current_detail->hash_size) {
 420		if (current_detail)
 421			next = current_detail->others.next;
 422		else
 423			next = cache_list.next;
 424		if (next == &cache_list) {
 425			current_detail = NULL;
 426			spin_unlock(&cache_list_lock);
 427			return -1;
 428		}
 429		current_detail = list_entry(next, struct cache_detail, others);
 430		if (current_detail->nextcheck > seconds_since_boot())
 431			current_index = current_detail->hash_size;
 432		else {
 433			current_index = 0;
 434			current_detail->nextcheck = seconds_since_boot()+30*60;
 435		}
 436	}
 437
 438	/* find a non-empty bucket in the table */
 439	while (current_detail &&
 440	       current_index < current_detail->hash_size &&
 441	       hlist_empty(&current_detail->hash_table[current_index]))
 442		current_index++;
 443
 444	/* find a cleanable entry in the bucket and clean it, or set to next bucket */
 445
 446	if (current_detail && current_index < current_detail->hash_size) {
 447		struct cache_head *ch = NULL;
 448		struct cache_detail *d;
 449		struct hlist_head *head;
 450		struct hlist_node *tmp;
 451
 452		spin_lock(&current_detail->hash_lock);
 453
 454		/* Ok, now to clean this strand */
 455
 456		head = &current_detail->hash_table[current_index];
 457		hlist_for_each_entry_safe(ch, tmp, head, cache_list) {
 458			if (current_detail->nextcheck > ch->expiry_time)
 459				current_detail->nextcheck = ch->expiry_time+1;
 460			if (!cache_is_expired(current_detail, ch))
 461				continue;
 462
 463			hlist_del_init_rcu(&ch->cache_list);
 464			current_detail->entries--;
 465			rv = 1;
 466			break;
 467		}
 468
 469		spin_unlock(&current_detail->hash_lock);
 470		d = current_detail;
 471		if (!ch)
  472			current_index++;
 473		spin_unlock(&cache_list_lock);
 474		if (ch) {
 475			set_bit(CACHE_CLEANED, &ch->flags);
 476			cache_fresh_unlocked(ch, d);
 477			cache_put(ch, d);
 478		}
 479	} else
 480		spin_unlock(&cache_list_lock);
 481
 482	return rv;
 483}
 484
 485/*
 486 * We want to regularly clean the cache, so we need to schedule some work ...
 487 */
 488static void do_cache_clean(struct work_struct *work)
 489{
 490	int delay = 5;
 491	if (cache_clean() == -1)
 492		delay = round_jiffies_relative(30*HZ);
 493
 494	if (list_empty(&cache_list))
 495		delay = 0;
 496
 497	if (delay)
 498		queue_delayed_work(system_power_efficient_wq,
 499				   &cache_cleaner, delay);
 500}
 501
 502
 503/*
 504 * Clean all caches promptly.  This just calls cache_clean
 505 * repeatedly until we are sure that every cache has had a chance to
 506 * be fully cleaned
 507 */
 508void cache_flush(void)
 509{
 510	while (cache_clean() != -1)
 511		cond_resched();
 512	while (cache_clean() != -1)
 513		cond_resched();
 514}
 515EXPORT_SYMBOL_GPL(cache_flush);
 516
 517void cache_purge(struct cache_detail *detail)
 518{
 519	struct cache_head *ch = NULL;
 520	struct hlist_head *head = NULL;
 521	struct hlist_node *tmp = NULL;
 522	int i = 0;
 523
 524	spin_lock(&detail->hash_lock);
 525	if (!detail->entries) {
 526		spin_unlock(&detail->hash_lock);
 527		return;
 528	}
 529
 530	dprintk("RPC: %d entries in %s cache\n", detail->entries, detail->name);
 531	for (i = 0; i < detail->hash_size; i++) {
 532		head = &detail->hash_table[i];
 533		hlist_for_each_entry_safe(ch, tmp, head, cache_list) {
 534			hlist_del_init_rcu(&ch->cache_list);
 535			detail->entries--;
 536
 537			set_bit(CACHE_CLEANED, &ch->flags);
 538			spin_unlock(&detail->hash_lock);
 539			cache_fresh_unlocked(ch, detail);
 540			cache_put(ch, detail);
 541			spin_lock(&detail->hash_lock);
 542		}
 543	}
 544	spin_unlock(&detail->hash_lock);
 545}
 546EXPORT_SYMBOL_GPL(cache_purge);
 547
 548
 549/*
 550 * Deferral and Revisiting of Requests.
 551 *
 552 * If a cache lookup finds a pending entry, we
 553 * need to defer the request and revisit it later.
 554 * All deferred requests are stored in a hash table,
 555 * indexed by "struct cache_head *".
 556 * As it may be wasteful to store a whole request
 557 * structure, we allow the request to provide a
 558 * deferred form, which must contain a
  559 * 'struct cache_deferred_req'.
  560 * This cache_deferred_req contains a method to allow
  561 * it to be revisited when cache info is available.
 562 */
 563
 564#define	DFR_HASHSIZE	(PAGE_SIZE/sizeof(struct list_head))
 565#define	DFR_HASH(item)	((((long)item)>>4 ^ (((long)item)>>13)) % DFR_HASHSIZE)
 566
 567#define	DFR_MAX	300	/* ??? */
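/*
 * Worked example with a hypothetical pointer value: DFR_HASH xors two
 * shifted copies of the cache_head address, so that the aligned low
 * bits are discarded and items allocated from the same slab still
 * spread out, then reduces the result modulo DFR_HASHSIZE.  With 4K
 * pages and a 16-byte list_head (two pointers on 64-bit) that is
 * 4096/16 = 256 buckets.
 */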
 568
 569static DEFINE_SPINLOCK(cache_defer_lock);
 570static LIST_HEAD(cache_defer_list);
 571static struct hlist_head cache_defer_hash[DFR_HASHSIZE];
 572static int cache_defer_cnt;
 573
 574static void __unhash_deferred_req(struct cache_deferred_req *dreq)
 575{
 576	hlist_del_init(&dreq->hash);
 577	if (!list_empty(&dreq->recent)) {
 578		list_del_init(&dreq->recent);
 579		cache_defer_cnt--;
 580	}
 581}
 582
 583static void __hash_deferred_req(struct cache_deferred_req *dreq, struct cache_head *item)
 584{
 585	int hash = DFR_HASH(item);
 586
 587	INIT_LIST_HEAD(&dreq->recent);
 588	hlist_add_head(&dreq->hash, &cache_defer_hash[hash]);
 589}
 590
 591static void setup_deferral(struct cache_deferred_req *dreq,
 592			   struct cache_head *item,
 593			   int count_me)
 594{
 595
 596	dreq->item = item;
 597
 598	spin_lock(&cache_defer_lock);
 599
 600	__hash_deferred_req(dreq, item);
 601
 602	if (count_me) {
 603		cache_defer_cnt++;
 604		list_add(&dreq->recent, &cache_defer_list);
 605	}
 606
 607	spin_unlock(&cache_defer_lock);
 608
 609}
 610
 611struct thread_deferred_req {
 612	struct cache_deferred_req handle;
 613	struct completion completion;
 614};
 615
 616static void cache_restart_thread(struct cache_deferred_req *dreq, int too_many)
 617{
 618	struct thread_deferred_req *dr =
 619		container_of(dreq, struct thread_deferred_req, handle);
 620	complete(&dr->completion);
 621}
 622
 623static void cache_wait_req(struct cache_req *req, struct cache_head *item)
 624{
 625	struct thread_deferred_req sleeper;
 626	struct cache_deferred_req *dreq = &sleeper.handle;
 627
 628	sleeper.completion = COMPLETION_INITIALIZER_ONSTACK(sleeper.completion);
 629	dreq->revisit = cache_restart_thread;
 630
 631	setup_deferral(dreq, item, 0);
 632
 633	if (!test_bit(CACHE_PENDING, &item->flags) ||
 634	    wait_for_completion_interruptible_timeout(
 635		    &sleeper.completion, req->thread_wait) <= 0) {
 636		/* The completion wasn't completed, so we need
 637		 * to clean up
 638		 */
 639		spin_lock(&cache_defer_lock);
 640		if (!hlist_unhashed(&sleeper.handle.hash)) {
 641			__unhash_deferred_req(&sleeper.handle);
 642			spin_unlock(&cache_defer_lock);
 643		} else {
 644			/* cache_revisit_request already removed
 645			 * this from the hash table, but hasn't
 646			 * called ->revisit yet.  It will very soon
 647			 * and we need to wait for it.
 648			 */
 649			spin_unlock(&cache_defer_lock);
 650			wait_for_completion(&sleeper.completion);
 651		}
 652	}
 653}
 654
 655static void cache_limit_defers(void)
 656{
  657	/* Make sure we haven't exceeded the limit of allowed deferred
 658	 * requests.
 659	 */
 660	struct cache_deferred_req *discard = NULL;
 661
 662	if (cache_defer_cnt <= DFR_MAX)
 663		return;
 664
 665	spin_lock(&cache_defer_lock);
 666
 667	/* Consider removing either the first or the last */
 668	if (cache_defer_cnt > DFR_MAX) {
 669		if (prandom_u32() & 1)
 670			discard = list_entry(cache_defer_list.next,
 671					     struct cache_deferred_req, recent);
 672		else
 673			discard = list_entry(cache_defer_list.prev,
 674					     struct cache_deferred_req, recent);
 675		__unhash_deferred_req(discard);
 676	}
 677	spin_unlock(&cache_defer_lock);
 678	if (discard)
 679		discard->revisit(discard, 1);
 680}
 681
 682/* Return true if and only if a deferred request is queued. */
 683static bool cache_defer_req(struct cache_req *req, struct cache_head *item)
 684{
 685	struct cache_deferred_req *dreq;
 686
 687	if (req->thread_wait) {
 688		cache_wait_req(req, item);
 689		if (!test_bit(CACHE_PENDING, &item->flags))
 690			return false;
 691	}
 692	dreq = req->defer(req);
 693	if (dreq == NULL)
 694		return false;
 695	setup_deferral(dreq, item, 1);
 696	if (!test_bit(CACHE_PENDING, &item->flags))
 697		/* Bit could have been cleared before we managed to
 698		 * set up the deferral, so need to revisit just in case
 699		 */
 700		cache_revisit_request(item);
 701
 702	cache_limit_defers();
 703	return true;
 704}
 705
 706static void cache_revisit_request(struct cache_head *item)
 707{
 708	struct cache_deferred_req *dreq;
 709	struct list_head pending;
 710	struct hlist_node *tmp;
 711	int hash = DFR_HASH(item);
 712
 713	INIT_LIST_HEAD(&pending);
 714	spin_lock(&cache_defer_lock);
 715
 716	hlist_for_each_entry_safe(dreq, tmp, &cache_defer_hash[hash], hash)
 717		if (dreq->item == item) {
 718			__unhash_deferred_req(dreq);
 719			list_add(&dreq->recent, &pending);
 720		}
 721
 722	spin_unlock(&cache_defer_lock);
 723
 724	while (!list_empty(&pending)) {
 725		dreq = list_entry(pending.next, struct cache_deferred_req, recent);
 726		list_del_init(&dreq->recent);
 727		dreq->revisit(dreq, 0);
 728	}
 729}
 730
 731void cache_clean_deferred(void *owner)
 732{
 733	struct cache_deferred_req *dreq, *tmp;
 734	struct list_head pending;
 735
 736
 737	INIT_LIST_HEAD(&pending);
 738	spin_lock(&cache_defer_lock);
 739
 740	list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
 741		if (dreq->owner == owner) {
 742			__unhash_deferred_req(dreq);
 743			list_add(&dreq->recent, &pending);
 744		}
 745	}
 746	spin_unlock(&cache_defer_lock);
 747
 748	while (!list_empty(&pending)) {
 749		dreq = list_entry(pending.next, struct cache_deferred_req, recent);
 750		list_del_init(&dreq->recent);
 751		dreq->revisit(dreq, 1);
 752	}
 753}
 754
 755/*
 756 * communicate with user-space
 757 *
 758 * We have a magic /proc file - /proc/net/rpc/<cachename>/channel.
 759 * On read, you get a full request, or block.
 760 * On write, an update request is processed.
 761 * Poll works if anything to read, and always allows write.
 762 *
 763 * Implemented by linked list of requests.  Each open file has
 764 * a ->private that also exists in this list.  New requests are added
  765 * to the end and may wake up any preceding readers.
 766 * New readers are added to the head.  If, on read, an item is found with
 767 * CACHE_UPCALLING clear, we free it from the list.
 768 *
 769 */
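/*
 * A sketch of the userspace side of this protocol; the cache name and
 * reply format are hypothetical, but real daemons such as rpc.mountd
 * follow the same read-one-request / write-one-reply pattern (usually
 * with a poll() loop in front of the read).
 */
#if 0	/* illustrative only: userspace code */
	char buf[8192];
	const char reply[] = "demokey 2147483647 demovalue\n";
	int fd = open("/proc/net/rpc/demo/channel", O_RDWR);

	for (;;) {
		/* each successful read() returns exactly one whole request */
		ssize_t n = read(fd, buf, sizeof(buf));
		if (n <= 0)
			continue;
		/* parse buf, look up the answer, then write the complete
		 * reply line back in a single write() */
		write(fd, reply, sizeof(reply) - 1);
	}
#endif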
 770
 771static DEFINE_SPINLOCK(queue_lock);
 772static DEFINE_MUTEX(queue_io_mutex);
 773
 774struct cache_queue {
 775	struct list_head	list;
 776	int			reader;	/* if 0, then request */
 777};
 778struct cache_request {
 779	struct cache_queue	q;
 780	struct cache_head	*item;
  781	char			*buf;
 782	int			len;
 783	int			readers;
 784};
 785struct cache_reader {
 786	struct cache_queue	q;
 787	int			offset;	/* if non-0, we have a refcnt on next request */
 788};
 789
 790static int cache_request(struct cache_detail *detail,
 791			       struct cache_request *crq)
 792{
 793	char *bp = crq->buf;
 794	int len = PAGE_SIZE;
 795
 796	detail->cache_request(detail, crq->item, &bp, &len);
 797	if (len < 0)
 798		return -EAGAIN;
 799	return PAGE_SIZE - len;
 800}
 801
 802static ssize_t cache_read(struct file *filp, char __user *buf, size_t count,
 803			  loff_t *ppos, struct cache_detail *cd)
 804{
 805	struct cache_reader *rp = filp->private_data;
 806	struct cache_request *rq;
 807	struct inode *inode = file_inode(filp);
 808	int err;
 809
 810	if (count == 0)
 811		return 0;
 812
 813	inode_lock(inode); /* protect against multiple concurrent
 814			      * readers on this file */
 815 again:
 816	spin_lock(&queue_lock);
 817	/* need to find next request */
 818	while (rp->q.list.next != &cd->queue &&
 819	       list_entry(rp->q.list.next, struct cache_queue, list)
 820	       ->reader) {
 821		struct list_head *next = rp->q.list.next;
 822		list_move(&rp->q.list, next);
 823	}
 824	if (rp->q.list.next == &cd->queue) {
 825		spin_unlock(&queue_lock);
 826		inode_unlock(inode);
 827		WARN_ON_ONCE(rp->offset);
 828		return 0;
 829	}
 830	rq = container_of(rp->q.list.next, struct cache_request, q.list);
 831	WARN_ON_ONCE(rq->q.reader);
 832	if (rp->offset == 0)
 833		rq->readers++;
 834	spin_unlock(&queue_lock);
 835
 836	if (rq->len == 0) {
 837		err = cache_request(cd, rq);
 838		if (err < 0)
 839			goto out;
 840		rq->len = err;
 841	}
 842
 843	if (rp->offset == 0 && !test_bit(CACHE_PENDING, &rq->item->flags)) {
 844		err = -EAGAIN;
 845		spin_lock(&queue_lock);
 846		list_move(&rp->q.list, &rq->q.list);
 847		spin_unlock(&queue_lock);
 848	} else {
 849		if (rp->offset + count > rq->len)
 850			count = rq->len - rp->offset;
 851		err = -EFAULT;
 852		if (copy_to_user(buf, rq->buf + rp->offset, count))
 853			goto out;
 854		rp->offset += count;
 855		if (rp->offset >= rq->len) {
 856			rp->offset = 0;
 857			spin_lock(&queue_lock);
 858			list_move(&rp->q.list, &rq->q.list);
 859			spin_unlock(&queue_lock);
 860		}
 861		err = 0;
 862	}
 863 out:
 864	if (rp->offset == 0) {
 865		/* need to release rq */
 866		spin_lock(&queue_lock);
 867		rq->readers--;
 868		if (rq->readers == 0 &&
 869		    !test_bit(CACHE_PENDING, &rq->item->flags)) {
 870			list_del(&rq->q.list);
 871			spin_unlock(&queue_lock);
 872			cache_put(rq->item, cd);
 873			kfree(rq->buf);
 874			kfree(rq);
 875		} else
 876			spin_unlock(&queue_lock);
 877	}
 878	if (err == -EAGAIN)
 879		goto again;
 880	inode_unlock(inode);
 881	return err ? err :  count;
 882}
 883
 884static ssize_t cache_do_downcall(char *kaddr, const char __user *buf,
 885				 size_t count, struct cache_detail *cd)
 886{
 887	ssize_t ret;
 888
 889	if (count == 0)
 890		return -EINVAL;
 891	if (copy_from_user(kaddr, buf, count))
 892		return -EFAULT;
 893	kaddr[count] = '\0';
 894	ret = cd->cache_parse(cd, kaddr, count);
 895	if (!ret)
 896		ret = count;
 897	return ret;
 898}
 899
 900static ssize_t cache_slow_downcall(const char __user *buf,
 901				   size_t count, struct cache_detail *cd)
 902{
 903	static char write_buf[8192]; /* protected by queue_io_mutex */
 904	ssize_t ret = -EINVAL;
 905
 906	if (count >= sizeof(write_buf))
 907		goto out;
 908	mutex_lock(&queue_io_mutex);
 909	ret = cache_do_downcall(write_buf, buf, count, cd);
 910	mutex_unlock(&queue_io_mutex);
 911out:
 912	return ret;
 913}
 914
 915static ssize_t cache_downcall(struct address_space *mapping,
 916			      const char __user *buf,
 917			      size_t count, struct cache_detail *cd)
 918{
 919	struct page *page;
 920	char *kaddr;
 921	ssize_t ret = -ENOMEM;
 922
 923	if (count >= PAGE_SIZE)
 924		goto out_slow;
 925
 926	page = find_or_create_page(mapping, 0, GFP_KERNEL);
 927	if (!page)
 928		goto out_slow;
 929
 930	kaddr = kmap(page);
 931	ret = cache_do_downcall(kaddr, buf, count, cd);
 932	kunmap(page);
 933	unlock_page(page);
 934	put_page(page);
 935	return ret;
 936out_slow:
 937	return cache_slow_downcall(buf, count, cd);
 938}
 939
 940static ssize_t cache_write(struct file *filp, const char __user *buf,
 941			   size_t count, loff_t *ppos,
 942			   struct cache_detail *cd)
 943{
 944	struct address_space *mapping = filp->f_mapping;
 945	struct inode *inode = file_inode(filp);
 946	ssize_t ret = -EINVAL;
 947
 948	if (!cd->cache_parse)
 949		goto out;
 950
 951	inode_lock(inode);
 952	ret = cache_downcall(mapping, buf, count, cd);
 953	inode_unlock(inode);
 954out:
 955	return ret;
 956}
 957
 958static DECLARE_WAIT_QUEUE_HEAD(queue_wait);
 959
 960static __poll_t cache_poll(struct file *filp, poll_table *wait,
 961			       struct cache_detail *cd)
 962{
 963	__poll_t mask;
 964	struct cache_reader *rp = filp->private_data;
 965	struct cache_queue *cq;
 966
 967	poll_wait(filp, &queue_wait, wait);
 968
  969	/* always allow write */
 970	mask = EPOLLOUT | EPOLLWRNORM;
 971
 972	if (!rp)
 973		return mask;
 974
 975	spin_lock(&queue_lock);
 976
  977	for (cq = &rp->q; &cq->list != &cd->queue;
 978	     cq = list_entry(cq->list.next, struct cache_queue, list))
 979		if (!cq->reader) {
 980			mask |= EPOLLIN | EPOLLRDNORM;
 981			break;
 982		}
 983	spin_unlock(&queue_lock);
 984	return mask;
 985}
 986
 987static int cache_ioctl(struct inode *ino, struct file *filp,
 988		       unsigned int cmd, unsigned long arg,
 989		       struct cache_detail *cd)
 990{
 991	int len = 0;
 992	struct cache_reader *rp = filp->private_data;
 993	struct cache_queue *cq;
 994
 995	if (cmd != FIONREAD || !rp)
 996		return -EINVAL;
 997
 998	spin_lock(&queue_lock);
 999
1000	/* only find the length remaining in current request,
1001	 * or the length of the next request
1002	 */
 1003	for (cq = &rp->q; &cq->list != &cd->queue;
1004	     cq = list_entry(cq->list.next, struct cache_queue, list))
1005		if (!cq->reader) {
1006			struct cache_request *cr =
1007				container_of(cq, struct cache_request, q);
1008			len = cr->len - rp->offset;
1009			break;
1010		}
1011	spin_unlock(&queue_lock);
1012
1013	return put_user(len, (int __user *)arg);
1014}
1015
1016static int cache_open(struct inode *inode, struct file *filp,
1017		      struct cache_detail *cd)
1018{
1019	struct cache_reader *rp = NULL;
1020
1021	if (!cd || !try_module_get(cd->owner))
1022		return -EACCES;
1023	nonseekable_open(inode, filp);
1024	if (filp->f_mode & FMODE_READ) {
1025		rp = kmalloc(sizeof(*rp), GFP_KERNEL);
1026		if (!rp) {
1027			module_put(cd->owner);
1028			return -ENOMEM;
1029		}
1030		rp->offset = 0;
1031		rp->q.reader = 1;
1032
1033		spin_lock(&queue_lock);
1034		list_add(&rp->q.list, &cd->queue);
1035		spin_unlock(&queue_lock);
1036	}
1037	if (filp->f_mode & FMODE_WRITE)
1038		atomic_inc(&cd->writers);
1039	filp->private_data = rp;
1040	return 0;
1041}
1042
1043static int cache_release(struct inode *inode, struct file *filp,
1044			 struct cache_detail *cd)
1045{
1046	struct cache_reader *rp = filp->private_data;
1047
1048	if (rp) {
1049		spin_lock(&queue_lock);
1050		if (rp->offset) {
1051			struct cache_queue *cq;
 1052			for (cq = &rp->q; &cq->list != &cd->queue;
1053			     cq = list_entry(cq->list.next, struct cache_queue, list))
1054				if (!cq->reader) {
1055					container_of(cq, struct cache_request, q)
1056						->readers--;
1057					break;
1058				}
1059			rp->offset = 0;
1060		}
1061		list_del(&rp->q.list);
1062		spin_unlock(&queue_lock);
1063
1064		filp->private_data = NULL;
1065		kfree(rp);
1066
1067	}
1068	if (filp->f_mode & FMODE_WRITE) {
1069		atomic_dec(&cd->writers);
1070		cd->last_close = seconds_since_boot();
1071	}
1072	module_put(cd->owner);
1073	return 0;
1074}
1075
1076
1077
1078static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch)
1079{
1080	struct cache_queue *cq, *tmp;
1081	struct cache_request *cr;
1082	struct list_head dequeued;
1083
1084	INIT_LIST_HEAD(&dequeued);
1085	spin_lock(&queue_lock);
1086	list_for_each_entry_safe(cq, tmp, &detail->queue, list)
1087		if (!cq->reader) {
1088			cr = container_of(cq, struct cache_request, q);
1089			if (cr->item != ch)
1090				continue;
1091			if (test_bit(CACHE_PENDING, &ch->flags))
1092				/* Lost a race and it is pending again */
1093				break;
1094			if (cr->readers != 0)
1095				continue;
1096			list_move(&cr->q.list, &dequeued);
1097		}
1098	spin_unlock(&queue_lock);
1099	while (!list_empty(&dequeued)) {
1100		cr = list_entry(dequeued.next, struct cache_request, q.list);
1101		list_del(&cr->q.list);
1102		cache_put(cr->item, detail);
1103		kfree(cr->buf);
1104		kfree(cr);
1105	}
1106}
1107
1108/*
1109 * Support routines for text-based upcalls.
1110 * Fields are separated by spaces.
1111 * Fields are either mangled to quote space tab newline slosh with slosh
 1112 * or hexified with a leading \x.
1113 * Record is terminated with newline.
1114 *
1115 */
1116
1117void qword_add(char **bpp, int *lp, char *str)
1118{
1119	char *bp = *bpp;
1120	int len = *lp;
1121	int ret;
1122
1123	if (len < 0) return;
1124
1125	ret = string_escape_str(str, bp, len, ESCAPE_OCTAL, "\\ \n\t");
1126	if (ret >= len) {
1127		bp += len;
1128		len = -1;
1129	} else {
1130		bp += ret;
1131		len -= ret;
1132		*bp++ = ' ';
1133		len--;
1134	}
1135	*bpp = bp;
1136	*lp = len;
1137}
1138EXPORT_SYMBOL_GPL(qword_add);
1139
1140void qword_addhex(char **bpp, int *lp, char *buf, int blen)
1141{
1142	char *bp = *bpp;
1143	int len = *lp;
1144
1145	if (len < 0) return;
1146
1147	if (len > 2) {
1148		*bp++ = '\\';
1149		*bp++ = 'x';
1150		len -= 2;
1151		while (blen && len >= 2) {
1152			bp = hex_byte_pack(bp, *buf++);
1153			len -= 2;
1154			blen--;
1155		}
1156	}
1157	if (blen || len<1) len = -1;
1158	else {
1159		*bp++ = ' ';
1160		len--;
1161	}
1162	*bpp = bp;
1163	*lp = len;
1164}
1165EXPORT_SYMBOL_GPL(qword_addhex);
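/*
 * A sketch of how a cache_request callback might emit one record with
 * these helpers; the field values are hypothetical (10.0.2.15 shown as
 * its four raw bytes), and real callbacks write into the page supplied
 * by sunrpc_cache_pipe_upcall() rather than a local buffer.
 */
#if 0	/* illustrative only */
	char page[PAGE_SIZE];
	char raw[4] = { 0x0a, 0x00, 0x02, 0x0f };
	char *bp = page;
	int len = sizeof(page);

	qword_add(&bp, &len, "nfsd");	/* emits "nfsd " */
	qword_add(&bp, &len, "a b");	/* emits "a\040b " (octal escaping) */
	qword_addhex(&bp, &len, raw, 4);	/* emits "\x0a00020f " */
	if (len >= 1)
		*bp++ = '\n';		/* a newline terminates the record */
#endif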
1166
1167static void warn_no_listener(struct cache_detail *detail)
1168{
1169	if (detail->last_warn != detail->last_close) {
1170		detail->last_warn = detail->last_close;
1171		if (detail->warn_no_listener)
1172			detail->warn_no_listener(detail, detail->last_close != 0);
1173	}
1174}
1175
1176static bool cache_listeners_exist(struct cache_detail *detail)
1177{
1178	if (atomic_read(&detail->writers))
1179		return true;
1180	if (detail->last_close == 0)
1181		/* This cache was never opened */
1182		return false;
1183	if (detail->last_close < seconds_since_boot() - 30)
1184		/*
1185		 * We allow for the possibility that someone might
1186		 * restart a userspace daemon without restarting the
1187		 * server; but after 30 seconds, we give up.
1188		 */
1189		 return false;
1190	return true;
1191}
1192
1193/*
1194 * register an upcall request to user-space and queue it up for read() by the
1195 * upcall daemon.
1196 *
1197 * Each request is at most one page long.
1198 */
1199int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h)
1200{
1201
1202	char *buf;
1203	struct cache_request *crq;
1204	int ret = 0;
1205
1206	if (!detail->cache_request)
1207		return -EINVAL;
1208
1209	if (!cache_listeners_exist(detail)) {
1210		warn_no_listener(detail);
1211		return -EINVAL;
1212	}
1213	if (test_bit(CACHE_CLEANED, &h->flags))
1214		/* Too late to make an upcall */
1215		return -EAGAIN;
1216
1217	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
1218	if (!buf)
1219		return -EAGAIN;
1220
1221	crq = kmalloc(sizeof (*crq), GFP_KERNEL);
1222	if (!crq) {
1223		kfree(buf);
1224		return -EAGAIN;
1225	}
1226
1227	crq->q.reader = 0;
1228	crq->buf = buf;
1229	crq->len = 0;
1230	crq->readers = 0;
1231	spin_lock(&queue_lock);
1232	if (test_bit(CACHE_PENDING, &h->flags)) {
1233		crq->item = cache_get(h);
1234		list_add_tail(&crq->q.list, &detail->queue);
1235	} else
1236		/* Lost a race, no longer PENDING, so don't enqueue */
1237		ret = -EAGAIN;
1238	spin_unlock(&queue_lock);
1239	wake_up(&queue_wait);
1240	if (ret == -EAGAIN) {
1241		kfree(buf);
1242		kfree(crq);
1243	}
1244	return ret;
1245}
1246EXPORT_SYMBOL_GPL(sunrpc_cache_pipe_upcall);
1247
1248/*
1249 * parse a message from user-space and pass it
1250 * to an appropriate cache
1251 * Messages are, like requests, separated into fields by
 1252 * spaces and dequoted as \xHEXSTRING or embedded \nnn octal.
1253 *
1254 * Message is
1255 *   reply cachename expiry key ... content....
1256 *
1257 * key and content are both parsed by cache
1258 */
1259
1260int qword_get(char **bpp, char *dest, int bufsize)
1261{
1262	/* return bytes copied, or -1 on error */
1263	char *bp = *bpp;
1264	int len = 0;
1265
1266	while (*bp == ' ') bp++;
1267
1268	if (bp[0] == '\\' && bp[1] == 'x') {
1269		/* HEX STRING */
1270		bp += 2;
1271		while (len < bufsize - 1) {
1272			int h, l;
1273
1274			h = hex_to_bin(bp[0]);
1275			if (h < 0)
1276				break;
1277
1278			l = hex_to_bin(bp[1]);
1279			if (l < 0)
1280				break;
1281
1282			*dest++ = (h << 4) | l;
1283			bp += 2;
1284			len++;
1285		}
1286	} else {
1287		/* text with \nnn octal quoting */
1288		while (*bp != ' ' && *bp != '\n' && *bp && len < bufsize-1) {
1289			if (*bp == '\\' &&
1290			    isodigit(bp[1]) && (bp[1] <= '3') &&
1291			    isodigit(bp[2]) &&
1292			    isodigit(bp[3])) {
1293				int byte = (*++bp -'0');
1294				bp++;
1295				byte = (byte << 3) | (*bp++ - '0');
1296				byte = (byte << 3) | (*bp++ - '0');
1297				*dest++ = byte;
1298				len++;
1299			} else {
1300				*dest++ = *bp++;
1301				len++;
1302			}
1303		}
1304	}
1305
1306	if (*bp != ' ' && *bp != '\n' && *bp != '\0')
1307		return -1;
1308	while (*bp == ' ') bp++;
1309	*bpp = bp;
1310	*dest = '\0';
1311	return len;
1312}
1313EXPORT_SYMBOL_GPL(qword_get);
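/*
 * A matching parsing sketch with a hypothetical downcall line: the
 * first field is hex-quoted and decodes to four raw bytes, and each
 * qword_get() call advances the cursor past one field.
 */
#if 0	/* illustrative only */
	char line[] = "\\x0a00020f 2147483647 demovalue\n";
	char word[128];
	char *mesg = line;
	int n;

	n = qword_get(&mesg, word, sizeof(word));
	if (n != 4)	/* "\x0a00020f" yields the 4 bytes 0a 00 02 0f */
		return -EINVAL;
	/* mesg now points at "2147483647 demovalue\n"; the remaining
	 * fields are consumed with further qword_get()/get_expiry() calls
	 */
#endif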
1314
1315
1316/*
1317 * support /proc/net/rpc/$CACHENAME/content
1318 * as a seqfile.
1319 * We call ->cache_show passing NULL for the item to
1320 * get a header, then pass each real item in the cache
1321 */
1322
1323static void *__cache_seq_start(struct seq_file *m, loff_t *pos)
1324{
1325	loff_t n = *pos;
1326	unsigned int hash, entry;
1327	struct cache_head *ch;
1328	struct cache_detail *cd = m->private;
1329
1330	if (!n--)
1331		return SEQ_START_TOKEN;
1332	hash = n >> 32;
1333	entry = n & ((1LL<<32) - 1);
1334
1335	hlist_for_each_entry_rcu(ch, &cd->hash_table[hash], cache_list)
1336		if (!entry--)
1337			return ch;
1338	n &= ~((1LL<<32) - 1);
1339	do {
1340		hash++;
1341		n += 1LL<<32;
1342	} while(hash < cd->hash_size &&
1343		hlist_empty(&cd->hash_table[hash]));
1344	if (hash >= cd->hash_size)
1345		return NULL;
1346	*pos = n+1;
1347	return hlist_entry_safe(rcu_dereference_raw(
1348				hlist_first_rcu(&cd->hash_table[hash])),
1349				struct cache_head, cache_list);
1350}
1351
1352static void *cache_seq_next(struct seq_file *m, void *p, loff_t *pos)
1353{
1354	struct cache_head *ch = p;
1355	int hash = (*pos >> 32);
1356	struct cache_detail *cd = m->private;
1357
1358	if (p == SEQ_START_TOKEN)
1359		hash = 0;
1360	else if (ch->cache_list.next == NULL) {
1361		hash++;
1362		*pos += 1LL<<32;
1363	} else {
1364		++*pos;
1365		return hlist_entry_safe(rcu_dereference_raw(
1366					hlist_next_rcu(&ch->cache_list)),
1367					struct cache_head, cache_list);
1368	}
1369	*pos &= ~((1LL<<32) - 1);
1370	while (hash < cd->hash_size &&
1371	       hlist_empty(&cd->hash_table[hash])) {
1372		hash++;
1373		*pos += 1LL<<32;
1374	}
1375	if (hash >= cd->hash_size)
1376		return NULL;
1377	++*pos;
1378	return hlist_entry_safe(rcu_dereference_raw(
1379				hlist_first_rcu(&cd->hash_table[hash])),
1380				struct cache_head, cache_list);
1381}
1382
1383void *cache_seq_start_rcu(struct seq_file *m, loff_t *pos)
1384	__acquires(RCU)
1385{
1386	rcu_read_lock();
1387	return __cache_seq_start(m, pos);
1388}
1389EXPORT_SYMBOL_GPL(cache_seq_start_rcu);
1390
1391void *cache_seq_next_rcu(struct seq_file *file, void *p, loff_t *pos)
1392{
1393	return cache_seq_next(file, p, pos);
1394}
1395EXPORT_SYMBOL_GPL(cache_seq_next_rcu);
1396
1397void cache_seq_stop_rcu(struct seq_file *m, void *p)
1398	__releases(RCU)
1399{
1400	rcu_read_unlock();
1401}
1402EXPORT_SYMBOL_GPL(cache_seq_stop_rcu);
1403
1404static int c_show(struct seq_file *m, void *p)
1405{
1406	struct cache_head *cp = p;
1407	struct cache_detail *cd = m->private;
1408
1409	if (p == SEQ_START_TOKEN)
1410		return cd->cache_show(m, cd, NULL);
1411
1412	ifdebug(CACHE)
1413		seq_printf(m, "# expiry=%ld refcnt=%d flags=%lx\n",
1414			   convert_to_wallclock(cp->expiry_time),
1415			   kref_read(&cp->ref), cp->flags);
1416	cache_get(cp);
1417	if (cache_check(cd, cp, NULL))
1418		/* cache_check does a cache_put on failure */
1419		seq_printf(m, "# ");
1420	else {
1421		if (cache_is_expired(cd, cp))
1422			seq_printf(m, "# ");
1423		cache_put(cp, cd);
1424	}
1425
1426	return cd->cache_show(m, cd, cp);
1427}
1428
1429static const struct seq_operations cache_content_op = {
1430	.start	= cache_seq_start_rcu,
1431	.next	= cache_seq_next_rcu,
1432	.stop	= cache_seq_stop_rcu,
1433	.show	= c_show,
1434};
1435
1436static int content_open(struct inode *inode, struct file *file,
1437			struct cache_detail *cd)
1438{
1439	struct seq_file *seq;
1440	int err;
1441
1442	if (!cd || !try_module_get(cd->owner))
1443		return -EACCES;
1444
1445	err = seq_open(file, &cache_content_op);
1446	if (err) {
1447		module_put(cd->owner);
1448		return err;
1449	}
1450
1451	seq = file->private_data;
1452	seq->private = cd;
1453	return 0;
1454}
1455
1456static int content_release(struct inode *inode, struct file *file,
1457		struct cache_detail *cd)
1458{
1459	int ret = seq_release(inode, file);
1460	module_put(cd->owner);
1461	return ret;
1462}
1463
1464static int open_flush(struct inode *inode, struct file *file,
1465			struct cache_detail *cd)
1466{
1467	if (!cd || !try_module_get(cd->owner))
1468		return -EACCES;
1469	return nonseekable_open(inode, file);
1470}
1471
1472static int release_flush(struct inode *inode, struct file *file,
1473			struct cache_detail *cd)
1474{
1475	module_put(cd->owner);
1476	return 0;
1477}
1478
1479static ssize_t read_flush(struct file *file, char __user *buf,
1480			  size_t count, loff_t *ppos,
1481			  struct cache_detail *cd)
1482{
1483	char tbuf[22];
1484	size_t len;
1485
1486	len = snprintf(tbuf, sizeof(tbuf), "%lu\n",
1487			convert_to_wallclock(cd->flush_time));
1488	return simple_read_from_buffer(buf, count, ppos, tbuf, len);
1489}
1490
1491static ssize_t write_flush(struct file *file, const char __user *buf,
1492			   size_t count, loff_t *ppos,
1493			   struct cache_detail *cd)
1494{
1495	char tbuf[20];
1496	char *ep;
1497	time_t now;
1498
1499	if (*ppos || count > sizeof(tbuf)-1)
1500		return -EINVAL;
1501	if (copy_from_user(tbuf, buf, count))
1502		return -EFAULT;
1503	tbuf[count] = 0;
1504	simple_strtoul(tbuf, &ep, 0);
1505	if (*ep && *ep != '\n')
1506		return -EINVAL;
1507	/* Note that while we check that 'buf' holds a valid number,
1508	 * we always ignore the value and just flush everything.
1509	 * Making use of the number leads to races.
1510	 */
1511
1512	now = seconds_since_boot();
 1513	/* Always flush everything, so behave like cache_purge().
1514	 * Do this by advancing flush_time to the current time,
1515	 * or by one second if it has already reached the current time.
1516	 * Newly added cache entries will always have ->last_refresh greater
 1517	 * than ->flush_time, so they don't get flushed prematurely.
1518	 */
1519
1520	if (cd->flush_time >= now)
1521		now = cd->flush_time + 1;
1522
1523	cd->flush_time = now;
1524	cd->nextcheck = now;
1525	cache_flush();
1526
1527	if (cd->flush)
1528		cd->flush();
1529
1530	*ppos += count;
1531	return count;
1532}
1533
1534static ssize_t cache_read_procfs(struct file *filp, char __user *buf,
1535				 size_t count, loff_t *ppos)
1536{
1537	struct cache_detail *cd = PDE_DATA(file_inode(filp));
1538
1539	return cache_read(filp, buf, count, ppos, cd);
1540}
1541
1542static ssize_t cache_write_procfs(struct file *filp, const char __user *buf,
1543				  size_t count, loff_t *ppos)
1544{
1545	struct cache_detail *cd = PDE_DATA(file_inode(filp));
1546
1547	return cache_write(filp, buf, count, ppos, cd);
1548}
1549
1550static __poll_t cache_poll_procfs(struct file *filp, poll_table *wait)
1551{
1552	struct cache_detail *cd = PDE_DATA(file_inode(filp));
1553
1554	return cache_poll(filp, wait, cd);
1555}
1556
1557static long cache_ioctl_procfs(struct file *filp,
1558			       unsigned int cmd, unsigned long arg)
1559{
1560	struct inode *inode = file_inode(filp);
1561	struct cache_detail *cd = PDE_DATA(inode);
1562
1563	return cache_ioctl(inode, filp, cmd, arg, cd);
1564}
1565
1566static int cache_open_procfs(struct inode *inode, struct file *filp)
1567{
1568	struct cache_detail *cd = PDE_DATA(inode);
1569
1570	return cache_open(inode, filp, cd);
1571}
1572
1573static int cache_release_procfs(struct inode *inode, struct file *filp)
1574{
1575	struct cache_detail *cd = PDE_DATA(inode);
1576
1577	return cache_release(inode, filp, cd);
1578}
1579
1580static const struct file_operations cache_file_operations_procfs = {
1581	.owner		= THIS_MODULE,
1582	.llseek		= no_llseek,
1583	.read		= cache_read_procfs,
1584	.write		= cache_write_procfs,
1585	.poll		= cache_poll_procfs,
1586	.unlocked_ioctl	= cache_ioctl_procfs, /* for FIONREAD */
1587	.open		= cache_open_procfs,
1588	.release	= cache_release_procfs,
1589};
1590
1591static int content_open_procfs(struct inode *inode, struct file *filp)
1592{
1593	struct cache_detail *cd = PDE_DATA(inode);
1594
1595	return content_open(inode, filp, cd);
1596}
1597
1598static int content_release_procfs(struct inode *inode, struct file *filp)
1599{
1600	struct cache_detail *cd = PDE_DATA(inode);
1601
1602	return content_release(inode, filp, cd);
1603}
1604
1605static const struct file_operations content_file_operations_procfs = {
1606	.open		= content_open_procfs,
1607	.read		= seq_read,
1608	.llseek		= seq_lseek,
1609	.release	= content_release_procfs,
1610};
1611
1612static int open_flush_procfs(struct inode *inode, struct file *filp)
1613{
1614	struct cache_detail *cd = PDE_DATA(inode);
1615
1616	return open_flush(inode, filp, cd);
1617}
1618
1619static int release_flush_procfs(struct inode *inode, struct file *filp)
1620{
1621	struct cache_detail *cd = PDE_DATA(inode);
1622
1623	return release_flush(inode, filp, cd);
1624}
1625
1626static ssize_t read_flush_procfs(struct file *filp, char __user *buf,
1627			    size_t count, loff_t *ppos)
1628{
1629	struct cache_detail *cd = PDE_DATA(file_inode(filp));
1630
1631	return read_flush(filp, buf, count, ppos, cd);
1632}
1633
1634static ssize_t write_flush_procfs(struct file *filp,
1635				  const char __user *buf,
1636				  size_t count, loff_t *ppos)
1637{
1638	struct cache_detail *cd = PDE_DATA(file_inode(filp));
1639
1640	return write_flush(filp, buf, count, ppos, cd);
1641}
1642
1643static const struct file_operations cache_flush_operations_procfs = {
1644	.open		= open_flush_procfs,
1645	.read		= read_flush_procfs,
1646	.write		= write_flush_procfs,
1647	.release	= release_flush_procfs,
1648	.llseek		= no_llseek,
1649};
1650
1651static void remove_cache_proc_entries(struct cache_detail *cd)
1652{
1653	if (cd->procfs) {
1654		proc_remove(cd->procfs);
1655		cd->procfs = NULL;
1656	}
1657}
1658
1659#ifdef CONFIG_PROC_FS
1660static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
1661{
1662	struct proc_dir_entry *p;
1663	struct sunrpc_net *sn;
1664
1665	sn = net_generic(net, sunrpc_net_id);
1666	cd->procfs = proc_mkdir(cd->name, sn->proc_net_rpc);
1667	if (cd->procfs == NULL)
1668		goto out_nomem;
1669
1670	p = proc_create_data("flush", S_IFREG | 0600,
1671			     cd->procfs, &cache_flush_operations_procfs, cd);
1672	if (p == NULL)
1673		goto out_nomem;
1674
1675	if (cd->cache_request || cd->cache_parse) {
1676		p = proc_create_data("channel", S_IFREG | 0600, cd->procfs,
1677				     &cache_file_operations_procfs, cd);
1678		if (p == NULL)
1679			goto out_nomem;
1680	}
1681	if (cd->cache_show) {
1682		p = proc_create_data("content", S_IFREG | 0400, cd->procfs,
1683				     &content_file_operations_procfs, cd);
1684		if (p == NULL)
1685			goto out_nomem;
1686	}
1687	return 0;
1688out_nomem:
1689	remove_cache_proc_entries(cd);
1690	return -ENOMEM;
1691}
1692#else /* CONFIG_PROC_FS */
1693static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
1694{
1695	return 0;
1696}
1697#endif
1698
1699void __init cache_initialize(void)
1700{
1701	INIT_DEFERRABLE_WORK(&cache_cleaner, do_cache_clean);
1702}
1703
1704int cache_register_net(struct cache_detail *cd, struct net *net)
1705{
1706	int ret;
1707
1708	sunrpc_init_cache_detail(cd);
1709	ret = create_cache_proc_entries(cd, net);
1710	if (ret)
1711		sunrpc_destroy_cache_detail(cd);
1712	return ret;
1713}
1714EXPORT_SYMBOL_GPL(cache_register_net);
1715
1716void cache_unregister_net(struct cache_detail *cd, struct net *net)
1717{
1718	remove_cache_proc_entries(cd);
1719	sunrpc_destroy_cache_detail(cd);
1720}
1721EXPORT_SYMBOL_GPL(cache_unregister_net);
1722
1723struct cache_detail *cache_create_net(const struct cache_detail *tmpl, struct net *net)
1724{
1725	struct cache_detail *cd;
1726	int i;
1727
1728	cd = kmemdup(tmpl, sizeof(struct cache_detail), GFP_KERNEL);
1729	if (cd == NULL)
1730		return ERR_PTR(-ENOMEM);
1731
1732	cd->hash_table = kcalloc(cd->hash_size, sizeof(struct hlist_head),
1733				 GFP_KERNEL);
1734	if (cd->hash_table == NULL) {
1735		kfree(cd);
1736		return ERR_PTR(-ENOMEM);
1737	}
1738
1739	for (i = 0; i < cd->hash_size; i++)
1740		INIT_HLIST_HEAD(&cd->hash_table[i]);
1741	cd->net = net;
1742	return cd;
1743}
1744EXPORT_SYMBOL_GPL(cache_create_net);
1745
1746void cache_destroy_net(struct cache_detail *cd, struct net *net)
1747{
1748	kfree(cd->hash_table);
1749	kfree(cd);
1750}
1751EXPORT_SYMBOL_GPL(cache_destroy_net);
1752
1753static ssize_t cache_read_pipefs(struct file *filp, char __user *buf,
1754				 size_t count, loff_t *ppos)
1755{
1756	struct cache_detail *cd = RPC_I(file_inode(filp))->private;
1757
1758	return cache_read(filp, buf, count, ppos, cd);
1759}
1760
1761static ssize_t cache_write_pipefs(struct file *filp, const char __user *buf,
1762				  size_t count, loff_t *ppos)
1763{
1764	struct cache_detail *cd = RPC_I(file_inode(filp))->private;
1765
1766	return cache_write(filp, buf, count, ppos, cd);
1767}
1768
1769static __poll_t cache_poll_pipefs(struct file *filp, poll_table *wait)
1770{
1771	struct cache_detail *cd = RPC_I(file_inode(filp))->private;
1772
1773	return cache_poll(filp, wait, cd);
1774}
1775
1776static long cache_ioctl_pipefs(struct file *filp,
1777			      unsigned int cmd, unsigned long arg)
1778{
1779	struct inode *inode = file_inode(filp);
1780	struct cache_detail *cd = RPC_I(inode)->private;
1781
1782	return cache_ioctl(inode, filp, cmd, arg, cd);
1783}
1784
1785static int cache_open_pipefs(struct inode *inode, struct file *filp)
1786{
1787	struct cache_detail *cd = RPC_I(inode)->private;
1788
1789	return cache_open(inode, filp, cd);
1790}
1791
1792static int cache_release_pipefs(struct inode *inode, struct file *filp)
1793{
1794	struct cache_detail *cd = RPC_I(inode)->private;
1795
1796	return cache_release(inode, filp, cd);
1797}
1798
1799const struct file_operations cache_file_operations_pipefs = {
1800	.owner		= THIS_MODULE,
1801	.llseek		= no_llseek,
1802	.read		= cache_read_pipefs,
1803	.write		= cache_write_pipefs,
1804	.poll		= cache_poll_pipefs,
1805	.unlocked_ioctl	= cache_ioctl_pipefs, /* for FIONREAD */
1806	.open		= cache_open_pipefs,
1807	.release	= cache_release_pipefs,
1808};
1809
1810static int content_open_pipefs(struct inode *inode, struct file *filp)
1811{
1812	struct cache_detail *cd = RPC_I(inode)->private;
1813
1814	return content_open(inode, filp, cd);
1815}
1816
1817static int content_release_pipefs(struct inode *inode, struct file *filp)
1818{
1819	struct cache_detail *cd = RPC_I(inode)->private;
1820
1821	return content_release(inode, filp, cd);
1822}
1823
1824const struct file_operations content_file_operations_pipefs = {
1825	.open		= content_open_pipefs,
1826	.read		= seq_read,
1827	.llseek		= seq_lseek,
1828	.release	= content_release_pipefs,
1829};
1830
1831static int open_flush_pipefs(struct inode *inode, struct file *filp)
1832{
1833	struct cache_detail *cd = RPC_I(inode)->private;
1834
1835	return open_flush(inode, filp, cd);
1836}
1837
1838static int release_flush_pipefs(struct inode *inode, struct file *filp)
1839{
1840	struct cache_detail *cd = RPC_I(inode)->private;
1841
1842	return release_flush(inode, filp, cd);
1843}
1844
1845static ssize_t read_flush_pipefs(struct file *filp, char __user *buf,
1846			    size_t count, loff_t *ppos)
1847{
1848	struct cache_detail *cd = RPC_I(file_inode(filp))->private;
1849
1850	return read_flush(filp, buf, count, ppos, cd);
1851}
1852
1853static ssize_t write_flush_pipefs(struct file *filp,
1854				  const char __user *buf,
1855				  size_t count, loff_t *ppos)
1856{
1857	struct cache_detail *cd = RPC_I(file_inode(filp))->private;
1858
1859	return write_flush(filp, buf, count, ppos, cd);
1860}
1861
1862const struct file_operations cache_flush_operations_pipefs = {
1863	.open		= open_flush_pipefs,
1864	.read		= read_flush_pipefs,
1865	.write		= write_flush_pipefs,
1866	.release	= release_flush_pipefs,
1867	.llseek		= no_llseek,
1868};
1869
1870int sunrpc_cache_register_pipefs(struct dentry *parent,
1871				 const char *name, umode_t umode,
1872				 struct cache_detail *cd)
1873{
1874	struct dentry *dir = rpc_create_cache_dir(parent, name, umode, cd);
1875	if (IS_ERR(dir))
1876		return PTR_ERR(dir);
1877	cd->pipefs = dir;
1878	return 0;
1879}
1880EXPORT_SYMBOL_GPL(sunrpc_cache_register_pipefs);
1881
1882void sunrpc_cache_unregister_pipefs(struct cache_detail *cd)
1883{
1884	if (cd->pipefs) {
1885		rpc_remove_cache_dir(cd->pipefs);
1886		cd->pipefs = NULL;
1887	}
1888}
1889EXPORT_SYMBOL_GPL(sunrpc_cache_unregister_pipefs);
1890
1891void sunrpc_cache_unhash(struct cache_detail *cd, struct cache_head *h)
1892{
1893	spin_lock(&cd->hash_lock);
 1894	if (!hlist_unhashed(&h->cache_list)) {
1895		hlist_del_init_rcu(&h->cache_list);
1896		cd->entries--;
1897		spin_unlock(&cd->hash_lock);
1898		cache_put(h, cd);
1899	} else
1900		spin_unlock(&cd->hash_lock);
1901}
1902EXPORT_SYMBOL_GPL(sunrpc_cache_unhash);
v5.9
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * net/sunrpc/cache.c
   4 *
   5 * Generic code for various authentication-related caches
   6 * used by sunrpc clients and servers.
   7 *
   8 * Copyright (C) 2002 Neil Brown <neilb@cse.unsw.edu.au>
   9 */
  10
  11#include <linux/types.h>
  12#include <linux/fs.h>
  13#include <linux/file.h>
  14#include <linux/slab.h>
  15#include <linux/signal.h>
  16#include <linux/sched.h>
  17#include <linux/kmod.h>
  18#include <linux/list.h>
  19#include <linux/module.h>
  20#include <linux/ctype.h>
  21#include <linux/string_helpers.h>
  22#include <linux/uaccess.h>
  23#include <linux/poll.h>
  24#include <linux/seq_file.h>
  25#include <linux/proc_fs.h>
  26#include <linux/net.h>
  27#include <linux/workqueue.h>
  28#include <linux/mutex.h>
  29#include <linux/pagemap.h>
  30#include <asm/ioctls.h>
  31#include <linux/sunrpc/types.h>
  32#include <linux/sunrpc/cache.h>
  33#include <linux/sunrpc/stats.h>
  34#include <linux/sunrpc/rpc_pipe_fs.h>
  35#include <trace/events/sunrpc.h>
  36#include "netns.h"
  37
  38#define	 RPCDBG_FACILITY RPCDBG_CACHE
  39
  40static bool cache_defer_req(struct cache_req *req, struct cache_head *item);
  41static void cache_revisit_request(struct cache_head *item);
  42
  43static void cache_init(struct cache_head *h, struct cache_detail *detail)
  44{
  45	time64_t now = seconds_since_boot();
  46	INIT_HLIST_NODE(&h->cache_list);
  47	h->flags = 0;
  48	kref_init(&h->ref);
  49	h->expiry_time = now + CACHE_NEW_EXPIRY;
  50	if (now <= detail->flush_time)
  51		/* ensure it isn't already expired */
  52		now = detail->flush_time + 1;
  53	h->last_refresh = now;
  54}
  55
  56static void cache_fresh_unlocked(struct cache_head *head,
  57				struct cache_detail *detail);
  58
  59static struct cache_head *sunrpc_cache_find_rcu(struct cache_detail *detail,
  60						struct cache_head *key,
  61						int hash)
  62{
  63	struct hlist_head *head = &detail->hash_table[hash];
  64	struct cache_head *tmp;
  65
  66	rcu_read_lock();
  67	hlist_for_each_entry_rcu(tmp, head, cache_list) {
  68		if (!detail->match(tmp, key))
  69			continue;
  70		if (test_bit(CACHE_VALID, &tmp->flags) &&
  71		    cache_is_expired(detail, tmp))
  72			continue;
  73		tmp = cache_get_rcu(tmp);
  74		rcu_read_unlock();
  75		return tmp;
  76	}
  77	rcu_read_unlock();
  78	return NULL;
  79}
  80
  81static void sunrpc_begin_cache_remove_entry(struct cache_head *ch,
  82					    struct cache_detail *cd)
  83{
  84	/* Must be called under cd->hash_lock */
  85	hlist_del_init_rcu(&ch->cache_list);
  86	set_bit(CACHE_CLEANED, &ch->flags);
  87	cd->entries--;
  88}
  89
  90static void sunrpc_end_cache_remove_entry(struct cache_head *ch,
  91					  struct cache_detail *cd)
  92{
  93	cache_fresh_unlocked(ch, cd);
  94	cache_put(ch, cd);
  95}
  96
  97static struct cache_head *sunrpc_cache_add_entry(struct cache_detail *detail,
  98						 struct cache_head *key,
  99						 int hash)
 100{
 101	struct cache_head *new, *tmp, *freeme = NULL;
 102	struct hlist_head *head = &detail->hash_table[hash];
 103
 104	new = detail->alloc();
 105	if (!new)
 106		return NULL;
 107	/* must fully initialise 'new', else
 108	 * we might lose it if we need to
 109	 * cache_put it soon.
 110	 */
 111	cache_init(new, detail);
 112	detail->init(new, key);
 113
 114	spin_lock(&detail->hash_lock);
 115
 116	/* check if entry appeared while we slept */
 117	hlist_for_each_entry_rcu(tmp, head, cache_list,
 118				 lockdep_is_held(&detail->hash_lock)) {
 119		if (!detail->match(tmp, key))
 120			continue;
 121		if (test_bit(CACHE_VALID, &tmp->flags) &&
 122		    cache_is_expired(detail, tmp)) {
 123			sunrpc_begin_cache_remove_entry(tmp, detail);
 124			trace_cache_entry_expired(detail, tmp);
 125			freeme = tmp;
 126			break;
 127		}
 128		cache_get(tmp);
 129		spin_unlock(&detail->hash_lock);
 130		cache_put(new, detail);
 131		return tmp;
 132	}
 133
 134	hlist_add_head_rcu(&new->cache_list, head);
 135	detail->entries++;
 136	cache_get(new);
 137	spin_unlock(&detail->hash_lock);
 138
 139	if (freeme)
 140		sunrpc_end_cache_remove_entry(freeme, detail);
 141	return new;
 142}
 143
 144struct cache_head *sunrpc_cache_lookup_rcu(struct cache_detail *detail,
 145					   struct cache_head *key, int hash)
 146{
 147	struct cache_head *ret;
 148
 149	ret = sunrpc_cache_find_rcu(detail, key, hash);
 150	if (ret)
 151		return ret;
 152	/* Didn't find anything, insert an empty entry */
 153	return sunrpc_cache_add_entry(detail, key, hash);
 154}
 155EXPORT_SYMBOL_GPL(sunrpc_cache_lookup_rcu);
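
/*
 * A minimal sketch (not from the kernel tree; all demo_* names are
 * hypothetical) of a sunrpc_cache_lookup_rcu() user.  In-tree caches
 * such as svc_export in fs/nfsd/export.c follow the same pattern.
 */
#include <linux/hash.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sunrpc/cache.h>

#define DEMO_HASH_BITS	4

struct demo_ent {
	struct cache_head	h;	/* embedded first, by convention */
	int			key;
	int			value;
};

static struct cache_head *demo_alloc(void)
{
	struct demo_ent *e = kzalloc(sizeof(*e), GFP_KERNEL);

	return e ? &e->h : NULL;
}

static int demo_match(struct cache_head *a, struct cache_head *b)
{
	return container_of(a, struct demo_ent, h)->key ==
	       container_of(b, struct demo_ent, h)->key;
}

/* copy only the key (not the content) into a freshly allocated entry */
static void demo_init(struct cache_head *new, struct cache_head *key)
{
	container_of(new, struct demo_ent, h)->key =
		container_of(key, struct demo_ent, h)->key;
}

static struct cache_detail demo_cache_template = {
	.owner		= THIS_MODULE,
	.hash_size	= 1 << DEMO_HASH_BITS,
	.name		= "demo",
	.match		= demo_match,
	.init		= demo_init,
	.alloc		= demo_alloc,
	/* .update, .cache_put and the upcall/parse hooks omitted here */
};

/*
 * Look up (or insert a not-yet-valid entry for) 'key'.  The returned
 * entry is referenced; the caller must cache_put() it when done.
 */
static struct demo_ent *demo_lookup(struct cache_detail *cd, int key)
{
	struct demo_ent k = { .key = key };
	struct cache_head *ch;

	ch = sunrpc_cache_lookup_rcu(cd, &k.h, hash_32(key, DEMO_HASH_BITS));
	return ch ? container_of(ch, struct demo_ent, h) : NULL;
}
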
 156
 157static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch);
 158
 159static void cache_fresh_locked(struct cache_head *head, time64_t expiry,
 160			       struct cache_detail *detail)
 161{
 162	time64_t now = seconds_since_boot();
 163	if (now <= detail->flush_time)
 164		/* ensure it isn't immediately treated as expired */
 165		now = detail->flush_time + 1;
 166	head->expiry_time = expiry;
 167	head->last_refresh = now;
 168	smp_wmb(); /* paired with smp_rmb() in cache_is_valid() */
 169	set_bit(CACHE_VALID, &head->flags);
 170}
 171
 172static void cache_fresh_unlocked(struct cache_head *head,
 173				 struct cache_detail *detail)
 174{
 175	if (test_and_clear_bit(CACHE_PENDING, &head->flags)) {
 176		cache_revisit_request(head);
 177		cache_dequeue(detail, head);
 178	}
 179}
 180
 181static void cache_make_negative(struct cache_detail *detail,
 182				struct cache_head *h)
 183{
 184	set_bit(CACHE_NEGATIVE, &h->flags);
 185	trace_cache_entry_make_negative(detail, h);
 186}
 187
 188static void cache_entry_update(struct cache_detail *detail,
 189			       struct cache_head *h,
 190			       struct cache_head *new)
 191{
 192	if (!test_bit(CACHE_NEGATIVE, &new->flags)) {
 193		detail->update(h, new);
 194		trace_cache_entry_update(detail, h);
 195	} else {
 196		cache_make_negative(detail, h);
 197	}
 198}
 199
 200struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
 201				       struct cache_head *new, struct cache_head *old, int hash)
 202{
 203	/* The 'old' entry is to be replaced by 'new'.
 204	 * If 'old' is not VALID, we update it directly,
 205	 * otherwise we need to replace it
 206	 */
 207	struct cache_head *tmp;
 208
 209	if (!test_bit(CACHE_VALID, &old->flags)) {
 210		spin_lock(&detail->hash_lock);
 211		if (!test_bit(CACHE_VALID, &old->flags)) {
 212			cache_entry_update(detail, old, new);
 213			cache_fresh_locked(old, new->expiry_time, detail);
 214			spin_unlock(&detail->hash_lock);
 215			cache_fresh_unlocked(old, detail);
 216			return old;
 217		}
 218		spin_unlock(&detail->hash_lock);
 219	}
 220	/* We need to insert a new entry */
 221	tmp = detail->alloc();
 222	if (!tmp) {
 223		cache_put(old, detail);
 224		return NULL;
 225	}
 226	cache_init(tmp, detail);
 227	detail->init(tmp, old);
 228
 229	spin_lock(&detail->hash_lock);
 230	cache_entry_update(detail, tmp, new);
 231	hlist_add_head(&tmp->cache_list, &detail->hash_table[hash]);
 232	detail->entries++;
 233	cache_get(tmp);
 234	cache_fresh_locked(tmp, new->expiry_time, detail);
 235	cache_fresh_locked(old, 0, detail);
 236	spin_unlock(&detail->hash_lock);
 237	cache_fresh_unlocked(tmp, detail);
 238	cache_fresh_unlocked(old, detail);
 239	cache_put(old, detail);
 240	return tmp;
 241}
 242EXPORT_SYMBOL_GPL(sunrpc_cache_update);
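
/*
 * A minimal sketch of the downcall pattern around sunrpc_cache_update(),
 * continuing the hypothetical demo_* cache above (it also assumes the
 * template gained an ->update() that copies 'value' across).  The caller
 * holds a reference on 'target'; on failure that reference has already
 * been dropped, on success the returned entry must be put.
 */
static int demo_commit(struct cache_detail *cd, struct demo_ent *target,
		       int value, time64_t expiry)
{
	struct demo_ent new = {
		.key		= target->key,
		.value		= value,
		.h.expiry_time	= expiry,
	};
	struct cache_head *ch;

	/* set_bit(CACHE_NEGATIVE, &new.h.flags) here would record a miss */
	ch = sunrpc_cache_update(cd, &new.h, &target->h,
				 hash_32(new.key, DEMO_HASH_BITS));
	if (!ch)
		return -ENOMEM;
	cache_put(ch, cd);
	return 0;
}
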
 243
 244static inline int cache_is_valid(struct cache_head *h)
 245{
 246	if (!test_bit(CACHE_VALID, &h->flags))
 247		return -EAGAIN;
 248	else {
 249		/* entry is valid */
 250		if (test_bit(CACHE_NEGATIVE, &h->flags))
 251			return -ENOENT;
 252		else {
 253			/*
 254			 * In combination with write barrier in
 255			 * sunrpc_cache_update, ensures that anyone
 256			 * using the cache entry after this sees the
 257			 * updated contents:
 258			 */
 259			smp_rmb();
 260			return 0;
 261		}
 262	}
 263}
 264
 265static int try_to_negate_entry(struct cache_detail *detail, struct cache_head *h)
 266{
 267	int rv;
 268
 269	spin_lock(&detail->hash_lock);
 270	rv = cache_is_valid(h);
 271	if (rv == -EAGAIN) {
 272		cache_make_negative(detail, h);
 273		cache_fresh_locked(h, seconds_since_boot()+CACHE_NEW_EXPIRY,
 274				   detail);
 275		rv = -ENOENT;
 276	}
 277	spin_unlock(&detail->hash_lock);
 278	cache_fresh_unlocked(h, detail);
 279	return rv;
 280}
 281
 282/*
 283 * This is the generic cache management routine for all
 284 * the authentication caches.
 285 * It checks the currency of a cache item and will (later)
 286 * initiate an upcall to fill it if needed.
 287 *
 288 *
 289 * Returns 0 if the cache_head can be used, or cache_puts it and returns
 290 * -EAGAIN if upcall is pending and request has been queued
 291 * -ETIMEDOUT if upcall failed or request could not be queued or
 292 *           upcall completed but item is still invalid (implying that
 293 *           the cache item has been replaced with a newer one).
 294 * -ENOENT if cache entry was negative
 295 */
 296int cache_check(struct cache_detail *detail,
 297		    struct cache_head *h, struct cache_req *rqstp)
 298{
 299	int rv;
 300	time64_t refresh_age, age;
 301
 302	/* First decide return status as best we can */
 303	rv = cache_is_valid(h);
 304
 305	/* now see if we want to start an upcall */
 306	refresh_age = (h->expiry_time - h->last_refresh);
 307	age = seconds_since_boot() - h->last_refresh;
 308
 309	if (rqstp == NULL) {
 310		if (rv == -EAGAIN)
 311			rv = -ENOENT;
 312	} else if (rv == -EAGAIN ||
 313		   (h->expiry_time != 0 && age > refresh_age/2)) {
 314		dprintk("RPC:       Want update, refage=%lld, age=%lld\n",
 315				refresh_age, age);
 316		switch (detail->cache_upcall(detail, h)) {
 317		case -EINVAL:
 318			rv = try_to_negate_entry(detail, h);
 319			break;
 320		case -EAGAIN:
 321			cache_fresh_unlocked(h, detail);
 322			break;
 323		}
 324	}
 325
 326	if (rv == -EAGAIN) {
 327		if (!cache_defer_req(rqstp, h)) {
 328			/*
 329			 * Request was not deferred; handle it as best
 330			 * we can ourselves:
 331			 */
 332			rv = cache_is_valid(h);
 333			if (rv == -EAGAIN)
 334				rv = -ETIMEDOUT;
 335		}
 336	}
 337	if (rv)
 338		cache_put(h, detail);
 339	return rv;
 340}
 341EXPORT_SYMBOL_GPL(cache_check);
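
/*
 * A minimal sketch of a cache_check() caller, using the hypothetical
 * demo_* cache above.  Note the asymmetric reference handling the
 * comment above describes: on 0 the caller still owns its reference,
 * on any error cache_check() has already dropped it.
 */
static int demo_use(struct cache_detail *cd, struct demo_ent *e,
		    struct cache_req *rqstp)
{
	int err = cache_check(cd, &e->h, rqstp);

	if (err == 0) {
		pr_info("demo: key=%d value=%d\n", e->key, e->value);
		cache_put(&e->h, cd);
	}
	/* -EAGAIN: deferred and queued; -ENOENT: negative entry;
	 * -ETIMEDOUT: upcall failed or could not be queued.
	 */
	return err;
}
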
 342
 343/*
 344 * caches need to be periodically cleaned.
 345 * For this we maintain a list of cache_detail and
 346 * a current pointer into that list and into the table
 347 * for that entry.
 348 *
 349 * Each time cache_clean is called it finds the next non-empty entry
 350 * in the current table and walks the list in that entry
 351 * looking for entries that can be removed.
 352 *
 353 * An entry gets removed if:
 354 * - The expiry is before current time
 355 * - The last_refresh time is before the flush_time for that cache
 356 *
 357 * later we might drop old entries with non-NEVER expiry if that table
 358 * is getting 'full' for some definition of 'full'
 359 *
 360 * The question of "how often to scan a table" is an interesting one
 361 * and is answered in part by the use of the "nextcheck" field in the
 362 * cache_detail.
 363 * When a scan of a table begins, the nextcheck field is set to a time
 364 * that is well into the future.
 365 * While scanning, if an expiry time is found that is earlier than the
 366 * current nextcheck time, nextcheck is set to that expiry time.
 367 * If the flush_time is ever set to a time earlier than the nextcheck
 368 * time, the nextcheck time is then set to that flush_time.
 369 *
 370 * A table is then only scanned if the current time is at least
 371 * the nextcheck time.
 372 *
 373 */
 374
 375static LIST_HEAD(cache_list);
 376static DEFINE_SPINLOCK(cache_list_lock);
 377static struct cache_detail *current_detail;
 378static int current_index;
 379
 380static void do_cache_clean(struct work_struct *work);
 381static struct delayed_work cache_cleaner;
 382
 383void sunrpc_init_cache_detail(struct cache_detail *cd)
 384{
 385	spin_lock_init(&cd->hash_lock);
 386	INIT_LIST_HEAD(&cd->queue);
 387	spin_lock(&cache_list_lock);
 388	cd->nextcheck = 0;
 389	cd->entries = 0;
 390	atomic_set(&cd->writers, 0);
 391	cd->last_close = 0;
 392	cd->last_warn = -1;
 393	list_add(&cd->others, &cache_list);
 394	spin_unlock(&cache_list_lock);
 395
 396	/* start the cleaning process */
 397	queue_delayed_work(system_power_efficient_wq, &cache_cleaner, 0);
 398}
 399EXPORT_SYMBOL_GPL(sunrpc_init_cache_detail);
 400
 401void sunrpc_destroy_cache_detail(struct cache_detail *cd)
 402{
 403	cache_purge(cd);
 404	spin_lock(&cache_list_lock);
 405	spin_lock(&cd->hash_lock);
 406	if (current_detail == cd)
 407		current_detail = NULL;
 408	list_del_init(&cd->others);
 409	spin_unlock(&cd->hash_lock);
 410	spin_unlock(&cache_list_lock);
 411	if (list_empty(&cache_list)) {
 412		/* module must be being unloaded so it's safe to kill the worker */
 413		cancel_delayed_work_sync(&cache_cleaner);
 414	}
 415}
 416EXPORT_SYMBOL_GPL(sunrpc_destroy_cache_detail);
 417
 418/* clean cache tries to find something to clean
 419 * and cleans it.
 420 * It returns 1 if it cleaned something,
 421 *            0 if it didn't find anything this time
 422 *           -1 if it fell off the end of the list.
 423 */
 424static int cache_clean(void)
 425{
 426	int rv = 0;
 427	struct list_head *next;
 428
 429	spin_lock(&cache_list_lock);
 430
 431	/* find a suitable table if we don't already have one */
 432	while (current_detail == NULL ||
 433	    current_index >= current_detail->hash_size) {
 434		if (current_detail)
 435			next = current_detail->others.next;
 436		else
 437			next = cache_list.next;
 438		if (next == &cache_list) {
 439			current_detail = NULL;
 440			spin_unlock(&cache_list_lock);
 441			return -1;
 442		}
 443		current_detail = list_entry(next, struct cache_detail, others);
 444		if (current_detail->nextcheck > seconds_since_boot())
 445			current_index = current_detail->hash_size;
 446		else {
 447			current_index = 0;
 448			current_detail->nextcheck = seconds_since_boot()+30*60;
 449		}
 450	}
 451
 452	/* find a non-empty bucket in the table */
 453	while (current_detail &&
 454	       current_index < current_detail->hash_size &&
 455	       hlist_empty(&current_detail->hash_table[current_index]))
 456		current_index++;
 457
 458	/* find a cleanable entry in the bucket and clean it, or advance to the next bucket */
 459
 460	if (current_detail && current_index < current_detail->hash_size) {
 461		struct cache_head *ch = NULL;
 462		struct cache_detail *d;
 463		struct hlist_head *head;
 464		struct hlist_node *tmp;
 465
 466		spin_lock(&current_detail->hash_lock);
 467
 468		/* Ok, now to clean this strand */
 469
 470		head = &current_detail->hash_table[current_index];
 471		hlist_for_each_entry_safe(ch, tmp, head, cache_list) {
 472			if (current_detail->nextcheck > ch->expiry_time)
 473				current_detail->nextcheck = ch->expiry_time+1;
 474			if (!cache_is_expired(current_detail, ch))
 475				continue;
 476
 477			sunrpc_begin_cache_remove_entry(ch, current_detail);
 478			trace_cache_entry_expired(current_detail, ch);
 479			rv = 1;
 480			break;
 481		}
 482
 483		spin_unlock(&current_detail->hash_lock);
 484		d = current_detail;
 485		if (!ch)
 486			current_index++;
 487		spin_unlock(&cache_list_lock);
 488		if (ch)
 489			sunrpc_end_cache_remove_entry(ch, d);
 490	} else
 491		spin_unlock(&cache_list_lock);
 492
 493	return rv;
 494}
 495
 496/*
 497 * We want to regularly clean the cache, so we need to schedule some work ...
 498 */
 499static void do_cache_clean(struct work_struct *work)
 500{
 501	int delay = 5;
 502	if (cache_clean() == -1)
 503		delay = round_jiffies_relative(30*HZ);
 504
 505	if (list_empty(&cache_list))
 506		delay = 0;
 507
 508	if (delay)
 509		queue_delayed_work(system_power_efficient_wq,
 510				   &cache_cleaner, delay);
 511}
 512
 513
 514/*
 515 * Clean all caches promptly.  This just calls cache_clean
 516 * repeatedly until we are sure that every cache has had a chance to
 517 * be fully cleaned
 518 */
 519void cache_flush(void)
 520{
 521	while (cache_clean() != -1)
 522		cond_resched();
 523	while (cache_clean() != -1)
 524		cond_resched();
 525}
 526EXPORT_SYMBOL_GPL(cache_flush);
 527
 528void cache_purge(struct cache_detail *detail)
 529{
 530	struct cache_head *ch = NULL;
 531	struct hlist_head *head = NULL;
 532	int i = 0;
 533
 534	spin_lock(&detail->hash_lock);
 535	if (!detail->entries) {
 536		spin_unlock(&detail->hash_lock);
 537		return;
 538	}
 539
 540	dprintk("RPC: %d entries in %s cache\n", detail->entries, detail->name);
 541	for (i = 0; i < detail->hash_size; i++) {
 542		head = &detail->hash_table[i];
 543		while (!hlist_empty(head)) {
 544			ch = hlist_entry(head->first, struct cache_head,
 545					 cache_list);
 546			sunrpc_begin_cache_remove_entry(ch, detail);
 547			spin_unlock(&detail->hash_lock);
 548			sunrpc_end_cache_remove_entry(ch, detail);
 549			spin_lock(&detail->hash_lock);
 550		}
 551	}
 552	spin_unlock(&detail->hash_lock);
 553}
 554EXPORT_SYMBOL_GPL(cache_purge);
 555
 556
 557/*
 558 * Deferral and Revisiting of Requests.
 559 *
 560 * If a cache lookup finds a pending entry, we
 561 * need to defer the request and revisit it later.
 562 * All deferred requests are stored in a hash table,
 563 * indexed by "struct cache_head *".
 564 * As it may be wasteful to store a whole request
 565 * structure, we allow the request to provide a
 566 * deferred form, which must contain a
 567 * 'struct cache_deferred_req'
 568 * This cache_deferred_req contains a method to allow
 569 * it to be revisited when cache info is available
 570 */
 571
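
/*
 * A minimal sketch of a ->defer() provider (hypothetical demo_* names;
 * the real in-tree example is svc_defer() in net/sunrpc/svc_xprt.c).
 * The contract: return a cache_deferred_req whose ->revisit() either
 * requeues the request (too_many == 0) or drops it (too_many != 0).
 */
struct demo_req {
	struct cache_req		creq;	/* handed to cache_check() */
	struct cache_deferred_req	deferred;
};

static void demo_revisit(struct cache_deferred_req *dreq, int too_many)
{
	struct demo_req *r = container_of(dreq, struct demo_req, deferred);

	/* too_many: discarded to bound the queue; else the item is ready */
	pr_debug("demo: revisit %p too_many=%d\n", r, too_many);
}

static struct cache_deferred_req *demo_defer(struct cache_req *creq)
{
	struct demo_req *r = container_of(creq, struct demo_req, creq);

	r->deferred.revisit = demo_revisit;
	r->deferred.owner = r;	/* matched by cache_clean_deferred() */
	return &r->deferred;
}
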
 572#define	DFR_HASHSIZE	(PAGE_SIZE/sizeof(struct list_head))
 573#define	DFR_HASH(item)	((((long)item)>>4 ^ (((long)item)>>13)) % DFR_HASHSIZE)
 574
 575#define	DFR_MAX	300	/* ??? */
 576
 577static DEFINE_SPINLOCK(cache_defer_lock);
 578static LIST_HEAD(cache_defer_list);
 579static struct hlist_head cache_defer_hash[DFR_HASHSIZE];
 580static int cache_defer_cnt;
 581
 582static void __unhash_deferred_req(struct cache_deferred_req *dreq)
 583{
 584	hlist_del_init(&dreq->hash);
 585	if (!list_empty(&dreq->recent)) {
 586		list_del_init(&dreq->recent);
 587		cache_defer_cnt--;
 588	}
 589}
 590
 591static void __hash_deferred_req(struct cache_deferred_req *dreq, struct cache_head *item)
 592{
 593	int hash = DFR_HASH(item);
 594
 595	INIT_LIST_HEAD(&dreq->recent);
 596	hlist_add_head(&dreq->hash, &cache_defer_hash[hash]);
 597}
 598
 599static void setup_deferral(struct cache_deferred_req *dreq,
 600			   struct cache_head *item,
 601			   int count_me)
 602{
 603
 604	dreq->item = item;
 605
 606	spin_lock(&cache_defer_lock);
 607
 608	__hash_deferred_req(dreq, item);
 609
 610	if (count_me) {
 611		cache_defer_cnt++;
 612		list_add(&dreq->recent, &cache_defer_list);
 613	}
 614
 615	spin_unlock(&cache_defer_lock);
 616
 617}
 618
 619struct thread_deferred_req {
 620	struct cache_deferred_req handle;
 621	struct completion completion;
 622};
 623
 624static void cache_restart_thread(struct cache_deferred_req *dreq, int too_many)
 625{
 626	struct thread_deferred_req *dr =
 627		container_of(dreq, struct thread_deferred_req, handle);
 628	complete(&dr->completion);
 629}
 630
 631static void cache_wait_req(struct cache_req *req, struct cache_head *item)
 632{
 633	struct thread_deferred_req sleeper;
 634	struct cache_deferred_req *dreq = &sleeper.handle;
 635
 636	sleeper.completion = COMPLETION_INITIALIZER_ONSTACK(sleeper.completion);
 637	dreq->revisit = cache_restart_thread;
 638
 639	setup_deferral(dreq, item, 0);
 640
 641	if (!test_bit(CACHE_PENDING, &item->flags) ||
 642	    wait_for_completion_interruptible_timeout(
 643		    &sleeper.completion, req->thread_wait) <= 0) {
 644		/* The completion wasn't completed, so we need
 645		 * to clean up
 646		 */
 647		spin_lock(&cache_defer_lock);
 648		if (!hlist_unhashed(&sleeper.handle.hash)) {
 649			__unhash_deferred_req(&sleeper.handle);
 650			spin_unlock(&cache_defer_lock);
 651		} else {
 652			/* cache_revisit_request already removed
 653			 * this from the hash table, but hasn't
 654			 * called ->revisit yet.  It will very soon
 655			 * and we need to wait for it.
 656			 */
 657			spin_unlock(&cache_defer_lock);
 658			wait_for_completion(&sleeper.completion);
 659		}
 660	}
 661}
 662
 663static void cache_limit_defers(void)
 664{
 665	/* Make sure we haven't exceeded the limit of allowed deferred
 666	 * requests.
 667	 */
 668	struct cache_deferred_req *discard = NULL;
 669
 670	if (cache_defer_cnt <= DFR_MAX)
 671		return;
 672
 673	spin_lock(&cache_defer_lock);
 674
 675	/* Consider removing either the first or the last */
 676	if (cache_defer_cnt > DFR_MAX) {
 677		if (prandom_u32() & 1)
 678			discard = list_entry(cache_defer_list.next,
 679					     struct cache_deferred_req, recent);
 680		else
 681			discard = list_entry(cache_defer_list.prev,
 682					     struct cache_deferred_req, recent);
 683		__unhash_deferred_req(discard);
 684	}
 685	spin_unlock(&cache_defer_lock);
 686	if (discard)
 687		discard->revisit(discard, 1);
 688}
 689
 690/* Return true if and only if a deferred request is queued. */
 691static bool cache_defer_req(struct cache_req *req, struct cache_head *item)
 692{
 693	struct cache_deferred_req *dreq;
 694
 695	if (req->thread_wait) {
 696		cache_wait_req(req, item);
 697		if (!test_bit(CACHE_PENDING, &item->flags))
 698			return false;
 699	}
 700	dreq = req->defer(req);
 701	if (dreq == NULL)
 702		return false;
 703	setup_deferral(dreq, item, 1);
 704	if (!test_bit(CACHE_PENDING, &item->flags))
 705		/* Bit could have been cleared before we managed to
 706		 * set up the deferral, so need to revisit just in case
 707		 */
 708		cache_revisit_request(item);
 709
 710	cache_limit_defers();
 711	return true;
 712}
 713
 714static void cache_revisit_request(struct cache_head *item)
 715{
 716	struct cache_deferred_req *dreq;
 717	struct list_head pending;
 718	struct hlist_node *tmp;
 719	int hash = DFR_HASH(item);
 720
 721	INIT_LIST_HEAD(&pending);
 722	spin_lock(&cache_defer_lock);
 723
 724	hlist_for_each_entry_safe(dreq, tmp, &cache_defer_hash[hash], hash)
 725		if (dreq->item == item) {
 726			__unhash_deferred_req(dreq);
 727			list_add(&dreq->recent, &pending);
 728		}
 729
 730	spin_unlock(&cache_defer_lock);
 731
 732	while (!list_empty(&pending)) {
 733		dreq = list_entry(pending.next, struct cache_deferred_req, recent);
 734		list_del_init(&dreq->recent);
 735		dreq->revisit(dreq, 0);
 736	}
 737}
 738
 739void cache_clean_deferred(void *owner)
 740{
 741	struct cache_deferred_req *dreq, *tmp;
 742	struct list_head pending;
 743
 744
 745	INIT_LIST_HEAD(&pending);
 746	spin_lock(&cache_defer_lock);
 747
 748	list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
 749		if (dreq->owner == owner) {
 750			__unhash_deferred_req(dreq);
 751			list_add(&dreq->recent, &pending);
 752		}
 753	}
 754	spin_unlock(&cache_defer_lock);
 755
 756	while (!list_empty(&pending)) {
 757		dreq = list_entry(pending.next, struct cache_deferred_req, recent);
 758		list_del_init(&dreq->recent);
 759		dreq->revisit(dreq, 1);
 760	}
 761}
 762
 763/*
 764 * communicate with user-space
 765 *
 766 * We have a magic /proc file - /proc/net/rpc/<cachename>/channel.
 767 * On read, you get a full request, or block.
 768 * On write, an update request is processed.
 769 * Poll reports readable if there is anything to read, and always allows write.
 770 *
 771 * Implemented by linked list of requests.  Each open file has
 772 * a ->private that also exists in this list.  New requests are added
 773 * to the end and may wake up any preceding readers.
 774 * New readers are added to the head.  If, on read, an item is found with
 775 * CACHE_UPCALLING clear, we free it from the list.
 776 *
 777 */
 778
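
/*
 * A userspace sketch of the channel protocol described above, in the
 * style of rpc.mountd or svcgssd.  The "demo" path and the reply line
 * are hypothetical; the real downcall format is whatever the cache's
 * ->cache_parse() expects.
 */
#include <fcntl.h>
#include <poll.h>
#include <string.h>
#include <unistd.h>

int demo_channel_loop(void)
{
	char buf[8192];
	int fd = open("/proc/net/rpc/demo/channel", O_RDWR);
	struct pollfd pfd = { .fd = fd, .events = POLLIN };

	if (fd < 0)
		return 1;
	while (poll(&pfd, 1, -1) > 0) {
		/* each read() returns exactly one queued request */
		ssize_t n = read(fd, buf, sizeof(buf));

		if (n <= 0)
			continue;
		/* ... resolve the request in buf[0..n) ... then reply;
		 * each write() must be one complete downcall record:
		 */
		if (write(fd, "demokey 2147483647 demovalue\n", 29) < 0)
			break;
	}
	close(fd);
	return 0;
}
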
 779static DEFINE_SPINLOCK(queue_lock);
 780static DEFINE_MUTEX(queue_io_mutex);
 781
 782struct cache_queue {
 783	struct list_head	list;
 784	int			reader;	/* if 0, then request */
 785};
 786struct cache_request {
 787	struct cache_queue	q;
 788	struct cache_head	*item;
 789	char			*buf;
 790	int			len;
 791	int			readers;
 792};
 793struct cache_reader {
 794	struct cache_queue	q;
 795	int			offset;	/* if non-0, we have a refcnt on next request */
 796};
 797
 798static int cache_request(struct cache_detail *detail,
 799			       struct cache_request *crq)
 800{
 801	char *bp = crq->buf;
 802	int len = PAGE_SIZE;
 803
 804	detail->cache_request(detail, crq->item, &bp, &len);
 805	if (len < 0)
 806		return -EAGAIN;
 807	return PAGE_SIZE - len;
 808}
 809
 810static ssize_t cache_read(struct file *filp, char __user *buf, size_t count,
 811			  loff_t *ppos, struct cache_detail *cd)
 812{
 813	struct cache_reader *rp = filp->private_data;
 814	struct cache_request *rq;
 815	struct inode *inode = file_inode(filp);
 816	int err;
 817
 818	if (count == 0)
 819		return 0;
 820
 821	inode_lock(inode); /* protect against multiple concurrent
 822			      * readers on this file */
 823 again:
 824	spin_lock(&queue_lock);
 825	/* need to find next request */
 826	while (rp->q.list.next != &cd->queue &&
 827	       list_entry(rp->q.list.next, struct cache_queue, list)
 828	       ->reader) {
 829		struct list_head *next = rp->q.list.next;
 830		list_move(&rp->q.list, next);
 831	}
 832	if (rp->q.list.next == &cd->queue) {
 833		spin_unlock(&queue_lock);
 834		inode_unlock(inode);
 835		WARN_ON_ONCE(rp->offset);
 836		return 0;
 837	}
 838	rq = container_of(rp->q.list.next, struct cache_request, q.list);
 839	WARN_ON_ONCE(rq->q.reader);
 840	if (rp->offset == 0)
 841		rq->readers++;
 842	spin_unlock(&queue_lock);
 843
 844	if (rq->len == 0) {
 845		err = cache_request(cd, rq);
 846		if (err < 0)
 847			goto out;
 848		rq->len = err;
 849	}
 850
 851	if (rp->offset == 0 && !test_bit(CACHE_PENDING, &rq->item->flags)) {
 852		err = -EAGAIN;
 853		spin_lock(&queue_lock);
 854		list_move(&rp->q.list, &rq->q.list);
 855		spin_unlock(&queue_lock);
 856	} else {
 857		if (rp->offset + count > rq->len)
 858			count = rq->len - rp->offset;
 859		err = -EFAULT;
 860		if (copy_to_user(buf, rq->buf + rp->offset, count))
 861			goto out;
 862		rp->offset += count;
 863		if (rp->offset >= rq->len) {
 864			rp->offset = 0;
 865			spin_lock(&queue_lock);
 866			list_move(&rp->q.list, &rq->q.list);
 867			spin_unlock(&queue_lock);
 868		}
 869		err = 0;
 870	}
 871 out:
 872	if (rp->offset == 0) {
 873		/* need to release rq */
 874		spin_lock(&queue_lock);
 875		rq->readers--;
 876		if (rq->readers == 0 &&
 877		    !test_bit(CACHE_PENDING, &rq->item->flags)) {
 878			list_del(&rq->q.list);
 879			spin_unlock(&queue_lock);
 880			cache_put(rq->item, cd);
 881			kfree(rq->buf);
 882			kfree(rq);
 883		} else
 884			spin_unlock(&queue_lock);
 885	}
 886	if (err == -EAGAIN)
 887		goto again;
 888	inode_unlock(inode);
 889	return err ? err : count;
 890}
 891
 892static ssize_t cache_do_downcall(char *kaddr, const char __user *buf,
 893				 size_t count, struct cache_detail *cd)
 894{
 895	ssize_t ret;
 896
 897	if (count == 0)
 898		return -EINVAL;
 899	if (copy_from_user(kaddr, buf, count))
 900		return -EFAULT;
 901	kaddr[count] = '\0';
 902	ret = cd->cache_parse(cd, kaddr, count);
 903	if (!ret)
 904		ret = count;
 905	return ret;
 906}
 907
 908static ssize_t cache_slow_downcall(const char __user *buf,
 909				   size_t count, struct cache_detail *cd)
 910{
 911	static char write_buf[8192]; /* protected by queue_io_mutex */
 912	ssize_t ret = -EINVAL;
 913
 914	if (count >= sizeof(write_buf))
 915		goto out;
 916	mutex_lock(&queue_io_mutex);
 917	ret = cache_do_downcall(write_buf, buf, count, cd);
 918	mutex_unlock(&queue_io_mutex);
 919out:
 920	return ret;
 921}
 922
 923static ssize_t cache_downcall(struct address_space *mapping,
 924			      const char __user *buf,
 925			      size_t count, struct cache_detail *cd)
 926{
 927	struct page *page;
 928	char *kaddr;
 929	ssize_t ret = -ENOMEM;
 930
 931	if (count >= PAGE_SIZE)
 932		goto out_slow;
 933
 934	page = find_or_create_page(mapping, 0, GFP_KERNEL);
 935	if (!page)
 936		goto out_slow;
 937
 938	kaddr = kmap(page);
 939	ret = cache_do_downcall(kaddr, buf, count, cd);
 940	kunmap(page);
 941	unlock_page(page);
 942	put_page(page);
 943	return ret;
 944out_slow:
 945	return cache_slow_downcall(buf, count, cd);
 946}
 947
 948static ssize_t cache_write(struct file *filp, const char __user *buf,
 949			   size_t count, loff_t *ppos,
 950			   struct cache_detail *cd)
 951{
 952	struct address_space *mapping = filp->f_mapping;
 953	struct inode *inode = file_inode(filp);
 954	ssize_t ret = -EINVAL;
 955
 956	if (!cd->cache_parse)
 957		goto out;
 958
 959	inode_lock(inode);
 960	ret = cache_downcall(mapping, buf, count, cd);
 961	inode_unlock(inode);
 962out:
 963	return ret;
 964}
 965
 966static DECLARE_WAIT_QUEUE_HEAD(queue_wait);
 967
 968static __poll_t cache_poll(struct file *filp, poll_table *wait,
 969			       struct cache_detail *cd)
 970{
 971	__poll_t mask;
 972	struct cache_reader *rp = filp->private_data;
 973	struct cache_queue *cq;
 974
 975	poll_wait(filp, &queue_wait, wait);
 976
 977	/* always allow write */
 978	mask = EPOLLOUT | EPOLLWRNORM;
 979
 980	if (!rp)
 981		return mask;
 982
 983	spin_lock(&queue_lock);
 984
 985	for (cq = &rp->q; &cq->list != &cd->queue;
 986	     cq = list_entry(cq->list.next, struct cache_queue, list))
 987		if (!cq->reader) {
 988			mask |= EPOLLIN | EPOLLRDNORM;
 989			break;
 990		}
 991	spin_unlock(&queue_lock);
 992	return mask;
 993}
 994
 995static int cache_ioctl(struct inode *ino, struct file *filp,
 996		       unsigned int cmd, unsigned long arg,
 997		       struct cache_detail *cd)
 998{
 999	int len = 0;
1000	struct cache_reader *rp = filp->private_data;
1001	struct cache_queue *cq;
1002
1003	if (cmd != FIONREAD || !rp)
1004		return -EINVAL;
1005
1006	spin_lock(&queue_lock);
1007
1008	/* only find the length remaining in current request,
1009	 * or the length of the next request
1010	 */
1011	for (cq = &rp->q; &cq->list != &cd->queue;
1012	     cq = list_entry(cq->list.next, struct cache_queue, list))
1013		if (!cq->reader) {
1014			struct cache_request *cr =
1015				container_of(cq, struct cache_request, q);
1016			len = cr->len - rp->offset;
1017			break;
1018		}
1019	spin_unlock(&queue_lock);
1020
1021	return put_user(len, (int __user *)arg);
1022}
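
/*
 * Userspace side, illustrative: FIONREAD lets a reader size its buffer
 * before reading, e.g.
 *
 *	int len;
 *
 *	if (ioctl(fd, FIONREAD, &len) == 0 && len > 0)
 *		n = read(fd, buf, len);
 */
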
1023
1024static int cache_open(struct inode *inode, struct file *filp,
1025		      struct cache_detail *cd)
1026{
1027	struct cache_reader *rp = NULL;
1028
1029	if (!cd || !try_module_get(cd->owner))
1030		return -EACCES;
1031	nonseekable_open(inode, filp);
1032	if (filp->f_mode & FMODE_READ) {
1033		rp = kmalloc(sizeof(*rp), GFP_KERNEL);
1034		if (!rp) {
1035			module_put(cd->owner);
1036			return -ENOMEM;
1037		}
1038		rp->offset = 0;
1039		rp->q.reader = 1;
1040
1041		spin_lock(&queue_lock);
1042		list_add(&rp->q.list, &cd->queue);
1043		spin_unlock(&queue_lock);
1044	}
1045	if (filp->f_mode & FMODE_WRITE)
1046		atomic_inc(&cd->writers);
1047	filp->private_data = rp;
1048	return 0;
1049}
1050
1051static int cache_release(struct inode *inode, struct file *filp,
1052			 struct cache_detail *cd)
1053{
1054	struct cache_reader *rp = filp->private_data;
1055
1056	if (rp) {
1057		spin_lock(&queue_lock);
1058		if (rp->offset) {
1059			struct cache_queue *cq;
1060			for (cq = &rp->q; &cq->list != &cd->queue;
1061			     cq = list_entry(cq->list.next, struct cache_queue, list))
1062				if (!cq->reader) {
1063					container_of(cq, struct cache_request, q)
1064						->readers--;
1065					break;
1066				}
1067			rp->offset = 0;
1068		}
1069		list_del(&rp->q.list);
1070		spin_unlock(&queue_lock);
1071
1072		filp->private_data = NULL;
1073		kfree(rp);
1074
1075	}
1076	if (filp->f_mode & FMODE_WRITE) {
1077		atomic_dec(&cd->writers);
1078		cd->last_close = seconds_since_boot();
1079	}
1080	module_put(cd->owner);
1081	return 0;
1082}
1083
1084
1085
1086static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch)
1087{
1088	struct cache_queue *cq, *tmp;
1089	struct cache_request *cr;
1090	struct list_head dequeued;
1091
1092	INIT_LIST_HEAD(&dequeued);
1093	spin_lock(&queue_lock);
1094	list_for_each_entry_safe(cq, tmp, &detail->queue, list)
1095		if (!cq->reader) {
1096			cr = container_of(cq, struct cache_request, q);
1097			if (cr->item != ch)
1098				continue;
1099			if (test_bit(CACHE_PENDING, &ch->flags))
1100				/* Lost a race and it is pending again */
1101				break;
1102			if (cr->readers != 0)
1103				continue;
1104			list_move(&cr->q.list, &dequeued);
1105		}
1106	spin_unlock(&queue_lock);
1107	while (!list_empty(&dequeued)) {
1108		cr = list_entry(dequeued.next, struct cache_request, q.list);
1109		list_del(&cr->q.list);
1110		cache_put(cr->item, detail);
1111		kfree(cr->buf);
1112		kfree(cr);
1113	}
1114}
1115
1116/*
1117 * Support routines for text-based upcalls.
1118 * Fields are separated by spaces.
1119 * Fields are either mangled to quote space, tab, newline and slosh with a slosh,
1120 * or hexified with a leading \x
1121 * Record is terminated with newline.
1122 *
1123 */
1124
1125void qword_add(char **bpp, int *lp, char *str)
1126{
1127	char *bp = *bpp;
1128	int len = *lp;
1129	int ret;
1130
1131	if (len < 0) return;
1132
1133	ret = string_escape_str(str, bp, len, ESCAPE_OCTAL, "\\ \n\t");
1134	if (ret >= len) {
1135		bp += len;
1136		len = -1;
1137	} else {
1138		bp += ret;
1139		len -= ret;
1140		*bp++ = ' ';
1141		len--;
1142	}
1143	*bpp = bp;
1144	*lp = len;
1145}
1146EXPORT_SYMBOL_GPL(qword_add);
1147
1148void qword_addhex(char **bpp, int *lp, char *buf, int blen)
1149{
1150	char *bp = *bpp;
1151	int len = *lp;
1152
1153	if (len < 0) return;
1154
1155	if (len > 2) {
1156		*bp++ = '\\';
1157		*bp++ = 'x';
1158		len -= 2;
1159		while (blen && len >= 2) {
1160			bp = hex_byte_pack(bp, *buf++);
1161			len -= 2;
1162			blen--;
1163		}
1164	}
1165	if (blen || len < 1) len = -1;
1166	else {
1167		*bp++ = ' ';
1168		len--;
1169	}
1170	*bpp = bp;
1171	*lp = len;
1172}
1173EXPORT_SYMBOL_GPL(qword_addhex);
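
/*
 * Worked example (illustrative): starting with len = 64,
 *
 *	qword_add(&bp, &len, "two words");
 *	qword_addhex(&bp, &len, "\x01\x02", 2);
 *
 * leaves the buffer holding "two\040words \x0102 ": the space inside
 * the first field is octal-escaped, the binary field gets a \x prefix
 * with two hex digits per byte, and each field ends with a single
 * separating space.
 */
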
1174
1175static void warn_no_listener(struct cache_detail *detail)
1176{
1177	if (detail->last_warn != detail->last_close) {
1178		detail->last_warn = detail->last_close;
1179		if (detail->warn_no_listener)
1180			detail->warn_no_listener(detail, detail->last_close != 0);
1181	}
1182}
1183
1184static bool cache_listeners_exist(struct cache_detail *detail)
1185{
1186	if (atomic_read(&detail->writers))
1187		return true;
1188	if (detail->last_close == 0)
1189		/* This cache was never opened */
1190		return false;
1191	if (detail->last_close < seconds_since_boot() - 30)
1192		/*
1193		 * We allow for the possibility that someone might
1194		 * restart a userspace daemon without restarting the
1195		 * server; but after 30 seconds, we give up.
1196		 */
1197		 return false;
1198	return true;
1199}
1200
1201/*
1202 * register an upcall request to user-space and queue it up for read() by the
1203 * upcall daemon.
1204 *
1205 * Each request is at most one page long.
1206 */
1207static int cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h)
1208 {
1209	char *buf;
1210	struct cache_request *crq;
1211	int ret = 0;
1212
1213	if (test_bit(CACHE_CLEANED, &h->flags))
1214		/* Too late to make an upcall */
1215		return -EAGAIN;
1216
1217	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
1218	if (!buf)
1219		return -EAGAIN;
1220
1221	crq = kmalloc(sizeof(*crq), GFP_KERNEL);
1222	if (!crq) {
1223		kfree(buf);
1224		return -EAGAIN;
1225	}
1226
1227	crq->q.reader = 0;
1228	crq->buf = buf;
1229	crq->len = 0;
1230	crq->readers = 0;
1231	spin_lock(&queue_lock);
1232	if (test_bit(CACHE_PENDING, &h->flags)) {
1233		crq->item = cache_get(h);
1234		list_add_tail(&crq->q.list, &detail->queue);
1235		trace_cache_entry_upcall(detail, h);
1236	} else
1237		/* Lost a race, no longer PENDING, so don't enqueue */
1238		ret = -EAGAIN;
1239	spin_unlock(&queue_lock);
1240	wake_up(&queue_wait);
1241	if (ret == -EAGAIN) {
1242		kfree(buf);
1243		kfree(crq);
1244	}
1245	return ret;
1246}
1247
1248int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h)
1249{
1250	if (test_and_set_bit(CACHE_PENDING, &h->flags))
1251		return 0;
1252	return cache_pipe_upcall(detail, h);
1253}
1254EXPORT_SYMBOL_GPL(sunrpc_cache_pipe_upcall);
1255
1256int sunrpc_cache_pipe_upcall_timeout(struct cache_detail *detail,
1257				     struct cache_head *h)
1258{
1259	if (!cache_listeners_exist(detail)) {
1260		warn_no_listener(detail);
1261		trace_cache_entry_no_listener(detail, h);
1262		return -EINVAL;
1263	}
1264	return sunrpc_cache_pipe_upcall(detail, h);
1265}
1266EXPORT_SYMBOL_GPL(sunrpc_cache_pipe_upcall_timeout);
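
/*
 * A sketch of how these helpers are wired up: a cache that talks to
 * userspace points ->cache_upcall at one of the two functions above
 * and supplies a formatting/parsing pair (demo_* names hypothetical;
 * compare the cache_detail instances in fs/nfsd/export.c and
 * net/sunrpc/auth_gss/svcauth_gss.c):
 *
 *	.cache_upcall	= sunrpc_cache_pipe_upcall_timeout,
 *	.cache_request	= demo_request,		-- formats the upcall
 *	.cache_parse	= demo_parse,		-- consumes the reply
 */
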
1267
1268/*
1269 * parse a message from user-space and pass it
1270 * to an appropriate cache
1271 * Messages are, like requests, separated into fields by
1272 * spaces and dequoted as \xHEXSTRING or embedded \nnn octal
1273 *
1274 * Message is
1275 *   reply cachename expiry key ... content....
1276 *
1277 * key and content are both parsed by cache
1278 */
1279
1280int qword_get(char **bpp, char *dest, int bufsize)
1281{
1282	/* return bytes copied, or -1 on error */
1283	char *bp = *bpp;
1284	int len = 0;
1285
1286	while (*bp == ' ') bp++;
1287
1288	if (bp[0] == '\\' && bp[1] == 'x') {
1289		/* HEX STRING */
1290		bp += 2;
1291		while (len < bufsize - 1) {
1292			int h, l;
1293
1294			h = hex_to_bin(bp[0]);
1295			if (h < 0)
1296				break;
1297
1298			l = hex_to_bin(bp[1]);
1299			if (l < 0)
1300				break;
1301
1302			*dest++ = (h << 4) | l;
1303			bp += 2;
1304			len++;
1305		}
1306	} else {
1307		/* text with \nnn octal quoting */
1308		while (*bp != ' ' && *bp != '\n' && *bp && len < bufsize-1) {
1309			if (*bp == '\\' &&
1310			    isodigit(bp[1]) && (bp[1] <= '3') &&
1311			    isodigit(bp[2]) &&
1312			    isodigit(bp[3])) {
1313				int byte = (*++bp -'0');
1314				bp++;
1315				byte = (byte << 3) | (*bp++ - '0');
1316				byte = (byte << 3) | (*bp++ - '0');
1317				*dest++ = byte;
1318				len++;
1319			} else {
1320				*dest++ = *bp++;
1321				len++;
1322			}
1323		}
1324	}
1325
1326	if (*bp != ' ' && *bp != '\n' && *bp != '\0')
1327		return -1;
1328	while (*bp == ' ') bp++;
1329	*bpp = bp;
1330	*dest = '\0';
1331	return len;
1332}
1333EXPORT_SYMBOL_GPL(qword_get);
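
/*
 * Worked example (illustrative): given the hypothetical downcall line
 *
 *	demo two\040words 2147483647 demovalue
 *
 * successive qword_get() calls yield "demo", "two words" (the \040 is
 * dequoted back to a space), "2147483647" and "demovalue"; each call
 * returns the number of bytes copied, or -1 on malformed input.
 */
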
1334
1335
1336/*
1337 * support /proc/net/rpc/$CACHENAME/content
1338 * as a seqfile.
1339 * We call ->cache_show passing NULL for the item to
1340 * get a header, then pass each real item in the cache
1341 */
1342
1343static void *__cache_seq_start(struct seq_file *m, loff_t *pos)
1344{
1345	loff_t n = *pos;
1346	unsigned int hash, entry;
1347	struct cache_head *ch;
1348	struct cache_detail *cd = m->private;
1349
1350	if (!n--)
1351		return SEQ_START_TOKEN;
1352	hash = n >> 32;
1353	entry = n & ((1LL<<32) - 1);
1354
1355	hlist_for_each_entry_rcu(ch, &cd->hash_table[hash], cache_list)
1356		if (!entry--)
1357			return ch;
1358	n &= ~((1LL<<32) - 1);
1359	do {
1360		hash++;
1361		n += 1LL<<32;
1362	} while (hash < cd->hash_size &&
1363		hlist_empty(&cd->hash_table[hash]));
1364	if (hash >= cd->hash_size)
1365		return NULL;
1366	*pos = n+1;
1367	return hlist_entry_safe(rcu_dereference_raw(
1368				hlist_first_rcu(&cd->hash_table[hash])),
1369				struct cache_head, cache_list);
1370}
1371
1372static void *cache_seq_next(struct seq_file *m, void *p, loff_t *pos)
1373{
1374	struct cache_head *ch = p;
1375	int hash = (*pos >> 32);
1376	struct cache_detail *cd = m->private;
1377
1378	if (p == SEQ_START_TOKEN)
1379		hash = 0;
1380	else if (ch->cache_list.next == NULL) {
1381		hash++;
1382		*pos += 1LL<<32;
1383	} else {
1384		++*pos;
1385		return hlist_entry_safe(rcu_dereference_raw(
1386					hlist_next_rcu(&ch->cache_list)),
1387					struct cache_head, cache_list);
1388	}
1389	*pos &= ~((1LL<<32) - 1);
1390	while (hash < cd->hash_size &&
1391	       hlist_empty(&cd->hash_table[hash])) {
1392		hash++;
1393		*pos += 1LL<<32;
1394	}
1395	if (hash >= cd->hash_size)
1396		return NULL;
1397	++*pos;
1398	return hlist_entry_safe(rcu_dereference_raw(
1399				hlist_first_rcu(&cd->hash_table[hash])),
1400				struct cache_head, cache_list);
1401}
1402
1403void *cache_seq_start_rcu(struct seq_file *m, loff_t *pos)
1404	__acquires(RCU)
1405{
1406	rcu_read_lock();
1407	return __cache_seq_start(m, pos);
1408}
1409EXPORT_SYMBOL_GPL(cache_seq_start_rcu);
1410
1411void *cache_seq_next_rcu(struct seq_file *file, void *p, loff_t *pos)
1412{
1413	return cache_seq_next(file, p, pos);
1414}
1415EXPORT_SYMBOL_GPL(cache_seq_next_rcu);
1416
1417void cache_seq_stop_rcu(struct seq_file *m, void *p)
1418	__releases(RCU)
1419{
1420	rcu_read_unlock();
1421}
1422EXPORT_SYMBOL_GPL(cache_seq_stop_rcu);
1423
1424static int c_show(struct seq_file *m, void *p)
1425{
1426	struct cache_head *cp = p;
1427	struct cache_detail *cd = m->private;
1428
1429	if (p == SEQ_START_TOKEN)
1430		return cd->cache_show(m, cd, NULL);
1431
1432	ifdebug(CACHE)
1433		seq_printf(m, "# expiry=%lld refcnt=%d flags=%lx\n",
1434			   convert_to_wallclock(cp->expiry_time),
1435			   kref_read(&cp->ref), cp->flags);
1436	cache_get(cp);
1437	if (cache_check(cd, cp, NULL))
1438		/* cache_check does a cache_put on failure */
1439		seq_printf(m, "# ");
1440	else {
1441		if (cache_is_expired(cd, cp))
1442			seq_printf(m, "# ");
1443		cache_put(cp, cd);
1444	}
1445
1446	return cd->cache_show(m, cd, cp);
1447}
1448
1449static const struct seq_operations cache_content_op = {
1450	.start	= cache_seq_start_rcu,
1451	.next	= cache_seq_next_rcu,
1452	.stop	= cache_seq_stop_rcu,
1453	.show	= c_show,
1454};
1455
1456static int content_open(struct inode *inode, struct file *file,
1457			struct cache_detail *cd)
1458{
1459	struct seq_file *seq;
1460	int err;
1461
1462	if (!cd || !try_module_get(cd->owner))
1463		return -EACCES;
1464
1465	err = seq_open(file, &cache_content_op);
1466	if (err) {
1467		module_put(cd->owner);
1468		return err;
1469	}
1470
1471	seq = file->private_data;
1472	seq->private = cd;
1473	return 0;
1474}
1475
1476static int content_release(struct inode *inode, struct file *file,
1477		struct cache_detail *cd)
1478{
1479	int ret = seq_release(inode, file);
1480	module_put(cd->owner);
1481	return ret;
1482}
1483
1484static int open_flush(struct inode *inode, struct file *file,
1485			struct cache_detail *cd)
1486{
1487	if (!cd || !try_module_get(cd->owner))
1488		return -EACCES;
1489	return nonseekable_open(inode, file);
1490}
1491
1492static int release_flush(struct inode *inode, struct file *file,
1493			struct cache_detail *cd)
1494{
1495	module_put(cd->owner);
1496	return 0;
1497}
1498
1499static ssize_t read_flush(struct file *file, char __user *buf,
1500			  size_t count, loff_t *ppos,
1501			  struct cache_detail *cd)
1502{
1503	char tbuf[22];
1504	size_t len;
1505
1506	len = snprintf(tbuf, sizeof(tbuf), "%llu\n",
1507			convert_to_wallclock(cd->flush_time));
1508	return simple_read_from_buffer(buf, count, ppos, tbuf, len);
1509}
1510
1511static ssize_t write_flush(struct file *file, const char __user *buf,
1512			   size_t count, loff_t *ppos,
1513			   struct cache_detail *cd)
1514{
1515	char tbuf[20];
1516	char *ep;
1517	time64_t now;
1518
1519	if (*ppos || count > sizeof(tbuf)-1)
1520		return -EINVAL;
1521	if (copy_from_user(tbuf, buf, count))
1522		return -EFAULT;
1523	tbuf[count] = 0;
1524	simple_strtoul(tbuf, &ep, 0);
1525	if (*ep && *ep != '\n')
1526		return -EINVAL;
1527	/* Note that while we check that 'buf' holds a valid number,
1528	 * we always ignore the value and just flush everything.
1529	 * Making use of the number leads to races.
1530	 */
1531
1532	now = seconds_since_boot();
1533	/* Always flush everything, so behave like cache_purge()
1534	 * Do this by advancing flush_time to the current time,
1535	 * or by one second if it has already reached the current time.
1536	 * Newly added cache entries will always have ->last_refresh greater
1537	 * than ->flush_time, so they don't get flushed prematurely.
1538	 */
1539
1540	if (cd->flush_time >= now)
1541		now = cd->flush_time + 1;
1542
1543	cd->flush_time = now;
1544	cd->nextcheck = now;
1545	cache_flush();
1546
1547	if (cd->flush)
1548		cd->flush();
1549
1550	*ppos += count;
1551	return count;
1552}
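
/*
 * Userspace side, illustrative: a cache is flushed by writing any
 * decimal number to its flush file; the value itself is ignored, as
 * the comment above explains.  Utilities such as exportfs rely on
 * this for nfsd's caches:
 *
 *	int fd = open("/proc/net/rpc/demo/flush", O_WRONLY);
 *
 *	if (fd >= 0) {
 *		write(fd, "1\n", 2);
 *		close(fd);
 *	}
 */
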
1553
1554static ssize_t cache_read_procfs(struct file *filp, char __user *buf,
1555				 size_t count, loff_t *ppos)
1556{
1557	struct cache_detail *cd = PDE_DATA(file_inode(filp));
1558
1559	return cache_read(filp, buf, count, ppos, cd);
1560}
1561
1562static ssize_t cache_write_procfs(struct file *filp, const char __user *buf,
1563				  size_t count, loff_t *ppos)
1564{
1565	struct cache_detail *cd = PDE_DATA(file_inode(filp));
1566
1567	return cache_write(filp, buf, count, ppos, cd);
1568}
1569
1570static __poll_t cache_poll_procfs(struct file *filp, poll_table *wait)
1571{
1572	struct cache_detail *cd = PDE_DATA(file_inode(filp));
1573
1574	return cache_poll(filp, wait, cd);
1575}
1576
1577static long cache_ioctl_procfs(struct file *filp,
1578			       unsigned int cmd, unsigned long arg)
1579{
1580	struct inode *inode = file_inode(filp);
1581	struct cache_detail *cd = PDE_DATA(inode);
1582
1583	return cache_ioctl(inode, filp, cmd, arg, cd);
1584}
1585
1586static int cache_open_procfs(struct inode *inode, struct file *filp)
1587{
1588	struct cache_detail *cd = PDE_DATA(inode);
1589
1590	return cache_open(inode, filp, cd);
1591}
1592
1593static int cache_release_procfs(struct inode *inode, struct file *filp)
1594{
1595	struct cache_detail *cd = PDE_DATA(inode);
1596
1597	return cache_release(inode, filp, cd);
1598}
1599
1600static const struct proc_ops cache_channel_proc_ops = {
1601	.proc_lseek	= no_llseek,
1602	.proc_read	= cache_read_procfs,
1603	.proc_write	= cache_write_procfs,
1604	.proc_poll	= cache_poll_procfs,
1605	.proc_ioctl	= cache_ioctl_procfs, /* for FIONREAD */
1606	.proc_open	= cache_open_procfs,
1607	.proc_release	= cache_release_procfs,
1608};
1609
1610static int content_open_procfs(struct inode *inode, struct file *filp)
1611{
1612	struct cache_detail *cd = PDE_DATA(inode);
1613
1614	return content_open(inode, filp, cd);
1615}
1616
1617static int content_release_procfs(struct inode *inode, struct file *filp)
1618{
1619	struct cache_detail *cd = PDE_DATA(inode);
1620
1621	return content_release(inode, filp, cd);
1622}
1623
1624static const struct proc_ops content_proc_ops = {
1625	.proc_open	= content_open_procfs,
1626	.proc_read	= seq_read,
1627	.proc_lseek	= seq_lseek,
1628	.proc_release	= content_release_procfs,
1629};
1630
1631static int open_flush_procfs(struct inode *inode, struct file *filp)
1632{
1633	struct cache_detail *cd = PDE_DATA(inode);
1634
1635	return open_flush(inode, filp, cd);
1636}
1637
1638static int release_flush_procfs(struct inode *inode, struct file *filp)
1639{
1640	struct cache_detail *cd = PDE_DATA(inode);
1641
1642	return release_flush(inode, filp, cd);
1643}
1644
1645static ssize_t read_flush_procfs(struct file *filp, char __user *buf,
1646			    size_t count, loff_t *ppos)
1647{
1648	struct cache_detail *cd = PDE_DATA(file_inode(filp));
1649
1650	return read_flush(filp, buf, count, ppos, cd);
1651}
1652
1653static ssize_t write_flush_procfs(struct file *filp,
1654				  const char __user *buf,
1655				  size_t count, loff_t *ppos)
1656{
1657	struct cache_detail *cd = PDE_DATA(file_inode(filp));
1658
1659	return write_flush(filp, buf, count, ppos, cd);
1660}
1661
1662static const struct proc_ops cache_flush_proc_ops = {
1663	.proc_open	= open_flush_procfs,
1664	.proc_read	= read_flush_procfs,
1665	.proc_write	= write_flush_procfs,
1666	.proc_release	= release_flush_procfs,
1667	.proc_lseek	= no_llseek,
1668};
1669
1670static void remove_cache_proc_entries(struct cache_detail *cd)
1671{
1672	if (cd->procfs) {
1673		proc_remove(cd->procfs);
1674		cd->procfs = NULL;
1675	}
1676}
1677
1678#ifdef CONFIG_PROC_FS
1679static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
1680{
1681	struct proc_dir_entry *p;
1682	struct sunrpc_net *sn;
1683
1684	sn = net_generic(net, sunrpc_net_id);
1685	cd->procfs = proc_mkdir(cd->name, sn->proc_net_rpc);
1686	if (cd->procfs == NULL)
1687		goto out_nomem;
1688
1689	p = proc_create_data("flush", S_IFREG | 0600,
1690			     cd->procfs, &cache_flush_proc_ops, cd);
1691	if (p == NULL)
1692		goto out_nomem;
1693
1694	if (cd->cache_request || cd->cache_parse) {
1695		p = proc_create_data("channel", S_IFREG | 0600, cd->procfs,
1696				     &cache_channel_proc_ops, cd);
1697		if (p == NULL)
1698			goto out_nomem;
1699	}
1700	if (cd->cache_show) {
1701		p = proc_create_data("content", S_IFREG | 0400, cd->procfs,
1702				     &content_proc_ops, cd);
1703		if (p == NULL)
1704			goto out_nomem;
1705	}
1706	return 0;
1707out_nomem:
1708	remove_cache_proc_entries(cd);
1709	return -ENOMEM;
1710}
1711#else /* CONFIG_PROC_FS */
1712static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
1713{
1714	return 0;
1715}
1716#endif
1717
1718void __init cache_initialize(void)
1719{
1720	INIT_DEFERRABLE_WORK(&cache_cleaner, do_cache_clean);
1721}
1722
1723int cache_register_net(struct cache_detail *cd, struct net *net)
1724{
1725	int ret;
1726
1727	sunrpc_init_cache_detail(cd);
1728	ret = create_cache_proc_entries(cd, net);
1729	if (ret)
1730		sunrpc_destroy_cache_detail(cd);
1731	return ret;
1732}
1733EXPORT_SYMBOL_GPL(cache_register_net);
1734
1735void cache_unregister_net(struct cache_detail *cd, struct net *net)
1736{
1737	remove_cache_proc_entries(cd);
1738	sunrpc_destroy_cache_detail(cd);
1739}
1740EXPORT_SYMBOL_GPL(cache_unregister_net);
1741
1742struct cache_detail *cache_create_net(const struct cache_detail *tmpl, struct net *net)
1743{
1744	struct cache_detail *cd;
1745	int i;
1746
1747	cd = kmemdup(tmpl, sizeof(struct cache_detail), GFP_KERNEL);
1748	if (cd == NULL)
1749		return ERR_PTR(-ENOMEM);
1750
1751	cd->hash_table = kcalloc(cd->hash_size, sizeof(struct hlist_head),
1752				 GFP_KERNEL);
1753	if (cd->hash_table == NULL) {
1754		kfree(cd);
1755		return ERR_PTR(-ENOMEM);
1756	}
1757
1758	for (i = 0; i < cd->hash_size; i++)
1759		INIT_HLIST_HEAD(&cd->hash_table[i]);
1760	cd->net = net;
1761	return cd;
1762}
1763EXPORT_SYMBOL_GPL(cache_create_net);
1764
1765void cache_destroy_net(struct cache_detail *cd, struct net *net)
1766{
1767	kfree(cd->hash_table);
1768	kfree(cd);
1769}
1770EXPORT_SYMBOL_GPL(cache_destroy_net);
1771
1772static ssize_t cache_read_pipefs(struct file *filp, char __user *buf,
1773				 size_t count, loff_t *ppos)
1774{
1775	struct cache_detail *cd = RPC_I(file_inode(filp))->private;
1776
1777	return cache_read(filp, buf, count, ppos, cd);
1778}
1779
1780static ssize_t cache_write_pipefs(struct file *filp, const char __user *buf,
1781				  size_t count, loff_t *ppos)
1782{
1783	struct cache_detail *cd = RPC_I(file_inode(filp))->private;
1784
1785	return cache_write(filp, buf, count, ppos, cd);
1786}
1787
1788static __poll_t cache_poll_pipefs(struct file *filp, poll_table *wait)
1789{
1790	struct cache_detail *cd = RPC_I(file_inode(filp))->private;
1791
1792	return cache_poll(filp, wait, cd);
1793}
1794
1795static long cache_ioctl_pipefs(struct file *filp,
1796			      unsigned int cmd, unsigned long arg)
1797{
1798	struct inode *inode = file_inode(filp);
1799	struct cache_detail *cd = RPC_I(inode)->private;
1800
1801	return cache_ioctl(inode, filp, cmd, arg, cd);
1802}
1803
1804static int cache_open_pipefs(struct inode *inode, struct file *filp)
1805{
1806	struct cache_detail *cd = RPC_I(inode)->private;
1807
1808	return cache_open(inode, filp, cd);
1809}
1810
1811static int cache_release_pipefs(struct inode *inode, struct file *filp)
1812{
1813	struct cache_detail *cd = RPC_I(inode)->private;
1814
1815	return cache_release(inode, filp, cd);
1816}
1817
1818const struct file_operations cache_file_operations_pipefs = {
1819	.owner		= THIS_MODULE,
1820	.llseek		= no_llseek,
1821	.read		= cache_read_pipefs,
1822	.write		= cache_write_pipefs,
1823	.poll		= cache_poll_pipefs,
1824	.unlocked_ioctl	= cache_ioctl_pipefs, /* for FIONREAD */
1825	.open		= cache_open_pipefs,
1826	.release	= cache_release_pipefs,
1827};
1828
1829static int content_open_pipefs(struct inode *inode, struct file *filp)
1830{
1831	struct cache_detail *cd = RPC_I(inode)->private;
1832
1833	return content_open(inode, filp, cd);
1834}
1835
1836static int content_release_pipefs(struct inode *inode, struct file *filp)
1837{
1838	struct cache_detail *cd = RPC_I(inode)->private;
1839
1840	return content_release(inode, filp, cd);
1841}
1842
1843const struct file_operations content_file_operations_pipefs = {
1844	.open		= content_open_pipefs,
1845	.read		= seq_read,
1846	.llseek		= seq_lseek,
1847	.release	= content_release_pipefs,
1848};
1849
1850static int open_flush_pipefs(struct inode *inode, struct file *filp)
1851{
1852	struct cache_detail *cd = RPC_I(inode)->private;
1853
1854	return open_flush(inode, filp, cd);
1855}
1856
1857static int release_flush_pipefs(struct inode *inode, struct file *filp)
1858{
1859	struct cache_detail *cd = RPC_I(inode)->private;
1860
1861	return release_flush(inode, filp, cd);
1862}
1863
1864static ssize_t read_flush_pipefs(struct file *filp, char __user *buf,
1865			    size_t count, loff_t *ppos)
1866{
1867	struct cache_detail *cd = RPC_I(file_inode(filp))->private;
1868
1869	return read_flush(filp, buf, count, ppos, cd);
1870}
1871
1872static ssize_t write_flush_pipefs(struct file *filp,
1873				  const char __user *buf,
1874				  size_t count, loff_t *ppos)
1875{
1876	struct cache_detail *cd = RPC_I(file_inode(filp))->private;
1877
1878	return write_flush(filp, buf, count, ppos, cd);
1879}
1880
1881const struct file_operations cache_flush_operations_pipefs = {
1882	.open		= open_flush_pipefs,
1883	.read		= read_flush_pipefs,
1884	.write		= write_flush_pipefs,
1885	.release	= release_flush_pipefs,
1886	.llseek		= no_llseek,
1887};
1888
1889int sunrpc_cache_register_pipefs(struct dentry *parent,
1890				 const char *name, umode_t umode,
1891				 struct cache_detail *cd)
1892{
1893	struct dentry *dir = rpc_create_cache_dir(parent, name, umode, cd);
1894	if (IS_ERR(dir))
1895		return PTR_ERR(dir);
1896	cd->pipefs = dir;
1897	return 0;
1898}
1899EXPORT_SYMBOL_GPL(sunrpc_cache_register_pipefs);
1900
1901void sunrpc_cache_unregister_pipefs(struct cache_detail *cd)
1902{
1903	if (cd->pipefs) {
1904		rpc_remove_cache_dir(cd->pipefs);
1905		cd->pipefs = NULL;
1906	}
1907}
1908EXPORT_SYMBOL_GPL(sunrpc_cache_unregister_pipefs);
1909
1910void sunrpc_cache_unhash(struct cache_detail *cd, struct cache_head *h)
1911{
1912	spin_lock(&cd->hash_lock);
1913	if (!hlist_unhashed(&h->cache_list)) {
1914		sunrpc_begin_cache_remove_entry(h, cd);
1915		spin_unlock(&cd->hash_lock);
1916		sunrpc_end_cache_remove_entry(h, cd);
1917	} else
1918		spin_unlock(&cd->hash_lock);
1919}
1920EXPORT_SYMBOL_GPL(sunrpc_cache_unhash);