v3.1
   1/*
   2 * net/sunrpc/cache.c
   3 *
   4 * Generic code for various authentication-related caches
   5 * used by sunrpc clients and servers.
   6 *
   7 * Copyright (C) 2002 Neil Brown <neilb@cse.unsw.edu.au>
   8 *
   9 * Released under terms in GPL version 2.  See COPYING.
  10 *
  11 */
  12
  13#include <linux/types.h>
  14#include <linux/fs.h>
  15#include <linux/file.h>
  16#include <linux/slab.h>
  17#include <linux/signal.h>
  18#include <linux/sched.h>
  19#include <linux/kmod.h>
  20#include <linux/list.h>
  21#include <linux/module.h>
  22#include <linux/ctype.h>
  23#include <asm/uaccess.h>
  24#include <linux/poll.h>
  25#include <linux/seq_file.h>
  26#include <linux/proc_fs.h>
  27#include <linux/net.h>
  28#include <linux/workqueue.h>
  29#include <linux/mutex.h>
  30#include <linux/pagemap.h>
  31#include <asm/ioctls.h>
  32#include <linux/sunrpc/types.h>
  33#include <linux/sunrpc/cache.h>
  34#include <linux/sunrpc/stats.h>
  35#include <linux/sunrpc/rpc_pipe_fs.h>
  36#include "netns.h"
  37
  38#define	 RPCDBG_FACILITY RPCDBG_CACHE
  39
  40static bool cache_defer_req(struct cache_req *req, struct cache_head *item);
  41static void cache_revisit_request(struct cache_head *item);
  42
  43static void cache_init(struct cache_head *h)
  44{
  45	time_t now = seconds_since_boot();
  46	h->next = NULL;
  47	h->flags = 0;
  48	kref_init(&h->ref);
  49	h->expiry_time = now + CACHE_NEW_EXPIRY;
  50	h->last_refresh = now;
  51}
  52
  53static inline int cache_is_expired(struct cache_detail *detail, struct cache_head *h)
  54{
  55	return  (h->expiry_time < seconds_since_boot()) ||
  56		(detail->flush_time > h->last_refresh);
  57}
  58
  59struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
  60				       struct cache_head *key, int hash)
  61{
  62	struct cache_head **head,  **hp;
  63	struct cache_head *new = NULL, *freeme = NULL;
  64
  65	head = &detail->hash_table[hash];
  66
  67	read_lock(&detail->hash_lock);
  68
  69	for (hp=head; *hp != NULL ; hp = &(*hp)->next) {
  70		struct cache_head *tmp = *hp;
  71		if (detail->match(tmp, key)) {
  72			if (cache_is_expired(detail, tmp))
  73				/* This entry is expired, we will discard it. */
  74				break;
  75			cache_get(tmp);
  76			read_unlock(&detail->hash_lock);
  77			return tmp;
  78		}
  79	}
  80	read_unlock(&detail->hash_lock);
  81	/* Didn't find anything, insert an empty entry */
  82
  83	new = detail->alloc();
  84	if (!new)
  85		return NULL;
  86	/* must fully initialise 'new', else
   87	 * we might lose it if we need to
  88	 * cache_put it soon.
  89	 */
  90	cache_init(new);
  91	detail->init(new, key);
  92
  93	write_lock(&detail->hash_lock);
  94
  95	/* check if entry appeared while we slept */
  96	for (hp=head; *hp != NULL ; hp = &(*hp)->next) {
  97		struct cache_head *tmp = *hp;
  98		if (detail->match(tmp, key)) {
  99			if (cache_is_expired(detail, tmp)) {
 100				*hp = tmp->next;
 101				tmp->next = NULL;
 102				detail->entries --;
 103				freeme = tmp;
 104				break;
 105			}
 106			cache_get(tmp);
 107			write_unlock(&detail->hash_lock);
 108			cache_put(new, detail);
 109			return tmp;
 110		}
 111	}
 112	new->next = *head;
 113	*head = new;
 114	detail->entries++;
 115	cache_get(new);
 116	write_unlock(&detail->hash_lock);
 117
 118	if (freeme)
 119		cache_put(freeme, detail);
 120	return new;
 121}
 122EXPORT_SYMBOL_GPL(sunrpc_cache_lookup);
 123
 124
 125static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch);
 126
 127static void cache_fresh_locked(struct cache_head *head, time_t expiry)
 128{
 129	head->expiry_time = expiry;
 130	head->last_refresh = seconds_since_boot();
 131	smp_wmb(); /* paired with smp_rmb() in cache_is_valid() */
 132	set_bit(CACHE_VALID, &head->flags);
 133}
 134
 135static void cache_fresh_unlocked(struct cache_head *head,
 136				 struct cache_detail *detail)
 137{
 138	if (test_and_clear_bit(CACHE_PENDING, &head->flags)) {
 139		cache_revisit_request(head);
 140		cache_dequeue(detail, head);
 141	}
 142}
 143
 144struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
 145				       struct cache_head *new, struct cache_head *old, int hash)
 146{
 147	/* The 'old' entry is to be replaced by 'new'.
 148	 * If 'old' is not VALID, we update it directly,
 149	 * otherwise we need to replace it
 150	 */
 151	struct cache_head **head;
 152	struct cache_head *tmp;
 153
 154	if (!test_bit(CACHE_VALID, &old->flags)) {
 155		write_lock(&detail->hash_lock);
 156		if (!test_bit(CACHE_VALID, &old->flags)) {
 157			if (test_bit(CACHE_NEGATIVE, &new->flags))
 158				set_bit(CACHE_NEGATIVE, &old->flags);
 159			else
 160				detail->update(old, new);
 161			cache_fresh_locked(old, new->expiry_time);
 162			write_unlock(&detail->hash_lock);
 163			cache_fresh_unlocked(old, detail);
 164			return old;
 165		}
 166		write_unlock(&detail->hash_lock);
 167	}
 168	/* We need to insert a new entry */
 169	tmp = detail->alloc();
 170	if (!tmp) {
 171		cache_put(old, detail);
 172		return NULL;
 173	}
 174	cache_init(tmp);
 175	detail->init(tmp, old);
 176	head = &detail->hash_table[hash];
 177
 178	write_lock(&detail->hash_lock);
 179	if (test_bit(CACHE_NEGATIVE, &new->flags))
 180		set_bit(CACHE_NEGATIVE, &tmp->flags);
 181	else
 182		detail->update(tmp, new);
 183	tmp->next = *head;
 184	*head = tmp;
 185	detail->entries++;
 186	cache_get(tmp);
 187	cache_fresh_locked(tmp, new->expiry_time);
 188	cache_fresh_locked(old, 0);
 189	write_unlock(&detail->hash_lock);
 190	cache_fresh_unlocked(tmp, detail);
 191	cache_fresh_unlocked(old, detail);
 192	cache_put(old, detail);
 193	return tmp;
 194}
 195EXPORT_SYMBOL_GPL(sunrpc_cache_update);
 196
 197static int cache_make_upcall(struct cache_detail *cd, struct cache_head *h)
 198{
 199	if (!cd->cache_upcall)
 200		return -EINVAL;
 201	return cd->cache_upcall(cd, h);
 202}
 203
 204static inline int cache_is_valid(struct cache_detail *detail, struct cache_head *h)
 205{
 206	if (!test_bit(CACHE_VALID, &h->flags))
 207		return -EAGAIN;
 208	else {
 209		/* entry is valid */
 210		if (test_bit(CACHE_NEGATIVE, &h->flags))
 211			return -ENOENT;
 212		else {
 213			/*
 214			 * In combination with write barrier in
 215			 * sunrpc_cache_update, ensures that anyone
 216			 * using the cache entry after this sees the
 217			 * updated contents:
 218			 */
 219			smp_rmb();
 220			return 0;
 221		}
 222	}
 223}
 224
 225static int try_to_negate_entry(struct cache_detail *detail, struct cache_head *h)
 226{
 227	int rv;
 228
 229	write_lock(&detail->hash_lock);
 230	rv = cache_is_valid(detail, h);
 231	if (rv != -EAGAIN) {
 232		write_unlock(&detail->hash_lock);
 233		return rv;
 234	}
 235	set_bit(CACHE_NEGATIVE, &h->flags);
 236	cache_fresh_locked(h, seconds_since_boot()+CACHE_NEW_EXPIRY);
 237	write_unlock(&detail->hash_lock);
 238	cache_fresh_unlocked(h, detail);
 239	return -ENOENT;
 240}
 241
 242/*
 243 * This is the generic cache management routine for all
 244 * the authentication caches.
 245 * It checks the currency of a cache item and will (later)
 246 * initiate an upcall to fill it if needed.
 247 *
 248 *
 249 * Returns 0 if the cache_head can be used, or cache_puts it and returns
 250 * -EAGAIN if upcall is pending and request has been queued
  251 * -ETIMEDOUT if upcall failed or request could not be queued, or
 252 *           upcall completed but item is still invalid (implying that
 253 *           the cache item has been replaced with a newer one).
 254 * -ENOENT if cache entry was negative
 255 */
 256int cache_check(struct cache_detail *detail,
 257		    struct cache_head *h, struct cache_req *rqstp)
 258{
 259	int rv;
 260	long refresh_age, age;
 261
 262	/* First decide return status as best we can */
 263	rv = cache_is_valid(detail, h);
 264
 265	/* now see if we want to start an upcall */
 266	refresh_age = (h->expiry_time - h->last_refresh);
 267	age = seconds_since_boot() - h->last_refresh;
 268
 269	if (rqstp == NULL) {
 270		if (rv == -EAGAIN)
 271			rv = -ENOENT;
 272	} else if (rv == -EAGAIN || age > refresh_age/2) {
 273		dprintk("RPC:       Want update, refage=%ld, age=%ld\n",
 274				refresh_age, age);
 275		if (!test_and_set_bit(CACHE_PENDING, &h->flags)) {
 276			switch (cache_make_upcall(detail, h)) {
 277			case -EINVAL:
 278				clear_bit(CACHE_PENDING, &h->flags);
 279				cache_revisit_request(h);
 280				rv = try_to_negate_entry(detail, h);
 281				break;
 282			case -EAGAIN:
 283				clear_bit(CACHE_PENDING, &h->flags);
 284				cache_revisit_request(h);
 285				break;
 286			}
 287		}
 288	}
 289
 290	if (rv == -EAGAIN) {
 291		if (!cache_defer_req(rqstp, h)) {
 292			/*
 293			 * Request was not deferred; handle it as best
 294			 * we can ourselves:
 295			 */
 296			rv = cache_is_valid(detail, h);
 297			if (rv == -EAGAIN)
 298				rv = -ETIMEDOUT;
 299		}
 300	}
 301	if (rv)
 302		cache_put(h, detail);
 303	return rv;
 304}
 305EXPORT_SYMBOL_GPL(cache_check);
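/*
 * A minimal sketch of how a caller might use cache_check(); the names
 * my_cache, my_lookup() and rqstp->rq_chandle are hypothetical, for
 * illustration only (svcauth_unix.c has real in-tree callers):
 *
 *	struct cache_head *h = my_lookup(&my_cache, &key);
 *	if (h == NULL)
 *		return -ENOMEM;
 *	switch (cache_check(&my_cache, h, &rqstp->rq_chandle)) {
 *	case 0:		// fresh entry: use h, then cache_put(h, &my_cache)
 *	case -EAGAIN:	// request was deferred; drop it and retry later
 *	case -ENOENT:	// negative entry: reject the request
 *	case -ETIMEDOUT: // upcall failed: fail the request
 *	}
 *
 * Note that on any non-zero return cache_check() has already dropped
 * the caller's reference to h.
 */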
 306
 307/*
 308 * caches need to be periodically cleaned.
 309 * For this we maintain a list of cache_detail and
 310 * a current pointer into that list and into the table
 311 * for that entry.
 312 *
 313 * Each time clean_cache is called it finds the next non-empty entry
 314 * in the current table and walks the list in that entry
 315 * looking for entries that can be removed.
 316 *
 317 * An entry gets removed if:
 318 * - The expiry is before current time
 319 * - The last_refresh time is before the flush_time for that cache
 320 *
 321 * later we might drop old entries with non-NEVER expiry if that table
 322 * is getting 'full' for some definition of 'full'
 323 *
 324 * The question of "how often to scan a table" is an interesting one
 325 * and is answered in part by the use of the "nextcheck" field in the
 326 * cache_detail.
 327 * When a scan of a table begins, the nextcheck field is set to a time
 328 * that is well into the future.
 329 * While scanning, if an expiry time is found that is earlier than the
 330 * current nextcheck time, nextcheck is set to that expiry time.
 331 * If the flush_time is ever set to a time earlier than the nextcheck
 332 * time, the nextcheck time is then set to that flush_time.
 333 *
 334 * A table is then only scanned if the current time is at least
 335 * the nextcheck time.
 336 *
 337 */
 338
 339static LIST_HEAD(cache_list);
 340static DEFINE_SPINLOCK(cache_list_lock);
 341static struct cache_detail *current_detail;
 342static int current_index;
 343
 344static void do_cache_clean(struct work_struct *work);
 345static struct delayed_work cache_cleaner;
 346
 347static void sunrpc_init_cache_detail(struct cache_detail *cd)
 348{
 349	rwlock_init(&cd->hash_lock);
 350	INIT_LIST_HEAD(&cd->queue);
 351	spin_lock(&cache_list_lock);
 352	cd->nextcheck = 0;
 353	cd->entries = 0;
 354	atomic_set(&cd->readers, 0);
 355	cd->last_close = 0;
 356	cd->last_warn = -1;
 357	list_add(&cd->others, &cache_list);
 358	spin_unlock(&cache_list_lock);
 359
 360	/* start the cleaning process */
 361	schedule_delayed_work(&cache_cleaner, 0);
 362}
 363
 364static void sunrpc_destroy_cache_detail(struct cache_detail *cd)
 365{
 366	cache_purge(cd);
 367	spin_lock(&cache_list_lock);
 368	write_lock(&cd->hash_lock);
 369	if (cd->entries || atomic_read(&cd->inuse)) {
 370		write_unlock(&cd->hash_lock);
 371		spin_unlock(&cache_list_lock);
 372		goto out;
 373	}
 374	if (current_detail == cd)
 375		current_detail = NULL;
 376	list_del_init(&cd->others);
 377	write_unlock(&cd->hash_lock);
 378	spin_unlock(&cache_list_lock);
 379	if (list_empty(&cache_list)) {
  380		/* module is being unloaded, so it's safe to kill the worker */
 381		cancel_delayed_work_sync(&cache_cleaner);
 382	}
 383	return;
 384out:
 385	printk(KERN_ERR "nfsd: failed to unregister %s cache\n", cd->name);
 386}
 387
 388/* clean cache tries to find something to clean
 389 * and cleans it.
 390 * It returns 1 if it cleaned something,
 391 *            0 if it didn't find anything this time
 392 *           -1 if it fell off the end of the list.
 393 */
 394static int cache_clean(void)
 395{
 396	int rv = 0;
 397	struct list_head *next;
 398
 399	spin_lock(&cache_list_lock);
 400
 401	/* find a suitable table if we don't already have one */
 402	while (current_detail == NULL ||
 403	    current_index >= current_detail->hash_size) {
 404		if (current_detail)
 405			next = current_detail->others.next;
 406		else
 407			next = cache_list.next;
 408		if (next == &cache_list) {
 409			current_detail = NULL;
 410			spin_unlock(&cache_list_lock);
 411			return -1;
 412		}
 413		current_detail = list_entry(next, struct cache_detail, others);
 414		if (current_detail->nextcheck > seconds_since_boot())
 415			current_index = current_detail->hash_size;
 416		else {
 417			current_index = 0;
 418			current_detail->nextcheck = seconds_since_boot()+30*60;
 419		}
 420	}
 421
 422	/* find a non-empty bucket in the table */
 423	while (current_detail &&
 424	       current_index < current_detail->hash_size &&
 425	       current_detail->hash_table[current_index] == NULL)
 426		current_index++;
 427
  428	/* find a cleanable entry in the bucket and clean it, or advance to the next bucket */
 429
 430	if (current_detail && current_index < current_detail->hash_size) {
 431		struct cache_head *ch, **cp;
 432		struct cache_detail *d;
 433
 434		write_lock(&current_detail->hash_lock);
 435
 436		/* Ok, now to clean this strand */
 437
 438		cp = & current_detail->hash_table[current_index];
 439		for (ch = *cp ; ch ; cp = & ch->next, ch = *cp) {
 440			if (current_detail->nextcheck > ch->expiry_time)
 441				current_detail->nextcheck = ch->expiry_time+1;
 442			if (!cache_is_expired(current_detail, ch))
 443				continue;
 444
 445			*cp = ch->next;
 446			ch->next = NULL;
 447			current_detail->entries--;
 448			rv = 1;
 449			break;
 450		}
 451
 452		write_unlock(&current_detail->hash_lock);
 453		d = current_detail;
 454		if (!ch)
 455			current_index ++;
 456		spin_unlock(&cache_list_lock);
 457		if (ch) {
 458			if (test_and_clear_bit(CACHE_PENDING, &ch->flags))
 459				cache_dequeue(current_detail, ch);
 460			cache_revisit_request(ch);
 461			cache_put(ch, d);
 462		}
 463	} else
 464		spin_unlock(&cache_list_lock);
 465
 466	return rv;
 467}
 468
 469/*
 470 * We want to regularly clean the cache, so we need to schedule some work ...
 471 */
 472static void do_cache_clean(struct work_struct *work)
 473{
 474	int delay = 5;
 475	if (cache_clean() == -1)
 476		delay = round_jiffies_relative(30*HZ);
 477
 478	if (list_empty(&cache_list))
 479		delay = 0;
 480
 481	if (delay)
 482		schedule_delayed_work(&cache_cleaner, delay);
 483}
 484
 485
 486/*
 487 * Clean all caches promptly.  This just calls cache_clean
 488 * repeatedly until we are sure that every cache has had a chance to
 489 * be fully cleaned
 490 */
 491void cache_flush(void)
 492{
 493	while (cache_clean() != -1)
 494		cond_resched();
 495	while (cache_clean() != -1)
 496		cond_resched();
 497}
 498EXPORT_SYMBOL_GPL(cache_flush);
 499
 500void cache_purge(struct cache_detail *detail)
 501{
 502	detail->flush_time = LONG_MAX;
 503	detail->nextcheck = seconds_since_boot();
 504	cache_flush();
 505	detail->flush_time = 1;
 506}
 507EXPORT_SYMBOL_GPL(cache_purge);
 508
 509
 510/*
 511 * Deferral and Revisiting of Requests.
 512 *
 513 * If a cache lookup finds a pending entry, we
 514 * need to defer the request and revisit it later.
 515 * All deferred requests are stored in a hash table,
 516 * indexed by "struct cache_head *".
 517 * As it may be wasteful to store a whole request
 518 * structure, we allow the request to provide a
 519 * deferred form, which must contain a
 520 * 'struct cache_deferred_req'
 521 * This cache_deferred_req contains a method to allow
 522 * it to be revisited when cache info is available
 523 */
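/*
 * A sketch of the pattern, with hypothetical names: a deferrable
 * request type embeds a cache_deferred_req in its deferred form and
 * supplies a ->revisit method, roughly:
 *
 *	struct my_deferred_req {
 *		struct cache_deferred_req handle;
 *		// ... whatever state is needed to replay the request ...
 *	};
 *
 *	static void my_revisit(struct cache_deferred_req *dreq, int too_many)
 *	{
 *		struct my_deferred_req *dr =
 *			container_of(dreq, struct my_deferred_req, handle);
 *		if (too_many)
 *			;	// queue full: drop the request
 *		else
 *			;	// cache now usable: requeue for processing
 *	}
 *
 * cache_restart_thread() below is the in-tree instance of this pattern
 * for synchronous waiters; see also svc_defer()/svc_revisit() in
 * svc_xprt.c for server threads.
 */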
 524
 525#define	DFR_HASHSIZE	(PAGE_SIZE/sizeof(struct list_head))
 526#define	DFR_HASH(item)	((((long)item)>>4 ^ (((long)item)>>13)) % DFR_HASHSIZE)
 527
 528#define	DFR_MAX	300	/* ??? */
 529
 530static DEFINE_SPINLOCK(cache_defer_lock);
 531static LIST_HEAD(cache_defer_list);
 532static struct hlist_head cache_defer_hash[DFR_HASHSIZE];
 533static int cache_defer_cnt;
 534
 535static void __unhash_deferred_req(struct cache_deferred_req *dreq)
 536{
 537	hlist_del_init(&dreq->hash);
 538	if (!list_empty(&dreq->recent)) {
 539		list_del_init(&dreq->recent);
 540		cache_defer_cnt--;
 541	}
 542}
 543
 544static void __hash_deferred_req(struct cache_deferred_req *dreq, struct cache_head *item)
 545{
 546	int hash = DFR_HASH(item);
 547
 548	INIT_LIST_HEAD(&dreq->recent);
 549	hlist_add_head(&dreq->hash, &cache_defer_hash[hash]);
 550}
 551
 552static void setup_deferral(struct cache_deferred_req *dreq,
 553			   struct cache_head *item,
 554			   int count_me)
 555{
 556
 557	dreq->item = item;
 558
 559	spin_lock(&cache_defer_lock);
 560
 561	__hash_deferred_req(dreq, item);
 562
 563	if (count_me) {
 564		cache_defer_cnt++;
 565		list_add(&dreq->recent, &cache_defer_list);
 566	}
 567
 568	spin_unlock(&cache_defer_lock);
 569
 570}
 571
 572struct thread_deferred_req {
 573	struct cache_deferred_req handle;
 574	struct completion completion;
 575};
 576
 577static void cache_restart_thread(struct cache_deferred_req *dreq, int too_many)
 578{
 579	struct thread_deferred_req *dr =
 580		container_of(dreq, struct thread_deferred_req, handle);
 581	complete(&dr->completion);
 582}
 583
 584static void cache_wait_req(struct cache_req *req, struct cache_head *item)
 585{
 586	struct thread_deferred_req sleeper;
 587	struct cache_deferred_req *dreq = &sleeper.handle;
 588
 589	sleeper.completion = COMPLETION_INITIALIZER_ONSTACK(sleeper.completion);
 590	dreq->revisit = cache_restart_thread;
 591
 592	setup_deferral(dreq, item, 0);
 593
 594	if (!test_bit(CACHE_PENDING, &item->flags) ||
 595	    wait_for_completion_interruptible_timeout(
 596		    &sleeper.completion, req->thread_wait) <= 0) {
 597		/* The completion wasn't completed, so we need
 598		 * to clean up
 599		 */
 600		spin_lock(&cache_defer_lock);
 601		if (!hlist_unhashed(&sleeper.handle.hash)) {
 602			__unhash_deferred_req(&sleeper.handle);
 603			spin_unlock(&cache_defer_lock);
 604		} else {
 605			/* cache_revisit_request already removed
 606			 * this from the hash table, but hasn't
 607			 * called ->revisit yet.  It will very soon
 608			 * and we need to wait for it.
 609			 */
 610			spin_unlock(&cache_defer_lock);
 611			wait_for_completion(&sleeper.completion);
 612		}
 613	}
 614}
 615
 616static void cache_limit_defers(void)
 617{
  618	/* Make sure we haven't exceeded the limit of allowed deferred
 619	 * requests.
 620	 */
 621	struct cache_deferred_req *discard = NULL;
 622
 623	if (cache_defer_cnt <= DFR_MAX)
 624		return;
 625
 626	spin_lock(&cache_defer_lock);
 627
 628	/* Consider removing either the first or the last */
 629	if (cache_defer_cnt > DFR_MAX) {
 630		if (net_random() & 1)
 631			discard = list_entry(cache_defer_list.next,
 632					     struct cache_deferred_req, recent);
 633		else
 634			discard = list_entry(cache_defer_list.prev,
 635					     struct cache_deferred_req, recent);
 636		__unhash_deferred_req(discard);
 637	}
 638	spin_unlock(&cache_defer_lock);
 639	if (discard)
 640		discard->revisit(discard, 1);
 641}
 642
 643/* Return true if and only if a deferred request is queued. */
 644static bool cache_defer_req(struct cache_req *req, struct cache_head *item)
 645{
 646	struct cache_deferred_req *dreq;
 647
 648	if (req->thread_wait) {
 649		cache_wait_req(req, item);
 650		if (!test_bit(CACHE_PENDING, &item->flags))
 651			return false;
 652	}
 653	dreq = req->defer(req);
 654	if (dreq == NULL)
 655		return false;
 656	setup_deferral(dreq, item, 1);
 657	if (!test_bit(CACHE_PENDING, &item->flags))
 658		/* Bit could have been cleared before we managed to
 659		 * set up the deferral, so need to revisit just in case
 660		 */
 661		cache_revisit_request(item);
 662
 663	cache_limit_defers();
 664	return true;
 665}
 666
 667static void cache_revisit_request(struct cache_head *item)
 668{
 669	struct cache_deferred_req *dreq;
 670	struct list_head pending;
 671	struct hlist_node *lp, *tmp;
 672	int hash = DFR_HASH(item);
 673
 674	INIT_LIST_HEAD(&pending);
 675	spin_lock(&cache_defer_lock);
 676
 677	hlist_for_each_entry_safe(dreq, lp, tmp, &cache_defer_hash[hash], hash)
 678		if (dreq->item == item) {
 679			__unhash_deferred_req(dreq);
 680			list_add(&dreq->recent, &pending);
 681		}
 682
 683	spin_unlock(&cache_defer_lock);
 684
 685	while (!list_empty(&pending)) {
 686		dreq = list_entry(pending.next, struct cache_deferred_req, recent);
 687		list_del_init(&dreq->recent);
 688		dreq->revisit(dreq, 0);
 689	}
 690}
 691
 692void cache_clean_deferred(void *owner)
 693{
 694	struct cache_deferred_req *dreq, *tmp;
 695	struct list_head pending;
 696
 697
 698	INIT_LIST_HEAD(&pending);
 699	spin_lock(&cache_defer_lock);
 700
 701	list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
 702		if (dreq->owner == owner) {
 703			__unhash_deferred_req(dreq);
 704			list_add(&dreq->recent, &pending);
 705		}
 706	}
 707	spin_unlock(&cache_defer_lock);
 708
 709	while (!list_empty(&pending)) {
 710		dreq = list_entry(pending.next, struct cache_deferred_req, recent);
 711		list_del_init(&dreq->recent);
 712		dreq->revisit(dreq, 1);
 713	}
 714}
 715
 716/*
 717 * communicate with user-space
 718 *
 719 * We have a magic /proc file - /proc/sunrpc/<cachename>/channel.
 720 * On read, you get a full request, or block.
 721 * On write, an update request is processed.
 722 * Poll works if anything to read, and always allows write.
 723 *
 724 * Implemented by linked list of requests.  Each open file has
 725 * a ->private that also exists in this list.  New requests are added
  726 * to the end and may wake up any preceding readers.
 727 * New readers are added to the head.  If, on read, an item is found with
 728 * CACHE_UPCALLING clear, we free it from the list.
 729 *
 730 */
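/*
 * A sketch of the userspace side, assuming a cache registered under
 * /proc/net/rpc (the daemon code here is hypothetical):
 *
 *	int fd = open("/proc/net/rpc/auth.unix.ip/channel", O_RDWR);
 *	char buf[8192];
 *	for (;;) {
 *		ssize_t n = read(fd, buf, sizeof(buf)); // one full request
 *		if (n <= 0)
 *			continue;
 *		// ... parse the request, compute the answer ...
 *		write(fd, reply, replylen);		// one full downcall
 *	}
 *
 * Each read() returns exactly one queued request (or blocks), and each
 * write() is handed whole to ->cache_parse().
 */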
 731
 732static DEFINE_SPINLOCK(queue_lock);
 733static DEFINE_MUTEX(queue_io_mutex);
 734
 735struct cache_queue {
 736	struct list_head	list;
 737	int			reader;	/* if 0, then request */
 738};
 739struct cache_request {
 740	struct cache_queue	q;
 741	struct cache_head	*item;
 742	char			* buf;
 743	int			len;
 744	int			readers;
 745};
 746struct cache_reader {
 747	struct cache_queue	q;
 748	int			offset;	/* if non-0, we have a refcnt on next request */
 749};
 750
 751static ssize_t cache_read(struct file *filp, char __user *buf, size_t count,
 752			  loff_t *ppos, struct cache_detail *cd)
 753{
 754	struct cache_reader *rp = filp->private_data;
 755	struct cache_request *rq;
 756	struct inode *inode = filp->f_path.dentry->d_inode;
 757	int err;
 758
 759	if (count == 0)
 760		return 0;
 761
 762	mutex_lock(&inode->i_mutex); /* protect against multiple concurrent
 763			      * readers on this file */
 764 again:
 765	spin_lock(&queue_lock);
 766	/* need to find next request */
 767	while (rp->q.list.next != &cd->queue &&
 768	       list_entry(rp->q.list.next, struct cache_queue, list)
 769	       ->reader) {
 770		struct list_head *next = rp->q.list.next;
 771		list_move(&rp->q.list, next);
 772	}
 773	if (rp->q.list.next == &cd->queue) {
 774		spin_unlock(&queue_lock);
 775		mutex_unlock(&inode->i_mutex);
 776		BUG_ON(rp->offset);
 777		return 0;
 778	}
 779	rq = container_of(rp->q.list.next, struct cache_request, q.list);
 780	BUG_ON(rq->q.reader);
 781	if (rp->offset == 0)
 782		rq->readers++;
 783	spin_unlock(&queue_lock);
 784
 785	if (rp->offset == 0 && !test_bit(CACHE_PENDING, &rq->item->flags)) {
 786		err = -EAGAIN;
 787		spin_lock(&queue_lock);
 788		list_move(&rp->q.list, &rq->q.list);
 789		spin_unlock(&queue_lock);
 790	} else {
 791		if (rp->offset + count > rq->len)
 792			count = rq->len - rp->offset;
 793		err = -EFAULT;
 794		if (copy_to_user(buf, rq->buf + rp->offset, count))
 795			goto out;
 796		rp->offset += count;
 797		if (rp->offset >= rq->len) {
 798			rp->offset = 0;
 799			spin_lock(&queue_lock);
 800			list_move(&rp->q.list, &rq->q.list);
 801			spin_unlock(&queue_lock);
 802		}
 803		err = 0;
 804	}
 805 out:
 806	if (rp->offset == 0) {
 807		/* need to release rq */
 808		spin_lock(&queue_lock);
 809		rq->readers--;
 810		if (rq->readers == 0 &&
 811		    !test_bit(CACHE_PENDING, &rq->item->flags)) {
 812			list_del(&rq->q.list);
 813			spin_unlock(&queue_lock);
 814			cache_put(rq->item, cd);
 815			kfree(rq->buf);
 816			kfree(rq);
 817		} else
 818			spin_unlock(&queue_lock);
 819	}
 820	if (err == -EAGAIN)
 821		goto again;
 822	mutex_unlock(&inode->i_mutex);
 823	return err ? err :  count;
 824}
 825
 826static ssize_t cache_do_downcall(char *kaddr, const char __user *buf,
 827				 size_t count, struct cache_detail *cd)
 828{
 829	ssize_t ret;
 830
 831	if (copy_from_user(kaddr, buf, count))
 832		return -EFAULT;
 833	kaddr[count] = '\0';
 834	ret = cd->cache_parse(cd, kaddr, count);
 835	if (!ret)
 836		ret = count;
 837	return ret;
 838}
 839
 840static ssize_t cache_slow_downcall(const char __user *buf,
 841				   size_t count, struct cache_detail *cd)
 842{
 843	static char write_buf[8192]; /* protected by queue_io_mutex */
 844	ssize_t ret = -EINVAL;
 845
 846	if (count >= sizeof(write_buf))
 847		goto out;
 848	mutex_lock(&queue_io_mutex);
 849	ret = cache_do_downcall(write_buf, buf, count, cd);
 850	mutex_unlock(&queue_io_mutex);
 851out:
 852	return ret;
 853}
 854
 855static ssize_t cache_downcall(struct address_space *mapping,
 856			      const char __user *buf,
 857			      size_t count, struct cache_detail *cd)
 858{
 859	struct page *page;
 860	char *kaddr;
 861	ssize_t ret = -ENOMEM;
 862
 863	if (count >= PAGE_CACHE_SIZE)
 864		goto out_slow;
 865
 866	page = find_or_create_page(mapping, 0, GFP_KERNEL);
 867	if (!page)
 868		goto out_slow;
 869
 870	kaddr = kmap(page);
 871	ret = cache_do_downcall(kaddr, buf, count, cd);
 872	kunmap(page);
 873	unlock_page(page);
 874	page_cache_release(page);
 875	return ret;
 876out_slow:
 877	return cache_slow_downcall(buf, count, cd);
 878}
 879
 880static ssize_t cache_write(struct file *filp, const char __user *buf,
 881			   size_t count, loff_t *ppos,
 882			   struct cache_detail *cd)
 883{
 884	struct address_space *mapping = filp->f_mapping;
 885	struct inode *inode = filp->f_path.dentry->d_inode;
 886	ssize_t ret = -EINVAL;
 887
 888	if (!cd->cache_parse)
 889		goto out;
 890
 891	mutex_lock(&inode->i_mutex);
 892	ret = cache_downcall(mapping, buf, count, cd);
 893	mutex_unlock(&inode->i_mutex);
 894out:
 895	return ret;
 896}
 897
 898static DECLARE_WAIT_QUEUE_HEAD(queue_wait);
 899
 900static unsigned int cache_poll(struct file *filp, poll_table *wait,
 901			       struct cache_detail *cd)
 902{
 903	unsigned int mask;
 904	struct cache_reader *rp = filp->private_data;
 905	struct cache_queue *cq;
 906
 907	poll_wait(filp, &queue_wait, wait);
 908
  909	/* always allow write */
  910	mask = POLLOUT | POLLWRNORM;
 911
 912	if (!rp)
 913		return mask;
 914
 915	spin_lock(&queue_lock);
 916
 917	for (cq= &rp->q; &cq->list != &cd->queue;
 918	     cq = list_entry(cq->list.next, struct cache_queue, list))
 919		if (!cq->reader) {
 920			mask |= POLLIN | POLLRDNORM;
 921			break;
 922		}
 923	spin_unlock(&queue_lock);
 924	return mask;
 925}
 926
 927static int cache_ioctl(struct inode *ino, struct file *filp,
 928		       unsigned int cmd, unsigned long arg,
 929		       struct cache_detail *cd)
 930{
 931	int len = 0;
 932	struct cache_reader *rp = filp->private_data;
 933	struct cache_queue *cq;
 934
 935	if (cmd != FIONREAD || !rp)
 936		return -EINVAL;
 937
 938	spin_lock(&queue_lock);
 939
 940	/* only find the length remaining in current request,
 941	 * or the length of the next request
 942	 */
 943	for (cq= &rp->q; &cq->list != &cd->queue;
 944	     cq = list_entry(cq->list.next, struct cache_queue, list))
 945		if (!cq->reader) {
 946			struct cache_request *cr =
 947				container_of(cq, struct cache_request, q);
 948			len = cr->len - rp->offset;
 949			break;
 950		}
 951	spin_unlock(&queue_lock);
 952
 953	return put_user(len, (int __user *)arg);
 954}
 955
 956static int cache_open(struct inode *inode, struct file *filp,
 957		      struct cache_detail *cd)
 958{
 959	struct cache_reader *rp = NULL;
 960
 961	if (!cd || !try_module_get(cd->owner))
 962		return -EACCES;
 963	nonseekable_open(inode, filp);
 964	if (filp->f_mode & FMODE_READ) {
 965		rp = kmalloc(sizeof(*rp), GFP_KERNEL);
 966		if (!rp)
 967			return -ENOMEM;
 968		rp->offset = 0;
 969		rp->q.reader = 1;
 970		atomic_inc(&cd->readers);
 971		spin_lock(&queue_lock);
 972		list_add(&rp->q.list, &cd->queue);
 973		spin_unlock(&queue_lock);
 974	}
 975	filp->private_data = rp;
 976	return 0;
 977}
 978
 979static int cache_release(struct inode *inode, struct file *filp,
 980			 struct cache_detail *cd)
 981{
 982	struct cache_reader *rp = filp->private_data;
 983
 984	if (rp) {
 985		spin_lock(&queue_lock);
 986		if (rp->offset) {
 987			struct cache_queue *cq;
 988			for (cq= &rp->q; &cq->list != &cd->queue;
 989			     cq = list_entry(cq->list.next, struct cache_queue, list))
 990				if (!cq->reader) {
 991					container_of(cq, struct cache_request, q)
 992						->readers--;
 993					break;
 994				}
 995			rp->offset = 0;
 996		}
 997		list_del(&rp->q.list);
 998		spin_unlock(&queue_lock);
 999
1000		filp->private_data = NULL;
1001		kfree(rp);
1002
1003		cd->last_close = seconds_since_boot();
1004		atomic_dec(&cd->readers);
1005	}
1006	module_put(cd->owner);
1007	return 0;
1008}
1009
1010
1011
1012static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch)
1013{
1014	struct cache_queue *cq;
1015	spin_lock(&queue_lock);
1016	list_for_each_entry(cq, &detail->queue, list)
1017		if (!cq->reader) {
1018			struct cache_request *cr = container_of(cq, struct cache_request, q);
1019			if (cr->item != ch)
1020				continue;
1021			if (cr->readers != 0)
1022				continue;
1023			list_del(&cr->q.list);
1024			spin_unlock(&queue_lock);
1025			cache_put(cr->item, detail);
1026			kfree(cr->buf);
1027			kfree(cr);
1028			return;
1029		}
1030	spin_unlock(&queue_lock);
1031}
1032
1033/*
1034 * Support routines for text-based upcalls.
1035 * Fields are separated by spaces.
 1036 * Fields are either mangled, quoting space/tab/newline/slosh with a slosh,
 1037 * or hexified with a leading \x
1038 * Record is terminated with newline.
1039 *
1040 */
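/*
 * For example (a sketch; buf and raw are hypothetical), composing one
 * upcall record with the helpers below:
 *
 *	char *bp = buf;
 *	int len = PAGE_SIZE;
 *	qword_add(&bp, &len, "a b");	  // emits "a\040b " (octal quoting)
 *	qword_addhex(&bp, &len, raw, 2);  // emits "\x01ab " for raw={0x01,0xab}
 *	*bp++ = '\n';			  // terminate the record
 *	len--;
 *
 * On overflow the helpers leave len negative, which the caller must
 * check before queueing the record.
 */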
1041
1042void qword_add(char **bpp, int *lp, char *str)
1043{
1044	char *bp = *bpp;
1045	int len = *lp;
1046	char c;
1047
1048	if (len < 0) return;
1049
1050	while ((c=*str++) && len)
1051		switch(c) {
1052		case ' ':
1053		case '\t':
1054		case '\n':
1055		case '\\':
1056			if (len >= 4) {
1057				*bp++ = '\\';
1058				*bp++ = '0' + ((c & 0300)>>6);
1059				*bp++ = '0' + ((c & 0070)>>3);
1060				*bp++ = '0' + ((c & 0007)>>0);
1061			}
1062			len -= 4;
1063			break;
1064		default:
1065			*bp++ = c;
1066			len--;
1067		}
1068	if (c || len <1) len = -1;
1069	else {
1070		*bp++ = ' ';
1071		len--;
1072	}
1073	*bpp = bp;
1074	*lp = len;
1075}
1076EXPORT_SYMBOL_GPL(qword_add);
1077
1078void qword_addhex(char **bpp, int *lp, char *buf, int blen)
1079{
1080	char *bp = *bpp;
1081	int len = *lp;
1082
1083	if (len < 0) return;
1084
1085	if (len > 2) {
1086		*bp++ = '\\';
1087		*bp++ = 'x';
1088		len -= 2;
1089		while (blen && len >= 2) {
1090			unsigned char c = *buf++;
1091			*bp++ = '0' + ((c&0xf0)>>4) + (c>=0xa0)*('a'-'9'-1);
1092			*bp++ = '0' + (c&0x0f) + ((c&0x0f)>=0x0a)*('a'-'9'-1);
1093			len -= 2;
1094			blen--;
1095		}
1096	}
1097	if (blen || len<1) len = -1;
1098	else {
1099		*bp++ = ' ';
1100		len--;
1101	}
1102	*bpp = bp;
1103	*lp = len;
1104}
1105EXPORT_SYMBOL_GPL(qword_addhex);
1106
1107static void warn_no_listener(struct cache_detail *detail)
1108{
1109	if (detail->last_warn != detail->last_close) {
1110		detail->last_warn = detail->last_close;
1111		if (detail->warn_no_listener)
1112			detail->warn_no_listener(detail, detail->last_close != 0);
1113	}
1114}
1115
1116static bool cache_listeners_exist(struct cache_detail *detail)
1117{
1118	if (atomic_read(&detail->readers))
1119		return true;
1120	if (detail->last_close == 0)
1121		/* This cache was never opened */
1122		return false;
1123	if (detail->last_close < seconds_since_boot() - 30)
1124		/*
1125		 * We allow for the possibility that someone might
1126		 * restart a userspace daemon without restarting the
1127		 * server; but after 30 seconds, we give up.
1128		 */
1129		 return false;
1130	return true;
1131}
1132
1133/*
1134 * register an upcall request to user-space and queue it up for read() by the
1135 * upcall daemon.
1136 *
1137 * Each request is at most one page long.
1138 */
1139int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h,
1140		void (*cache_request)(struct cache_detail *,
1141				      struct cache_head *,
1142				      char **,
1143				      int *))
1144{
1145
1146	char *buf;
1147	struct cache_request *crq;
1148	char *bp;
1149	int len;
1150
1151	if (!cache_listeners_exist(detail)) {
1152		warn_no_listener(detail);
1153		return -EINVAL;
1154	}
1155
1156	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
1157	if (!buf)
1158		return -EAGAIN;
1159
1160	crq = kmalloc(sizeof (*crq), GFP_KERNEL);
1161	if (!crq) {
1162		kfree(buf);
1163		return -EAGAIN;
1164	}
1165
1166	bp = buf; len = PAGE_SIZE;
1167
1168	cache_request(detail, h, &bp, &len);
1169
1170	if (len < 0) {
1171		kfree(buf);
1172		kfree(crq);
1173		return -EAGAIN;
1174	}
1175	crq->q.reader = 0;
1176	crq->item = cache_get(h);
1177	crq->buf = buf;
1178	crq->len = PAGE_SIZE - len;
1179	crq->readers = 0;
1180	spin_lock(&queue_lock);
1181	list_add_tail(&crq->q.list, &detail->queue);
1182	spin_unlock(&queue_lock);
1183	wake_up(&queue_wait);
1184	return 0;
1185}
1186EXPORT_SYMBOL_GPL(sunrpc_cache_pipe_upcall);
1187
1188/*
1189 * parse a message from user-space and pass it
1190 * to an appropriate cache
1191 * Messages are, like requests, separated into fields by
 1192 * spaces, and dequoted as \xHEXSTRING or embedded \nnn octal
1193 *
1194 * Message is
1195 *   reply cachename expiry key ... content....
1196 *
1197 * key and content are both parsed by cache
1198 */
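/*
 * For example (a sketch), pulling one field off a downcall line with
 * qword_get() below: for mesg pointing at "\x6162 rest",
 *
 *	char field[128];
 *	int n = qword_get(&mesg, field, sizeof(field));
 *
 * returns n == 2 with field == "ab", and advances mesg to "rest".
 * A return of -1 signals a malformed field.
 */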
1199
1200#define isodigit(c) (isdigit(c) && c <= '7')
1201int qword_get(char **bpp, char *dest, int bufsize)
1202{
1203	/* return bytes copied, or -1 on error */
1204	char *bp = *bpp;
1205	int len = 0;
1206
1207	while (*bp == ' ') bp++;
1208
1209	if (bp[0] == '\\' && bp[1] == 'x') {
1210		/* HEX STRING */
1211		bp += 2;
1212		while (len < bufsize) {
1213			int h, l;
1214
1215			h = hex_to_bin(bp[0]);
1216			if (h < 0)
1217				break;
1218
1219			l = hex_to_bin(bp[1]);
1220			if (l < 0)
1221				break;
1222
1223			*dest++ = (h << 4) | l;
1224			bp += 2;
1225			len++;
1226		}
1227	} else {
1228		/* text with \nnn octal quoting */
1229		while (*bp != ' ' && *bp != '\n' && *bp && len < bufsize-1) {
1230			if (*bp == '\\' &&
1231			    isodigit(bp[1]) && (bp[1] <= '3') &&
1232			    isodigit(bp[2]) &&
1233			    isodigit(bp[3])) {
1234				int byte = (*++bp -'0');
1235				bp++;
1236				byte = (byte << 3) | (*bp++ - '0');
1237				byte = (byte << 3) | (*bp++ - '0');
1238				*dest++ = byte;
1239				len++;
1240			} else {
1241				*dest++ = *bp++;
1242				len++;
1243			}
1244		}
1245	}
1246
1247	if (*bp != ' ' && *bp != '\n' && *bp != '\0')
1248		return -1;
1249	while (*bp == ' ') bp++;
1250	*bpp = bp;
1251	*dest = '\0';
1252	return len;
1253}
1254EXPORT_SYMBOL_GPL(qword_get);
1255
1256
1257/*
1258 * support /proc/sunrpc/cache/$CACHENAME/content
1259 * as a seqfile.
1260 * We call ->cache_show passing NULL for the item to
1261 * get a header, then pass each real item in the cache
1262 */
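/*
 * c_start/c_next below pack the iteration position into the 64-bit
 * *pos as
 *
 *	*pos = ((loff_t)hash << 32) | (entry + 1)
 *
 * i.e. the hash-bucket index in the high 32 bits and a 1-based entry
 * index within that bucket's chain in the low 32 bits; *pos == 0 is
 * reserved for the header row (SEQ_START_TOKEN).
 */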
1263
1264struct handle {
1265	struct cache_detail *cd;
1266};
1267
1268static void *c_start(struct seq_file *m, loff_t *pos)
1269	__acquires(cd->hash_lock)
1270{
1271	loff_t n = *pos;
1272	unsigned hash, entry;
1273	struct cache_head *ch;
1274	struct cache_detail *cd = ((struct handle*)m->private)->cd;
1275
1276
1277	read_lock(&cd->hash_lock);
1278	if (!n--)
1279		return SEQ_START_TOKEN;
1280	hash = n >> 32;
1281	entry = n & ((1LL<<32) - 1);
1282
1283	for (ch=cd->hash_table[hash]; ch; ch=ch->next)
1284		if (!entry--)
1285			return ch;
1286	n &= ~((1LL<<32) - 1);
1287	do {
1288		hash++;
1289		n += 1LL<<32;
1290	} while(hash < cd->hash_size &&
1291		cd->hash_table[hash]==NULL);
1292	if (hash >= cd->hash_size)
1293		return NULL;
1294	*pos = n+1;
1295	return cd->hash_table[hash];
1296}
1297
1298static void *c_next(struct seq_file *m, void *p, loff_t *pos)
1299{
1300	struct cache_head *ch = p;
1301	int hash = (*pos >> 32);
1302	struct cache_detail *cd = ((struct handle*)m->private)->cd;
1303
1304	if (p == SEQ_START_TOKEN)
1305		hash = 0;
1306	else if (ch->next == NULL) {
1307		hash++;
1308		*pos += 1LL<<32;
1309	} else {
1310		++*pos;
1311		return ch->next;
1312	}
1313	*pos &= ~((1LL<<32) - 1);
1314	while (hash < cd->hash_size &&
1315	       cd->hash_table[hash] == NULL) {
1316		hash++;
1317		*pos += 1LL<<32;
1318	}
1319	if (hash >= cd->hash_size)
1320		return NULL;
1321	++*pos;
1322	return cd->hash_table[hash];
1323}
1324
1325static void c_stop(struct seq_file *m, void *p)
1326	__releases(cd->hash_lock)
1327{
1328	struct cache_detail *cd = ((struct handle*)m->private)->cd;
1329	read_unlock(&cd->hash_lock);
1330}
1331
1332static int c_show(struct seq_file *m, void *p)
1333{
1334	struct cache_head *cp = p;
1335	struct cache_detail *cd = ((struct handle*)m->private)->cd;
1336
1337	if (p == SEQ_START_TOKEN)
1338		return cd->cache_show(m, cd, NULL);
1339
1340	ifdebug(CACHE)
1341		seq_printf(m, "# expiry=%ld refcnt=%d flags=%lx\n",
1342			   convert_to_wallclock(cp->expiry_time),
1343			   atomic_read(&cp->ref.refcount), cp->flags);
1344	cache_get(cp);
1345	if (cache_check(cd, cp, NULL))
1346		/* cache_check does a cache_put on failure */
1347		seq_printf(m, "# ");
1348	else
1349		cache_put(cp, cd);
1350
1351	return cd->cache_show(m, cd, cp);
1352}
1353
1354static const struct seq_operations cache_content_op = {
1355	.start	= c_start,
1356	.next	= c_next,
1357	.stop	= c_stop,
1358	.show	= c_show,
1359};
1360
1361static int content_open(struct inode *inode, struct file *file,
1362			struct cache_detail *cd)
1363{
1364	struct handle *han;
1365
1366	if (!cd || !try_module_get(cd->owner))
1367		return -EACCES;
1368	han = __seq_open_private(file, &cache_content_op, sizeof(*han));
1369	if (han == NULL) {
1370		module_put(cd->owner);
1371		return -ENOMEM;
1372	}
1373
1374	han->cd = cd;
1375	return 0;
1376}
1377
1378static int content_release(struct inode *inode, struct file *file,
1379		struct cache_detail *cd)
1380{
1381	int ret = seq_release_private(inode, file);
1382	module_put(cd->owner);
1383	return ret;
1384}
1385
1386static int open_flush(struct inode *inode, struct file *file,
1387			struct cache_detail *cd)
1388{
1389	if (!cd || !try_module_get(cd->owner))
1390		return -EACCES;
1391	return nonseekable_open(inode, file);
1392}
1393
1394static int release_flush(struct inode *inode, struct file *file,
1395			struct cache_detail *cd)
1396{
1397	module_put(cd->owner);
1398	return 0;
1399}
1400
1401static ssize_t read_flush(struct file *file, char __user *buf,
1402			  size_t count, loff_t *ppos,
1403			  struct cache_detail *cd)
1404{
1405	char tbuf[20];
1406	unsigned long p = *ppos;
1407	size_t len;
1408
1409	sprintf(tbuf, "%lu\n", convert_to_wallclock(cd->flush_time));
1410	len = strlen(tbuf);
1411	if (p >= len)
1412		return 0;
1413	len -= p;
1414	if (len > count)
1415		len = count;
1416	if (copy_to_user(buf, (void*)(tbuf+p), len))
1417		return -EFAULT;
1418	*ppos += len;
1419	return len;
1420}
1421
1422static ssize_t write_flush(struct file *file, const char __user *buf,
1423			   size_t count, loff_t *ppos,
1424			   struct cache_detail *cd)
1425{
1426	char tbuf[20];
1427	char *bp, *ep;
1428
1429	if (*ppos || count > sizeof(tbuf)-1)
1430		return -EINVAL;
1431	if (copy_from_user(tbuf, buf, count))
1432		return -EFAULT;
1433	tbuf[count] = 0;
1434	simple_strtoul(tbuf, &ep, 0);
1435	if (*ep && *ep != '\n')
1436		return -EINVAL;
1437
1438	bp = tbuf;
1439	cd->flush_time = get_expiry(&bp);
1440	cd->nextcheck = seconds_since_boot();
1441	cache_flush();
1442
1443	*ppos += count;
1444	return count;
1445}
1446
1447static ssize_t cache_read_procfs(struct file *filp, char __user *buf,
1448				 size_t count, loff_t *ppos)
1449{
1450	struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;
1451
1452	return cache_read(filp, buf, count, ppos, cd);
1453}
1454
1455static ssize_t cache_write_procfs(struct file *filp, const char __user *buf,
1456				  size_t count, loff_t *ppos)
1457{
1458	struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;
1459
1460	return cache_write(filp, buf, count, ppos, cd);
1461}
1462
1463static unsigned int cache_poll_procfs(struct file *filp, poll_table *wait)
1464{
1465	struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;
1466
1467	return cache_poll(filp, wait, cd);
1468}
1469
1470static long cache_ioctl_procfs(struct file *filp,
1471			       unsigned int cmd, unsigned long arg)
1472{
1473	struct inode *inode = filp->f_path.dentry->d_inode;
1474	struct cache_detail *cd = PDE(inode)->data;
1475
1476	return cache_ioctl(inode, filp, cmd, arg, cd);
1477}
1478
1479static int cache_open_procfs(struct inode *inode, struct file *filp)
1480{
1481	struct cache_detail *cd = PDE(inode)->data;
1482
1483	return cache_open(inode, filp, cd);
1484}
1485
1486static int cache_release_procfs(struct inode *inode, struct file *filp)
1487{
1488	struct cache_detail *cd = PDE(inode)->data;
1489
1490	return cache_release(inode, filp, cd);
1491}
1492
1493static const struct file_operations cache_file_operations_procfs = {
1494	.owner		= THIS_MODULE,
1495	.llseek		= no_llseek,
1496	.read		= cache_read_procfs,
1497	.write		= cache_write_procfs,
1498	.poll		= cache_poll_procfs,
1499	.unlocked_ioctl	= cache_ioctl_procfs, /* for FIONREAD */
1500	.open		= cache_open_procfs,
1501	.release	= cache_release_procfs,
1502};
1503
1504static int content_open_procfs(struct inode *inode, struct file *filp)
1505{
1506	struct cache_detail *cd = PDE(inode)->data;
1507
1508	return content_open(inode, filp, cd);
1509}
1510
1511static int content_release_procfs(struct inode *inode, struct file *filp)
1512{
1513	struct cache_detail *cd = PDE(inode)->data;
1514
1515	return content_release(inode, filp, cd);
1516}
1517
1518static const struct file_operations content_file_operations_procfs = {
1519	.open		= content_open_procfs,
1520	.read		= seq_read,
1521	.llseek		= seq_lseek,
1522	.release	= content_release_procfs,
1523};
1524
1525static int open_flush_procfs(struct inode *inode, struct file *filp)
1526{
1527	struct cache_detail *cd = PDE(inode)->data;
1528
1529	return open_flush(inode, filp, cd);
1530}
1531
1532static int release_flush_procfs(struct inode *inode, struct file *filp)
1533{
1534	struct cache_detail *cd = PDE(inode)->data;
1535
1536	return release_flush(inode, filp, cd);
1537}
1538
1539static ssize_t read_flush_procfs(struct file *filp, char __user *buf,
1540			    size_t count, loff_t *ppos)
1541{
1542	struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;
1543
1544	return read_flush(filp, buf, count, ppos, cd);
1545}
1546
1547static ssize_t write_flush_procfs(struct file *filp,
1548				  const char __user *buf,
1549				  size_t count, loff_t *ppos)
1550{
1551	struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;
1552
1553	return write_flush(filp, buf, count, ppos, cd);
1554}
1555
1556static const struct file_operations cache_flush_operations_procfs = {
1557	.open		= open_flush_procfs,
1558	.read		= read_flush_procfs,
1559	.write		= write_flush_procfs,
1560	.release	= release_flush_procfs,
1561	.llseek		= no_llseek,
1562};
1563
1564static void remove_cache_proc_entries(struct cache_detail *cd, struct net *net)
1565{
1566	struct sunrpc_net *sn;
1567
1568	if (cd->u.procfs.proc_ent == NULL)
1569		return;
1570	if (cd->u.procfs.flush_ent)
1571		remove_proc_entry("flush", cd->u.procfs.proc_ent);
1572	if (cd->u.procfs.channel_ent)
1573		remove_proc_entry("channel", cd->u.procfs.proc_ent);
1574	if (cd->u.procfs.content_ent)
1575		remove_proc_entry("content", cd->u.procfs.proc_ent);
1576	cd->u.procfs.proc_ent = NULL;
1577	sn = net_generic(net, sunrpc_net_id);
1578	remove_proc_entry(cd->name, sn->proc_net_rpc);
1579}
1580
1581#ifdef CONFIG_PROC_FS
1582static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
1583{
1584	struct proc_dir_entry *p;
1585	struct sunrpc_net *sn;
1586
1587	sn = net_generic(net, sunrpc_net_id);
1588	cd->u.procfs.proc_ent = proc_mkdir(cd->name, sn->proc_net_rpc);
1589	if (cd->u.procfs.proc_ent == NULL)
1590		goto out_nomem;
1591	cd->u.procfs.channel_ent = NULL;
1592	cd->u.procfs.content_ent = NULL;
1593
1594	p = proc_create_data("flush", S_IFREG|S_IRUSR|S_IWUSR,
1595			     cd->u.procfs.proc_ent,
1596			     &cache_flush_operations_procfs, cd);
1597	cd->u.procfs.flush_ent = p;
1598	if (p == NULL)
1599		goto out_nomem;
1600
1601	if (cd->cache_upcall || cd->cache_parse) {
1602		p = proc_create_data("channel", S_IFREG|S_IRUSR|S_IWUSR,
1603				     cd->u.procfs.proc_ent,
1604				     &cache_file_operations_procfs, cd);
1605		cd->u.procfs.channel_ent = p;
1606		if (p == NULL)
1607			goto out_nomem;
1608	}
1609	if (cd->cache_show) {
1610		p = proc_create_data("content", S_IFREG|S_IRUSR|S_IWUSR,
1611				cd->u.procfs.proc_ent,
1612				&content_file_operations_procfs, cd);
1613		cd->u.procfs.content_ent = p;
1614		if (p == NULL)
1615			goto out_nomem;
1616	}
1617	return 0;
1618out_nomem:
1619	remove_cache_proc_entries(cd, net);
1620	return -ENOMEM;
1621}
1622#else /* CONFIG_PROC_FS */
1623static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
1624{
1625	return 0;
1626}
1627#endif
1628
1629void __init cache_initialize(void)
1630{
1631	INIT_DELAYED_WORK_DEFERRABLE(&cache_cleaner, do_cache_clean);
1632}
1633
1634int cache_register_net(struct cache_detail *cd, struct net *net)
1635{
1636	int ret;
1637
1638	sunrpc_init_cache_detail(cd);
1639	ret = create_cache_proc_entries(cd, net);
1640	if (ret)
1641		sunrpc_destroy_cache_detail(cd);
1642	return ret;
1643}
1644
1645int cache_register(struct cache_detail *cd)
1646{
1647	return cache_register_net(cd, &init_net);
1648}
1649EXPORT_SYMBOL_GPL(cache_register);
1650
1651void cache_unregister_net(struct cache_detail *cd, struct net *net)
1652{
1653	remove_cache_proc_entries(cd, net);
1654	sunrpc_destroy_cache_detail(cd);
1655}
1656
1657void cache_unregister(struct cache_detail *cd)
1658{
1659	cache_unregister_net(cd, &init_net);
1660}
1661EXPORT_SYMBOL_GPL(cache_unregister);
1662
1663static ssize_t cache_read_pipefs(struct file *filp, char __user *buf,
1664				 size_t count, loff_t *ppos)
1665{
1666	struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private;
1667
1668	return cache_read(filp, buf, count, ppos, cd);
1669}
1670
1671static ssize_t cache_write_pipefs(struct file *filp, const char __user *buf,
1672				  size_t count, loff_t *ppos)
1673{
1674	struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private;
1675
1676	return cache_write(filp, buf, count, ppos, cd);
1677}
1678
1679static unsigned int cache_poll_pipefs(struct file *filp, poll_table *wait)
1680{
1681	struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private;
1682
1683	return cache_poll(filp, wait, cd);
1684}
1685
1686static long cache_ioctl_pipefs(struct file *filp,
1687			      unsigned int cmd, unsigned long arg)
1688{
1689	struct inode *inode = filp->f_dentry->d_inode;
1690	struct cache_detail *cd = RPC_I(inode)->private;
1691
1692	return cache_ioctl(inode, filp, cmd, arg, cd);
1693}
1694
1695static int cache_open_pipefs(struct inode *inode, struct file *filp)
1696{
1697	struct cache_detail *cd = RPC_I(inode)->private;
1698
1699	return cache_open(inode, filp, cd);
1700}
1701
1702static int cache_release_pipefs(struct inode *inode, struct file *filp)
1703{
1704	struct cache_detail *cd = RPC_I(inode)->private;
1705
1706	return cache_release(inode, filp, cd);
1707}
1708
1709const struct file_operations cache_file_operations_pipefs = {
1710	.owner		= THIS_MODULE,
1711	.llseek		= no_llseek,
1712	.read		= cache_read_pipefs,
1713	.write		= cache_write_pipefs,
1714	.poll		= cache_poll_pipefs,
1715	.unlocked_ioctl	= cache_ioctl_pipefs, /* for FIONREAD */
1716	.open		= cache_open_pipefs,
1717	.release	= cache_release_pipefs,
1718};
1719
1720static int content_open_pipefs(struct inode *inode, struct file *filp)
1721{
1722	struct cache_detail *cd = RPC_I(inode)->private;
1723
1724	return content_open(inode, filp, cd);
1725}
1726
1727static int content_release_pipefs(struct inode *inode, struct file *filp)
1728{
1729	struct cache_detail *cd = RPC_I(inode)->private;
1730
1731	return content_release(inode, filp, cd);
1732}
1733
1734const struct file_operations content_file_operations_pipefs = {
1735	.open		= content_open_pipefs,
1736	.read		= seq_read,
1737	.llseek		= seq_lseek,
1738	.release	= content_release_pipefs,
1739};
1740
1741static int open_flush_pipefs(struct inode *inode, struct file *filp)
1742{
1743	struct cache_detail *cd = RPC_I(inode)->private;
1744
1745	return open_flush(inode, filp, cd);
1746}
1747
1748static int release_flush_pipefs(struct inode *inode, struct file *filp)
1749{
1750	struct cache_detail *cd = RPC_I(inode)->private;
1751
1752	return release_flush(inode, filp, cd);
1753}
1754
1755static ssize_t read_flush_pipefs(struct file *filp, char __user *buf,
1756			    size_t count, loff_t *ppos)
1757{
1758	struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private;
1759
1760	return read_flush(filp, buf, count, ppos, cd);
1761}
1762
1763static ssize_t write_flush_pipefs(struct file *filp,
1764				  const char __user *buf,
1765				  size_t count, loff_t *ppos)
1766{
1767	struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private;
1768
1769	return write_flush(filp, buf, count, ppos, cd);
1770}
1771
1772const struct file_operations cache_flush_operations_pipefs = {
1773	.open		= open_flush_pipefs,
1774	.read		= read_flush_pipefs,
1775	.write		= write_flush_pipefs,
1776	.release	= release_flush_pipefs,
1777	.llseek		= no_llseek,
1778};
1779
1780int sunrpc_cache_register_pipefs(struct dentry *parent,
1781				 const char *name, mode_t umode,
1782				 struct cache_detail *cd)
1783{
1784	struct qstr q;
1785	struct dentry *dir;
1786	int ret = 0;
1787
1788	sunrpc_init_cache_detail(cd);
1789	q.name = name;
1790	q.len = strlen(name);
1791	q.hash = full_name_hash(q.name, q.len);
1792	dir = rpc_create_cache_dir(parent, &q, umode, cd);
1793	if (!IS_ERR(dir))
1794		cd->u.pipefs.dir = dir;
1795	else {
1796		sunrpc_destroy_cache_detail(cd);
1797		ret = PTR_ERR(dir);
1798	}
1799	return ret;
1800}
1801EXPORT_SYMBOL_GPL(sunrpc_cache_register_pipefs);
1802
1803void sunrpc_cache_unregister_pipefs(struct cache_detail *cd)
1804{
1805	rpc_remove_cache_dir(cd->u.pipefs.dir);
1806	cd->u.pipefs.dir = NULL;
1807	sunrpc_destroy_cache_detail(cd);
1808}
1809EXPORT_SYMBOL_GPL(sunrpc_cache_unregister_pipefs);
1810
v4.10.11
   1/*
   2 * net/sunrpc/cache.c
   3 *
   4 * Generic code for various authentication-related caches
   5 * used by sunrpc clients and servers.
   6 *
   7 * Copyright (C) 2002 Neil Brown <neilb@cse.unsw.edu.au>
   8 *
   9 * Released under terms in GPL version 2.  See COPYING.
  10 *
  11 */
  12
  13#include <linux/types.h>
  14#include <linux/fs.h>
  15#include <linux/file.h>
  16#include <linux/slab.h>
  17#include <linux/signal.h>
  18#include <linux/sched.h>
  19#include <linux/kmod.h>
  20#include <linux/list.h>
  21#include <linux/module.h>
  22#include <linux/ctype.h>
  23#include <linux/string_helpers.h>
  24#include <linux/uaccess.h>
  25#include <linux/poll.h>
  26#include <linux/seq_file.h>
  27#include <linux/proc_fs.h>
  28#include <linux/net.h>
  29#include <linux/workqueue.h>
  30#include <linux/mutex.h>
  31#include <linux/pagemap.h>
  32#include <asm/ioctls.h>
  33#include <linux/sunrpc/types.h>
  34#include <linux/sunrpc/cache.h>
  35#include <linux/sunrpc/stats.h>
  36#include <linux/sunrpc/rpc_pipe_fs.h>
  37#include "netns.h"
  38
  39#define	 RPCDBG_FACILITY RPCDBG_CACHE
  40
  41static bool cache_defer_req(struct cache_req *req, struct cache_head *item);
  42static void cache_revisit_request(struct cache_head *item);
  43
  44static void cache_init(struct cache_head *h, struct cache_detail *detail)
  45{
  46	time_t now = seconds_since_boot();
  47	INIT_HLIST_NODE(&h->cache_list);
  48	h->flags = 0;
  49	kref_init(&h->ref);
  50	h->expiry_time = now + CACHE_NEW_EXPIRY;
  51	if (now <= detail->flush_time)
  52		/* ensure it isn't already expired */
  53		now = detail->flush_time + 1;
  54	h->last_refresh = now;
  55}
  56
  57struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
  58				       struct cache_head *key, int hash)
  59{
  60	struct cache_head *new = NULL, *freeme = NULL, *tmp = NULL;
  61	struct hlist_head *head;
  62
  63	head = &detail->hash_table[hash];
  64
  65	read_lock(&detail->hash_lock);
  66
  67	hlist_for_each_entry(tmp, head, cache_list) {
  68		if (detail->match(tmp, key)) {
  69			if (cache_is_expired(detail, tmp))
  70				/* This entry is expired, we will discard it. */
  71				break;
  72			cache_get(tmp);
  73			read_unlock(&detail->hash_lock);
  74			return tmp;
  75		}
  76	}
  77	read_unlock(&detail->hash_lock);
  78	/* Didn't find anything, insert an empty entry */
  79
  80	new = detail->alloc();
  81	if (!new)
  82		return NULL;
  83	/* must fully initialise 'new', else
   84	 * we might lose it if we need to
  85	 * cache_put it soon.
  86	 */
  87	cache_init(new, detail);
  88	detail->init(new, key);
  89
  90	write_lock(&detail->hash_lock);
  91
  92	/* check if entry appeared while we slept */
  93	hlist_for_each_entry(tmp, head, cache_list) {
  94		if (detail->match(tmp, key)) {
  95			if (cache_is_expired(detail, tmp)) {
  96				hlist_del_init(&tmp->cache_list);
  97				detail->entries --;
  98				freeme = tmp;
  99				break;
 100			}
 101			cache_get(tmp);
 102			write_unlock(&detail->hash_lock);
 103			cache_put(new, detail);
 104			return tmp;
 105		}
 106	}
 107
 108	hlist_add_head(&new->cache_list, head);
 109	detail->entries++;
 110	cache_get(new);
 111	write_unlock(&detail->hash_lock);
 112
 113	if (freeme)
 114		cache_put(freeme, detail);
 115	return new;
 116}
 117EXPORT_SYMBOL_GPL(sunrpc_cache_lookup);
 118
 119
 120static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch);
 121
 122static void cache_fresh_locked(struct cache_head *head, time_t expiry,
 123			       struct cache_detail *detail)
 124{
 125	time_t now = seconds_since_boot();
 126	if (now <= detail->flush_time)
 127		/* ensure it isn't immediately treated as expired */
 128		now = detail->flush_time + 1;
 129	head->expiry_time = expiry;
 130	head->last_refresh = now;
 131	smp_wmb(); /* paired with smp_rmb() in cache_is_valid() */
 132	set_bit(CACHE_VALID, &head->flags);
 133}
 134
 135static void cache_fresh_unlocked(struct cache_head *head,
 136				 struct cache_detail *detail)
 137{
 138	if (test_and_clear_bit(CACHE_PENDING, &head->flags)) {
 139		cache_revisit_request(head);
 140		cache_dequeue(detail, head);
 141	}
 142}
 143
 144struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
 145				       struct cache_head *new, struct cache_head *old, int hash)
 146{
 147	/* The 'old' entry is to be replaced by 'new'.
 148	 * If 'old' is not VALID, we update it directly,
 149	 * otherwise we need to replace it
 150	 */
 151	struct cache_head *tmp;
 152
 153	if (!test_bit(CACHE_VALID, &old->flags)) {
 154		write_lock(&detail->hash_lock);
 155		if (!test_bit(CACHE_VALID, &old->flags)) {
 156			if (test_bit(CACHE_NEGATIVE, &new->flags))
 157				set_bit(CACHE_NEGATIVE, &old->flags);
 158			else
 159				detail->update(old, new);
 160			cache_fresh_locked(old, new->expiry_time, detail);
 161			write_unlock(&detail->hash_lock);
 162			cache_fresh_unlocked(old, detail);
 163			return old;
 164		}
 165		write_unlock(&detail->hash_lock);
 166	}
 167	/* We need to insert a new entry */
 168	tmp = detail->alloc();
 169	if (!tmp) {
 170		cache_put(old, detail);
 171		return NULL;
 172	}
 173	cache_init(tmp, detail);
 174	detail->init(tmp, old);
 175
 176	write_lock(&detail->hash_lock);
 177	if (test_bit(CACHE_NEGATIVE, &new->flags))
 178		set_bit(CACHE_NEGATIVE, &tmp->flags);
 179	else
 180		detail->update(tmp, new);
  181	hlist_add_head(&tmp->cache_list, &detail->hash_table[hash]);
 182	detail->entries++;
 183	cache_get(tmp);
 184	cache_fresh_locked(tmp, new->expiry_time, detail);
 185	cache_fresh_locked(old, 0, detail);
 186	write_unlock(&detail->hash_lock);
 187	cache_fresh_unlocked(tmp, detail);
 188	cache_fresh_unlocked(old, detail);
 189	cache_put(old, detail);
 190	return tmp;
 191}
 192EXPORT_SYMBOL_GPL(sunrpc_cache_update);
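/*
 * Usage sketch (illustrative only; 'demo_*', 'key', 'value' and
 * 'expiry' are assumptions): a downcall parser typically fills a
 * temporary entry and lets sunrpc_cache_update() either update 'old'
 * in place or replace it:
 *
 *	struct demo_ent tmp = { .key = key, .value = value };
 *	struct cache_head *ch;
 *
 *	tmp.h.expiry_time = expiry;
 *	ch = sunrpc_cache_update(cd, &tmp.h, &old->h, hash_32(key, 8));
 *	if (!ch)
 *		return -ENOMEM;
 *	cache_put(ch, cd);
 *
 * Note that sunrpc_cache_update() always consumes the caller's
 * reference on 'old', even on failure, and on success returns an
 * entry carrying its own reference.
 */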
 193
 194static int cache_make_upcall(struct cache_detail *cd, struct cache_head *h)
 195{
 196	if (cd->cache_upcall)
 197		return cd->cache_upcall(cd, h);
 198	return sunrpc_cache_pipe_upcall(cd, h);
 199}
 200
 201static inline int cache_is_valid(struct cache_head *h)
 202{
 203	if (!test_bit(CACHE_VALID, &h->flags))
 204		return -EAGAIN;
 205	else {
 206		/* entry is valid */
 207		if (test_bit(CACHE_NEGATIVE, &h->flags))
 208			return -ENOENT;
 209		else {
 210			/*
 211			 * In combination with write barrier in
 212			 * sunrpc_cache_update, ensures that anyone
 213			 * using the cache entry after this sees the
 214			 * updated contents:
 215			 */
 216			smp_rmb();
 217			return 0;
 218		}
 219	}
 220}
 221
 222static int try_to_negate_entry(struct cache_detail *detail, struct cache_head *h)
 223{
 224	int rv;
 225
 226	write_lock(&detail->hash_lock);
 227	rv = cache_is_valid(h);
 228	if (rv == -EAGAIN) {
 229		set_bit(CACHE_NEGATIVE, &h->flags);
 230		cache_fresh_locked(h, seconds_since_boot()+CACHE_NEW_EXPIRY,
 231				   detail);
 232		rv = -ENOENT;
  233	}
 234	write_unlock(&detail->hash_lock);
 235	cache_fresh_unlocked(h, detail);
 236	return rv;
 237}
 238
 239/*
 240 * This is the generic cache management routine for all
 241 * the authentication caches.
 242 * It checks the currency of a cache item and will (later)
 243 * initiate an upcall to fill it if needed.
 244 *
 245 *
 246 * Returns 0 if the cache_head can be used, or cache_puts it and returns
 247 * -EAGAIN if upcall is pending and request has been queued
  248 * -ETIMEDOUT if upcall failed or request could not be queued or
 249 *           upcall completed but item is still invalid (implying that
 250 *           the cache item has been replaced with a newer one).
 251 * -ENOENT if cache entry was negative
 252 */
 253int cache_check(struct cache_detail *detail,
 254		    struct cache_head *h, struct cache_req *rqstp)
 255{
 256	int rv;
 257	long refresh_age, age;
 258
 259	/* First decide return status as best we can */
 260	rv = cache_is_valid(h);
 261
 262	/* now see if we want to start an upcall */
 263	refresh_age = (h->expiry_time - h->last_refresh);
 264	age = seconds_since_boot() - h->last_refresh;
 265
 266	if (rqstp == NULL) {
 267		if (rv == -EAGAIN)
 268			rv = -ENOENT;
 269	} else if (rv == -EAGAIN ||
 270		   (h->expiry_time != 0 && age > refresh_age/2)) {
 271		dprintk("RPC:       Want update, refage=%ld, age=%ld\n",
 272				refresh_age, age);
 273		if (!test_and_set_bit(CACHE_PENDING, &h->flags)) {
 274			switch (cache_make_upcall(detail, h)) {
  275			case -EINVAL:
 276				rv = try_to_negate_entry(detail, h);
 277				break;
 278			case -EAGAIN:
  279				cache_fresh_unlocked(h, detail);
 280				break;
 281			}
 282		}
 283	}
 284
 285	if (rv == -EAGAIN) {
 286		if (!cache_defer_req(rqstp, h)) {
 287			/*
 288			 * Request was not deferred; handle it as best
 289			 * we can ourselves:
 290			 */
 291			rv = cache_is_valid(h);
 292			if (rv == -EAGAIN)
 293				rv = -ETIMEDOUT;
 294		}
 295	}
 296	if (rv)
 297		cache_put(h, detail);
 298	return rv;
 299}
 300EXPORT_SYMBOL_GPL(cache_check);
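/*
 * Usage sketch (illustrative only): a typical call site in a server
 * thread, assuming 'ent' came from a lookup like the one sketched
 * after sunrpc_cache_lookup() above; SVC_DROP and SVC_DENIED are the
 * usual svcauth verdicts:
 *
 *	switch (cache_check(cd, &ent->h, &rqstp->rq_chandle)) {
 *	case 0:			// valid: use 'ent', then cache_put() it
 *		break;
 *	case -EAGAIN:		// upcall pending, request was deferred
 *		return SVC_DROP;
 *	case -ENOENT:		// negative entry
 *	default:		// -ETIMEDOUT etc.
 *		return SVC_DENIED;
 *	}
 *
 * On any non-zero return cache_check() has already dropped the
 * caller's reference, so only the success path may keep using 'ent'.
 */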
 301
 302/*
 303 * caches need to be periodically cleaned.
 304 * For this we maintain a list of cache_detail and
 305 * a current pointer into that list and into the table
 306 * for that entry.
 307 *
 308 * Each time cache_clean is called it finds the next non-empty entry
 309 * in the current table and walks the list in that entry
 310 * looking for entries that can be removed.
 311 *
 312 * An entry gets removed if:
 313 * - The expiry is before current time
 314 * - The last_refresh time is before the flush_time for that cache
 315 *
 316 * later we might drop old entries with non-NEVER expiry if that table
 317 * is getting 'full' for some definition of 'full'
 318 *
 319 * The question of "how often to scan a table" is an interesting one
 320 * and is answered in part by the use of the "nextcheck" field in the
 321 * cache_detail.
 322 * When a scan of a table begins, the nextcheck field is set to a time
 323 * that is well into the future.
 324 * While scanning, if an expiry time is found that is earlier than the
 325 * current nextcheck time, nextcheck is set to that expiry time.
 326 * If the flush_time is ever set to a time earlier than the nextcheck
 327 * time, the nextcheck time is then set to that flush_time.
 328 *
 329 * A table is then only scanned if the current time is at least
 330 * the nextcheck time.
 331 *
 332 */
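/*
 * Worked example of the rule above (illustrative): a table scanned at
 * t=90 holds entries expiring at t=100 and t=250.  nextcheck starts at
 * t=90+30*60, is lowered to 101 when the t=100 entry is seen, and the
 * table is then skipped until t >= 101, when the expired entry is
 * reaped and nextcheck is recomputed from the survivors.
 */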
 333
 334static LIST_HEAD(cache_list);
 335static DEFINE_SPINLOCK(cache_list_lock);
 336static struct cache_detail *current_detail;
 337static int current_index;
 338
 339static void do_cache_clean(struct work_struct *work);
 340static struct delayed_work cache_cleaner;
 341
 342void sunrpc_init_cache_detail(struct cache_detail *cd)
 343{
 344	rwlock_init(&cd->hash_lock);
 345	INIT_LIST_HEAD(&cd->queue);
 346	spin_lock(&cache_list_lock);
 347	cd->nextcheck = 0;
 348	cd->entries = 0;
 349	atomic_set(&cd->readers, 0);
 350	cd->last_close = 0;
 351	cd->last_warn = -1;
 352	list_add(&cd->others, &cache_list);
 353	spin_unlock(&cache_list_lock);
 354
 355	/* start the cleaning process */
 356	queue_delayed_work(system_power_efficient_wq, &cache_cleaner, 0);
 357}
 358EXPORT_SYMBOL_GPL(sunrpc_init_cache_detail);
 359
 360void sunrpc_destroy_cache_detail(struct cache_detail *cd)
 361{
 362	cache_purge(cd);
 363	spin_lock(&cache_list_lock);
 364	write_lock(&cd->hash_lock);
 365	if (cd->entries) {
 366		write_unlock(&cd->hash_lock);
 367		spin_unlock(&cache_list_lock);
 368		goto out;
 369	}
 370	if (current_detail == cd)
 371		current_detail = NULL;
 372	list_del_init(&cd->others);
 373	write_unlock(&cd->hash_lock);
 374	spin_unlock(&cache_list_lock);
 375	if (list_empty(&cache_list)) {
  376		/* module must be being unloaded so it's safe to kill the worker */
 377		cancel_delayed_work_sync(&cache_cleaner);
 378	}
 379	return;
 380out:
 381	printk(KERN_ERR "RPC: failed to unregister %s cache\n", cd->name);
 382}
 383EXPORT_SYMBOL_GPL(sunrpc_destroy_cache_detail);
 384
 385/* clean cache tries to find something to clean
 386 * and cleans it.
 387 * It returns 1 if it cleaned something,
 388 *            0 if it didn't find anything this time
 389 *           -1 if it fell off the end of the list.
 390 */
 391static int cache_clean(void)
 392{
 393	int rv = 0;
 394	struct list_head *next;
 395
 396	spin_lock(&cache_list_lock);
 397
 398	/* find a suitable table if we don't already have one */
 399	while (current_detail == NULL ||
 400	    current_index >= current_detail->hash_size) {
 401		if (current_detail)
 402			next = current_detail->others.next;
 403		else
 404			next = cache_list.next;
 405		if (next == &cache_list) {
 406			current_detail = NULL;
 407			spin_unlock(&cache_list_lock);
 408			return -1;
 409		}
 410		current_detail = list_entry(next, struct cache_detail, others);
 411		if (current_detail->nextcheck > seconds_since_boot())
 412			current_index = current_detail->hash_size;
 413		else {
 414			current_index = 0;
 415			current_detail->nextcheck = seconds_since_boot()+30*60;
 416		}
 417	}
 418
 419	/* find a non-empty bucket in the table */
 420	while (current_detail &&
 421	       current_index < current_detail->hash_size &&
 422	       hlist_empty(&current_detail->hash_table[current_index]))
 423		current_index++;
 424
 425	/* find a cleanable entry in the bucket and clean it, or set to next bucket */
 426
 427	if (current_detail && current_index < current_detail->hash_size) {
 428		struct cache_head *ch = NULL;
 429		struct cache_detail *d;
 430		struct hlist_head *head;
 431		struct hlist_node *tmp;
 432
 433		write_lock(&current_detail->hash_lock);
 434
 435		/* Ok, now to clean this strand */
 436
 437		head = &current_detail->hash_table[current_index];
 438		hlist_for_each_entry_safe(ch, tmp, head, cache_list) {
 439			if (current_detail->nextcheck > ch->expiry_time)
 440				current_detail->nextcheck = ch->expiry_time+1;
 441			if (!cache_is_expired(current_detail, ch))
 442				continue;
 443
  444			hlist_del_init(&ch->cache_list);
 445			current_detail->entries--;
 446			rv = 1;
 447			break;
 448		}
 449
 450		write_unlock(&current_detail->hash_lock);
 451		d = current_detail;
 452		if (!ch)
  453			current_index++;
 454		spin_unlock(&cache_list_lock);
 455		if (ch) {
 456			set_bit(CACHE_CLEANED, &ch->flags);
  457			cache_fresh_unlocked(ch, d);
 458			cache_put(ch, d);
 459		}
 460	} else
 461		spin_unlock(&cache_list_lock);
 462
 463	return rv;
 464}
 465
 466/*
 467 * We want to regularly clean the cache, so we need to schedule some work ...
 468 */
 469static void do_cache_clean(struct work_struct *work)
 470{
 471	int delay = 5;
 472	if (cache_clean() == -1)
 473		delay = round_jiffies_relative(30*HZ);
 474
 475	if (list_empty(&cache_list))
 476		delay = 0;
 477
 478	if (delay)
 479		queue_delayed_work(system_power_efficient_wq,
 480				   &cache_cleaner, delay);
 481}
 482
 483
 484/*
 485 * Clean all caches promptly.  This just calls cache_clean
 486 * repeatedly until we are sure that every cache has had a chance to
 487 * be fully cleaned
 488 */
 489void cache_flush(void)
 490{
 491	while (cache_clean() != -1)
 492		cond_resched();
 493	while (cache_clean() != -1)
 494		cond_resched();
 495}
 496EXPORT_SYMBOL_GPL(cache_flush);
 497
 498void cache_purge(struct cache_detail *detail)
 499{
 500	time_t now = seconds_since_boot();
 501	if (detail->flush_time >= now)
 502		now = detail->flush_time + 1;
 503	/* 'now' is the maximum value any 'last_refresh' can have */
 504	detail->flush_time = now;
 505	detail->nextcheck = seconds_since_boot();
  506	cache_flush();
 507}
 508EXPORT_SYMBOL_GPL(cache_purge);
 509
 510
 511/*
 512 * Deferral and Revisiting of Requests.
 513 *
 514 * If a cache lookup finds a pending entry, we
 515 * need to defer the request and revisit it later.
 516 * All deferred requests are stored in a hash table,
 517 * indexed by "struct cache_head *".
 518 * As it may be wasteful to store a whole request
 519 * structure, we allow the request to provide a
 520 * deferred form, which must contain a
 521 * 'struct cache_deferred_req'
 522 * This cache_deferred_req contains a method to allow
 523 * it to be revisited when cache info is available
 524 */
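/*
 * Sketch of a deferred form (illustrative only; the 'demo_*' names
 * are assumptions): a transport's ->defer method returns a struct
 * cache_deferred_req whose ->revisit method replays or drops the
 * request:
 *
 *	struct demo_deferred {
 *		struct cache_deferred_req handle;
 *		// ...enough transport state to replay the request...
 *	};
 *
 *	static void demo_revisit(struct cache_deferred_req *dreq,
 *				 int too_many)
 *	{
 *		struct demo_deferred *dd =
 *			container_of(dreq, struct demo_deferred, handle);
 *
 *		if (too_many)
 *			demo_drop(dd);		// hypothetical helper
 *		else
 *			demo_requeue(dd);	// hypothetical helper
 *	}
 *
 * cache_restart_thread() below is the in-tree instance of this
 * pattern, used for synchronous waiting.
 */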
 525
 526#define	DFR_HASHSIZE	(PAGE_SIZE/sizeof(struct list_head))
 527#define	DFR_HASH(item)	((((long)item)>>4 ^ (((long)item)>>13)) % DFR_HASHSIZE)
 528
 529#define	DFR_MAX	300	/* ??? */
 530
 531static DEFINE_SPINLOCK(cache_defer_lock);
 532static LIST_HEAD(cache_defer_list);
 533static struct hlist_head cache_defer_hash[DFR_HASHSIZE];
 534static int cache_defer_cnt;
 535
 536static void __unhash_deferred_req(struct cache_deferred_req *dreq)
 537{
 538	hlist_del_init(&dreq->hash);
 539	if (!list_empty(&dreq->recent)) {
 540		list_del_init(&dreq->recent);
 541		cache_defer_cnt--;
 542	}
 543}
 544
 545static void __hash_deferred_req(struct cache_deferred_req *dreq, struct cache_head *item)
 546{
 547	int hash = DFR_HASH(item);
 548
 549	INIT_LIST_HEAD(&dreq->recent);
 550	hlist_add_head(&dreq->hash, &cache_defer_hash[hash]);
 551}
 552
 553static void setup_deferral(struct cache_deferred_req *dreq,
 554			   struct cache_head *item,
 555			   int count_me)
 556{
 557
 558	dreq->item = item;
 559
 560	spin_lock(&cache_defer_lock);
 561
 562	__hash_deferred_req(dreq, item);
 563
 564	if (count_me) {
 565		cache_defer_cnt++;
 566		list_add(&dreq->recent, &cache_defer_list);
 567	}
 568
 569	spin_unlock(&cache_defer_lock);
 570
 571}
 572
 573struct thread_deferred_req {
 574	struct cache_deferred_req handle;
 575	struct completion completion;
 576};
 577
 578static void cache_restart_thread(struct cache_deferred_req *dreq, int too_many)
 579{
 580	struct thread_deferred_req *dr =
 581		container_of(dreq, struct thread_deferred_req, handle);
 582	complete(&dr->completion);
 583}
 584
 585static void cache_wait_req(struct cache_req *req, struct cache_head *item)
 586{
 587	struct thread_deferred_req sleeper;
 588	struct cache_deferred_req *dreq = &sleeper.handle;
 589
 590	sleeper.completion = COMPLETION_INITIALIZER_ONSTACK(sleeper.completion);
 591	dreq->revisit = cache_restart_thread;
 592
 593	setup_deferral(dreq, item, 0);
 594
 595	if (!test_bit(CACHE_PENDING, &item->flags) ||
 596	    wait_for_completion_interruptible_timeout(
 597		    &sleeper.completion, req->thread_wait) <= 0) {
 598		/* The completion wasn't completed, so we need
 599		 * to clean up
 600		 */
 601		spin_lock(&cache_defer_lock);
 602		if (!hlist_unhashed(&sleeper.handle.hash)) {
 603			__unhash_deferred_req(&sleeper.handle);
 604			spin_unlock(&cache_defer_lock);
 605		} else {
 606			/* cache_revisit_request already removed
 607			 * this from the hash table, but hasn't
 608			 * called ->revisit yet.  It will very soon
 609			 * and we need to wait for it.
 610			 */
 611			spin_unlock(&cache_defer_lock);
 612			wait_for_completion(&sleeper.completion);
 613		}
 614	}
 615}
 616
 617static void cache_limit_defers(void)
 618{
  619	/* Make sure we haven't exceeded the limit of allowed deferred
 620	 * requests.
 621	 */
 622	struct cache_deferred_req *discard = NULL;
 623
 624	if (cache_defer_cnt <= DFR_MAX)
 625		return;
 626
 627	spin_lock(&cache_defer_lock);
 628
 629	/* Consider removing either the first or the last */
 630	if (cache_defer_cnt > DFR_MAX) {
 631		if (prandom_u32() & 1)
 632			discard = list_entry(cache_defer_list.next,
 633					     struct cache_deferred_req, recent);
 634		else
 635			discard = list_entry(cache_defer_list.prev,
 636					     struct cache_deferred_req, recent);
 637		__unhash_deferred_req(discard);
 638	}
 639	spin_unlock(&cache_defer_lock);
 640	if (discard)
 641		discard->revisit(discard, 1);
 642}
 643
 644/* Return true if and only if a deferred request is queued. */
 645static bool cache_defer_req(struct cache_req *req, struct cache_head *item)
 646{
 647	struct cache_deferred_req *dreq;
 648
 649	if (req->thread_wait) {
 650		cache_wait_req(req, item);
 651		if (!test_bit(CACHE_PENDING, &item->flags))
 652			return false;
 653	}
 654	dreq = req->defer(req);
 655	if (dreq == NULL)
 656		return false;
 657	setup_deferral(dreq, item, 1);
 658	if (!test_bit(CACHE_PENDING, &item->flags))
 659		/* Bit could have been cleared before we managed to
 660		 * set up the deferral, so need to revisit just in case
 661		 */
 662		cache_revisit_request(item);
 663
 664	cache_limit_defers();
 665	return true;
 666}
 667
 668static void cache_revisit_request(struct cache_head *item)
 669{
 670	struct cache_deferred_req *dreq;
 671	struct list_head pending;
 672	struct hlist_node *tmp;
 673	int hash = DFR_HASH(item);
 674
 675	INIT_LIST_HEAD(&pending);
 676	spin_lock(&cache_defer_lock);
 677
 678	hlist_for_each_entry_safe(dreq, tmp, &cache_defer_hash[hash], hash)
 679		if (dreq->item == item) {
 680			__unhash_deferred_req(dreq);
 681			list_add(&dreq->recent, &pending);
 682		}
 683
 684	spin_unlock(&cache_defer_lock);
 685
 686	while (!list_empty(&pending)) {
 687		dreq = list_entry(pending.next, struct cache_deferred_req, recent);
 688		list_del_init(&dreq->recent);
 689		dreq->revisit(dreq, 0);
 690	}
 691}
 692
 693void cache_clean_deferred(void *owner)
 694{
 695	struct cache_deferred_req *dreq, *tmp;
 696	struct list_head pending;
 697
 698
 699	INIT_LIST_HEAD(&pending);
 700	spin_lock(&cache_defer_lock);
 701
 702	list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
 703		if (dreq->owner == owner) {
 704			__unhash_deferred_req(dreq);
 705			list_add(&dreq->recent, &pending);
 706		}
 707	}
 708	spin_unlock(&cache_defer_lock);
 709
 710	while (!list_empty(&pending)) {
 711		dreq = list_entry(pending.next, struct cache_deferred_req, recent);
 712		list_del_init(&dreq->recent);
 713		dreq->revisit(dreq, 1);
 714	}
 715}
 716
 717/*
 718 * communicate with user-space
 719 *
 720 * We have a magic /proc file - /proc/sunrpc/<cachename>/channel.
 721 * On read, you get a full request, or block.
 722 * On write, an update request is processed.
 723 * Poll works if anything to read, and always allows write.
 724 *
 725 * Implemented by linked list of requests.  Each open file has
 726 * a ->private that also exists in this list.  New requests are added
  727 * to the end and may wake up any preceding readers.
 728 * New readers are added to the head.  If, on read, an item is found with
 729 * CACHE_UPCALLING clear, we free it from the list.
 730 *
 731 */
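/*
 * Sketch of the user-space side (illustrative only; path and buffer
 * size follow the comment above but are assumptions): a daemon loops
 * reading requests and writing back replies:
 *
 *	int fd = open("/proc/sunrpc/<cachename>/channel", O_RDWR);
 *	char buf[8192];
 *	ssize_t n;
 *
 *	while ((n = read(fd, buf, sizeof(buf))) > 0) {
 *		// 'buf' holds one request record; resolve it, then
 *		// write back one complete reply record in a single
 *		// write(), e.g. "<key fields> <expiry> <content>\n"
 *		write(fd, reply, reply_len);	// built by the daemon
 *	}
 */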
 732
 733static DEFINE_SPINLOCK(queue_lock);
 734static DEFINE_MUTEX(queue_io_mutex);
 735
 736struct cache_queue {
 737	struct list_head	list;
 738	int			reader;	/* if 0, then request */
 739};
 740struct cache_request {
 741	struct cache_queue	q;
 742	struct cache_head	*item;
  743	char			*buf;
 744	int			len;
 745	int			readers;
 746};
 747struct cache_reader {
 748	struct cache_queue	q;
 749	int			offset;	/* if non-0, we have a refcnt on next request */
 750};
 751
 752static int cache_request(struct cache_detail *detail,
 753			       struct cache_request *crq)
 754{
 755	char *bp = crq->buf;
 756	int len = PAGE_SIZE;
 757
 758	detail->cache_request(detail, crq->item, &bp, &len);
 759	if (len < 0)
 760		return -EAGAIN;
 761	return PAGE_SIZE - len;
 762}
 763
 764static ssize_t cache_read(struct file *filp, char __user *buf, size_t count,
 765			  loff_t *ppos, struct cache_detail *cd)
 766{
 767	struct cache_reader *rp = filp->private_data;
 768	struct cache_request *rq;
 769	struct inode *inode = file_inode(filp);
 770	int err;
 771
 772	if (count == 0)
 773		return 0;
 774
 775	inode_lock(inode); /* protect against multiple concurrent
 776			      * readers on this file */
 777 again:
 778	spin_lock(&queue_lock);
 779	/* need to find next request */
 780	while (rp->q.list.next != &cd->queue &&
 781	       list_entry(rp->q.list.next, struct cache_queue, list)
 782	       ->reader) {
 783		struct list_head *next = rp->q.list.next;
 784		list_move(&rp->q.list, next);
 785	}
 786	if (rp->q.list.next == &cd->queue) {
 787		spin_unlock(&queue_lock);
 788		inode_unlock(inode);
 789		WARN_ON_ONCE(rp->offset);
 790		return 0;
 791	}
 792	rq = container_of(rp->q.list.next, struct cache_request, q.list);
 793	WARN_ON_ONCE(rq->q.reader);
 794	if (rp->offset == 0)
 795		rq->readers++;
 796	spin_unlock(&queue_lock);
 797
 798	if (rq->len == 0) {
 799		err = cache_request(cd, rq);
 800		if (err < 0)
 801			goto out;
 802		rq->len = err;
 803	}
 804
 805	if (rp->offset == 0 && !test_bit(CACHE_PENDING, &rq->item->flags)) {
 806		err = -EAGAIN;
 807		spin_lock(&queue_lock);
 808		list_move(&rp->q.list, &rq->q.list);
 809		spin_unlock(&queue_lock);
 810	} else {
 811		if (rp->offset + count > rq->len)
 812			count = rq->len - rp->offset;
 813		err = -EFAULT;
 814		if (copy_to_user(buf, rq->buf + rp->offset, count))
 815			goto out;
 816		rp->offset += count;
 817		if (rp->offset >= rq->len) {
 818			rp->offset = 0;
 819			spin_lock(&queue_lock);
 820			list_move(&rp->q.list, &rq->q.list);
 821			spin_unlock(&queue_lock);
 822		}
 823		err = 0;
 824	}
 825 out:
 826	if (rp->offset == 0) {
 827		/* need to release rq */
 828		spin_lock(&queue_lock);
 829		rq->readers--;
 830		if (rq->readers == 0 &&
 831		    !test_bit(CACHE_PENDING, &rq->item->flags)) {
 832			list_del(&rq->q.list);
 833			spin_unlock(&queue_lock);
 834			cache_put(rq->item, cd);
 835			kfree(rq->buf);
 836			kfree(rq);
 837		} else
 838			spin_unlock(&queue_lock);
 839	}
 840	if (err == -EAGAIN)
 841		goto again;
 842	inode_unlock(inode);
 843	return err ? err :  count;
 844}
 845
 846static ssize_t cache_do_downcall(char *kaddr, const char __user *buf,
 847				 size_t count, struct cache_detail *cd)
 848{
 849	ssize_t ret;
 850
 851	if (count == 0)
 852		return -EINVAL;
 853	if (copy_from_user(kaddr, buf, count))
 854		return -EFAULT;
 855	kaddr[count] = '\0';
 856	ret = cd->cache_parse(cd, kaddr, count);
 857	if (!ret)
 858		ret = count;
 859	return ret;
 860}
 861
 862static ssize_t cache_slow_downcall(const char __user *buf,
 863				   size_t count, struct cache_detail *cd)
 864{
 865	static char write_buf[8192]; /* protected by queue_io_mutex */
 866	ssize_t ret = -EINVAL;
 867
 868	if (count >= sizeof(write_buf))
 869		goto out;
 870	mutex_lock(&queue_io_mutex);
 871	ret = cache_do_downcall(write_buf, buf, count, cd);
 872	mutex_unlock(&queue_io_mutex);
 873out:
 874	return ret;
 875}
 876
 877static ssize_t cache_downcall(struct address_space *mapping,
 878			      const char __user *buf,
 879			      size_t count, struct cache_detail *cd)
 880{
 881	struct page *page;
 882	char *kaddr;
 883	ssize_t ret = -ENOMEM;
 884
 885	if (count >= PAGE_SIZE)
 886		goto out_slow;
 887
 888	page = find_or_create_page(mapping, 0, GFP_KERNEL);
 889	if (!page)
 890		goto out_slow;
 891
 892	kaddr = kmap(page);
 893	ret = cache_do_downcall(kaddr, buf, count, cd);
 894	kunmap(page);
 895	unlock_page(page);
 896	put_page(page);
 897	return ret;
 898out_slow:
 899	return cache_slow_downcall(buf, count, cd);
 900}
 901
 902static ssize_t cache_write(struct file *filp, const char __user *buf,
 903			   size_t count, loff_t *ppos,
 904			   struct cache_detail *cd)
 905{
 906	struct address_space *mapping = filp->f_mapping;
 907	struct inode *inode = file_inode(filp);
 908	ssize_t ret = -EINVAL;
 909
 910	if (!cd->cache_parse)
 911		goto out;
 912
 913	inode_lock(inode);
 914	ret = cache_downcall(mapping, buf, count, cd);
 915	inode_unlock(inode);
 916out:
 917	return ret;
 918}
 919
 920static DECLARE_WAIT_QUEUE_HEAD(queue_wait);
 921
 922static unsigned int cache_poll(struct file *filp, poll_table *wait,
 923			       struct cache_detail *cd)
 924{
 925	unsigned int mask;
 926	struct cache_reader *rp = filp->private_data;
 927	struct cache_queue *cq;
 928
 929	poll_wait(filp, &queue_wait, wait);
 930
  931	/* always allow write */
 932	mask = POLLOUT | POLLWRNORM;
 933
 934	if (!rp)
 935		return mask;
 936
 937	spin_lock(&queue_lock);
 938
  939	for (cq = &rp->q; &cq->list != &cd->queue;
 940	     cq = list_entry(cq->list.next, struct cache_queue, list))
 941		if (!cq->reader) {
 942			mask |= POLLIN | POLLRDNORM;
 943			break;
 944		}
 945	spin_unlock(&queue_lock);
 946	return mask;
 947}
 948
 949static int cache_ioctl(struct inode *ino, struct file *filp,
 950		       unsigned int cmd, unsigned long arg,
 951		       struct cache_detail *cd)
 952{
 953	int len = 0;
 954	struct cache_reader *rp = filp->private_data;
 955	struct cache_queue *cq;
 956
 957	if (cmd != FIONREAD || !rp)
 958		return -EINVAL;
 959
 960	spin_lock(&queue_lock);
 961
 962	/* only find the length remaining in current request,
 963	 * or the length of the next request
 964	 */
  965	for (cq = &rp->q; &cq->list != &cd->queue;
 966	     cq = list_entry(cq->list.next, struct cache_queue, list))
 967		if (!cq->reader) {
 968			struct cache_request *cr =
 969				container_of(cq, struct cache_request, q);
 970			len = cr->len - rp->offset;
 971			break;
 972		}
 973	spin_unlock(&queue_lock);
 974
 975	return put_user(len, (int __user *)arg);
 976}
 977
 978static int cache_open(struct inode *inode, struct file *filp,
 979		      struct cache_detail *cd)
 980{
 981	struct cache_reader *rp = NULL;
 982
 983	if (!cd || !try_module_get(cd->owner))
 984		return -EACCES;
 985	nonseekable_open(inode, filp);
 986	if (filp->f_mode & FMODE_READ) {
 987		rp = kmalloc(sizeof(*rp), GFP_KERNEL);
 988		if (!rp) {
 989			module_put(cd->owner);
 990			return -ENOMEM;
 991		}
 992		rp->offset = 0;
 993		rp->q.reader = 1;
 994		atomic_inc(&cd->readers);
 995		spin_lock(&queue_lock);
 996		list_add(&rp->q.list, &cd->queue);
 997		spin_unlock(&queue_lock);
 998	}
 999	filp->private_data = rp;
1000	return 0;
1001}
1002
1003static int cache_release(struct inode *inode, struct file *filp,
1004			 struct cache_detail *cd)
1005{
1006	struct cache_reader *rp = filp->private_data;
1007
1008	if (rp) {
1009		spin_lock(&queue_lock);
1010		if (rp->offset) {
1011			struct cache_queue *cq;
 1012			for (cq = &rp->q; &cq->list != &cd->queue;
1013			     cq = list_entry(cq->list.next, struct cache_queue, list))
1014				if (!cq->reader) {
1015					container_of(cq, struct cache_request, q)
1016						->readers--;
1017					break;
1018				}
1019			rp->offset = 0;
1020		}
1021		list_del(&rp->q.list);
1022		spin_unlock(&queue_lock);
1023
1024		filp->private_data = NULL;
1025		kfree(rp);
1026
1027		cd->last_close = seconds_since_boot();
1028		atomic_dec(&cd->readers);
1029	}
1030	module_put(cd->owner);
1031	return 0;
1032}
1033
1034
1035
1036static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch)
1037{
1038	struct cache_queue *cq, *tmp;
1039	struct cache_request *cr;
1040	struct list_head dequeued;
1041
1042	INIT_LIST_HEAD(&dequeued);
1043	spin_lock(&queue_lock);
1044	list_for_each_entry_safe(cq, tmp, &detail->queue, list)
1045		if (!cq->reader) {
1046			cr = container_of(cq, struct cache_request, q);
1047			if (cr->item != ch)
1048				continue;
1049			if (test_bit(CACHE_PENDING, &ch->flags))
1050				/* Lost a race and it is pending again */
1051				break;
1052			if (cr->readers != 0)
1053				continue;
 1054			list_move(&cr->q.list, &dequeued);
1055		}
1056	spin_unlock(&queue_lock);
1057	while (!list_empty(&dequeued)) {
1058		cr = list_entry(dequeued.next, struct cache_request, q.list);
1059		list_del(&cr->q.list);
1060		cache_put(cr->item, detail);
1061		kfree(cr->buf);
1062		kfree(cr);
1063	}
1064}
1065
1066/*
1067 * Support routines for text-based upcalls.
1068 * Fields are separated by spaces.
 1069 * Fields are either mangled to quote space, tab, newline and slosh with a slosh,
 1070 * or hexified with a leading \x
1071 * Record is terminated with newline.
1072 *
1073 */
1074
1075void qword_add(char **bpp, int *lp, char *str)
1076{
1077	char *bp = *bpp;
1078	int len = *lp;
1079	int ret;
1080
1081	if (len < 0) return;
1082
1083	ret = string_escape_str(str, bp, len, ESCAPE_OCTAL, "\\ \n\t");
1084	if (ret >= len) {
1085		bp += len;
1086		len = -1;
1087	} else {
1088		bp += ret;
 1089		len -= ret;
1090		*bp++ = ' ';
1091		len--;
1092	}
1093	*bpp = bp;
1094	*lp = len;
1095}
1096EXPORT_SYMBOL_GPL(qword_add);
1097
1098void qword_addhex(char **bpp, int *lp, char *buf, int blen)
1099{
1100	char *bp = *bpp;
1101	int len = *lp;
1102
1103	if (len < 0) return;
1104
1105	if (len > 2) {
1106		*bp++ = '\\';
1107		*bp++ = 'x';
1108		len -= 2;
1109		while (blen && len >= 2) {
 1110			bp = hex_byte_pack(bp, *buf++);
1111			len -= 2;
1112			blen--;
1113		}
1114	}
 1115	if (blen || len < 1) len = -1;
1116	else {
1117		*bp++ = ' ';
1118		len--;
1119	}
1120	*bpp = bp;
1121	*lp = len;
1122}
1123EXPORT_SYMBOL_GPL(qword_addhex);
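/*
 * Usage sketch (illustrative only; 'demo_*' names are assumptions): a
 * ->cache_request method renders one record with these helpers.  Each
 * helper appends a field plus a trailing space, so the final space is
 * conventionally overwritten with the terminating newline:
 *
 *	static void demo_request(struct cache_detail *cd,
 *				 struct cache_head *h,
 *				 char **bpp, int *blen)
 *	{
 *		struct demo_ent *ent = container_of(h, struct demo_ent, h);
 *
 *		qword_add(bpp, blen, ent->name);
 *		qword_addhex(bpp, blen, ent->raw, ent->rawlen);
 *		(*bpp)[-1] = '\n';
 *	}
 *
 * On overflow *blen goes negative, which cache_request() above turns
 * into -EAGAIN.
 */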
1124
1125static void warn_no_listener(struct cache_detail *detail)
1126{
1127	if (detail->last_warn != detail->last_close) {
1128		detail->last_warn = detail->last_close;
1129		if (detail->warn_no_listener)
1130			detail->warn_no_listener(detail, detail->last_close != 0);
1131	}
1132}
1133
1134static bool cache_listeners_exist(struct cache_detail *detail)
1135{
1136	if (atomic_read(&detail->readers))
1137		return true;
1138	if (detail->last_close == 0)
1139		/* This cache was never opened */
1140		return false;
1141	if (detail->last_close < seconds_since_boot() - 30)
1142		/*
1143		 * We allow for the possibility that someone might
1144		 * restart a userspace daemon without restarting the
1145		 * server; but after 30 seconds, we give up.
1146		 */
1147		 return false;
1148	return true;
1149}
1150
1151/*
1152 * register an upcall request to user-space and queue it up for read() by the
1153 * upcall daemon.
1154 *
1155 * Each request is at most one page long.
1156 */
 1157	int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h)
1158{
1159
1160	char *buf;
1161	struct cache_request *crq;
1162	int ret = 0;
1163
1164	if (!detail->cache_request)
1165		return -EINVAL;
1166
1167	if (!cache_listeners_exist(detail)) {
1168		warn_no_listener(detail);
1169		return -EINVAL;
1170	}
1171	if (test_bit(CACHE_CLEANED, &h->flags))
1172		/* Too late to make an upcall */
1173		return -EAGAIN;
1174
1175	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
1176	if (!buf)
1177		return -EAGAIN;
1178
1179	crq = kmalloc(sizeof (*crq), GFP_KERNEL);
1180	if (!crq) {
1181		kfree(buf);
1182		return -EAGAIN;
1183	}
 1184
 1185	crq->q.reader = 0;
1186	crq->buf = buf;
1187	crq->len = 0;
1188	crq->readers = 0;
1189	spin_lock(&queue_lock);
1190	if (test_bit(CACHE_PENDING, &h->flags)) {
1191		crq->item = cache_get(h);
1192		list_add_tail(&crq->q.list, &detail->queue);
1193	} else
1194		/* Lost a race, no longer PENDING, so don't enqueue */
1195		ret = -EAGAIN;
1196	spin_unlock(&queue_lock);
1197	wake_up(&queue_wait);
1198	if (ret == -EAGAIN) {
1199		kfree(buf);
1200		kfree(crq);
1201	}
1202	return ret;
1203}
1204EXPORT_SYMBOL_GPL(sunrpc_cache_pipe_upcall);
1205
1206/*
1207 * parse a message from user-space and pass it
1208 * to an appropriate cache
1209 * Messages are, like requests, separated into fields by
1210 * spaces and dequotes as \xHEXSTRING or embedded \nnn octal
1211 *
1212 * Message is
1213 *   reply cachename expiry key ... content....
1214 *
1215 * key and content are both parsed by cache
1216 */
 1217
1218int qword_get(char **bpp, char *dest, int bufsize)
1219{
1220	/* return bytes copied, or -1 on error */
1221	char *bp = *bpp;
1222	int len = 0;
1223
1224	while (*bp == ' ') bp++;
1225
1226	if (bp[0] == '\\' && bp[1] == 'x') {
1227		/* HEX STRING */
1228		bp += 2;
1229		while (len < bufsize - 1) {
1230			int h, l;
1231
1232			h = hex_to_bin(bp[0]);
1233			if (h < 0)
1234				break;
1235
1236			l = hex_to_bin(bp[1]);
1237			if (l < 0)
1238				break;
1239
1240			*dest++ = (h << 4) | l;
1241			bp += 2;
1242			len++;
1243		}
1244	} else {
1245		/* text with \nnn octal quoting */
1246		while (*bp != ' ' && *bp != '\n' && *bp && len < bufsize-1) {
1247			if (*bp == '\\' &&
1248			    isodigit(bp[1]) && (bp[1] <= '3') &&
1249			    isodigit(bp[2]) &&
1250			    isodigit(bp[3])) {
1251				int byte = (*++bp -'0');
1252				bp++;
1253				byte = (byte << 3) | (*bp++ - '0');
1254				byte = (byte << 3) | (*bp++ - '0');
1255				*dest++ = byte;
1256				len++;
1257			} else {
1258				*dest++ = *bp++;
1259				len++;
1260			}
1261		}
1262	}
1263
1264	if (*bp != ' ' && *bp != '\n' && *bp != '\0')
1265		return -1;
1266	while (*bp == ' ') bp++;
1267	*bpp = bp;
1268	*dest = '\0';
1269	return len;
1270}
1271EXPORT_SYMBOL_GPL(qword_get);
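/*
 * Usage sketch (illustrative only; 'demo_*' names are assumptions): a
 * ->cache_parse method walks a reply with qword_get(), one field at a
 * time:
 *
 *	static int demo_parse(struct cache_detail *cd, char *mesg, int mlen)
 *	{
 *		char name[64];
 *		time_t expiry;
 *
 *		if (qword_get(&mesg, name, sizeof(name)) <= 0)
 *			return -EINVAL;
 *		expiry = get_expiry(&mesg);	// from sunrpc/cache.h
 *		if (!expiry)
 *			return -EINVAL;
 *		// look up or create the entry, fill in the new value,
 *		// then commit it with sunrpc_cache_update()
 *		return 0;
 *	}
 */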
1272
1273
1274/*
1275 * support /proc/sunrpc/cache/$CACHENAME/content
1276 * as a seqfile.
1277 * We call ->cache_show passing NULL for the item to
1278 * get a header, then pass each real item in the cache
1279 */
1280
 1281	void *cache_seq_start(struct seq_file *m, loff_t *pos)
1282	__acquires(cd->hash_lock)
1283{
1284	loff_t n = *pos;
1285	unsigned int hash, entry;
1286	struct cache_head *ch;
 1287	struct cache_detail *cd = m->private;
1288
1289	read_lock(&cd->hash_lock);
1290	if (!n--)
1291		return SEQ_START_TOKEN;
1292	hash = n >> 32;
1293	entry = n & ((1LL<<32) - 1);
1294
1295	hlist_for_each_entry(ch, &cd->hash_table[hash], cache_list)
1296		if (!entry--)
1297			return ch;
1298	n &= ~((1LL<<32) - 1);
1299	do {
1300		hash++;
1301		n += 1LL<<32;
1302	} while(hash < cd->hash_size &&
1303		hlist_empty(&cd->hash_table[hash]));
1304	if (hash >= cd->hash_size)
1305		return NULL;
1306	*pos = n+1;
1307	return hlist_entry_safe(cd->hash_table[hash].first,
1308				struct cache_head, cache_list);
1309}
1310EXPORT_SYMBOL_GPL(cache_seq_start);
1311
1312void *cache_seq_next(struct seq_file *m, void *p, loff_t *pos)
1313{
1314	struct cache_head *ch = p;
1315	int hash = (*pos >> 32);
1316	struct cache_detail *cd = m->private;
1317
1318	if (p == SEQ_START_TOKEN)
1319		hash = 0;
1320	else if (ch->cache_list.next == NULL) {
1321		hash++;
1322		*pos += 1LL<<32;
1323	} else {
1324		++*pos;
1325		return hlist_entry_safe(ch->cache_list.next,
1326					struct cache_head, cache_list);
1327	}
1328	*pos &= ~((1LL<<32) - 1);
1329	while (hash < cd->hash_size &&
1330	       hlist_empty(&cd->hash_table[hash])) {
1331		hash++;
1332		*pos += 1LL<<32;
1333	}
1334	if (hash >= cd->hash_size)
1335		return NULL;
1336	++*pos;
1337	return hlist_entry_safe(cd->hash_table[hash].first,
1338				struct cache_head, cache_list);
1339}
1340EXPORT_SYMBOL_GPL(cache_seq_next);
1341
1342void cache_seq_stop(struct seq_file *m, void *p)
1343	__releases(cd->hash_lock)
1344{
1345	struct cache_detail *cd = m->private;
1346	read_unlock(&cd->hash_lock);
1347}
1348EXPORT_SYMBOL_GPL(cache_seq_stop);
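/*
 * Note on the iterator above (a reading of the code, for reference):
 * *pos packs the bucket index into the top 32 bits and the position
 * within the bucket into the low 32 bits, shifted by one so that
 * pos == 0 is reserved for the header (SEQ_START_TOKEN):
 *
 *	pos = (((loff_t)hash << 32) | entry) + 1;
 */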
1349
1350static int c_show(struct seq_file *m, void *p)
1351{
1352	struct cache_head *cp = p;
1353	struct cache_detail *cd = m->private;
1354
1355	if (p == SEQ_START_TOKEN)
1356		return cd->cache_show(m, cd, NULL);
1357
1358	ifdebug(CACHE)
1359		seq_printf(m, "# expiry=%ld refcnt=%d flags=%lx\n",
1360			   convert_to_wallclock(cp->expiry_time),
1361			   atomic_read(&cp->ref.refcount), cp->flags);
1362	cache_get(cp);
1363	if (cache_check(cd, cp, NULL))
1364		/* cache_check does a cache_put on failure */
1365		seq_printf(m, "# ");
1366	else {
1367		if (cache_is_expired(cd, cp))
1368			seq_printf(m, "# ");
1369		cache_put(cp, cd);
1370	}
1371
1372	return cd->cache_show(m, cd, cp);
1373}
1374
1375static const struct seq_operations cache_content_op = {
1376	.start	= cache_seq_start,
1377	.next	= cache_seq_next,
1378	.stop	= cache_seq_stop,
1379	.show	= c_show,
1380};
1381
1382static int content_open(struct inode *inode, struct file *file,
1383			struct cache_detail *cd)
1384{
1385	struct seq_file *seq;
1386	int err;
1387
1388	if (!cd || !try_module_get(cd->owner))
1389		return -EACCES;
1390
1391	err = seq_open(file, &cache_content_op);
1392	if (err) {
1393		module_put(cd->owner);
1394		return err;
1395	}
1396
1397	seq = file->private_data;
1398	seq->private = cd;
1399	return 0;
1400}
1401
1402static int content_release(struct inode *inode, struct file *file,
1403		struct cache_detail *cd)
1404{
1405	int ret = seq_release(inode, file);
1406	module_put(cd->owner);
1407	return ret;
1408}
1409
1410static int open_flush(struct inode *inode, struct file *file,
1411			struct cache_detail *cd)
1412{
1413	if (!cd || !try_module_get(cd->owner))
1414		return -EACCES;
1415	return nonseekable_open(inode, file);
1416}
1417
1418static int release_flush(struct inode *inode, struct file *file,
1419			struct cache_detail *cd)
1420{
1421	module_put(cd->owner);
1422	return 0;
1423}
1424
1425static ssize_t read_flush(struct file *file, char __user *buf,
1426			  size_t count, loff_t *ppos,
1427			  struct cache_detail *cd)
1428{
1429	char tbuf[22];
1430	unsigned long p = *ppos;
1431	size_t len;
1432
1433	snprintf(tbuf, sizeof(tbuf), "%lu\n", convert_to_wallclock(cd->flush_time));
1434	len = strlen(tbuf);
1435	if (p >= len)
1436		return 0;
1437	len -= p;
1438	if (len > count)
1439		len = count;
1440	if (copy_to_user(buf, (void*)(tbuf+p), len))
1441		return -EFAULT;
1442	*ppos += len;
1443	return len;
1444}
1445
1446static ssize_t write_flush(struct file *file, const char __user *buf,
1447			   size_t count, loff_t *ppos,
1448			   struct cache_detail *cd)
1449{
1450	char tbuf[20];
1451	char *bp, *ep;
1452	time_t then, now;
1453
1454	if (*ppos || count > sizeof(tbuf)-1)
1455		return -EINVAL;
1456	if (copy_from_user(tbuf, buf, count))
1457		return -EFAULT;
1458	tbuf[count] = 0;
1459	simple_strtoul(tbuf, &ep, 0);
1460	if (*ep && *ep != '\n')
1461		return -EINVAL;
1462
1463	bp = tbuf;
1464	then = get_expiry(&bp);
1465	now = seconds_since_boot();
1466	cd->nextcheck = now;
1467	/* Can only set flush_time to 1 second beyond "now", or
1468	 * possibly 1 second beyond flushtime.  This is because
1469	 * flush_time never goes backwards so it mustn't get too far
1470	 * ahead of time.
1471	 */
1472	if (then >= now) {
1473		/* Want to flush everything, so behave like cache_purge() */
1474		if (cd->flush_time >= now)
1475			now = cd->flush_time + 1;
1476		then = now;
1477	}
1478
1479	cd->flush_time = then;
1480	cache_flush();
1481
1482	*ppos += count;
1483	return count;
1484}
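/*
 * Usage sketch (illustrative only; the path is an assumption): user
 * space triggers a flush by writing a time in seconds to this file;
 * any value at or beyond the current time discards every entry:
 *
 *	int fd = open("/proc/sunrpc/<cachename>/flush", O_WRONLY);
 *	dprintf(fd, "%ld\n", (long)time(NULL));
 *	close(fd);
 */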
1485
1486static ssize_t cache_read_procfs(struct file *filp, char __user *buf,
1487				 size_t count, loff_t *ppos)
1488{
1489	struct cache_detail *cd = PDE_DATA(file_inode(filp));
1490
1491	return cache_read(filp, buf, count, ppos, cd);
1492}
1493
1494static ssize_t cache_write_procfs(struct file *filp, const char __user *buf,
1495				  size_t count, loff_t *ppos)
1496{
1497	struct cache_detail *cd = PDE_DATA(file_inode(filp));
1498
1499	return cache_write(filp, buf, count, ppos, cd);
1500}
1501
1502static unsigned int cache_poll_procfs(struct file *filp, poll_table *wait)
1503{
1504	struct cache_detail *cd = PDE_DATA(file_inode(filp));
1505
1506	return cache_poll(filp, wait, cd);
1507}
1508
1509static long cache_ioctl_procfs(struct file *filp,
1510			       unsigned int cmd, unsigned long arg)
1511{
1512	struct inode *inode = file_inode(filp);
1513	struct cache_detail *cd = PDE_DATA(inode);
1514
1515	return cache_ioctl(inode, filp, cmd, arg, cd);
1516}
1517
1518static int cache_open_procfs(struct inode *inode, struct file *filp)
1519{
1520	struct cache_detail *cd = PDE_DATA(inode);
1521
1522	return cache_open(inode, filp, cd);
1523}
1524
1525static int cache_release_procfs(struct inode *inode, struct file *filp)
1526{
1527	struct cache_detail *cd = PDE_DATA(inode);
1528
1529	return cache_release(inode, filp, cd);
1530}
1531
1532static const struct file_operations cache_file_operations_procfs = {
1533	.owner		= THIS_MODULE,
1534	.llseek		= no_llseek,
1535	.read		= cache_read_procfs,
1536	.write		= cache_write_procfs,
1537	.poll		= cache_poll_procfs,
1538	.unlocked_ioctl	= cache_ioctl_procfs, /* for FIONREAD */
1539	.open		= cache_open_procfs,
1540	.release	= cache_release_procfs,
1541};
1542
1543static int content_open_procfs(struct inode *inode, struct file *filp)
1544{
1545	struct cache_detail *cd = PDE_DATA(inode);
1546
1547	return content_open(inode, filp, cd);
1548}
1549
1550static int content_release_procfs(struct inode *inode, struct file *filp)
1551{
1552	struct cache_detail *cd = PDE_DATA(inode);
1553
1554	return content_release(inode, filp, cd);
1555}
1556
1557static const struct file_operations content_file_operations_procfs = {
1558	.open		= content_open_procfs,
1559	.read		= seq_read,
1560	.llseek		= seq_lseek,
1561	.release	= content_release_procfs,
1562};
1563
1564static int open_flush_procfs(struct inode *inode, struct file *filp)
1565{
1566	struct cache_detail *cd = PDE_DATA(inode);
1567
1568	return open_flush(inode, filp, cd);
1569}
1570
1571static int release_flush_procfs(struct inode *inode, struct file *filp)
1572{
1573	struct cache_detail *cd = PDE_DATA(inode);
1574
1575	return release_flush(inode, filp, cd);
1576}
1577
1578static ssize_t read_flush_procfs(struct file *filp, char __user *buf,
1579			    size_t count, loff_t *ppos)
1580{
1581	struct cache_detail *cd = PDE_DATA(file_inode(filp));
1582
1583	return read_flush(filp, buf, count, ppos, cd);
1584}
1585
1586static ssize_t write_flush_procfs(struct file *filp,
1587				  const char __user *buf,
1588				  size_t count, loff_t *ppos)
1589{
1590	struct cache_detail *cd = PDE_DATA(file_inode(filp));
1591
1592	return write_flush(filp, buf, count, ppos, cd);
1593}
1594
1595static const struct file_operations cache_flush_operations_procfs = {
1596	.open		= open_flush_procfs,
1597	.read		= read_flush_procfs,
1598	.write		= write_flush_procfs,
1599	.release	= release_flush_procfs,
1600	.llseek		= no_llseek,
1601};
1602
1603static void remove_cache_proc_entries(struct cache_detail *cd, struct net *net)
1604{
1605	struct sunrpc_net *sn;
1606
1607	if (cd->u.procfs.proc_ent == NULL)
1608		return;
1609	if (cd->u.procfs.flush_ent)
1610		remove_proc_entry("flush", cd->u.procfs.proc_ent);
1611	if (cd->u.procfs.channel_ent)
1612		remove_proc_entry("channel", cd->u.procfs.proc_ent);
1613	if (cd->u.procfs.content_ent)
1614		remove_proc_entry("content", cd->u.procfs.proc_ent);
1615	cd->u.procfs.proc_ent = NULL;
1616	sn = net_generic(net, sunrpc_net_id);
1617	remove_proc_entry(cd->name, sn->proc_net_rpc);
1618}
1619
1620#ifdef CONFIG_PROC_FS
1621static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
1622{
1623	struct proc_dir_entry *p;
1624	struct sunrpc_net *sn;
1625
1626	sn = net_generic(net, sunrpc_net_id);
1627	cd->u.procfs.proc_ent = proc_mkdir(cd->name, sn->proc_net_rpc);
1628	if (cd->u.procfs.proc_ent == NULL)
1629		goto out_nomem;
1630	cd->u.procfs.channel_ent = NULL;
1631	cd->u.procfs.content_ent = NULL;
1632
1633	p = proc_create_data("flush", S_IFREG|S_IRUSR|S_IWUSR,
1634			     cd->u.procfs.proc_ent,
1635			     &cache_flush_operations_procfs, cd);
1636	cd->u.procfs.flush_ent = p;
1637	if (p == NULL)
1638		goto out_nomem;
1639
1640	if (cd->cache_request || cd->cache_parse) {
1641		p = proc_create_data("channel", S_IFREG|S_IRUSR|S_IWUSR,
1642				     cd->u.procfs.proc_ent,
1643				     &cache_file_operations_procfs, cd);
1644		cd->u.procfs.channel_ent = p;
1645		if (p == NULL)
1646			goto out_nomem;
1647	}
1648	if (cd->cache_show) {
1649		p = proc_create_data("content", S_IFREG|S_IRUSR,
1650				cd->u.procfs.proc_ent,
1651				&content_file_operations_procfs, cd);
1652		cd->u.procfs.content_ent = p;
1653		if (p == NULL)
1654			goto out_nomem;
1655	}
1656	return 0;
1657out_nomem:
1658	remove_cache_proc_entries(cd, net);
1659	return -ENOMEM;
1660}
1661#else /* CONFIG_PROC_FS */
1662static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
1663{
1664	return 0;
1665}
1666#endif
1667
1668void __init cache_initialize(void)
1669{
1670	INIT_DEFERRABLE_WORK(&cache_cleaner, do_cache_clean);
1671}
1672
1673int cache_register_net(struct cache_detail *cd, struct net *net)
1674{
1675	int ret;
1676
1677	sunrpc_init_cache_detail(cd);
1678	ret = create_cache_proc_entries(cd, net);
1679	if (ret)
1680		sunrpc_destroy_cache_detail(cd);
1681	return ret;
1682}
 1683	EXPORT_SYMBOL_GPL(cache_register_net);
1684
1685void cache_unregister_net(struct cache_detail *cd, struct net *net)
1686{
1687	remove_cache_proc_entries(cd, net);
1688	sunrpc_destroy_cache_detail(cd);
1689}
1690EXPORT_SYMBOL_GPL(cache_unregister_net);
1691
1692struct cache_detail *cache_create_net(struct cache_detail *tmpl, struct net *net)
1693{
1694	struct cache_detail *cd;
1695	int i;
1696
1697	cd = kmemdup(tmpl, sizeof(struct cache_detail), GFP_KERNEL);
1698	if (cd == NULL)
1699		return ERR_PTR(-ENOMEM);
1700
1701	cd->hash_table = kzalloc(cd->hash_size * sizeof(struct hlist_head),
1702				 GFP_KERNEL);
1703	if (cd->hash_table == NULL) {
1704		kfree(cd);
1705		return ERR_PTR(-ENOMEM);
1706	}
1707
1708	for (i = 0; i < cd->hash_size; i++)
1709		INIT_HLIST_HEAD(&cd->hash_table[i]);
1710	cd->net = net;
1711	return cd;
1712}
1713EXPORT_SYMBOL_GPL(cache_create_net);
1714
1715void cache_destroy_net(struct cache_detail *cd, struct net *net)
1716{
1717	kfree(cd->hash_table);
1718	kfree(cd);
1719}
1720EXPORT_SYMBOL_GPL(cache_destroy_net);
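/*
 * Lifecycle sketch (illustrative only; 'demo_cache_template' and
 * 'demo_cd()' are assumptions): per-net caches are cloned from a
 * static template in a pernet init hook and torn down in exit:
 *
 *	static __net_init int demo_net_init(struct net *net)
 *	{
 *		struct cache_detail *cd;
 *		int err;
 *
 *		cd = cache_create_net(&demo_cache_template, net);
 *		if (IS_ERR(cd))
 *			return PTR_ERR(cd);
 *		err = cache_register_net(cd, net);
 *		if (err) {
 *			cache_destroy_net(cd, net);
 *			return err;
 *		}
 *		// stash 'cd' in this net's private data (not shown)
 *		return 0;
 *	}
 *
 *	static __net_exit void demo_net_exit(struct net *net)
 *	{
 *		struct cache_detail *cd = demo_cd(net);	// hypothetical
 *
 *		cache_unregister_net(cd, net);
 *		cache_destroy_net(cd, net);
 *	}
 */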
1721
1722static ssize_t cache_read_pipefs(struct file *filp, char __user *buf,
1723				 size_t count, loff_t *ppos)
1724{
1725	struct cache_detail *cd = RPC_I(file_inode(filp))->private;
1726
1727	return cache_read(filp, buf, count, ppos, cd);
1728}
1729
1730static ssize_t cache_write_pipefs(struct file *filp, const char __user *buf,
1731				  size_t count, loff_t *ppos)
1732{
1733	struct cache_detail *cd = RPC_I(file_inode(filp))->private;
1734
1735	return cache_write(filp, buf, count, ppos, cd);
1736}
1737
1738static unsigned int cache_poll_pipefs(struct file *filp, poll_table *wait)
1739{
1740	struct cache_detail *cd = RPC_I(file_inode(filp))->private;
1741
1742	return cache_poll(filp, wait, cd);
1743}
1744
1745static long cache_ioctl_pipefs(struct file *filp,
1746			      unsigned int cmd, unsigned long arg)
1747{
1748	struct inode *inode = file_inode(filp);
1749	struct cache_detail *cd = RPC_I(inode)->private;
1750
1751	return cache_ioctl(inode, filp, cmd, arg, cd);
1752}
1753
1754static int cache_open_pipefs(struct inode *inode, struct file *filp)
1755{
1756	struct cache_detail *cd = RPC_I(inode)->private;
1757
1758	return cache_open(inode, filp, cd);
1759}
1760
1761static int cache_release_pipefs(struct inode *inode, struct file *filp)
1762{
1763	struct cache_detail *cd = RPC_I(inode)->private;
1764
1765	return cache_release(inode, filp, cd);
1766}
1767
1768const struct file_operations cache_file_operations_pipefs = {
1769	.owner		= THIS_MODULE,
1770	.llseek		= no_llseek,
1771	.read		= cache_read_pipefs,
1772	.write		= cache_write_pipefs,
1773	.poll		= cache_poll_pipefs,
1774	.unlocked_ioctl	= cache_ioctl_pipefs, /* for FIONREAD */
1775	.open		= cache_open_pipefs,
1776	.release	= cache_release_pipefs,
1777};
1778
1779static int content_open_pipefs(struct inode *inode, struct file *filp)
1780{
1781	struct cache_detail *cd = RPC_I(inode)->private;
1782
1783	return content_open(inode, filp, cd);
1784}
1785
1786static int content_release_pipefs(struct inode *inode, struct file *filp)
1787{
1788	struct cache_detail *cd = RPC_I(inode)->private;
1789
1790	return content_release(inode, filp, cd);
1791}
1792
1793const struct file_operations content_file_operations_pipefs = {
1794	.open		= content_open_pipefs,
1795	.read		= seq_read,
1796	.llseek		= seq_lseek,
1797	.release	= content_release_pipefs,
1798};
1799
1800static int open_flush_pipefs(struct inode *inode, struct file *filp)
1801{
1802	struct cache_detail *cd = RPC_I(inode)->private;
1803
1804	return open_flush(inode, filp, cd);
1805}
1806
1807static int release_flush_pipefs(struct inode *inode, struct file *filp)
1808{
1809	struct cache_detail *cd = RPC_I(inode)->private;
1810
1811	return release_flush(inode, filp, cd);
1812}
1813
1814static ssize_t read_flush_pipefs(struct file *filp, char __user *buf,
1815			    size_t count, loff_t *ppos)
1816{
1817	struct cache_detail *cd = RPC_I(file_inode(filp))->private;
1818
1819	return read_flush(filp, buf, count, ppos, cd);
1820}
1821
1822static ssize_t write_flush_pipefs(struct file *filp,
1823				  const char __user *buf,
1824				  size_t count, loff_t *ppos)
1825{
1826	struct cache_detail *cd = RPC_I(file_inode(filp))->private;
1827
1828	return write_flush(filp, buf, count, ppos, cd);
1829}
1830
1831const struct file_operations cache_flush_operations_pipefs = {
1832	.open		= open_flush_pipefs,
1833	.read		= read_flush_pipefs,
1834	.write		= write_flush_pipefs,
1835	.release	= release_flush_pipefs,
1836	.llseek		= no_llseek,
1837};
1838
1839int sunrpc_cache_register_pipefs(struct dentry *parent,
1840				 const char *name, umode_t umode,
1841				 struct cache_detail *cd)
1842{
1843	struct dentry *dir = rpc_create_cache_dir(parent, name, umode, cd);
1844	if (IS_ERR(dir))
1845		return PTR_ERR(dir);
1846	cd->u.pipefs.dir = dir;
 1847	return 0;
1848}
1849EXPORT_SYMBOL_GPL(sunrpc_cache_register_pipefs);
1850
1851void sunrpc_cache_unregister_pipefs(struct cache_detail *cd)
1852{
1853	rpc_remove_cache_dir(cd->u.pipefs.dir);
 1854	cd->u.pipefs.dir = NULL;
1855}
1856EXPORT_SYMBOL_GPL(sunrpc_cache_unregister_pipefs);
1857