// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sunrpc/cache.c
 *
 * Generic code for various authentication-related caches
 * used by sunrpc clients and servers.
 *
 * Copyright (C) 2002 Neil Brown <neilb@cse.unsw.edu.au>
 */

#include <linux/types.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/string_helpers.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <asm/ioctls.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/cache.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include <trace/events/sunrpc.h>

#include "netns.h"
#include "fail.h"

#define RPCDBG_FACILITY	RPCDBG_CACHE

static bool cache_defer_req(struct cache_req *req, struct cache_head *item);
static void cache_revisit_request(struct cache_head *item);

static void cache_init(struct cache_head *h, struct cache_detail *detail)
{
	time64_t now = seconds_since_boot();
	INIT_HLIST_NODE(&h->cache_list);
	h->flags = 0;
	kref_init(&h->ref);
	h->expiry_time = now + CACHE_NEW_EXPIRY;
	if (now <= detail->flush_time)
		/* ensure it isn't already expired */
		now = detail->flush_time + 1;
	h->last_refresh = now;
}

static void cache_fresh_unlocked(struct cache_head *head,
				 struct cache_detail *detail);

static struct cache_head *sunrpc_cache_find_rcu(struct cache_detail *detail,
						struct cache_head *key,
						int hash)
{
	struct hlist_head *head = &detail->hash_table[hash];
	struct cache_head *tmp;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp, head, cache_list) {
		if (!detail->match(tmp, key))
			continue;
		if (test_bit(CACHE_VALID, &tmp->flags) &&
		    cache_is_expired(detail, tmp))
			continue;
		tmp = cache_get_rcu(tmp);
		rcu_read_unlock();
		return tmp;
	}
	rcu_read_unlock();
	return NULL;
}

static void sunrpc_begin_cache_remove_entry(struct cache_head *ch,
					    struct cache_detail *cd)
{
	/* Must be called under cd->hash_lock */
	hlist_del_init_rcu(&ch->cache_list);
	set_bit(CACHE_CLEANED, &ch->flags);
	cd->entries--;
}

static void sunrpc_end_cache_remove_entry(struct cache_head *ch,
					  struct cache_detail *cd)
{
	cache_fresh_unlocked(ch, cd);
	cache_put(ch, cd);
}

static struct cache_head *sunrpc_cache_add_entry(struct cache_detail *detail,
						 struct cache_head *key,
						 int hash)
{
	struct cache_head *new, *tmp, *freeme = NULL;
	struct hlist_head *head = &detail->hash_table[hash];

	new = detail->alloc();
	if (!new)
		return NULL;
	/* must fully initialise 'new', else
	 * we might lose it if we need to
	 * cache_put it soon.
	 */
	cache_init(new, detail);
	detail->init(new, key);

	spin_lock(&detail->hash_lock);

	/* check if entry appeared while we slept */
	hlist_for_each_entry_rcu(tmp, head, cache_list,
				 lockdep_is_held(&detail->hash_lock)) {
		if (!detail->match(tmp, key))
			continue;
		if (test_bit(CACHE_VALID, &tmp->flags) &&
		    cache_is_expired(detail, tmp)) {
			sunrpc_begin_cache_remove_entry(tmp, detail);
			trace_cache_entry_expired(detail, tmp);
			freeme = tmp;
			break;
		}
		cache_get(tmp);
		spin_unlock(&detail->hash_lock);
		cache_put(new, detail);
		return tmp;
	}

	hlist_add_head_rcu(&new->cache_list, head);
	detail->entries++;
	cache_get(new);
	spin_unlock(&detail->hash_lock);

	if (freeme)
		sunrpc_end_cache_remove_entry(freeme, detail);
	return new;
}

struct cache_head *sunrpc_cache_lookup_rcu(struct cache_detail *detail,
					   struct cache_head *key, int hash)
{
	struct cache_head *ret;

	ret = sunrpc_cache_find_rcu(detail, key, hash);
	if (ret)
		return ret;
	/* Didn't find anything, insert an empty entry */
	return sunrpc_cache_add_entry(detail, key, hash);
}
EXPORT_SYMBOL_GPL(sunrpc_cache_lookup_rcu);
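
/*
 * A minimal usage sketch (illustrative, not code from this file):
 * callers embed a struct cache_head in their own entry type, fill in
 * only the match() fields of a stack "key" entry, and look it up by
 * hash.  The names ip_map, h and hash_ip() below are hypothetical
 * stand-ins for a real cache implementation:
 *
 *	struct ip_map key, *ipm;
 *	struct cache_head *ch;
 *
 *	key.addr = addr;
 *	ch = sunrpc_cache_lookup_rcu(cd, &key.h, hash_ip(&addr));
 *	if (!ch)
 *		return -ENOMEM;		// allocation failed
 *	ipm = container_of(ch, struct ip_map, h);
 *	...
 *	cache_put(ch, cd);		// drop the reference taken above
 */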

static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch);

static void cache_fresh_locked(struct cache_head *head, time64_t expiry,
			       struct cache_detail *detail)
{
	time64_t now = seconds_since_boot();
	if (now <= detail->flush_time)
		/* ensure it isn't immediately treated as expired */
		now = detail->flush_time + 1;
	head->expiry_time = expiry;
	head->last_refresh = now;
	smp_wmb(); /* paired with smp_rmb() in cache_is_valid() */
	set_bit(CACHE_VALID, &head->flags);
}

static void cache_fresh_unlocked(struct cache_head *head,
				 struct cache_detail *detail)
{
	if (test_and_clear_bit(CACHE_PENDING, &head->flags)) {
		cache_revisit_request(head);
		cache_dequeue(detail, head);
	}
}

static void cache_make_negative(struct cache_detail *detail,
				struct cache_head *h)
{
	set_bit(CACHE_NEGATIVE, &h->flags);
	trace_cache_entry_make_negative(detail, h);
}

static void cache_entry_update(struct cache_detail *detail,
			       struct cache_head *h,
			       struct cache_head *new)
{
	if (!test_bit(CACHE_NEGATIVE, &new->flags)) {
		detail->update(h, new);
		trace_cache_entry_update(detail, h);
	} else {
		cache_make_negative(detail, h);
	}
}

struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
				       struct cache_head *new, struct cache_head *old, int hash)
{
	/* The 'old' entry is to be replaced by 'new'.
	 * If 'old' is not VALID, we update it directly,
	 * otherwise we need to replace it.
	 */
	struct cache_head *tmp;

	if (!test_bit(CACHE_VALID, &old->flags)) {
		spin_lock(&detail->hash_lock);
		if (!test_bit(CACHE_VALID, &old->flags)) {
			cache_entry_update(detail, old, new);
			cache_fresh_locked(old, new->expiry_time, detail);
			spin_unlock(&detail->hash_lock);
			cache_fresh_unlocked(old, detail);
			return old;
		}
		spin_unlock(&detail->hash_lock);
	}
	/* We need to insert a new entry */
	tmp = detail->alloc();
	if (!tmp) {
		cache_put(old, detail);
		return NULL;
	}
	cache_init(tmp, detail);
	detail->init(tmp, old);

	spin_lock(&detail->hash_lock);
	cache_entry_update(detail, tmp, new);
	hlist_add_head(&tmp->cache_list, &detail->hash_table[hash]);
	detail->entries++;
	cache_get(tmp);
	cache_fresh_locked(tmp, new->expiry_time, detail);
	cache_fresh_locked(old, 0, detail);
	spin_unlock(&detail->hash_lock);
	cache_fresh_unlocked(tmp, detail);
	cache_fresh_unlocked(old, detail);
	cache_put(old, detail);
	return tmp;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_update);
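
/*
 * Typical update pattern, sketched for illustration only: a
 * cache_parse() implementation builds a fully-initialised temporary
 * entry on the stack and lets sunrpc_cache_update() either refresh the
 * hashed entry in place or atomically replace it.  ip_map, dom and the
 * field names are hypothetical:
 *
 *	struct ip_map tmp;
 *	struct cache_head *ch;
 *
 *	tmp.h.expiry_time = expiry;
 *	tmp.domain = dom;			// the update() fields
 *	ch = sunrpc_cache_update(cd, &tmp.h, &ipm->h, hash);
 *	if (!ch)
 *		return -ENOMEM;			// 'old' was put for us
 *	cache_put(ch, cd);
 */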

static inline int cache_is_valid(struct cache_head *h)
{
	if (!test_bit(CACHE_VALID, &h->flags))
		return -EAGAIN;
	else {
		/* entry is valid */
		if (test_bit(CACHE_NEGATIVE, &h->flags))
			return -ENOENT;
		else {
			/*
			 * In combination with write barrier in
			 * sunrpc_cache_update, ensures that anyone
			 * using the cache entry after this sees the
			 * updated contents:
			 */
			smp_rmb();
			return 0;
		}
	}
}

static int try_to_negate_entry(struct cache_detail *detail, struct cache_head *h)
{
	int rv;

	spin_lock(&detail->hash_lock);
	rv = cache_is_valid(h);
	if (rv == -EAGAIN) {
		cache_make_negative(detail, h);
		cache_fresh_locked(h, seconds_since_boot()+CACHE_NEW_EXPIRY,
				   detail);
		rv = -ENOENT;
	}
	spin_unlock(&detail->hash_lock);
	cache_fresh_unlocked(h, detail);
	return rv;
}

/*
 * This is the generic cache management routine for all
 * the authentication caches.
 * It checks the currency of a cache item and will (later)
 * initiate an upcall to fill it if needed.
 *
 * Returns 0 if the cache_head can be used, or cache_puts it and returns
 *	-EAGAIN if upcall is pending and request has been queued
 *	-ETIMEDOUT if upcall failed or request could not be queued, or
 *	           upcall completed but item is still invalid (implying that
 *	           the cache item has been replaced with a newer one).
 *	-ENOENT if cache entry was negative
 */
int cache_check(struct cache_detail *detail,
		struct cache_head *h, struct cache_req *rqstp)
{
	int rv;
	time64_t refresh_age, age;

	/* First decide return status as best we can */
	rv = cache_is_valid(h);

	/* now see if we want to start an upcall */
	refresh_age = (h->expiry_time - h->last_refresh);
	age = seconds_since_boot() - h->last_refresh;

	if (rqstp == NULL) {
		if (rv == -EAGAIN)
			rv = -ENOENT;
	} else if (rv == -EAGAIN ||
		   (h->expiry_time != 0 && age > refresh_age/2)) {
		dprintk("RPC: Want update, refage=%lld, age=%lld\n",
			refresh_age, age);
		switch (detail->cache_upcall(detail, h)) {
		case -EINVAL:
			rv = try_to_negate_entry(detail, h);
			break;
		case -EAGAIN:
			cache_fresh_unlocked(h, detail);
			break;
		}
	}

	if (rv == -EAGAIN) {
		if (!cache_defer_req(rqstp, h)) {
			/*
			 * Request was not deferred; handle it as best
			 * we can ourselves:
			 */
			rv = cache_is_valid(h);
			if (rv == -EAGAIN)
				rv = -ETIMEDOUT;
		}
	}
	if (rv)
		cache_put(h, detail);
	return rv;
}
EXPORT_SYMBOL_GPL(cache_check);
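
/*
 * Illustrative call pattern (a sketch, not code from this file):
 * server-side code typically takes a reference via a lookup and then
 * lets cache_check() decide whether the entry is usable right now.
 * As documented above, on any non-zero return the reference has
 * already been dropped:
 *
 *	ch = sunrpc_cache_lookup_rcu(cd, &key.h, hash);
 *	if (!ch)
 *		return SVC_DENIED;
 *	switch (cache_check(cd, ch, &rqstp->rq_chandle)) {
 *	case 0:			// valid: use it, then cache_put()
 *		break;
 *	case -EAGAIN:		// upcall queued, request deferred
 *		return SVC_DROP;
 *	default:		// -ENOENT / -ETIMEDOUT
 *		return SVC_DENIED;
 *	}
 */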

/*
 * caches need to be periodically cleaned.
 * For this we maintain a list of cache_detail and
 * a current pointer into that list and into the table
 * for that entry.
 *
 * Each time cache_clean is called it finds the next non-empty entry
 * in the current table and walks the list in that entry
 * looking for entries that can be removed.
 *
 * An entry gets removed if:
 * - The expiry is before current time
 * - The last_refresh time is before the flush_time for that cache
 *
 * later we might drop old entries with non-NEVER expiry if that table
 * is getting 'full' for some definition of 'full'
 *
 * The question of "how often to scan a table" is an interesting one
 * and is answered in part by the use of the "nextcheck" field in the
 * cache_detail.
 * When a scan of a table begins, the nextcheck field is set to a time
 * that is well into the future.
 * While scanning, if an expiry time is found that is earlier than the
 * current nextcheck time, nextcheck is set to that expiry time.
 * If the flush_time is ever set to a time earlier than the nextcheck
 * time, the nextcheck time is then set to that flush_time.
 *
 * A table is then only scanned if the current time is at least
 * the nextcheck time.
 */

static LIST_HEAD(cache_list);
static DEFINE_SPINLOCK(cache_list_lock);
static struct cache_detail *current_detail;
static int current_index;

static void do_cache_clean(struct work_struct *work);
static struct delayed_work cache_cleaner;

void sunrpc_init_cache_detail(struct cache_detail *cd)
{
	spin_lock_init(&cd->hash_lock);
	INIT_LIST_HEAD(&cd->queue);
	spin_lock(&cache_list_lock);
	cd->nextcheck = 0;
	cd->entries = 0;
	atomic_set(&cd->writers, 0);
	cd->last_close = 0;
	cd->last_warn = -1;
	list_add(&cd->others, &cache_list);
	spin_unlock(&cache_list_lock);

	/* start the cleaning process */
	queue_delayed_work(system_power_efficient_wq, &cache_cleaner, 0);
}
EXPORT_SYMBOL_GPL(sunrpc_init_cache_detail);

void sunrpc_destroy_cache_detail(struct cache_detail *cd)
{
	cache_purge(cd);
	spin_lock(&cache_list_lock);
	spin_lock(&cd->hash_lock);
	if (current_detail == cd)
		current_detail = NULL;
	list_del_init(&cd->others);
	spin_unlock(&cd->hash_lock);
	spin_unlock(&cache_list_lock);
	if (list_empty(&cache_list)) {
		/* module must be being unloaded, so it's safe to kill
		 * the worker */
		cancel_delayed_work_sync(&cache_cleaner);
	}
}
EXPORT_SYMBOL_GPL(sunrpc_destroy_cache_detail);

/* cache_clean tries to find something to clean
 * and cleans it.
 * It returns 1 if it cleaned something,
 *            0 if it didn't find anything this time
 *           -1 if it fell off the end of the list.
 */
static int cache_clean(void)
{
	int rv = 0;
	struct list_head *next;

	spin_lock(&cache_list_lock);

	/* find a suitable table if we don't already have one */
	while (current_detail == NULL ||
	       current_index >= current_detail->hash_size) {
		if (current_detail)
			next = current_detail->others.next;
		else
			next = cache_list.next;
		if (next == &cache_list) {
			current_detail = NULL;
			spin_unlock(&cache_list_lock);
			return -1;
		}
		current_detail = list_entry(next, struct cache_detail, others);
		if (current_detail->nextcheck > seconds_since_boot())
			current_index = current_detail->hash_size;
		else {
			current_index = 0;
			current_detail->nextcheck = seconds_since_boot()+30*60;
		}
	}

	/* find a non-empty bucket in the table */
	while (current_detail &&
	       current_index < current_detail->hash_size &&
	       hlist_empty(&current_detail->hash_table[current_index]))
		current_index++;

	/* find a cleanable entry in the bucket and clean it, or set to next bucket */

	if (current_detail && current_index < current_detail->hash_size) {
		struct cache_head *ch = NULL;
		struct cache_detail *d;
		struct hlist_head *head;
		struct hlist_node *tmp;

		spin_lock(&current_detail->hash_lock);

		/* Ok, now to clean this strand */

		head = &current_detail->hash_table[current_index];
		hlist_for_each_entry_safe(ch, tmp, head, cache_list) {
			if (current_detail->nextcheck > ch->expiry_time)
				current_detail->nextcheck = ch->expiry_time+1;
			if (!cache_is_expired(current_detail, ch))
				continue;

			sunrpc_begin_cache_remove_entry(ch, current_detail);
			trace_cache_entry_expired(current_detail, ch);
			rv = 1;
			break;
		}

		spin_unlock(&current_detail->hash_lock);
		d = current_detail;
		if (!ch)
			current_index++;
		spin_unlock(&cache_list_lock);
		if (ch)
			sunrpc_end_cache_remove_entry(ch, d);
	} else
		spin_unlock(&cache_list_lock);

	return rv;
}

/*
 * We want to regularly clean the cache, so we need to schedule some work ...
 */
static void do_cache_clean(struct work_struct *work)
{
	int delay;

	if (list_empty(&cache_list))
		return;

	if (cache_clean() == -1)
		delay = round_jiffies_relative(30*HZ);
	else
		delay = 5;

	queue_delayed_work(system_power_efficient_wq, &cache_cleaner, delay);
}


/*
 * Clean all caches promptly.  This just calls cache_clean
 * repeatedly until we are sure that every cache has had a chance to
 * be fully cleaned
 */
void cache_flush(void)
{
	while (cache_clean() != -1)
		cond_resched();
	while (cache_clean() != -1)
		cond_resched();
}
EXPORT_SYMBOL_GPL(cache_flush);

void cache_purge(struct cache_detail *detail)
{
	struct cache_head *ch = NULL;
	struct hlist_head *head = NULL;
	int i = 0;

	spin_lock(&detail->hash_lock);
	if (!detail->entries) {
		spin_unlock(&detail->hash_lock);
		return;
	}

	dprintk("RPC: %d entries in %s cache\n", detail->entries, detail->name);
	for (i = 0; i < detail->hash_size; i++) {
		head = &detail->hash_table[i];
		while (!hlist_empty(head)) {
			ch = hlist_entry(head->first, struct cache_head,
					 cache_list);
			sunrpc_begin_cache_remove_entry(ch, detail);
			spin_unlock(&detail->hash_lock);
			sunrpc_end_cache_remove_entry(ch, detail);
			spin_lock(&detail->hash_lock);
		}
	}
	spin_unlock(&detail->hash_lock);
}
EXPORT_SYMBOL_GPL(cache_purge);


/*
 * Deferral and Revisiting of Requests.
 *
 * If a cache lookup finds a pending entry, we
 * need to defer the request and revisit it later.
 * All deferred requests are stored in a hash table,
 * indexed by "struct cache_head *".
 * As it may be wasteful to store a whole request
 * structure, we allow the request to provide a
 * deferred form, which must contain a
 * 'struct cache_deferred_req'
 * This cache_deferred_req contains a method to allow
 * it to be revisited when cache info is available
 */

#define	DFR_HASHSIZE	(PAGE_SIZE/sizeof(struct list_head))
#define	DFR_HASH(item)	((((long)item)>>4 ^ (((long)item)>>13)) % DFR_HASHSIZE)

#define	DFR_MAX	300	/* ??? */

static DEFINE_SPINLOCK(cache_defer_lock);
static LIST_HEAD(cache_defer_list);
static struct hlist_head cache_defer_hash[DFR_HASHSIZE];
static int cache_defer_cnt;

static void __unhash_deferred_req(struct cache_deferred_req *dreq)
{
	hlist_del_init(&dreq->hash);
	if (!list_empty(&dreq->recent)) {
		list_del_init(&dreq->recent);
		cache_defer_cnt--;
	}
}

static void __hash_deferred_req(struct cache_deferred_req *dreq, struct cache_head *item)
{
	int hash = DFR_HASH(item);

	INIT_LIST_HEAD(&dreq->recent);
	hlist_add_head(&dreq->hash, &cache_defer_hash[hash]);
}

static void setup_deferral(struct cache_deferred_req *dreq,
			   struct cache_head *item,
			   int count_me)
{

	dreq->item = item;

	spin_lock(&cache_defer_lock);

	__hash_deferred_req(dreq, item);

	if (count_me) {
		cache_defer_cnt++;
		list_add(&dreq->recent, &cache_defer_list);
	}

	spin_unlock(&cache_defer_lock);

}

struct thread_deferred_req {
	struct cache_deferred_req handle;
	struct completion completion;
};

static void cache_restart_thread(struct cache_deferred_req *dreq, int too_many)
{
	struct thread_deferred_req *dr =
		container_of(dreq, struct thread_deferred_req, handle);
	complete(&dr->completion);
}

static void cache_wait_req(struct cache_req *req, struct cache_head *item)
{
	struct thread_deferred_req sleeper;
	struct cache_deferred_req *dreq = &sleeper.handle;

	sleeper.completion = COMPLETION_INITIALIZER_ONSTACK(sleeper.completion);
	dreq->revisit = cache_restart_thread;

	setup_deferral(dreq, item, 0);

	if (!test_bit(CACHE_PENDING, &item->flags) ||
	    wait_for_completion_interruptible_timeout(
		    &sleeper.completion, req->thread_wait) <= 0) {
		/* The completion wasn't completed, so we need
		 * to clean up
		 */
		spin_lock(&cache_defer_lock);
		if (!hlist_unhashed(&sleeper.handle.hash)) {
			__unhash_deferred_req(&sleeper.handle);
			spin_unlock(&cache_defer_lock);
		} else {
			/* cache_revisit_request already removed
			 * this from the hash table, but hasn't
			 * called ->revisit yet.  It will very soon
			 * and we need to wait for it.
			 */
			spin_unlock(&cache_defer_lock);
			wait_for_completion(&sleeper.completion);
		}
	}
}

static void cache_limit_defers(void)
{
	/* Make sure we haven't exceeded the limit of allowed deferred
	 * requests.
	 */
	struct cache_deferred_req *discard = NULL;

	if (cache_defer_cnt <= DFR_MAX)
		return;

	spin_lock(&cache_defer_lock);

	/* Consider removing either the first or the last */
	if (cache_defer_cnt > DFR_MAX) {
		if (get_random_u32_below(2))
			discard = list_entry(cache_defer_list.next,
					     struct cache_deferred_req, recent);
		else
			discard = list_entry(cache_defer_list.prev,
					     struct cache_deferred_req, recent);
		__unhash_deferred_req(discard);
	}
	spin_unlock(&cache_defer_lock);
	if (discard)
		discard->revisit(discard, 1);
}

#if IS_ENABLED(CONFIG_FAIL_SUNRPC)
static inline bool cache_defer_immediately(void)
{
	return !fail_sunrpc.ignore_cache_wait &&
		should_fail(&fail_sunrpc.attr, 1);
}
#else
static inline bool cache_defer_immediately(void)
{
	return false;
}
#endif

/* Return true if and only if a deferred request is queued. */
static bool cache_defer_req(struct cache_req *req, struct cache_head *item)
{
	struct cache_deferred_req *dreq;

	if (!cache_defer_immediately()) {
		cache_wait_req(req, item);
		if (!test_bit(CACHE_PENDING, &item->flags))
			return false;
	}

	dreq = req->defer(req);
	if (dreq == NULL)
		return false;
	setup_deferral(dreq, item, 1);
	if (!test_bit(CACHE_PENDING, &item->flags))
		/* Bit could have been cleared before we managed to
		 * set up the deferral, so need to revisit just in case
		 */
		cache_revisit_request(item);

	cache_limit_defers();
	return true;
}

static void cache_revisit_request(struct cache_head *item)
{
	struct cache_deferred_req *dreq;
	struct hlist_node *tmp;
	int hash = DFR_HASH(item);
	LIST_HEAD(pending);

	spin_lock(&cache_defer_lock);

	hlist_for_each_entry_safe(dreq, tmp, &cache_defer_hash[hash], hash)
		if (dreq->item == item) {
			__unhash_deferred_req(dreq);
			list_add(&dreq->recent, &pending);
		}

	spin_unlock(&cache_defer_lock);

	while (!list_empty(&pending)) {
		dreq = list_entry(pending.next, struct cache_deferred_req, recent);
		list_del_init(&dreq->recent);
		dreq->revisit(dreq, 0);
	}
}

void cache_clean_deferred(void *owner)
{
	struct cache_deferred_req *dreq, *tmp;
	LIST_HEAD(pending);

	spin_lock(&cache_defer_lock);

	list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
		if (dreq->owner == owner) {
			__unhash_deferred_req(dreq);
			list_add(&dreq->recent, &pending);
		}
	}
	spin_unlock(&cache_defer_lock);

	while (!list_empty(&pending)) {
		dreq = list_entry(pending.next, struct cache_deferred_req, recent);
		list_del_init(&dreq->recent);
		dreq->revisit(dreq, 1);
	}
}

/*
 * communicate with user-space
 *
 * We have a magic /proc file - /proc/net/rpc/<cachename>/channel.
 * On read, you get a full request, or block.
 * On write, an update request is processed.
 * Poll works if anything to read, and always allows write.
 *
 * Implemented by linked list of requests.  Each open file has
 * a ->private that also exists in this list.  New requests are added
 * to the end and may wake up any preceding readers.
 * New readers are added to the head.  If, on read, an item is found with
 * CACHE_UPCALLING clear, we free it from the list.
 *
 */
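
/*
 * Sketch of the expected userspace side (hypothetical, for
 * illustration only): a daemon such as rpc.mountd opens the channel
 * file, blocks in read() until a request line arrives, resolves it,
 * and writes a one-line reply back on the same descriptor:
 *
 *	int fd = open("/proc/net/rpc/auth.unix.ip/channel", O_RDWR);
 *	char buf[8192];
 *	ssize_t n;
 *
 *	while ((n = read(fd, buf, sizeof(buf))) > 0) {
 *		// parse the request and do the lookup, then write the
 *		// answer; the line format is cache-specific
 *		write(fd, reply, strlen(reply));
 *	}
 */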

static DEFINE_SPINLOCK(queue_lock);

struct cache_queue {
	struct list_head	list;
	int			reader;	/* if 0, then request */
};
struct cache_request {
	struct cache_queue	q;
	struct cache_head	*item;
	char			*buf;
	int			len;
	int			readers;
};
struct cache_reader {
	struct cache_queue	q;
	int			offset;	/* if non-0, we have a refcnt on next request */
};

static int cache_request(struct cache_detail *detail,
			 struct cache_request *crq)
{
	char *bp = crq->buf;
	int len = PAGE_SIZE;

	detail->cache_request(detail, crq->item, &bp, &len);
	if (len < 0)
		return -E2BIG;
	return PAGE_SIZE - len;
}

static ssize_t cache_read(struct file *filp, char __user *buf, size_t count,
			  loff_t *ppos, struct cache_detail *cd)
{
	struct cache_reader *rp = filp->private_data;
	struct cache_request *rq;
	struct inode *inode = file_inode(filp);
	int err;

	if (count == 0)
		return 0;

	inode_lock(inode); /* protect against multiple concurrent
			    * readers on this file */
 again:
	spin_lock(&queue_lock);
	/* need to find next request */
	while (rp->q.list.next != &cd->queue &&
	       list_entry(rp->q.list.next, struct cache_queue, list)
	       ->reader) {
		struct list_head *next = rp->q.list.next;

		list_move(&rp->q.list, next);
	}
	if (rp->q.list.next == &cd->queue) {
		spin_unlock(&queue_lock);
		inode_unlock(inode);
		WARN_ON_ONCE(rp->offset);
		return 0;
	}
	rq = container_of(rp->q.list.next, struct cache_request, q.list);
	WARN_ON_ONCE(rq->q.reader);
	if (rp->offset == 0)
		rq->readers++;
	spin_unlock(&queue_lock);

	if (rq->len == 0) {
		err = cache_request(cd, rq);
		if (err < 0)
			goto out;
		rq->len = err;
	}

	if (rp->offset == 0 && !test_bit(CACHE_PENDING, &rq->item->flags)) {
		err = -EAGAIN;
		spin_lock(&queue_lock);
		list_move(&rp->q.list, &rq->q.list);
		spin_unlock(&queue_lock);
	} else {
		if (rp->offset + count > rq->len)
			count = rq->len - rp->offset;
		err = -EFAULT;
		if (copy_to_user(buf, rq->buf + rp->offset, count))
			goto out;
		rp->offset += count;
		if (rp->offset >= rq->len) {
			rp->offset = 0;
			spin_lock(&queue_lock);
			list_move(&rp->q.list, &rq->q.list);
			spin_unlock(&queue_lock);
		}
		err = 0;
	}
 out:
	if (rp->offset == 0) {
		/* need to release rq */
		spin_lock(&queue_lock);
		rq->readers--;
		if (rq->readers == 0 &&
		    !test_bit(CACHE_PENDING, &rq->item->flags)) {
			list_del(&rq->q.list);
			spin_unlock(&queue_lock);
			cache_put(rq->item, cd);
			kfree(rq->buf);
			kfree(rq);
		} else
			spin_unlock(&queue_lock);
	}
	if (err == -EAGAIN)
		goto again;
	inode_unlock(inode);
	return err ? err : count;
}

static ssize_t cache_do_downcall(char *kaddr, const char __user *buf,
				 size_t count, struct cache_detail *cd)
{
	ssize_t ret;

	if (count == 0)
		return -EINVAL;
	if (copy_from_user(kaddr, buf, count))
		return -EFAULT;
	kaddr[count] = '\0';
	ret = cd->cache_parse(cd, kaddr, count);
	if (!ret)
		ret = count;
	return ret;
}

static ssize_t cache_downcall(struct address_space *mapping,
			      const char __user *buf,
			      size_t count, struct cache_detail *cd)
{
	char *write_buf;
	ssize_t ret = -ENOMEM;

	if (count >= 32768) { /* 32k is max userland buffer, let's check anyway */
		ret = -EINVAL;
		goto out;
	}

	write_buf = kvmalloc(count + 1, GFP_KERNEL);
	if (!write_buf)
		goto out;

	ret = cache_do_downcall(write_buf, buf, count, cd);
	kvfree(write_buf);
out:
	return ret;
}

static ssize_t cache_write(struct file *filp, const char __user *buf,
			   size_t count, loff_t *ppos,
			   struct cache_detail *cd)
{
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = file_inode(filp);
	ssize_t ret = -EINVAL;

	if (!cd->cache_parse)
		goto out;

	inode_lock(inode);
	ret = cache_downcall(mapping, buf, count, cd);
	inode_unlock(inode);
out:
	return ret;
}

static DECLARE_WAIT_QUEUE_HEAD(queue_wait);

static __poll_t cache_poll(struct file *filp, poll_table *wait,
			   struct cache_detail *cd)
{
	__poll_t mask;
	struct cache_reader *rp = filp->private_data;
	struct cache_queue *cq;

	poll_wait(filp, &queue_wait, wait);

	/* always allow write */
	mask = EPOLLOUT | EPOLLWRNORM;

	if (!rp)
		return mask;

	spin_lock(&queue_lock);

	for (cq = &rp->q; &cq->list != &cd->queue;
	     cq = list_entry(cq->list.next, struct cache_queue, list))
		if (!cq->reader) {
			mask |= EPOLLIN | EPOLLRDNORM;
			break;
		}
	spin_unlock(&queue_lock);
	return mask;
}

static int cache_ioctl(struct inode *ino, struct file *filp,
		       unsigned int cmd, unsigned long arg,
		       struct cache_detail *cd)
{
	int len = 0;
	struct cache_reader *rp = filp->private_data;
	struct cache_queue *cq;

	if (cmd != FIONREAD || !rp)
		return -EINVAL;

	spin_lock(&queue_lock);

	/* only find the length remaining in current request,
	 * or the length of the next request
	 */
	for (cq = &rp->q; &cq->list != &cd->queue;
	     cq = list_entry(cq->list.next, struct cache_queue, list))
		if (!cq->reader) {
			struct cache_request *cr =
				container_of(cq, struct cache_request, q);
			len = cr->len - rp->offset;
			break;
		}
	spin_unlock(&queue_lock);

	return put_user(len, (int __user *)arg);
}

static int cache_open(struct inode *inode, struct file *filp,
		      struct cache_detail *cd)
{
	struct cache_reader *rp = NULL;

	if (!cd || !try_module_get(cd->owner))
		return -EACCES;
	nonseekable_open(inode, filp);
	if (filp->f_mode & FMODE_READ) {
		rp = kmalloc(sizeof(*rp), GFP_KERNEL);
		if (!rp) {
			module_put(cd->owner);
			return -ENOMEM;
		}
		rp->offset = 0;
		rp->q.reader = 1;

		spin_lock(&queue_lock);
		list_add(&rp->q.list, &cd->queue);
		spin_unlock(&queue_lock);
	}
	if (filp->f_mode & FMODE_WRITE)
		atomic_inc(&cd->writers);
	filp->private_data = rp;
	return 0;
}

static int cache_release(struct inode *inode, struct file *filp,
			 struct cache_detail *cd)
{
	struct cache_reader *rp = filp->private_data;

	if (rp) {
		spin_lock(&queue_lock);
		if (rp->offset) {
			struct cache_queue *cq;

			for (cq = &rp->q; &cq->list != &cd->queue;
			     cq = list_entry(cq->list.next, struct cache_queue, list))
				if (!cq->reader) {
					container_of(cq, struct cache_request, q)
						->readers--;
					break;
				}
			rp->offset = 0;
		}
		list_del(&rp->q.list);
		spin_unlock(&queue_lock);

		filp->private_data = NULL;
		kfree(rp);
	}
	if (filp->f_mode & FMODE_WRITE) {
		atomic_dec(&cd->writers);
		cd->last_close = seconds_since_boot();
	}
	module_put(cd->owner);
	return 0;
}


static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch)
{
	struct cache_queue *cq, *tmp;
	struct cache_request *cr;
	LIST_HEAD(dequeued);

	spin_lock(&queue_lock);
	list_for_each_entry_safe(cq, tmp, &detail->queue, list)
		if (!cq->reader) {
			cr = container_of(cq, struct cache_request, q);
			if (cr->item != ch)
				continue;
			if (test_bit(CACHE_PENDING, &ch->flags))
				/* Lost a race and it is pending again */
				break;
			if (cr->readers != 0)
				continue;
			list_move(&cr->q.list, &dequeued);
		}
	spin_unlock(&queue_lock);
	while (!list_empty(&dequeued)) {
		cr = list_entry(dequeued.next, struct cache_request, q.list);
		list_del(&cr->q.list);
		cache_put(cr->item, detail);
		kfree(cr->buf);
		kfree(cr);
	}
}

/*
 * Support routines for text-based upcalls.
 * Fields are separated by spaces.
 * Fields are either mangled so that space, tab, newline and backslash
 * are quoted with a backslash, or hexified with a leading \x.
 * Each record is terminated with a newline.
 */
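
/*
 * For example (illustrative only), a two-field record where the first
 * field contains a space and the second holds raw binary bytes might
 * be encoded as:
 *
 *	my\040name \xc0a80001\n
 *
 * i.e. qword_add() escapes the space as octal \040, while
 * qword_addhex() emits the four binary bytes c0 a8 00 01 behind a
 * leading \x.
 */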

void qword_add(char **bpp, int *lp, char *str)
{
	char *bp = *bpp;
	int len = *lp;
	int ret;

	if (len < 0) return;

	ret = string_escape_str(str, bp, len, ESCAPE_OCTAL, "\\ \n\t");
	if (ret >= len) {
		bp += len;
		len = -1;
	} else {
		bp += ret;
		len -= ret;
		*bp++ = ' ';
		len--;
	}
	*bpp = bp;
	*lp = len;
}
EXPORT_SYMBOL_GPL(qword_add);

void qword_addhex(char **bpp, int *lp, char *buf, int blen)
{
	char *bp = *bpp;
	int len = *lp;

	if (len < 0) return;

	if (len > 2) {
		*bp++ = '\\';
		*bp++ = 'x';
		len -= 2;
		while (blen && len >= 2) {
			bp = hex_byte_pack(bp, *buf++);
			len -= 2;
			blen--;
		}
	}
	if (blen || len < 1) len = -1;
	else {
		*bp++ = ' ';
		len--;
	}
	*bpp = bp;
	*lp = len;
}
EXPORT_SYMBOL_GPL(qword_addhex);

static void warn_no_listener(struct cache_detail *detail)
{
	if (detail->last_warn != detail->last_close) {
		detail->last_warn = detail->last_close;
		if (detail->warn_no_listener)
			detail->warn_no_listener(detail, detail->last_close != 0);
	}
}

static bool cache_listeners_exist(struct cache_detail *detail)
{
	if (atomic_read(&detail->writers))
		return true;
	if (detail->last_close == 0)
		/* This cache was never opened */
		return false;
	if (detail->last_close < seconds_since_boot() - 30)
		/*
		 * We allow for the possibility that someone might
		 * restart a userspace daemon without restarting the
		 * server; but after 30 seconds, we give up.
		 */
		return false;
	return true;
}

/*
 * register an upcall request to user-space and queue it up for read() by the
 * upcall daemon.
 *
 * Each request is at most one page long.
 */
static int cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h)
{
	char *buf;
	struct cache_request *crq;
	int ret = 0;

	if (test_bit(CACHE_CLEANED, &h->flags))
		/* Too late to make an upcall */
		return -EAGAIN;

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return -EAGAIN;

	crq = kmalloc(sizeof(*crq), GFP_KERNEL);
	if (!crq) {
		kfree(buf);
		return -EAGAIN;
	}

	crq->q.reader = 0;
	crq->buf = buf;
	crq->len = 0;
	crq->readers = 0;
	spin_lock(&queue_lock);
	if (test_bit(CACHE_PENDING, &h->flags)) {
		crq->item = cache_get(h);
		list_add_tail(&crq->q.list, &detail->queue);
		trace_cache_entry_upcall(detail, h);
	} else
		/* Lost a race, no longer PENDING, so don't enqueue */
		ret = -EAGAIN;
	spin_unlock(&queue_lock);
	wake_up(&queue_wait);
	if (ret == -EAGAIN) {
		kfree(buf);
		kfree(crq);
	}
	return ret;
}

int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h)
{
	if (test_and_set_bit(CACHE_PENDING, &h->flags))
		return 0;
	return cache_pipe_upcall(detail, h);
}
EXPORT_SYMBOL_GPL(sunrpc_cache_pipe_upcall);

int sunrpc_cache_pipe_upcall_timeout(struct cache_detail *detail,
				     struct cache_head *h)
{
	if (!cache_listeners_exist(detail)) {
		warn_no_listener(detail);
		trace_cache_entry_no_listener(detail, h);
		return -EINVAL;
	}
	return sunrpc_cache_pipe_upcall(detail, h);
}
EXPORT_SYMBOL_GPL(sunrpc_cache_pipe_upcall_timeout);

/*
 * parse a message from user-space and pass it
 * to an appropriate cache
 * Messages are, like requests, separated into fields by
 * spaces and dequoted as \xHEXSTRING or embedded \nnn octal
 *
 * Message is
 *   reply cachename expiry key ... content....
 *
 * key and content are both parsed by cache
 */

int qword_get(char **bpp, char *dest, int bufsize)
{
	/* return bytes copied, or -1 on error */
	char *bp = *bpp;
	int len = 0;

	while (*bp == ' ') bp++;

	if (bp[0] == '\\' && bp[1] == 'x') {
		/* HEX STRING */
		bp += 2;
		while (len < bufsize - 1) {
			int h, l;

			h = hex_to_bin(bp[0]);
			if (h < 0)
				break;

			l = hex_to_bin(bp[1]);
			if (l < 0)
				break;

			*dest++ = (h << 4) | l;
			bp += 2;
			len++;
		}
	} else {
		/* text with \nnn octal quoting */
		while (*bp != ' ' && *bp != '\n' && *bp && len < bufsize-1) {
			if (*bp == '\\' &&
			    isodigit(bp[1]) && (bp[1] <= '3') &&
			    isodigit(bp[2]) &&
			    isodigit(bp[3])) {
				int byte = (*++bp -'0');
				bp++;
				byte = (byte << 3) | (*bp++ - '0');
				byte = (byte << 3) | (*bp++ - '0');
				*dest++ = byte;
				len++;
			} else {
				*dest++ = *bp++;
				len++;
			}
		}
	}

	if (*bp != ' ' && *bp != '\n' && *bp != '\0')
		return -1;
	while (*bp == ' ') bp++;
	*bpp = bp;
	*dest = '\0';
	return len;
}
EXPORT_SYMBOL_GPL(qword_get);
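
/*
 * Sketch of how a cache_parse() implementation typically consumes a
 * downcall line with qword_get() (field handling is cache-specific;
 * the names below are hypothetical):
 *
 *	char word[128];
 *	int err;
 *
 *	err = qword_get(&mesg, word, sizeof(word));	// first field
 *	if (err <= 0)
 *		return -EINVAL;
 *	// ... further fields, then an expiry field; the expiry
 *	// helpers vary by kernel version
 *
 * Each call advances *bpp past one de-quoted field and returns the
 * number of bytes copied into dest, or -1 on malformed input.
 */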


/*
 * support /proc/net/rpc/$CACHENAME/content
 * as a seqfile.
 * We call ->cache_show passing NULL for the item to
 * get a header, then pass each real item in the cache
 */

static void *__cache_seq_start(struct seq_file *m, loff_t *pos)
{
	loff_t n = *pos;
	unsigned int hash, entry;
	struct cache_head *ch;
	struct cache_detail *cd = m->private;

	if (!n--)
		return SEQ_START_TOKEN;
	hash = n >> 32;
	entry = n & ((1LL<<32) - 1);

	hlist_for_each_entry_rcu(ch, &cd->hash_table[hash], cache_list)
		if (!entry--)
			return ch;
	n &= ~((1LL<<32) - 1);
	do {
		hash++;
		n += 1LL<<32;
	} while (hash < cd->hash_size &&
		 hlist_empty(&cd->hash_table[hash]));
	if (hash >= cd->hash_size)
		return NULL;
	*pos = n+1;
	return hlist_entry_safe(rcu_dereference_raw(
				hlist_first_rcu(&cd->hash_table[hash])),
				struct cache_head, cache_list);
}

static void *cache_seq_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct cache_head *ch = p;
	int hash = (*pos >> 32);
	struct cache_detail *cd = m->private;

	if (p == SEQ_START_TOKEN)
		hash = 0;
	else if (ch->cache_list.next == NULL) {
		hash++;
		*pos += 1LL<<32;
	} else {
		++*pos;
		return hlist_entry_safe(rcu_dereference_raw(
					hlist_next_rcu(&ch->cache_list)),
					struct cache_head, cache_list);
	}
	*pos &= ~((1LL<<32) - 1);
	while (hash < cd->hash_size &&
	       hlist_empty(&cd->hash_table[hash])) {
		hash++;
		*pos += 1LL<<32;
	}
	if (hash >= cd->hash_size)
		return NULL;
	++*pos;
	return hlist_entry_safe(rcu_dereference_raw(
				hlist_first_rcu(&cd->hash_table[hash])),
				struct cache_head, cache_list);
}

void *cache_seq_start_rcu(struct seq_file *m, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return __cache_seq_start(m, pos);
}
EXPORT_SYMBOL_GPL(cache_seq_start_rcu);

void *cache_seq_next_rcu(struct seq_file *file, void *p, loff_t *pos)
{
	return cache_seq_next(file, p, pos);
}
EXPORT_SYMBOL_GPL(cache_seq_next_rcu);

void cache_seq_stop_rcu(struct seq_file *m, void *p)
	__releases(RCU)
{
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(cache_seq_stop_rcu);

static int c_show(struct seq_file *m, void *p)
{
	struct cache_head *cp = p;
	struct cache_detail *cd = m->private;

	if (p == SEQ_START_TOKEN)
		return cd->cache_show(m, cd, NULL);

	ifdebug(CACHE)
		seq_printf(m, "# expiry=%lld refcnt=%d flags=%lx\n",
			   convert_to_wallclock(cp->expiry_time),
			   kref_read(&cp->ref), cp->flags);
	if (!cache_get_rcu(cp))
		return 0;

	if (cache_check(cd, cp, NULL))
		/* cache_check does a cache_put on failure */
		seq_puts(m, "# ");
	else {
		if (cache_is_expired(cd, cp))
			seq_puts(m, "# ");
		cache_put(cp, cd);
	}

	return cd->cache_show(m, cd, cp);
}

static const struct seq_operations cache_content_op = {
	.start	= cache_seq_start_rcu,
	.next	= cache_seq_next_rcu,
	.stop	= cache_seq_stop_rcu,
	.show	= c_show,
};

static int content_open(struct inode *inode, struct file *file,
			struct cache_detail *cd)
{
	struct seq_file *seq;
	int err;

	if (!cd || !try_module_get(cd->owner))
		return -EACCES;

	err = seq_open(file, &cache_content_op);
	if (err) {
		module_put(cd->owner);
		return err;
	}

	seq = file->private_data;
	seq->private = cd;
	return 0;
}

static int content_release(struct inode *inode, struct file *file,
			   struct cache_detail *cd)
{
	int ret = seq_release(inode, file);

	module_put(cd->owner);
	return ret;
}

static int open_flush(struct inode *inode, struct file *file,
		      struct cache_detail *cd)
{
	if (!cd || !try_module_get(cd->owner))
		return -EACCES;
	return nonseekable_open(inode, file);
}

static int release_flush(struct inode *inode, struct file *file,
			 struct cache_detail *cd)
{
	module_put(cd->owner);
	return 0;
}

static ssize_t read_flush(struct file *file, char __user *buf,
			  size_t count, loff_t *ppos,
			  struct cache_detail *cd)
{
	char tbuf[22];
	size_t len;

	len = snprintf(tbuf, sizeof(tbuf), "%llu\n",
		       convert_to_wallclock(cd->flush_time));
	return simple_read_from_buffer(buf, count, ppos, tbuf, len);
}

static ssize_t write_flush(struct file *file, const char __user *buf,
			   size_t count, loff_t *ppos,
			   struct cache_detail *cd)
{
	char tbuf[20];
	char *ep;
	time64_t now;

	if (*ppos || count > sizeof(tbuf)-1)
		return -EINVAL;
	if (copy_from_user(tbuf, buf, count))
		return -EFAULT;
	tbuf[count] = 0;
	simple_strtoul(tbuf, &ep, 0);
	if (*ep && *ep != '\n')
		return -EINVAL;
	/* Note that while we check that 'buf' holds a valid number,
	 * we always ignore the value and just flush everything.
	 * Making use of the number leads to races.
	 */

	now = seconds_since_boot();
	/* Always flush everything, so behave like cache_purge().
	 * Do this by advancing flush_time to the current time,
	 * or by one second if it has already reached the current time.
	 * Newly added cache entries will always have ->last_refresh greater
	 * than ->flush_time, so they don't get flushed prematurely.
	 */

	if (cd->flush_time >= now)
		now = cd->flush_time + 1;

	cd->flush_time = now;
	cd->nextcheck = now;
	cache_flush();

	if (cd->flush)
		cd->flush();

	*ppos += count;
	return count;
}
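
/*
 * From userspace this is typically driven through the procfs file
 * created below, e.g.:
 *
 *	echo 1 > /proc/net/rpc/auth.unix.ip/flush
 *
 * The written value is validated but otherwise ignored; every entry in
 * that cache is flushed, exactly as described above.
 */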

static ssize_t cache_read_procfs(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct cache_detail *cd = pde_data(file_inode(filp));

	return cache_read(filp, buf, count, ppos, cd);
}

static ssize_t cache_write_procfs(struct file *filp, const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = pde_data(file_inode(filp));

	return cache_write(filp, buf, count, ppos, cd);
}

static __poll_t cache_poll_procfs(struct file *filp, poll_table *wait)
{
	struct cache_detail *cd = pde_data(file_inode(filp));

	return cache_poll(filp, wait, cd);
}

static long cache_ioctl_procfs(struct file *filp,
			       unsigned int cmd, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct cache_detail *cd = pde_data(inode);

	return cache_ioctl(inode, filp, cmd, arg, cd);
}

static int cache_open_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = pde_data(inode);

	return cache_open(inode, filp, cd);
}

static int cache_release_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = pde_data(inode);

	return cache_release(inode, filp, cd);
}

static const struct proc_ops cache_channel_proc_ops = {
	.proc_read	= cache_read_procfs,
	.proc_write	= cache_write_procfs,
	.proc_poll	= cache_poll_procfs,
	.proc_ioctl	= cache_ioctl_procfs, /* for FIONREAD */
	.proc_open	= cache_open_procfs,
	.proc_release	= cache_release_procfs,
};

static int content_open_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = pde_data(inode);

	return content_open(inode, filp, cd);
}

static int content_release_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = pde_data(inode);

	return content_release(inode, filp, cd);
}

static const struct proc_ops content_proc_ops = {
	.proc_open	= content_open_procfs,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= content_release_procfs,
};

static int open_flush_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = pde_data(inode);

	return open_flush(inode, filp, cd);
}

static int release_flush_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = pde_data(inode);

	return release_flush(inode, filp, cd);
}

static ssize_t read_flush_procfs(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct cache_detail *cd = pde_data(file_inode(filp));

	return read_flush(filp, buf, count, ppos, cd);
}

static ssize_t write_flush_procfs(struct file *filp,
				  const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = pde_data(file_inode(filp));

	return write_flush(filp, buf, count, ppos, cd);
}

static const struct proc_ops cache_flush_proc_ops = {
	.proc_open	= open_flush_procfs,
	.proc_read	= read_flush_procfs,
	.proc_write	= write_flush_procfs,
	.proc_release	= release_flush_procfs,
};

static void remove_cache_proc_entries(struct cache_detail *cd)
{
	if (cd->procfs) {
		proc_remove(cd->procfs);
		cd->procfs = NULL;
	}
}

static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
{
	struct proc_dir_entry *p;
	struct sunrpc_net *sn;

	if (!IS_ENABLED(CONFIG_PROC_FS))
		return 0;

	sn = net_generic(net, sunrpc_net_id);
	cd->procfs = proc_mkdir(cd->name, sn->proc_net_rpc);
	if (cd->procfs == NULL)
		goto out_nomem;

	p = proc_create_data("flush", S_IFREG | 0600,
			     cd->procfs, &cache_flush_proc_ops, cd);
	if (p == NULL)
		goto out_nomem;

	if (cd->cache_request || cd->cache_parse) {
		p = proc_create_data("channel", S_IFREG | 0600, cd->procfs,
				     &cache_channel_proc_ops, cd);
		if (p == NULL)
			goto out_nomem;
	}
	if (cd->cache_show) {
		p = proc_create_data("content", S_IFREG | 0400, cd->procfs,
				     &content_proc_ops, cd);
		if (p == NULL)
			goto out_nomem;
	}
	return 0;
out_nomem:
	remove_cache_proc_entries(cd);
	return -ENOMEM;
}

void __init cache_initialize(void)
{
	INIT_DEFERRABLE_WORK(&cache_cleaner, do_cache_clean);
}

int cache_register_net(struct cache_detail *cd, struct net *net)
{
	int ret;

	sunrpc_init_cache_detail(cd);
	ret = create_cache_proc_entries(cd, net);
	if (ret)
		sunrpc_destroy_cache_detail(cd);
	return ret;
}
EXPORT_SYMBOL_GPL(cache_register_net);

void cache_unregister_net(struct cache_detail *cd, struct net *net)
{
	remove_cache_proc_entries(cd);
	sunrpc_destroy_cache_detail(cd);
}
EXPORT_SYMBOL_GPL(cache_unregister_net);

struct cache_detail *cache_create_net(const struct cache_detail *tmpl, struct net *net)
{
	struct cache_detail *cd;
	int i;

	cd = kmemdup(tmpl, sizeof(struct cache_detail), GFP_KERNEL);
	if (cd == NULL)
		return ERR_PTR(-ENOMEM);

	cd->hash_table = kcalloc(cd->hash_size, sizeof(struct hlist_head),
				 GFP_KERNEL);
	if (cd->hash_table == NULL) {
		kfree(cd);
		return ERR_PTR(-ENOMEM);
	}

	for (i = 0; i < cd->hash_size; i++)
		INIT_HLIST_HEAD(&cd->hash_table[i]);
	cd->net = net;
	return cd;
}
EXPORT_SYMBOL_GPL(cache_create_net);
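
/*
 * A per-net cache is usually brought up from a static template; a
 * sketch with a hypothetical template name:
 *
 *	struct cache_detail *cd;
 *
 *	cd = cache_create_net(&my_cache_template, net);
 *	if (IS_ERR(cd))
 *		return PTR_ERR(cd);
 *	err = cache_register_net(cd, net);	// creates the proc files
 *	if (err)
 *		cache_destroy_net(cd, net);
 *
 * and torn down with cache_unregister_net() followed by
 * cache_destroy_net().
 */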

void cache_destroy_net(struct cache_detail *cd, struct net *net)
{
	kfree(cd->hash_table);
	kfree(cd);
}
EXPORT_SYMBOL_GPL(cache_destroy_net);

static ssize_t cache_read_pipefs(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(file_inode(filp))->private;

	return cache_read(filp, buf, count, ppos, cd);
}

static ssize_t cache_write_pipefs(struct file *filp, const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(file_inode(filp))->private;

	return cache_write(filp, buf, count, ppos, cd);
}

static __poll_t cache_poll_pipefs(struct file *filp, poll_table *wait)
{
	struct cache_detail *cd = RPC_I(file_inode(filp))->private;

	return cache_poll(filp, wait, cd);
}

static long cache_ioctl_pipefs(struct file *filp,
			       unsigned int cmd, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct cache_detail *cd = RPC_I(inode)->private;

	return cache_ioctl(inode, filp, cmd, arg, cd);
}

static int cache_open_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return cache_open(inode, filp, cd);
}

static int cache_release_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return cache_release(inode, filp, cd);
}

const struct file_operations cache_file_operations_pipefs = {
	.owner		= THIS_MODULE,
	.read		= cache_read_pipefs,
	.write		= cache_write_pipefs,
	.poll		= cache_poll_pipefs,
	.unlocked_ioctl	= cache_ioctl_pipefs, /* for FIONREAD */
	.open		= cache_open_pipefs,
	.release	= cache_release_pipefs,
};

static int content_open_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return content_open(inode, filp, cd);
}

static int content_release_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return content_release(inode, filp, cd);
}

const struct file_operations content_file_operations_pipefs = {
	.open		= content_open_pipefs,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= content_release_pipefs,
};

static int open_flush_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return open_flush(inode, filp, cd);
}

static int release_flush_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return release_flush(inode, filp, cd);
}

static ssize_t read_flush_pipefs(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(file_inode(filp))->private;

	return read_flush(filp, buf, count, ppos, cd);
}

static ssize_t write_flush_pipefs(struct file *filp,
				  const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(file_inode(filp))->private;

	return write_flush(filp, buf, count, ppos, cd);
}

const struct file_operations cache_flush_operations_pipefs = {
	.open		= open_flush_pipefs,
	.read		= read_flush_pipefs,
	.write		= write_flush_pipefs,
	.release	= release_flush_pipefs,
};

int sunrpc_cache_register_pipefs(struct dentry *parent,
				 const char *name, umode_t umode,
				 struct cache_detail *cd)
{
	struct dentry *dir = rpc_create_cache_dir(parent, name, umode, cd);

	if (IS_ERR(dir))
		return PTR_ERR(dir);
	cd->pipefs = dir;
	return 0;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_register_pipefs);

void sunrpc_cache_unregister_pipefs(struct cache_detail *cd)
{
	if (cd->pipefs) {
		rpc_remove_cache_dir(cd->pipefs);
		cd->pipefs = NULL;
	}
}
EXPORT_SYMBOL_GPL(sunrpc_cache_unregister_pipefs);

void sunrpc_cache_unhash(struct cache_detail *cd, struct cache_head *h)
{
	spin_lock(&cd->hash_lock);
	if (!hlist_unhashed(&h->cache_list)) {
		sunrpc_begin_cache_remove_entry(h, cd);
		spin_unlock(&cd->hash_lock);
		sunrpc_end_cache_remove_entry(h, cd);
	} else
		spin_unlock(&cd->hash_lock);
}
EXPORT_SYMBOL_GPL(sunrpc_cache_unhash);
1/*
2 * net/sunrpc/cache.c
3 *
4 * Generic code for various authentication-related caches
5 * used by sunrpc clients and servers.
6 *
7 * Copyright (C) 2002 Neil Brown <neilb@cse.unsw.edu.au>
8 *
9 * Released under terms in GPL version 2. See COPYING.
10 *
11 */
12
13#include <linux/types.h>
14#include <linux/fs.h>
15#include <linux/file.h>
16#include <linux/slab.h>
17#include <linux/signal.h>
18#include <linux/sched.h>
19#include <linux/kmod.h>
20#include <linux/list.h>
21#include <linux/module.h>
22#include <linux/ctype.h>
23#include <asm/uaccess.h>
24#include <linux/poll.h>
25#include <linux/seq_file.h>
26#include <linux/proc_fs.h>
27#include <linux/net.h>
28#include <linux/workqueue.h>
29#include <linux/mutex.h>
30#include <linux/pagemap.h>
31#include <asm/ioctls.h>
32#include <linux/sunrpc/types.h>
33#include <linux/sunrpc/cache.h>
34#include <linux/sunrpc/stats.h>
35#include <linux/sunrpc/rpc_pipe_fs.h>
36#include "netns.h"
37
38#define RPCDBG_FACILITY RPCDBG_CACHE
39
40static bool cache_defer_req(struct cache_req *req, struct cache_head *item);
41static void cache_revisit_request(struct cache_head *item);
42
43static void cache_init(struct cache_head *h)
44{
45 time_t now = seconds_since_boot();
46 h->next = NULL;
47 h->flags = 0;
48 kref_init(&h->ref);
49 h->expiry_time = now + CACHE_NEW_EXPIRY;
50 h->last_refresh = now;
51}
52
53static inline int cache_is_expired(struct cache_detail *detail, struct cache_head *h)
54{
55 return (h->expiry_time < seconds_since_boot()) ||
56 (detail->flush_time > h->last_refresh);
57}
58
59struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
60 struct cache_head *key, int hash)
61{
62 struct cache_head **head, **hp;
63 struct cache_head *new = NULL, *freeme = NULL;
64
65 head = &detail->hash_table[hash];
66
67 read_lock(&detail->hash_lock);
68
69 for (hp=head; *hp != NULL ; hp = &(*hp)->next) {
70 struct cache_head *tmp = *hp;
71 if (detail->match(tmp, key)) {
72 if (cache_is_expired(detail, tmp))
73 /* This entry is expired, we will discard it. */
74 break;
75 cache_get(tmp);
76 read_unlock(&detail->hash_lock);
77 return tmp;
78 }
79 }
80 read_unlock(&detail->hash_lock);
81 /* Didn't find anything, insert an empty entry */
82
83 new = detail->alloc();
84 if (!new)
85 return NULL;
86 /* must fully initialise 'new', else
87 * we might get lose if we need to
88 * cache_put it soon.
89 */
90 cache_init(new);
91 detail->init(new, key);
92
93 write_lock(&detail->hash_lock);
94
95 /* check if entry appeared while we slept */
96 for (hp=head; *hp != NULL ; hp = &(*hp)->next) {
97 struct cache_head *tmp = *hp;
98 if (detail->match(tmp, key)) {
99 if (cache_is_expired(detail, tmp)) {
100 *hp = tmp->next;
101 tmp->next = NULL;
102 detail->entries --;
103 freeme = tmp;
104 break;
105 }
106 cache_get(tmp);
107 write_unlock(&detail->hash_lock);
108 cache_put(new, detail);
109 return tmp;
110 }
111 }
112 new->next = *head;
113 *head = new;
114 detail->entries++;
115 cache_get(new);
116 write_unlock(&detail->hash_lock);
117
118 if (freeme)
119 cache_put(freeme, detail);
120 return new;
121}
122EXPORT_SYMBOL_GPL(sunrpc_cache_lookup);

static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch);

static void cache_fresh_locked(struct cache_head *head, time_t expiry)
{
	head->expiry_time = expiry;
	head->last_refresh = seconds_since_boot();
	smp_wmb(); /* paired with smp_rmb() in cache_is_valid() */
	set_bit(CACHE_VALID, &head->flags);
}

static void cache_fresh_unlocked(struct cache_head *head,
				 struct cache_detail *detail)
{
	if (test_and_clear_bit(CACHE_PENDING, &head->flags)) {
		cache_revisit_request(head);
		cache_dequeue(detail, head);
	}
}

struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
				       struct cache_head *new, struct cache_head *old, int hash)
{
	/* The 'old' entry is to be replaced by 'new'.
	 * If 'old' is not VALID, we update it directly,
	 * otherwise we need to replace it
	 */
	struct cache_head **head;
	struct cache_head *tmp;

	if (!test_bit(CACHE_VALID, &old->flags)) {
		write_lock(&detail->hash_lock);
		if (!test_bit(CACHE_VALID, &old->flags)) {
			if (test_bit(CACHE_NEGATIVE, &new->flags))
				set_bit(CACHE_NEGATIVE, &old->flags);
			else
				detail->update(old, new);
			cache_fresh_locked(old, new->expiry_time);
			write_unlock(&detail->hash_lock);
			cache_fresh_unlocked(old, detail);
			return old;
		}
		write_unlock(&detail->hash_lock);
	}
	/* We need to insert a new entry */
	tmp = detail->alloc();
	if (!tmp) {
		cache_put(old, detail);
		return NULL;
	}
	cache_init(tmp);
	detail->init(tmp, old);
	head = &detail->hash_table[hash];

	write_lock(&detail->hash_lock);
	if (test_bit(CACHE_NEGATIVE, &new->flags))
		set_bit(CACHE_NEGATIVE, &tmp->flags);
	else
		detail->update(tmp, new);
	tmp->next = *head;
	*head = tmp;
	detail->entries++;
	cache_get(tmp);
	cache_fresh_locked(tmp, new->expiry_time);
	cache_fresh_locked(old, 0);
	write_unlock(&detail->hash_lock);
	cache_fresh_unlocked(tmp, detail);
	cache_fresh_unlocked(old, detail);
	cache_put(old, detail);
	return tmp;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_update);
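
/*
 * Example (sketch): a downcall parser typically fills a temporary entry
 * on the stack and folds it in with sunrpc_cache_update().  The returned
 * head supersedes 'old' and must eventually be cache_put() by the caller.
 * All names below are hypothetical.
 *
 *	static int my_entry_update(struct cache_detail *cd,
 *				   struct my_entry *cur,
 *				   int negative, time_t expiry)
 *	{
 *		struct my_entry new;
 *		struct cache_head *ch;
 *
 *		new.addr = cur->addr;
 *		new.h.flags = 0;
 *		if (negative)
 *			set_bit(CACHE_NEGATIVE, &new.h.flags);
 *		new.h.expiry_time = expiry;
 *		ch = sunrpc_cache_update(cd, &new.h, &cur->h,
 *					 my_hash(&cur->addr));
 *		if (!ch)
 *			return -ENOMEM;	// reference on 'cur' already dropped
 *		cache_put(ch, cd);
 *		return 0;
 *	}
 */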

static int cache_make_upcall(struct cache_detail *cd, struct cache_head *h)
{
	if (!cd->cache_upcall)
		return -EINVAL;
	return cd->cache_upcall(cd, h);
}

static inline int cache_is_valid(struct cache_detail *detail, struct cache_head *h)
{
	if (!test_bit(CACHE_VALID, &h->flags))
		return -EAGAIN;
	else {
		/* entry is valid */
		if (test_bit(CACHE_NEGATIVE, &h->flags))
			return -ENOENT;
		else {
			/*
			 * In combination with write barrier in
			 * sunrpc_cache_update, ensures that anyone
			 * using the cache entry after this sees the
			 * updated contents:
			 */
			smp_rmb();
			return 0;
		}
	}
}

static int try_to_negate_entry(struct cache_detail *detail, struct cache_head *h)
{
	int rv;

	write_lock(&detail->hash_lock);
	rv = cache_is_valid(detail, h);
	if (rv != -EAGAIN) {
		write_unlock(&detail->hash_lock);
		return rv;
	}
	set_bit(CACHE_NEGATIVE, &h->flags);
	cache_fresh_locked(h, seconds_since_boot()+CACHE_NEW_EXPIRY);
	write_unlock(&detail->hash_lock);
	cache_fresh_unlocked(h, detail);
	return -ENOENT;
}

/*
 * This is the generic cache management routine for all
 * the authentication caches.
 * It checks the currency of a cache item and will (later)
 * initiate an upcall to fill it if needed.
 *
 * Returns 0 if the cache_head can be used, or cache_puts it and returns
 * -EAGAIN if upcall is pending and request has been queued
 * -ETIMEDOUT if upcall failed or request could not be queued or
 *            upcall completed but item is still invalid (implying that
 *            the cache item has been replaced with a newer one).
 * -ENOENT if cache entry was negative
 */
int cache_check(struct cache_detail *detail,
		struct cache_head *h, struct cache_req *rqstp)
{
	int rv;
	long refresh_age, age;

	/* First decide return status as best we can */
	rv = cache_is_valid(detail, h);

	/* now see if we want to start an upcall */
	refresh_age = (h->expiry_time - h->last_refresh);
	age = seconds_since_boot() - h->last_refresh;

	if (rqstp == NULL) {
		if (rv == -EAGAIN)
			rv = -ENOENT;
	} else if (rv == -EAGAIN || age > refresh_age/2) {
		dprintk("RPC: Want update, refage=%ld, age=%ld\n",
			refresh_age, age);
		if (!test_and_set_bit(CACHE_PENDING, &h->flags)) {
			switch (cache_make_upcall(detail, h)) {
			case -EINVAL:
				clear_bit(CACHE_PENDING, &h->flags);
				cache_revisit_request(h);
				rv = try_to_negate_entry(detail, h);
				break;
			case -EAGAIN:
				clear_bit(CACHE_PENDING, &h->flags);
				cache_revisit_request(h);
				break;
			}
		}
	}

	if (rv == -EAGAIN) {
		if (!cache_defer_req(rqstp, h)) {
			/*
			 * Request was not deferred; handle it as best
			 * we can ourselves:
			 */
			rv = cache_is_valid(detail, h);
			if (rv == -EAGAIN)
				rv = -ETIMEDOUT;
		}
	}
	if (rv)
		cache_put(h, detail);
	return rv;
}
EXPORT_SYMBOL_GPL(cache_check);
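
/*
 * Example (sketch): a typical server-side caller maps the cache_check()
 * result onto its own dispositions.  'rqstp->rq_chandle' is the cache_req
 * embedded in struct svc_rqst; SVC_DROP/SVC_DENIED are the usual svcauth
 * verdicts.  The surrounding glue is hypothetical.
 *
 *	switch (cache_check(cd, &ent->h, &rqstp->rq_chandle)) {
 *	case 0:
 *		// valid entry; we still hold a reference, so use it
 *		// and cache_put() it when finished.
 *		break;
 *	case -EAGAIN:
 *		return SVC_DROP;	// deferred; will be revisited
 *	case -ENOENT:
 *		return SVC_DENIED;	// negative entry
 *	default:
 *		return SVC_DROP;	// -ETIMEDOUT and friends
 *	}
 */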

/*
 * caches need to be periodically cleaned.
 * For this we maintain a list of cache_detail and
 * a current pointer into that list and into the table
 * for that entry.
 *
 * Each time cache_clean is called it finds the next non-empty entry
 * in the current table and walks the list in that entry
 * looking for entries that can be removed.
 *
 * An entry gets removed if:
 * - The expiry is before current time
 * - The last_refresh time is before the flush_time for that cache
 *
 * later we might drop old entries with non-NEVER expiry if that table
 * is getting 'full' for some definition of 'full'
 *
 * The question of "how often to scan a table" is an interesting one
 * and is answered in part by the use of the "nextcheck" field in the
 * cache_detail.
 * When a scan of a table begins, the nextcheck field is set to a time
 * that is well into the future.
 * While scanning, if an expiry time is found that is earlier than the
 * current nextcheck time, nextcheck is set to that expiry time.
 * If the flush_time is ever set to a time earlier than the nextcheck
 * time, the nextcheck time is then set to that flush_time.
 *
 * A table is then only scanned if the current time is at least
 * the nextcheck time.
 */
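
/*
 * Worked example (hypothetical numbers): a table is scanned at t=1000s
 * since boot, so nextcheck is first pushed well ahead, to 1000 + 30*60 =
 * 2800.  While walking a bucket the cleaner sees live entries expiring
 * at 1400 and 2100; the first drops nextcheck to 1401, the second leaves
 * it alone.  Later passes then skip this table until seconds_since_boot()
 * reaches 1401, unless an earlier flush_time is written in the meantime.
 */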

static LIST_HEAD(cache_list);
static DEFINE_SPINLOCK(cache_list_lock);
static struct cache_detail *current_detail;
static int current_index;

static void do_cache_clean(struct work_struct *work);
static struct delayed_work cache_cleaner;

void sunrpc_init_cache_detail(struct cache_detail *cd)
{
	rwlock_init(&cd->hash_lock);
	INIT_LIST_HEAD(&cd->queue);
	spin_lock(&cache_list_lock);
	cd->nextcheck = 0;
	cd->entries = 0;
	atomic_set(&cd->readers, 0);
	cd->last_close = 0;
	cd->last_warn = -1;
	list_add(&cd->others, &cache_list);
	spin_unlock(&cache_list_lock);

	/* start the cleaning process */
	schedule_delayed_work(&cache_cleaner, 0);
}
EXPORT_SYMBOL_GPL(sunrpc_init_cache_detail);

void sunrpc_destroy_cache_detail(struct cache_detail *cd)
{
	cache_purge(cd);
	spin_lock(&cache_list_lock);
	write_lock(&cd->hash_lock);
	if (cd->entries || atomic_read(&cd->inuse)) {
		write_unlock(&cd->hash_lock);
		spin_unlock(&cache_list_lock);
		goto out;
	}
	if (current_detail == cd)
		current_detail = NULL;
	list_del_init(&cd->others);
	write_unlock(&cd->hash_lock);
	spin_unlock(&cache_list_lock);
	if (list_empty(&cache_list)) {
		/* module must be being unloaded so it's safe to kill the worker */
		cancel_delayed_work_sync(&cache_cleaner);
	}
	return;
out:
	printk(KERN_ERR "nfsd: failed to unregister %s cache\n", cd->name);
}
EXPORT_SYMBOL_GPL(sunrpc_destroy_cache_detail);

/* clean cache tries to find something to clean
 * and cleans it.
 * It returns 1 if it cleaned something,
 *            0 if it didn't find anything this time
 *           -1 if it fell off the end of the list.
 */
static int cache_clean(void)
{
	int rv = 0;
	struct list_head *next;

	spin_lock(&cache_list_lock);

	/* find a suitable table if we don't already have one */
	while (current_detail == NULL ||
	       current_index >= current_detail->hash_size) {
		if (current_detail)
			next = current_detail->others.next;
		else
			next = cache_list.next;
		if (next == &cache_list) {
			current_detail = NULL;
			spin_unlock(&cache_list_lock);
			return -1;
		}
		current_detail = list_entry(next, struct cache_detail, others);
		if (current_detail->nextcheck > seconds_since_boot())
			current_index = current_detail->hash_size;
		else {
			current_index = 0;
			current_detail->nextcheck = seconds_since_boot()+30*60;
		}
	}

	/* find a non-empty bucket in the table */
	while (current_detail &&
	       current_index < current_detail->hash_size &&
	       current_detail->hash_table[current_index] == NULL)
		current_index++;

	/* find a cleanable entry in the bucket and clean it, or set to next bucket */

	if (current_detail && current_index < current_detail->hash_size) {
		struct cache_head *ch, **cp;
		struct cache_detail *d;

		write_lock(&current_detail->hash_lock);

		/* Ok, now to clean this strand */

		cp = &current_detail->hash_table[current_index];
		for (ch = *cp; ch; cp = &ch->next, ch = *cp) {
			if (current_detail->nextcheck > ch->expiry_time)
				current_detail->nextcheck = ch->expiry_time+1;
			if (!cache_is_expired(current_detail, ch))
				continue;

			*cp = ch->next;
			ch->next = NULL;
			current_detail->entries--;
			rv = 1;
			break;
		}

		write_unlock(&current_detail->hash_lock);
		d = current_detail;
		if (!ch)
			current_index++;
		spin_unlock(&cache_list_lock);
		if (ch) {
			if (test_and_clear_bit(CACHE_PENDING, &ch->flags))
				cache_dequeue(current_detail, ch);
			cache_revisit_request(ch);
			cache_put(ch, d);
		}
	} else
		spin_unlock(&cache_list_lock);

	return rv;
}

/*
 * We want to regularly clean the cache, so we need to schedule some work ...
 */
static void do_cache_clean(struct work_struct *work)
{
	int delay = 5;
	if (cache_clean() == -1)
		delay = round_jiffies_relative(30*HZ);

	if (list_empty(&cache_list))
		delay = 0;

	if (delay)
		schedule_delayed_work(&cache_cleaner, delay);
}

/*
 * Clean all caches promptly.  This just calls cache_clean
 * repeatedly until we are sure that every cache has had a chance to
 * be fully cleaned
 */
void cache_flush(void)
{
	while (cache_clean() != -1)
		cond_resched();
	while (cache_clean() != -1)
		cond_resched();
}
EXPORT_SYMBOL_GPL(cache_flush);

void cache_purge(struct cache_detail *detail)
{
	detail->flush_time = LONG_MAX;
	detail->nextcheck = seconds_since_boot();
	cache_flush();
	detail->flush_time = 1;
}
EXPORT_SYMBOL_GPL(cache_purge);

/*
 * Deferral and Revisiting of Requests.
 *
 * If a cache lookup finds a pending entry, we
 * need to defer the request and revisit it later.
 * All deferred requests are stored in a hash table,
 * indexed by "struct cache_head *".
 * As it may be wasteful to store a whole request
 * structure, we allow the request to provide a
 * deferred form, which must contain a
 * 'struct cache_deferred_req'.
 * This cache_deferred_req contains a method to allow
 * it to be revisited when cache info is available.
 */
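
/*
 * Example (sketch): a request provider implements ->defer() by parking
 * the request in a longer-lived structure that embeds the
 * cache_deferred_req handle; ->revisit() recovers it with container_of().
 * svc_defer()/svc_revisit() in svc_xprt.c are the in-tree instance; the
 * names below (my_deferred, save_request, my_requeue, my_drop, my_owner)
 * are hypothetical.
 *
 *	struct my_deferred {
 *		struct cache_deferred_req handle;
 *		// ... saved request state ...
 *	};
 *
 *	static void my_revisit(struct cache_deferred_req *dreq, int too_many)
 *	{
 *		struct my_deferred *md =
 *			container_of(dreq, struct my_deferred, handle);
 *
 *		if (too_many)
 *			my_drop(md);	// defer table overflowed
 *		else
 *			my_requeue(md);	// cache info arrived; retry
 *	}
 *
 *	static struct cache_deferred_req *my_defer(struct cache_req *req)
 *	{
 *		struct my_deferred *md = save_request(req);
 *
 *		if (!md)
 *			return NULL;	// caller will see -ETIMEDOUT
 *		md->handle.owner = my_owner;	// for cache_clean_deferred()
 *		md->handle.revisit = my_revisit;
 *		return &md->handle;
 *	}
 */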

#define	DFR_HASHSIZE	(PAGE_SIZE/sizeof(struct list_head))
#define	DFR_HASH(item)	((((long)item)>>4 ^ (((long)item)>>13)) % DFR_HASHSIZE)

#define	DFR_MAX	300	/* ??? */

static DEFINE_SPINLOCK(cache_defer_lock);
static LIST_HEAD(cache_defer_list);
static struct hlist_head cache_defer_hash[DFR_HASHSIZE];
static int cache_defer_cnt;

static void __unhash_deferred_req(struct cache_deferred_req *dreq)
{
	hlist_del_init(&dreq->hash);
	if (!list_empty(&dreq->recent)) {
		list_del_init(&dreq->recent);
		cache_defer_cnt--;
	}
}

static void __hash_deferred_req(struct cache_deferred_req *dreq, struct cache_head *item)
{
	int hash = DFR_HASH(item);

	INIT_LIST_HEAD(&dreq->recent);
	hlist_add_head(&dreq->hash, &cache_defer_hash[hash]);
}

static void setup_deferral(struct cache_deferred_req *dreq,
			   struct cache_head *item,
			   int count_me)
{
	dreq->item = item;

	spin_lock(&cache_defer_lock);

	__hash_deferred_req(dreq, item);

	if (count_me) {
		cache_defer_cnt++;
		list_add(&dreq->recent, &cache_defer_list);
	}

	spin_unlock(&cache_defer_lock);
}

struct thread_deferred_req {
	struct cache_deferred_req handle;
	struct completion completion;
};

static void cache_restart_thread(struct cache_deferred_req *dreq, int too_many)
{
	struct thread_deferred_req *dr =
		container_of(dreq, struct thread_deferred_req, handle);
	complete(&dr->completion);
}

static void cache_wait_req(struct cache_req *req, struct cache_head *item)
{
	struct thread_deferred_req sleeper;
	struct cache_deferred_req *dreq = &sleeper.handle;

	sleeper.completion = COMPLETION_INITIALIZER_ONSTACK(sleeper.completion);
	dreq->revisit = cache_restart_thread;

	setup_deferral(dreq, item, 0);

	if (!test_bit(CACHE_PENDING, &item->flags) ||
	    wait_for_completion_interruptible_timeout(
		    &sleeper.completion, req->thread_wait) <= 0) {
		/* The completion wasn't completed, so we need
		 * to clean up
		 */
		spin_lock(&cache_defer_lock);
		if (!hlist_unhashed(&sleeper.handle.hash)) {
			__unhash_deferred_req(&sleeper.handle);
			spin_unlock(&cache_defer_lock);
		} else {
			/* cache_revisit_request already removed
			 * this from the hash table, but hasn't
			 * called ->revisit yet.  It will very soon
			 * and we need to wait for it.
			 */
			spin_unlock(&cache_defer_lock);
			wait_for_completion(&sleeper.completion);
		}
	}
}

static void cache_limit_defers(void)
{
	/* Make sure we haven't exceeded the limit of allowed deferred
	 * requests.
	 */
	struct cache_deferred_req *discard = NULL;

	if (cache_defer_cnt <= DFR_MAX)
		return;

	spin_lock(&cache_defer_lock);

	/* Consider removing either the first or the last */
	if (cache_defer_cnt > DFR_MAX) {
		if (net_random() & 1)
			discard = list_entry(cache_defer_list.next,
					     struct cache_deferred_req, recent);
		else
			discard = list_entry(cache_defer_list.prev,
					     struct cache_deferred_req, recent);
		__unhash_deferred_req(discard);
	}
	spin_unlock(&cache_defer_lock);
	if (discard)
		discard->revisit(discard, 1);
}

/* Return true if and only if a deferred request is queued. */
static bool cache_defer_req(struct cache_req *req, struct cache_head *item)
{
	struct cache_deferred_req *dreq;

	if (req->thread_wait) {
		cache_wait_req(req, item);
		if (!test_bit(CACHE_PENDING, &item->flags))
			return false;
	}
	dreq = req->defer(req);
	if (dreq == NULL)
		return false;
	setup_deferral(dreq, item, 1);
	if (!test_bit(CACHE_PENDING, &item->flags))
		/* Bit could have been cleared before we managed to
		 * set up the deferral, so need to revisit just in case
		 */
		cache_revisit_request(item);

	cache_limit_defers();
	return true;
}

static void cache_revisit_request(struct cache_head *item)
{
	struct cache_deferred_req *dreq;
	struct list_head pending;
	struct hlist_node *lp, *tmp;
	int hash = DFR_HASH(item);

	INIT_LIST_HEAD(&pending);
	spin_lock(&cache_defer_lock);

	hlist_for_each_entry_safe(dreq, lp, tmp, &cache_defer_hash[hash], hash)
		if (dreq->item == item) {
			__unhash_deferred_req(dreq);
			list_add(&dreq->recent, &pending);
		}

	spin_unlock(&cache_defer_lock);

	while (!list_empty(&pending)) {
		dreq = list_entry(pending.next, struct cache_deferred_req, recent);
		list_del_init(&dreq->recent);
		dreq->revisit(dreq, 0);
	}
}

void cache_clean_deferred(void *owner)
{
	struct cache_deferred_req *dreq, *tmp;
	struct list_head pending;

	INIT_LIST_HEAD(&pending);
	spin_lock(&cache_defer_lock);

	list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
		if (dreq->owner == owner) {
			__unhash_deferred_req(dreq);
			list_add(&dreq->recent, &pending);
		}
	}
	spin_unlock(&cache_defer_lock);

	while (!list_empty(&pending)) {
		dreq = list_entry(pending.next, struct cache_deferred_req, recent);
		list_del_init(&dreq->recent);
		dreq->revisit(dreq, 1);
	}
}

/*
 * communicate with user-space
 *
 * We have a magic /proc file - /proc/sunrpc/<cachename>/channel.
 * On read, you get a full request, or block.
 * On write, an update request is processed.
 * Poll works if anything to read, and always allows write.
 *
 * Implemented by linked list of requests.  Each open file has
 * a ->private that also exists in this list.  New requests are added
 * to the end and may wake up any preceding readers.
 * New readers are added to the head.  If, on read, an item is found with
 * CACHE_UPCALLING clear, we free it from the list.
 */
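
/*
 * Example (illustrative sketch): the matching userspace side is a plain
 * read/parse/write loop on the channel file.  This is roughly how daemons
 * such as rpc.mountd operate; the path and reply line below are only
 * illustrative, since each cache defines its own line syntax.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <time.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		char req[8192], reply[8192];
 *		ssize_t n;
 *		int fd = open("/proc/net/rpc/auth.unix.ip/channel", O_RDWR);
 *
 *		if (fd < 0)
 *			return 1;
 *		while ((n = read(fd, req, sizeof(req))) > 0) {
 *			// decode req[0..n), look up the answer, then write
 *			// one complete reply line back in the cache's syntax:
 *			int len = snprintf(reply, sizeof(reply),
 *					   "nfsd 10.0.0.1 %ld localhost\n",
 *					   (long)time(NULL) + 3600);
 *			if (write(fd, reply, len) != len)
 *				perror("downcall");
 *		}
 *		return 0;
 *	}
 */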

static DEFINE_SPINLOCK(queue_lock);
static DEFINE_MUTEX(queue_io_mutex);

struct cache_queue {
	struct list_head	list;
	int			reader;	/* if 0, then request */
};
struct cache_request {
	struct cache_queue	q;
	struct cache_head	*item;
	char			*buf;
	int			len;
	int			readers;
};
struct cache_reader {
	struct cache_queue	q;
	int			offset;	/* if non-0, we have a refcnt on next request */
};

static ssize_t cache_read(struct file *filp, char __user *buf, size_t count,
			  loff_t *ppos, struct cache_detail *cd)
{
	struct cache_reader *rp = filp->private_data;
	struct cache_request *rq;
	struct inode *inode = filp->f_path.dentry->d_inode;
	int err;

	if (count == 0)
		return 0;

	mutex_lock(&inode->i_mutex); /* protect against multiple concurrent
				      * readers on this file */
again:
	spin_lock(&queue_lock);
	/* need to find next request */
	while (rp->q.list.next != &cd->queue &&
	       list_entry(rp->q.list.next, struct cache_queue, list)
	       ->reader) {
		struct list_head *next = rp->q.list.next;
		list_move(&rp->q.list, next);
	}
	if (rp->q.list.next == &cd->queue) {
		spin_unlock(&queue_lock);
		mutex_unlock(&inode->i_mutex);
		BUG_ON(rp->offset);
		return 0;
	}
	rq = container_of(rp->q.list.next, struct cache_request, q.list);
	BUG_ON(rq->q.reader);
	if (rp->offset == 0)
		rq->readers++;
	spin_unlock(&queue_lock);

	if (rp->offset == 0 && !test_bit(CACHE_PENDING, &rq->item->flags)) {
		err = -EAGAIN;
		spin_lock(&queue_lock);
		list_move(&rp->q.list, &rq->q.list);
		spin_unlock(&queue_lock);
	} else {
		if (rp->offset + count > rq->len)
			count = rq->len - rp->offset;
		err = -EFAULT;
		if (copy_to_user(buf, rq->buf + rp->offset, count))
			goto out;
		rp->offset += count;
		if (rp->offset >= rq->len) {
			rp->offset = 0;
			spin_lock(&queue_lock);
			list_move(&rp->q.list, &rq->q.list);
			spin_unlock(&queue_lock);
		}
		err = 0;
	}
out:
	if (rp->offset == 0) {
		/* need to release rq */
		spin_lock(&queue_lock);
		rq->readers--;
		if (rq->readers == 0 &&
		    !test_bit(CACHE_PENDING, &rq->item->flags)) {
			list_del(&rq->q.list);
			spin_unlock(&queue_lock);
			cache_put(rq->item, cd);
			kfree(rq->buf);
			kfree(rq);
		} else
			spin_unlock(&queue_lock);
	}
	if (err == -EAGAIN)
		goto again;
	mutex_unlock(&inode->i_mutex);
	return err ? err : count;
}

static ssize_t cache_do_downcall(char *kaddr, const char __user *buf,
				 size_t count, struct cache_detail *cd)
{
	ssize_t ret;

	if (count == 0)
		return -EINVAL;
	if (copy_from_user(kaddr, buf, count))
		return -EFAULT;
	kaddr[count] = '\0';
	ret = cd->cache_parse(cd, kaddr, count);
	if (!ret)
		ret = count;
	return ret;
}

static ssize_t cache_slow_downcall(const char __user *buf,
				   size_t count, struct cache_detail *cd)
{
	static char write_buf[8192]; /* protected by queue_io_mutex */
	ssize_t ret = -EINVAL;

	if (count >= sizeof(write_buf))
		goto out;
	mutex_lock(&queue_io_mutex);
	ret = cache_do_downcall(write_buf, buf, count, cd);
	mutex_unlock(&queue_io_mutex);
out:
	return ret;
}

static ssize_t cache_downcall(struct address_space *mapping,
			      const char __user *buf,
			      size_t count, struct cache_detail *cd)
{
	struct page *page;
	char *kaddr;
	ssize_t ret = -ENOMEM;

	if (count >= PAGE_CACHE_SIZE)
		goto out_slow;

	page = find_or_create_page(mapping, 0, GFP_KERNEL);
	if (!page)
		goto out_slow;

	kaddr = kmap(page);
	ret = cache_do_downcall(kaddr, buf, count, cd);
	kunmap(page);
	unlock_page(page);
	page_cache_release(page);
	return ret;
out_slow:
	return cache_slow_downcall(buf, count, cd);
}

static ssize_t cache_write(struct file *filp, const char __user *buf,
			   size_t count, loff_t *ppos,
			   struct cache_detail *cd)
{
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = filp->f_path.dentry->d_inode;
	ssize_t ret = -EINVAL;

	if (!cd->cache_parse)
		goto out;

	mutex_lock(&inode->i_mutex);
	ret = cache_downcall(mapping, buf, count, cd);
	mutex_unlock(&inode->i_mutex);
out:
	return ret;
}

static DECLARE_WAIT_QUEUE_HEAD(queue_wait);

static unsigned int cache_poll(struct file *filp, poll_table *wait,
			       struct cache_detail *cd)
{
	unsigned int mask;
	struct cache_reader *rp = filp->private_data;
	struct cache_queue *cq;

	poll_wait(filp, &queue_wait, wait);

	/* always allow write */
	mask = POLLOUT | POLLWRNORM;

	if (!rp)
		return mask;

	spin_lock(&queue_lock);

	for (cq = &rp->q; &cq->list != &cd->queue;
	     cq = list_entry(cq->list.next, struct cache_queue, list))
		if (!cq->reader) {
			mask |= POLLIN | POLLRDNORM;
			break;
		}
	spin_unlock(&queue_lock);
	return mask;
}

static int cache_ioctl(struct inode *ino, struct file *filp,
		       unsigned int cmd, unsigned long arg,
		       struct cache_detail *cd)
{
	int len = 0;
	struct cache_reader *rp = filp->private_data;
	struct cache_queue *cq;

	if (cmd != FIONREAD || !rp)
		return -EINVAL;

	spin_lock(&queue_lock);

	/* only find the length remaining in current request,
	 * or the length of the next request
	 */
	for (cq = &rp->q; &cq->list != &cd->queue;
	     cq = list_entry(cq->list.next, struct cache_queue, list))
		if (!cq->reader) {
			struct cache_request *cr =
				container_of(cq, struct cache_request, q);
			len = cr->len - rp->offset;
			break;
		}
	spin_unlock(&queue_lock);

	return put_user(len, (int __user *)arg);
}

static int cache_open(struct inode *inode, struct file *filp,
		      struct cache_detail *cd)
{
	struct cache_reader *rp = NULL;

	if (!cd || !try_module_get(cd->owner))
		return -EACCES;
	nonseekable_open(inode, filp);
	if (filp->f_mode & FMODE_READ) {
		rp = kmalloc(sizeof(*rp), GFP_KERNEL);
		if (!rp) {
			module_put(cd->owner);
			return -ENOMEM;
		}
		rp->offset = 0;
		rp->q.reader = 1;
		atomic_inc(&cd->readers);
		spin_lock(&queue_lock);
		list_add(&rp->q.list, &cd->queue);
		spin_unlock(&queue_lock);
	}
	filp->private_data = rp;
	return 0;
}

static int cache_release(struct inode *inode, struct file *filp,
			 struct cache_detail *cd)
{
	struct cache_reader *rp = filp->private_data;

	if (rp) {
		spin_lock(&queue_lock);
		if (rp->offset) {
			struct cache_queue *cq;
			for (cq = &rp->q; &cq->list != &cd->queue;
			     cq = list_entry(cq->list.next, struct cache_queue, list))
				if (!cq->reader) {
					container_of(cq, struct cache_request, q)
						->readers--;
					break;
				}
			rp->offset = 0;
		}
		list_del(&rp->q.list);
		spin_unlock(&queue_lock);

		filp->private_data = NULL;
		kfree(rp);

		cd->last_close = seconds_since_boot();
		atomic_dec(&cd->readers);
	}
	module_put(cd->owner);
	return 0;
}

static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch)
{
	struct cache_queue *cq;
	spin_lock(&queue_lock);
	list_for_each_entry(cq, &detail->queue, list)
		if (!cq->reader) {
			struct cache_request *cr = container_of(cq, struct cache_request, q);
			if (cr->item != ch)
				continue;
			if (cr->readers != 0)
				continue;
			list_del(&cr->q.list);
			spin_unlock(&queue_lock);
			cache_put(cr->item, detail);
			kfree(cr->buf);
			kfree(cr);
			return;
		}
	spin_unlock(&queue_lock);
}

/*
 * Support routines for text-based upcalls.
 * Fields are separated by spaces.
 * Fields are either mangled to quote space, tab, newline and slosh with slosh,
 * or hexified with a leading \x
 * Record is terminated with newline.
 */

void qword_add(char **bpp, int *lp, char *str)
{
	char *bp = *bpp;
	int len = *lp;
	char c;

	if (len < 0)
		return;

	while ((c = *str++) && len)
		switch (c) {
		case ' ':
		case '\t':
		case '\n':
		case '\\':
			if (len >= 4) {
				*bp++ = '\\';
				*bp++ = '0' + ((c & 0300)>>6);
				*bp++ = '0' + ((c & 0070)>>3);
				*bp++ = '0' + ((c & 0007)>>0);
			}
			len -= 4;
			break;
		default:
			*bp++ = c;
			len--;
		}
	if (c || len < 1)
		len = -1;
	else {
		*bp++ = ' ';
		len--;
	}
	*bpp = bp;
	*lp = len;
}
EXPORT_SYMBOL_GPL(qword_add);

void qword_addhex(char **bpp, int *lp, char *buf, int blen)
{
	char *bp = *bpp;
	int len = *lp;

	if (len < 0)
		return;

	if (len > 2) {
		*bp++ = '\\';
		*bp++ = 'x';
		len -= 2;
		while (blen && len >= 2) {
			unsigned char c = *buf++;
			*bp++ = '0' + ((c&0xf0)>>4) + (c>=0xa0)*('a'-'9'-1);
			*bp++ = '0' + (c&0x0f) + ((c&0x0f)>=0x0a)*('a'-'9'-1);
			len -= 2;
			blen--;
		}
	}
	if (blen || len < 1)
		len = -1;
	else {
		*bp++ = ' ';
		len--;
	}
	*bpp = bp;
	*lp = len;
}
EXPORT_SYMBOL_GPL(qword_addhex);

static void warn_no_listener(struct cache_detail *detail)
{
	if (detail->last_warn != detail->last_close) {
		detail->last_warn = detail->last_close;
		if (detail->warn_no_listener)
			detail->warn_no_listener(detail, detail->last_close != 0);
	}
}

static bool cache_listeners_exist(struct cache_detail *detail)
{
	if (atomic_read(&detail->readers))
		return true;
	if (detail->last_close == 0)
		/* This cache was never opened */
		return false;
	if (detail->last_close < seconds_since_boot() - 30)
		/*
		 * We allow for the possibility that someone might
		 * restart a userspace daemon without restarting the
		 * server; but after 30 seconds, we give up.
		 */
		return false;
	return true;
}

/*
 * register an upcall request to user-space and queue it up for read() by the
 * upcall daemon.
 *
 * Each request is at most one page long.
 */
int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h,
			     void (*cache_request)(struct cache_detail *,
						   struct cache_head *,
						   char **,
						   int *))
{
	char *buf;
	struct cache_request *crq;
	char *bp;
	int len;

	if (!cache_listeners_exist(detail)) {
		warn_no_listener(detail);
		return -EINVAL;
	}

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return -EAGAIN;

	crq = kmalloc(sizeof(*crq), GFP_KERNEL);
	if (!crq) {
		kfree(buf);
		return -EAGAIN;
	}

	bp = buf; len = PAGE_SIZE;

	cache_request(detail, h, &bp, &len);

	if (len < 0) {
		kfree(buf);
		kfree(crq);
		return -EAGAIN;
	}
	crq->q.reader = 0;
	crq->item = cache_get(h);
	crq->buf = buf;
	crq->len = PAGE_SIZE - len;
	crq->readers = 0;
	spin_lock(&queue_lock);
	list_add_tail(&crq->q.list, &detail->queue);
	spin_unlock(&queue_lock);
	wake_up(&queue_wait);
	return 0;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_pipe_upcall);
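
/*
 * Example (sketch): the cache_request callback handed to
 * sunrpc_cache_pipe_upcall() serializes the key with the qword helpers
 * above.  On return, *bpp and *blen describe the unused tail of the page;
 * a negative *blen signals overflow.  'my_entry' and its fields are
 * hypothetical.
 *
 *	static void my_cache_request(struct cache_detail *cd,
 *				     struct cache_head *h,
 *				     char **bpp, int *blen)
 *	{
 *		struct my_entry *e = container_of(h, struct my_entry, h);
 *
 *		qword_add(bpp, blen, e->class);		    // text field
 *		qword_addhex(bpp, blen, e->key, e->keylen); // binary field
 *		(*bpp)[-1] = '\n';	// turn the trailing space into EOL
 *	}
 *
 * which would typically be kicked off from ->cache_upcall as:
 *
 *	static int my_cache_upcall(struct cache_detail *cd, struct cache_head *h)
 *	{
 *		return sunrpc_cache_pipe_upcall(cd, h, my_cache_request);
 *	}
 */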

/*
 * parse a message from user-space and pass it
 * to an appropriate cache
 * Messages are, like requests, separated into fields by
 * spaces and dequoted as \xHEXSTRING or embedded \nnn octal
 *
 * Message is
 *   reply cachename expiry key ... content....
 *
 * key and content are both parsed by cache
 */

#define isodigit(c) (isdigit(c) && c <= '7')
int qword_get(char **bpp, char *dest, int bufsize)
{
	/* return bytes copied, or -1 on error */
	char *bp = *bpp;
	int len = 0;

	while (*bp == ' ') bp++;

	if (bp[0] == '\\' && bp[1] == 'x') {
		/* HEX STRING */
		bp += 2;
		while (len < bufsize) {
			int h, l;

			h = hex_to_bin(bp[0]);
			if (h < 0)
				break;

			l = hex_to_bin(bp[1]);
			if (l < 0)
				break;

			*dest++ = (h << 4) | l;
			bp += 2;
			len++;
		}
	} else {
		/* text with \nnn octal quoting */
		while (*bp != ' ' && *bp != '\n' && *bp && len < bufsize-1) {
			if (*bp == '\\' &&
			    isodigit(bp[1]) && (bp[1] <= '3') &&
			    isodigit(bp[2]) &&
			    isodigit(bp[3])) {
				int byte = (*++bp - '0');
				bp++;
				byte = (byte << 3) | (*bp++ - '0');
				byte = (byte << 3) | (*bp++ - '0');
				*dest++ = byte;
				len++;
			} else {
				*dest++ = *bp++;
				len++;
			}
		}
	}

	if (*bp != ' ' && *bp != '\n' && *bp != '\0')
		return -1;
	while (*bp == ' ') bp++;
	*bpp = bp;
	*dest = '\0';
	return len;
}
EXPORT_SYMBOL_GPL(qword_get);
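
/*
 * Example (sketch): a cache_parse implementation walks the downcall line
 * with qword_get() and the get_expiry() helper from sunrpc/cache.h (which
 * consumes a field through the same cursor).  The field layout and names
 * here are hypothetical; each cache defines its own.
 *
 *	static int my_cache_parse(struct cache_detail *cd, char *mesg, int mlen)
 *	{
 *		char class[32], key[128];
 *		time_t expiry;
 *		int n;
 *
 *		if (mesg[mlen - 1] != '\n')
 *			return -EINVAL;
 *		if (qword_get(&mesg, class, sizeof(class)) <= 0)
 *			return -EINVAL;
 *		expiry = get_expiry(&mesg);
 *		if (!expiry)
 *			return -EINVAL;
 *		n = qword_get(&mesg, key, sizeof(key));
 *		if (n < 0)
 *			return -EINVAL;
 *		// look up the entry for (class, key) and fold the new
 *		// data in with sunrpc_cache_update() ...
 *		return 0;
 *	}
 */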

/*
 * support /proc/sunrpc/cache/$CACHENAME/content
 * as a seqfile.
 * We call ->cache_show passing NULL for the item to
 * get a header, then pass each real item in the cache
 */

struct handle {
	struct cache_detail *cd;
};

static void *c_start(struct seq_file *m, loff_t *pos)
	__acquires(cd->hash_lock)
{
	loff_t n = *pos;
	unsigned int hash, entry;
	struct cache_head *ch;
	struct cache_detail *cd = ((struct handle *)m->private)->cd;

	read_lock(&cd->hash_lock);
	if (!n--)
		return SEQ_START_TOKEN;
	hash = n >> 32;
	entry = n & ((1LL<<32) - 1);

	for (ch = cd->hash_table[hash]; ch; ch = ch->next)
		if (!entry--)
			return ch;
	n &= ~((1LL<<32) - 1);
	do {
		hash++;
		n += 1LL<<32;
	} while (hash < cd->hash_size &&
		 cd->hash_table[hash] == NULL);
	if (hash >= cd->hash_size)
		return NULL;
	*pos = n+1;
	return cd->hash_table[hash];
}

static void *c_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct cache_head *ch = p;
	int hash = (*pos >> 32);
	struct cache_detail *cd = ((struct handle *)m->private)->cd;

	if (p == SEQ_START_TOKEN)
		hash = 0;
	else if (ch->next == NULL) {
		hash++;
		*pos += 1LL<<32;
	} else {
		++*pos;
		return ch->next;
	}
	*pos &= ~((1LL<<32) - 1);
	while (hash < cd->hash_size &&
	       cd->hash_table[hash] == NULL) {
		hash++;
		*pos += 1LL<<32;
	}
	if (hash >= cd->hash_size)
		return NULL;
	++*pos;
	return cd->hash_table[hash];
}

static void c_stop(struct seq_file *m, void *p)
	__releases(cd->hash_lock)
{
	struct cache_detail *cd = ((struct handle *)m->private)->cd;
	read_unlock(&cd->hash_lock);
}

static int c_show(struct seq_file *m, void *p)
{
	struct cache_head *cp = p;
	struct cache_detail *cd = ((struct handle *)m->private)->cd;

	if (p == SEQ_START_TOKEN)
		return cd->cache_show(m, cd, NULL);

	ifdebug(CACHE)
		seq_printf(m, "# expiry=%ld refcnt=%d flags=%lx\n",
			   convert_to_wallclock(cp->expiry_time),
			   atomic_read(&cp->ref.refcount), cp->flags);
	cache_get(cp);
	if (cache_check(cd, cp, NULL))
		/* cache_check does a cache_put on failure */
		seq_printf(m, "# ");
	else
		cache_put(cp, cd);

	return cd->cache_show(m, cd, cp);
}

static const struct seq_operations cache_content_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show,
};

static int content_open(struct inode *inode, struct file *file,
			struct cache_detail *cd)
{
	struct handle *han;

	if (!cd || !try_module_get(cd->owner))
		return -EACCES;
	han = __seq_open_private(file, &cache_content_op, sizeof(*han));
	if (han == NULL) {
		module_put(cd->owner);
		return -ENOMEM;
	}

	han->cd = cd;
	return 0;
}

static int content_release(struct inode *inode, struct file *file,
			   struct cache_detail *cd)
{
	int ret = seq_release_private(inode, file);
	module_put(cd->owner);
	return ret;
}

static int open_flush(struct inode *inode, struct file *file,
		      struct cache_detail *cd)
{
	if (!cd || !try_module_get(cd->owner))
		return -EACCES;
	return nonseekable_open(inode, file);
}

static int release_flush(struct inode *inode, struct file *file,
			 struct cache_detail *cd)
{
	module_put(cd->owner);
	return 0;
}

static ssize_t read_flush(struct file *file, char __user *buf,
			  size_t count, loff_t *ppos,
			  struct cache_detail *cd)
{
	char tbuf[20];
	unsigned long p = *ppos;
	size_t len;

	sprintf(tbuf, "%lu\n", convert_to_wallclock(cd->flush_time));
	len = strlen(tbuf);
	if (p >= len)
		return 0;
	len -= p;
	if (len > count)
		len = count;
	if (copy_to_user(buf, (void *)(tbuf + p), len))
		return -EFAULT;
	*ppos += len;
	return len;
}

static ssize_t write_flush(struct file *file, const char __user *buf,
			   size_t count, loff_t *ppos,
			   struct cache_detail *cd)
{
	char tbuf[20];
	char *bp, *ep;

	if (*ppos || count > sizeof(tbuf)-1)
		return -EINVAL;
	if (copy_from_user(tbuf, buf, count))
		return -EFAULT;
	tbuf[count] = 0;
	simple_strtoul(tbuf, &ep, 0);
	if (*ep && *ep != '\n')
		return -EINVAL;

	bp = tbuf;
	cd->flush_time = get_expiry(&bp);
	cd->nextcheck = seconds_since_boot();
	cache_flush();

	*ppos += count;
	return count;
}
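
/*
 * Example (illustrative): userspace invalidates every entry refreshed
 * before a given wallclock time by writing that time to the flush file.
 * The usual idiom from a shell (path illustrative):
 *
 *	echo $(date +%s) > /proc/net/rpc/auth.unix.ip/flush
 *
 * or, equivalently, from C:
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <time.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/proc/net/rpc/auth.unix.ip/flush", O_WRONLY);
 *	char now[32];
 *	int len = snprintf(now, sizeof(now), "%ld\n", (long)time(NULL));
 *
 *	if (fd >= 0 && write(fd, now, len) != len)
 *		perror("flush");
 */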

static ssize_t cache_read_procfs(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;

	return cache_read(filp, buf, count, ppos, cd);
}

static ssize_t cache_write_procfs(struct file *filp, const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;

	return cache_write(filp, buf, count, ppos, cd);
}

static unsigned int cache_poll_procfs(struct file *filp, poll_table *wait)
{
	struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;

	return cache_poll(filp, wait, cd);
}

static long cache_ioctl_procfs(struct file *filp,
			       unsigned int cmd, unsigned long arg)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct cache_detail *cd = PDE(inode)->data;

	return cache_ioctl(inode, filp, cmd, arg, cd);
}

static int cache_open_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE(inode)->data;

	return cache_open(inode, filp, cd);
}

static int cache_release_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE(inode)->data;

	return cache_release(inode, filp, cd);
}

static const struct file_operations cache_file_operations_procfs = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= cache_read_procfs,
	.write		= cache_write_procfs,
	.poll		= cache_poll_procfs,
	.unlocked_ioctl	= cache_ioctl_procfs, /* for FIONREAD */
	.open		= cache_open_procfs,
	.release	= cache_release_procfs,
};

static int content_open_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE(inode)->data;

	return content_open(inode, filp, cd);
}

static int content_release_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE(inode)->data;

	return content_release(inode, filp, cd);
}

static const struct file_operations content_file_operations_procfs = {
	.open		= content_open_procfs,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= content_release_procfs,
};

static int open_flush_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE(inode)->data;

	return open_flush(inode, filp, cd);
}

static int release_flush_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE(inode)->data;

	return release_flush(inode, filp, cd);
}

static ssize_t read_flush_procfs(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;

	return read_flush(filp, buf, count, ppos, cd);
}

static ssize_t write_flush_procfs(struct file *filp,
				  const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;

	return write_flush(filp, buf, count, ppos, cd);
}

static const struct file_operations cache_flush_operations_procfs = {
	.open		= open_flush_procfs,
	.read		= read_flush_procfs,
	.write		= write_flush_procfs,
	.release	= release_flush_procfs,
	.llseek		= no_llseek,
};

static void remove_cache_proc_entries(struct cache_detail *cd, struct net *net)
{
	struct sunrpc_net *sn;

	if (cd->u.procfs.proc_ent == NULL)
		return;
	if (cd->u.procfs.flush_ent)
		remove_proc_entry("flush", cd->u.procfs.proc_ent);
	if (cd->u.procfs.channel_ent)
		remove_proc_entry("channel", cd->u.procfs.proc_ent);
	if (cd->u.procfs.content_ent)
		remove_proc_entry("content", cd->u.procfs.proc_ent);
	cd->u.procfs.proc_ent = NULL;
	sn = net_generic(net, sunrpc_net_id);
	remove_proc_entry(cd->name, sn->proc_net_rpc);
}

#ifdef CONFIG_PROC_FS
static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
{
	struct proc_dir_entry *p;
	struct sunrpc_net *sn;

	sn = net_generic(net, sunrpc_net_id);
	cd->u.procfs.proc_ent = proc_mkdir(cd->name, sn->proc_net_rpc);
	if (cd->u.procfs.proc_ent == NULL)
		goto out_nomem;
	cd->u.procfs.channel_ent = NULL;
	cd->u.procfs.content_ent = NULL;

	p = proc_create_data("flush", S_IFREG|S_IRUSR|S_IWUSR,
			     cd->u.procfs.proc_ent,
			     &cache_flush_operations_procfs, cd);
	cd->u.procfs.flush_ent = p;
	if (p == NULL)
		goto out_nomem;

	if (cd->cache_upcall || cd->cache_parse) {
		p = proc_create_data("channel", S_IFREG|S_IRUSR|S_IWUSR,
				     cd->u.procfs.proc_ent,
				     &cache_file_operations_procfs, cd);
		cd->u.procfs.channel_ent = p;
		if (p == NULL)
			goto out_nomem;
	}
	if (cd->cache_show) {
		p = proc_create_data("content", S_IFREG|S_IRUSR|S_IWUSR,
				     cd->u.procfs.proc_ent,
				     &content_file_operations_procfs, cd);
		cd->u.procfs.content_ent = p;
		if (p == NULL)
			goto out_nomem;
	}
	return 0;
out_nomem:
	remove_cache_proc_entries(cd, net);
	return -ENOMEM;
}
#else /* CONFIG_PROC_FS */
static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
{
	return 0;
}
#endif

void __init cache_initialize(void)
{
	INIT_DELAYED_WORK_DEFERRABLE(&cache_cleaner, do_cache_clean);
}

int cache_register_net(struct cache_detail *cd, struct net *net)
{
	int ret;

	sunrpc_init_cache_detail(cd);
	ret = create_cache_proc_entries(cd, net);
	if (ret)
		sunrpc_destroy_cache_detail(cd);
	return ret;
}
EXPORT_SYMBOL_GPL(cache_register_net);

void cache_unregister_net(struct cache_detail *cd, struct net *net)
{
	remove_cache_proc_entries(cd, net);
	sunrpc_destroy_cache_detail(cd);
}
EXPORT_SYMBOL_GPL(cache_unregister_net);

struct cache_detail *cache_create_net(struct cache_detail *tmpl, struct net *net)
{
	struct cache_detail *cd;

	cd = kmemdup(tmpl, sizeof(struct cache_detail), GFP_KERNEL);
	if (cd == NULL)
		return ERR_PTR(-ENOMEM);

	cd->hash_table = kzalloc(cd->hash_size * sizeof(struct cache_head *),
				 GFP_KERNEL);
	if (cd->hash_table == NULL) {
		kfree(cd);
		return ERR_PTR(-ENOMEM);
	}
	cd->net = net;
	return cd;
}
EXPORT_SYMBOL_GPL(cache_create_net);

void cache_destroy_net(struct cache_detail *cd, struct net *net)
{
	kfree(cd->hash_table);
	kfree(cd);
}
EXPORT_SYMBOL_GPL(cache_destroy_net);
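
/*
 * Example (sketch): the per-net registration pattern built from the
 * helpers above.  A static template cache_detail (name, hash_size and
 * methods filled in) is cloned per namespace.  'my_cache_template' and
 * the my_net() per-net storage are hypothetical.
 *
 *	static int __net_init my_cache_init_net(struct net *net)
 *	{
 *		struct cache_detail *cd;
 *		int err;
 *
 *		cd = cache_create_net(&my_cache_template, net);
 *		if (IS_ERR(cd))
 *			return PTR_ERR(cd);
 *		err = cache_register_net(cd, net);
 *		if (err) {
 *			cache_destroy_net(cd, net);
 *			return err;
 *		}
 *		my_net(net)->cd = cd;	// stash for the exit hook
 *		return 0;
 *	}
 *
 *	static void __net_exit my_cache_exit_net(struct net *net)
 *	{
 *		struct cache_detail *cd = my_net(net)->cd;
 *
 *		cache_unregister_net(cd, net);
 *		cache_destroy_net(cd, net);
 *	}
 */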

static ssize_t cache_read_pipefs(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private;

	return cache_read(filp, buf, count, ppos, cd);
}

static ssize_t cache_write_pipefs(struct file *filp, const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private;

	return cache_write(filp, buf, count, ppos, cd);
}

static unsigned int cache_poll_pipefs(struct file *filp, poll_table *wait)
{
	struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private;

	return cache_poll(filp, wait, cd);
}

static long cache_ioctl_pipefs(struct file *filp,
			       unsigned int cmd, unsigned long arg)
{
	struct inode *inode = filp->f_dentry->d_inode;
	struct cache_detail *cd = RPC_I(inode)->private;

	return cache_ioctl(inode, filp, cmd, arg, cd);
}

static int cache_open_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return cache_open(inode, filp, cd);
}

static int cache_release_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return cache_release(inode, filp, cd);
}

const struct file_operations cache_file_operations_pipefs = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= cache_read_pipefs,
	.write		= cache_write_pipefs,
	.poll		= cache_poll_pipefs,
	.unlocked_ioctl	= cache_ioctl_pipefs, /* for FIONREAD */
	.open		= cache_open_pipefs,
	.release	= cache_release_pipefs,
};

static int content_open_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return content_open(inode, filp, cd);
}

static int content_release_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return content_release(inode, filp, cd);
}

const struct file_operations content_file_operations_pipefs = {
	.open		= content_open_pipefs,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= content_release_pipefs,
};

static int open_flush_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return open_flush(inode, filp, cd);
}

static int release_flush_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return release_flush(inode, filp, cd);
}

static ssize_t read_flush_pipefs(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private;

	return read_flush(filp, buf, count, ppos, cd);
}

static ssize_t write_flush_pipefs(struct file *filp,
				  const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private;

	return write_flush(filp, buf, count, ppos, cd);
}

const struct file_operations cache_flush_operations_pipefs = {
	.open		= open_flush_pipefs,
	.read		= read_flush_pipefs,
	.write		= write_flush_pipefs,
	.release	= release_flush_pipefs,
	.llseek		= no_llseek,
};

int sunrpc_cache_register_pipefs(struct dentry *parent,
				 const char *name, umode_t umode,
				 struct cache_detail *cd)
{
	struct qstr q;
	struct dentry *dir;
	int ret = 0;

	q.name = name;
	q.len = strlen(name);
	q.hash = full_name_hash(q.name, q.len);
	dir = rpc_create_cache_dir(parent, &q, umode, cd);
	if (!IS_ERR(dir))
		cd->u.pipefs.dir = dir;
	else
		ret = PTR_ERR(dir);
	return ret;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_register_pipefs);

void sunrpc_cache_unregister_pipefs(struct cache_detail *cd)
{
	rpc_remove_cache_dir(cd->u.pipefs.dir);
	cd->u.pipefs.dir = NULL;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_unregister_pipefs);