/*
 * Request reply cache. This is currently a global cache, but this may
 * change in the future and be a per-client cache.
 *
 * This code is heavily inspired by the 44BSD implementation, although
 * it does things a bit differently.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/slab.h>
#include <linux/sunrpc/addr.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/hash.h>
#include <net/checksum.h>

#include "nfsd.h"
#include "cache.h"

#define NFSDDBG_FACILITY	NFSDDBG_REPCACHE

/*
 * We use this value to determine the number of hash buckets from the max
 * cache size, the idea being that when the cache is at its maximum number
 * of entries, then this should be the average number of entries per bucket.
 */
#define TARGET_BUCKET_SIZE	64

struct nfsd_drc_bucket {
	struct list_head lru_head;
	spinlock_t cache_lock;
};

static struct nfsd_drc_bucket	*drc_hashtbl;
static struct kmem_cache	*drc_slab;

/* max number of entries allowed in the cache */
static unsigned int	max_drc_entries;

/* number of significant bits in the hash value */
static unsigned int	maskbits;
static unsigned int	drc_hashsize;

/*
 * Stats and other tracking of the duplicate reply cache.  All of these and
 * the "rc" fields in nfsdstats are updated under a bucket's cache_lock,
 * except for num_drc_entries, which is atomic.
 */

/* total number of entries */
static atomic_t		num_drc_entries;

/* cache misses due only to checksum comparison failures */
static unsigned int	payload_misses;

/* amount of memory (in bytes) currently consumed by the DRC */
static unsigned int	drc_mem_usage;

/* longest hash chain seen */
static unsigned int	longest_chain;

/* size of cache when we saw the longest hash chain */
static unsigned int	longest_chain_cachesize;

static int	nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
static unsigned long nfsd_reply_cache_count(struct shrinker *shrink,
					    struct shrink_control *sc);
static unsigned long nfsd_reply_cache_scan(struct shrinker *shrink,
					   struct shrink_control *sc);

static struct shrinker nfsd_reply_cache_shrinker = {
	.scan_objects = nfsd_reply_cache_scan,
	.count_objects = nfsd_reply_cache_count,
	.seeks	= 1,
};

/*
 * Put a cap on the size of the DRC based on the amount of available
 * low memory in the machine.
 *
 *  64MB:    8192
 * 128MB:   11585
 * 256MB:   16384
 * 512MB:   23170
 *   1GB:   32768
 *   2GB:   46340
 *   4GB:   65536
 *   8GB:   92681
 *  16GB:  131072
 *
 * ...with a hard cap of 256k entries. In the worst case, each entry will be
 * ~1k, so the above numbers should give a rough max of the amount of memory
 * used in k.
 */
static unsigned int
nfsd_cache_size_limit(void)
{
	unsigned int limit;
	unsigned long low_pages = totalram_pages - totalhigh_pages;

	limit = (16 * int_sqrt(low_pages)) << (PAGE_SHIFT-10);
	return min_t(unsigned int, limit, 256*1024);
}
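
/*
 * For illustration, with 4k pages and 1GB of low memory:
 * low_pages = 262144, int_sqrt(262144) = 512, and
 * (16 * 512) << (12 - 10) = 8192 << 2 = 32768, matching the 1GB row
 * in the table above.
 */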

/*
 * Compute the number of hash buckets we need. Divide the max cachesize by
 * the "target" max bucket size, and round up to next power of two.
 */
static unsigned int
nfsd_hashsize(unsigned int limit)
{
	return roundup_pow_of_two(limit / TARGET_BUCKET_SIZE);
}
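
/*
 * For example, max_drc_entries = 32768 gives
 * roundup_pow_of_two(32768 / 64) = 512 buckets, and so maskbits =
 * ilog2(512) = 9 in nfsd_reply_cache_init() below.
 */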

static u32
nfsd_cache_hash(__be32 xid)
{
	return hash_32(be32_to_cpu(xid), maskbits);
}
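
/*
 * hash_32() is a multiplicative hash: it multiplies by a 32-bit
 * golden-ratio constant and keeps the top maskbits bits, so even
 * sequential XIDs from a single client spread across the buckets.
 */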

static struct svc_cacherep *
nfsd_reply_cache_alloc(void)
{
	struct svc_cacherep	*rp;

	rp = kmem_cache_alloc(drc_slab, GFP_KERNEL);
	if (rp) {
		rp->c_state = RC_UNUSED;
		rp->c_type = RC_NOCACHE;
		INIT_LIST_HEAD(&rp->c_lru);
	}
	return rp;
}

static void
nfsd_reply_cache_free_locked(struct svc_cacherep *rp)
{
	if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base) {
		drc_mem_usage -= rp->c_replvec.iov_len;
		kfree(rp->c_replvec.iov_base);
	}
	list_del(&rp->c_lru);
	atomic_dec(&num_drc_entries);
	drc_mem_usage -= sizeof(*rp);
	kmem_cache_free(drc_slab, rp);
}

static void
nfsd_reply_cache_free(struct nfsd_drc_bucket *b, struct svc_cacherep *rp)
{
	spin_lock(&b->cache_lock);
	nfsd_reply_cache_free_locked(rp);
	spin_unlock(&b->cache_lock);
}

int nfsd_reply_cache_init(void)
{
	unsigned int hashsize;
	unsigned int i;
	int status = 0;

	max_drc_entries = nfsd_cache_size_limit();
	atomic_set(&num_drc_entries, 0);
	hashsize = nfsd_hashsize(max_drc_entries);
	maskbits = ilog2(hashsize);

	status = register_shrinker(&nfsd_reply_cache_shrinker);
	if (status)
		return status;

	drc_slab = kmem_cache_create("nfsd_drc", sizeof(struct svc_cacherep),
					0, 0, NULL);
	if (!drc_slab)
		goto out_nomem;

	drc_hashtbl = kcalloc(hashsize, sizeof(*drc_hashtbl), GFP_KERNEL);
	if (!drc_hashtbl)
		goto out_nomem;
	for (i = 0; i < hashsize; i++) {
		INIT_LIST_HEAD(&drc_hashtbl[i].lru_head);
		spin_lock_init(&drc_hashtbl[i].cache_lock);
	}
	drc_hashsize = hashsize;

	return 0;
out_nomem:
	printk(KERN_ERR "nfsd: failed to allocate reply cache\n");
	nfsd_reply_cache_shutdown();
	return -ENOMEM;
}

void nfsd_reply_cache_shutdown(void)
{
	struct svc_cacherep	*rp;
	unsigned int i;

	unregister_shrinker(&nfsd_reply_cache_shrinker);

	for (i = 0; i < drc_hashsize; i++) {
		struct list_head *head = &drc_hashtbl[i].lru_head;
		while (!list_empty(head)) {
			rp = list_first_entry(head, struct svc_cacherep, c_lru);
			nfsd_reply_cache_free_locked(rp);
		}
	}

	kfree(drc_hashtbl);
	drc_hashtbl = NULL;
	drc_hashsize = 0;

	kmem_cache_destroy(drc_slab);
	drc_slab = NULL;
}

/*
 * Move cache entry to end of LRU list, and set its timestamp to now.
 */
static void
lru_put_end(struct nfsd_drc_bucket *b, struct svc_cacherep *rp)
{
	rp->c_timestamp = jiffies;
	list_move_tail(&rp->c_lru, &b->lru_head);
}

static long
prune_bucket(struct nfsd_drc_bucket *b)
{
	struct svc_cacherep *rp, *tmp;
	long freed = 0;

	list_for_each_entry_safe(rp, tmp, &b->lru_head, c_lru) {
		/*
		 * Don't free entries attached to calls that are still
		 * in-progress, but do keep scanning the list.
		 */
		if (rp->c_state == RC_INPROG)
			continue;
		if (atomic_read(&num_drc_entries) <= max_drc_entries &&
		    time_before(jiffies, rp->c_timestamp + RC_EXPIRE))
			break;
		nfsd_reply_cache_free_locked(rp);
		freed++;
	}
	return freed;
}
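
/*
 * Since lru_put_end() always moves touched entries to the tail, each
 * bucket's list is ordered by c_timestamp; once the loop above reaches
 * an unexpired entry while the cache is under its size limit, every
 * later entry is newer still, so breaking out early is safe.
 */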

/*
 * Walk the LRU list and prune off entries that are older than RC_EXPIRE.
 * Also prune the oldest ones when the total exceeds the max number of entries.
 */
static long
prune_cache_entries(void)
{
	unsigned int i;
	long freed = 0;

	for (i = 0; i < drc_hashsize; i++) {
		struct nfsd_drc_bucket *b = &drc_hashtbl[i];

		if (list_empty(&b->lru_head))
			continue;
		spin_lock(&b->cache_lock);
		freed += prune_bucket(b);
		spin_unlock(&b->cache_lock);
	}
	return freed;
}

static unsigned long
nfsd_reply_cache_count(struct shrinker *shrink, struct shrink_control *sc)
{
	return atomic_read(&num_drc_entries);
}

static unsigned long
nfsd_reply_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	return prune_cache_entries();
}

/*
 * Walk an xdr_buf and get a checksum of at most the first RC_CSUMLEN bytes
 */
static __wsum
nfsd_cache_csum(struct svc_rqst *rqstp)
{
	int idx;
	unsigned int base;
	__wsum csum;
	struct xdr_buf *buf = &rqstp->rq_arg;
	const unsigned char *p = buf->head[0].iov_base;
	size_t csum_len = min_t(size_t, buf->head[0].iov_len + buf->page_len,
				RC_CSUMLEN);
	size_t len = min(buf->head[0].iov_len, csum_len);

	/* rq_arg.head first */
	csum = csum_partial(p, len, 0);
	csum_len -= len;

	/* Continue into page array */
	idx = buf->page_base / PAGE_SIZE;
	base = buf->page_base & ~PAGE_MASK;
	while (csum_len) {
		p = page_address(buf->pages[idx]) + base;
		len = min_t(size_t, PAGE_SIZE - base, csum_len);
		csum = csum_partial(p, len, csum);
		csum_len -= len;
		base = 0;
		++idx;
	}
	return csum;
}
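
/*
 * For example, with a 120-byte head, at least 136 bytes of page data,
 * and RC_CSUMLEN of 256 (its value in cache.h at the time of writing),
 * the first csum_partial() call covers the whole head and the loop
 * then pulls the remaining 136 bytes from the page array, crossing
 * into a second page if page_base is within 136 bytes of the end of
 * the first one.
 */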

static bool
nfsd_cache_match(struct svc_rqst *rqstp, __wsum csum, struct svc_cacherep *rp)
{
	/* Check RPC XID first */
	if (rqstp->rq_xid != rp->c_xid)
		return false;
	/* compare checksum of NFS data */
	if (csum != rp->c_csum) {
		++payload_misses;
		return false;
	}

	/* Other discriminators */
	if (rqstp->rq_proc != rp->c_proc ||
	    rqstp->rq_prot != rp->c_prot ||
	    rqstp->rq_vers != rp->c_vers ||
	    rqstp->rq_arg.len != rp->c_len ||
	    !rpc_cmp_addr(svc_addr(rqstp), (struct sockaddr *)&rp->c_addr) ||
	    rpc_get_port(svc_addr(rqstp)) != rpc_get_port((struct sockaddr *)&rp->c_addr))
		return false;

	return true;
}
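
/*
 * The XID is checked first because it rejects nearly every non-match
 * with a single comparison; checking the checksum next means that
 * payload_misses counts precisely the suspicious case where a client
 * reused an XID for a different request.
 */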

/*
 * Search the request hash for an entry that matches the given rqstp.
 * Must be called with the bucket's cache_lock held. Returns the found
 * entry or NULL if none matches.
 */
static struct svc_cacherep *
nfsd_cache_search(struct nfsd_drc_bucket *b, struct svc_rqst *rqstp,
		__wsum csum)
{
	struct svc_cacherep	*rp, *ret = NULL;
	struct list_head	*rh = &b->lru_head;
	unsigned int		entries = 0;

	list_for_each_entry(rp, rh, c_lru) {
		++entries;
		if (nfsd_cache_match(rqstp, csum, rp)) {
			ret = rp;
			break;
		}
	}

	/* tally hash chain length stats */
	if (entries > longest_chain) {
		longest_chain = entries;
		longest_chain_cachesize = atomic_read(&num_drc_entries);
	} else if (entries == longest_chain) {
		/* prefer to keep the smallest cachesize possible here */
		longest_chain_cachesize = min_t(unsigned int,
				longest_chain_cachesize,
				atomic_read(&num_drc_entries));
	}

	return ret;
}

/*
 * Try to find an entry matching the current call in the cache. Since the
 * common case is a cache miss followed by an insert, we preallocate an
 * entry before taking the bucket lock, prune the bucket, and then search
 * it; if a matching entry turns up, the preallocated one is freed again.
 */
int
nfsd_cache_lookup(struct svc_rqst *rqstp)
{
	struct svc_cacherep	*rp, *found;
	__be32			xid = rqstp->rq_xid;
	u32			proto = rqstp->rq_prot,
				vers = rqstp->rq_vers,
				proc = rqstp->rq_proc;
	__wsum			csum;
	u32			hash = nfsd_cache_hash(xid);
	struct nfsd_drc_bucket	*b = &drc_hashtbl[hash];
	unsigned long		age;
	int			type = rqstp->rq_cachetype;
	int			rtn = RC_DOIT;

	rqstp->rq_cacherep = NULL;
	if (type == RC_NOCACHE) {
		nfsdstats.rcnocache++;
		return rtn;
	}

	csum = nfsd_cache_csum(rqstp);

	/*
	 * Since the common case is a cache miss followed by an insert,
	 * preallocate an entry.
	 */
	rp = nfsd_reply_cache_alloc();
	spin_lock(&b->cache_lock);
	if (likely(rp)) {
		atomic_inc(&num_drc_entries);
		drc_mem_usage += sizeof(*rp);
	}

	/* go ahead and prune the cache */
	prune_bucket(b);

	found = nfsd_cache_search(b, rqstp, csum);
	if (found) {
		if (likely(rp))
			nfsd_reply_cache_free_locked(rp);
		rp = found;
		goto found_entry;
	}

	if (!rp) {
		dprintk("nfsd: unable to allocate DRC entry!\n");
		goto out;
	}

	nfsdstats.rcmisses++;
	rqstp->rq_cacherep = rp;
	rp->c_state = RC_INPROG;
	rp->c_xid = xid;
	rp->c_proc = proc;
	rpc_copy_addr((struct sockaddr *)&rp->c_addr, svc_addr(rqstp));
	rpc_set_port((struct sockaddr *)&rp->c_addr, rpc_get_port(svc_addr(rqstp)));
	rp->c_prot = proto;
	rp->c_vers = vers;
	rp->c_len = rqstp->rq_arg.len;
	rp->c_csum = csum;

	lru_put_end(b, rp);

	/* release any buffer */
	if (rp->c_type == RC_REPLBUFF) {
		drc_mem_usage -= rp->c_replvec.iov_len;
		kfree(rp->c_replvec.iov_base);
		rp->c_replvec.iov_base = NULL;
	}
	rp->c_type = RC_NOCACHE;
out:
	spin_unlock(&b->cache_lock);
	return rtn;

found_entry:
	nfsdstats.rchits++;
	/* We found a matching entry which is either in progress or done. */
	age = jiffies - rp->c_timestamp;
	lru_put_end(b, rp);

	rtn = RC_DROPIT;
	/* Request being processed or excessive rexmits */
	if (rp->c_state == RC_INPROG || age < RC_DELAY)
		goto out;

	/* From the hall of fame of impractical attacks:
	 * Is this a user who tries to snoop on the cache? */
	rtn = RC_DOIT;
	if (!test_bit(RQ_SECURE, &rqstp->rq_flags) && rp->c_secure)
		goto out;

	/* Compose RPC reply header */
	switch (rp->c_type) {
	case RC_NOCACHE:
		break;
	case RC_REPLSTAT:
		svc_putu32(&rqstp->rq_res.head[0], rp->c_replstat);
		rtn = RC_REPLY;
		break;
	case RC_REPLBUFF:
		if (!nfsd_cache_append(rqstp, &rp->c_replvec))
			goto out;	/* should not happen */
		rtn = RC_REPLY;
		break;
	default:
		printk(KERN_WARNING "nfsd: bad repcache type %d\n", rp->c_type);
		nfsd_reply_cache_free_locked(rp);
	}

	goto out;
}
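
/*
 * For context, the caller pairs nfsd_cache_lookup() with
 * nfsd_cache_update() roughly as sketched below. This is a simplified
 * illustration, not the real nfsd_dispatch(), and execute_procedure()
 * is a hypothetical stand-in for decoding the arguments, running the
 * NFS procedure and encoding the reply into rq_res.
 */
#if 0
static int example_dispatch(struct svc_rqst *rqstp)
{
	__be32 *statp;

	switch (nfsd_cache_lookup(rqstp)) {
	case RC_DROPIT:		/* in progress or excessive rexmits: drop */
		return 0;
	case RC_REPLY:		/* cached reply copied into rq_res: send it */
		return 1;
	case RC_DOIT:		/* not cached: execute the procedure */
		break;
	}

	statp = execute_procedure(rqstp);	/* hypothetical helper */
	nfsd_cache_update(rqstp, rqstp->rq_cachetype, statp);
	return 1;
}
#endif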

/*
 * Update a cache entry. This is called from nfsd_dispatch when
 * the procedure has been executed and the complete reply is in
 * rqstp->rq_res.
 *
 * We're copying around data here rather than swapping buffers because
 * the toplevel loop requires max-sized buffers, which would be a waste
 * of memory for a cache with a max reply size of 100 bytes (diropokres).
 *
 * If we should start to use different types of cache entries tailored
 * specifically for attrstat and fh's, we may save even more space.
 *
 * Also note that a cachetype of RC_NOCACHE can legally be passed when
 * nfsd failed to encode a reply that otherwise would have been cached.
 * In this case, nfsd_cache_update is called with statp == NULL.
 */
void
nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
{
	struct svc_cacherep *rp = rqstp->rq_cacherep;
	struct kvec	*resv = &rqstp->rq_res.head[0], *cachv;
	u32		hash;
	struct nfsd_drc_bucket *b;
	int		len;
	size_t		bufsize = 0;

	if (!rp)
		return;

	hash = nfsd_cache_hash(rp->c_xid);
	b = &drc_hashtbl[hash];

	len = resv->iov_len - ((char *)statp - (char *)resv->iov_base);
	len >>= 2;

	/* Don't cache excessive amounts of data and XDR failures */
	if (!statp || len > (256 >> 2)) {
		nfsd_reply_cache_free(b, rp);
		return;
	}

	switch (cachetype) {
	case RC_REPLSTAT:
		if (len != 1)
			printk("nfsd: RC_REPLSTAT/reply len %d!\n", len);
		rp->c_replstat = *statp;
		break;
	case RC_REPLBUFF:
		cachv = &rp->c_replvec;
		bufsize = len << 2;
		cachv->iov_base = kmalloc(bufsize, GFP_KERNEL);
		if (!cachv->iov_base) {
			nfsd_reply_cache_free(b, rp);
			return;
		}
		cachv->iov_len = bufsize;
		memcpy(cachv->iov_base, statp, bufsize);
		break;
	case RC_NOCACHE:
		nfsd_reply_cache_free(b, rp);
		return;
	}
	spin_lock(&b->cache_lock);
	drc_mem_usage += bufsize;
	lru_put_end(b, rp);
	rp->c_secure = test_bit(RQ_SECURE, &rqstp->rq_flags);
	rp->c_type = cachetype;
	rp->c_state = RC_DONE;
	spin_unlock(&b->cache_lock);
	return;
}

/*
 * Copy cached reply to current reply buffer. Should always fit.
 * FIXME as reply is in a page, we should just attach the page, and
 * keep a refcount....
 */
static int
nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
{
	struct kvec	*vec = &rqstp->rq_res.head[0];

	if (vec->iov_len + data->iov_len > PAGE_SIZE) {
		printk(KERN_WARNING "nfsd: cached reply too large (%zd).\n",
				data->iov_len);
		return 0;
	}
	memcpy((char *)vec->iov_base + vec->iov_len, data->iov_base, data->iov_len);
	vec->iov_len += data->iov_len;
	return 1;
}

/*
 * Note that fields may be added, removed or reordered in the future. Programs
 * scraping this file for info should test the labels to ensure they're
 * getting the correct field.
 */
static int nfsd_reply_cache_stats_show(struct seq_file *m, void *v)
{
	seq_printf(m, "max entries: %u\n", max_drc_entries);
	seq_printf(m, "num entries: %u\n",
			atomic_read(&num_drc_entries));
	seq_printf(m, "hash buckets: %u\n", 1 << maskbits);
	seq_printf(m, "mem usage: %u\n", drc_mem_usage);
	seq_printf(m, "cache hits: %u\n", nfsdstats.rchits);
	seq_printf(m, "cache misses: %u\n", nfsdstats.rcmisses);
	seq_printf(m, "not cached: %u\n", nfsdstats.rcnocache);
	seq_printf(m, "payload misses: %u\n", payload_misses);
	seq_printf(m, "longest chain len: %u\n", longest_chain);
	seq_printf(m, "cachesize at longest: %u\n", longest_chain_cachesize);
	return 0;
}
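
/*
 * Reading the corresponding proc file (registered as
 * "reply_cache_stats" under the nfsd filesystem, typically mounted at
 * /proc/fs/nfsd) yields one "label: value" line per counter, e.g.
 * (values illustrative only):
 *
 *	max entries: 92681
 *	num entries: 14
 *	hash buckets: 2048
 *	mem usage: 4424
 *	cache hits: 0
 *	cache misses: 14
 *	not cached: 6
 *	payload misses: 0
 *	longest chain len: 1
 *	cachesize at longest: 1
 */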

int nfsd_reply_cache_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, nfsd_reply_cache_stats_show, NULL);
}