v6.8
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Request reply cache. This is currently a global cache, but this may
  4 * change in the future and be a per-client cache.
  5 *
  6 * This code is heavily inspired by the 44BSD implementation, although
  7 * it does things a bit differently.
  8 *
  9 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 10 */
 11
 12#include <linux/sunrpc/svc_xprt.h>
 13#include <linux/slab.h>
 14#include <linux/vmalloc.h>
 15#include <linux/sunrpc/addr.h>
 16#include <linux/highmem.h>
 17#include <linux/log2.h>
 18#include <linux/hash.h>
 19#include <net/checksum.h>
 20
 21#include "nfsd.h"
 22#include "cache.h"
 23#include "trace.h"
 24
 25/*
 26 * We use this value to determine the number of hash buckets from the max
 27 * cache size, the idea being that when the cache is at its maximum number
 28 * of entries, then this should be the average number of entries per bucket.
 29 */
 30#define TARGET_BUCKET_SIZE	64
 31
 32struct nfsd_drc_bucket {
 33	struct rb_root rb_head;
 34	struct list_head lru_head;
 35	spinlock_t cache_lock;
 36};
 37
 38static struct kmem_cache	*drc_slab;
 39
 40static int	nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
 41static unsigned long nfsd_reply_cache_count(struct shrinker *shrink,
 42					    struct shrink_control *sc);
 43static unsigned long nfsd_reply_cache_scan(struct shrinker *shrink,
 44					   struct shrink_control *sc);
 45
 46/*
 47 * Put a cap on the size of the DRC based on the amount of available
 48 * low memory in the machine.
 49 *
 50 *  64MB:    8192
 51 * 128MB:   11585
 52 * 256MB:   16384
 53 * 512MB:   23170
 54 *   1GB:   32768
 55 *   2GB:   46340
 56 *   4GB:   65536
 57 *   8GB:   92681
 58 *  16GB:  131072
 59 *
 60 * ...with a hard cap of 256k entries. In the worst case, each entry will be
 61 * ~1k, so the above numbers should give a rough max of the amount of memory
 62 * used in k.
 63 *
 64 * XXX: these limits are per-container, so memory used will increase
 65 * linearly with number of containers.  Maybe that's OK.
 66 */
 67static unsigned int
 68nfsd_cache_size_limit(void)
 69{
 70	unsigned int limit;
 71	unsigned long low_pages = totalram_pages() - totalhigh_pages();
 72
 73	limit = (16 * int_sqrt(low_pages)) << (PAGE_SHIFT-10);
 74	return min_t(unsigned int, limit, 256*1024);
 75}
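
The table above is easy to check. Below is a minimal userspace sketch of the same arithmetic, assuming 4 KiB pages (PAGE_SHIFT == 12); it is an illustration rather than kernel code, and its results match the table to within int_sqrt() rounding:

	#include <math.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned long mb[] = { 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384 };

		for (size_t i = 0; i < sizeof(mb) / sizeof(mb[0]); i++) {
			unsigned long low_pages = mb[i] * 1024 / 4;	/* 4 KiB pages */
			unsigned int limit = (16 * (unsigned int)sqrt(low_pages)) << (12 - 10);

			if (limit > 256 * 1024)
				limit = 256 * 1024;	/* hard cap: 256k entries */
			printf("%5luMB: %6u\n", mb[i], limit);
		}
		return 0;
	}
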
 76
 77/*
 78 * Compute the number of hash buckets we need. Divide the max cachesize by
 79 * the "target" max bucket size, and round up to next power of two.
 80 */
 81static unsigned int
 82nfsd_hashsize(unsigned int limit)
 83{
 84	return roundup_pow_of_two(limit / TARGET_BUCKET_SIZE);
 85}
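
For example, the 1GB row in the table above (32768 entries) yields 32768 / 64 = 512 buckets; roundup_pow_of_two() only changes the result when the quotient is not already a power of two (the 512MB row gives 23170 / 64 = 362, rounded up to 512). A standalone sketch of that rounding, for illustration:

	#include <stdio.h>

	/* same result as the kernel's roundup_pow_of_two() for n >= 1 */
	static unsigned int roundup_p2(unsigned int n)
	{
		unsigned int p = 1;

		while (p < n)
			p <<= 1;
		return p;
	}

	int main(void)
	{
		printf("%u\n", roundup_p2(32768 / 64));	/* 512 */
		printf("%u\n", roundup_p2(23170 / 64));	/* 362 -> 512 */
		return 0;
	}
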
 86
 87static struct nfsd_cacherep *
 88nfsd_cacherep_alloc(struct svc_rqst *rqstp, __wsum csum,
 89		    struct nfsd_net *nn)
 90{
 91	struct nfsd_cacherep *rp;
 92
 93	rp = kmem_cache_alloc(drc_slab, GFP_KERNEL);
 94	if (rp) {
 95		rp->c_state = RC_UNUSED;
 96		rp->c_type = RC_NOCACHE;
 97		RB_CLEAR_NODE(&rp->c_node);
 98		INIT_LIST_HEAD(&rp->c_lru);
 99
100		memset(&rp->c_key, 0, sizeof(rp->c_key));
101		rp->c_key.k_xid = rqstp->rq_xid;
102		rp->c_key.k_proc = rqstp->rq_proc;
103		rpc_copy_addr((struct sockaddr *)&rp->c_key.k_addr, svc_addr(rqstp));
104		rpc_set_port((struct sockaddr *)&rp->c_key.k_addr, rpc_get_port(svc_addr(rqstp)));
105		rp->c_key.k_prot = rqstp->rq_prot;
106		rp->c_key.k_vers = rqstp->rq_vers;
107		rp->c_key.k_len = rqstp->rq_arg.len;
108		rp->c_key.k_csum = csum;
109	}
110	return rp;
111}
112
113static void nfsd_cacherep_free(struct nfsd_cacherep *rp)
114{
115	if (rp->c_type == RC_REPLBUFF)
116		kfree(rp->c_replvec.iov_base);
117	kmem_cache_free(drc_slab, rp);
118}
119
120static unsigned long
121nfsd_cacherep_dispose(struct list_head *dispose)
122{
123	struct nfsd_cacherep *rp;
124	unsigned long freed = 0;
125
126	while (!list_empty(dispose)) {
127		rp = list_first_entry(dispose, struct nfsd_cacherep, c_lru);
128		list_del(&rp->c_lru);
129		nfsd_cacherep_free(rp);
130		freed++;
131	}
132	return freed;
133}
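
nfsd_cacherep_dispose() is the back half of a pattern used throughout this file: entries are unlinked onto a private list while the bucket lock is held, then freed only after the lock is dropped, keeping the free calls out of the critical section. Condensed from nfsd_reply_cache_scan() below, the shape is:

	LIST_HEAD(dispose);

	spin_lock(&b->cache_lock);
	/* unlink victims from the tree and LRU while the lock is held ... */
	nfsd_prune_bucket_locked(nn, b, 0, &dispose);
	spin_unlock(&b->cache_lock);

	/* ... then free them with no lock held */
	nfsd_cacherep_dispose(&dispose);
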
134
135static void
136nfsd_cacherep_unlink_locked(struct nfsd_net *nn, struct nfsd_drc_bucket *b,
137			    struct nfsd_cacherep *rp)
138{
139	if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base)
140		nfsd_stats_drc_mem_usage_sub(nn, rp->c_replvec.iov_len);
141	if (rp->c_state != RC_UNUSED) {
142		rb_erase(&rp->c_node, &b->rb_head);
143		list_del(&rp->c_lru);
144		atomic_dec(&nn->num_drc_entries);
145		nfsd_stats_drc_mem_usage_sub(nn, sizeof(*rp));
146	}
147}
148
149static void
150nfsd_reply_cache_free_locked(struct nfsd_drc_bucket *b, struct nfsd_cacherep *rp,
151				struct nfsd_net *nn)
152{
153	nfsd_cacherep_unlink_locked(nn, b, rp);
154	nfsd_cacherep_free(rp);
155}
156
157static void
158nfsd_reply_cache_free(struct nfsd_drc_bucket *b, struct nfsd_cacherep *rp,
159			struct nfsd_net *nn)
160{
161	spin_lock(&b->cache_lock);
162	nfsd_cacherep_unlink_locked(nn, b, rp);
163	spin_unlock(&b->cache_lock);
164	nfsd_cacherep_free(rp);
165}
166
167int nfsd_drc_slab_create(void)
168{
169	drc_slab = kmem_cache_create("nfsd_drc",
170				sizeof(struct nfsd_cacherep), 0, 0, NULL);
171	return drc_slab ? 0: -ENOMEM;
172}
173
174void nfsd_drc_slab_free(void)
175{
176	kmem_cache_destroy(drc_slab);
177}
178
179/**
180 * nfsd_net_reply_cache_init - per net namespace reply cache set-up
181 * @nn: nfsd_net being initialized
182 *
 183 * Returns zero on success; otherwise a negative errno is returned.
184 */
185int nfsd_net_reply_cache_init(struct nfsd_net *nn)
186{
187	return nfsd_percpu_counters_init(nn->counter, NFSD_NET_COUNTERS_NUM);
188}
189
190/**
191 * nfsd_net_reply_cache_destroy - per net namespace reply cache tear-down
192 * @nn: nfsd_net being freed
193 *
194 */
195void nfsd_net_reply_cache_destroy(struct nfsd_net *nn)
196{
197	nfsd_percpu_counters_destroy(nn->counter, NFSD_NET_COUNTERS_NUM);
198}
199
200int nfsd_reply_cache_init(struct nfsd_net *nn)
201{
202	unsigned int hashsize;
203	unsigned int i;
204
205	nn->max_drc_entries = nfsd_cache_size_limit();
206	atomic_set(&nn->num_drc_entries, 0);
207	hashsize = nfsd_hashsize(nn->max_drc_entries);
208	nn->maskbits = ilog2(hashsize);
209
210	nn->drc_hashtbl = kvzalloc(array_size(hashsize,
211				sizeof(*nn->drc_hashtbl)), GFP_KERNEL);
212	if (!nn->drc_hashtbl)
213		return -ENOMEM;
214
215	nn->nfsd_reply_cache_shrinker = shrinker_alloc(0, "nfsd-reply:%s",
216						       nn->nfsd_name);
217	if (!nn->nfsd_reply_cache_shrinker)
218		goto out_shrinker;
219
220	nn->nfsd_reply_cache_shrinker->scan_objects = nfsd_reply_cache_scan;
221	nn->nfsd_reply_cache_shrinker->count_objects = nfsd_reply_cache_count;
222	nn->nfsd_reply_cache_shrinker->seeks = 1;
223	nn->nfsd_reply_cache_shrinker->private_data = nn;
224
225	shrinker_register(nn->nfsd_reply_cache_shrinker);
226
227	for (i = 0; i < hashsize; i++) {
228		INIT_LIST_HEAD(&nn->drc_hashtbl[i].lru_head);
229		spin_lock_init(&nn->drc_hashtbl[i].cache_lock);
230	}
231	nn->drc_hashsize = hashsize;
232
233	return 0;
234out_shrinker:
235	kvfree(nn->drc_hashtbl);
236	printk(KERN_ERR "nfsd: failed to allocate reply cache\n");
237	return -ENOMEM;
238}
239
240void nfsd_reply_cache_shutdown(struct nfsd_net *nn)
241{
242	struct nfsd_cacherep *rp;
243	unsigned int i;
244
245	shrinker_free(nn->nfsd_reply_cache_shrinker);
246
247	for (i = 0; i < nn->drc_hashsize; i++) {
248		struct list_head *head = &nn->drc_hashtbl[i].lru_head;
249		while (!list_empty(head)) {
250			rp = list_first_entry(head, struct nfsd_cacherep, c_lru);
251			nfsd_reply_cache_free_locked(&nn->drc_hashtbl[i],
252									rp, nn);
253		}
254	}
255
256	kvfree(nn->drc_hashtbl);
257	nn->drc_hashtbl = NULL;
258	nn->drc_hashsize = 0;
259
260}
261
262/*
 263 * Move cache entry to end of LRU list and refresh its timestamp. Pruning
 264 * is done inline and by the shrinker; there is no separate cleaner job.
265 */
266static void
267lru_put_end(struct nfsd_drc_bucket *b, struct nfsd_cacherep *rp)
268{
269	rp->c_timestamp = jiffies;
270	list_move_tail(&rp->c_lru, &b->lru_head);
271}
272
273static noinline struct nfsd_drc_bucket *
274nfsd_cache_bucket_find(__be32 xid, struct nfsd_net *nn)
275{
276	unsigned int hash = hash_32((__force u32)xid, nn->maskbits);
277
278	return &nn->drc_hashtbl[hash];
279}
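
hash_32() is the kernel's multiplicative hash: the XID is multiplied by a 32-bit golden-ratio constant and the top maskbits bits select the bucket. A userspace sketch of the same mapping, using the constant from <linux/hash.h> (illustration only):

	#include <stdint.h>
	#include <stdio.h>

	#define GOLDEN_RATIO_32 0x61C88647u	/* value used by <linux/hash.h> */

	static uint32_t hash_32_sketch(uint32_t val, unsigned int bits)
	{
		return (val * GOLDEN_RATIO_32) >> (32 - bits);
	}

	int main(void)
	{
		/* with 512 buckets, nn->maskbits == 9 */
		printf("xid 0x12345678 -> bucket %u\n", hash_32_sketch(0x12345678, 9));
		return 0;
	}
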
280
281/*
282 * Remove and return no more than @max expired entries in bucket @b.
283 * If @max is zero, do not limit the number of removed entries.
284 */
285static void
286nfsd_prune_bucket_locked(struct nfsd_net *nn, struct nfsd_drc_bucket *b,
287			 unsigned int max, struct list_head *dispose)
288{
289	unsigned long expiry = jiffies - RC_EXPIRE;
290	struct nfsd_cacherep *rp, *tmp;
291	unsigned int freed = 0;
292
293	lockdep_assert_held(&b->cache_lock);
294
295	/* The bucket LRU is ordered oldest-first. */
296	list_for_each_entry_safe(rp, tmp, &b->lru_head, c_lru) {
297		/*
298		 * Don't free entries attached to calls that are still
299		 * in-progress, but do keep scanning the list.
300		 */
301		if (rp->c_state == RC_INPROG)
302			continue;
303
304		if (atomic_read(&nn->num_drc_entries) <= nn->max_drc_entries &&
 305		    time_before(expiry, rp->c_timestamp))
306			break;
307
308		nfsd_cacherep_unlink_locked(nn, b, rp);
309		list_add(&rp->c_lru, dispose);
310
311		if (max && ++freed > max)
312			break;
313	}
314}
315
316/**
317 * nfsd_reply_cache_count - count_objects method for the DRC shrinker
318 * @shrink: our registered shrinker context
319 * @sc: garbage collection parameters
320 *
321 * Returns the total number of entries in the duplicate reply cache. To
322 * keep things simple and quick, this is not the number of expired entries
323 * in the cache (ie, the number that would be removed by a call to
324 * nfsd_reply_cache_scan).
325 */
326static unsigned long
327nfsd_reply_cache_count(struct shrinker *shrink, struct shrink_control *sc)
328{
329	struct nfsd_net *nn = shrink->private_data;
330
331	return atomic_read(&nn->num_drc_entries);
332}
333
334/**
335 * nfsd_reply_cache_scan - scan_objects method for the DRC shrinker
336 * @shrink: our registered shrinker context
337 * @sc: garbage collection parameters
338 *
 339 * Free expired entries on each bucket's LRU list until nr_to_scan
 340 * objects have been freed. Nothing will be released if the cache
 341 * has not exceeded its max_drc_entries limit.
342 *
343 * Returns the number of entries released by this call.
344 */
345static unsigned long
346nfsd_reply_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
347{
348	struct nfsd_net *nn = shrink->private_data;
349	unsigned long freed = 0;
350	LIST_HEAD(dispose);
351	unsigned int i;
352
353	for (i = 0; i < nn->drc_hashsize; i++) {
354		struct nfsd_drc_bucket *b = &nn->drc_hashtbl[i];
355
356		if (list_empty(&b->lru_head))
357			continue;
358
359		spin_lock(&b->cache_lock);
360		nfsd_prune_bucket_locked(nn, b, 0, &dispose);
361		spin_unlock(&b->cache_lock);
362
363		freed += nfsd_cacherep_dispose(&dispose);
364		if (freed > sc->nr_to_scan)
365			break;
366	}
367	return freed;
368}
369
370/**
371 * nfsd_cache_csum - Checksum incoming NFS Call arguments
372 * @buf: buffer containing a whole RPC Call message
373 * @start: starting byte of the NFS Call header
374 * @remaining: size of the NFS Call header, in bytes
375 *
376 * Compute a weak checksum of the leading bytes of an NFS procedure
377 * call header to help verify that a retransmitted Call matches an
378 * entry in the duplicate reply cache.
379 *
380 * To avoid assumptions about how the RPC message is laid out in
381 * @buf and what else it might contain (eg, a GSS MIC suffix), the
382 * caller passes us the exact location and length of the NFS Call
383 * header.
384 *
385 * Returns a 32-bit checksum value, as defined in RFC 793.
386 */
387static __wsum nfsd_cache_csum(struct xdr_buf *buf, unsigned int start,
388			      unsigned int remaining)
389{
390	unsigned int base, len;
391	struct xdr_buf subbuf;
392	__wsum csum = 0;
393	void *p;
394	int idx;
395
396	if (remaining > RC_CSUMLEN)
397		remaining = RC_CSUMLEN;
398	if (xdr_buf_subsegment(buf, &subbuf, start, remaining))
399		return csum;
400
401	/* rq_arg.head first */
402	if (subbuf.head[0].iov_len) {
403		len = min_t(unsigned int, subbuf.head[0].iov_len, remaining);
404		csum = csum_partial(subbuf.head[0].iov_base, len, csum);
405		remaining -= len;
406	}
407
408	/* Continue into page array */
409	idx = subbuf.page_base / PAGE_SIZE;
410	base = subbuf.page_base & ~PAGE_MASK;
411	while (remaining) {
412		p = page_address(subbuf.pages[idx]) + base;
413		len = min_t(unsigned int, PAGE_SIZE - base, remaining);
414		csum = csum_partial(p, len, csum);
415		remaining -= len;
416		base = 0;
417		++idx;
418	}
419	return csum;
420}
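
csum_partial() accumulates the standard Internet (ones'-complement) checksum, so the loop above simply carries that sum across the head kvec and then the page array. A self-contained userspace analogue over a flat buffer, illustrating the arithmetic rather than the kernel's exact implementation:

	#include <stddef.h>
	#include <stdint.h>

	static uint32_t csum_sketch(const uint8_t *p, size_t len, uint32_t sum)
	{
		while (len >= 2) {
			sum += (uint32_t)p[0] << 8 | p[1];
			p += 2;
			len -= 2;
		}
		if (len)			/* odd trailing byte */
			sum += (uint32_t)p[0] << 8;
		while (sum >> 16)		/* fold carries back in */
			sum = (sum & 0xffff) + (sum >> 16);
		return sum;
	}
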
421
422static int
423nfsd_cache_key_cmp(const struct nfsd_cacherep *key,
424		   const struct nfsd_cacherep *rp, struct nfsd_net *nn)
425{
426	if (key->c_key.k_xid == rp->c_key.k_xid &&
427	    key->c_key.k_csum != rp->c_key.k_csum) {
428		nfsd_stats_payload_misses_inc(nn);
429		trace_nfsd_drc_mismatch(nn, key, rp);
430	}
431
432	return memcmp(&key->c_key, &rp->c_key, sizeof(key->c_key));
433}
434
435/*
436 * Search the request hash for an entry that matches the given rqstp.
 437 * Must be called with cache_lock held. Returns the matching entry if
 438 * one is found; otherwise inserts @key into the tree and returns it.
439 */
440static struct nfsd_cacherep *
441nfsd_cache_insert(struct nfsd_drc_bucket *b, struct nfsd_cacherep *key,
442			struct nfsd_net *nn)
443{
444	struct nfsd_cacherep	*rp, *ret = key;
445	struct rb_node		**p = &b->rb_head.rb_node,
446				*parent = NULL;
447	unsigned int		entries = 0;
448	int cmp;
449
450	while (*p != NULL) {
451		++entries;
452		parent = *p;
453		rp = rb_entry(parent, struct nfsd_cacherep, c_node);
454
455		cmp = nfsd_cache_key_cmp(key, rp, nn);
456		if (cmp < 0)
457			p = &parent->rb_left;
458		else if (cmp > 0)
459			p = &parent->rb_right;
460		else {
461			ret = rp;
462			goto out;
463		}
464	}
465	rb_link_node(&key->c_node, parent, p);
466	rb_insert_color(&key->c_node, &b->rb_head);
467out:
468	/* tally hash chain length stats */
469	if (entries > nn->longest_chain) {
470		nn->longest_chain = entries;
471		nn->longest_chain_cachesize = atomic_read(&nn->num_drc_entries);
472	} else if (entries == nn->longest_chain) {
473		/* prefer to keep the smallest cachesize possible here */
474		nn->longest_chain_cachesize = min_t(unsigned int,
475				nn->longest_chain_cachesize,
476				atomic_read(&nn->num_drc_entries));
477	}
478
479	lru_put_end(b, ret);
480	return ret;
481}
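
This is the canonical rbtree descend-then-link idiom; the slightly unusual part is the contract: @key is both the search key and the candidate node, so the caller learns from the return value whether it hit (a different entry comes back) or missed (its own entry is now linked). nfsd_cache_lookup() below consumes it like this (condensed):

	spin_lock(&b->cache_lock);
	found = nfsd_cache_insert(b, rp, nn);
	if (found != rp) {
		/* hit: rp was never linked; free it and use 'found' */
	} else {
		/* miss: rp is now in the tree and on the LRU */
		rp->c_state = RC_INPROG;
	}
	spin_unlock(&b->cache_lock);
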
482
483/**
484 * nfsd_cache_lookup - Find an entry in the duplicate reply cache
485 * @rqstp: Incoming Call to find
486 * @start: starting byte in @rqstp->rq_arg of the NFS Call header
487 * @len: size of the NFS Call header, in bytes
488 * @cacherep: OUT: DRC entry for this request
489 *
 490 * Try to find an entry matching the current call in the cache. Since a
 491 * miss followed by an insert is the common case, a new entry is
 492 * preallocated before the bucket lock is taken. If a matching entry is
 493 * already present, the preallocated entry is freed and the cached one is
 494 * used; otherwise the new entry is inserted and marked RC_INPROG.
495 *
496 * Return values:
497 *   %RC_DOIT: Process the request normally
498 *   %RC_REPLY: Reply from cache
499 *   %RC_DROPIT: Do not process the request further
500 */
501int nfsd_cache_lookup(struct svc_rqst *rqstp, unsigned int start,
502		      unsigned int len, struct nfsd_cacherep **cacherep)
503{
504	struct nfsd_net		*nn;
505	struct nfsd_cacherep	*rp, *found;
506	__wsum			csum;
507	struct nfsd_drc_bucket	*b;
508	int type = rqstp->rq_cachetype;
509	LIST_HEAD(dispose);
510	int rtn = RC_DOIT;
511
512	if (type == RC_NOCACHE) {
513		nfsd_stats_rc_nocache_inc();
514		goto out;
515	}
516
517	csum = nfsd_cache_csum(&rqstp->rq_arg, start, len);
518
519	/*
520	 * Since the common case is a cache miss followed by an insert,
521	 * preallocate an entry.
522	 */
523	nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
524	rp = nfsd_cacherep_alloc(rqstp, csum, nn);
525	if (!rp)
526		goto out;
527
528	b = nfsd_cache_bucket_find(rqstp->rq_xid, nn);
529	spin_lock(&b->cache_lock);
530	found = nfsd_cache_insert(b, rp, nn);
531	if (found != rp)
532		goto found_entry;
533	*cacherep = rp;
534	rp->c_state = RC_INPROG;
535	nfsd_prune_bucket_locked(nn, b, 3, &dispose);
536	spin_unlock(&b->cache_lock);
537
538	nfsd_cacherep_dispose(&dispose);
539
540	nfsd_stats_rc_misses_inc();
541	atomic_inc(&nn->num_drc_entries);
542	nfsd_stats_drc_mem_usage_add(nn, sizeof(*rp));
543	goto out;
544
545found_entry:
546	/* We found a matching entry which is either in progress or done. */
547	nfsd_reply_cache_free_locked(NULL, rp, nn);
548	nfsd_stats_rc_hits_inc();
549	rtn = RC_DROPIT;
550	rp = found;
551
552	/* Request being processed */
553	if (rp->c_state == RC_INPROG)
554		goto out_trace;
555
556	/* From the hall of fame of impractical attacks:
557	 * Is this a user who tries to snoop on the cache? */
558	rtn = RC_DOIT;
559	if (!test_bit(RQ_SECURE, &rqstp->rq_flags) && rp->c_secure)
560		goto out_trace;
561
562	/* Compose RPC reply header */
563	switch (rp->c_type) {
564	case RC_NOCACHE:
565		break;
566	case RC_REPLSTAT:
567		xdr_stream_encode_be32(&rqstp->rq_res_stream, rp->c_replstat);
568		rtn = RC_REPLY;
569		break;
570	case RC_REPLBUFF:
571		if (!nfsd_cache_append(rqstp, &rp->c_replvec))
572			goto out_unlock; /* should not happen */
573		rtn = RC_REPLY;
574		break;
575	default:
576		WARN_ONCE(1, "nfsd: bad repcache type %d\n", rp->c_type);
577	}
578
579out_trace:
580	trace_nfsd_drc_found(nn, rqstp, rtn);
581out_unlock:
582	spin_unlock(&b->cache_lock);
583out:
584	return rtn;
585}
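
For context, nfsd_dispatch() is the consumer of these return values; condensed and slightly simplified (proc and statp stand in for the dispatcher's locals), the flow is:

	switch (nfsd_cache_lookup(rqstp, start, len, &rp)) {
	case RC_DOIT:
		break;			/* decode and run the procedure */
	case RC_REPLY:
		return 1;		/* reply already copied from the cache */
	default:
		return 0;		/* RC_DROPIT: drop the call silently */
	}

	/* ... procedure runs, the reply lands in rqstp->rq_res ... */
	nfsd_cache_update(rqstp, rp, proc->pc_cachetype, statp);
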
586
587/**
588 * nfsd_cache_update - Update an entry in the duplicate reply cache.
589 * @rqstp: svc_rqst with a finished Reply
590 * @rp: IN: DRC entry for this request
591 * @cachetype: which cache to update
592 * @statp: pointer to Reply's NFS status code, or NULL
593 *
594 * This is called from nfsd_dispatch when the procedure has been
595 * executed and the complete reply is in rqstp->rq_res.
596 *
597 * We're copying around data here rather than swapping buffers because
598 * the toplevel loop requires max-sized buffers, which would be a waste
599 * of memory for a cache with a max reply size of 100 bytes (diropokres).
600 *
601 * If we should start to use different types of cache entries tailored
602 * specifically for attrstat and fh's, we may save even more space.
603 *
604 * Also note that a cachetype of RC_NOCACHE can legally be passed when
605 * nfsd failed to encode a reply that otherwise would have been cached.
606 * In this case, nfsd_cache_update is called with statp == NULL.
607 */
608void nfsd_cache_update(struct svc_rqst *rqstp, struct nfsd_cacherep *rp,
609		       int cachetype, __be32 *statp)
610{
611	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
612	struct kvec	*resv = &rqstp->rq_res.head[0], *cachv;
613	struct nfsd_drc_bucket *b;
614	int		len;
615	size_t		bufsize = 0;
616
617	if (!rp)
618		return;
619
620	b = nfsd_cache_bucket_find(rp->c_key.k_xid, nn);
621
622	len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
623	len >>= 2;
624
625	/* Don't cache excessive amounts of data and XDR failures */
626	if (!statp || len > (256 >> 2)) {
627		nfsd_reply_cache_free(b, rp, nn);
628		return;
629	}
630
631	switch (cachetype) {
632	case RC_REPLSTAT:
633		if (len != 1)
634			printk("nfsd: RC_REPLSTAT/reply len %d!\n",len);
635		rp->c_replstat = *statp;
636		break;
637	case RC_REPLBUFF:
638		cachv = &rp->c_replvec;
639		bufsize = len << 2;
640		cachv->iov_base = kmalloc(bufsize, GFP_KERNEL);
641		if (!cachv->iov_base) {
642			nfsd_reply_cache_free(b, rp, nn);
643			return;
644		}
645		cachv->iov_len = bufsize;
646		memcpy(cachv->iov_base, statp, bufsize);
647		break;
648	case RC_NOCACHE:
649		nfsd_reply_cache_free(b, rp, nn);
650		return;
651	}
652	spin_lock(&b->cache_lock);
653	nfsd_stats_drc_mem_usage_add(nn, bufsize);
654	lru_put_end(b, rp);
655	rp->c_secure = test_bit(RQ_SECURE, &rqstp->rq_flags);
656	rp->c_type = cachetype;
657	rp->c_state = RC_DONE;
658	spin_unlock(&b->cache_lock);
659	return;
660}
661
662static int
663nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
664{
665	__be32 *p;
666
667	p = xdr_reserve_space(&rqstp->rq_res_stream, data->iov_len);
668	if (unlikely(!p))
669		return false;
670	memcpy(p, data->iov_base, data->iov_len);
671	xdr_commit_encode(&rqstp->rq_res_stream);
672	return true;
673}
674
675/*
676 * Note that fields may be added, removed or reordered in the future. Programs
677 * scraping this file for info should test the labels to ensure they're
678 * getting the correct field.
679 */
680int nfsd_reply_cache_stats_show(struct seq_file *m, void *v)
681{
682	struct nfsd_net *nn = net_generic(file_inode(m->file)->i_sb->s_fs_info,
683					  nfsd_net_id);
684
685	seq_printf(m, "max entries:           %u\n", nn->max_drc_entries);
686	seq_printf(m, "num entries:           %u\n",
687		   atomic_read(&nn->num_drc_entries));
688	seq_printf(m, "hash buckets:          %u\n", 1 << nn->maskbits);
689	seq_printf(m, "mem usage:             %lld\n",
690		   percpu_counter_sum_positive(&nn->counter[NFSD_NET_DRC_MEM_USAGE]));
691	seq_printf(m, "cache hits:            %lld\n",
692		   percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_HITS]));
693	seq_printf(m, "cache misses:          %lld\n",
694		   percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_MISSES]));
695	seq_printf(m, "not cached:            %lld\n",
696		   percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_NOCACHE]));
697	seq_printf(m, "payload misses:        %lld\n",
698		   percpu_counter_sum_positive(&nn->counter[NFSD_NET_PAYLOAD_MISSES]));
699	seq_printf(m, "longest chain len:     %u\n", nn->longest_chain);
700	seq_printf(m, "cachesize at longest:  %u\n", nn->longest_chain_cachesize);
701	return 0;
702}
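
Per the warning above nfsd_reply_cache_stats_show(), consumers should match on labels rather than line positions. A minimal userspace reader in that spirit; the file normally appears at /proc/fs/nfsd/reply_cache_stats, and error handling is kept deliberately simple:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char label[64];
		long long value;
		FILE *f = fopen("/proc/fs/nfsd/reply_cache_stats", "r");

		if (!f)
			return 1;
		/* each line is "<label>: <value>"; match by label, not position */
		while (fscanf(f, " %63[^:]: %lld", label, &value) == 2) {
			if (!strcmp(label, "cache hits"))
				printf("hits: %lld\n", value);
		}
		fclose(f);
		return 0;
	}
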
v3.15
  1/*
  2 * Request reply cache. This is currently a global cache, but this may
  3 * change in the future and be a per-client cache.
  4 *
  5 * This code is heavily inspired by the 44BSD implementation, although
  6 * it does things a bit differently.
  7 *
  8 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
  9 */
 10
 11#include <linux/slab.h>
 12#include <linux/sunrpc/addr.h>
 13#include <linux/highmem.h>
 14#include <linux/log2.h>
 15#include <linux/hash.h>
 16#include <net/checksum.h>
 17
 18#include "nfsd.h"
 19#include "cache.h"
 20
 21#define NFSDDBG_FACILITY	NFSDDBG_REPCACHE
 22
 23/*
 24 * We use this value to determine the number of hash buckets from the max
 25 * cache size, the idea being that when the cache is at its maximum number
 26 * of entries, then this should be the average number of entries per bucket.
 27 */
 28#define TARGET_BUCKET_SIZE	64
 29
 30static struct hlist_head *	cache_hash;
 31static struct list_head 	lru_head;
 32static struct kmem_cache	*drc_slab;
 33
 34/* max number of entries allowed in the cache */
 35static unsigned int		max_drc_entries;
 36
 37/* number of significant bits in the hash value */
 38static unsigned int		maskbits;
 39
 40/*
  41 * Stats and other tracking of the duplicate reply cache. All of these and
  42 * the "rc" fields in nfsdstats are protected by the cache_lock.
 43 */
 44
 45/* total number of entries */
 46static unsigned int		num_drc_entries;
 47
 48/* cache misses due only to checksum comparison failures */
 49static unsigned int		payload_misses;
 50
 51/* amount of memory (in bytes) currently consumed by the DRC */
 52static unsigned int		drc_mem_usage;
 53
 54/* longest hash chain seen */
 55static unsigned int		longest_chain;
 56
 57/* size of cache when we saw the longest hash chain */
 58static unsigned int		longest_chain_cachesize;
 59
 60static int	nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
 61static void	cache_cleaner_func(struct work_struct *unused);
 62static unsigned long nfsd_reply_cache_count(struct shrinker *shrink,
 63					    struct shrink_control *sc);
 64static unsigned long nfsd_reply_cache_scan(struct shrinker *shrink,
 65					   struct shrink_control *sc);
 66
 67static struct shrinker nfsd_reply_cache_shrinker = {
 68	.scan_objects = nfsd_reply_cache_scan,
 69	.count_objects = nfsd_reply_cache_count,
 70	.seeks	= 1,
 71};
 72
 73/*
 74 * locking for the reply cache:
 75 * A cache entry is "single use" if c_state == RC_INPROG
 76 * Otherwise, it when accessing _prev or _next, the lock must be held.
 77 */
 78static DEFINE_SPINLOCK(cache_lock);
 79static DECLARE_DELAYED_WORK(cache_cleaner, cache_cleaner_func);
 80
 81/*
 82 * Put a cap on the size of the DRC based on the amount of available
 83 * low memory in the machine.
 84 *
 85 *  64MB:    8192
 86 * 128MB:   11585
 87 * 256MB:   16384
 88 * 512MB:   23170
 89 *   1GB:   32768
 90 *   2GB:   46340
 91 *   4GB:   65536
 92 *   8GB:   92681
 93 *  16GB:  131072
 94 *
 95 * ...with a hard cap of 256k entries. In the worst case, each entry will be
 96 * ~1k, so the above numbers should give a rough max of the amount of memory
 97 * used in k.
 98 */
 99static unsigned int
100nfsd_cache_size_limit(void)
101{
102	unsigned int limit;
103	unsigned long low_pages = totalram_pages - totalhigh_pages;
104
105	limit = (16 * int_sqrt(low_pages)) << (PAGE_SHIFT-10);
106	return min_t(unsigned int, limit, 256*1024);
107}
108
109/*
110 * Compute the number of hash buckets we need. Divide the max cachesize by
111 * the "target" max bucket size, and round up to next power of two.
112 */
113static unsigned int
114nfsd_hashsize(unsigned int limit)
115{
116	return roundup_pow_of_two(limit / TARGET_BUCKET_SIZE);
117}
118
119static struct svc_cacherep *
120nfsd_reply_cache_alloc(void)
121{
122	struct svc_cacherep	*rp;
123
124	rp = kmem_cache_alloc(drc_slab, GFP_KERNEL);
125	if (rp) {
126		rp->c_state = RC_UNUSED;
127		rp->c_type = RC_NOCACHE;
128		INIT_LIST_HEAD(&rp->c_lru);
129		INIT_HLIST_NODE(&rp->c_hash);
130	}
131	return rp;
132}
133
134static void
135nfsd_reply_cache_free_locked(struct svc_cacherep *rp)
136{
137	if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base) {
138		drc_mem_usage -= rp->c_replvec.iov_len;
139		kfree(rp->c_replvec.iov_base);
140	}
141	if (!hlist_unhashed(&rp->c_hash))
142		hlist_del(&rp->c_hash);
143	list_del(&rp->c_lru);
144	--num_drc_entries;
145	drc_mem_usage -= sizeof(*rp);
146	kmem_cache_free(drc_slab, rp);
147}
148
149static void
150nfsd_reply_cache_free(struct svc_cacherep *rp)
151{
152	spin_lock(&cache_lock);
153	nfsd_reply_cache_free_locked(rp);
154	spin_unlock(&cache_lock);
155}
156
157int nfsd_reply_cache_init(void)
158{
159	unsigned int hashsize;
160
161	INIT_LIST_HEAD(&lru_head);
162	max_drc_entries = nfsd_cache_size_limit();
163	num_drc_entries = 0;
164	hashsize = nfsd_hashsize(max_drc_entries);
165	maskbits = ilog2(hashsize);
166
167	register_shrinker(&nfsd_reply_cache_shrinker);
168	drc_slab = kmem_cache_create("nfsd_drc", sizeof(struct svc_cacherep),
169					0, 0, NULL);
170	if (!drc_slab)
171		goto out_nomem;
172
173	cache_hash = kcalloc(hashsize, sizeof(struct hlist_head), GFP_KERNEL);
174	if (!cache_hash)
175		goto out_nomem;
176
177	return 0;
178out_nomem:
179	printk(KERN_ERR "nfsd: failed to allocate reply cache\n");
180	nfsd_reply_cache_shutdown();
181	return -ENOMEM;
182}
183
184void nfsd_reply_cache_shutdown(void)
185{
186	struct svc_cacherep	*rp;
187
188	unregister_shrinker(&nfsd_reply_cache_shrinker);
189	cancel_delayed_work_sync(&cache_cleaner);
190
191	while (!list_empty(&lru_head)) {
192		rp = list_entry(lru_head.next, struct svc_cacherep, c_lru);
193		nfsd_reply_cache_free_locked(rp);
194	}
195
196	kfree (cache_hash);
197	cache_hash = NULL;
198
199	if (drc_slab) {
200		kmem_cache_destroy(drc_slab);
201		drc_slab = NULL;
202	}
203}
204
205/*
206 * Move cache entry to end of LRU list, and queue the cleaner to run if it's
207 * not already scheduled.
208 */
209static void
210lru_put_end(struct svc_cacherep *rp)
211{
212	rp->c_timestamp = jiffies;
213	list_move_tail(&rp->c_lru, &lru_head);
214	schedule_delayed_work(&cache_cleaner, RC_EXPIRE);
215}
216
217/*
218 * Move a cache entry from one hash list to another
219 */
220static void
221hash_refile(struct svc_cacherep *rp)
222{
223	hlist_del_init(&rp->c_hash);
224	hlist_add_head(&rp->c_hash, cache_hash + hash_32(rp->c_xid, maskbits));
225}
226
227static inline bool
228nfsd_cache_entry_expired(struct svc_cacherep *rp)
229{
230	return rp->c_state != RC_INPROG &&
231	       time_after(jiffies, rp->c_timestamp + RC_EXPIRE);
232}
233
234/*
235 * Walk the LRU list and prune off entries that are older than RC_EXPIRE.
236 * Also prune the oldest ones when the total exceeds the max number of entries.
237 */
238static long
239prune_cache_entries(void)
240{
241	struct svc_cacherep *rp, *tmp;
242	long freed = 0;
243
244	list_for_each_entry_safe(rp, tmp, &lru_head, c_lru) {
245		if (!nfsd_cache_entry_expired(rp) &&
246		    num_drc_entries <= max_drc_entries)
247			break;
248		nfsd_reply_cache_free_locked(rp);
249		freed++;
250	}
251
252	/*
253	 * Conditionally rearm the job. If we cleaned out the list, then
254	 * cancel any pending run (since there won't be any work to do).
255	 * Otherwise, we rearm the job or modify the existing one to run in
256	 * RC_EXPIRE since we just ran the pruner.
257	 */
258	if (list_empty(&lru_head))
259		cancel_delayed_work(&cache_cleaner);
260	else
261		mod_delayed_work(system_wq, &cache_cleaner, RC_EXPIRE);
262	return freed;
263}
264
265static void
266cache_cleaner_func(struct work_struct *unused)
267{
268	spin_lock(&cache_lock);
269	prune_cache_entries();
270	spin_unlock(&cache_lock);
271}
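
In this older version, pruning is driven by one global delayed work item instead of the inline pruning plus shrinker seen in v6.8. The moving parts are scattered above, so, condensed: lru_put_end() arms the cleaner on every insertion (schedule_delayed_work() is a no-op while the work is already queued), and prune_cache_entries() either pushes the timer back or cancels it once the LRU drains:

	/* on every insertion or update (lru_put_end) */
	schedule_delayed_work(&cache_cleaner, RC_EXPIRE);

	/* at the end of each pruning pass (prune_cache_entries) */
	if (list_empty(&lru_head))
		cancel_delayed_work(&cache_cleaner);
	else
		mod_delayed_work(system_wq, &cache_cleaner, RC_EXPIRE);
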
272
273static unsigned long
274nfsd_reply_cache_count(struct shrinker *shrink, struct shrink_control *sc)
275{
276	unsigned long num;
277
278	spin_lock(&cache_lock);
279	num = num_drc_entries;
280	spin_unlock(&cache_lock);
281
282	return num;
283}
284
285static unsigned long
286nfsd_reply_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
287{
288	unsigned long freed;
289
290	spin_lock(&cache_lock);
291	freed = prune_cache_entries();
292	spin_unlock(&cache_lock);
293	return freed;
294}
295/*
296 * Walk an xdr_buf and compute a weak checksum over at most the first RC_CSUMLEN bytes
297 */
298static __wsum
299nfsd_cache_csum(struct svc_rqst *rqstp)
300{
301	int idx;
302	unsigned int base;
303	__wsum csum;
304	struct xdr_buf *buf = &rqstp->rq_arg;
305	const unsigned char *p = buf->head[0].iov_base;
306	size_t csum_len = min_t(size_t, buf->head[0].iov_len + buf->page_len,
307				RC_CSUMLEN);
308	size_t len = min(buf->head[0].iov_len, csum_len);
309
310	/* rq_arg.head first */
311	csum = csum_partial(p, len, 0);
312	csum_len -= len;
313
314	/* Continue into page array */
315	idx = buf->page_base / PAGE_SIZE;
316	base = buf->page_base & ~PAGE_MASK;
317	while (csum_len) {
318		p = page_address(buf->pages[idx]) + base;
319		len = min_t(size_t, PAGE_SIZE - base, csum_len);
320		csum = csum_partial(p, len, csum);
321		csum_len -= len;
322		base = 0;
323		++idx;
324	}
325	return csum;
326}
327
328static bool
329nfsd_cache_match(struct svc_rqst *rqstp, __wsum csum, struct svc_cacherep *rp)
330{
331	/* Check RPC header info first */
332	if (rqstp->rq_xid != rp->c_xid || rqstp->rq_proc != rp->c_proc ||
333	    rqstp->rq_prot != rp->c_prot || rqstp->rq_vers != rp->c_vers ||
334	    rqstp->rq_arg.len != rp->c_len ||
335	    !rpc_cmp_addr(svc_addr(rqstp), (struct sockaddr *)&rp->c_addr) ||
336	    rpc_get_port(svc_addr(rqstp)) != rpc_get_port((struct sockaddr *)&rp->c_addr))
337		return false;
338
339	/* compare checksum of NFS data */
340	if (csum != rp->c_csum) {
341		++payload_misses;
342		return false;
343	}
344
345	return true;
346}
347
348/*
349 * Search the request hash for an entry that matches the given rqstp.
350 * Must be called with cache_lock held. Returns the found entry or
351 * NULL on failure.
352 */
353static struct svc_cacherep *
354nfsd_cache_search(struct svc_rqst *rqstp, __wsum csum)
355{
356	struct svc_cacherep	*rp, *ret = NULL;
357	struct hlist_head 	*rh;
358	unsigned int		entries = 0;
359
360	rh = &cache_hash[hash_32(rqstp->rq_xid, maskbits)];
361	hlist_for_each_entry(rp, rh, c_hash) {
362		++entries;
363		if (nfsd_cache_match(rqstp, csum, rp)) {
364			ret = rp;
365			break;
366		}
367	}
368
369	/* tally hash chain length stats */
370	if (entries > longest_chain) {
371		longest_chain = entries;
372		longest_chain_cachesize = num_drc_entries;
373	} else if (entries == longest_chain) {
374		/* prefer to keep the smallest cachesize possible here */
375		longest_chain_cachesize = min(longest_chain_cachesize,
376						num_drc_entries);
377	}
378
379	return ret;
380}
381
382/*
383 * Try to find an entry matching the current call in the cache. Since a
384 * miss followed by an insert is the common case, an entry is preallocated
385 * before the cache_lock is taken. If the search then finds a match, the
386 * preallocated entry is freed and the cached one is used; otherwise the
387 * new entry is initialized, hashed, and put on the LRU.
388 */
389int
390nfsd_cache_lookup(struct svc_rqst *rqstp)
391{
392	struct svc_cacherep	*rp, *found;
393	__be32			xid = rqstp->rq_xid;
394	u32			proto =  rqstp->rq_prot,
395				vers = rqstp->rq_vers,
396				proc = rqstp->rq_proc;
397	__wsum			csum;
398	unsigned long		age;
399	int type = rqstp->rq_cachetype;
400	int rtn = RC_DOIT;
401
402	rqstp->rq_cacherep = NULL;
403	if (type == RC_NOCACHE) {
404		nfsdstats.rcnocache++;
405		return rtn;
406	}
407
408	csum = nfsd_cache_csum(rqstp);
409
410	/*
411	 * Since the common case is a cache miss followed by an insert,
412	 * preallocate an entry.
413	 */
414	rp = nfsd_reply_cache_alloc();
415	spin_lock(&cache_lock);
416	if (likely(rp)) {
417		++num_drc_entries;
418		drc_mem_usage += sizeof(*rp);
419	}
420
421	/* go ahead and prune the cache */
422	prune_cache_entries();
423
424	found = nfsd_cache_search(rqstp, csum);
425	if (found) {
426		if (likely(rp))
427			nfsd_reply_cache_free_locked(rp);
428		rp = found;
429		goto found_entry;
430	}
431
432	if (!rp) {
433		dprintk("nfsd: unable to allocate DRC entry!\n");
434		goto out;
435	}
436
437	nfsdstats.rcmisses++;
438	rqstp->rq_cacherep = rp;
439	rp->c_state = RC_INPROG;
440	rp->c_xid = xid;
441	rp->c_proc = proc;
442	rpc_copy_addr((struct sockaddr *)&rp->c_addr, svc_addr(rqstp));
443	rpc_set_port((struct sockaddr *)&rp->c_addr, rpc_get_port(svc_addr(rqstp)));
444	rp->c_prot = proto;
445	rp->c_vers = vers;
446	rp->c_len = rqstp->rq_arg.len;
447	rp->c_csum = csum;
448
449	hash_refile(rp);
450	lru_put_end(rp);
451
452	/* release any buffer */
453	if (rp->c_type == RC_REPLBUFF) {
454		drc_mem_usage -= rp->c_replvec.iov_len;
455		kfree(rp->c_replvec.iov_base);
456		rp->c_replvec.iov_base = NULL;
457	}
458	rp->c_type = RC_NOCACHE;
459 out:
460	spin_unlock(&cache_lock);
461	return rtn;
462
463found_entry:
464	nfsdstats.rchits++;
465	/* We found a matching entry which is either in progress or done. */
466	age = jiffies - rp->c_timestamp;
467	lru_put_end(rp);
468
469	rtn = RC_DROPIT;
470	/* Request being processed or excessive rexmits */
471	if (rp->c_state == RC_INPROG || age < RC_DELAY)
472		goto out;
473
474	/* From the hall of fame of impractical attacks:
475	 * Is this a user who tries to snoop on the cache? */
476	rtn = RC_DOIT;
477	if (!rqstp->rq_secure && rp->c_secure)
478		goto out;
479
480	/* Compose RPC reply header */
481	switch (rp->c_type) {
482	case RC_NOCACHE:
483		break;
484	case RC_REPLSTAT:
485		svc_putu32(&rqstp->rq_res.head[0], rp->c_replstat);
486		rtn = RC_REPLY;
487		break;
488	case RC_REPLBUFF:
489		if (!nfsd_cache_append(rqstp, &rp->c_replvec))
490			goto out;	/* should not happen */
491		rtn = RC_REPLY;
492		break;
493	default:
494		printk(KERN_WARNING "nfsd: bad repcache type %d\n", rp->c_type);
495		nfsd_reply_cache_free_locked(rp);
496	}
497
498	goto out;
499}
500
501/*
502 * Update a cache entry. This is called from nfsd_dispatch when
503 * the procedure has been executed and the complete reply is in
504 * rqstp->rq_res.
505 *
506 * We're copying around data here rather than swapping buffers because
507 * the toplevel loop requires max-sized buffers, which would be a waste
508 * of memory for a cache with a max reply size of 100 bytes (diropokres).
509 *
510 * If we should start to use different types of cache entries tailored
511 * specifically for attrstat and fh's, we may save even more space.
512 *
513 * Also note that a cachetype of RC_NOCACHE can legally be passed when
514 * nfsd failed to encode a reply that otherwise would have been cached.
515 * In this case, nfsd_cache_update is called with statp == NULL.
516 */
517void
518nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
519{
520	struct svc_cacherep *rp = rqstp->rq_cacherep;
521	struct kvec	*resv = &rqstp->rq_res.head[0], *cachv;
522	int		len;
523	size_t		bufsize = 0;
524
525	if (!rp)
526		return;
527
528	len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
529	len >>= 2;
530
531	/* Don't cache excessive amounts of data and XDR failures */
532	if (!statp || len > (256 >> 2)) {
533		nfsd_reply_cache_free(rp);
534		return;
535	}
536
537	switch (cachetype) {
538	case RC_REPLSTAT:
539		if (len != 1)
540			printk("nfsd: RC_REPLSTAT/reply len %d!\n",len);
541		rp->c_replstat = *statp;
542		break;
543	case RC_REPLBUFF:
544		cachv = &rp->c_replvec;
545		bufsize = len << 2;
546		cachv->iov_base = kmalloc(bufsize, GFP_KERNEL);
547		if (!cachv->iov_base) {
548			nfsd_reply_cache_free(rp);
549			return;
550		}
551		cachv->iov_len = bufsize;
552		memcpy(cachv->iov_base, statp, bufsize);
553		break;
554	case RC_NOCACHE:
555		nfsd_reply_cache_free(rp);
556		return;
557	}
558	spin_lock(&cache_lock);
559	drc_mem_usage += bufsize;
560	lru_put_end(rp);
561	rp->c_secure = rqstp->rq_secure;
562	rp->c_type = cachetype;
563	rp->c_state = RC_DONE;
564	spin_unlock(&cache_lock);
565	return;
566}
567
568/*
569 * Copy cached reply to current reply buffer. Should always fit.
570 * FIXME as reply is in a page, we should just attach the page, and
571 * keep a refcount....
572 */
573static int
574nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
575{
576	struct kvec	*vec = &rqstp->rq_res.head[0];
577
578	if (vec->iov_len + data->iov_len > PAGE_SIZE) {
579		printk(KERN_WARNING "nfsd: cached reply too large (%Zd).\n",
580				data->iov_len);
581		return 0;
582	}
583	memcpy((char*)vec->iov_base + vec->iov_len, data->iov_base, data->iov_len);
584	vec->iov_len += data->iov_len;
585	return 1;
586}
587
588/*
589 * Note that fields may be added, removed or reordered in the future. Programs
590 * scraping this file for info should test the labels to ensure they're
591 * getting the correct field.
592 */
593static int nfsd_reply_cache_stats_show(struct seq_file *m, void *v)
594{
595	spin_lock(&cache_lock);
596	seq_printf(m, "max entries:           %u\n", max_drc_entries);
597	seq_printf(m, "num entries:           %u\n", num_drc_entries);
598	seq_printf(m, "hash buckets:          %u\n", 1 << maskbits);
599	seq_printf(m, "mem usage:             %u\n", drc_mem_usage);
600	seq_printf(m, "cache hits:            %u\n", nfsdstats.rchits);
601	seq_printf(m, "cache misses:          %u\n", nfsdstats.rcmisses);
602	seq_printf(m, "not cached:            %u\n", nfsdstats.rcnocache);
603	seq_printf(m, "payload misses:        %u\n", payload_misses);
604	seq_printf(m, "longest chain len:     %u\n", longest_chain);
605	seq_printf(m, "cachesize at longest:  %u\n", longest_chain_cachesize);
606	spin_unlock(&cache_lock);
607	return 0;
608}
609
610int nfsd_reply_cache_stats_open(struct inode *inode, struct file *file)
611{
612	return single_open(file, nfsd_reply_cache_stats_show, NULL);
613}