v3.15
 
  1/*
  2 * Request reply cache. This is currently a global cache, but this may
  3 * change in the future and be a per-client cache.
  4 *
  5 * This code is heavily inspired by the 44BSD implementation, although
  6 * it does things a bit differently.
  7 *
  8 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
  9 */
 10
 11#include <linux/slab.h>
 12#include <linux/sunrpc/addr.h>
 13#include <linux/highmem.h>
 14#include <linux/log2.h>
 15#include <linux/hash.h>
 16#include <net/checksum.h>
 17
 18#include "nfsd.h"
 19#include "cache.h"
 20
 21#define NFSDDBG_FACILITY	NFSDDBG_REPCACHE
 22
 23/*
 24 * We use this value to determine the number of hash buckets from the max
 25 * cache size, the idea being that when the cache is at its maximum number
 26 * of entries, then this should be the average number of entries per bucket.
 27 */
 28#define TARGET_BUCKET_SIZE	64
 29
 30static struct hlist_head *	cache_hash;
 31static struct list_head 	lru_head;
 32static struct kmem_cache	*drc_slab;
 33
 34/* max number of entries allowed in the cache */
 35static unsigned int		max_drc_entries;
 36
 37/* number of significant bits in the hash value */
 38static unsigned int		maskbits;
 39
 40/*
 41 * Stats and other tracking of the duplicate reply cache. All of these and
 42 * the "rc" fields in nfsdstats are protected by the cache_lock
 43 */
 44
 45/* total number of entries */
 46static unsigned int		num_drc_entries;
 47
 48/* cache misses due only to checksum comparison failures */
 49static unsigned int		payload_misses;
 50
 51/* amount of memory (in bytes) currently consumed by the DRC */
 52static unsigned int		drc_mem_usage;
 53
 54/* longest hash chain seen */
 55static unsigned int		longest_chain;
 56
 57/* size of cache when we saw the longest hash chain */
 58static unsigned int		longest_chain_cachesize;
 59
 60static int	nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
 61static void	cache_cleaner_func(struct work_struct *unused);
 62static unsigned long nfsd_reply_cache_count(struct shrinker *shrink,
 63					    struct shrink_control *sc);
 64static unsigned long nfsd_reply_cache_scan(struct shrinker *shrink,
 65					   struct shrink_control *sc);
 66
 67static struct shrinker nfsd_reply_cache_shrinker = {
 68	.scan_objects = nfsd_reply_cache_scan,
 69	.count_objects = nfsd_reply_cache_count,
 70	.seeks	= 1,
 71};
 72
 73/*
 74 * locking for the reply cache:
 75 * A cache entry is "single use" if c_state == RC_INPROG
 76 * Otherwise, when accessing _prev or _next, the lock must be held.
 77 */
 78static DEFINE_SPINLOCK(cache_lock);
 79static DECLARE_DELAYED_WORK(cache_cleaner, cache_cleaner_func);
 80
 81/*
 82 * Put a cap on the size of the DRC based on the amount of available
 83 * low memory in the machine.
 84 *
 85 *  64MB:    8192
 86 * 128MB:   11585
 87 * 256MB:   16384
 88 * 512MB:   23170
 89 *   1GB:   32768
 90 *   2GB:   46340
 91 *   4GB:   65536
 92 *   8GB:   92681
 93 *  16GB:  131072
 94 *
 95 * ...with a hard cap of 256k entries. In the worst case, each entry will be
 96 * ~1k, so the above numbers should give a rough max of the amount of memory
 97 * used in k.
 98 */
 99static unsigned int
100nfsd_cache_size_limit(void)
101{
102	unsigned int limit;
103	unsigned long low_pages = totalram_pages - totalhigh_pages;
104
105	limit = (16 * int_sqrt(low_pages)) << (PAGE_SHIFT-10);
106	return min_t(unsigned int, limit, 256*1024);
107}
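
A quick sanity check of the table above: the sketch below reruns the same arithmetic in userspace. It is only an illustration, not part of nfsd: it assumes 4 KB pages and substitutes sqrt() for the kernel's int_sqrt(), so a couple of entries differ from the table by one due to rounding.

#include <math.h>
#include <stdio.h>

#define PAGE_SHIFT	12	/* assume 4 KB pages */

int main(void)
{
	static const unsigned long mb[] = { 64, 128, 256, 512, 1024,
					    2048, 4096, 8192, 16384 };

	for (int i = 0; i < 9; i++) {
		unsigned long low_pages = mb[i] << (20 - PAGE_SHIFT);
		unsigned int limit = (16 * (unsigned int)sqrt(low_pages))
						<< (PAGE_SHIFT - 10);

		if (limit > 256 * 1024)	/* hard cap of 256k entries */
			limit = 256 * 1024;
		printf("%6luMB: %6u entries\n", mb[i], limit);
	}
	return 0;
}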
108
109/*
110 * Compute the number of hash buckets we need. Divide the max cachesize by
111 * the "target" max bucket size, and round up to next power of two.
112 */
113static unsigned int
114nfsd_hashsize(unsigned int limit)
115{
116	return roundup_pow_of_two(limit / TARGET_BUCKET_SIZE);
117}
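
For example, on a machine where the cap above works out to 32768 entries, nfsd_hashsize() returns roundup_pow_of_two(32768 / 64) = 512 buckets, and maskbits becomes ilog2(512) = 9, so hash_32() is asked to produce a 9-bit bucket index.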
118
119static struct svc_cacherep *
120nfsd_reply_cache_alloc(void)
121{
122	struct svc_cacherep	*rp;
123
124	rp = kmem_cache_alloc(drc_slab, GFP_KERNEL);
125	if (rp) {
126		rp->c_state = RC_UNUSED;
127		rp->c_type = RC_NOCACHE;
128		INIT_LIST_HEAD(&rp->c_lru);
129		INIT_HLIST_NODE(&rp->c_hash);
130	}
131	return rp;
132}
133
134static void
135nfsd_reply_cache_free_locked(struct svc_cacherep *rp)
136{
137	if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base) {
138		drc_mem_usage -= rp->c_replvec.iov_len;
139		kfree(rp->c_replvec.iov_base);
140	}
141	if (!hlist_unhashed(&rp->c_hash))
142		hlist_del(&rp->c_hash);
143	list_del(&rp->c_lru);
144	--num_drc_entries;
145	drc_mem_usage -= sizeof(*rp);
146	kmem_cache_free(drc_slab, rp);
147}
148
149static void
150nfsd_reply_cache_free(struct svc_cacherep *rp)
151{
152	spin_lock(&cache_lock);
153	nfsd_reply_cache_free_locked(rp);
154	spin_unlock(&cache_lock);
155}
156
157int nfsd_reply_cache_init(void)
158{
159	unsigned int hashsize;
160
161	INIT_LIST_HEAD(&lru_head);
162	max_drc_entries = nfsd_cache_size_limit();
163	num_drc_entries = 0;
164	hashsize = nfsd_hashsize(max_drc_entries);
165	maskbits = ilog2(hashsize);
166
167	register_shrinker(&nfsd_reply_cache_shrinker);
168	drc_slab = kmem_cache_create("nfsd_drc", sizeof(struct svc_cacherep),
169					0, 0, NULL);
170	if (!drc_slab)
171		goto out_nomem;
172
173	cache_hash = kcalloc(hashsize, sizeof(struct hlist_head), GFP_KERNEL);
174	if (!cache_hash)
175		goto out_nomem;
176
177	return 0;
178out_nomem:
179	printk(KERN_ERR "nfsd: failed to allocate reply cache\n");
180	nfsd_reply_cache_shutdown();
181	return -ENOMEM;
182}
183
184void nfsd_reply_cache_shutdown(void)
185{
186	struct svc_cacherep	*rp;
187
188	unregister_shrinker(&nfsd_reply_cache_shrinker);
189	cancel_delayed_work_sync(&cache_cleaner);
190
191	while (!list_empty(&lru_head)) {
192		rp = list_entry(lru_head.next, struct svc_cacherep, c_lru);
193		nfsd_reply_cache_free_locked(rp);
194	}
195
196	kfree (cache_hash);
197	cache_hash = NULL;
198
199	if (drc_slab) {
200		kmem_cache_destroy(drc_slab);
201		drc_slab = NULL;
202	}
203}
204
205/*
206 * Move cache entry to end of LRU list, and queue the cleaner to run if it's
207 * not already scheduled.
208 */
209static void
210lru_put_end(struct svc_cacherep *rp)
211{
212	rp->c_timestamp = jiffies;
213	list_move_tail(&rp->c_lru, &lru_head);
214	schedule_delayed_work(&cache_cleaner, RC_EXPIRE);
215}
216
217/*
218 * Move a cache entry from one hash list to another
219 */
220static void
221hash_refile(struct svc_cacherep *rp)
222{
223	hlist_del_init(&rp->c_hash);
224	hlist_add_head(&rp->c_hash, cache_hash + hash_32(rp->c_xid, maskbits));
225}
226
227static inline bool
228nfsd_cache_entry_expired(struct svc_cacherep *rp)
229{
230	return rp->c_state != RC_INPROG &&
231	       time_after(jiffies, rp->c_timestamp + RC_EXPIRE);
232}
233
234/*
235 * Walk the LRU list and prune off entries that are older than RC_EXPIRE.
236 * Also prune the oldest ones when the total exceeds the max number of entries.
237 */
238static long
239prune_cache_entries(void)
240{
241	struct svc_cacherep *rp, *tmp;
242	long freed = 0;
243
244	list_for_each_entry_safe(rp, tmp, &lru_head, c_lru) {
245		if (!nfsd_cache_entry_expired(rp) &&
246		    num_drc_entries <= max_drc_entries)
247			break;
248		nfsd_reply_cache_free_locked(rp);
249		freed++;
250	}
251
252	/*
253	 * Conditionally rearm the job. If we cleaned out the list, then
254	 * cancel any pending run (since there won't be any work to do).
255	 * Otherwise, we rearm the job or modify the existing one to run in
256	 * RC_EXPIRE since we just ran the pruner.
257	 */
258	if (list_empty(&lru_head))
259		cancel_delayed_work(&cache_cleaner);
260	else
261		mod_delayed_work(system_wq, &cache_cleaner, RC_EXPIRE);
262	return freed;
263}
264
265static void
266cache_cleaner_func(struct work_struct *unused)
267{
268	spin_lock(&cache_lock);
269	prune_cache_entries();
270	spin_unlock(&cache_lock);
271}
272
273static unsigned long
274nfsd_reply_cache_count(struct shrinker *shrink, struct shrink_control *sc)
275{
276	unsigned long num;
277
278	spin_lock(&cache_lock);
279	num = num_drc_entries;
280	spin_unlock(&cache_lock);
281
282	return num;
283}
284
285static unsigned long
286nfsd_reply_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
287{
288	unsigned long freed;
289
290	spin_lock(&cache_lock);
291	freed = prune_cache_entries();
292	spin_unlock(&cache_lock);
293	return freed;
294}
295/*
296 * Walk an xdr_buf and get a CRC for at most the first RC_CSUMLEN bytes
297 */
298static __wsum
299nfsd_cache_csum(struct svc_rqst *rqstp)
300{
301	int idx;
302	unsigned int base;
303	__wsum csum;
304	struct xdr_buf *buf = &rqstp->rq_arg;
305	const unsigned char *p = buf->head[0].iov_base;
306	size_t csum_len = min_t(size_t, buf->head[0].iov_len + buf->page_len,
307				RC_CSUMLEN);
308	size_t len = min(buf->head[0].iov_len, csum_len);
309
310	/* rq_arg.head first */
311	csum = csum_partial(p, len, 0);
312	csum_len -= len;
313
314	/* Continue into page array */
315	idx = buf->page_base / PAGE_SIZE;
316	base = buf->page_base & ~PAGE_MASK;
317	while (csum_len) {
318		p = page_address(buf->pages[idx]) + base;
319		len = min_t(size_t, PAGE_SIZE - base, csum_len);
320		csum = csum_partial(p, len, csum);
321		csum_len -= len;
322		base = 0;
323		++idx;
324	}
325	return csum;
326}
327
328static bool
329nfsd_cache_match(struct svc_rqst *rqstp, __wsum csum, struct svc_cacherep *rp)
330{
331	/* Check RPC header info first */
332	if (rqstp->rq_xid != rp->c_xid || rqstp->rq_proc != rp->c_proc ||
333	    rqstp->rq_prot != rp->c_prot || rqstp->rq_vers != rp->c_vers ||
334	    rqstp->rq_arg.len != rp->c_len ||
335	    !rpc_cmp_addr(svc_addr(rqstp), (struct sockaddr *)&rp->c_addr) ||
336	    rpc_get_port(svc_addr(rqstp)) != rpc_get_port((struct sockaddr *)&rp->c_addr))
337		return false;
338
339	/* compare checksum of NFS data */
340	if (csum != rp->c_csum) {
341		++payload_misses;
342		return false;
343	}
344
345	return true;
346}
347
348/*
349 * Search the request hash for an entry that matches the given rqstp.
350 * Must be called with cache_lock held. Returns the found entry or
351 * NULL on failure.
352 */
353static struct svc_cacherep *
354nfsd_cache_search(struct svc_rqst *rqstp, __wsum csum)
355{
356	struct svc_cacherep	*rp, *ret = NULL;
357	struct hlist_head 	*rh;
358	unsigned int		entries = 0;
359
360	rh = &cache_hash[hash_32(rqstp->rq_xid, maskbits)];
361	hlist_for_each_entry(rp, rh, c_hash) {
362		++entries;
363		if (nfsd_cache_match(rqstp, csum, rp)) {
364			ret = rp;
365			break;
366		}
367	}
368
369	/* tally hash chain length stats */
370	if (entries > longest_chain) {
371		longest_chain = entries;
372		longest_chain_cachesize = num_drc_entries;
373	} else if (entries == longest_chain) {
374		/* prefer to keep the smallest cachesize possible here */
375		longest_chain_cachesize = min(longest_chain_cachesize,
376						num_drc_entries);
377	}
378
379	return ret;
380}
381
382/*
383 * Try to find an entry matching the current call in the cache. When none
384 * is found, we try to grab the oldest expired entry off the LRU list. If
385 * a suitable one isn't there, then drop the cache_lock and allocate a
386 * new one, then search again in case one got inserted while this thread
387 * didn't hold the lock.
388 */
389int
390nfsd_cache_lookup(struct svc_rqst *rqstp)
391{
392	struct svc_cacherep	*rp, *found;
393	__be32			xid = rqstp->rq_xid;
394	u32			proto =  rqstp->rq_prot,
395				vers = rqstp->rq_vers,
396				proc = rqstp->rq_proc;
397	__wsum			csum;
398	unsigned long		age;
399	int type = rqstp->rq_cachetype;
400	int rtn = RC_DOIT;
401
402	rqstp->rq_cacherep = NULL;
403	if (type == RC_NOCACHE) {
404		nfsdstats.rcnocache++;
405		return rtn;
406	}
407
408	csum = nfsd_cache_csum(rqstp);
409
410	/*
411	 * Since the common case is a cache miss followed by an insert,
412	 * preallocate an entry.
413	 */
414	rp = nfsd_reply_cache_alloc();
415	spin_lock(&cache_lock);
416	if (likely(rp)) {
417		++num_drc_entries;
418		drc_mem_usage += sizeof(*rp);
419	}
420
421	/* go ahead and prune the cache */
422	prune_cache_entries();
423
424	found = nfsd_cache_search(rqstp, csum);
425	if (found) {
426		if (likely(rp))
427			nfsd_reply_cache_free_locked(rp);
428		rp = found;
429		goto found_entry;
430	}
431
432	if (!rp) {
433		dprintk("nfsd: unable to allocate DRC entry!\n");
434		goto out;
435	}
436
437	nfsdstats.rcmisses++;
438	rqstp->rq_cacherep = rp;
439	rp->c_state = RC_INPROG;
440	rp->c_xid = xid;
441	rp->c_proc = proc;
442	rpc_copy_addr((struct sockaddr *)&rp->c_addr, svc_addr(rqstp));
443	rpc_set_port((struct sockaddr *)&rp->c_addr, rpc_get_port(svc_addr(rqstp)));
444	rp->c_prot = proto;
445	rp->c_vers = vers;
446	rp->c_len = rqstp->rq_arg.len;
447	rp->c_csum = csum;
448
449	hash_refile(rp);
450	lru_put_end(rp);
451
452	/* release any buffer */
453	if (rp->c_type == RC_REPLBUFF) {
454		drc_mem_usage -= rp->c_replvec.iov_len;
455		kfree(rp->c_replvec.iov_base);
456		rp->c_replvec.iov_base = NULL;
457	}
458	rp->c_type = RC_NOCACHE;
459 out:
460	spin_unlock(&cache_lock);
461	return rtn;
462
463found_entry:
464	nfsdstats.rchits++;
465	/* We found a matching entry which is either in progress or done. */
466	age = jiffies - rp->c_timestamp;
467	lru_put_end(rp);
468
469	rtn = RC_DROPIT;
470	/* Request being processed or excessive rexmits */
471	if (rp->c_state == RC_INPROG || age < RC_DELAY)
472		goto out;
473
474	/* From the hall of fame of impractical attacks:
475	 * Is this a user who tries to snoop on the cache? */
476	rtn = RC_DOIT;
477	if (!rqstp->rq_secure && rp->c_secure)
478		goto out;
479
480	/* Compose RPC reply header */
481	switch (rp->c_type) {
482	case RC_NOCACHE:
483		break;
484	case RC_REPLSTAT:
485		svc_putu32(&rqstp->rq_res.head[0], rp->c_replstat);
486		rtn = RC_REPLY;
487		break;
488	case RC_REPLBUFF:
489		if (!nfsd_cache_append(rqstp, &rp->c_replvec))
490			goto out;	/* should not happen */
491		rtn = RC_REPLY;
492		break;
493	default:
494		printk(KERN_WARNING "nfsd: bad repcache type %d\n", rp->c_type);
495		nfsd_reply_cache_free_locked(rp);
496	}
497
498	goto out;
499}
500
501/*
502 * Update a cache entry. This is called from nfsd_dispatch when
503 * the procedure has been executed and the complete reply is in
504 * rqstp->rq_res.
505 *
506 * We're copying around data here rather than swapping buffers because
507 * the toplevel loop requires max-sized buffers, which would be a waste
508 * of memory for a cache with a max reply size of 100 bytes (diropokres).
509 *
510 * If we should start to use different types of cache entries tailored
511 * specifically for attrstat and fh's, we may save even more space.
512 *
513 * Also note that a cachetype of RC_NOCACHE can legally be passed when
514 * nfsd failed to encode a reply that otherwise would have been cached.
515 * In this case, nfsd_cache_update is called with statp == NULL.
516 */
517void
518nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
519{
520	struct svc_cacherep *rp = rqstp->rq_cacherep;
521	struct kvec	*resv = &rqstp->rq_res.head[0], *cachv;
522	int		len;
523	size_t		bufsize = 0;
524
525	if (!rp)
526		return;
527
528	len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
529	len >>= 2;
530
531	/* Don't cache excessive amounts of data and XDR failures */
532	if (!statp || len > (256 >> 2)) {
533		nfsd_reply_cache_free(rp);
534		return;
535	}
536
537	switch (cachetype) {
538	case RC_REPLSTAT:
539		if (len != 1)
540			printk("nfsd: RC_REPLSTAT/reply len %d!\n",len);
541		rp->c_replstat = *statp;
542		break;
543	case RC_REPLBUFF:
544		cachv = &rp->c_replvec;
545		bufsize = len << 2;
546		cachv->iov_base = kmalloc(bufsize, GFP_KERNEL);
547		if (!cachv->iov_base) {
548			nfsd_reply_cache_free(rp);
549			return;
550		}
551		cachv->iov_len = bufsize;
552		memcpy(cachv->iov_base, statp, bufsize);
553		break;
554	case RC_NOCACHE:
555		nfsd_reply_cache_free(rp);
556		return;
557	}
558	spin_lock(&cache_lock);
559	drc_mem_usage += bufsize;
560	lru_put_end(rp);
561	rp->c_secure = rqstp->rq_secure;
562	rp->c_type = cachetype;
563	rp->c_state = RC_DONE;
564	spin_unlock(&cache_lock);
565	return;
566}
567
568/*
569 * Copy cached reply to current reply buffer. Should always fit.
570 * FIXME as reply is in a page, we should just attach the page, and
571 * keep a refcount....
572 */
573static int
574nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
575{
576	struct kvec	*vec = &rqstp->rq_res.head[0];
577
578	if (vec->iov_len + data->iov_len > PAGE_SIZE) {
579		printk(KERN_WARNING "nfsd: cached reply too large (%Zd).\n",
580				data->iov_len);
581		return 0;
582	}
583	memcpy((char*)vec->iov_base + vec->iov_len, data->iov_base, data->iov_len);
584	vec->iov_len += data->iov_len;
585	return 1;
586}
587
588/*
589 * Note that fields may be added, removed or reordered in the future. Programs
590 * scraping this file for info should test the labels to ensure they're
591 * getting the correct field.
592 */
593static int nfsd_reply_cache_stats_show(struct seq_file *m, void *v)
594{
595	spin_lock(&cache_lock);
596	seq_printf(m, "max entries:           %u\n", max_drc_entries);
597	seq_printf(m, "num entries:           %u\n", num_drc_entries);
598	seq_printf(m, "hash buckets:          %u\n", 1 << maskbits);
599	seq_printf(m, "mem usage:             %u\n", drc_mem_usage);
600	seq_printf(m, "cache hits:            %u\n", nfsdstats.rchits);
601	seq_printf(m, "cache misses:          %u\n", nfsdstats.rcmisses);
602	seq_printf(m, "not cached:            %u\n", nfsdstats.rcnocache);
603	seq_printf(m, "payload misses:        %u\n", payload_misses);
604	seq_printf(m, "longest chain len:     %u\n", longest_chain);
605	seq_printf(m, "cachesize at longest:  %u\n", longest_chain_cachesize);
606	spin_unlock(&cache_lock);
607	return 0;
608}
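
Since the comment above only guarantees the labels, consumers should key on them rather than on line position. A minimal userspace reader along those lines (a hypothetical sketch, not part of nfsd; it assumes the nfsd filesystem is mounted at /proc/fs/nfsd):

#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/proc/fs/nfsd/reply_cache_stats", "r");
	char label[64];
	unsigned long long val;

	if (!f)
		return 1;
	/* each line is "<label>: <value>"; match on the label, not the order */
	while (fscanf(f, " %63[^:]: %llu", label, &val) == 2) {
		if (!strcmp(label, "cache hits"))
			printf("hits=%llu\n", val);
	}
	fclose(f);
	return 0;
}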
609
610int nfsd_reply_cache_stats_open(struct inode *inode, struct file *file)
611{
612	return single_open(file, nfsd_reply_cache_stats_show, NULL);
613}
v5.14.15
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Request reply cache. This is currently a global cache, but this may
  4 * change in the future and be a per-client cache.
  5 *
  6 * This code is heavily inspired by the 44BSD implementation, although
  7 * it does things a bit differently.
  8 *
  9 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 10 */
 11
 12#include <linux/sunrpc/svc_xprt.h>
 13#include <linux/slab.h>
 14#include <linux/vmalloc.h>
 15#include <linux/sunrpc/addr.h>
 16#include <linux/highmem.h>
 17#include <linux/log2.h>
 18#include <linux/hash.h>
 19#include <net/checksum.h>
 20
 21#include "nfsd.h"
 22#include "cache.h"
 23#include "trace.h"
 24
 25/*
 26 * We use this value to determine the number of hash buckets from the max
 27 * cache size, the idea being that when the cache is at its maximum number
 28 * of entries, then this should be the average number of entries per bucket.
 29 */
 30#define TARGET_BUCKET_SIZE	64
 31
 32struct nfsd_drc_bucket {
 33	struct rb_root rb_head;
 34	struct list_head lru_head;
 35	spinlock_t cache_lock;
 36};
 37
 38static struct kmem_cache	*drc_slab;
 39
 40static int	nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
 41static unsigned long nfsd_reply_cache_count(struct shrinker *shrink,
 42					    struct shrink_control *sc);
 43static unsigned long nfsd_reply_cache_scan(struct shrinker *shrink,
 44					   struct shrink_control *sc);
 45
 46/*
 47 * Put a cap on the size of the DRC based on the amount of available
 48 * low memory in the machine.
 49 *
 50 *  64MB:    8192
 51 * 128MB:   11585
 52 * 256MB:   16384
 53 * 512MB:   23170
 54 *   1GB:   32768
 55 *   2GB:   46340
 56 *   4GB:   65536
 57 *   8GB:   92681
 58 *  16GB:  131072
 59 *
 60 * ...with a hard cap of 256k entries. In the worst case, each entry will be
 61 * ~1k, so the above numbers should give a rough max of the amount of memory
 62 * used in k.
 63 *
 64 * XXX: these limits are per-container, so memory used will increase
 65 * linearly with number of containers.  Maybe that's OK.
 66 */
 67static unsigned int
 68nfsd_cache_size_limit(void)
 69{
 70	unsigned int limit;
 71	unsigned long low_pages = totalram_pages() - totalhigh_pages();
 72
 73	limit = (16 * int_sqrt(low_pages)) << (PAGE_SHIFT-10);
 74	return min_t(unsigned int, limit, 256*1024);
 75}
 76
 77/*
 78 * Compute the number of hash buckets we need. Divide the max cachesize by
 79 * the "target" max bucket size, and round up to next power of two.
 80 */
 81static unsigned int
 82nfsd_hashsize(unsigned int limit)
 83{
 84	return roundup_pow_of_two(limit / TARGET_BUCKET_SIZE);
 85}
 86
 87static u32
 88nfsd_cache_hash(__be32 xid, struct nfsd_net *nn)
 89{
 90	return hash_32(be32_to_cpu(xid), nn->maskbits);
 91}
 92
 93static struct svc_cacherep *
 94nfsd_reply_cache_alloc(struct svc_rqst *rqstp, __wsum csum,
 95			struct nfsd_net *nn)
 96{
 97	struct svc_cacherep	*rp;
 98
 99	rp = kmem_cache_alloc(drc_slab, GFP_KERNEL);
100	if (rp) {
101		rp->c_state = RC_UNUSED;
102		rp->c_type = RC_NOCACHE;
103		RB_CLEAR_NODE(&rp->c_node);
104		INIT_LIST_HEAD(&rp->c_lru);
105
106		memset(&rp->c_key, 0, sizeof(rp->c_key));
107		rp->c_key.k_xid = rqstp->rq_xid;
108		rp->c_key.k_proc = rqstp->rq_proc;
109		rpc_copy_addr((struct sockaddr *)&rp->c_key.k_addr, svc_addr(rqstp));
110		rpc_set_port((struct sockaddr *)&rp->c_key.k_addr, rpc_get_port(svc_addr(rqstp)));
111		rp->c_key.k_prot = rqstp->rq_prot;
112		rp->c_key.k_vers = rqstp->rq_vers;
113		rp->c_key.k_len = rqstp->rq_arg.len;
114		rp->c_key.k_csum = csum;
115	}
116	return rp;
117}
118
119static void
120nfsd_reply_cache_free_locked(struct nfsd_drc_bucket *b, struct svc_cacherep *rp,
121				struct nfsd_net *nn)
122{
123	if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base) {
124		nfsd_stats_drc_mem_usage_sub(nn, rp->c_replvec.iov_len);
125		kfree(rp->c_replvec.iov_base);
126	}
127	if (rp->c_state != RC_UNUSED) {
128		rb_erase(&rp->c_node, &b->rb_head);
129		list_del(&rp->c_lru);
130		atomic_dec(&nn->num_drc_entries);
131		nfsd_stats_drc_mem_usage_sub(nn, sizeof(*rp));
132	}
133	kmem_cache_free(drc_slab, rp);
134}
135
136static void
137nfsd_reply_cache_free(struct nfsd_drc_bucket *b, struct svc_cacherep *rp,
138			struct nfsd_net *nn)
139{
140	spin_lock(&b->cache_lock);
141	nfsd_reply_cache_free_locked(b, rp, nn);
142	spin_unlock(&b->cache_lock);
143}
144
145int nfsd_drc_slab_create(void)
146{
147	drc_slab = kmem_cache_create("nfsd_drc",
148				sizeof(struct svc_cacherep), 0, 0, NULL);
149	return drc_slab ? 0: -ENOMEM;
150}
151
152void nfsd_drc_slab_free(void)
153{
154	kmem_cache_destroy(drc_slab);
155}
156
157static int nfsd_reply_cache_stats_init(struct nfsd_net *nn)
158{
159	return nfsd_percpu_counters_init(nn->counter, NFSD_NET_COUNTERS_NUM);
160}
161
162static void nfsd_reply_cache_stats_destroy(struct nfsd_net *nn)
163{
164	nfsd_percpu_counters_destroy(nn->counter, NFSD_NET_COUNTERS_NUM);
165}
166
167int nfsd_reply_cache_init(struct nfsd_net *nn)
168{
169	unsigned int hashsize;
170	unsigned int i;
171	int status = 0;
172
173	nn->max_drc_entries = nfsd_cache_size_limit();
174	atomic_set(&nn->num_drc_entries, 0);
175	hashsize = nfsd_hashsize(nn->max_drc_entries);
176	nn->maskbits = ilog2(hashsize);
177
178	status = nfsd_reply_cache_stats_init(nn);
179	if (status)
180		goto out_nomem;
181
182	nn->nfsd_reply_cache_shrinker.scan_objects = nfsd_reply_cache_scan;
183	nn->nfsd_reply_cache_shrinker.count_objects = nfsd_reply_cache_count;
184	nn->nfsd_reply_cache_shrinker.seeks = 1;
185	status = register_shrinker(&nn->nfsd_reply_cache_shrinker);
186	if (status)
187		goto out_stats_destroy;
188
189	nn->drc_hashtbl = kvzalloc(array_size(hashsize,
190				sizeof(*nn->drc_hashtbl)), GFP_KERNEL);
191	if (!nn->drc_hashtbl)
192		goto out_shrinker;
193
194	for (i = 0; i < hashsize; i++) {
195		INIT_LIST_HEAD(&nn->drc_hashtbl[i].lru_head);
196		spin_lock_init(&nn->drc_hashtbl[i].cache_lock);
197	}
198	nn->drc_hashsize = hashsize;
199
200	return 0;
201out_shrinker:
202	unregister_shrinker(&nn->nfsd_reply_cache_shrinker);
203out_stats_destroy:
204	nfsd_reply_cache_stats_destroy(nn);
205out_nomem:
206	printk(KERN_ERR "nfsd: failed to allocate reply cache\n");
207	return -ENOMEM;
208}
209
210void nfsd_reply_cache_shutdown(struct nfsd_net *nn)
211{
212	struct svc_cacherep	*rp;
213	unsigned int i;
214
215	nfsd_reply_cache_stats_destroy(nn);
216	unregister_shrinker(&nn->nfsd_reply_cache_shrinker);
217
218	for (i = 0; i < nn->drc_hashsize; i++) {
219		struct list_head *head = &nn->drc_hashtbl[i].lru_head;
220		while (!list_empty(head)) {
221			rp = list_first_entry(head, struct svc_cacherep, c_lru);
222			nfsd_reply_cache_free_locked(&nn->drc_hashtbl[i],
223									rp, nn);
224		}
225	}
226
227	kvfree(nn->drc_hashtbl);
228	nn->drc_hashtbl = NULL;
229	nn->drc_hashsize = 0;
230
231}
232
233/*
234 * Move a cache entry to the end of its bucket's LRU list; pruning of stale
235 * entries happens inline in the lookup path and via the shrinker.
236 */
237static void
238lru_put_end(struct nfsd_drc_bucket *b, struct svc_cacherep *rp)
239{
240	rp->c_timestamp = jiffies;
241	list_move_tail(&rp->c_lru, &b->lru_head);
242}
243
244static long
245prune_bucket(struct nfsd_drc_bucket *b, struct nfsd_net *nn)
246{
247	struct svc_cacherep *rp, *tmp;
248	long freed = 0;
249
250	list_for_each_entry_safe(rp, tmp, &b->lru_head, c_lru) {
251		/*
252		 * Don't free entries attached to calls that are still
253		 * in-progress, but do keep scanning the list.
254		 */
255		if (rp->c_state == RC_INPROG)
256			continue;
257		if (atomic_read(&nn->num_drc_entries) <= nn->max_drc_entries &&
258		    time_before(jiffies, rp->c_timestamp + RC_EXPIRE))
259			break;
260		nfsd_reply_cache_free_locked(b, rp, nn);
261		freed++;
262	}
263	return freed;
264}
265
266/*
267 * Walk the LRU list and prune off entries that are older than RC_EXPIRE.
268 * Also prune the oldest ones when the total exceeds the max number of entries.
269 */
270static long
271prune_cache_entries(struct nfsd_net *nn)
272{
273	unsigned int i;
274	long freed = 0;
275
276	for (i = 0; i < nn->drc_hashsize; i++) {
277		struct nfsd_drc_bucket *b = &nn->drc_hashtbl[i];
278
279		if (list_empty(&b->lru_head))
280			continue;
281		spin_lock(&b->cache_lock);
282		freed += prune_bucket(b, nn);
283		spin_unlock(&b->cache_lock);
284	}
285	return freed;
286}
287
288static unsigned long
289nfsd_reply_cache_count(struct shrinker *shrink, struct shrink_control *sc)
290{
291	struct nfsd_net *nn = container_of(shrink,
292				struct nfsd_net, nfsd_reply_cache_shrinker);
293
294	return atomic_read(&nn->num_drc_entries);
295}
296
297static unsigned long
298nfsd_reply_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
299{
300	struct nfsd_net *nn = container_of(shrink,
301				struct nfsd_net, nfsd_reply_cache_shrinker);
302
303	return prune_cache_entries(nn);
304}
305/*
306 * Walk an xdr_buf and get a CRC for at most the first RC_CSUMLEN bytes
307 */
308static __wsum
309nfsd_cache_csum(struct svc_rqst *rqstp)
310{
311	int idx;
312	unsigned int base;
313	__wsum csum;
314	struct xdr_buf *buf = &rqstp->rq_arg;
315	const unsigned char *p = buf->head[0].iov_base;
316	size_t csum_len = min_t(size_t, buf->head[0].iov_len + buf->page_len,
317				RC_CSUMLEN);
318	size_t len = min(buf->head[0].iov_len, csum_len);
319
320	/* rq_arg.head first */
321	csum = csum_partial(p, len, 0);
322	csum_len -= len;
323
324	/* Continue into page array */
325	idx = buf->page_base / PAGE_SIZE;
326	base = buf->page_base & ~PAGE_MASK;
327	while (csum_len) {
328		p = page_address(buf->pages[idx]) + base;
329		len = min_t(size_t, PAGE_SIZE - base, csum_len);
330		csum = csum_partial(p, len, csum);
331		csum_len -= len;
332		base = 0;
333		++idx;
334	}
335	return csum;
336}
337
338static int
339nfsd_cache_key_cmp(const struct svc_cacherep *key,
340			const struct svc_cacherep *rp, struct nfsd_net *nn)
341{
342	if (key->c_key.k_xid == rp->c_key.k_xid &&
343	    key->c_key.k_csum != rp->c_key.k_csum) {
344		nfsd_stats_payload_misses_inc(nn);
345		trace_nfsd_drc_mismatch(nn, key, rp);
346	}
347
348	return memcmp(&key->c_key, &rp->c_key, sizeof(key->c_key));
349}
350
351/*
352 * Search the bucket's rb-tree for an entry that matches the given rqstp.
353 * Must be called with the bucket's cache_lock held. Returns the found entry or
354 * inserts an empty key on failure.
355 */
356static struct svc_cacherep *
357nfsd_cache_insert(struct nfsd_drc_bucket *b, struct svc_cacherep *key,
358			struct nfsd_net *nn)
359{
360	struct svc_cacherep	*rp, *ret = key;
361	struct rb_node		**p = &b->rb_head.rb_node,
362				*parent = NULL;
363	unsigned int		entries = 0;
364	int cmp;
365
366	while (*p != NULL) {
367		++entries;
368		parent = *p;
369		rp = rb_entry(parent, struct svc_cacherep, c_node);
370
371		cmp = nfsd_cache_key_cmp(key, rp, nn);
372		if (cmp < 0)
373			p = &parent->rb_left;
374		else if (cmp > 0)
375			p = &parent->rb_right;
376		else {
377			ret = rp;
378			goto out;
379		}
380	}
381	rb_link_node(&key->c_node, parent, p);
382	rb_insert_color(&key->c_node, &b->rb_head);
383out:
384	/* tally hash chain length stats */
385	if (entries > nn->longest_chain) {
386		nn->longest_chain = entries;
387		nn->longest_chain_cachesize = atomic_read(&nn->num_drc_entries);
388	} else if (entries == nn->longest_chain) {
389		/* prefer to keep the smallest cachesize possible here */
390		nn->longest_chain_cachesize = min_t(unsigned int,
391				nn->longest_chain_cachesize,
392				atomic_read(&nn->num_drc_entries));
393	}
394
395	lru_put_end(b, ret);
396	return ret;
397}
398
399/**
400 * nfsd_cache_lookup - Find an entry in the duplicate reply cache
401 * @rqstp: Incoming Call to find
402 *
403 * Try to find an entry matching the current call in the cache. When none
404 * is found, we try to grab the oldest expired entry off the LRU list. If
405 * a suitable one isn't there, then drop the cache_lock and allocate a
406 * new one, then search again in case one got inserted while this thread
407 * didn't hold the lock.
408 *
409 * Return values:
410 *   %RC_DOIT: Process the request normally
411 *   %RC_REPLY: Reply from cache
412 *   %RC_DROPIT: Do not process the request further
413 */
414int nfsd_cache_lookup(struct svc_rqst *rqstp)
415{
416	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
417	struct svc_cacherep	*rp, *found;
418	__be32			xid = rqstp->rq_xid;
419	__wsum			csum;
420	u32 hash = nfsd_cache_hash(xid, nn);
421	struct nfsd_drc_bucket *b = &nn->drc_hashtbl[hash];
422	int type = rqstp->rq_cachetype;
423	int rtn = RC_DOIT;
424
425	rqstp->rq_cacherep = NULL;
426	if (type == RC_NOCACHE) {
427		nfsd_stats_rc_nocache_inc();
428		goto out;
429	}
430
431	csum = nfsd_cache_csum(rqstp);
432
433	/*
434	 * Since the common case is a cache miss followed by an insert,
435	 * preallocate an entry.
436	 */
437	rp = nfsd_reply_cache_alloc(rqstp, csum, nn);
438	if (!rp)
439		goto out;
440
441	spin_lock(&b->cache_lock);
442	found = nfsd_cache_insert(b, rp, nn);
443	if (found != rp) {
444		nfsd_reply_cache_free_locked(NULL, rp, nn);
445		rp = found;
446		goto found_entry;
447	}
448
449	nfsd_stats_rc_misses_inc();
450	rqstp->rq_cacherep = rp;
451	rp->c_state = RC_INPROG;
452
453	atomic_inc(&nn->num_drc_entries);
454	nfsd_stats_drc_mem_usage_add(nn, sizeof(*rp));
455
456	/* go ahead and prune the cache */
457	prune_bucket(b, nn);
458
459out_unlock:
460	spin_unlock(&b->cache_lock);
461out:
462	return rtn;
463
464found_entry:
465	/* We found a matching entry which is either in progress or done. */
466	nfsd_stats_rc_hits_inc();
467	rtn = RC_DROPIT;
468
469	/* Request being processed */
470	if (rp->c_state == RC_INPROG)
471		goto out_trace;
472
473	/* From the hall of fame of impractical attacks:
474	 * Is this a user who tries to snoop on the cache? */
475	rtn = RC_DOIT;
476	if (!test_bit(RQ_SECURE, &rqstp->rq_flags) && rp->c_secure)
477		goto out_trace;
478
479	/* Compose RPC reply header */
480	switch (rp->c_type) {
481	case RC_NOCACHE:
482		break;
483	case RC_REPLSTAT:
484		svc_putu32(&rqstp->rq_res.head[0], rp->c_replstat);
485		rtn = RC_REPLY;
486		break;
487	case RC_REPLBUFF:
488		if (!nfsd_cache_append(rqstp, &rp->c_replvec))
489			goto out_unlock; /* should not happen */
490		rtn = RC_REPLY;
491		break;
492	default:
493		WARN_ONCE(1, "nfsd: bad repcache type %d\n", rp->c_type);
494	}
495
496out_trace:
497	trace_nfsd_drc_found(nn, rqstp, rtn);
498	goto out_unlock;
499}
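
The kernel-doc above lists the three return values; roughly, the caller (nfsd_dispatch()) acts on them as in this condensed sketch (illustrative only, with placeholder labels, not the actual dispatcher code):

	switch (nfsd_cache_lookup(rqstp)) {
	case RC_DOIT:
		break;			/* miss: execute the procedure, then nfsd_cache_update() */
	case RC_REPLY:
		goto send_cached_reply;	/* hit: the saved reply is already in rq_res */
	case RC_DROPIT:
		goto drop_request;	/* duplicate of a call still in progress: say nothing */
	}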
500
501/**
502 * nfsd_cache_update - Update an entry in the duplicate reply cache.
503 * @rqstp: svc_rqst with a finished Reply
504 * @cachetype: which cache to update
505 * @statp: Reply's status code
506 *
507 * This is called from nfsd_dispatch when the procedure has been
508 * executed and the complete reply is in rqstp->rq_res.
509 *
510 * We're copying around data here rather than swapping buffers because
511 * the toplevel loop requires max-sized buffers, which would be a waste
512 * of memory for a cache with a max reply size of 100 bytes (diropokres).
513 *
514 * If we should start to use different types of cache entries tailored
515 * specifically for attrstat and fh's, we may save even more space.
516 *
517 * Also note that a cachetype of RC_NOCACHE can legally be passed when
518 * nfsd failed to encode a reply that otherwise would have been cached.
519 * In this case, nfsd_cache_update is called with statp == NULL.
520 */
521void nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
522{
523	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
524	struct svc_cacherep *rp = rqstp->rq_cacherep;
525	struct kvec	*resv = &rqstp->rq_res.head[0], *cachv;
526	u32		hash;
527	struct nfsd_drc_bucket *b;
528	int		len;
529	size_t		bufsize = 0;
530
531	if (!rp)
532		return;
533
534	hash = nfsd_cache_hash(rp->c_key.k_xid, nn);
535	b = &nn->drc_hashtbl[hash];
536
537	len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
538	len >>= 2;
539
540	/* Don't cache excessive amounts of data and XDR failures */
541	if (!statp || len > (256 >> 2)) {
542		nfsd_reply_cache_free(b, rp, nn);
543		return;
544	}
545
546	switch (cachetype) {
547	case RC_REPLSTAT:
548		if (len != 1)
549			printk("nfsd: RC_REPLSTAT/reply len %d!\n",len);
550		rp->c_replstat = *statp;
551		break;
552	case RC_REPLBUFF:
553		cachv = &rp->c_replvec;
554		bufsize = len << 2;
555		cachv->iov_base = kmalloc(bufsize, GFP_KERNEL);
556		if (!cachv->iov_base) {
557			nfsd_reply_cache_free(b, rp, nn);
558			return;
559		}
560		cachv->iov_len = bufsize;
561		memcpy(cachv->iov_base, statp, bufsize);
562		break;
563	case RC_NOCACHE:
564		nfsd_reply_cache_free(b, rp, nn);
565		return;
566	}
567	spin_lock(&b->cache_lock);
568	nfsd_stats_drc_mem_usage_add(nn, bufsize);
569	lru_put_end(b, rp);
570	rp->c_secure = test_bit(RQ_SECURE, &rqstp->rq_flags);
571	rp->c_type = cachetype;
572	rp->c_state = RC_DONE;
573	spin_unlock(&b->cache_lock);
574	return;
575}
576
577/*
578 * Copy cached reply to current reply buffer. Should always fit.
579 * FIXME as reply is in a page, we should just attach the page, and
580 * keep a refcount....
581 */
582static int
583nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
584{
585	struct kvec	*vec = &rqstp->rq_res.head[0];
586
587	if (vec->iov_len + data->iov_len > PAGE_SIZE) {
588		printk(KERN_WARNING "nfsd: cached reply too large (%zd).\n",
589				data->iov_len);
590		return 0;
591	}
592	memcpy((char*)vec->iov_base + vec->iov_len, data->iov_base, data->iov_len);
593	vec->iov_len += data->iov_len;
594	return 1;
595}
596
597/*
598 * Note that fields may be added, removed or reordered in the future. Programs
599 * scraping this file for info should test the labels to ensure they're
600 * getting the correct field.
601 */
602static int nfsd_reply_cache_stats_show(struct seq_file *m, void *v)
603{
604	struct nfsd_net *nn = m->private;
605
606	seq_printf(m, "max entries:           %u\n", nn->max_drc_entries);
607	seq_printf(m, "num entries:           %u\n",
608		   atomic_read(&nn->num_drc_entries));
609	seq_printf(m, "hash buckets:          %u\n", 1 << nn->maskbits);
610	seq_printf(m, "mem usage:             %lld\n",
611		   percpu_counter_sum_positive(&nn->counter[NFSD_NET_DRC_MEM_USAGE]));
612	seq_printf(m, "cache hits:            %lld\n",
613		   percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_HITS]));
614	seq_printf(m, "cache misses:          %lld\n",
615		   percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_MISSES]));
616	seq_printf(m, "not cached:            %lld\n",
617		   percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_NOCACHE]));
618	seq_printf(m, "payload misses:        %lld\n",
619		   percpu_counter_sum_positive(&nn->counter[NFSD_NET_PAYLOAD_MISSES]));
620	seq_printf(m, "longest chain len:     %u\n", nn->longest_chain);
621	seq_printf(m, "cachesize at longest:  %u\n", nn->longest_chain_cachesize);
622	return 0;
623}
624
625int nfsd_reply_cache_stats_open(struct inode *inode, struct file *file)
626{
627	struct nfsd_net *nn = net_generic(file_inode(file)->i_sb->s_fs_info,
628								nfsd_net_id);
629
630	return single_open(file, nfsd_reply_cache_stats_show, nn);
631}