v3.15
/*
 * Request reply cache. This is currently a global cache, but this may
 * change in the future and be a per-client cache.
 *
 * This code is heavily inspired by the 44BSD implementation, although
 * it does things a bit differently.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/slab.h>
#include <linux/sunrpc/addr.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/hash.h>
#include <net/checksum.h>

#include "nfsd.h"
#include "cache.h"

#define NFSDDBG_FACILITY	NFSDDBG_REPCACHE

/*
 * We use this value to determine the number of hash buckets from the max
 * cache size, the idea being that when the cache holds its maximum number
 * of entries, this is the average number of entries per bucket.
 */
#define TARGET_BUCKET_SIZE	64

static struct hlist_head *	cache_hash;
static struct list_head	lru_head;
static struct kmem_cache	*drc_slab;

/* max number of entries allowed in the cache */
static unsigned int		max_drc_entries;

/* number of significant bits in the hash value */
static unsigned int		maskbits;

/*
 * Stats and other tracking of the duplicate reply cache. All of these and
 * the "rc" fields in nfsdstats are protected by the cache_lock.
 */

/* total number of entries */
static unsigned int		num_drc_entries;

/* cache misses due only to checksum comparison failures */
static unsigned int		payload_misses;

/* amount of memory (in bytes) currently consumed by the DRC */
static unsigned int		drc_mem_usage;

/* longest hash chain seen */
static unsigned int		longest_chain;

/* size of cache when we saw the longest hash chain */
static unsigned int		longest_chain_cachesize;

static int	nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
static void	cache_cleaner_func(struct work_struct *unused);
static unsigned long nfsd_reply_cache_count(struct shrinker *shrink,
					    struct shrink_control *sc);
static unsigned long nfsd_reply_cache_scan(struct shrinker *shrink,
					   struct shrink_control *sc);

static struct shrinker nfsd_reply_cache_shrinker = {
	.scan_objects = nfsd_reply_cache_scan,
	.count_objects = nfsd_reply_cache_count,
	.seeks	= 1,
};

/*
 * locking for the reply cache:
 * A cache entry is "single use" if c_state == RC_INPROG
 * Otherwise, when accessing _prev or _next, the lock must be held.
 */
static DEFINE_SPINLOCK(cache_lock);
static DECLARE_DELAYED_WORK(cache_cleaner, cache_cleaner_func);

/*
 * Put a cap on the size of the DRC based on the amount of available
 * low memory in the machine.
 *
 *  64MB:    8192
 * 128MB:   11585
 * 256MB:   16384
 * 512MB:   23170
 *   1GB:   32768
 *   2GB:   46340
 *   4GB:   65536
 *   8GB:   92681
 *  16GB:  131072
 *
 * ...with a hard cap of 256k entries. In the worst case each entry is
 * ~1k, so the figures above roughly bound the DRC's memory usage in
 * kilobytes.
 */
static unsigned int
nfsd_cache_size_limit(void)
{
	unsigned int limit;
	unsigned long low_pages = totalram_pages - totalhigh_pages;

	limit = (16 * int_sqrt(low_pages)) << (PAGE_SHIFT-10);
	return min_t(unsigned int, limit, 256*1024);
}
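
/*
 * A worked instance of the formula above (assuming 4k pages, i.e.
 * PAGE_SHIFT == 12): with 1GB of low memory, low_pages == 262144,
 * int_sqrt(262144) == 512, and (16 * 512) << 2 == 32768, matching the
 * 1GB row in the table. Under the same assumption, the 256k hard cap
 * only kicks in once low memory reaches about 64GB.
 */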

/*
 * Compute the number of hash buckets we need. Divide the max cachesize by
 * the "target" max bucket size, and round up to the next power of two.
 */
static unsigned int
nfsd_hashsize(unsigned int limit)
{
	return roundup_pow_of_two(limit / TARGET_BUCKET_SIZE);
}
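
/*
 * For example, with max_drc_entries == 32768 (the 1GB figure above) this
 * returns roundup_pow_of_two(32768 / 64) == 512 buckets, and
 * nfsd_reply_cache_init() below then sets maskbits = ilog2(512) == 9.
 */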

static struct svc_cacherep *
nfsd_reply_cache_alloc(void)
{
	struct svc_cacherep	*rp;

	rp = kmem_cache_alloc(drc_slab, GFP_KERNEL);
	if (rp) {
		rp->c_state = RC_UNUSED;
		rp->c_type = RC_NOCACHE;
		INIT_LIST_HEAD(&rp->c_lru);
		INIT_HLIST_NODE(&rp->c_hash);
	}
	return rp;
}

static void
nfsd_reply_cache_free_locked(struct svc_cacherep *rp)
{
	if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base) {
		drc_mem_usage -= rp->c_replvec.iov_len;
		kfree(rp->c_replvec.iov_base);
	}
	if (!hlist_unhashed(&rp->c_hash))
		hlist_del(&rp->c_hash);
	list_del(&rp->c_lru);
	--num_drc_entries;
	drc_mem_usage -= sizeof(*rp);
	kmem_cache_free(drc_slab, rp);
}

static void
nfsd_reply_cache_free(struct svc_cacherep *rp)
{
	spin_lock(&cache_lock);
	nfsd_reply_cache_free_locked(rp);
	spin_unlock(&cache_lock);
}

int nfsd_reply_cache_init(void)
{
	unsigned int hashsize;

	INIT_LIST_HEAD(&lru_head);
	max_drc_entries = nfsd_cache_size_limit();
	num_drc_entries = 0;
	hashsize = nfsd_hashsize(max_drc_entries);
	maskbits = ilog2(hashsize);

	register_shrinker(&nfsd_reply_cache_shrinker);
	drc_slab = kmem_cache_create("nfsd_drc", sizeof(struct svc_cacherep),
					0, 0, NULL);
	if (!drc_slab)
		goto out_nomem;

	cache_hash = kcalloc(hashsize, sizeof(struct hlist_head), GFP_KERNEL);
	if (!cache_hash)
		goto out_nomem;

	return 0;
out_nomem:
	printk(KERN_ERR "nfsd: failed to allocate reply cache\n");
	nfsd_reply_cache_shutdown();
	return -ENOMEM;
}

void nfsd_reply_cache_shutdown(void)
{
	struct svc_cacherep	*rp;

	unregister_shrinker(&nfsd_reply_cache_shrinker);
	cancel_delayed_work_sync(&cache_cleaner);

	while (!list_empty(&lru_head)) {
		rp = list_entry(lru_head.next, struct svc_cacherep, c_lru);
		nfsd_reply_cache_free_locked(rp);
	}

	kfree(cache_hash);
	cache_hash = NULL;

	if (drc_slab) {
		kmem_cache_destroy(drc_slab);
		drc_slab = NULL;
	}
}

/*
 * Move cache entry to end of LRU list, and queue the cleaner to run if it's
 * not already scheduled.
 */
static void
lru_put_end(struct svc_cacherep *rp)
{
	rp->c_timestamp = jiffies;
	list_move_tail(&rp->c_lru, &lru_head);
	schedule_delayed_work(&cache_cleaner, RC_EXPIRE);
}

/*
 * Move a cache entry from one hash list to another
 */
static void
hash_refile(struct svc_cacherep *rp)
{
	hlist_del_init(&rp->c_hash);
	hlist_add_head(&rp->c_hash, cache_hash + hash_32(rp->c_xid, maskbits));
}

static inline bool
nfsd_cache_entry_expired(struct svc_cacherep *rp)
{
	return rp->c_state != RC_INPROG &&
	       time_after(jiffies, rp->c_timestamp + RC_EXPIRE);
}

/*
 * Walk the LRU list and prune off entries that are older than RC_EXPIRE.
 * Also prune the oldest ones when the total exceeds the max number of entries.
 */
static long
prune_cache_entries(void)
{
	struct svc_cacherep *rp, *tmp;
	long freed = 0;

	list_for_each_entry_safe(rp, tmp, &lru_head, c_lru) {
		if (!nfsd_cache_entry_expired(rp) &&
		    num_drc_entries <= max_drc_entries)
			break;
		nfsd_reply_cache_free_locked(rp);
		freed++;
	}

	/*
	 * Conditionally rearm the job. If we cleaned out the list, then
	 * cancel any pending run (since there won't be any work to do).
	 * Otherwise, we rearm the job or modify the existing one to run in
	 * RC_EXPIRE since we just ran the pruner.
	 */
	if (list_empty(&lru_head))
		cancel_delayed_work(&cache_cleaner);
	else
		mod_delayed_work(system_wq, &cache_cleaner, RC_EXPIRE);
	return freed;
}

static void
cache_cleaner_func(struct work_struct *unused)
{
	spin_lock(&cache_lock);
	prune_cache_entries();
	spin_unlock(&cache_lock);
}

static unsigned long
nfsd_reply_cache_count(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned long num;

	spin_lock(&cache_lock);
	num = num_drc_entries;
	spin_unlock(&cache_lock);

	return num;
}

static unsigned long
nfsd_reply_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned long freed;

	spin_lock(&cache_lock);
	freed = prune_cache_entries();
	spin_unlock(&cache_lock);
	return freed;
}

/*
 * Walk an xdr_buf and get a checksum for at most the first RC_CSUMLEN bytes
 */
static __wsum
nfsd_cache_csum(struct svc_rqst *rqstp)
{
	int idx;
	unsigned int base;
	__wsum csum;
	struct xdr_buf *buf = &rqstp->rq_arg;
	const unsigned char *p = buf->head[0].iov_base;
	size_t csum_len = min_t(size_t, buf->head[0].iov_len + buf->page_len,
				RC_CSUMLEN);
	size_t len = min(buf->head[0].iov_len, csum_len);

	/* rq_arg.head first */
	csum = csum_partial(p, len, 0);
	csum_len -= len;

	/* Continue into page array */
	idx = buf->page_base / PAGE_SIZE;
	base = buf->page_base & ~PAGE_MASK;
	while (csum_len) {
		p = page_address(buf->pages[idx]) + base;
		len = min_t(size_t, PAGE_SIZE - base, csum_len);
		csum = csum_partial(p, len, csum);
		csum_len -= len;
		base = 0;
		++idx;
	}
	return csum;
}
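
/*
 * For instance, assuming RC_CSUMLEN is 256 (its value in fs/nfsd/cache.h
 * at this point): if the head kvec holds 100 bytes of a large request,
 * the first csum_partial() covers those 100 bytes, and the loop then
 * pulls the remaining 156 bytes out of the page array, page by page,
 * starting at page_base.
 */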

static bool
nfsd_cache_match(struct svc_rqst *rqstp, __wsum csum, struct svc_cacherep *rp)
{
	/* Check RPC header info first */
	if (rqstp->rq_xid != rp->c_xid || rqstp->rq_proc != rp->c_proc ||
	    rqstp->rq_prot != rp->c_prot || rqstp->rq_vers != rp->c_vers ||
	    rqstp->rq_arg.len != rp->c_len ||
	    !rpc_cmp_addr(svc_addr(rqstp), (struct sockaddr *)&rp->c_addr) ||
	    rpc_get_port(svc_addr(rqstp)) != rpc_get_port((struct sockaddr *)&rp->c_addr))
		return false;

	/* compare checksum of NFS data */
	if (csum != rp->c_csum) {
		++payload_misses;
		return false;
	}

	return true;
}

/*
 * Search the request hash for an entry that matches the given rqstp.
 * Must be called with cache_lock held. Returns the found entry or
 * NULL on failure.
 */
static struct svc_cacherep *
nfsd_cache_search(struct svc_rqst *rqstp, __wsum csum)
{
	struct svc_cacherep	*rp, *ret = NULL;
	struct hlist_head	*rh;
	unsigned int		entries = 0;

	rh = &cache_hash[hash_32(rqstp->rq_xid, maskbits)];
	hlist_for_each_entry(rp, rh, c_hash) {
		++entries;
		if (nfsd_cache_match(rqstp, csum, rp)) {
			ret = rp;
			break;
		}
	}

	/* tally hash chain length stats */
	if (entries > longest_chain) {
		longest_chain = entries;
		longest_chain_cachesize = num_drc_entries;
	} else if (entries == longest_chain) {
		/* prefer to keep the smallest cachesize possible here */
		longest_chain_cachesize = min(longest_chain_cachesize,
						num_drc_entries);
	}

	return ret;
}

/*
 * Try to find an entry matching the current call in the cache. Since the
 * common case is a miss followed by an insert, we preallocate an entry
 * before taking the lock, prune the cache, and then search it. If a
 * matching entry is found, the preallocated one is freed again.
 */
int
nfsd_cache_lookup(struct svc_rqst *rqstp)
{
	struct svc_cacherep	*rp, *found;
	__be32			xid = rqstp->rq_xid;
	u32			proto = rqstp->rq_prot,
				vers = rqstp->rq_vers,
				proc = rqstp->rq_proc;
	__wsum			csum;
	unsigned long		age;
	int type = rqstp->rq_cachetype;
	int rtn = RC_DOIT;

	rqstp->rq_cacherep = NULL;
	if (type == RC_NOCACHE) {
		nfsdstats.rcnocache++;
		return rtn;
	}

	csum = nfsd_cache_csum(rqstp);

	/*
	 * Since the common case is a cache miss followed by an insert,
	 * preallocate an entry.
	 */
	rp = nfsd_reply_cache_alloc();
	spin_lock(&cache_lock);
	if (likely(rp)) {
		++num_drc_entries;
		drc_mem_usage += sizeof(*rp);
	}

	/* go ahead and prune the cache */
	prune_cache_entries();

	found = nfsd_cache_search(rqstp, csum);
	if (found) {
		if (likely(rp))
			nfsd_reply_cache_free_locked(rp);
		rp = found;
		goto found_entry;
	}

	if (!rp) {
		dprintk("nfsd: unable to allocate DRC entry!\n");
		goto out;
	}

	nfsdstats.rcmisses++;
	rqstp->rq_cacherep = rp;
	rp->c_state = RC_INPROG;
	rp->c_xid = xid;
	rp->c_proc = proc;
	rpc_copy_addr((struct sockaddr *)&rp->c_addr, svc_addr(rqstp));
	rpc_set_port((struct sockaddr *)&rp->c_addr, rpc_get_port(svc_addr(rqstp)));
	rp->c_prot = proto;
	rp->c_vers = vers;
	rp->c_len = rqstp->rq_arg.len;
	rp->c_csum = csum;

	hash_refile(rp);
	lru_put_end(rp);

	/* release any buffer */
	if (rp->c_type == RC_REPLBUFF) {
		drc_mem_usage -= rp->c_replvec.iov_len;
		kfree(rp->c_replvec.iov_base);
		rp->c_replvec.iov_base = NULL;
	}
	rp->c_type = RC_NOCACHE;
 out:
	spin_unlock(&cache_lock);
	return rtn;

found_entry:
	nfsdstats.rchits++;
	/* We found a matching entry which is either in progress or done. */
	age = jiffies - rp->c_timestamp;
	lru_put_end(rp);

	rtn = RC_DROPIT;
	/* Request being processed or excessive rexmits */
	if (rp->c_state == RC_INPROG || age < RC_DELAY)
		goto out;

	/* From the hall of fame of impractical attacks:
	 * Is this a user who tries to snoop on the cache? */
	rtn = RC_DOIT;
	if (!rqstp->rq_secure && rp->c_secure)
		goto out;

	/* Compose RPC reply header */
	switch (rp->c_type) {
	case RC_NOCACHE:
		break;
	case RC_REPLSTAT:
		svc_putu32(&rqstp->rq_res.head[0], rp->c_replstat);
		rtn = RC_REPLY;
		break;
	case RC_REPLBUFF:
		if (!nfsd_cache_append(rqstp, &rp->c_replvec))
			goto out;	/* should not happen */
		rtn = RC_REPLY;
		break;
	default:
		printk(KERN_WARNING "nfsd: bad repcache type %d\n", rp->c_type);
		nfsd_reply_cache_free_locked(rp);
	}

	goto out;
}
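
/*
 * A sketch (not verbatim kernel code) of how the caller, nfsd_dispatch(),
 * is expected to act on the value returned above:
 *
 *	switch (nfsd_cache_lookup(rqstp)) {
 *	case RC_DOIT:	// run the procedure, then call nfsd_cache_update()
 *		break;
 *	case RC_REPLY:	// cached reply already copied into rq_res; send it
 *		return 1;
 *	case RC_DROPIT:	// duplicate still in progress; drop silently
 *		return 0;
 *	}
 */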

/*
 * Update a cache entry. This is called from nfsd_dispatch when
 * the procedure has been executed and the complete reply is in
 * rqstp->rq_res.
 *
 * We're copying around data here rather than swapping buffers because
 * the toplevel loop requires max-sized buffers, which would be a waste
 * of memory for a cache with a max reply size of 100 bytes (diropokres).
 *
 * If we should start to use different types of cache entries tailored
 * specifically for attrstat and fh's, we may save even more space.
 *
 * Also note that a cachetype of RC_NOCACHE can legally be passed when
 * nfsd failed to encode a reply that otherwise would have been cached.
 * In this case, nfsd_cache_update is called with statp == NULL.
 */
void
nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
{
	struct svc_cacherep *rp = rqstp->rq_cacherep;
	struct kvec	*resv = &rqstp->rq_res.head[0], *cachv;
	int		len;
	size_t		bufsize = 0;

	if (!rp)
		return;

	len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
	len >>= 2;

	/* Don't cache excessive amounts of data and XDR failures */
	if (!statp || len > (256 >> 2)) {
		nfsd_reply_cache_free(rp);
		return;
	}

	switch (cachetype) {
	case RC_REPLSTAT:
		if (len != 1)
			printk("nfsd: RC_REPLSTAT/reply len %d!\n", len);
		rp->c_replstat = *statp;
		break;
	case RC_REPLBUFF:
		cachv = &rp->c_replvec;
		bufsize = len << 2;
		cachv->iov_base = kmalloc(bufsize, GFP_KERNEL);
		if (!cachv->iov_base) {
			nfsd_reply_cache_free(rp);
			return;
		}
		cachv->iov_len = bufsize;
		memcpy(cachv->iov_base, statp, bufsize);
		break;
	case RC_NOCACHE:
		nfsd_reply_cache_free(rp);
		return;
	}
	spin_lock(&cache_lock);
	drc_mem_usage += bufsize;
	lru_put_end(rp);
	rp->c_secure = rqstp->rq_secure;
	rp->c_type = cachetype;
	rp->c_state = RC_DONE;
	spin_unlock(&cache_lock);
	return;
}

/*
 * Copy cached reply to current reply buffer. Should always fit.
 * FIXME as reply is in a page, we should just attach the page, and
 * keep a refcount....
 */
static int
nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
{
	struct kvec	*vec = &rqstp->rq_res.head[0];

	if (vec->iov_len + data->iov_len > PAGE_SIZE) {
		printk(KERN_WARNING "nfsd: cached reply too large (%zd).\n",
				data->iov_len);
		return 0;
	}
	memcpy((char*)vec->iov_base + vec->iov_len, data->iov_base, data->iov_len);
	vec->iov_len += data->iov_len;
	return 1;
}

/*
 * Note that fields may be added, removed or reordered in the future. Programs
 * scraping this file for info should test the labels to ensure they're
 * getting the correct field.
 */
static int nfsd_reply_cache_stats_show(struct seq_file *m, void *v)
{
	spin_lock(&cache_lock);
	seq_printf(m, "max entries:           %u\n", max_drc_entries);
	seq_printf(m, "num entries:           %u\n", num_drc_entries);
	seq_printf(m, "hash buckets:          %u\n", 1 << maskbits);
	seq_printf(m, "mem usage:             %u\n", drc_mem_usage);
	seq_printf(m, "cache hits:            %u\n", nfsdstats.rchits);
	seq_printf(m, "cache misses:          %u\n", nfsdstats.rcmisses);
	seq_printf(m, "not cached:            %u\n", nfsdstats.rcnocache);
	seq_printf(m, "payload misses:        %u\n", payload_misses);
	seq_printf(m, "longest chain len:     %u\n", longest_chain);
	seq_printf(m, "cachesize at longest:  %u\n", longest_chain_cachesize);
	spin_unlock(&cache_lock);
	return 0;
}
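
/*
 * This show routine is typically wired up through the nfsd filesystem
 * (commonly mounted at /proc/fs/nfsd) as the reply_cache_stats file.
 * Hypothetical sample output, with made-up numbers:
 *
 *	max entries:           65536
 *	num entries:           1034
 *	hash buckets:          1024
 *	mem usage:             261944
 *	cache hits:            79572
 *	cache misses:          1203
 *	not cached:            114533
 *	payload misses:        0
 *	longest chain len:     3
 *	cachesize at longest:  977
 */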

int nfsd_reply_cache_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, nfsd_reply_cache_stats_show, NULL);
}
v3.5.6
/*
 * Request reply cache. This is currently a global cache, but this may
 * change in the future and be a per-client cache.
 *
 * This code is heavily inspired by the 44BSD implementation, although
 * it does things a bit differently.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/slab.h>

#include "nfsd.h"
#include "cache.h"

/* Size of reply cache. Common values are:
 * 4.3BSD:	128
 * 4.4BSD:	256
 * Solaris2:	1024
 * DEC Unix:	512-4096
 */
#define CACHESIZE		1024
#define HASHSIZE		64

static struct hlist_head *	cache_hash;
static struct list_head	lru_head;
static int			cache_disabled = 1;

/*
 * Calculate the hash index from an XID.
 */
static inline u32 request_hash(u32 xid)
{
	u32 h = xid;
	h ^= (xid >> 24);
	return h & (HASHSIZE-1);
}
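
/*
 * Example: xid 0xdeadbeef gives h = 0xdeadbeef ^ 0xde, whose low byte is
 * 0xef ^ 0xde == 0x31, so the bucket is 0x31 & 63 == 49. XORing in the
 * top byte folds some high-order xid bits into the 6-bit bucket index.
 */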

static int	nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);

/*
 * locking for the reply cache:
 * A cache entry is "single use" if c_state == RC_INPROG
 * Otherwise, when accessing _prev or _next, the lock must be held.
 */
static DEFINE_SPINLOCK(cache_lock);

int nfsd_reply_cache_init(void)
{
	struct svc_cacherep	*rp;
	int			i;

	INIT_LIST_HEAD(&lru_head);
	i = CACHESIZE;
	while (i) {
		rp = kmalloc(sizeof(*rp), GFP_KERNEL);
		if (!rp)
			goto out_nomem;
		list_add(&rp->c_lru, &lru_head);
		rp->c_state = RC_UNUSED;
		rp->c_type = RC_NOCACHE;
		INIT_HLIST_NODE(&rp->c_hash);
		i--;
	}

	cache_hash = kcalloc(HASHSIZE, sizeof(struct hlist_head), GFP_KERNEL);
	if (!cache_hash)
		goto out_nomem;

	cache_disabled = 0;
	return 0;
out_nomem:
	printk(KERN_ERR "nfsd: failed to allocate reply cache\n");
	nfsd_reply_cache_shutdown();
	return -ENOMEM;
}

void nfsd_reply_cache_shutdown(void)
{
	struct svc_cacherep	*rp;

	while (!list_empty(&lru_head)) {
		rp = list_entry(lru_head.next, struct svc_cacherep, c_lru);
		if (rp->c_state == RC_DONE && rp->c_type == RC_REPLBUFF)
			kfree(rp->c_replvec.iov_base);
		list_del(&rp->c_lru);
		kfree(rp);
	}

	cache_disabled = 1;

	kfree(cache_hash);
	cache_hash = NULL;
}

/*
 * Move cache entry to end of LRU list
 */
static void
lru_put_end(struct svc_cacherep *rp)
{
	list_move_tail(&rp->c_lru, &lru_head);
}

/*
 * Move a cache entry from one hash list to another
 */
static void
hash_refile(struct svc_cacherep *rp)
{
	hlist_del_init(&rp->c_hash);
	hlist_add_head(&rp->c_hash, cache_hash + request_hash(rp->c_xid));
}

/*
 * Try to find an entry matching the current call in the cache. When none
 * is found, we grab the oldest unlocked entry off the LRU list.
 * Note that no operation within the loop may sleep.
 */
int
nfsd_cache_lookup(struct svc_rqst *rqstp)
{
	struct hlist_node	*hn;
	struct hlist_head	*rh;
	struct svc_cacherep	*rp;
	__be32			xid = rqstp->rq_xid;
	u32			proto = rqstp->rq_prot,
				vers = rqstp->rq_vers,
				proc = rqstp->rq_proc;
	unsigned long		age;
	int type = rqstp->rq_cachetype;
	int rtn;

	rqstp->rq_cacherep = NULL;
	if (cache_disabled || type == RC_NOCACHE) {
		nfsdstats.rcnocache++;
		return RC_DOIT;
	}

	spin_lock(&cache_lock);
	rtn = RC_DOIT;

	rh = &cache_hash[request_hash(xid)];
	hlist_for_each_entry(rp, hn, rh, c_hash) {
		if (rp->c_state != RC_UNUSED &&
		    xid == rp->c_xid && proc == rp->c_proc &&
		    proto == rp->c_prot && vers == rp->c_vers &&
		    time_before(jiffies, rp->c_timestamp + 120*HZ) &&
		    memcmp((char*)&rqstp->rq_addr, (char*)&rp->c_addr, sizeof(rp->c_addr))==0) {
			nfsdstats.rchits++;
			goto found_entry;
		}
	}
	nfsdstats.rcmisses++;

	/* This loop shouldn't take more than a few iterations normally */
	{
	int	safe = 0;
	list_for_each_entry(rp, &lru_head, c_lru) {
		if (rp->c_state != RC_INPROG)
			break;
		if (safe++ > CACHESIZE) {
			printk("nfsd: loop in repcache LRU list\n");
			cache_disabled = 1;
			goto out;
		}
	}
	}

	/* All entries on the LRU are in-progress. This should not happen. */
	if (&rp->c_lru == &lru_head) {
		static int	complaints;

		printk(KERN_WARNING "nfsd: all repcache entries locked!\n");
		if (++complaints > 5) {
			printk(KERN_WARNING "nfsd: disabling repcache.\n");
			cache_disabled = 1;
		}
		goto out;
	}

	rqstp->rq_cacherep = rp;
	rp->c_state = RC_INPROG;
	rp->c_xid = xid;
	rp->c_proc = proc;
	memcpy(&rp->c_addr, svc_addr_in(rqstp), sizeof(rp->c_addr));
	rp->c_prot = proto;
	rp->c_vers = vers;
	rp->c_timestamp = jiffies;

	hash_refile(rp);

	/* release any buffer */
	if (rp->c_type == RC_REPLBUFF) {
		kfree(rp->c_replvec.iov_base);
		rp->c_replvec.iov_base = NULL;
	}
	rp->c_type = RC_NOCACHE;
 out:
	spin_unlock(&cache_lock);
	return rtn;

found_entry:
	/* We found a matching entry which is either in progress or done. */
	age = jiffies - rp->c_timestamp;
	rp->c_timestamp = jiffies;
	lru_put_end(rp);

	rtn = RC_DROPIT;
	/* Request being processed or excessive rexmits */
	if (rp->c_state == RC_INPROG || age < RC_DELAY)
		goto out;

	/* From the hall of fame of impractical attacks:
	 * Is this a user who tries to snoop on the cache? */
	rtn = RC_DOIT;
	if (!rqstp->rq_secure && rp->c_secure)
		goto out;

	/* Compose RPC reply header */
	switch (rp->c_type) {
	case RC_NOCACHE:
		break;
	case RC_REPLSTAT:
		svc_putu32(&rqstp->rq_res.head[0], rp->c_replstat);
		rtn = RC_REPLY;
		break;
	case RC_REPLBUFF:
		if (!nfsd_cache_append(rqstp, &rp->c_replvec))
			goto out;	/* should not happen */
		rtn = RC_REPLY;
		break;
	default:
		printk(KERN_WARNING "nfsd: bad repcache type %d\n", rp->c_type);
		rp->c_state = RC_UNUSED;
	}

	goto out;
}

/*
 * Update a cache entry. This is called from nfsd_dispatch when
 * the procedure has been executed and the complete reply is in
 * rqstp->rq_res.
 *
 * We're copying around data here rather than swapping buffers because
 * the toplevel loop requires max-sized buffers, which would be a waste
 * of memory for a cache with a max reply size of 100 bytes (diropokres).
 *
 * If we should start to use different types of cache entries tailored
 * specifically for attrstat and fh's, we may save even more space.
 *
 * Also note that a cachetype of RC_NOCACHE can legally be passed when
 * nfsd failed to encode a reply that otherwise would have been cached.
 * In this case, nfsd_cache_update is called with statp == NULL.
 */
void
nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
{
	struct svc_cacherep *rp;
	struct kvec	*resv = &rqstp->rq_res.head[0], *cachv;
	int		len;

	if (!(rp = rqstp->rq_cacherep) || cache_disabled)
		return;

	len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
	len >>= 2;

	/* Don't cache excessive amounts of data and XDR failures */
	if (!statp || len > (256 >> 2)) {
		rp->c_state = RC_UNUSED;
		return;
	}

	switch (cachetype) {
	case RC_REPLSTAT:
		if (len != 1)
			printk("nfsd: RC_REPLSTAT/reply len %d!\n", len);
		rp->c_replstat = *statp;
		break;
	case RC_REPLBUFF:
		cachv = &rp->c_replvec;
		cachv->iov_base = kmalloc(len << 2, GFP_KERNEL);
		if (!cachv->iov_base) {
			spin_lock(&cache_lock);
			rp->c_state = RC_UNUSED;
			spin_unlock(&cache_lock);
			return;
		}
		cachv->iov_len = len << 2;
		memcpy(cachv->iov_base, statp, len << 2);
		break;
	}
	spin_lock(&cache_lock);
	lru_put_end(rp);
	rp->c_secure = rqstp->rq_secure;
	rp->c_type = cachetype;
	rp->c_state = RC_DONE;
	rp->c_timestamp = jiffies;
	spin_unlock(&cache_lock);
	return;
}

/*
 * Copy cached reply to current reply buffer. Should always fit.
 * FIXME as reply is in a page, we should just attach the page, and
 * keep a refcount....
 */
static int
nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
{
	struct kvec	*vec = &rqstp->rq_res.head[0];

	if (vec->iov_len + data->iov_len > PAGE_SIZE) {
		printk(KERN_WARNING "nfsd: cached reply too large (%zd).\n",
				data->iov_len);
		return 0;
	}
	memcpy((char*)vec->iov_base + vec->iov_len, data->iov_base, data->iov_len);
	vec->iov_len += data->iov_len;
	return 1;
}