v3.5.6
/*
 * Request reply cache. This is currently a global cache, but this may
 * change in the future and be a per-client cache.
 *
 * This code is heavily inspired by the 44BSD implementation, although
 * it does things a bit differently.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/slab.h>

#include "nfsd.h"
#include "cache.h"

/* Size of reply cache. Common values are:
 * 4.3BSD:	128
 * 4.4BSD:	256
 * Solaris2:	1024
 * DEC Unix:	512-4096
 */
#define CACHESIZE		1024
#define HASHSIZE		64

static struct hlist_head *	cache_hash;
static struct list_head	lru_head;
static int			cache_disabled = 1;

/*
 * Calculate the hash index from an XID.
 */
static inline u32 request_hash(u32 xid)
{
	u32 h = xid;
	h ^= (xid >> 24);
	return h & (HASHSIZE-1);
}
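
The fold above XORs the top byte of the XID into the low bits before masking with HASHSIZE-1, so clients whose XIDs vary mainly in the high byte still spread across buckets. A minimal userspace sketch of the same computation (request_hash_demo and DEMO_HASHSIZE are illustrative stand-ins, not part of this file):

#include <stdint.h>
#include <stdio.h>

#define DEMO_HASHSIZE 64			/* mirrors HASHSIZE above */

/* Userspace copy of the XID fold in request_hash(). */
static uint32_t request_hash_demo(uint32_t xid)
{
	uint32_t h = xid;
	h ^= (xid >> 24);
	return h & (DEMO_HASHSIZE - 1);
}

int main(void)
{
	/* XIDs differing only in the top byte still land in distinct buckets. */
	for (uint32_t i = 0; i < 4; i++)
		printf("xid %#010x -> bucket %u\n",
		       i << 24, request_hash_demo(i << 24));
	return 0;
}
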

static int	nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);

/*
 * Locking for the reply cache:
 * A cache entry is "single use" if c_state == RC_INPROG.
 * Otherwise, when accessing _prev or _next, the lock must be held.
 */
static DEFINE_SPINLOCK(cache_lock);

int nfsd_reply_cache_init(void)
{
	struct svc_cacherep	*rp;
	int			i;

	INIT_LIST_HEAD(&lru_head);
	i = CACHESIZE;
	while (i) {
		rp = kmalloc(sizeof(*rp), GFP_KERNEL);
		if (!rp)
			goto out_nomem;
		list_add(&rp->c_lru, &lru_head);
		rp->c_state = RC_UNUSED;
		rp->c_type = RC_NOCACHE;
		INIT_HLIST_NODE(&rp->c_hash);
		i--;
	}

	cache_hash = kcalloc(HASHSIZE, sizeof(struct hlist_head), GFP_KERNEL);
	if (!cache_hash)
		goto out_nomem;

	cache_disabled = 0;
	return 0;
out_nomem:
	printk(KERN_ERR "nfsd: failed to allocate reply cache\n");
	nfsd_reply_cache_shutdown();
	return -ENOMEM;
}
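
All CACHESIZE entries are allocated once at init time and threaded onto the LRU, so the lookup path never has to allocate under the spinlock; on partial failure the goto unwinds through nfsd_reply_cache_shutdown(). The same preallocate-then-unwind pattern in a userspace sketch (prealloc_pool and struct item are hypothetical names):

#include <stdlib.h>

struct item {
	struct item	*next;
};

/* Build a free list of n items up front; on allocation failure,
 * release everything already allocated and report the error. */
static struct item *prealloc_pool(int n)
{
	struct item *head = NULL, *it;

	while (n--) {
		it = malloc(sizeof(*it));
		if (!it) {
			while (head) {		/* unwind the partial pool */
				it = head;
				head = head->next;
				free(it);
			}
			return NULL;
		}
		it->next = head;
		head = it;
	}
	return head;
}

int main(void)
{
	struct item *pool = prealloc_pool(1024);	/* cf. CACHESIZE */

	while (pool) {					/* cf. shutdown */
		struct item *it = pool;
		pool = pool->next;
		free(it);
	}
	return 0;
}
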

void nfsd_reply_cache_shutdown(void)
{
	struct svc_cacherep	*rp;

	while (!list_empty(&lru_head)) {
		rp = list_entry(lru_head.next, struct svc_cacherep, c_lru);
		if (rp->c_state == RC_DONE && rp->c_type == RC_REPLBUFF)
			kfree(rp->c_replvec.iov_base);
		list_del(&rp->c_lru);
		kfree(rp);
	}

	cache_disabled = 1;

	kfree(cache_hash);
	cache_hash = NULL;
}

/*
 * Move cache entry to end of LRU list
 */
static void
lru_put_end(struct svc_cacherep *rp)
{
	list_move_tail(&rp->c_lru, &lru_head);
}

/*
 * Move a cache entry from one hash list to another
 */
static void
hash_refile(struct svc_cacherep *rp)
{
	hlist_del_init(&rp->c_hash);
	hlist_add_head(&rp->c_hash, cache_hash + request_hash(rp->c_xid));
}

/*
 * Try to find an entry matching the current call in the cache. When none
 * is found, we grab the oldest unlocked entry off the LRU list.
 * Note that no operation within the loop may sleep.
 */
int
nfsd_cache_lookup(struct svc_rqst *rqstp)
{
	struct hlist_node	*hn;
	struct hlist_head	*rh;
	struct svc_cacherep	*rp;
	__be32			xid = rqstp->rq_xid;
	u32			proto = rqstp->rq_prot,
				vers = rqstp->rq_vers,
				proc = rqstp->rq_proc;
	unsigned long		age;
	int type = rqstp->rq_cachetype;
	int rtn;

	rqstp->rq_cacherep = NULL;
	if (cache_disabled || type == RC_NOCACHE) {
		nfsdstats.rcnocache++;
		return RC_DOIT;
	}

	spin_lock(&cache_lock);
	rtn = RC_DOIT;

	rh = &cache_hash[request_hash(xid)];
	hlist_for_each_entry(rp, hn, rh, c_hash) {
		if (rp->c_state != RC_UNUSED &&
		    xid == rp->c_xid && proc == rp->c_proc &&
		    proto == rp->c_prot && vers == rp->c_vers &&
		    time_before(jiffies, rp->c_timestamp + 120*HZ) &&
		    memcmp((char *)&rqstp->rq_addr, (char *)&rp->c_addr, sizeof(rp->c_addr)) == 0) {
			nfsdstats.rchits++;
			goto found_entry;
		}
	}
	nfsdstats.rcmisses++;

	/* This loop shouldn't take more than a few iterations normally */
	{
		int	safe = 0;

		list_for_each_entry(rp, &lru_head, c_lru) {
			if (rp->c_state != RC_INPROG)
				break;
			if (safe++ > CACHESIZE) {
				printk(KERN_WARNING "nfsd: loop in repcache LRU list\n");
				cache_disabled = 1;
				goto out;
			}
		}
	}

	/* All entries on the LRU are in-progress. This should not happen */
	if (&rp->c_lru == &lru_head) {
		static int	complaints;

		printk(KERN_WARNING "nfsd: all repcache entries locked!\n");
		if (++complaints > 5) {
			printk(KERN_WARNING "nfsd: disabling repcache.\n");
			cache_disabled = 1;
		}
		goto out;
	}

	rqstp->rq_cacherep = rp;
	rp->c_state = RC_INPROG;
	rp->c_xid = xid;
	rp->c_proc = proc;
	memcpy(&rp->c_addr, svc_addr_in(rqstp), sizeof(rp->c_addr));
	rp->c_prot = proto;
	rp->c_vers = vers;
	rp->c_timestamp = jiffies;

	hash_refile(rp);

	/* release any buffer */
	if (rp->c_type == RC_REPLBUFF) {
		kfree(rp->c_replvec.iov_base);
		rp->c_replvec.iov_base = NULL;
	}
	rp->c_type = RC_NOCACHE;
 out:
	spin_unlock(&cache_lock);
	return rtn;

found_entry:
	/* We found a matching entry which is either in progress or done. */
	age = jiffies - rp->c_timestamp;
	rp->c_timestamp = jiffies;
	lru_put_end(rp);

	rtn = RC_DROPIT;
	/* Request being processed or excessive rexmits */
	if (rp->c_state == RC_INPROG || age < RC_DELAY)
		goto out;

	/* From the hall of fame of impractical attacks:
	 * Is this a user who tries to snoop on the cache? */
	rtn = RC_DOIT;
	if (!rqstp->rq_secure && rp->c_secure)
		goto out;

	/* Compose RPC reply header */
	switch (rp->c_type) {
	case RC_NOCACHE:
		break;
	case RC_REPLSTAT:
		svc_putu32(&rqstp->rq_res.head[0], rp->c_replstat);
		rtn = RC_REPLY;
		break;
	case RC_REPLBUFF:
		if (!nfsd_cache_append(rqstp, &rp->c_replvec))
			goto out;	/* should not happen */
		rtn = RC_REPLY;
		break;
	default:
		printk(KERN_WARNING "nfsd: bad repcache type %d\n", rp->c_type);
		rp->c_state = RC_UNUSED;
	}

	goto out;
}
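
The found_entry path above boils down to a three-way decision: drop duplicates that are still in progress or retransmitted within RC_DELAY, replay a cached reply when one exists, and fall back to re-executing the call otherwise. A hypothetical userspace model of just that decision (names, types, and the tick values are illustrative; the rq_secure check is omitted):

#include <stdbool.h>
#include <stdio.h>

enum action { DO_IT, DROP_IT, REPLY };

/* age and rc_delay are in clock ticks, like jiffies above. */
static enum action duplicate_action(bool in_progress, unsigned long age,
				    unsigned long rc_delay, bool have_reply)
{
	/* Still being processed, or retransmitted too quickly:
	 * drop silently and let the client time out and retry. */
	if (in_progress || age < rc_delay)
		return DROP_IT;
	/* A cached reply exists: send it without redoing the call. */
	if (have_reply)
		return REPLY;
	/* Entry matched but nothing was cached (RC_NOCACHE). */
	return DO_IT;
}

int main(void)
{
	printf("%d\n", duplicate_action(true,  0,   50, true));	/* 1: drop */
	printf("%d\n", duplicate_action(false, 500, 50, true));	/* 2: replay */
	printf("%d\n", duplicate_action(false, 500, 50, false));	/* 0: redo */
	return 0;
}
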

/*
 * Update a cache entry. This is called from nfsd_dispatch when
 * the procedure has been executed and the complete reply is in
 * rqstp->rq_res.
 *
 * We're copying around data here rather than swapping buffers because
 * the toplevel loop requires max-sized buffers, which would be a waste
 * of memory for a cache with a max reply size of 100 bytes (diropokres).
 *
 * If we should start to use different types of cache entries tailored
 * specifically for attrstat and fh's, we may save even more space.
 *
 * Also note that a cachetype of RC_NOCACHE can legally be passed when
 * nfsd failed to encode a reply that otherwise would have been cached.
 * In this case, nfsd_cache_update is called with statp == NULL.
 */
void
nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
{
	struct svc_cacherep *rp;
	struct kvec	*resv = &rqstp->rq_res.head[0], *cachv;
	int		len;

	if (!(rp = rqstp->rq_cacherep) || cache_disabled)
		return;

	len = resv->iov_len - ((char *)statp - (char *)resv->iov_base);
	len >>= 2;

	/* Don't cache excessive amounts of data or XDR failures */
	if (!statp || len > (256 >> 2)) {
		rp->c_state = RC_UNUSED;
		return;
	}

	switch (cachetype) {
	case RC_REPLSTAT:
		if (len != 1)
			printk(KERN_WARNING "nfsd: RC_REPLSTAT/reply len %d!\n", len);
		rp->c_replstat = *statp;
		break;
	case RC_REPLBUFF:
		cachv = &rp->c_replvec;
		cachv->iov_base = kmalloc(len << 2, GFP_KERNEL);
		if (!cachv->iov_base) {
			spin_lock(&cache_lock);
			rp->c_state = RC_UNUSED;
			spin_unlock(&cache_lock);
			return;
		}
		cachv->iov_len = len << 2;
		memcpy(cachv->iov_base, statp, len << 2);
		break;
	}
	spin_lock(&cache_lock);
	lru_put_end(rp);
	rp->c_secure = rqstp->rq_secure;
	rp->c_type = cachetype;
	rp->c_state = RC_DONE;
	rp->c_timestamp = jiffies;
	spin_unlock(&cache_lock);
	return;
}
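
The length arithmetic above works in 32-bit XDR words: len counts the words from statp to the end of the head iovec, and the 256 >> 2 comparison caps cacheable replies at 256 bytes (64 words). A worked example with made-up numbers:

#include <stdio.h>

int main(void)
{
	/* Hypothetical reply: the head iovec holds 200 bytes in total
	 * and statp points 72 bytes into it, so 128 bytes follow it. */
	unsigned long iov_len = 200, statp_offset = 72;
	int len = (iov_len - statp_offset) >> 2;	/* bytes -> words */

	printf("len = %d words = %d bytes; cacheable: %s\n",
	       len, len << 2, len <= (256 >> 2) ? "yes" : "no");
	/* Prints: len = 32 words = 128 bytes; cacheable: yes */
	return 0;
}
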

/*
 * Copy cached reply to current reply buffer. Should always fit.
 * FIXME as reply is in a page, we should just attach the page, and
 * keep a refcount....
 */
static int
nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
{
	struct kvec	*vec = &rqstp->rq_res.head[0];

	if (vec->iov_len + data->iov_len > PAGE_SIZE) {
		printk(KERN_WARNING "nfsd: cached reply too large (%zd).\n",
				data->iov_len);
		return 0;
	}
	memcpy((char *)vec->iov_base + vec->iov_len, data->iov_base, data->iov_len);
	vec->iov_len += data->iov_len;
	return 1;
}
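
The append is all-or-nothing: rather than truncating, it refuses any copy that would overflow the page-sized head buffer. The same guard in a generic userspace form (buf_append is a hypothetical name, plain byte buffers standing in for kvecs):

#include <stddef.h>
#include <string.h>

/* Copy len bytes of src into buf only if the whole thing fits;
 * returns 1 on success, 0 if the buffer would overflow. */
static int buf_append(char *buf, size_t *used, size_t cap,
		      const void *src, size_t len)
{
	if (*used + len > cap)
		return 0;	/* caller treats this as "should not happen" */
	memcpy(buf + *used, src, len);
	*used += len;
	return 1;
}

int main(void)
{
	char page[4096];	/* cf. the PAGE_SIZE bound above */
	size_t used = 0;

	return buf_append(page, &used, sizeof(page), "reply", 5) ? 0 : 1;
}
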