/*
 * V9FS cache definitions.
 *
 *  Copyright (C) 2009 by Abhishek Kulkarni <adkulkar@umail.iu.edu>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2
 *  as published by the Free Software Foundation.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to:
 *  Free Software Foundation
 *  51 Franklin Street, Fifth Floor
 *  Boston, MA  02111-1301  USA
 *
 */

#include <linux/jiffies.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <net/9p/9p.h>

#include "v9fs.h"
#include "cache.h"

#define CACHETAG_LEN  11

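/*
 * FS-Cache netfs definition for 9p.  Its primary index is used below as
 * the parent of every per-session cookie.
 */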
struct fscache_netfs v9fs_cache_netfs = {
	.name 		= "9p",
	.version 	= 0,
};

/**
 * v9fs_random_cachetag - Generate a random tag to be associated
 *			  with a new cache session.
 *
 * The value of jiffies is used for a fairly random cache tag.
 */

static
int v9fs_random_cachetag(struct v9fs_session_info *v9ses)
{
	v9ses->cachetag = kmalloc(CACHETAG_LEN, GFP_KERNEL);
	if (!v9ses->cachetag)
		return -ENOMEM;

	return scnprintf(v9ses->cachetag, CACHETAG_LEN, "%lu", jiffies);
}

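/*
 * Supply the session's cache tag as the index key for the session
 * cookie.  Returns the key length, or 0 if the tag does not fit into
 * the buffer provided by FS-Cache.
 */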
static uint16_t v9fs_cache_session_get_key(const void *cookie_netfs_data,
					   void *buffer, uint16_t bufmax)
{
	struct v9fs_session_info *v9ses;
	uint16_t klen = 0;

	v9ses = (struct v9fs_session_info *)cookie_netfs_data;
	p9_debug(P9_DEBUG_FSC, "session %p buf %p size %u\n",
		 v9ses, buffer, bufmax);

	if (v9ses->cachetag)
		klen = strlen(v9ses->cachetag);

	if (klen > bufmax)
		return 0;

	memcpy(buffer, v9ses->cachetag, klen);
	p9_debug(P9_DEBUG_FSC, "cache session tag %s\n", v9ses->cachetag);
	return klen;
}

const struct fscache_cookie_def v9fs_cache_session_index_def = {
	.name		= "9P.session",
	.type		= FSCACHE_COOKIE_TYPE_INDEX,
	.get_key	= v9fs_cache_session_get_key,
};

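/*
 * Acquire an index cookie for a 9p session, parented on the netfs
 * primary index.  A random cache tag is generated first if none was
 * specified for this session.
 */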
void v9fs_cache_session_get_cookie(struct v9fs_session_info *v9ses)
{
	/* If no cache session tag was specified, we generate a random one. */
	if (!v9ses->cachetag)
		v9fs_random_cachetag(v9ses);

	v9ses->fscache = fscache_acquire_cookie(v9fs_cache_netfs.primary_index,
						&v9fs_cache_session_index_def,
						v9ses);
	p9_debug(P9_DEBUG_FSC, "session %p get cookie %p\n",
		 v9ses, v9ses->fscache);
}

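/*
 * Relinquish the session cookie without retiring it, so the cache
 * object indexed under this session tag is preserved.
 */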
void v9fs_cache_session_put_cookie(struct v9fs_session_info *v9ses)
{
	p9_debug(P9_DEBUG_FSC, "session %p put cookie %p\n",
		 v9ses, v9ses->fscache);
	fscache_relinquish_cookie(v9ses->fscache, 0);
	v9ses->fscache = NULL;
}


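/*
 * The qid path uniquely identifies the file on the server, so it is
 * used as the index key for the per-inode cookie.
 */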
static uint16_t v9fs_cache_inode_get_key(const void *cookie_netfs_data,
					 void *buffer, uint16_t bufmax)
{
	const struct v9fs_inode *v9inode = cookie_netfs_data;
	memcpy(buffer, &v9inode->qid.path, sizeof(v9inode->qid.path));
	p9_debug(P9_DEBUG_FSC, "inode %p get key %llu\n",
		 &v9inode->vfs_inode, v9inode->qid.path);
	return sizeof(v9inode->qid.path);
}

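/* Report the inode's current size to FS-Cache. */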
static void v9fs_cache_inode_get_attr(const void *cookie_netfs_data,
				      uint64_t *size)
{
	const struct v9fs_inode *v9inode = cookie_netfs_data;
	*size = i_size_read(&v9inode->vfs_inode);

	p9_debug(P9_DEBUG_FSC, "inode %p get attr %llu\n",
		 &v9inode->vfs_inode, *size);
}

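/*
 * Store the qid version as auxiliary data so that stale cache objects
 * can be detected by v9fs_cache_inode_check_aux() below.
 */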
static uint16_t v9fs_cache_inode_get_aux(const void *cookie_netfs_data,
					 void *buffer, uint16_t buflen)
{
	const struct v9fs_inode *v9inode = cookie_netfs_data;
	memcpy(buffer, &v9inode->qid.version, sizeof(v9inode->qid.version));
	p9_debug(P9_DEBUG_FSC, "inode %p get aux %u\n",
		 &v9inode->vfs_inode, v9inode->qid.version);
	return sizeof(v9inode->qid.version);
}

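/*
 * Compare the auxiliary data held by the cache against the inode's
 * current qid version; any mismatch means the cached data is obsolete.
 */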
static enum
fscache_checkaux v9fs_cache_inode_check_aux(void *cookie_netfs_data,
					    const void *buffer,
					    uint16_t buflen)
{
	const struct v9fs_inode *v9inode = cookie_netfs_data;

	if (buflen != sizeof(v9inode->qid.version))
		return FSCACHE_CHECKAUX_OBSOLETE;

	if (memcmp(buffer, &v9inode->qid.version,
		   sizeof(v9inode->qid.version)))
		return FSCACHE_CHECKAUX_OBSOLETE;

	return FSCACHE_CHECKAUX_OKAY;
}

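/*
 * Called by FS-Cache when the cache object is withdrawn: walk the
 * inode's mapping and clear PG_fscache on every page found.
 */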
static void v9fs_cache_inode_now_uncached(void *cookie_netfs_data)
{
	struct v9fs_inode *v9inode = cookie_netfs_data;
	struct pagevec pvec;
	pgoff_t first;
	int loop, nr_pages;

	pagevec_init(&pvec, 0);
	first = 0;

	for (;;) {
		nr_pages = pagevec_lookup(&pvec, v9inode->vfs_inode.i_mapping,
					  first,
					  PAGEVEC_SIZE - pagevec_count(&pvec));
		if (!nr_pages)
			break;

		for (loop = 0; loop < nr_pages; loop++)
			ClearPageFsCache(pvec.pages[loop]);

		first = pvec.pages[nr_pages - 1]->index + 1;

		pvec.nr = nr_pages;
		pagevec_release(&pvec);
		cond_resched();
	}
}

const struct fscache_cookie_def v9fs_cache_inode_index_def = {
	.name		= "9p.inode",
	.type		= FSCACHE_COOKIE_TYPE_DATAFILE,
	.get_key	= v9fs_cache_inode_get_key,
	.get_attr	= v9fs_cache_inode_get_attr,
	.get_aux	= v9fs_cache_inode_get_aux,
	.check_aux	= v9fs_cache_inode_check_aux,
	.now_uncached	= v9fs_cache_inode_now_uncached,
};

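/*
 * Acquire a data-file cookie for a regular file's inode, indexed under
 * the session cookie.  Non-regular files are not cached.
 */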
void v9fs_cache_inode_get_cookie(struct inode *inode)
{
	struct v9fs_inode *v9inode;
	struct v9fs_session_info *v9ses;

	if (!S_ISREG(inode->i_mode))
		return;

	v9inode = V9FS_I(inode);
	if (v9inode->fscache)
		return;

	v9ses = v9fs_inode2v9ses(inode);
	v9inode->fscache = fscache_acquire_cookie(v9ses->fscache,
						  &v9fs_cache_inode_index_def,
						  v9inode);

	p9_debug(P9_DEBUG_FSC, "inode %p get cookie %p\n",
		 inode, v9inode->fscache);
}

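/*
 * Release the inode cookie without retiring the cache object, leaving
 * any cached data in place.
 */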
void v9fs_cache_inode_put_cookie(struct inode *inode)
{
	struct v9fs_inode *v9inode = V9FS_I(inode);

	if (!v9inode->fscache)
		return;
	p9_debug(P9_DEBUG_FSC, "inode %p put cookie %p\n",
		 inode, v9inode->fscache);

	fscache_relinquish_cookie(v9inode->fscache, 0);
	v9inode->fscache = NULL;
}

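/*
 * Relinquish the inode cookie and retire the cache object, discarding
 * whatever was cached for this inode.
 */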
void v9fs_cache_inode_flush_cookie(struct inode *inode)
{
	struct v9fs_inode *v9inode = V9FS_I(inode);

	if (!v9inode->fscache)
		return;
	p9_debug(P9_DEBUG_FSC, "inode %p flush cookie %p\n",
		 inode, v9inode->fscache);

	fscache_relinquish_cookie(v9inode->fscache, 1);
	v9inode->fscache = NULL;
}

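/*
 * Adjust caching when a file is opened: a writable open flushes
 * (retires) the cookie, while a read-only open (re)acquires one.
 */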
void v9fs_cache_inode_set_cookie(struct inode *inode, struct file *filp)
{
	struct v9fs_inode *v9inode = V9FS_I(inode);
	struct p9_fid *fid;

	if (!v9inode->fscache)
		return;

	spin_lock(&v9inode->fscache_lock);
	fid = filp->private_data;
	if ((filp->f_flags & O_ACCMODE) != O_RDONLY)
		v9fs_cache_inode_flush_cookie(inode);
	else
		v9fs_cache_inode_get_cookie(inode);

	spin_unlock(&v9inode->fscache_lock);
}

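/*
 * Replace the inode's cookie with a fresh one, retiring the old cache
 * object and discarding its data.
 */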
void v9fs_cache_inode_reset_cookie(struct inode *inode)
{
	struct v9fs_inode *v9inode = V9FS_I(inode);
	struct v9fs_session_info *v9ses;
	struct fscache_cookie *old;

	if (!v9inode->fscache)
		return;

	old = v9inode->fscache;

	spin_lock(&v9inode->fscache_lock);
	fscache_relinquish_cookie(v9inode->fscache, 1);

	v9ses = v9fs_inode2v9ses(inode);
	v9inode->fscache = fscache_acquire_cookie(v9ses->fscache,
						  &v9fs_cache_inode_index_def,
						  v9inode);
	p9_debug(P9_DEBUG_FSC, "inode %p revalidating cookie old %p new %p\n",
		 inode, old, v9inode->fscache);

	spin_unlock(&v9inode->fscache_lock);
}

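/*
 * Ask FS-Cache whether a page may be released; depending on the gfp
 * flags it may wait for the cache to finish with the page first.
 */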
int __v9fs_fscache_release_page(struct page *page, gfp_t gfp)
{
	struct inode *inode = page->mapping->host;
	struct v9fs_inode *v9inode = V9FS_I(inode);

	BUG_ON(!v9inode->fscache);

	return fscache_maybe_release_page(v9inode->fscache, page, gfp);
}

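/*
 * On page invalidation, wait for any pending write to the cache to
 * finish and then remove the page from the cache's bookkeeping.
 */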
void __v9fs_fscache_invalidate_page(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct v9fs_inode *v9inode = V9FS_I(inode);

	BUG_ON(!v9inode->fscache);

	if (PageFsCache(page)) {
		fscache_wait_on_page_write(v9inode->fscache, page);
		BUG_ON(!PageLocked(page));
		fscache_uncache_page(v9inode->fscache, page);
	}
}

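/*
 * Completion callback for fscache_read_or_alloc_page{,s}(): mark the
 * page up to date if the read succeeded and unlock it.
 */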
static void v9fs_vfs_readpage_complete(struct page *page, void *data,
				       int error)
{
	if (!error)
		SetPageUptodate(page);

	unlock_page(page);
}

/**
 * __v9fs_readpage_from_fscache - read a page from cache
 *
 * Returns 0 if the page is in cache and a BIO is submitted,
 * 1 if the page is not in cache, and -error otherwise.
 */

int __v9fs_readpage_from_fscache(struct inode *inode, struct page *page)
{
	int ret;
	const struct v9fs_inode *v9inode = V9FS_I(inode);

	p9_debug(P9_DEBUG_FSC, "inode %p page %p\n", inode, page);
	if (!v9inode->fscache)
		return -ENOBUFS;

	ret = fscache_read_or_alloc_page(v9inode->fscache,
					 page,
					 v9fs_vfs_readpage_complete,
					 NULL,
					 GFP_KERNEL);
	switch (ret) {
	case -ENOBUFS:
	case -ENODATA:
		p9_debug(P9_DEBUG_FSC, "page/inode not in cache %d\n", ret);
		return 1;
	case 0:
		p9_debug(P9_DEBUG_FSC, "BIO submitted\n");
		return ret;
	default:
		p9_debug(P9_DEBUG_FSC, "ret %d\n", ret);
		return ret;
	}
}

/**
 * __v9fs_readpages_from_fscache - read multiple pages from cache
 *
 * Returns 0 if the pages are in cache and a BIO is submitted,
 * 1 if the pages are not in cache, and -error otherwise.
 */

int __v9fs_readpages_from_fscache(struct inode *inode,
				  struct address_space *mapping,
				  struct list_head *pages,
				  unsigned *nr_pages)
{
	int ret;
	const struct v9fs_inode *v9inode = V9FS_I(inode);

	p9_debug(P9_DEBUG_FSC, "inode %p pages %u\n", inode, *nr_pages);
	if (!v9inode->fscache)
		return -ENOBUFS;

	ret = fscache_read_or_alloc_pages(v9inode->fscache,
					  mapping, pages, nr_pages,
					  v9fs_vfs_readpage_complete,
					  NULL,
					  mapping_gfp_mask(mapping));
	switch (ret) {
	case -ENOBUFS:
	case -ENODATA:
		p9_debug(P9_DEBUG_FSC, "pages/inodes not in cache %d\n", ret);
		return 1;
	case 0:
		BUG_ON(!list_empty(pages));
		BUG_ON(*nr_pages != 0);
		p9_debug(P9_DEBUG_FSC, "BIO submitted\n");
		return ret;
	default:
		p9_debug(P9_DEBUG_FSC, "ret %d\n", ret);
		return ret;
	}
}

/**
 * __v9fs_readpage_to_fscache - write a page to the cache
 *
 */

void __v9fs_readpage_to_fscache(struct inode *inode, struct page *page)
{
	int ret;
	const struct v9fs_inode *v9inode = V9FS_I(inode);

	p9_debug(P9_DEBUG_FSC, "inode %p page %p\n", inode, page);
	ret = fscache_write_page(v9inode->fscache, page, GFP_KERNEL);
	p9_debug(P9_DEBUG_FSC, "ret =  %d\n", ret);
	if (ret != 0)
		v9fs_uncache_page(inode, page);
}

/*
 * wait for a page to complete writing to the cache
 */
void __v9fs_fscache_wait_on_page_write(struct inode *inode, struct page *page)
{
	const struct v9fs_inode *v9inode = V9FS_I(inode);
	p9_debug(P9_DEBUG_FSC, "inode %p page %p\n", inode, page);
	if (PageFsCache(page))
		fscache_wait_on_page_write(v9inode->fscache, page);
}
415}