fs/9p/cache.c
v4.6
  1/*
  2 * V9FS cache definitions.
  3 *
  4 *  Copyright (C) 2009 by Abhishek Kulkarni <adkulkar@umail.iu.edu>
  5 *
  6 *  This program is free software; you can redistribute it and/or modify
  7 *  it under the terms of the GNU General Public License version 2
  8 *  as published by the Free Software Foundation.
  9 *
 10 *  This program is distributed in the hope that it will be useful,
 11 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 12 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 13 *  GNU General Public License for more details.
 14 *
 15 *  You should have received a copy of the GNU General Public License
 16 *  along with this program; if not, write to:
 17 *  Free Software Foundation
 18 *  51 Franklin Street, Fifth Floor
 19 *  Boston, MA  02111-1301  USA
 20 *
 21 */
 22
 23#include <linux/jiffies.h>
 24#include <linux/file.h>
 25#include <linux/slab.h>
 26#include <linux/stat.h>
 27#include <linux/sched.h>
 28#include <linux/fs.h>
 29#include <net/9p/9p.h>
 30
 31#include "v9fs.h"
 32#include "cache.h"
 33
 34#define CACHETAG_LEN  11
 35
 36struct fscache_netfs v9fs_cache_netfs = {
 37	.name 		= "9p",
 38	.version 	= 0,
 39};
 40
 41/**
 42 * v9fs_random_cachetag - Generate a random tag to be associated
 43 *			  with a new cache session.
 44 *
 45 * The value of jiffies is used for a fairly random cache tag.
 46 */
 47
 48static
 49int v9fs_random_cachetag(struct v9fs_session_info *v9ses)
 50{
 51	v9ses->cachetag = kmalloc(CACHETAG_LEN, GFP_KERNEL);
 52	if (!v9ses->cachetag)
 53		return -ENOMEM;
 54
 55	return scnprintf(v9ses->cachetag, CACHETAG_LEN, "%lu", jiffies);
 56}
 57
 58static uint16_t v9fs_cache_session_get_key(const void *cookie_netfs_data,
 59					   void *buffer, uint16_t bufmax)
 60{
 61	struct v9fs_session_info *v9ses;
 62	uint16_t klen = 0;
 63
 64	v9ses = (struct v9fs_session_info *)cookie_netfs_data;
 65	p9_debug(P9_DEBUG_FSC, "session %p buf %p size %u\n",
 66		 v9ses, buffer, bufmax);
 67
 68	if (v9ses->cachetag)
 69		klen = strlen(v9ses->cachetag);
 70
 71	if (klen > bufmax)
 72		return 0;
 73
 74	memcpy(buffer, v9ses->cachetag, klen);
 75	p9_debug(P9_DEBUG_FSC, "cache session tag %s\n", v9ses->cachetag);
 76	return klen;
 77}
 78
 79const struct fscache_cookie_def v9fs_cache_session_index_def = {
 80	.name		= "9P.session",
 81	.type		= FSCACHE_COOKIE_TYPE_INDEX,
 82	.get_key	= v9fs_cache_session_get_key,
 83};
 84
 85void v9fs_cache_session_get_cookie(struct v9fs_session_info *v9ses)
 86{
 87	/* If no cache session tag was specified, we generate a random one. */
 88	if (!v9ses->cachetag)
 89		v9fs_random_cachetag(v9ses);
 90
 91	v9ses->fscache = fscache_acquire_cookie(v9fs_cache_netfs.primary_index,
 92						&v9fs_cache_session_index_def,
 93						v9ses, true);
 94	p9_debug(P9_DEBUG_FSC, "session %p get cookie %p\n",
 95		 v9ses, v9ses->fscache);
 96}
 97
 98void v9fs_cache_session_put_cookie(struct v9fs_session_info *v9ses)
 99{
100	p9_debug(P9_DEBUG_FSC, "session %p put cookie %p\n",
101		 v9ses, v9ses->fscache);
102	fscache_relinquish_cookie(v9ses->fscache, 0);
103	v9ses->fscache = NULL;
104}
105
106
107static uint16_t v9fs_cache_inode_get_key(const void *cookie_netfs_data,
108					 void *buffer, uint16_t bufmax)
109{
110	const struct v9fs_inode *v9inode = cookie_netfs_data;
111	memcpy(buffer, &v9inode->qid.path, sizeof(v9inode->qid.path));
112	p9_debug(P9_DEBUG_FSC, "inode %p get key %llu\n",
113		 &v9inode->vfs_inode, v9inode->qid.path);
114	return sizeof(v9inode->qid.path);
115}
116
117static void v9fs_cache_inode_get_attr(const void *cookie_netfs_data,
118				      uint64_t *size)
119{
120	const struct v9fs_inode *v9inode = cookie_netfs_data;
121	*size = i_size_read(&v9inode->vfs_inode);
122
123	p9_debug(P9_DEBUG_FSC, "inode %p get attr %llu\n",
124		 &v9inode->vfs_inode, *size);
125}
126
127static uint16_t v9fs_cache_inode_get_aux(const void *cookie_netfs_data,
128					 void *buffer, uint16_t buflen)
129{
130	const struct v9fs_inode *v9inode = cookie_netfs_data;
131	memcpy(buffer, &v9inode->qid.version, sizeof(v9inode->qid.version));
132	p9_debug(P9_DEBUG_FSC, "inode %p get aux %u\n",
133		 &v9inode->vfs_inode, v9inode->qid.version);
134	return sizeof(v9inode->qid.version);
135}
136
137static enum
138fscache_checkaux v9fs_cache_inode_check_aux(void *cookie_netfs_data,
139					    const void *buffer,
140					    uint16_t buflen)
141{
142	const struct v9fs_inode *v9inode = cookie_netfs_data;
143
144	if (buflen != sizeof(v9inode->qid.version))
145		return FSCACHE_CHECKAUX_OBSOLETE;
146
147	if (memcmp(buffer, &v9inode->qid.version,
148		   sizeof(v9inode->qid.version)))
149		return FSCACHE_CHECKAUX_OBSOLETE;
150
151	return FSCACHE_CHECKAUX_OKAY;
152}
153
154static void v9fs_cache_inode_now_uncached(void *cookie_netfs_data)
155{
156	struct v9fs_inode *v9inode = cookie_netfs_data;
157	struct pagevec pvec;
158	pgoff_t first;
159	int loop, nr_pages;
160
161	pagevec_init(&pvec, 0);
162	first = 0;
163
164	for (;;) {
165		nr_pages = pagevec_lookup(&pvec, v9inode->vfs_inode.i_mapping,
166					  first,
167					  PAGEVEC_SIZE - pagevec_count(&pvec));
168		if (!nr_pages)
169			break;
170
171		for (loop = 0; loop < nr_pages; loop++)
172			ClearPageFsCache(pvec.pages[loop]);
173
174		first = pvec.pages[nr_pages - 1]->index + 1;
175
176		pvec.nr = nr_pages;
177		pagevec_release(&pvec);
178		cond_resched();
179	}
180}
181
182const struct fscache_cookie_def v9fs_cache_inode_index_def = {
183	.name		= "9p.inode",
184	.type		= FSCACHE_COOKIE_TYPE_DATAFILE,
185	.get_key	= v9fs_cache_inode_get_key,
186	.get_attr	= v9fs_cache_inode_get_attr,
187	.get_aux	= v9fs_cache_inode_get_aux,
188	.check_aux	= v9fs_cache_inode_check_aux,
189	.now_uncached	= v9fs_cache_inode_now_uncached,
190};
191
192void v9fs_cache_inode_get_cookie(struct inode *inode)
193{
194	struct v9fs_inode *v9inode;
195	struct v9fs_session_info *v9ses;
196
197	if (!S_ISREG(inode->i_mode))
198		return;
199
200	v9inode = V9FS_I(inode);
201	if (v9inode->fscache)
202		return;
203
204	v9ses = v9fs_inode2v9ses(inode);
205	v9inode->fscache = fscache_acquire_cookie(v9ses->fscache,
206						  &v9fs_cache_inode_index_def,
207						  v9inode, true);
208
209	p9_debug(P9_DEBUG_FSC, "inode %p get cookie %p\n",
210		 inode, v9inode->fscache);
211}
212
213void v9fs_cache_inode_put_cookie(struct inode *inode)
214{
215	struct v9fs_inode *v9inode = V9FS_I(inode);
216
217	if (!v9inode->fscache)
218		return;
219	p9_debug(P9_DEBUG_FSC, "inode %p put cookie %p\n",
220		 inode, v9inode->fscache);
221
222	fscache_relinquish_cookie(v9inode->fscache, 0);
223	v9inode->fscache = NULL;
224}
225
226void v9fs_cache_inode_flush_cookie(struct inode *inode)
227{
228	struct v9fs_inode *v9inode = V9FS_I(inode);
229
230	if (!v9inode->fscache)
231		return;
232	p9_debug(P9_DEBUG_FSC, "inode %p flush cookie %p\n",
233		 inode, v9inode->fscache);
234
235	fscache_relinquish_cookie(v9inode->fscache, 1);
236	v9inode->fscache = NULL;
237}
238
239void v9fs_cache_inode_set_cookie(struct inode *inode, struct file *filp)
240{
241	struct v9fs_inode *v9inode = V9FS_I(inode);
242
243	if (!v9inode->fscache)
244		return;
245
246	mutex_lock(&v9inode->fscache_lock);
247
248	if ((filp->f_flags & O_ACCMODE) != O_RDONLY)
249		v9fs_cache_inode_flush_cookie(inode);
250	else
251		v9fs_cache_inode_get_cookie(inode);
252
253	mutex_unlock(&v9inode->fscache_lock);
254}
255
256void v9fs_cache_inode_reset_cookie(struct inode *inode)
257{
258	struct v9fs_inode *v9inode = V9FS_I(inode);
259	struct v9fs_session_info *v9ses;
260	struct fscache_cookie *old;
261
262	if (!v9inode->fscache)
263		return;
264
265	old = v9inode->fscache;
266
267	mutex_lock(&v9inode->fscache_lock);
268	fscache_relinquish_cookie(v9inode->fscache, 1);
269
270	v9ses = v9fs_inode2v9ses(inode);
271	v9inode->fscache = fscache_acquire_cookie(v9ses->fscache,
272						  &v9fs_cache_inode_index_def,
273						  v9inode, true);
274	p9_debug(P9_DEBUG_FSC, "inode %p revalidating cookie old %p new %p\n",
275		 inode, old, v9inode->fscache);
276
277	mutex_unlock(&v9inode->fscache_lock);
278}
279
280int __v9fs_fscache_release_page(struct page *page, gfp_t gfp)
281{
282	struct inode *inode = page->mapping->host;
283	struct v9fs_inode *v9inode = V9FS_I(inode);
284
285	BUG_ON(!v9inode->fscache);
286
287	return fscache_maybe_release_page(v9inode->fscache, page, gfp);
288}
289
290void __v9fs_fscache_invalidate_page(struct page *page)
291{
292	struct inode *inode = page->mapping->host;
293	struct v9fs_inode *v9inode = V9FS_I(inode);
294
295	BUG_ON(!v9inode->fscache);
296
297	if (PageFsCache(page)) {
298		fscache_wait_on_page_write(v9inode->fscache, page);
299		BUG_ON(!PageLocked(page));
300		fscache_uncache_page(v9inode->fscache, page);
301	}
302}
303
304static void v9fs_vfs_readpage_complete(struct page *page, void *data,
305				       int error)
306{
307	if (!error)
308		SetPageUptodate(page);
309
310	unlock_page(page);
311}
312
313/**
314 * __v9fs_readpage_from_fscache - read a page from cache
315 *
316 * Returns 0 if the pages are in cache and a BIO is submitted,
317 * 1 if the pages are not in cache and -error otherwise.
318 */
319
320int __v9fs_readpage_from_fscache(struct inode *inode, struct page *page)
321{
322	int ret;
323	const struct v9fs_inode *v9inode = V9FS_I(inode);
324
325	p9_debug(P9_DEBUG_FSC, "inode %p page %p\n", inode, page);
326	if (!v9inode->fscache)
327		return -ENOBUFS;
328
329	ret = fscache_read_or_alloc_page(v9inode->fscache,
330					 page,
331					 v9fs_vfs_readpage_complete,
332					 NULL,
333					 GFP_KERNEL);
334	switch (ret) {
335	case -ENOBUFS:
336	case -ENODATA:
337		p9_debug(P9_DEBUG_FSC, "page/inode not in cache %d\n", ret);
338		return 1;
339	case 0:
340		p9_debug(P9_DEBUG_FSC, "BIO submitted\n");
341		return ret;
342	default:
343		p9_debug(P9_DEBUG_FSC, "ret %d\n", ret);
344		return ret;
345	}
346}
347
348/**
349 * __v9fs_readpages_from_fscache - read multiple pages from cache
350 *
351 * Returns 0 if the pages are in cache and a BIO is submitted,
352 * 1 if the pages are not in cache and -error otherwise.
353 */
354
355int __v9fs_readpages_from_fscache(struct inode *inode,
356				  struct address_space *mapping,
357				  struct list_head *pages,
358				  unsigned *nr_pages)
359{
360	int ret;
361	const struct v9fs_inode *v9inode = V9FS_I(inode);
362
363	p9_debug(P9_DEBUG_FSC, "inode %p pages %u\n", inode, *nr_pages);
364	if (!v9inode->fscache)
365		return -ENOBUFS;
366
367	ret = fscache_read_or_alloc_pages(v9inode->fscache,
368					  mapping, pages, nr_pages,
369					  v9fs_vfs_readpage_complete,
370					  NULL,
371					  mapping_gfp_mask(mapping));
372	switch (ret) {
373	case -ENOBUFS:
374	case -ENODATA:
375		p9_debug(P9_DEBUG_FSC, "pages/inodes not in cache %d\n", ret);
376		return 1;
377	case 0:
378		BUG_ON(!list_empty(pages));
379		BUG_ON(*nr_pages != 0);
380		p9_debug(P9_DEBUG_FSC, "BIO submitted\n");
381		return ret;
382	default:
383		p9_debug(P9_DEBUG_FSC, "ret %d\n", ret);
384		return ret;
385	}
386}
387
388/**
389 * __v9fs_readpage_to_fscache - write a page to the cache
390 *
391 */
392
393void __v9fs_readpage_to_fscache(struct inode *inode, struct page *page)
394{
395	int ret;
396	const struct v9fs_inode *v9inode = V9FS_I(inode);
397
398	p9_debug(P9_DEBUG_FSC, "inode %p page %p\n", inode, page);
399	ret = fscache_write_page(v9inode->fscache, page, GFP_KERNEL);
400	p9_debug(P9_DEBUG_FSC, "ret =  %d\n", ret);
401	if (ret != 0)
402		v9fs_uncache_page(inode, page);
403}
404
405/*
406 * wait for a page to complete writing to the cache
407 */
408void __v9fs_fscache_wait_on_page_write(struct inode *inode, struct page *page)
409{
410	const struct v9fs_inode *v9inode = V9FS_I(inode);
411	p9_debug(P9_DEBUG_FSC, "inode %p page %p\n", inode, page);
412	if (PageFsCache(page))
413		fscache_wait_on_page_write(v9inode->fscache, page);
414}
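
The netfs definition (v9fs_cache_netfs) and the session cookie helpers above are only half of the picture: the netfs must also be registered with FS-Cache before any cookie can be acquired. In the 9p sources that registration lives outside this file (in fs/9p/v9fs.c). The fragment below is only a minimal sketch of the pattern under the pre-4.17 FS-Cache netfs API shown above; the init_v9fs_cache/exit_v9fs_cache names are invented for illustration and are not part of the 9p code.

/* Sketch only: register the 9p netfs with FS-Cache when the module is
 * loaded and drop the registration again on unload.
 * fscache_register_netfs() can fail, so its result must be propagated. */
static int __init init_v9fs_cache(void)
{
	return fscache_register_netfs(&v9fs_cache_netfs);
}

static void __exit exit_v9fs_cache(void)
{
	fscache_unregister_netfs(&v9fs_cache_netfs);
}
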
v4.17
  1/*
  2 * V9FS cache definitions.
  3 *
  4 *  Copyright (C) 2009 by Abhishek Kulkarni <adkulkar@umail.iu.edu>
  5 *
  6 *  This program is free software; you can redistribute it and/or modify
  7 *  it under the terms of the GNU General Public License version 2
  8 *  as published by the Free Software Foundation.
  9 *
 10 *  This program is distributed in the hope that it will be useful,
 11 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 12 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 13 *  GNU General Public License for more details.
 14 *
 15 *  You should have received a copy of the GNU General Public License
 16 *  along with this program; if not, write to:
 17 *  Free Software Foundation
 18 *  51 Franklin Street, Fifth Floor
 19 *  Boston, MA  02111-1301  USA
 20 *
 21 */
 22
 23#include <linux/jiffies.h>
 24#include <linux/file.h>
 25#include <linux/slab.h>
 26#include <linux/stat.h>
 27#include <linux/sched.h>
 28#include <linux/fs.h>
 29#include <net/9p/9p.h>
 30
 31#include "v9fs.h"
 32#include "cache.h"
 33
 34#define CACHETAG_LEN  11
 35
 36struct fscache_netfs v9fs_cache_netfs = {
 37	.name 		= "9p",
 38	.version 	= 0,
 39};
 40
 41/**
 42 * v9fs_random_cachetag - Generate a random tag to be associated
 43 *			  with a new cache session.
 44 *
 45 * The value of jiffies is used for a fairly random cache tag.
 46 */
 47
 48static
 49int v9fs_random_cachetag(struct v9fs_session_info *v9ses)
 50{
 51	v9ses->cachetag = kmalloc(CACHETAG_LEN, GFP_KERNEL);
 52	if (!v9ses->cachetag)
 53		return -ENOMEM;
 54
 55	return scnprintf(v9ses->cachetag, CACHETAG_LEN, "%lu", jiffies);
 56}
 57
 58const struct fscache_cookie_def v9fs_cache_session_index_def = {
 59	.name		= "9P.session",
 60	.type		= FSCACHE_COOKIE_TYPE_INDEX,
 61};
 62
 63void v9fs_cache_session_get_cookie(struct v9fs_session_info *v9ses)
 64{
 65	/* If no cache session tag was specified, we generate a random one. */
 66	if (!v9ses->cachetag) {
 67		if (v9fs_random_cachetag(v9ses) < 0) {
 68			v9ses->fscache = NULL;
 69			return;
 70		}
 71	}
 72
 73	v9ses->fscache = fscache_acquire_cookie(v9fs_cache_netfs.primary_index,
 74						&v9fs_cache_session_index_def,
 75						v9ses->cachetag,
 76						strlen(v9ses->cachetag),
 77						NULL, 0,
 78						v9ses, 0, true);
 79	p9_debug(P9_DEBUG_FSC, "session %p get cookie %p\n",
 80		 v9ses, v9ses->fscache);
 81}
 82
 83void v9fs_cache_session_put_cookie(struct v9fs_session_info *v9ses)
 84{
 85	p9_debug(P9_DEBUG_FSC, "session %p put cookie %p\n",
 86		 v9ses, v9ses->fscache);
 87	fscache_relinquish_cookie(v9ses->fscache, NULL, false);
 88	v9ses->fscache = NULL;
 89}
 90
 91static enum
 92fscache_checkaux v9fs_cache_inode_check_aux(void *cookie_netfs_data,
 93					    const void *buffer,
 94					    uint16_t buflen,
 95					    loff_t object_size)
 96{
 97	const struct v9fs_inode *v9inode = cookie_netfs_data;
 98
 99	if (buflen != sizeof(v9inode->qid.version))
100		return FSCACHE_CHECKAUX_OBSOLETE;
101
102	if (memcmp(buffer, &v9inode->qid.version,
103		   sizeof(v9inode->qid.version)))
104		return FSCACHE_CHECKAUX_OBSOLETE;
105
106	return FSCACHE_CHECKAUX_OKAY;
107}
108
109const struct fscache_cookie_def v9fs_cache_inode_index_def = {
110	.name		= "9p.inode",
111	.type		= FSCACHE_COOKIE_TYPE_DATAFILE,
112	.check_aux	= v9fs_cache_inode_check_aux,
113};
114
115void v9fs_cache_inode_get_cookie(struct inode *inode)
116{
117	struct v9fs_inode *v9inode;
118	struct v9fs_session_info *v9ses;
119
120	if (!S_ISREG(inode->i_mode))
121		return;
122
123	v9inode = V9FS_I(inode);
124	if (v9inode->fscache)
125		return;
126
127	v9ses = v9fs_inode2v9ses(inode);
128	v9inode->fscache = fscache_acquire_cookie(v9ses->fscache,
129						  &v9fs_cache_inode_index_def,
130						  &v9inode->qid.path,
131						  sizeof(v9inode->qid.path),
132						  &v9inode->qid.version,
133						  sizeof(v9inode->qid.version),
134						  v9inode,
135						  i_size_read(&v9inode->vfs_inode),
136						  true);
137
138	p9_debug(P9_DEBUG_FSC, "inode %p get cookie %p\n",
139		 inode, v9inode->fscache);
140}
141
142void v9fs_cache_inode_put_cookie(struct inode *inode)
143{
144	struct v9fs_inode *v9inode = V9FS_I(inode);
145
146	if (!v9inode->fscache)
147		return;
148	p9_debug(P9_DEBUG_FSC, "inode %p put cookie %p\n",
149		 inode, v9inode->fscache);
150
151	fscache_relinquish_cookie(v9inode->fscache, &v9inode->qid.version,
152				  false);
153	v9inode->fscache = NULL;
154}
155
156void v9fs_cache_inode_flush_cookie(struct inode *inode)
157{
158	struct v9fs_inode *v9inode = V9FS_I(inode);
159
160	if (!v9inode->fscache)
161		return;
162	p9_debug(P9_DEBUG_FSC, "inode %p flush cookie %p\n",
163		 inode, v9inode->fscache);
164
165	fscache_relinquish_cookie(v9inode->fscache, NULL, true);
166	v9inode->fscache = NULL;
167}
168
169void v9fs_cache_inode_set_cookie(struct inode *inode, struct file *filp)
170{
171	struct v9fs_inode *v9inode = V9FS_I(inode);
172
173	if (!v9inode->fscache)
174		return;
175
176	mutex_lock(&v9inode->fscache_lock);
177
178	if ((filp->f_flags & O_ACCMODE) != O_RDONLY)
179		v9fs_cache_inode_flush_cookie(inode);
180	else
181		v9fs_cache_inode_get_cookie(inode);
182
183	mutex_unlock(&v9inode->fscache_lock);
184}
185
186void v9fs_cache_inode_reset_cookie(struct inode *inode)
187{
188	struct v9fs_inode *v9inode = V9FS_I(inode);
189	struct v9fs_session_info *v9ses;
190	struct fscache_cookie *old;
191
192	if (!v9inode->fscache)
193		return;
194
195	old = v9inode->fscache;
196
197	mutex_lock(&v9inode->fscache_lock);
198	fscache_relinquish_cookie(v9inode->fscache, NULL, true);
199
200	v9ses = v9fs_inode2v9ses(inode);
201	v9inode->fscache = fscache_acquire_cookie(v9ses->fscache,
202						  &v9fs_cache_inode_index_def,
203						  &v9inode->qid.path,
204						  sizeof(v9inode->qid.path),
205						  &v9inode->qid.version,
206						  sizeof(v9inode->qid.version),
207						  v9inode,
208						  i_size_read(&v9inode->vfs_inode),
209						  true);
210	p9_debug(P9_DEBUG_FSC, "inode %p revalidating cookie old %p new %p\n",
211		 inode, old, v9inode->fscache);
212
213	mutex_unlock(&v9inode->fscache_lock);
214}
215
216int __v9fs_fscache_release_page(struct page *page, gfp_t gfp)
217{
218	struct inode *inode = page->mapping->host;
219	struct v9fs_inode *v9inode = V9FS_I(inode);
220
221	BUG_ON(!v9inode->fscache);
222
223	return fscache_maybe_release_page(v9inode->fscache, page, gfp);
224}
225
226void __v9fs_fscache_invalidate_page(struct page *page)
227{
228	struct inode *inode = page->mapping->host;
229	struct v9fs_inode *v9inode = V9FS_I(inode);
230
231	BUG_ON(!v9inode->fscache);
232
233	if (PageFsCache(page)) {
234		fscache_wait_on_page_write(v9inode->fscache, page);
235		BUG_ON(!PageLocked(page));
236		fscache_uncache_page(v9inode->fscache, page);
237	}
238}
239
240static void v9fs_vfs_readpage_complete(struct page *page, void *data,
241				       int error)
242{
243	if (!error)
244		SetPageUptodate(page);
245
246	unlock_page(page);
247}
248
249/**
250 * __v9fs_readpage_from_fscache - read a page from cache
251 *
252 * Returns 0 if the pages are in cache and a BIO is submitted,
253 * 1 if the pages are not in cache and -error otherwise.
254 */
255
256int __v9fs_readpage_from_fscache(struct inode *inode, struct page *page)
257{
258	int ret;
259	const struct v9fs_inode *v9inode = V9FS_I(inode);
260
261	p9_debug(P9_DEBUG_FSC, "inode %p page %p\n", inode, page);
262	if (!v9inode->fscache)
263		return -ENOBUFS;
264
265	ret = fscache_read_or_alloc_page(v9inode->fscache,
266					 page,
267					 v9fs_vfs_readpage_complete,
268					 NULL,
269					 GFP_KERNEL);
270	switch (ret) {
271	case -ENOBUFS:
272	case -ENODATA:
273		p9_debug(P9_DEBUG_FSC, "page/inode not in cache %d\n", ret);
274		return 1;
275	case 0:
276		p9_debug(P9_DEBUG_FSC, "BIO submitted\n");
277		return ret;
278	default:
279		p9_debug(P9_DEBUG_FSC, "ret %d\n", ret);
280		return ret;
281	}
282}
283
284/**
285 * __v9fs_readpages_from_fscache - read multiple pages from cache
286 *
287 * Returns 0 if the pages are in cache and a BIO is submitted,
288 * 1 if the pages are not in cache and -error otherwise.
289 */
290
291int __v9fs_readpages_from_fscache(struct inode *inode,
292				  struct address_space *mapping,
293				  struct list_head *pages,
294				  unsigned *nr_pages)
295{
296	int ret;
297	const struct v9fs_inode *v9inode = V9FS_I(inode);
298
299	p9_debug(P9_DEBUG_FSC, "inode %p pages %u\n", inode, *nr_pages);
300	if (!v9inode->fscache)
301		return -ENOBUFS;
302
303	ret = fscache_read_or_alloc_pages(v9inode->fscache,
304					  mapping, pages, nr_pages,
305					  v9fs_vfs_readpage_complete,
306					  NULL,
307					  mapping_gfp_mask(mapping));
308	switch (ret) {
309	case -ENOBUFS:
310	case -ENODATA:
311		p9_debug(P9_DEBUG_FSC, "pages/inodes not in cache %d\n", ret);
312		return 1;
313	case 0:
314		BUG_ON(!list_empty(pages));
315		BUG_ON(*nr_pages != 0);
316		p9_debug(P9_DEBUG_FSC, "BIO submitted\n");
317		return ret;
318	default:
319		p9_debug(P9_DEBUG_FSC, "ret %d\n", ret);
320		return ret;
321	}
322}
323
324/**
325 * __v9fs_readpage_to_fscache - write a page to the cache
326 *
327 */
328
329void __v9fs_readpage_to_fscache(struct inode *inode, struct page *page)
330{
331	int ret;
332	const struct v9fs_inode *v9inode = V9FS_I(inode);
333
334	p9_debug(P9_DEBUG_FSC, "inode %p page %p\n", inode, page);
335	ret = fscache_write_page(v9inode->fscache, page,
336				 i_size_read(&v9inode->vfs_inode), GFP_KERNEL);
337	p9_debug(P9_DEBUG_FSC, "ret =  %d\n", ret);
338	if (ret != 0)
339		v9fs_uncache_page(inode, page);
340}
341
342/*
343 * wait for a page to complete writing to the cache
344 */
345void __v9fs_fscache_wait_on_page_write(struct inode *inode, struct page *page)
346{
347	const struct v9fs_inode *v9inode = V9FS_I(inode);
348	p9_debug(P9_DEBUG_FSC, "inode %p page %p\n", inode, page);
349	if (PageFsCache(page))
350		fscache_wait_on_page_write(v9inode->fscache, page);
351}
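
Nearly all of the differences between the v4.6 and v4.17 versions above follow from a change in the FS-Cache cookie API. In v4.6 a cookie carries only a netfs_data pointer, and FS-Cache calls back into the get_key, get_attr and get_aux hooks of the cookie definition to obtain the index key, object size and auxiliary data; in v4.17 those values are passed to fscache_acquire_cookie() directly (and fscache_relinquish_cookie()/fscache_write_page() gained matching aux-data and size arguments), so those hooks are gone and only check_aux, now also given the object size, remains. The two acquisition call forms, taken from the inode-cookie code above with explanatory comments added:

	/* v4.6: key, attributes and aux data are fetched later through the
	 * cookie definition's callbacks; only netfs_data is passed here. */
	v9inode->fscache = fscache_acquire_cookie(v9ses->fscache,
						  &v9fs_cache_inode_index_def,
						  v9inode,	/* netfs_data */
						  true);	/* enable */

	/* v4.17: index key, aux data and object size are supplied up front. */
	v9inode->fscache = fscache_acquire_cookie(v9ses->fscache,
						  &v9fs_cache_inode_index_def,
						  &v9inode->qid.path,		/* index key */
						  sizeof(v9inode->qid.path),
						  &v9inode->qid.version,	/* aux data */
						  sizeof(v9inode->qid.version),
						  v9inode,			/* netfs_data */
						  i_size_read(&v9inode->vfs_inode), /* object size */
						  true);			/* enable */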