/*
 * V9FS cache definitions.
 *
 * Copyright (C) 2009 by Abhishek Kulkarni <adkulkar@umail.iu.edu>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to:
 *  Free Software Foundation
 *  51 Franklin Street, Fifth Floor
 *  Boston, MA  02111-1301  USA
 *
 */

#include <linux/jiffies.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <net/9p/9p.h>

#include "v9fs.h"
#include "cache.h"

#define CACHETAG_LEN 11

struct fscache_netfs v9fs_cache_netfs = {
	.name		= "9p",
	.version	= 0,
};

/**
 * v9fs_random_cachetag - Generate a random tag to be associated
 *			  with a new cache session.
 *
 * The value of jiffies is used for a fairly random cache tag.
 */

static
int v9fs_random_cachetag(struct v9fs_session_info *v9ses)
{
	v9ses->cachetag = kmalloc(CACHETAG_LEN, GFP_KERNEL);
	if (!v9ses->cachetag)
		return -ENOMEM;

	return scnprintf(v9ses->cachetag, CACHETAG_LEN, "%lu", jiffies);
}

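/*
 * Copy the session's cache tag into the fscache key buffer; returns the key
 * length, or 0 if the tag does not fit in bufmax.
 */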
static uint16_t v9fs_cache_session_get_key(const void *cookie_netfs_data,
					   void *buffer, uint16_t bufmax)
{
	struct v9fs_session_info *v9ses;
	uint16_t klen = 0;

	v9ses = (struct v9fs_session_info *)cookie_netfs_data;
	p9_debug(P9_DEBUG_FSC, "session %p buf %p size %u\n",
		 v9ses, buffer, bufmax);

	if (v9ses->cachetag)
		klen = strlen(v9ses->cachetag);

	if (klen > bufmax)
		return 0;

	memcpy(buffer, v9ses->cachetag, klen);
	p9_debug(P9_DEBUG_FSC, "cache session tag %s\n", v9ses->cachetag);
	return klen;
}

const struct fscache_cookie_def v9fs_cache_session_index_def = {
	.name		= "9P.session",
	.type		= FSCACHE_COOKIE_TYPE_INDEX,
	.get_key	= v9fs_cache_session_get_key,
};

void v9fs_cache_session_get_cookie(struct v9fs_session_info *v9ses)
{
	/* If no cache session tag was specified, we generate a random one. */
	if (!v9ses->cachetag) {
		if (v9fs_random_cachetag(v9ses) < 0) {
			v9ses->fscache = NULL;
			return;
		}
	}

	v9ses->fscache = fscache_acquire_cookie(v9fs_cache_netfs.primary_index,
						&v9fs_cache_session_index_def,
						v9ses, true);
	p9_debug(P9_DEBUG_FSC, "session %p get cookie %p\n",
		 v9ses, v9ses->fscache);
}

void v9fs_cache_session_put_cookie(struct v9fs_session_info *v9ses)
{
	p9_debug(P9_DEBUG_FSC, "session %p put cookie %p\n",
		 v9ses, v9ses->fscache);
	fscache_relinquish_cookie(v9ses->fscache, 0);
	v9ses->fscache = NULL;
}

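/*
 * Use the 9P qid path as the per-inode cookie key.
 */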
static uint16_t v9fs_cache_inode_get_key(const void *cookie_netfs_data,
					 void *buffer, uint16_t bufmax)
{
	const struct v9fs_inode *v9inode = cookie_netfs_data;
	memcpy(buffer, &v9inode->qid.path, sizeof(v9inode->qid.path));
	p9_debug(P9_DEBUG_FSC, "inode %p get key %llu\n",
		 &v9inode->vfs_inode, v9inode->qid.path);
	return sizeof(v9inode->qid.path);
}

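/*
 * Report the inode's current size as the cache object attribute.
 */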
static void v9fs_cache_inode_get_attr(const void *cookie_netfs_data,
				      uint64_t *size)
{
	const struct v9fs_inode *v9inode = cookie_netfs_data;
	*size = i_size_read(&v9inode->vfs_inode);

	p9_debug(P9_DEBUG_FSC, "inode %p get attr %llu\n",
		 &v9inode->vfs_inode, *size);
}

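/*
 * Store the qid version as auxiliary data so that stale cache objects can be
 * detected later by check_aux.
 */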
static uint16_t v9fs_cache_inode_get_aux(const void *cookie_netfs_data,
					 void *buffer, uint16_t buflen)
{
	const struct v9fs_inode *v9inode = cookie_netfs_data;
	memcpy(buffer, &v9inode->qid.version, sizeof(v9inode->qid.version));
	p9_debug(P9_DEBUG_FSC, "inode %p get aux %u\n",
		 &v9inode->vfs_inode, v9inode->qid.version);
	return sizeof(v9inode->qid.version);
}

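/*
 * An auxiliary-data mismatch against the inode's current qid version marks
 * the cache object obsolete.
 */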
static enum
fscache_checkaux v9fs_cache_inode_check_aux(void *cookie_netfs_data,
					    const void *buffer,
					    uint16_t buflen)
{
	const struct v9fs_inode *v9inode = cookie_netfs_data;

	if (buflen != sizeof(v9inode->qid.version))
		return FSCACHE_CHECKAUX_OBSOLETE;

	if (memcmp(buffer, &v9inode->qid.version,
		   sizeof(v9inode->qid.version)))
		return FSCACHE_CHECKAUX_OBSOLETE;

	return FSCACHE_CHECKAUX_OKAY;
}

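/*
 * Walk the inode's page cache and clear PG_fscache on each page.
 */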
static void v9fs_cache_inode_now_uncached(void *cookie_netfs_data)
{
	struct v9fs_inode *v9inode = cookie_netfs_data;
	struct pagevec pvec;
	pgoff_t first;
	int loop, nr_pages;

	pagevec_init(&pvec, 0);
	first = 0;

	for (;;) {
		nr_pages = pagevec_lookup(&pvec, v9inode->vfs_inode.i_mapping,
					  first,
					  PAGEVEC_SIZE - pagevec_count(&pvec));
		if (!nr_pages)
			break;

		for (loop = 0; loop < nr_pages; loop++)
			ClearPageFsCache(pvec.pages[loop]);

		first = pvec.pages[nr_pages - 1]->index + 1;

		pvec.nr = nr_pages;
		pagevec_release(&pvec);
		cond_resched();
	}
}

const struct fscache_cookie_def v9fs_cache_inode_index_def = {
	.name		= "9p.inode",
	.type		= FSCACHE_COOKIE_TYPE_DATAFILE,
	.get_key	= v9fs_cache_inode_get_key,
	.get_attr	= v9fs_cache_inode_get_attr,
	.get_aux	= v9fs_cache_inode_get_aux,
	.check_aux	= v9fs_cache_inode_check_aux,
	.now_uncached	= v9fs_cache_inode_now_uncached,
};

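/*
 * Acquire a data-file cookie for a regular file, indexed under the session
 * cookie; does nothing for non-regular inodes or if a cookie is already held.
 */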
void v9fs_cache_inode_get_cookie(struct inode *inode)
{
	struct v9fs_inode *v9inode;
	struct v9fs_session_info *v9ses;

	if (!S_ISREG(inode->i_mode))
		return;

	v9inode = V9FS_I(inode);
	if (v9inode->fscache)
		return;

	v9ses = v9fs_inode2v9ses(inode);
	v9inode->fscache = fscache_acquire_cookie(v9ses->fscache,
						  &v9fs_cache_inode_index_def,
						  v9inode, true);

	p9_debug(P9_DEBUG_FSC, "inode %p get cookie %p\n",
		 inode, v9inode->fscache);
}

void v9fs_cache_inode_put_cookie(struct inode *inode)
{
	struct v9fs_inode *v9inode = V9FS_I(inode);

	if (!v9inode->fscache)
		return;
	p9_debug(P9_DEBUG_FSC, "inode %p put cookie %p\n",
		 inode, v9inode->fscache);

	fscache_relinquish_cookie(v9inode->fscache, 0);
	v9inode->fscache = NULL;
}

void v9fs_cache_inode_flush_cookie(struct inode *inode)
{
	struct v9fs_inode *v9inode = V9FS_I(inode);

	if (!v9inode->fscache)
		return;
	p9_debug(P9_DEBUG_FSC, "inode %p flush cookie %p\n",
		 inode, v9inode->fscache);

	fscache_relinquish_cookie(v9inode->fscache, 1);
	v9inode->fscache = NULL;
}

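/*
 * Drop the cookie when the file is opened with write access, otherwise
 * (re)acquire it; serialized by the inode's fscache_lock.
 */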
void v9fs_cache_inode_set_cookie(struct inode *inode, struct file *filp)
{
	struct v9fs_inode *v9inode = V9FS_I(inode);

	if (!v9inode->fscache)
		return;

	mutex_lock(&v9inode->fscache_lock);

	if ((filp->f_flags & O_ACCMODE) != O_RDONLY)
		v9fs_cache_inode_flush_cookie(inode);
	else
		v9fs_cache_inode_get_cookie(inode);

	mutex_unlock(&v9inode->fscache_lock);
}

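/*
 * Retire the current cookie and acquire a fresh one for the same inode.
 */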
void v9fs_cache_inode_reset_cookie(struct inode *inode)
{
	struct v9fs_inode *v9inode = V9FS_I(inode);
	struct v9fs_session_info *v9ses;
	struct fscache_cookie *old;

	if (!v9inode->fscache)
		return;

	old = v9inode->fscache;

	mutex_lock(&v9inode->fscache_lock);
	fscache_relinquish_cookie(v9inode->fscache, 1);

	v9ses = v9fs_inode2v9ses(inode);
	v9inode->fscache = fscache_acquire_cookie(v9ses->fscache,
						  &v9fs_cache_inode_index_def,
						  v9inode, true);
	p9_debug(P9_DEBUG_FSC, "inode %p revalidating cookie old %p new %p\n",
		 inode, old, v9inode->fscache);

	mutex_unlock(&v9inode->fscache_lock);
}

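/*
 * Ask fscache whether a page still marked for caching may be released.
 */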
int __v9fs_fscache_release_page(struct page *page, gfp_t gfp)
{
	struct inode *inode = page->mapping->host;
	struct v9fs_inode *v9inode = V9FS_I(inode);

	BUG_ON(!v9inode->fscache);

	return fscache_maybe_release_page(v9inode->fscache, page, gfp);
}

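/*
 * Wait for any pending write to the cache, then uncache the page.
 */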
void __v9fs_fscache_invalidate_page(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct v9fs_inode *v9inode = V9FS_I(inode);

	BUG_ON(!v9inode->fscache);

	if (PageFsCache(page)) {
		fscache_wait_on_page_write(v9inode->fscache, page);
		BUG_ON(!PageLocked(page));
		fscache_uncache_page(v9inode->fscache, page);
	}
}

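/*
 * Read completion handler: mark the page up to date on success and unlock it
 * either way.
 */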
static void v9fs_vfs_readpage_complete(struct page *page, void *data,
				       int error)
{
	if (!error)
		SetPageUptodate(page);

	unlock_page(page);
}

/**
 * __v9fs_readpage_from_fscache - read a page from cache
 *
 * Returns 0 if the page is in cache and a BIO is submitted,
 * 1 if the page is not in cache, and -error otherwise.
 */

int __v9fs_readpage_from_fscache(struct inode *inode, struct page *page)
{
	int ret;
	const struct v9fs_inode *v9inode = V9FS_I(inode);

	p9_debug(P9_DEBUG_FSC, "inode %p page %p\n", inode, page);
	if (!v9inode->fscache)
		return -ENOBUFS;

	ret = fscache_read_or_alloc_page(v9inode->fscache,
					 page,
					 v9fs_vfs_readpage_complete,
					 NULL,
					 GFP_KERNEL);
	switch (ret) {
	case -ENOBUFS:
	case -ENODATA:
		p9_debug(P9_DEBUG_FSC, "page/inode not in cache %d\n", ret);
		return 1;
	case 0:
		p9_debug(P9_DEBUG_FSC, "BIO submitted\n");
		return ret;
	default:
		p9_debug(P9_DEBUG_FSC, "ret %d\n", ret);
		return ret;
	}
}

/**
 * __v9fs_readpages_from_fscache - read multiple pages from cache
 *
 * Returns 0 if the pages are in cache and a BIO is submitted,
 * 1 if the pages are not in cache, and -error otherwise.
 */

int __v9fs_readpages_from_fscache(struct inode *inode,
				  struct address_space *mapping,
				  struct list_head *pages,
				  unsigned *nr_pages)
{
	int ret;
	const struct v9fs_inode *v9inode = V9FS_I(inode);

	p9_debug(P9_DEBUG_FSC, "inode %p pages %u\n", inode, *nr_pages);
	if (!v9inode->fscache)
		return -ENOBUFS;

	ret = fscache_read_or_alloc_pages(v9inode->fscache,
					  mapping, pages, nr_pages,
					  v9fs_vfs_readpage_complete,
					  NULL,
					  mapping_gfp_mask(mapping));
	switch (ret) {
	case -ENOBUFS:
	case -ENODATA:
		p9_debug(P9_DEBUG_FSC, "pages/inodes not in cache %d\n", ret);
		return 1;
	case 0:
		BUG_ON(!list_empty(pages));
		BUG_ON(*nr_pages != 0);
		p9_debug(P9_DEBUG_FSC, "BIO submitted\n");
		return ret;
	default:
		p9_debug(P9_DEBUG_FSC, "ret %d\n", ret);
		return ret;
	}
}

/**
 * __v9fs_readpage_to_fscache - write a page to the cache
 *
 */

void __v9fs_readpage_to_fscache(struct inode *inode, struct page *page)
{
	int ret;
	const struct v9fs_inode *v9inode = V9FS_I(inode);

	p9_debug(P9_DEBUG_FSC, "inode %p page %p\n", inode, page);
	ret = fscache_write_page(v9inode->fscache, page, GFP_KERNEL);
	p9_debug(P9_DEBUG_FSC, "ret = %d\n", ret);
	if (ret != 0)
		v9fs_uncache_page(inode, page);
}

/*
 * wait for a page to complete writing to the cache
 */
void __v9fs_fscache_wait_on_page_write(struct inode *inode, struct page *page)
{
	const struct v9fs_inode *v9inode = V9FS_I(inode);
	p9_debug(P9_DEBUG_FSC, "inode %p page %p\n", inode, page);
	if (PageFsCache(page))
		fscache_wait_on_page_write(v9inode->fscache, page);
}

// SPDX-License-Identifier: GPL-2.0-only
/*
 * V9FS cache definitions.
 *
 * Copyright (C) 2009 by Abhishek Kulkarni <adkulkar@umail.iu.edu>
 */

#include <linux/jiffies.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <net/9p/9p.h>

#include "v9fs.h"
#include "cache.h"

#define CACHETAG_LEN 11

struct fscache_netfs v9fs_cache_netfs = {
	.name		= "9p",
	.version	= 0,
};

/**
 * v9fs_random_cachetag - Generate a random tag to be associated
 *			  with a new cache session.
 *
 * The value of jiffies is used for a fairly random cache tag.
 */

static
int v9fs_random_cachetag(struct v9fs_session_info *v9ses)
{
	v9ses->cachetag = kmalloc(CACHETAG_LEN, GFP_KERNEL);
	if (!v9ses->cachetag)
		return -ENOMEM;

	return scnprintf(v9ses->cachetag, CACHETAG_LEN, "%lu", jiffies);
}

const struct fscache_cookie_def v9fs_cache_session_index_def = {
	.name		= "9P.session",
	.type		= FSCACHE_COOKIE_TYPE_INDEX,
};

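/*
 * Acquire the per-session index cookie, keyed by the (possibly freshly
 * generated) cache tag.
 */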
void v9fs_cache_session_get_cookie(struct v9fs_session_info *v9ses)
{
	/* If no cache session tag was specified, we generate a random one. */
	if (!v9ses->cachetag) {
		if (v9fs_random_cachetag(v9ses) < 0) {
			v9ses->fscache = NULL;
			kfree(v9ses->cachetag);
			v9ses->cachetag = NULL;
			return;
		}
	}

	v9ses->fscache = fscache_acquire_cookie(v9fs_cache_netfs.primary_index,
						&v9fs_cache_session_index_def,
						v9ses->cachetag,
						strlen(v9ses->cachetag),
						NULL, 0,
						v9ses, 0, true);
	p9_debug(P9_DEBUG_FSC, "session %p get cookie %p\n",
		 v9ses, v9ses->fscache);
}

void v9fs_cache_session_put_cookie(struct v9fs_session_info *v9ses)
{
	p9_debug(P9_DEBUG_FSC, "session %p put cookie %p\n",
		 v9ses, v9ses->fscache);
	fscache_relinquish_cookie(v9ses->fscache, NULL, false);
	v9ses->fscache = NULL;
}

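/*
 * An auxiliary-data mismatch against the inode's current qid version marks
 * the cache object obsolete.
 */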
static enum
fscache_checkaux v9fs_cache_inode_check_aux(void *cookie_netfs_data,
					    const void *buffer,
					    uint16_t buflen,
					    loff_t object_size)
{
	const struct v9fs_inode *v9inode = cookie_netfs_data;

	if (buflen != sizeof(v9inode->qid.version))
		return FSCACHE_CHECKAUX_OBSOLETE;

	if (memcmp(buffer, &v9inode->qid.version,
		   sizeof(v9inode->qid.version)))
		return FSCACHE_CHECKAUX_OBSOLETE;

	return FSCACHE_CHECKAUX_OKAY;
}

const struct fscache_cookie_def v9fs_cache_inode_index_def = {
	.name		= "9p.inode",
	.type		= FSCACHE_COOKIE_TYPE_DATAFILE,
	.check_aux	= v9fs_cache_inode_check_aux,
};

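/*
 * Acquire a data-file cookie for a regular file, keyed by qid path with the
 * qid version as auxiliary data and the current inode size as object size.
 */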
void v9fs_cache_inode_get_cookie(struct inode *inode)
{
	struct v9fs_inode *v9inode;
	struct v9fs_session_info *v9ses;

	if (!S_ISREG(inode->i_mode))
		return;

	v9inode = V9FS_I(inode);
	if (v9inode->fscache)
		return;

	v9ses = v9fs_inode2v9ses(inode);
	v9inode->fscache = fscache_acquire_cookie(v9ses->fscache,
						  &v9fs_cache_inode_index_def,
						  &v9inode->qid.path,
						  sizeof(v9inode->qid.path),
						  &v9inode->qid.version,
						  sizeof(v9inode->qid.version),
						  v9inode,
						  i_size_read(&v9inode->vfs_inode),
						  true);

	p9_debug(P9_DEBUG_FSC, "inode %p get cookie %p\n",
		 inode, v9inode->fscache);
}

void v9fs_cache_inode_put_cookie(struct inode *inode)
{
	struct v9fs_inode *v9inode = V9FS_I(inode);

	if (!v9inode->fscache)
		return;
	p9_debug(P9_DEBUG_FSC, "inode %p put cookie %p\n",
		 inode, v9inode->fscache);

	fscache_relinquish_cookie(v9inode->fscache, &v9inode->qid.version,
				  false);
	v9inode->fscache = NULL;
}

void v9fs_cache_inode_flush_cookie(struct inode *inode)
{
	struct v9fs_inode *v9inode = V9FS_I(inode);

	if (!v9inode->fscache)
		return;
	p9_debug(P9_DEBUG_FSC, "inode %p flush cookie %p\n",
		 inode, v9inode->fscache);

	fscache_relinquish_cookie(v9inode->fscache, NULL, true);
	v9inode->fscache = NULL;
}

void v9fs_cache_inode_set_cookie(struct inode *inode, struct file *filp)
{
	struct v9fs_inode *v9inode = V9FS_I(inode);

	if (!v9inode->fscache)
		return;

	mutex_lock(&v9inode->fscache_lock);

	if ((filp->f_flags & O_ACCMODE) != O_RDONLY)
		v9fs_cache_inode_flush_cookie(inode);
	else
		v9fs_cache_inode_get_cookie(inode);

	mutex_unlock(&v9inode->fscache_lock);
}

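/*
 * Retire the current cookie and acquire a fresh one for the same inode,
 * under the inode's fscache_lock.
 */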
void v9fs_cache_inode_reset_cookie(struct inode *inode)
{
	struct v9fs_inode *v9inode = V9FS_I(inode);
	struct v9fs_session_info *v9ses;
	struct fscache_cookie *old;

	if (!v9inode->fscache)
		return;

	old = v9inode->fscache;

	mutex_lock(&v9inode->fscache_lock);
	fscache_relinquish_cookie(v9inode->fscache, NULL, true);

	v9ses = v9fs_inode2v9ses(inode);
	v9inode->fscache = fscache_acquire_cookie(v9ses->fscache,
						  &v9fs_cache_inode_index_def,
						  &v9inode->qid.path,
						  sizeof(v9inode->qid.path),
						  &v9inode->qid.version,
						  sizeof(v9inode->qid.version),
						  v9inode,
						  i_size_read(&v9inode->vfs_inode),
						  true);
	p9_debug(P9_DEBUG_FSC, "inode %p revalidating cookie old %p new %p\n",
		 inode, old, v9inode->fscache);

	mutex_unlock(&v9inode->fscache_lock);
}

int __v9fs_fscache_release_page(struct page *page, gfp_t gfp)
{
	struct inode *inode = page->mapping->host;
	struct v9fs_inode *v9inode = V9FS_I(inode);

	BUG_ON(!v9inode->fscache);

	return fscache_maybe_release_page(v9inode->fscache, page, gfp);
}

void __v9fs_fscache_invalidate_page(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct v9fs_inode *v9inode = V9FS_I(inode);

	BUG_ON(!v9inode->fscache);

	if (PageFsCache(page)) {
		fscache_wait_on_page_write(v9inode->fscache, page);
		BUG_ON(!PageLocked(page));
		fscache_uncache_page(v9inode->fscache, page);
	}
}

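/*
 * Read completion handler: mark the page up to date on success and unlock it
 * either way.
 */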
static void v9fs_vfs_readpage_complete(struct page *page, void *data,
				       int error)
{
	if (!error)
		SetPageUptodate(page);

	unlock_page(page);
}

/**
 * __v9fs_readpage_from_fscache - read a page from cache
 *
 * Returns 0 if the page is in cache and a BIO is submitted,
 * 1 if the page is not in cache, and -error otherwise.
 */

int __v9fs_readpage_from_fscache(struct inode *inode, struct page *page)
{
	int ret;
	const struct v9fs_inode *v9inode = V9FS_I(inode);

	p9_debug(P9_DEBUG_FSC, "inode %p page %p\n", inode, page);
	if (!v9inode->fscache)
		return -ENOBUFS;

	ret = fscache_read_or_alloc_page(v9inode->fscache,
					 page,
					 v9fs_vfs_readpage_complete,
					 NULL,
					 GFP_KERNEL);
	switch (ret) {
	case -ENOBUFS:
	case -ENODATA:
		p9_debug(P9_DEBUG_FSC, "page/inode not in cache %d\n", ret);
		return 1;
	case 0:
		p9_debug(P9_DEBUG_FSC, "BIO submitted\n");
		return ret;
	default:
		p9_debug(P9_DEBUG_FSC, "ret %d\n", ret);
		return ret;
	}
}

/**
 * __v9fs_readpages_from_fscache - read multiple pages from cache
 *
 * Returns 0 if the pages are in cache and a BIO is submitted,
 * 1 if the pages are not in cache, and -error otherwise.
 */

int __v9fs_readpages_from_fscache(struct inode *inode,
				  struct address_space *mapping,
				  struct list_head *pages,
				  unsigned *nr_pages)
{
	int ret;
	const struct v9fs_inode *v9inode = V9FS_I(inode);

	p9_debug(P9_DEBUG_FSC, "inode %p pages %u\n", inode, *nr_pages);
	if (!v9inode->fscache)
		return -ENOBUFS;

	ret = fscache_read_or_alloc_pages(v9inode->fscache,
					  mapping, pages, nr_pages,
					  v9fs_vfs_readpage_complete,
					  NULL,
					  mapping_gfp_mask(mapping));
	switch (ret) {
	case -ENOBUFS:
	case -ENODATA:
		p9_debug(P9_DEBUG_FSC, "pages/inodes not in cache %d\n", ret);
		return 1;
	case 0:
		BUG_ON(!list_empty(pages));
		BUG_ON(*nr_pages != 0);
		p9_debug(P9_DEBUG_FSC, "BIO submitted\n");
		return ret;
	default:
		p9_debug(P9_DEBUG_FSC, "ret %d\n", ret);
		return ret;
	}
}

/**
 * __v9fs_readpage_to_fscache - write a page to the cache
 *
 */

void __v9fs_readpage_to_fscache(struct inode *inode, struct page *page)
{
	int ret;
	const struct v9fs_inode *v9inode = V9FS_I(inode);

	p9_debug(P9_DEBUG_FSC, "inode %p page %p\n", inode, page);
	ret = fscache_write_page(v9inode->fscache, page,
				 i_size_read(&v9inode->vfs_inode), GFP_KERNEL);
	p9_debug(P9_DEBUG_FSC, "ret = %d\n", ret);
	if (ret != 0)
		v9fs_uncache_page(inode, page);
}

/*
 * wait for a page to complete writing to the cache
 */
void __v9fs_fscache_wait_on_page_write(struct inode *inode, struct page *page)
{
	const struct v9fs_inode *v9inode = V9FS_I(inode);
	p9_debug(P9_DEBUG_FSC, "inode %p page %p\n", inode, page);
	if (PageFsCache(page))
		fscache_wait_on_page_write(v9inode->fscache, page);
}