/*
 * Ceph cache definitions.
 *
 * Copyright (C) 2013 by Adfin Solutions, Inc. All Rights Reserved.
 * Written by Milosz Tanski (milosz@adfin.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to:
 *  Free Software Foundation
 *  51 Franklin Street, Fifth Floor
 *  Boston, MA 02111-1301 USA
 *
 */

#include "super.h"
#include "cache.h"

struct ceph_aux_inode {
	struct timespec	mtime;
	loff_t		size;
};

struct fscache_netfs ceph_cache_netfs = {
	.name		= "ceph",
	.version	= 0,
};

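/*
 * get_key callback for the per-filesystem index cookie: copy the cluster
 * fsid into the caller's buffer so each mounted Ceph cluster gets its own
 * entry in the cache index.
 */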
static uint16_t ceph_fscache_session_get_key(const void *cookie_netfs_data,
					     void *buffer, uint16_t maxbuf)
{
	const struct ceph_fs_client* fsc = cookie_netfs_data;
	uint16_t klen;

	klen = sizeof(fsc->client->fsid);
	if (klen > maxbuf)
		return 0;

	memcpy(buffer, &fsc->client->fsid, klen);
	return klen;
}

static const struct fscache_cookie_def ceph_fscache_fsid_object_def = {
	.name		= "CEPH.fsid",
	.type		= FSCACHE_COOKIE_TYPE_INDEX,
	.get_key	= ceph_fscache_session_get_key,
};

int ceph_fscache_register(void)
{
	return fscache_register_netfs(&ceph_cache_netfs);
}

void ceph_fscache_unregister(void)
{
	fscache_unregister_netfs(&ceph_cache_netfs);
}

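/*
 * Acquire the fsid-level index cookie for this mount and create the
 * workqueue used to revalidate cached inodes in the background.
 */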
int ceph_fscache_register_fs(struct ceph_fs_client* fsc)
{
	fsc->fscache = fscache_acquire_cookie(ceph_cache_netfs.primary_index,
					      &ceph_fscache_fsid_object_def,
					      fsc, true);

	if (fsc->fscache == NULL) {
		pr_err("Unable to register fsid: %p fscache cookie\n", fsc);
		return 0;
	}

	fsc->revalidate_wq = alloc_workqueue("ceph-revalidate", 0, 1);
	if (fsc->revalidate_wq == NULL)
		return -ENOMEM;

	return 0;
}

static uint16_t ceph_fscache_inode_get_key(const void *cookie_netfs_data,
					   void *buffer, uint16_t maxbuf)
{
	const struct ceph_inode_info* ci = cookie_netfs_data;
	uint16_t klen;

	/* use ceph virtual inode (id + snapshot) */
	klen = sizeof(ci->i_vino);
	if (klen > maxbuf)
		return 0;

	memcpy(buffer, &ci->i_vino, klen);
	return klen;
}

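/*
 * get_aux callback: pack the inode's mtime and size as auxiliary data.
 * check_aux below rebuilds the same structure and compares it to decide
 * whether a cached object is still current.
 */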
static uint16_t ceph_fscache_inode_get_aux(const void *cookie_netfs_data,
					   void *buffer, uint16_t bufmax)
{
	struct ceph_aux_inode aux;
	const struct ceph_inode_info* ci = cookie_netfs_data;
	const struct inode* inode = &ci->vfs_inode;

	memset(&aux, 0, sizeof(aux));
	aux.mtime = inode->i_mtime;
	aux.size = i_size_read(inode);

	memcpy(buffer, &aux, sizeof(aux));

	return sizeof(aux);
}

static void ceph_fscache_inode_get_attr(const void *cookie_netfs_data,
					uint64_t *size)
{
	const struct ceph_inode_info* ci = cookie_netfs_data;
	*size = i_size_read(&ci->vfs_inode);
}

static enum fscache_checkaux ceph_fscache_inode_check_aux(
	void *cookie_netfs_data, const void *data, uint16_t dlen)
{
	struct ceph_aux_inode aux;
	struct ceph_inode_info* ci = cookie_netfs_data;
	struct inode* inode = &ci->vfs_inode;

	if (dlen != sizeof(aux))
		return FSCACHE_CHECKAUX_OBSOLETE;

	memset(&aux, 0, sizeof(aux));
	aux.mtime = inode->i_mtime;
	aux.size = i_size_read(inode);

	if (memcmp(data, &aux, sizeof(aux)) != 0)
		return FSCACHE_CHECKAUX_OBSOLETE;

	dout("ceph inode 0x%p cached okay", ci);
	return FSCACHE_CHECKAUX_OKAY;
}

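/*
 * Called by fscache when the backing object is going away: walk the
 * inode's page cache and clear PG_fscache on every page.
 */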
static void ceph_fscache_inode_now_uncached(void* cookie_netfs_data)
{
	struct ceph_inode_info* ci = cookie_netfs_data;
	struct pagevec pvec;
	pgoff_t first;
	int loop, nr_pages;

	pagevec_init(&pvec, 0);
	first = 0;

	dout("ceph inode 0x%p now uncached", ci);

	while (1) {
		nr_pages = pagevec_lookup(&pvec, ci->vfs_inode.i_mapping, first,
					  PAGEVEC_SIZE - pagevec_count(&pvec));

		if (!nr_pages)
			break;

		for (loop = 0; loop < nr_pages; loop++)
			ClearPageFsCache(pvec.pages[loop]);

		first = pvec.pages[nr_pages - 1]->index + 1;

		pvec.nr = nr_pages;
		pagevec_release(&pvec);
		cond_resched();
	}
}

static const struct fscache_cookie_def ceph_fscache_inode_object_def = {
	.name		= "CEPH.inode",
	.type		= FSCACHE_COOKIE_TYPE_DATAFILE,
	.get_key	= ceph_fscache_inode_get_key,
	.get_attr	= ceph_fscache_inode_get_attr,
	.get_aux	= ceph_fscache_inode_get_aux,
	.check_aux	= ceph_fscache_inode_check_aux,
	.now_uncached	= ceph_fscache_inode_now_uncached,
};

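/*
 * Acquire a data-file cookie for a regular file and ask fscache to check
 * that any existing backing object is still consistent.
 */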
void ceph_fscache_register_inode_cookie(struct ceph_fs_client* fsc,
					struct ceph_inode_info* ci)
{
	struct inode* inode = &ci->vfs_inode;

	/* No caching for filesystem */
	if (fsc->fscache == NULL)
		return;

	/* Only cache for regular files that are read only */
	if ((ci->vfs_inode.i_mode & S_IFREG) == 0)
		return;

	/* Avoid multiple racing open requests */
	inode_lock(inode);

	if (ci->fscache)
		goto done;

	ci->fscache = fscache_acquire_cookie(fsc->fscache,
					     &ceph_fscache_inode_object_def,
					     ci, true);
	fscache_check_consistency(ci->fscache);
done:
	inode_unlock(inode);
}

void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci)
{
	struct fscache_cookie* cookie;

	if ((cookie = ci->fscache) == NULL)
		return;

	ci->fscache = NULL;

	fscache_uncache_all_inode_pages(cookie, &ci->vfs_inode);
	fscache_relinquish_cookie(cookie, 0);
}

static void ceph_vfs_readpage_complete(struct page *page, void *data, int error)
{
	if (!error)
		SetPageUptodate(page);
}

static void ceph_vfs_readpage_complete_unlock(struct page *page, void *data, int error)
{
	if (!error)
		SetPageUptodate(page);

	unlock_page(page);
}

static inline int cache_valid(struct ceph_inode_info *ci)
{
	return ((ceph_caps_issued(ci) & CEPH_CAP_FILE_CACHE) &&
		(ci->i_fscache_gen == ci->i_rdcache_gen));
}

/* Attempt to read a page from the fscache.
 *
 * This function is called from the readpage_nounlock context.  DO NOT attempt
 * to unlock the page here (or in the callback).
 */
int ceph_readpage_from_fscache(struct inode *inode, struct page *page)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret;

	if (!cache_valid(ci))
		return -ENOBUFS;

	ret = fscache_read_or_alloc_page(ci->fscache, page,
					 ceph_vfs_readpage_complete, NULL,
					 GFP_KERNEL);

	switch (ret) {
	case 0: /* Page found */
		dout("page read submitted\n");
		return 0;
	case -ENOBUFS: /* Pages were not found, and can't be */
	case -ENODATA: /* Pages were not found */
		dout("page/inode not in cache\n");
		return ret;
	default:
		dout("%s: unknown error ret = %i\n", __func__, ret);
		return ret;
	}
}

int ceph_readpages_from_fscache(struct inode *inode,
				struct address_space *mapping,
				struct list_head *pages,
				unsigned *nr_pages)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret;

	if (!cache_valid(ci))
		return -ENOBUFS;

	ret = fscache_read_or_alloc_pages(ci->fscache, mapping, pages, nr_pages,
					  ceph_vfs_readpage_complete_unlock,
					  NULL, mapping_gfp_mask(mapping));

	switch (ret) {
	case 0: /* All pages found */
		dout("all-page read submitted\n");
		return 0;
	case -ENOBUFS: /* Some pages were not found, and can't be */
	case -ENODATA: /* some pages were not found */
		dout("page/inode not in cache\n");
		return ret;
	default:
		dout("%s: unknown error ret = %i\n", __func__, ret);
		return ret;
	}
}

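/*
 * Write an up-to-date page into the cache.  If the store cannot be
 * started, drop the page's cache association.
 */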
void ceph_readpage_to_fscache(struct inode *inode, struct page *page)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret;

	if (!PageFsCache(page))
		return;

	if (!cache_valid(ci))
		return;

	ret = fscache_write_page(ci->fscache, page, GFP_KERNEL);
	if (ret)
		fscache_uncache_page(ci->fscache, page);
}

void ceph_invalidate_fscache_page(struct inode* inode, struct page *page)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	if (!PageFsCache(page))
		return;

	fscache_wait_on_page_write(ci->fscache, page);
	fscache_uncache_page(ci->fscache, page);
}

void ceph_fscache_unregister_fs(struct ceph_fs_client* fsc)
{
	if (fsc->revalidate_wq)
		destroy_workqueue(fsc->revalidate_wq);

	fscache_relinquish_cookie(fsc->fscache, 0);
	fsc->fscache = NULL;
}

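/*
 * Workqueue callback: while CEPH_CAP_FILE_CACHE is still held, verify the
 * cached object with fscache_check_consistency() (invalidating it on
 * mismatch), then record the read-cache generation that was validated.
 */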
static void ceph_revalidate_work(struct work_struct *work)
{
	int issued;
	u32 orig_gen;
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_revalidate_work);
	struct inode *inode = &ci->vfs_inode;

	spin_lock(&ci->i_ceph_lock);
	issued = __ceph_caps_issued(ci, NULL);
	orig_gen = ci->i_rdcache_gen;
	spin_unlock(&ci->i_ceph_lock);

	if (!(issued & CEPH_CAP_FILE_CACHE)) {
		dout("revalidate_work lost cache before validation %p\n",
		     inode);
		goto out;
	}

	if (!fscache_check_consistency(ci->fscache))
		fscache_invalidate(ci->fscache);

	spin_lock(&ci->i_ceph_lock);
	/* Update the new valid generation (backwards sanity check too) */
	if (orig_gen > ci->i_fscache_gen) {
		ci->i_fscache_gen = orig_gen;
	}
	spin_unlock(&ci->i_ceph_lock);

out:
	iput(&ci->vfs_inode);
}

void ceph_queue_revalidate(struct inode *inode)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
	struct ceph_inode_info *ci = ceph_inode(inode);

	if (fsc->revalidate_wq == NULL || ci->fscache == NULL)
		return;

	ihold(inode);

	if (queue_work(ceph_sb_to_client(inode->i_sb)->revalidate_wq,
		       &ci->i_revalidate_work)) {
		dout("ceph_queue_revalidate %p\n", inode);
	} else {
		dout("ceph_queue_revalidate %p failed\n", inode);
		iput(inode);
	}
}

void ceph_fscache_inode_init(struct ceph_inode_info *ci)
{
	ci->fscache = NULL;
	/* The first load is verified at cookie open time */
	ci->i_fscache_gen = 1;
	INIT_WORK(&ci->i_revalidate_work, ceph_revalidate_work);
}
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Ceph cache definitions.
 *
 * Copyright (C) 2013 by Adfin Solutions, Inc. All Rights Reserved.
 * Written by Milosz Tanski (milosz@adfin.com)
 */

#include <linux/ceph/ceph_debug.h>

#include "super.h"
#include "cache.h"

struct ceph_aux_inode {
	u64	version;
	u64	mtime_sec;
	u64	mtime_nsec;
};

struct fscache_netfs ceph_cache_netfs = {
	.name		= "ceph",
	.version	= 0,
};

static DEFINE_MUTEX(ceph_fscache_lock);
static LIST_HEAD(ceph_fscache_list);

struct ceph_fscache_entry {
	struct list_head list;
	struct fscache_cookie *fscache;
	size_t uniq_len;
	/* The following members must be last */
	struct ceph_fsid fsid;
	char uniquifier[0];
};

static const struct fscache_cookie_def ceph_fscache_fsid_object_def = {
	.name		= "CEPH.fsid",
	.type		= FSCACHE_COOKIE_TYPE_INDEX,
};

int __init ceph_fscache_register(void)
{
	return fscache_register_netfs(&ceph_cache_netfs);
}

void ceph_fscache_unregister(void)
{
	fscache_unregister_netfs(&ceph_cache_netfs);
}

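/*
 * Register a per-mount fsid index cookie.  A second mount with the same
 * fsid and uniquifier is rejected with -EBUSY, since two superblocks must
 * not share a cookie; the fsc=<uniquifier> mount option lets multiple
 * mounts of one cluster be cached separately.
 */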
int ceph_fscache_register_fs(struct ceph_fs_client* fsc)
{
	const struct ceph_fsid *fsid = &fsc->client->fsid;
	const char *fscache_uniq = fsc->mount_options->fscache_uniq;
	size_t uniq_len = fscache_uniq ? strlen(fscache_uniq) : 0;
	struct ceph_fscache_entry *ent;
	int err = 0;

	mutex_lock(&ceph_fscache_lock);
	list_for_each_entry(ent, &ceph_fscache_list, list) {
		if (memcmp(&ent->fsid, fsid, sizeof(*fsid)))
			continue;
		if (ent->uniq_len != uniq_len)
			continue;
		if (uniq_len && memcmp(ent->uniquifier, fscache_uniq, uniq_len))
			continue;

		pr_err("fscache cookie already registered for fsid %pU\n", fsid);
		pr_err("  use fsc=%%s mount option to specify a uniquifier\n");
		err = -EBUSY;
		goto out_unlock;
	}

	ent = kzalloc(sizeof(*ent) + uniq_len, GFP_KERNEL);
	if (!ent) {
		err = -ENOMEM;
		goto out_unlock;
	}

	memcpy(&ent->fsid, fsid, sizeof(*fsid));
	if (uniq_len > 0) {
		memcpy(&ent->uniquifier, fscache_uniq, uniq_len);
		ent->uniq_len = uniq_len;
	}

	fsc->fscache = fscache_acquire_cookie(ceph_cache_netfs.primary_index,
					      &ceph_fscache_fsid_object_def,
					      &ent->fsid, sizeof(ent->fsid) + uniq_len,
					      NULL, 0,
					      fsc, 0, true);

	if (fsc->fscache) {
		ent->fscache = fsc->fscache;
		list_add_tail(&ent->list, &ceph_fscache_list);
	} else {
		kfree(ent);
		pr_err("unable to register fscache cookie for fsid %pU\n",
		       fsid);
		/* all other fs ignore this error */
	}
out_unlock:
	mutex_unlock(&ceph_fscache_lock);
	return err;
}

static enum fscache_checkaux ceph_fscache_inode_check_aux(
	void *cookie_netfs_data, const void *data, uint16_t dlen,
	loff_t object_size)
{
	struct ceph_aux_inode aux;
	struct ceph_inode_info* ci = cookie_netfs_data;
	struct inode* inode = &ci->vfs_inode;

	if (dlen != sizeof(aux) ||
	    i_size_read(inode) != object_size)
		return FSCACHE_CHECKAUX_OBSOLETE;

	memset(&aux, 0, sizeof(aux));
	aux.version = ci->i_version;
	aux.mtime_sec = inode->i_mtime.tv_sec;
	aux.mtime_nsec = inode->i_mtime.tv_nsec;

	if (memcmp(data, &aux, sizeof(aux)) != 0)
		return FSCACHE_CHECKAUX_OBSOLETE;

	dout("ceph inode 0x%p cached okay\n", ci);
	return FSCACHE_CHECKAUX_OKAY;
}

static const struct fscache_cookie_def ceph_fscache_inode_object_def = {
	.name		= "CEPH.inode",
	.type		= FSCACHE_COOKIE_TYPE_DATAFILE,
	.check_aux	= ceph_fscache_inode_check_aux,
};

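/*
 * Acquire a data-file cookie for a regular file.  The ceph vino is the
 * index key and (i_version, mtime) the auxiliary data; the cookie is
 * acquired disabled and enabled per open in ceph_fscache_file_set_cookie().
 */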
void ceph_fscache_register_inode_cookie(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_aux_inode aux;

	/* No caching for filesystem */
	if (!fsc->fscache)
		return;

	/* Only cache for regular files that are read only */
	if (!S_ISREG(inode->i_mode))
		return;

	inode_lock_nested(inode, I_MUTEX_CHILD);
	if (!ci->fscache) {
		memset(&aux, 0, sizeof(aux));
		aux.version = ci->i_version;
		aux.mtime_sec = inode->i_mtime.tv_sec;
		aux.mtime_nsec = inode->i_mtime.tv_nsec;
		ci->fscache = fscache_acquire_cookie(fsc->fscache,
						     &ceph_fscache_inode_object_def,
						     &ci->i_vino, sizeof(ci->i_vino),
						     &aux, sizeof(aux),
						     ci, i_size_read(inode), false);
	}
	inode_unlock(inode);
}

void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci)
{
	struct fscache_cookie* cookie;

	if ((cookie = ci->fscache) == NULL)
		return;

	ci->fscache = NULL;

	fscache_uncache_all_inode_pages(cookie, &ci->vfs_inode);
	fscache_relinquish_cookie(cookie, &ci->i_vino, false);
}

static bool ceph_fscache_can_enable(void *data)
{
	struct inode *inode = data;
	return !inode_is_open_for_write(inode);
}

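/*
 * Called at file open: if the inode is open for write, disable the cookie
 * and drop any cached pages; otherwise try to enable caching, which only
 * succeeds while no writer appears (see ceph_fscache_can_enable()).
 */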
void ceph_fscache_file_set_cookie(struct inode *inode, struct file *filp)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	if (!fscache_cookie_valid(ci->fscache))
		return;

	if (inode_is_open_for_write(inode)) {
		dout("fscache_file_set_cookie %p %p disabling cache\n",
		     inode, filp);
		fscache_disable_cookie(ci->fscache, &ci->i_vino, false);
		fscache_uncache_all_inode_pages(ci->fscache, inode);
	} else {
		fscache_enable_cookie(ci->fscache, &ci->i_vino, i_size_read(inode),
				      ceph_fscache_can_enable, inode);
		if (fscache_cookie_enabled(ci->fscache)) {
			dout("fscache_file_set_cookie %p %p enabling cache\n",
			     inode, filp);
		}
	}
}

static void ceph_readpage_from_fscache_complete(struct page *page, void *data, int error)
{
	if (!error)
		SetPageUptodate(page);

	unlock_page(page);
}

static inline bool cache_valid(struct ceph_inode_info *ci)
{
	return ci->i_fscache_gen == ci->i_rdcache_gen;
}

/* Attempt to read a page from the fscache.
 *
 * This function is called from the readpage_nounlock context; if the read is
 * submitted, the page is unlocked by the completion callback.
 */
int ceph_readpage_from_fscache(struct inode *inode, struct page *page)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret;

	if (!cache_valid(ci))
		return -ENOBUFS;

	ret = fscache_read_or_alloc_page(ci->fscache, page,
					 ceph_readpage_from_fscache_complete, NULL,
					 GFP_KERNEL);

	switch (ret) {
	case 0: /* Page found */
		dout("page read submitted\n");
		return 0;
	case -ENOBUFS: /* Pages were not found, and can't be */
	case -ENODATA: /* Pages were not found */
		dout("page/inode not in cache\n");
		return ret;
	default:
		dout("%s: unknown error ret = %i\n", __func__, ret);
		return ret;
	}
}

int ceph_readpages_from_fscache(struct inode *inode,
				struct address_space *mapping,
				struct list_head *pages,
				unsigned *nr_pages)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret;

	if (!cache_valid(ci))
		return -ENOBUFS;

	ret = fscache_read_or_alloc_pages(ci->fscache, mapping, pages, nr_pages,
					  ceph_readpage_from_fscache_complete,
					  NULL, mapping_gfp_mask(mapping));

	switch (ret) {
	case 0: /* All pages found */
		dout("all-page read submitted\n");
		return 0;
	case -ENOBUFS: /* Some pages were not found, and can't be */
	case -ENODATA: /* some pages were not found */
		dout("page/inode not in cache\n");
		return ret;
	default:
		dout("%s: unknown error ret = %i\n", __func__, ret);
		return ret;
	}
}

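/*
 * Write an up-to-date page into the cache.  If the store cannot be
 * started, drop the page's cache association.
 */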
void ceph_readpage_to_fscache(struct inode *inode, struct page *page)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret;

	if (!PageFsCache(page))
		return;

	if (!cache_valid(ci))
		return;

	ret = fscache_write_page(ci->fscache, page, i_size_read(inode),
				 GFP_KERNEL);
	if (ret)
		fscache_uncache_page(ci->fscache, page);
}

void ceph_invalidate_fscache_page(struct inode* inode, struct page *page)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	if (!PageFsCache(page))
		return;

	fscache_wait_on_page_write(ci->fscache, page);
	fscache_uncache_page(ci->fscache, page);
}

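/*
 * Tear down the per-mount cookie: remove this mount's entry from the
 * global fsid list and relinquish the cookie itself.
 */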
void ceph_fscache_unregister_fs(struct ceph_fs_client* fsc)
{
	if (fscache_cookie_valid(fsc->fscache)) {
		struct ceph_fscache_entry *ent;
		bool found = false;

		mutex_lock(&ceph_fscache_lock);
		list_for_each_entry(ent, &ceph_fscache_list, list) {
			if (ent->fscache == fsc->fscache) {
				list_del(&ent->list);
				kfree(ent);
				found = true;
				break;
			}
		}
		WARN_ON_ONCE(!found);
		mutex_unlock(&ceph_fscache_lock);

		__fscache_relinquish_cookie(fsc->fscache, NULL, false);
	}
	fsc->fscache = NULL;
}

/*
 * caller should hold CEPH_CAP_FILE_{RD,CACHE}
 */
void ceph_fscache_revalidate_cookie(struct ceph_inode_info *ci)
{
	if (cache_valid(ci))
		return;

	/* reuse i_truncate_mutex. There should be no pending
	 * truncate while the caller holds CEPH_CAP_FILE_RD */
	mutex_lock(&ci->i_truncate_mutex);
	if (!cache_valid(ci)) {
		if (fscache_check_consistency(ci->fscache, &ci->i_vino))
			fscache_invalidate(ci->fscache);
		spin_lock(&ci->i_ceph_lock);
		ci->i_fscache_gen = ci->i_rdcache_gen;
		spin_unlock(&ci->i_ceph_lock);
	}
	mutex_unlock(&ci->i_truncate_mutex);
}