#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/vmalloc.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/random.h>
#include <linux/sort.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"
#include <linux/ceph/decode.h>

/*
 * Ceph inode operations
 *
 * Implement basic inode helpers (get, alloc) and inode ops (getattr,
 * setattr, etc.), xattr helpers, and helpers for assimilating
 * metadata returned by the MDS into our cache.
 *
 * Also define helpers for doing asynchronous writeback, invalidation,
 * and truncation for the benefit of those who can't afford to block
 * (typically because they are in the message handler path).
 */

static const struct inode_operations ceph_symlink_iops;

static void ceph_invalidate_work(struct work_struct *work);
static void ceph_writeback_work(struct work_struct *work);
static void ceph_vmtruncate_work(struct work_struct *work);

/*
 * find or create an inode, given the ceph ino number
 */
static int ceph_set_ino_cb(struct inode *inode, void *data)
{
	ceph_inode(inode)->i_vino = *(struct ceph_vino *)data;
	inode->i_ino = ceph_vino_to_ino(*(struct ceph_vino *)data);
	return 0;
}

struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino)
{
	struct inode *inode;
	ino_t t = ceph_vino_to_ino(vino);

	inode = iget5_locked(sb, t, ceph_ino_compare, ceph_set_ino_cb, &vino);
	if (inode == NULL)
		return ERR_PTR(-ENOMEM);
	if (inode->i_state & I_NEW) {
		dout("get_inode created new inode %p %llx.%llx ino %llx\n",
		     inode, ceph_vinop(inode), (u64)inode->i_ino);
		unlock_new_inode(inode);
	}

	dout("get_inode on %lu=%llx.%llx got %p\n", inode->i_ino, vino.ino,
	     vino.snap, inode);
	return inode;
}

/*
 * get/construct snapdir inode for a given directory
 */
struct inode *ceph_get_snapdir(struct inode *parent)
{
	struct ceph_vino vino = {
		.ino = ceph_ino(parent),
		.snap = CEPH_SNAPDIR,
	};
	struct inode *inode = ceph_get_inode(parent->i_sb, vino);
	struct ceph_inode_info *ci = ceph_inode(inode);

	BUG_ON(!S_ISDIR(parent->i_mode));
	if (IS_ERR(inode))
		return inode;
	inode->i_mode = parent->i_mode;
	inode->i_uid = parent->i_uid;
	inode->i_gid = parent->i_gid;
	inode->i_op = &ceph_snapdir_iops;
	inode->i_fop = &ceph_snapdir_fops;
	ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */
	ci->i_rbytes = 0;
	return inode;
}

const struct inode_operations ceph_file_iops = {
	.permission = ceph_permission,
	.setattr = ceph_setattr,
	.getattr = ceph_getattr,
	.listxattr = ceph_listxattr,
	.get_acl = ceph_get_acl,
	.set_acl = ceph_set_acl,
};


/*
 * We use a 'frag tree' to keep track of the MDS's directory fragments
 * for a given inode (usually there is just a single fragment).  We
 * need to know when a child frag is delegated to a new MDS, or when
 * it is flagged as replicated, so we can direct our requests
 * accordingly.
 */
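
/*
 * A compiled-out sketch (illustrative only) of the frag encoding these
 * helpers rely on, using the ceph_frag_* helpers from
 * include/linux/ceph/ceph_frag.h: a frag packs a bit count into the
 * top 8 bits of a u32 and a left-justified 24-bit value below it, so
 * the root frag 0/0 covers the whole hash range and splitting it by
 * one bit yields the two children 0x01000000 and 0x01800000.
 */
#if 0
static void ceph_frag_encoding_example(void)
{
	u32 root = ceph_frag_make(0, 0);		/* whole range */
	u32 left = ceph_frag_make_child(root, 1, 0);	/* 0x01000000 */
	u32 right = ceph_frag_make_child(root, 1, 1);	/* 0x01800000 */

	/* every 24-bit hash value lands in exactly one of the children */
	WARN_ON(ceph_frag_contains_value(left, 0x000042) ==
		ceph_frag_contains_value(right, 0x000042));
}
#endif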

/*
 * find/create a frag in the tree
 */
static struct ceph_inode_frag *__get_or_create_frag(struct ceph_inode_info *ci,
						    u32 f)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ceph_inode_frag *frag;
	int c;

	p = &ci->i_fragtree.rb_node;
	while (*p) {
		parent = *p;
		frag = rb_entry(parent, struct ceph_inode_frag, node);
		c = ceph_frag_compare(f, frag->frag);
		if (c < 0)
			p = &(*p)->rb_left;
		else if (c > 0)
			p = &(*p)->rb_right;
		else
			return frag;
	}

	frag = kmalloc(sizeof(*frag), GFP_NOFS);
	if (!frag) {
		pr_err("__get_or_create_frag ENOMEM on %p %llx.%llx "
		       "frag %x\n", &ci->vfs_inode,
		       ceph_vinop(&ci->vfs_inode), f);
		return ERR_PTR(-ENOMEM);
	}
	frag->frag = f;
	frag->split_by = 0;
	frag->mds = -1;
	frag->ndist = 0;

	rb_link_node(&frag->node, parent, p);
	rb_insert_color(&frag->node, &ci->i_fragtree);

	dout("get_or_create_frag added %llx.%llx frag %x\n",
	     ceph_vinop(&ci->vfs_inode), f);
	return frag;
}

/*
 * find a specific frag @f
 */
struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci, u32 f)
{
	struct rb_node *n = ci->i_fragtree.rb_node;

	while (n) {
		struct ceph_inode_frag *frag =
			rb_entry(n, struct ceph_inode_frag, node);
		int c = ceph_frag_compare(f, frag->frag);
		if (c < 0)
			n = n->rb_left;
		else if (c > 0)
			n = n->rb_right;
		else
			return frag;
	}
	return NULL;
}

/*
 * Choose frag containing the given value @v.  If @pfrag is
 * specified, copy the frag delegation info to the caller if
 * it is present.
 */
static u32 __ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
			      struct ceph_inode_frag *pfrag, int *found)
{
	u32 t = ceph_frag_make(0, 0);
	struct ceph_inode_frag *frag;
	unsigned nway, i;
	u32 n;

	if (found)
		*found = 0;

	while (1) {
		WARN_ON(!ceph_frag_contains_value(t, v));
		frag = __ceph_find_frag(ci, t);
		if (!frag)
			break; /* t is a leaf */
		if (frag->split_by == 0) {
			if (pfrag)
				memcpy(pfrag, frag, sizeof(*pfrag));
			if (found)
				*found = 1;
			break;
		}

		/* choose child */
		nway = 1 << frag->split_by;
		dout("choose_frag(%x) %x splits by %d (%d ways)\n", v, t,
		     frag->split_by, nway);
		for (i = 0; i < nway; i++) {
			n = ceph_frag_make_child(t, frag->split_by, i);
			if (ceph_frag_contains_value(n, v)) {
				t = n;
				break;
			}
		}
		BUG_ON(i == nway);
	}
	dout("choose_frag(%x) = %x\n", v, t);

	return t;
}

u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
		     struct ceph_inode_frag *pfrag, int *found)
{
	u32 ret;
	mutex_lock(&ci->i_fragtree_mutex);
	ret = __ceph_choose_frag(ci, v, pfrag, found);
	mutex_unlock(&ci->i_fragtree_mutex);
	return ret;
}
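
/*
 * Illustrative only (compiled out): how a caller might map a dentry
 * name to the leaf frag that covers it, hashing with the directory's
 * configured hash function the way ceph_readdir_prepopulate() below
 * does.
 */
#if 0
static u32 frag_for_name_example(struct inode *dir,
				 const char *name, unsigned len)
{
	struct ceph_inode_info *ci = ceph_inode(dir);
	u32 hash = ceph_str_hash(ci->i_dir_layout.dl_dir_hash, name, len);

	/* mask the hash down to the 24-bit frag value space, then walk
	 * the fragtree to the covering leaf */
	return ceph_choose_frag(ci, ceph_frag_value(hash), NULL, NULL);
}
#endif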

/*
 * Process dirfrag (delegation) info from the mds.  Include leaf
 * fragment in tree ONLY if ndist > 0.  Otherwise, only
 * branches/splits are included in i_fragtree.
 */
static int ceph_fill_dirfrag(struct inode *inode,
			     struct ceph_mds_reply_dirfrag *dirinfo)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_frag *frag;
	u32 id = le32_to_cpu(dirinfo->frag);
	int mds = le32_to_cpu(dirinfo->auth);
	int ndist = le32_to_cpu(dirinfo->ndist);
	int diri_auth = -1;
	int i;
	int err = 0;

	spin_lock(&ci->i_ceph_lock);
	if (ci->i_auth_cap)
		diri_auth = ci->i_auth_cap->mds;
	spin_unlock(&ci->i_ceph_lock);

	if (mds == -1) /* CDIR_AUTH_PARENT */
		mds = diri_auth;

	mutex_lock(&ci->i_fragtree_mutex);
	if (ndist == 0 && mds == diri_auth) {
		/* no delegation info needed. */
		frag = __ceph_find_frag(ci, id);
		if (!frag)
			goto out;
		if (frag->split_by == 0) {
			/* tree leaf, remove */
			dout("fill_dirfrag removed %llx.%llx frag %x"
			     " (no ref)\n", ceph_vinop(inode), id);
			rb_erase(&frag->node, &ci->i_fragtree);
			kfree(frag);
		} else {
			/* tree branch, keep and clear */
			dout("fill_dirfrag cleared %llx.%llx frag %x"
			     " referral\n", ceph_vinop(inode), id);
			frag->mds = -1;
			frag->ndist = 0;
		}
		goto out;
	}


	/* find/add this frag to store mds delegation info */
	frag = __get_or_create_frag(ci, id);
	if (IS_ERR(frag)) {
		/* this is not the end of the world; we can continue
		   with bad/inaccurate delegation info */
		pr_err("fill_dirfrag ENOMEM on mds ref %llx.%llx fg %x\n",
		       ceph_vinop(inode), le32_to_cpu(dirinfo->frag));
		err = -ENOMEM;
		goto out;
	}

	frag->mds = mds;
	frag->ndist = min_t(u32, ndist, CEPH_MAX_DIRFRAG_REP);
	for (i = 0; i < frag->ndist; i++)
		frag->dist[i] = le32_to_cpu(dirinfo->dist[i]);
	dout("fill_dirfrag %llx.%llx frag %x ndist=%d\n",
	     ceph_vinop(inode), frag->frag, frag->ndist);

out:
	mutex_unlock(&ci->i_fragtree_mutex);
	return err;
}

static int frag_tree_split_cmp(const void *l, const void *r)
{
	struct ceph_frag_tree_split *ls = (struct ceph_frag_tree_split *)l;
	struct ceph_frag_tree_split *rs = (struct ceph_frag_tree_split *)r;
	return ceph_frag_compare(le32_to_cpu(ls->frag),
				 le32_to_cpu(rs->frag));
}

static bool is_frag_child(u32 f, struct ceph_inode_frag *frag)
{
	if (!frag)
		return f == ceph_frag_make(0, 0);
	if (ceph_frag_bits(f) != ceph_frag_bits(frag->frag) + frag->split_by)
		return false;
	return ceph_frag_contains_value(frag->frag, ceph_frag_value(f));
}
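
/*
 * Example (assuming the left-justified 24-bit encoding sketched above):
 * a one-bit frag 0x01000000 that was split by 2 has the four children
 * 0x03000000, 0x03200000, 0x03400000 and 0x03600000; is_frag_child()
 * accepts exactly those frags for it and rejects everything else.
 */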

static int ceph_fill_fragtree(struct inode *inode,
			      struct ceph_frag_tree_head *fragtree,
			      struct ceph_mds_reply_dirfrag *dirinfo)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_frag *frag, *prev_frag = NULL;
	struct rb_node *rb_node;
	unsigned i, split_by, nsplits;
	u32 id;
	bool update = false;

	mutex_lock(&ci->i_fragtree_mutex);
	nsplits = le32_to_cpu(fragtree->nsplits);
	if (nsplits != ci->i_fragtree_nsplits) {
		update = true;
	} else if (nsplits) {
		i = prandom_u32() % nsplits;
		id = le32_to_cpu(fragtree->splits[i].frag);
		if (!__ceph_find_frag(ci, id))
			update = true;
	} else if (!RB_EMPTY_ROOT(&ci->i_fragtree)) {
		rb_node = rb_first(&ci->i_fragtree);
		frag = rb_entry(rb_node, struct ceph_inode_frag, node);
		if (frag->frag != ceph_frag_make(0, 0) || rb_next(rb_node))
			update = true;
	}
	if (!update && dirinfo) {
		id = le32_to_cpu(dirinfo->frag);
		if (id != __ceph_choose_frag(ci, id, NULL, NULL))
			update = true;
	}
	if (!update)
		goto out_unlock;

	if (nsplits > 1) {
		sort(fragtree->splits, nsplits, sizeof(fragtree->splits[0]),
		     frag_tree_split_cmp, NULL);
	}

	dout("fill_fragtree %llx.%llx\n", ceph_vinop(inode));
	rb_node = rb_first(&ci->i_fragtree);
	for (i = 0; i < nsplits; i++) {
		id = le32_to_cpu(fragtree->splits[i].frag);
		split_by = le32_to_cpu(fragtree->splits[i].by);
		if (split_by == 0 || ceph_frag_bits(id) + split_by > 24) {
			pr_err("fill_fragtree %llx.%llx invalid split %d/%u, "
			       "frag %x split by %d\n", ceph_vinop(inode),
			       i, nsplits, id, split_by);
			continue;
		}
		frag = NULL;
		while (rb_node) {
			frag = rb_entry(rb_node, struct ceph_inode_frag, node);
			if (ceph_frag_compare(frag->frag, id) >= 0) {
				if (frag->frag != id)
					frag = NULL;
				else
					rb_node = rb_next(rb_node);
				break;
			}
			rb_node = rb_next(rb_node);
			/* delete stale split/leaf node */
			if (frag->split_by > 0 ||
			    !is_frag_child(frag->frag, prev_frag)) {
				rb_erase(&frag->node, &ci->i_fragtree);
				if (frag->split_by > 0)
					ci->i_fragtree_nsplits--;
				kfree(frag);
			}
			frag = NULL;
		}
		if (!frag) {
			frag = __get_or_create_frag(ci, id);
			if (IS_ERR(frag))
				continue;
		}
		if (frag->split_by == 0)
			ci->i_fragtree_nsplits++;
		frag->split_by = split_by;
		dout(" frag %x split by %d\n", frag->frag, frag->split_by);
		prev_frag = frag;
	}
	while (rb_node) {
		frag = rb_entry(rb_node, struct ceph_inode_frag, node);
		rb_node = rb_next(rb_node);
		/* delete stale split/leaf node */
		if (frag->split_by > 0 ||
		    !is_frag_child(frag->frag, prev_frag)) {
			rb_erase(&frag->node, &ci->i_fragtree);
			if (frag->split_by > 0)
				ci->i_fragtree_nsplits--;
			kfree(frag);
		}
	}
out_unlock:
	mutex_unlock(&ci->i_fragtree_mutex);
	return 0;
}

/*
 * initialize a newly allocated inode.
 */
struct inode *ceph_alloc_inode(struct super_block *sb)
{
	struct ceph_inode_info *ci;
	int i;

	ci = kmem_cache_alloc(ceph_inode_cachep, GFP_NOFS);
	if (!ci)
		return NULL;

	dout("alloc_inode %p\n", &ci->vfs_inode);

	spin_lock_init(&ci->i_ceph_lock);

	ci->i_version = 0;
	ci->i_inline_version = 0;
	ci->i_time_warp_seq = 0;
	ci->i_ceph_flags = 0;
	atomic64_set(&ci->i_ordered_count, 1);
	atomic64_set(&ci->i_release_count, 1);
	atomic64_set(&ci->i_complete_seq[0], 0);
	atomic64_set(&ci->i_complete_seq[1], 0);
	ci->i_symlink = NULL;

	memset(&ci->i_dir_layout, 0, sizeof(ci->i_dir_layout));
	RCU_INIT_POINTER(ci->i_layout.pool_ns, NULL);

	ci->i_fragtree = RB_ROOT;
	mutex_init(&ci->i_fragtree_mutex);

	ci->i_xattrs.blob = NULL;
	ci->i_xattrs.prealloc_blob = NULL;
	ci->i_xattrs.dirty = false;
	ci->i_xattrs.index = RB_ROOT;
	ci->i_xattrs.count = 0;
	ci->i_xattrs.names_size = 0;
	ci->i_xattrs.vals_size = 0;
	ci->i_xattrs.version = 0;
	ci->i_xattrs.index_version = 0;

	ci->i_caps = RB_ROOT;
	ci->i_auth_cap = NULL;
	ci->i_dirty_caps = 0;
	ci->i_flushing_caps = 0;
	INIT_LIST_HEAD(&ci->i_dirty_item);
	INIT_LIST_HEAD(&ci->i_flushing_item);
	ci->i_prealloc_cap_flush = NULL;
	INIT_LIST_HEAD(&ci->i_cap_flush_list);
	init_waitqueue_head(&ci->i_cap_wq);
	ci->i_hold_caps_min = 0;
	ci->i_hold_caps_max = 0;
	INIT_LIST_HEAD(&ci->i_cap_delay_list);
	INIT_LIST_HEAD(&ci->i_cap_snaps);
	ci->i_head_snapc = NULL;
	ci->i_snap_caps = 0;

	for (i = 0; i < CEPH_FILE_MODE_BITS; i++)
		ci->i_nr_by_mode[i] = 0;

	mutex_init(&ci->i_truncate_mutex);
	ci->i_truncate_seq = 0;
	ci->i_truncate_size = 0;
	ci->i_truncate_pending = 0;

	ci->i_max_size = 0;
	ci->i_reported_size = 0;
	ci->i_wanted_max_size = 0;
	ci->i_requested_max_size = 0;

	ci->i_pin_ref = 0;
	ci->i_rd_ref = 0;
	ci->i_rdcache_ref = 0;
	ci->i_wr_ref = 0;
	ci->i_wb_ref = 0;
	ci->i_wrbuffer_ref = 0;
	ci->i_wrbuffer_ref_head = 0;
	ci->i_shared_gen = 0;
	ci->i_rdcache_gen = 0;
	ci->i_rdcache_revoking = 0;

	INIT_LIST_HEAD(&ci->i_unsafe_writes);
	INIT_LIST_HEAD(&ci->i_unsafe_dirops);
	INIT_LIST_HEAD(&ci->i_unsafe_iops);
	spin_lock_init(&ci->i_unsafe_lock);

	ci->i_snap_realm = NULL;
	INIT_LIST_HEAD(&ci->i_snap_realm_item);
	INIT_LIST_HEAD(&ci->i_snap_flush_item);

	INIT_WORK(&ci->i_wb_work, ceph_writeback_work);
	INIT_WORK(&ci->i_pg_inv_work, ceph_invalidate_work);

	INIT_WORK(&ci->i_vmtruncate_work, ceph_vmtruncate_work);

	ceph_fscache_inode_init(ci);

	return &ci->vfs_inode;
}

static void ceph_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	struct ceph_inode_info *ci = ceph_inode(inode);

	kmem_cache_free(ceph_inode_cachep, ci);
}

void ceph_destroy_inode(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_frag *frag;
	struct rb_node *n;

	dout("destroy_inode %p ino %llx.%llx\n", inode, ceph_vinop(inode));

	ceph_fscache_unregister_inode_cookie(ci);

	ceph_queue_caps_release(inode);

	/*
	 * we may still have a snap_realm reference if there are stray
	 * caps in i_snap_caps.
	 */
	if (ci->i_snap_realm) {
		struct ceph_mds_client *mdsc =
			ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
		struct ceph_snap_realm *realm = ci->i_snap_realm;

		dout(" dropping residual ref to snap realm %p\n", realm);
		spin_lock(&realm->inodes_with_caps_lock);
		list_del_init(&ci->i_snap_realm_item);
		spin_unlock(&realm->inodes_with_caps_lock);
		ceph_put_snap_realm(mdsc, realm);
	}

	kfree(ci->i_symlink);
	while ((n = rb_first(&ci->i_fragtree)) != NULL) {
		frag = rb_entry(n, struct ceph_inode_frag, node);
		rb_erase(n, &ci->i_fragtree);
		kfree(frag);
	}
	ci->i_fragtree_nsplits = 0;

	__ceph_destroy_xattrs(ci);
	if (ci->i_xattrs.blob)
		ceph_buffer_put(ci->i_xattrs.blob);
	if (ci->i_xattrs.prealloc_blob)
		ceph_buffer_put(ci->i_xattrs.prealloc_blob);

	ceph_put_string(rcu_dereference_raw(ci->i_layout.pool_ns));

	call_rcu(&inode->i_rcu, ceph_i_callback);
}

int ceph_drop_inode(struct inode *inode)
{
	/*
	 * A positive dentry and its corresponding inode always arrive
	 * together in an MDS reply, so there is no need to keep the
	 * inode in the cache after all of its aliases are dropped.
	 */
	return 1;
}

void ceph_evict_inode(struct inode *inode)
{
	/* wait for unsafe sync writes to complete */
	ceph_sync_write_wait(inode);
	truncate_inode_pages_final(&inode->i_data);
	clear_inode(inode);
}

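/*
 * A worked example of the rounding below: i_blocks counts 512-byte
 * sectors, so byte sizes 0, 1, 512 and 513 become 0, 1, 1 and 2
 * blocks respectively ((size + 511) >> 9).
 */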
static inline blkcnt_t calc_inode_blocks(u64 size)
{
	return (size + (1<<9) - 1) >> 9;
}

/*
 * Helpers to fill in size, ctime, mtime, and atime.  We have to be
 * careful because either the client or MDS may have more up to date
 * info, depending on which capabilities are held, and whether
 * time_warp_seq or truncate_seq have increased.  (Ordinarily, mtime
 * and size are monotonically increasing, except when utimes() or
 * truncate() increments the corresponding _seq values.)
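 *
 * The _seq comparisons go through ceph_seq_cmp(), which subtracts in
 * signed 32-bit space so that ordering survives u32 wraparound: e.g.
 * ceph_seq_cmp(1, 0xffffffff) is positive, i.e. seq 1 counts as newer
 * than a sequence number that just wrapped.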
 */
int ceph_fill_file_size(struct inode *inode, int issued,
			u32 truncate_seq, u64 truncate_size, u64 size)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int queue_trunc = 0;

	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) > 0 ||
	    (truncate_seq == ci->i_truncate_seq && size > inode->i_size)) {
		dout("size %lld -> %llu\n", inode->i_size, size);
		if (size > 0 && S_ISDIR(inode->i_mode)) {
			pr_err("fill_file_size non-zero size for directory\n");
			size = 0;
		}
		i_size_write(inode, size);
		inode->i_blocks = calc_inode_blocks(size);
		ci->i_reported_size = size;
		if (truncate_seq != ci->i_truncate_seq) {
			dout("truncate_seq %u -> %u\n",
			     ci->i_truncate_seq, truncate_seq);
			ci->i_truncate_seq = truncate_seq;

			/* the MDS should have revoked these caps */
			WARN_ON_ONCE(issued & (CEPH_CAP_FILE_EXCL |
					       CEPH_CAP_FILE_RD |
					       CEPH_CAP_FILE_WR |
					       CEPH_CAP_FILE_LAZYIO));
			/*
			 * If we hold relevant caps, or in the case where we're
			 * not the only client referencing this file and we
			 * don't hold those caps, then we need to check whether
			 * the file is either opened or mmaped
			 */
			if ((issued & (CEPH_CAP_FILE_CACHE|
				       CEPH_CAP_FILE_BUFFER)) ||
			    mapping_mapped(inode->i_mapping) ||
			    __ceph_caps_file_wanted(ci)) {
				ci->i_truncate_pending++;
				queue_trunc = 1;
			}
		}
	}
	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) >= 0 &&
	    ci->i_truncate_size != truncate_size) {
		dout("truncate_size %lld -> %llu\n", ci->i_truncate_size,
		     truncate_size);
		ci->i_truncate_size = truncate_size;
	}

	if (queue_trunc)
		ceph_fscache_invalidate(inode);

	return queue_trunc;
}

void ceph_fill_file_time(struct inode *inode, int issued,
			 u64 time_warp_seq, struct timespec *ctime,
			 struct timespec *mtime, struct timespec *atime)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int warn = 0;

	if (issued & (CEPH_CAP_FILE_EXCL|
		      CEPH_CAP_FILE_WR|
		      CEPH_CAP_FILE_BUFFER|
		      CEPH_CAP_AUTH_EXCL|
		      CEPH_CAP_XATTR_EXCL)) {
		if (timespec_compare(ctime, &inode->i_ctime) > 0) {
			dout("ctime %ld.%09ld -> %ld.%09ld inc w/ cap\n",
			     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
			     ctime->tv_sec, ctime->tv_nsec);
			inode->i_ctime = *ctime;
		}
		if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) {
			/* the MDS did a utimes() */
			dout("mtime %ld.%09ld -> %ld.%09ld "
			     "tw %d -> %d\n",
			     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
			     mtime->tv_sec, mtime->tv_nsec,
			     ci->i_time_warp_seq, (int)time_warp_seq);

			inode->i_mtime = *mtime;
			inode->i_atime = *atime;
			ci->i_time_warp_seq = time_warp_seq;
		} else if (time_warp_seq == ci->i_time_warp_seq) {
			/* nobody did utimes(); take the max */
			if (timespec_compare(mtime, &inode->i_mtime) > 0) {
				dout("mtime %ld.%09ld -> %ld.%09ld inc\n",
				     inode->i_mtime.tv_sec,
				     inode->i_mtime.tv_nsec,
				     mtime->tv_sec, mtime->tv_nsec);
				inode->i_mtime = *mtime;
			}
			if (timespec_compare(atime, &inode->i_atime) > 0) {
				dout("atime %ld.%09ld -> %ld.%09ld inc\n",
				     inode->i_atime.tv_sec,
				     inode->i_atime.tv_nsec,
				     atime->tv_sec, atime->tv_nsec);
				inode->i_atime = *atime;
			}
		} else if (issued & CEPH_CAP_FILE_EXCL) {
			/* we did a utimes(); ignore mds values */
		} else {
			warn = 1;
		}
	} else {
		/* we have no write|excl caps; whatever the MDS says is true */
		if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) >= 0) {
			inode->i_ctime = *ctime;
			inode->i_mtime = *mtime;
			inode->i_atime = *atime;
			ci->i_time_warp_seq = time_warp_seq;
		} else {
			warn = 1;
		}
	}
	if (warn) /* time_warp_seq shouldn't go backwards */
		dout("%p mds time_warp_seq %llu < %u\n",
		     inode, time_warp_seq, ci->i_time_warp_seq);
}

/*
 * Populate an inode based on info from mds.  May be called on new or
 * existing inodes.
 */
static int fill_inode(struct inode *inode, struct page *locked_page,
		      struct ceph_mds_reply_info_in *iinfo,
		      struct ceph_mds_reply_dirfrag *dirinfo,
		      struct ceph_mds_session *session,
		      unsigned long ttl_from, int cap_fmode,
		      struct ceph_cap_reservation *caps_reservation)
{
	struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
	struct ceph_mds_reply_inode *info = iinfo->in;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int issued = 0, implemented, new_issued;
	struct timespec mtime, atime, ctime;
	struct ceph_buffer *xattr_blob = NULL;
	struct ceph_string *pool_ns = NULL;
	struct ceph_cap *new_cap = NULL;
	int err = 0;
	bool wake = false;
	bool queue_trunc = false;
	bool new_version = false;
	bool fill_inline = false;

	dout("fill_inode %p ino %llx.%llx v %llu had %llu\n",
	     inode, ceph_vinop(inode), le64_to_cpu(info->version),
	     ci->i_version);

	/* prealloc new cap struct */
	if (info->cap.caps && ceph_snap(inode) == CEPH_NOSNAP)
		new_cap = ceph_get_cap(mdsc, caps_reservation);

	/*
	 * prealloc xattr data, if it looks like we'll need it.  only
	 * if len > 4 (meaning there are actually xattrs; the first 4
	 * bytes are the xattr count).
	 */
	if (iinfo->xattr_len > 4) {
		xattr_blob = ceph_buffer_new(iinfo->xattr_len, GFP_NOFS);
		if (!xattr_blob)
			pr_err("fill_inode ENOMEM xattr blob %d bytes\n",
			       iinfo->xattr_len);
	}

	if (iinfo->pool_ns_len > 0)
		pool_ns = ceph_find_or_create_string(iinfo->pool_ns_data,
						     iinfo->pool_ns_len);

	spin_lock(&ci->i_ceph_lock);

	/*
	 * provided version will be odd if inode value is projected,
	 * even if stable.  skip the update if we have newer stable
	 * info (ours>=theirs, e.g. due to racing mds replies), unless
	 * we are getting projected (unstable) info (in which case the
	 * version is odd, and we want ours>theirs).
	 *   us   them
	 *   2    2     skip
	 *   3    2     skip
	 *   3    3     update
	 */
	if (ci->i_version == 0 ||
	    ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
	     le64_to_cpu(info->version) > (ci->i_version & ~1)))
		new_version = true;

	issued = __ceph_caps_issued(ci, &implemented);
	issued |= implemented | __ceph_caps_dirty(ci);
	new_issued = ~issued & le32_to_cpu(info->cap.caps);

	/* update inode */
	ci->i_version = le64_to_cpu(info->version);
	inode->i_version++;
	inode->i_rdev = le32_to_cpu(info->rdev);
	inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;

	if ((new_version || (new_issued & CEPH_CAP_AUTH_SHARED)) &&
	    (issued & CEPH_CAP_AUTH_EXCL) == 0) {
		inode->i_mode = le32_to_cpu(info->mode);
		inode->i_uid = make_kuid(&init_user_ns, le32_to_cpu(info->uid));
		inode->i_gid = make_kgid(&init_user_ns, le32_to_cpu(info->gid));
		dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
		     from_kuid(&init_user_ns, inode->i_uid),
		     from_kgid(&init_user_ns, inode->i_gid));
	}

	if ((new_version || (new_issued & CEPH_CAP_LINK_SHARED)) &&
	    (issued & CEPH_CAP_LINK_EXCL) == 0)
		set_nlink(inode, le32_to_cpu(info->nlink));

	if (new_version || (new_issued & CEPH_CAP_ANY_RD)) {
		/* be careful with mtime, atime, size */
		ceph_decode_timespec(&atime, &info->atime);
		ceph_decode_timespec(&mtime, &info->mtime);
		ceph_decode_timespec(&ctime, &info->ctime);
		ceph_fill_file_time(inode, issued,
				    le32_to_cpu(info->time_warp_seq),
				    &ctime, &mtime, &atime);
	}

	if (new_version ||
	    (new_issued & (CEPH_CAP_ANY_FILE_RD | CEPH_CAP_ANY_FILE_WR))) {
		s64 old_pool = ci->i_layout.pool_id;
		struct ceph_string *old_ns;

		ceph_file_layout_from_legacy(&ci->i_layout, &info->layout);
		old_ns = rcu_dereference_protected(ci->i_layout.pool_ns,
					lockdep_is_held(&ci->i_ceph_lock));
		rcu_assign_pointer(ci->i_layout.pool_ns, pool_ns);

		if (ci->i_layout.pool_id != old_pool || pool_ns != old_ns)
			ci->i_ceph_flags &= ~CEPH_I_POOL_PERM;

		pool_ns = old_ns;

		queue_trunc = ceph_fill_file_size(inode, issued,
					le32_to_cpu(info->truncate_seq),
					le64_to_cpu(info->truncate_size),
					le64_to_cpu(info->size));
		/* only update max_size on auth cap */
		if ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
		    ci->i_max_size != le64_to_cpu(info->max_size)) {
			dout("max_size %lld -> %llu\n", ci->i_max_size,
			     le64_to_cpu(info->max_size));
			ci->i_max_size = le64_to_cpu(info->max_size);
		}
	}

	/* xattrs */
	/* note that if i_xattrs.len <= 4, i_xattrs.data will still be NULL. */
	if ((ci->i_xattrs.version == 0 || !(issued & CEPH_CAP_XATTR_EXCL)) &&
	    le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) {
		if (ci->i_xattrs.blob)
			ceph_buffer_put(ci->i_xattrs.blob);
		ci->i_xattrs.blob = xattr_blob;
		if (xattr_blob)
			memcpy(ci->i_xattrs.blob->vec.iov_base,
			       iinfo->xattr_data, iinfo->xattr_len);
		ci->i_xattrs.version = le64_to_cpu(info->xattr_version);
		ceph_forget_all_cached_acls(inode);
		xattr_blob = NULL;
	}

	inode->i_mapping->a_ops = &ceph_aops;

	switch (inode->i_mode & S_IFMT) {
	case S_IFIFO:
	case S_IFBLK:
	case S_IFCHR:
	case S_IFSOCK:
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
		inode->i_op = &ceph_file_iops;
		break;
	case S_IFREG:
		inode->i_op = &ceph_file_iops;
		inode->i_fop = &ceph_file_fops;
		break;
	case S_IFLNK:
		inode->i_op = &ceph_symlink_iops;
		if (!ci->i_symlink) {
			u32 symlen = iinfo->symlink_len;
			char *sym;

			spin_unlock(&ci->i_ceph_lock);

			if (symlen != i_size_read(inode)) {
				pr_err("fill_inode %llx.%llx BAD symlink "
				       "size %lld\n", ceph_vinop(inode),
				       i_size_read(inode));
				i_size_write(inode, symlen);
				inode->i_blocks = calc_inode_blocks(symlen);
			}

			err = -ENOMEM;
			sym = kstrndup(iinfo->symlink, symlen, GFP_NOFS);
			if (!sym)
				goto out;

			spin_lock(&ci->i_ceph_lock);
			if (!ci->i_symlink)
				ci->i_symlink = sym;
			else
				kfree(sym); /* lost a race */
		}
		inode->i_link = ci->i_symlink;
		break;
	case S_IFDIR:
		inode->i_op = &ceph_dir_iops;
		inode->i_fop = &ceph_dir_fops;

		ci->i_dir_layout = iinfo->dir_layout;

		ci->i_files = le64_to_cpu(info->files);
		ci->i_subdirs = le64_to_cpu(info->subdirs);
		ci->i_rbytes = le64_to_cpu(info->rbytes);
		ci->i_rfiles = le64_to_cpu(info->rfiles);
		ci->i_rsubdirs = le64_to_cpu(info->rsubdirs);
		ceph_decode_timespec(&ci->i_rctime, &info->rctime);
		break;
	default:
		pr_err("fill_inode %llx.%llx BAD mode 0%o\n",
		       ceph_vinop(inode), inode->i_mode);
	}

	/* were we issued a capability? */
	if (info->cap.caps) {
		if (ceph_snap(inode) == CEPH_NOSNAP) {
			unsigned caps = le32_to_cpu(info->cap.caps);
			ceph_add_cap(inode, session,
				     le64_to_cpu(info->cap.cap_id),
				     cap_fmode, caps,
				     le32_to_cpu(info->cap.wanted),
				     le32_to_cpu(info->cap.seq),
				     le32_to_cpu(info->cap.mseq),
				     le64_to_cpu(info->cap.realm),
				     info->cap.flags, &new_cap);

			/* set dir completion flag? */
			if (S_ISDIR(inode->i_mode) &&
			    ci->i_files == 0 && ci->i_subdirs == 0 &&
			    (caps & CEPH_CAP_FILE_SHARED) &&
			    (issued & CEPH_CAP_FILE_EXCL) == 0 &&
			    !__ceph_dir_is_complete(ci)) {
				dout(" marking %p complete (empty)\n", inode);
				i_size_write(inode, 0);
				__ceph_dir_set_complete(ci,
					atomic64_read(&ci->i_release_count),
					atomic64_read(&ci->i_ordered_count));
			}

			wake = true;
		} else {
			dout(" %p got snap_caps %s\n", inode,
			     ceph_cap_string(le32_to_cpu(info->cap.caps)));
			ci->i_snap_caps |= le32_to_cpu(info->cap.caps);
			if (cap_fmode >= 0)
				__ceph_get_fmode(ci, cap_fmode);
		}
	} else if (cap_fmode >= 0) {
		pr_warn("mds issued no caps on %llx.%llx\n",
			ceph_vinop(inode));
		__ceph_get_fmode(ci, cap_fmode);
	}

	if (iinfo->inline_version > 0 &&
	    iinfo->inline_version >= ci->i_inline_version) {
		int cache_caps = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
		ci->i_inline_version = iinfo->inline_version;
		if (ci->i_inline_version != CEPH_INLINE_NONE &&
		    (locked_page ||
		     (le32_to_cpu(info->cap.caps) & cache_caps)))
			fill_inline = true;
	}

	spin_unlock(&ci->i_ceph_lock);

	if (fill_inline)
		ceph_fill_inline_data(inode, locked_page,
				      iinfo->inline_data, iinfo->inline_len);

	if (wake)
		wake_up_all(&ci->i_cap_wq);

	/* queue truncate if we saw i_size decrease */
	if (queue_trunc)
		ceph_queue_vmtruncate(inode);

	/* populate frag tree */
	if (S_ISDIR(inode->i_mode))
		ceph_fill_fragtree(inode, &info->fragtree, dirinfo);

	/* update delegation info? */
	if (dirinfo)
		ceph_fill_dirfrag(inode, dirinfo);

	err = 0;
out:
	if (new_cap)
		ceph_put_cap(mdsc, new_cap);
	if (xattr_blob)
		ceph_buffer_put(xattr_blob);
	ceph_put_string(pool_ns);
	return err;
}

/*
 * caller should hold session s_mutex.
 */
static void update_dentry_lease(struct dentry *dentry,
				struct ceph_mds_reply_lease *lease,
				struct ceph_mds_session *session,
				unsigned long from_time)
{
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	unsigned long duration = le32_to_cpu(lease->duration_ms);
	unsigned long ttl = from_time + (duration * HZ) / 1000;
	unsigned long half_ttl = from_time + (duration * HZ / 2) / 1000;
	struct inode *dir;

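	/*
	 * The MDS expressed the lease duration in milliseconds; the
	 * conversions above turn it into jiffies deadlines.  For
	 * example, a 30000 ms lease granted at from_time expires at
	 * from_time + 30 * HZ, with renewal suggested at half that.
	 */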
	spin_lock(&dentry->d_lock);
	dout("update_dentry_lease %p duration %lu ms ttl %lu\n",
	     dentry, duration, ttl);

	/* make lease_rdcache_gen match directory */
	dir = d_inode(dentry->d_parent);

	/* only track leases on regular dentries */
	if (ceph_snap(dir) != CEPH_NOSNAP)
		goto out_unlock;

	di->lease_shared_gen = ceph_inode(dir)->i_shared_gen;

	if (duration == 0)
		goto out_unlock;

	if (di->lease_gen == session->s_cap_gen &&
	    time_before(ttl, di->time))
		goto out_unlock;  /* we already have a newer lease. */

	if (di->lease_session && di->lease_session != session)
		goto out_unlock;

	ceph_dentry_lru_touch(dentry);

	if (!di->lease_session)
		di->lease_session = ceph_get_mds_session(session);
	di->lease_gen = session->s_cap_gen;
	di->lease_seq = le32_to_cpu(lease->seq);
	di->lease_renew_after = half_ttl;
	di->lease_renew_from = 0;
	di->time = ttl;
out_unlock:
	spin_unlock(&dentry->d_lock);
	return;
}

/*
 * splice a dentry to an inode.
 * caller must hold directory i_mutex for this to be safe.
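 *
 * d_splice_alias() returns NULL when the passed dentry was used as-is,
 * an existing alias dentry that should be used instead of it, or an
 * ERR_PTR on failure; the three branches below mirror that contract.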
 */
static struct dentry *splice_dentry(struct dentry *dn, struct inode *in)
{
	struct dentry *realdn;

	BUG_ON(d_inode(dn));

	/* dn must be unhashed */
	if (!d_unhashed(dn))
		d_drop(dn);
	realdn = d_splice_alias(in, dn);
	if (IS_ERR(realdn)) {
		pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n",
		       PTR_ERR(realdn), dn, in, ceph_vinop(in));
		dn = realdn; /* note realdn contains the error */
		goto out;
	} else if (realdn) {
		dout("dn %p (%d) spliced with %p (%d) "
		     "inode %p ino %llx.%llx\n",
		     dn, d_count(dn),
		     realdn, d_count(realdn),
		     d_inode(realdn), ceph_vinop(d_inode(realdn)));
		dput(dn);
		dn = realdn;
	} else {
		BUG_ON(!ceph_dentry(dn));
		dout("dn %p attached to %p ino %llx.%llx\n",
		     dn, d_inode(dn), ceph_vinop(d_inode(dn)));
	}
out:
	return dn;
}

/*
 * Incorporate results into the local cache.  This is either just
 * one inode, or a directory, dentry, and possibly linked-to inode (e.g.,
 * after a lookup).
 *
 * A reply may contain
 *         a directory inode along with a dentry.
 *         and/or a target inode
 *
 * Called with snap_rwsem (read).
 */
int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
		    struct ceph_mds_session *session)
{
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	struct inode *in = NULL;
	struct ceph_vino vino;
	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
	int err = 0;

	dout("fill_trace %p is_dentry %d is_target %d\n", req,
	     rinfo->head->is_dentry, rinfo->head->is_target);

#if 0
	/*
	 * Debugging hook:
	 *
	 * If we resend completed ops to a recovering mds, we get no
	 * trace.  Since that is very rare, pretend this is the case
	 * to ensure the 'no trace' handlers in the callers behave.
	 *
	 * Fill in inodes unconditionally to avoid breaking cap
	 * invariants.
	 */
	if (rinfo->head->op & CEPH_MDS_OP_WRITE) {
		pr_info("fill_trace faking empty trace on %lld %s\n",
			req->r_tid, ceph_mds_op_name(rinfo->head->op));
		if (rinfo->head->is_dentry) {
			rinfo->head->is_dentry = 0;
			err = fill_inode(req->r_locked_dir,
					 &rinfo->diri, rinfo->dirfrag,
					 session, req->r_request_started, -1);
		}
		if (rinfo->head->is_target) {
			rinfo->head->is_target = 0;
			ininfo = rinfo->targeti.in;
			vino.ino = le64_to_cpu(ininfo->ino);
			vino.snap = le64_to_cpu(ininfo->snapid);
			in = ceph_get_inode(sb, vino);
			err = fill_inode(in, &rinfo->targeti, NULL,
					 session, req->r_request_started,
					 req->r_fmode);
			iput(in);
		}
	}
#endif

	if (!rinfo->head->is_target && !rinfo->head->is_dentry) {
		dout("fill_trace reply is empty!\n");
		if (rinfo->head->result == 0 && req->r_locked_dir)
			ceph_invalidate_dir_request(req);
		return 0;
	}

	if (rinfo->head->is_dentry) {
		struct inode *dir = req->r_locked_dir;

		if (dir) {
			err = fill_inode(dir, NULL,
					 &rinfo->diri, rinfo->dirfrag,
					 session, req->r_request_started, -1,
					 &req->r_caps_reservation);
			if (err < 0)
				goto done;
		} else {
			WARN_ON_ONCE(1);
		}

		if (dir && req->r_op == CEPH_MDS_OP_LOOKUPNAME) {
			struct qstr dname;
			struct dentry *dn, *parent;

			BUG_ON(!rinfo->head->is_target);
			BUG_ON(req->r_dentry);

			parent = d_find_any_alias(dir);
			BUG_ON(!parent);

			dname.name = rinfo->dname;
			dname.len = rinfo->dname_len;
			dname.hash = full_name_hash(parent, dname.name, dname.len);
			vino.ino = le64_to_cpu(rinfo->targeti.in->ino);
			vino.snap = le64_to_cpu(rinfo->targeti.in->snapid);
retry_lookup:
			dn = d_lookup(parent, &dname);
			dout("d_lookup on parent=%p name=%.*s got %p\n",
			     parent, dname.len, dname.name, dn);

			if (!dn) {
				dn = d_alloc(parent, &dname);
				dout("d_alloc %p '%.*s' = %p\n", parent,
				     dname.len, dname.name, dn);
				if (dn == NULL) {
					dput(parent);
					err = -ENOMEM;
					goto done;
				}
				err = 0;
			} else if (d_really_is_positive(dn) &&
				   (ceph_ino(d_inode(dn)) != vino.ino ||
				    ceph_snap(d_inode(dn)) != vino.snap)) {
				dout(" dn %p points to wrong inode %p\n",
				     dn, d_inode(dn));
				d_delete(dn);
				dput(dn);
				goto retry_lookup;
			}

			req->r_dentry = dn;
			dput(parent);
		}
	}

	if (rinfo->head->is_target) {
		vino.ino = le64_to_cpu(rinfo->targeti.in->ino);
		vino.snap = le64_to_cpu(rinfo->targeti.in->snapid);

		in = ceph_get_inode(sb, vino);
		if (IS_ERR(in)) {
			err = PTR_ERR(in);
			goto done;
		}
		req->r_target_inode = in;

		err = fill_inode(in, req->r_locked_page, &rinfo->targeti, NULL,
				 session, req->r_request_started,
				 (!req->r_aborted && rinfo->head->result == 0) ?
				 req->r_fmode : -1,
				 &req->r_caps_reservation);
		if (err < 0) {
			pr_err("fill_inode badness %p %llx.%llx\n",
			       in, ceph_vinop(in));
			goto done;
		}
	}

	/*
	 * ignore null lease/binding on snapdir ENOENT, or else we
	 * will have trouble splicing in the virtual snapdir later
	 */
	if (rinfo->head->is_dentry && !req->r_aborted &&
	    req->r_locked_dir &&
	    (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name,
					       fsc->mount_options->snapdir_name,
					       req->r_dentry->d_name.len))) {
		/*
		 * lookup link rename   : null -> possibly existing inode
		 * mknod symlink mkdir  : null -> new inode
		 * unlink               : linked -> null
		 */
		struct inode *dir = req->r_locked_dir;
		struct dentry *dn = req->r_dentry;
		bool have_dir_cap, have_lease;

		BUG_ON(!dn);
		BUG_ON(!dir);
		BUG_ON(d_inode(dn->d_parent) != dir);
		BUG_ON(ceph_ino(dir) !=
		       le64_to_cpu(rinfo->diri.in->ino));
		BUG_ON(ceph_snap(dir) !=
		       le64_to_cpu(rinfo->diri.in->snapid));

		/* do we have a lease on the whole dir? */
		have_dir_cap =
			(le32_to_cpu(rinfo->diri.in->cap.caps) &
			 CEPH_CAP_FILE_SHARED);

		/* do we have a dn lease? */
		have_lease = have_dir_cap ||
			le32_to_cpu(rinfo->dlease->duration_ms);
		if (!have_lease)
			dout("fill_trace  no dentry lease or dir cap\n");

		/* rename? */
		if (req->r_old_dentry && req->r_op == CEPH_MDS_OP_RENAME) {
			struct inode *olddir = req->r_old_dentry_dir;
			BUG_ON(!olddir);

			dout(" src %p '%pd' dst %p '%pd'\n",
			     req->r_old_dentry,
			     req->r_old_dentry,
			     dn, dn);
			dout("fill_trace doing d_move %p -> %p\n",
			     req->r_old_dentry, dn);

			/* d_move screws up sibling dentries' offsets */
			ceph_dir_clear_ordered(dir);
			ceph_dir_clear_ordered(olddir);

			d_move(req->r_old_dentry, dn);
			dout(" src %p '%pd' dst %p '%pd'\n",
			     req->r_old_dentry,
			     req->r_old_dentry,
			     dn, dn);

			/* ensure target dentry is invalidated, despite
			   rehashing bug in vfs_rename_dir */
			ceph_invalidate_dentry_lease(dn);

			dout("dn %p gets new offset %lld\n", req->r_old_dentry,
			     ceph_dentry(req->r_old_dentry)->offset);

			dn = req->r_old_dentry;  /* use old_dentry */
		}

		/* null dentry? */
		if (!rinfo->head->is_target) {
			dout("fill_trace null dentry\n");
			if (d_really_is_positive(dn)) {
				ceph_dir_clear_ordered(dir);
				dout("d_delete %p\n", dn);
				d_delete(dn);
			} else {
				if (have_lease && d_unhashed(dn))
					d_add(dn, NULL);
				update_dentry_lease(dn, rinfo->dlease,
						    session,
						    req->r_request_started);
			}
			goto done;
		}

		/* attach proper inode */
		if (d_really_is_negative(dn)) {
			ceph_dir_clear_ordered(dir);
			ihold(in);
			dn = splice_dentry(dn, in);
			if (IS_ERR(dn)) {
				err = PTR_ERR(dn);
				goto done;
			}
			req->r_dentry = dn;  /* may have spliced */
		} else if (d_really_is_positive(dn) && d_inode(dn) != in) {
			dout(" %p links to %p %llx.%llx, not %llx.%llx\n",
			     dn, d_inode(dn), ceph_vinop(d_inode(dn)),
			     ceph_vinop(in));
			d_invalidate(dn);
			have_lease = false;
		}

		if (have_lease)
			update_dentry_lease(dn, rinfo->dlease, session,
					    req->r_request_started);
		dout(" final dn %p\n", dn);
	} else if (!req->r_aborted &&
		   (req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
		    req->r_op == CEPH_MDS_OP_MKSNAP)) {
		struct dentry *dn = req->r_dentry;
		struct inode *dir = req->r_locked_dir;

		/* fill out a snapdir LOOKUPSNAP dentry */
		BUG_ON(!dn);
		BUG_ON(!dir);
		BUG_ON(ceph_snap(dir) != CEPH_SNAPDIR);
		dout(" linking snapped dir %p to dn %p\n", in, dn);
		ceph_dir_clear_ordered(dir);
		ihold(in);
		dn = splice_dentry(dn, in);
		if (IS_ERR(dn)) {
			err = PTR_ERR(dn);
			goto done;
		}
		req->r_dentry = dn;  /* may have spliced */
	}
done:
	dout("fill_trace done err=%d\n", err);
	return err;
}

/*
 * Prepopulate our cache with readdir results, leases, etc.
 */
static int readdir_prepopulate_inodes_only(struct ceph_mds_request *req,
					   struct ceph_mds_session *session)
{
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	int i, err = 0;

	for (i = 0; i < rinfo->dir_nr; i++) {
		struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i;
		struct ceph_vino vino;
		struct inode *in;
		int rc;

		vino.ino = le64_to_cpu(rde->inode.in->ino);
		vino.snap = le64_to_cpu(rde->inode.in->snapid);

		in = ceph_get_inode(req->r_dentry->d_sb, vino);
		if (IS_ERR(in)) {
			err = PTR_ERR(in);
			dout("new_inode badness got %d\n", err);
			continue;
		}
		rc = fill_inode(in, NULL, &rde->inode, NULL, session,
				req->r_request_started, -1,
				&req->r_caps_reservation);
		if (rc < 0) {
			pr_err("fill_inode badness on %p got %d\n", in, rc);
			err = rc;
		}
		iput(in);
	}

	return err;
}

void ceph_readdir_cache_release(struct ceph_readdir_cache_control *ctl)
{
	if (ctl->page) {
		kunmap(ctl->page);
		put_page(ctl->page);
		ctl->page = NULL;
	}
}

static int fill_readdir_cache(struct inode *dir, struct dentry *dn,
			      struct ceph_readdir_cache_control *ctl,
			      struct ceph_mds_request *req)
{
	struct ceph_inode_info *ci = ceph_inode(dir);
	unsigned nsize = PAGE_SIZE / sizeof(struct dentry*);
	unsigned idx = ctl->index % nsize;
	pgoff_t pgoff = ctl->index / nsize;

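	/*
	 * The cache is a flat array of dentry pointers spread across
	 * the directory's page cache.  With 4K pages and 8-byte
	 * pointers (an x86-64 example), nsize is 512, so cache index
	 * 1000 lands in slot 488 of page 1.
	 */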
	if (!ctl->page || pgoff != page_index(ctl->page)) {
		ceph_readdir_cache_release(ctl);
		if (idx == 0)
			ctl->page = grab_cache_page(&dir->i_data, pgoff);
		else
			ctl->page = find_lock_page(&dir->i_data, pgoff);
		if (!ctl->page) {
			ctl->index = -1;
			return idx == 0 ? -ENOMEM : 0;
		}
		/* reading/filling the cache are serialized by
		 * i_mutex, no need to use page lock */
		unlock_page(ctl->page);
		ctl->dentries = kmap(ctl->page);
		if (idx == 0)
			memset(ctl->dentries, 0, PAGE_SIZE);
	}

	if (req->r_dir_release_cnt == atomic64_read(&ci->i_release_count) &&
	    req->r_dir_ordered_cnt == atomic64_read(&ci->i_ordered_count)) {
		dout("readdir cache dn %p idx %d\n", dn, ctl->index);
		ctl->dentries[idx] = dn;
		ctl->index++;
	} else {
		dout("disable readdir cache\n");
		ctl->index = -1;
	}
	return 0;
}

int ceph_readdir_prepopulate(struct ceph_mds_request *req,
			     struct ceph_mds_session *session)
{
	struct dentry *parent = req->r_dentry;
	struct ceph_inode_info *ci = ceph_inode(d_inode(parent));
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	struct qstr dname;
	struct dentry *dn;
	struct inode *in;
	int err = 0, skipped = 0, ret, i;
	struct inode *snapdir = NULL;
	struct ceph_mds_request_head *rhead = req->r_request->front.iov_base;
	u32 frag = le32_to_cpu(rhead->args.readdir.frag);
	u32 last_hash = 0;
	u32 fpos_offset;
	struct ceph_readdir_cache_control cache_ctl = {};

	if (req->r_aborted)
		return readdir_prepopulate_inodes_only(req, session);

	if (rinfo->hash_order && req->r_path2) {
		last_hash = ceph_str_hash(ci->i_dir_layout.dl_dir_hash,
					  req->r_path2, strlen(req->r_path2));
		last_hash = ceph_frag_value(last_hash);
	}

	if (rinfo->dir_dir &&
	    le32_to_cpu(rinfo->dir_dir->frag) != frag) {
		dout("readdir_prepopulate got new frag %x -> %x\n",
		     frag, le32_to_cpu(rinfo->dir_dir->frag));
		frag = le32_to_cpu(rinfo->dir_dir->frag);
		if (!rinfo->hash_order)
			req->r_readdir_offset = 2;
	}

	if (le32_to_cpu(rinfo->head->op) == CEPH_MDS_OP_LSSNAP) {
		snapdir = ceph_get_snapdir(d_inode(parent));
		parent = d_find_alias(snapdir);
		dout("readdir_prepopulate %d items under SNAPDIR dn %p\n",
		     rinfo->dir_nr, parent);
	} else {
		dout("readdir_prepopulate %d items under dn %p\n",
		     rinfo->dir_nr, parent);
		if (rinfo->dir_dir)
			ceph_fill_dirfrag(d_inode(parent), rinfo->dir_dir);
	}

	if (ceph_frag_is_leftmost(frag) && req->r_readdir_offset == 2 &&
	    !(rinfo->hash_order && req->r_path2)) {
		/* note dir version at start of readdir so we can tell
		 * if any dentries get dropped */
		req->r_dir_release_cnt = atomic64_read(&ci->i_release_count);
		req->r_dir_ordered_cnt = atomic64_read(&ci->i_ordered_count);
		req->r_readdir_cache_idx = 0;
	}

	cache_ctl.index = req->r_readdir_cache_idx;
	fpos_offset = req->r_readdir_offset;

	/* FIXME: release caps/leases if error occurs */
	for (i = 0; i < rinfo->dir_nr; i++) {
		struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i;
		struct ceph_vino vino;

		dname.name = rde->name;
		dname.len = rde->name_len;
		dname.hash = full_name_hash(parent, dname.name, dname.len);

		vino.ino = le64_to_cpu(rde->inode.in->ino);
		vino.snap = le64_to_cpu(rde->inode.in->snapid);

		if (rinfo->hash_order) {
			u32 hash = ceph_str_hash(ci->i_dir_layout.dl_dir_hash,
						 rde->name, rde->name_len);
			hash = ceph_frag_value(hash);
			if (hash != last_hash)
				fpos_offset = 2;
			last_hash = hash;
			rde->offset = ceph_make_fpos(hash, fpos_offset++, true);
		} else {
			rde->offset = ceph_make_fpos(frag, fpos_offset++, false);
		}

retry_lookup:
		dn = d_lookup(parent, &dname);
		dout("d_lookup on parent=%p name=%.*s got %p\n",
		     parent, dname.len, dname.name, dn);

		if (!dn) {
			dn = d_alloc(parent, &dname);
			dout("d_alloc %p '%.*s' = %p\n", parent,
			     dname.len, dname.name, dn);
			if (dn == NULL) {
				dout("d_alloc badness\n");
				err = -ENOMEM;
				goto out;
			}
		} else if (d_really_is_positive(dn) &&
			   (ceph_ino(d_inode(dn)) != vino.ino ||
			    ceph_snap(d_inode(dn)) != vino.snap)) {
			dout(" dn %p points to wrong inode %p\n",
			     dn, d_inode(dn));
			d_delete(dn);
			dput(dn);
			goto retry_lookup;
		}

		/* inode */
		if (d_really_is_positive(dn)) {
			in = d_inode(dn);
		} else {
			in = ceph_get_inode(parent->d_sb, vino);
			if (IS_ERR(in)) {
				dout("new_inode badness\n");
				d_drop(dn);
				dput(dn);
				err = PTR_ERR(in);
				goto out;
			}
		}

		ret = fill_inode(in, NULL, &rde->inode, NULL, session,
				 req->r_request_started, -1,
				 &req->r_caps_reservation);
		if (ret < 0) {
			pr_err("fill_inode badness on %p\n", in);
			if (d_really_is_negative(dn))
				iput(in);
			d_drop(dn);
			err = ret;
			goto next_item;
		}

		if (d_really_is_negative(dn)) {
			struct dentry *realdn;

			if (ceph_security_xattr_deadlock(in)) {
				dout(" skip splicing dn %p to inode %p"
				     " (security xattr deadlock)\n", dn, in);
				iput(in);
				skipped++;
				goto next_item;
			}

			realdn = splice_dentry(dn, in);
			if (IS_ERR(realdn)) {
				err = PTR_ERR(realdn);
				d_drop(dn);
				dn = NULL;
				goto next_item;
			}
			dn = realdn;
		}

		ceph_dentry(dn)->offset = rde->offset;

		update_dentry_lease(dn, rde->lease, req->r_session,
				    req->r_request_started);

		if (err == 0 && skipped == 0 && cache_ctl.index >= 0) {
			ret = fill_readdir_cache(d_inode(parent), dn,
						 &cache_ctl, req);
			if (ret < 0)
				err = ret;
		}
next_item:
		if (dn)
			dput(dn);
	}
out:
	if (err == 0 && skipped == 0) {
		req->r_did_prepopulate = true;
		req->r_readdir_cache_idx = cache_ctl.index;
	}
	ceph_readdir_cache_release(&cache_ctl);
	if (snapdir) {
		iput(snapdir);
		dput(parent);
	}
	dout("readdir_prepopulate done\n");
	return err;
}

int ceph_inode_set_size(struct inode *inode, loff_t size)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret = 0;

	spin_lock(&ci->i_ceph_lock);
	dout("set_size %p %llu -> %llu\n", inode, inode->i_size, size);
	i_size_write(inode, size);
	inode->i_blocks = calc_inode_blocks(size);

	/* tell the MDS if we are approaching max_size */
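	/*
	 * Example: with a 4 MB max_size we return 1 once i_size reaches
	 * 2 MB, but only if the last size we reported was still below
	 * that halfway point, so the MDS hears about growth once per
	 * max_size window instead of on every extending write.
	 */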
	if ((size << 1) >= ci->i_max_size &&
	    (ci->i_reported_size << 1) < ci->i_max_size)
		ret = 1;

	spin_unlock(&ci->i_ceph_lock);
	return ret;
}

/*
 * Write back inode data in a worker thread.  (This can't be done
 * in the message handler context.)
 */
void ceph_queue_writeback(struct inode *inode)
{
	ihold(inode);
	if (queue_work(ceph_inode_to_client(inode)->wb_wq,
		       &ceph_inode(inode)->i_wb_work)) {
		dout("ceph_queue_writeback %p\n", inode);
	} else {
		dout("ceph_queue_writeback %p failed\n", inode);
		iput(inode);
	}
}

static void ceph_writeback_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_wb_work);
	struct inode *inode = &ci->vfs_inode;

	dout("writeback %p\n", inode);
	filemap_fdatawrite(&inode->i_data);
	iput(inode);
}

/*
 * queue an async invalidation
 */
void ceph_queue_invalidate(struct inode *inode)
{
	ihold(inode);
	if (queue_work(ceph_inode_to_client(inode)->pg_inv_wq,
		       &ceph_inode(inode)->i_pg_inv_work)) {
		dout("ceph_queue_invalidate %p\n", inode);
	} else {
		dout("ceph_queue_invalidate %p failed\n", inode);
		iput(inode);
	}
}

/*
 * Invalidate inode pages in a worker thread.  (This can't be done
 * in the message handler context.)
 */
static void ceph_invalidate_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_pg_inv_work);
	struct inode *inode = &ci->vfs_inode;
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	u32 orig_gen;
	int check = 0;

	mutex_lock(&ci->i_truncate_mutex);

	if (ACCESS_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
		pr_warn_ratelimited("invalidate_pages %p %lld forced umount\n",
				    inode, ceph_ino(inode));
		mapping_set_error(inode->i_mapping, -EIO);
		truncate_pagecache(inode, 0);
		mutex_unlock(&ci->i_truncate_mutex);
		goto out;
	}

	spin_lock(&ci->i_ceph_lock);
	dout("invalidate_pages %p gen %d revoking %d\n", inode,
	     ci->i_rdcache_gen, ci->i_rdcache_revoking);
	if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
		if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE))
			check = 1;
		spin_unlock(&ci->i_ceph_lock);
		mutex_unlock(&ci->i_truncate_mutex);
		goto out;
	}
	orig_gen = ci->i_rdcache_gen;
	spin_unlock(&ci->i_ceph_lock);

	if (invalidate_inode_pages2(inode->i_mapping) < 0) {
		pr_err("invalidate_pages %p fails\n", inode);
	}

	spin_lock(&ci->i_ceph_lock);
	if (orig_gen == ci->i_rdcache_gen &&
	    orig_gen == ci->i_rdcache_revoking) {
		dout("invalidate_pages %p gen %d successful\n", inode,
		     ci->i_rdcache_gen);
		ci->i_rdcache_revoking--;
		check = 1;
	} else {
		dout("invalidate_pages %p gen %d raced, now %d revoking %d\n",
		     inode, orig_gen, ci->i_rdcache_gen,
		     ci->i_rdcache_revoking);
		if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE))
			check = 1;
	}
	spin_unlock(&ci->i_ceph_lock);
	mutex_unlock(&ci->i_truncate_mutex);
out:
	if (check)
		ceph_check_caps(ci, 0, NULL);
	iput(inode);
}


/*
 * called by trunc_wq; carries out the actual truncation in a separate
 * worker thread (it can't be done in the message handler path).
 */
static void ceph_vmtruncate_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_vmtruncate_work);
	struct inode *inode = &ci->vfs_inode;

	dout("vmtruncate_work %p\n", inode);
	__ceph_do_pending_vmtruncate(inode);
	iput(inode);
}

/*
 * Queue an async vmtruncate.  If we fail to queue work, we will handle
 * the truncation the next time we call __ceph_do_pending_vmtruncate.
 */
void ceph_queue_vmtruncate(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	ihold(inode);

	if (queue_work(ceph_sb_to_client(inode->i_sb)->trunc_wq,
		       &ci->i_vmtruncate_work)) {
		dout("ceph_queue_vmtruncate %p\n", inode);
	} else {
		dout("ceph_queue_vmtruncate %p failed, pending=%d\n",
		     inode, ci->i_truncate_pending);
		iput(inode);
	}
}

/*
 * Make sure any pending truncation is applied before doing anything
 * that may depend on it.
 */
void __ceph_do_pending_vmtruncate(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 to;
	int wrbuffer_refs, finish = 0;

	mutex_lock(&ci->i_truncate_mutex);
retry:
	spin_lock(&ci->i_ceph_lock);
	if (ci->i_truncate_pending == 0) {
		dout("__do_pending_vmtruncate %p none pending\n", inode);
		spin_unlock(&ci->i_ceph_lock);
		mutex_unlock(&ci->i_truncate_mutex);
		return;
	}

	/*
	 * make sure any dirty snapped pages are flushed before we
	 * possibly truncate them.. so write AND block!
	 */
	if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) {
		dout("__do_pending_vmtruncate %p flushing snaps first\n",
		     inode);
		spin_unlock(&ci->i_ceph_lock);
		filemap_write_and_wait_range(&inode->i_data, 0,
					     inode->i_sb->s_maxbytes);
		goto retry;
	}

	/* there should be no reader or writer */
	WARN_ON_ONCE(ci->i_rd_ref || ci->i_wr_ref);

	to = ci->i_truncate_size;
	wrbuffer_refs = ci->i_wrbuffer_ref;
	dout("__do_pending_vmtruncate %p (%d) to %lld\n", inode,
	     ci->i_truncate_pending, to);
	spin_unlock(&ci->i_ceph_lock);

	truncate_pagecache(inode, to);

	spin_lock(&ci->i_ceph_lock);
	if (to == ci->i_truncate_size) {
		ci->i_truncate_pending = 0;
		finish = 1;
	}
	spin_unlock(&ci->i_ceph_lock);
	if (!finish)
		goto retry;

	mutex_unlock(&ci->i_truncate_mutex);

	if (wrbuffer_refs == 0)
		ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);

	wake_up_all(&ci->i_cap_wq);
}

/*
 * symlinks
 */
static const struct inode_operations ceph_symlink_iops = {
	.get_link = simple_get_link,
	.setattr = ceph_setattr,
	.getattr = ceph_getattr,
	.listxattr = ceph_listxattr,
};

int __ceph_setattr(struct inode *inode, struct iattr *attr)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	const unsigned int ia_valid = attr->ia_valid;
	struct ceph_mds_request *req;
	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
	struct ceph_cap_flush *prealloc_cf;
	int issued;
	int release = 0, dirtied = 0;
	int mask = 0;
	int err = 0;
	int inode_dirty_flags = 0;
	bool lock_snap_rwsem = false;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return -ENOMEM;

	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETATTR,
				       USE_AUTH_MDS);
	if (IS_ERR(req)) {
		ceph_free_cap_flush(prealloc_cf);
		return PTR_ERR(req);
	}

	spin_lock(&ci->i_ceph_lock);
	issued = __ceph_caps_issued(ci, NULL);

	if (!ci->i_head_snapc &&
	    (issued & (CEPH_CAP_ANY_EXCL | CEPH_CAP_FILE_WR))) {
		lock_snap_rwsem = true;
		if (!down_read_trylock(&mdsc->snap_rwsem)) {
			spin_unlock(&ci->i_ceph_lock);
			down_read(&mdsc->snap_rwsem);
			spin_lock(&ci->i_ceph_lock);
			issued = __ceph_caps_issued(ci, NULL);
		}
	}

	dout("setattr %p issued %s\n", inode, ceph_cap_string(issued));

	if (ia_valid & ATTR_UID) {
		dout("setattr %p uid %d -> %d\n", inode,
		     from_kuid(&init_user_ns, inode->i_uid),
		     from_kuid(&init_user_ns, attr->ia_uid));
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_uid = attr->ia_uid;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   !uid_eq(attr->ia_uid, inode->i_uid)) {
			req->r_args.setattr.uid = cpu_to_le32(
				from_kuid(&init_user_ns, attr->ia_uid));
			mask |= CEPH_SETATTR_UID;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}
	if (ia_valid & ATTR_GID) {
		dout("setattr %p gid %d -> %d\n", inode,
		     from_kgid(&init_user_ns, inode->i_gid),
		     from_kgid(&init_user_ns, attr->ia_gid));
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_gid = attr->ia_gid;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   !gid_eq(attr->ia_gid, inode->i_gid)) {
			req->r_args.setattr.gid = cpu_to_le32(
				from_kgid(&init_user_ns, attr->ia_gid));
			mask |= CEPH_SETATTR_GID;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}
	if (ia_valid & ATTR_MODE) {
		dout("setattr %p mode 0%o -> 0%o\n", inode, inode->i_mode,
		     attr->ia_mode);
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_mode = attr->ia_mode;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   attr->ia_mode != inode->i_mode) {
			inode->i_mode = attr->ia_mode;
			req->r_args.setattr.mode = cpu_to_le32(attr->ia_mode);
			mask |= CEPH_SETATTR_MODE;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}

	if (ia_valid & ATTR_ATIME) {
		dout("setattr %p atime %ld.%ld -> %ld.%ld\n", inode,
		     inode->i_atime.tv_sec, inode->i_atime.tv_nsec,
		     attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec);
		if (issued & CEPH_CAP_FILE_EXCL) {
			ci->i_time_warp_seq++;
			inode->i_atime = attr->ia_atime;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_WR) &&
			   timespec_compare(&inode->i_atime,
					    &attr->ia_atime) < 0) {
			inode->i_atime = attr->ia_atime;
			dirtied |= CEPH_CAP_FILE_WR;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   !timespec_equal(&inode->i_atime, &attr->ia_atime)) {
			ceph_encode_timespec(&req->r_args.setattr.atime,
					     &attr->ia_atime);
			mask |= CEPH_SETATTR_ATIME;
			release |= CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_RD |
				   CEPH_CAP_FILE_WR;
		}
	}
	if (ia_valid & ATTR_MTIME) {
		dout("setattr %p mtime %ld.%ld -> %ld.%ld\n", inode,
		     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
		     attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec);
		if (issued & CEPH_CAP_FILE_EXCL) {
			ci->i_time_warp_seq++;
			inode->i_mtime = attr->ia_mtime;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_WR) &&
			   timespec_compare(&inode->i_mtime,
					    &attr->ia_mtime) < 0) {
			inode->i_mtime = attr->ia_mtime;
			dirtied |= CEPH_CAP_FILE_WR;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   !timespec_equal(&inode->i_mtime, &attr->ia_mtime)) {
			ceph_encode_timespec(&req->r_args.setattr.mtime,
					     &attr->ia_mtime);
			mask |= CEPH_SETATTR_MTIME;
			release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
				   CEPH_CAP_FILE_WR;
		}
	}
	if (ia_valid & ATTR_SIZE) {
		dout("setattr %p size %lld -> %lld\n", inode,
		     inode->i_size, attr->ia_size);
		if ((issued & CEPH_CAP_FILE_EXCL) &&
		    attr->ia_size > inode->i_size) {
			i_size_write(inode, attr->ia_size);
			inode->i_blocks = calc_inode_blocks(attr->ia_size);
2016 inode->i_ctime = attr->ia_ctime;
2017 ci->i_reported_size = attr->ia_size;
2018 dirtied |= CEPH_CAP_FILE_EXCL;
2019 } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
2020 attr->ia_size != inode->i_size) {
2021 req->r_args.setattr.size = cpu_to_le64(attr->ia_size);
2022 req->r_args.setattr.old_size =
2023 cpu_to_le64(inode->i_size);
2024 mask |= CEPH_SETATTR_SIZE;
2025 release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
2026 CEPH_CAP_FILE_WR;
2027 }
2028 }
2029
2030 /* these do nothing */
2031 if (ia_valid & ATTR_CTIME) {
2032 bool only = (ia_valid & (ATTR_SIZE|ATTR_MTIME|ATTR_ATIME|
2033 ATTR_MODE|ATTR_UID|ATTR_GID)) == 0;
2034 dout("setattr %p ctime %ld.%ld -> %ld.%ld (%s)\n", inode,
2035 inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
2036 attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec,
2037 only ? "ctime only" : "ignored");
2038 inode->i_ctime = attr->ia_ctime;
2039 if (only) {
2040 /*
 2041			 * if the kernel wants to dirty ctime but nothing else,
 2042			 * we need to choose a cap to dirty under, or do
 2043			 * an almost-no-op setattr
2044 */
2045 if (issued & CEPH_CAP_AUTH_EXCL)
2046 dirtied |= CEPH_CAP_AUTH_EXCL;
2047 else if (issued & CEPH_CAP_FILE_EXCL)
2048 dirtied |= CEPH_CAP_FILE_EXCL;
2049 else if (issued & CEPH_CAP_XATTR_EXCL)
2050 dirtied |= CEPH_CAP_XATTR_EXCL;
2051 else
2052 mask |= CEPH_SETATTR_CTIME;
2053 }
2054 }
2055 if (ia_valid & ATTR_FILE)
2056 dout("setattr %p ATTR_FILE ... hrm!\n", inode);
2057
2058 if (dirtied) {
2059 inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied,
2060 &prealloc_cf);
2061 inode->i_ctime = current_time(inode);
2062 }
2063
2064 release &= issued;
2065 spin_unlock(&ci->i_ceph_lock);
2066 if (lock_snap_rwsem)
2067 up_read(&mdsc->snap_rwsem);
2068
2069 if (inode_dirty_flags)
2070 __mark_inode_dirty(inode, inode_dirty_flags);
2071
2072 if (ia_valid & ATTR_MODE) {
2073 err = posix_acl_chmod(inode, attr->ia_mode);
2074 if (err)
2075 goto out_put;
2076 }
2077
2078 if (mask) {
2079 req->r_inode = inode;
2080 ihold(inode);
2081 req->r_inode_drop = release;
2082 req->r_args.setattr.mask = cpu_to_le32(mask);
2083 req->r_num_caps = 1;
2084 err = ceph_mdsc_do_request(mdsc, NULL, req);
2085 }
2086 dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err,
2087 ceph_cap_string(dirtied), mask);
2088
2089 ceph_mdsc_put_request(req);
2090 if (mask & CEPH_SETATTR_SIZE)
2091 __ceph_do_pending_vmtruncate(inode);
2092 ceph_free_cap_flush(prealloc_cf);
2093 return err;
2094out_put:
2095 ceph_mdsc_put_request(req);
2096 ceph_free_cap_flush(prealloc_cf);
2097 return err;
2098}
2099
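/*
 * Illustrative sketch (not built): the per-attribute policy that
 * __ceph_setattr() applies inline above, factored into hypothetical
 * helpers.  'excl_cap'/'shared_cap' would be e.g. CEPH_CAP_AUTH_EXCL
 * and CEPH_CAP_AUTH_SHARED for the uid/gid/mode cases.
 */
#if 0
static bool setattr_apply_locally(int issued, int excl_cap)
{
	/* holding the exclusive cap lets us update the attribute in
	 * memory and just mark the cap dirty; it is flushed later */
	return (issued & excl_cap) != 0;
}

static bool setattr_ask_mds(int issued, int shared_cap, bool changed)
{
	/* without even the shared cap our cached value may be stale,
	 * so always ask the MDS; with it, only if the value changed */
	return (issued & shared_cap) == 0 || changed;
}
#endif
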
2100/*
2101 * setattr
2102 */
2103int ceph_setattr(struct dentry *dentry, struct iattr *attr)
2104{
2105 struct inode *inode = d_inode(dentry);
2106 int err;
2107
2108 if (ceph_snap(inode) != CEPH_NOSNAP)
2109 return -EROFS;
2110
2111 err = setattr_prepare(dentry, attr);
2112 if (err != 0)
2113 return err;
2114
2115 return __ceph_setattr(inode, attr);
2116}
2117
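/*
 * Illustrative userspace sketch (not part of this file): operations
 * that reach ceph_setattr() through the VFS on a CephFS mount.
 */
#if 0
#include <sys/stat.h>
#include <unistd.h>

static int poke_setattr(const char *path)
{
	/* chmod -> ATTR_MODE: applied locally if we hold the auth
	 * exclusive cap, otherwise sent to the MDS as
	 * CEPH_SETATTR_MODE */
	if (chmod(path, 0640) < 0)
		return -1;
	/* truncate -> ATTR_SIZE: a SETATTR_SIZE request, after which
	 * any pending vmtruncate is applied (see above) */
	if (truncate(path, 4096) < 0)
		return -1;
	return 0;
}
#endif
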
2118/*
2119 * Verify that we have a lease on the given mask. If not,
2120 * do a getattr against an mds.
2121 */
2122int __ceph_do_getattr(struct inode *inode, struct page *locked_page,
2123 int mask, bool force)
2124{
2125 struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
2126 struct ceph_mds_client *mdsc = fsc->mdsc;
2127 struct ceph_mds_request *req;
2128 int err;
2129
2130 if (ceph_snap(inode) == CEPH_SNAPDIR) {
2131 dout("do_getattr inode %p SNAPDIR\n", inode);
2132 return 0;
2133 }
2134
2135 dout("do_getattr inode %p mask %s mode 0%o\n",
2136 inode, ceph_cap_string(mask), inode->i_mode);
2137 if (!force && ceph_caps_issued_mask(ceph_inode(inode), mask, 1))
2138 return 0;
2139
2140 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS);
2141 if (IS_ERR(req))
2142 return PTR_ERR(req);
2143 req->r_inode = inode;
2144 ihold(inode);
2145 req->r_num_caps = 1;
2146 req->r_args.getattr.mask = cpu_to_le32(mask);
2147 req->r_locked_page = locked_page;
2148 err = ceph_mdsc_do_request(mdsc, NULL, req);
2149 if (locked_page && err == 0) {
2150 u64 inline_version = req->r_reply_info.targeti.inline_version;
2151 if (inline_version == 0) {
2152 /* the reply is supposed to contain inline data */
2153 err = -EINVAL;
2154 } else if (inline_version == CEPH_INLINE_NONE) {
2155 err = -ENODATA;
2156 } else {
2157 err = req->r_reply_info.targeti.inline_len;
2158 }
2159 }
2160 ceph_mdsc_put_request(req);
2161 dout("do_getattr result=%d\n", err);
2162 return err;
2163}
2164
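/*
 * Illustrative sketch (not built): how a caller might interpret the
 * locked_page return convention above.  With a locked page supplied,
 * a non-negative return is the length of the inline data copied into
 * the page, and -ENODATA means the file has no inline data.
 */
#if 0
	ret = __ceph_do_getattr(inode, page, CEPH_STAT_CAP_INLINE_DATA,
				true);
	if (ret == -ENODATA) {
		/* no inline data; read from the OSDs instead */
	} else if (ret >= 0) {
		/* 'ret' bytes of inline data landed in 'page' */
	}
#endif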
2165
2166/*
2167 * Check inode permissions. We verify we have a valid value for
2168 * the AUTH cap, then call the generic handler.
2169 */
2170int ceph_permission(struct inode *inode, int mask)
2171{
2172 int err;
2173
2174 if (mask & MAY_NOT_BLOCK)
2175 return -ECHILD;
2176
2177 err = ceph_do_getattr(inode, CEPH_CAP_AUTH_SHARED, false);
2178
2179 if (!err)
2180 err = generic_permission(inode, mask);
2181 return err;
2182}
2183
2184/*
 2185 * Get all attributes. Hopefully someday we'll have a statlite()
2186 * and can limit the fields we require to be accurate.
2187 */
2188int ceph_getattr(struct vfsmount *mnt, struct dentry *dentry,
2189 struct kstat *stat)
2190{
2191 struct inode *inode = d_inode(dentry);
2192 struct ceph_inode_info *ci = ceph_inode(inode);
2193 int err;
2194
2195 err = ceph_do_getattr(inode, CEPH_STAT_CAP_INODE_ALL, false);
2196 if (!err) {
2197 generic_fillattr(inode, stat);
2198 stat->ino = ceph_translate_ino(inode->i_sb, inode->i_ino);
2199 if (ceph_snap(inode) != CEPH_NOSNAP)
2200 stat->dev = ceph_snap(inode);
2201 else
2202 stat->dev = 0;
2203 if (S_ISDIR(inode->i_mode)) {
2204 if (ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb),
2205 RBYTES))
2206 stat->size = ci->i_rbytes;
2207 else
2208 stat->size = ci->i_files + ci->i_subdirs;
2209 stat->blocks = 0;
2210 stat->blksize = 65536;
2211 }
2212 }
2213 return err;
2214}
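
/*
 * Illustrative userspace sketch (not part of this file): observing the
 * directory size semantics ceph_getattr() implements above.
 */
#if 0
#include <stdio.h>
#include <sys/stat.h>

static int show_dir_size(const char *dir)
{
	struct stat st;

	if (stat(dir, &st) < 0)
		return -1;
	/* on CephFS a directory's st_size is the recursive byte count
	 * (with the rbytes mount option) or the number of entries
	 * (files + subdirs), not a block-based size */
	printf("%s: st_size=%lld blksize=%ld\n", dir,
	       (long long)st.st_size, (long)st.st_blksize);
	return 0;
}
#endif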
1#include <linux/ceph/ceph_debug.h>
2
3#include <linux/module.h>
4#include <linux/fs.h>
5#include <linux/slab.h>
6#include <linux/string.h>
7#include <linux/uaccess.h>
8#include <linux/kernel.h>
9#include <linux/writeback.h>
10#include <linux/vmalloc.h>
11#include <linux/posix_acl.h>
12#include <linux/random.h>
13
14#include "super.h"
15#include "mds_client.h"
16#include "cache.h"
17#include <linux/ceph/decode.h>
18
19/*
20 * Ceph inode operations
21 *
22 * Implement basic inode helpers (get, alloc) and inode ops (getattr,
23 * setattr, etc.), xattr helpers, and helpers for assimilating
24 * metadata returned by the MDS into our cache.
25 *
26 * Also define helpers for doing asynchronous writeback, invalidation,
27 * and truncation for the benefit of those who can't afford to block
28 * (typically because they are in the message handler path).
29 */
30
31static const struct inode_operations ceph_symlink_iops;
32
33static void ceph_invalidate_work(struct work_struct *work);
34static void ceph_writeback_work(struct work_struct *work);
35static void ceph_vmtruncate_work(struct work_struct *work);
36
37/*
38 * find or create an inode, given the ceph ino number
39 */
40static int ceph_set_ino_cb(struct inode *inode, void *data)
41{
42 ceph_inode(inode)->i_vino = *(struct ceph_vino *)data;
43 inode->i_ino = ceph_vino_to_ino(*(struct ceph_vino *)data);
44 return 0;
45}
46
47struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino)
48{
49 struct inode *inode;
50 ino_t t = ceph_vino_to_ino(vino);
51
52 inode = iget5_locked(sb, t, ceph_ino_compare, ceph_set_ino_cb, &vino);
53 if (inode == NULL)
54 return ERR_PTR(-ENOMEM);
55 if (inode->i_state & I_NEW) {
56 dout("get_inode created new inode %p %llx.%llx ino %llx\n",
57 inode, ceph_vinop(inode), (u64)inode->i_ino);
58 unlock_new_inode(inode);
59 }
60
61 dout("get_inode on %lu=%llx.%llx got %p\n", inode->i_ino, vino.ino,
62 vino.snap, inode);
63 return inode;
64}
65
66/*
67 * get/constuct snapdir inode for a given directory
68 */
69struct inode *ceph_get_snapdir(struct inode *parent)
70{
71 struct ceph_vino vino = {
72 .ino = ceph_ino(parent),
73 .snap = CEPH_SNAPDIR,
74 };
75 struct inode *inode = ceph_get_inode(parent->i_sb, vino);
76 struct ceph_inode_info *ci = ceph_inode(inode);
77
78 BUG_ON(!S_ISDIR(parent->i_mode));
79 if (IS_ERR(inode))
80 return inode;
81 inode->i_mode = parent->i_mode;
82 inode->i_uid = parent->i_uid;
83 inode->i_gid = parent->i_gid;
84 inode->i_op = &ceph_snapdir_iops;
85 inode->i_fop = &ceph_snapdir_fops;
86 ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */
87 ci->i_rbytes = 0;
88 return inode;
89}
90
91const struct inode_operations ceph_file_iops = {
92 .permission = ceph_permission,
93 .setattr = ceph_setattr,
94 .getattr = ceph_getattr,
95 .setxattr = ceph_setxattr,
96 .getxattr = ceph_getxattr,
97 .listxattr = ceph_listxattr,
98 .removexattr = ceph_removexattr,
99 .get_acl = ceph_get_acl,
100 .set_acl = ceph_set_acl,
101};
102
103
104/*
105 * We use a 'frag tree' to keep track of the MDS's directory fragments
106 * for a given inode (usually there is just a single fragment). We
107 * need to know when a child frag is delegated to a new MDS, or when
108 * it is flagged as replicated, so we can direct our requests
109 * accordingly.
110 */
111
112/*
113 * find/create a frag in the tree
114 */
115static struct ceph_inode_frag *__get_or_create_frag(struct ceph_inode_info *ci,
116 u32 f)
117{
118 struct rb_node **p;
119 struct rb_node *parent = NULL;
120 struct ceph_inode_frag *frag;
121 int c;
122
123 p = &ci->i_fragtree.rb_node;
124 while (*p) {
125 parent = *p;
126 frag = rb_entry(parent, struct ceph_inode_frag, node);
127 c = ceph_frag_compare(f, frag->frag);
128 if (c < 0)
129 p = &(*p)->rb_left;
130 else if (c > 0)
131 p = &(*p)->rb_right;
132 else
133 return frag;
134 }
135
136 frag = kmalloc(sizeof(*frag), GFP_NOFS);
137 if (!frag) {
138 pr_err("__get_or_create_frag ENOMEM on %p %llx.%llx "
139 "frag %x\n", &ci->vfs_inode,
140 ceph_vinop(&ci->vfs_inode), f);
141 return ERR_PTR(-ENOMEM);
142 }
143 frag->frag = f;
144 frag->split_by = 0;
145 frag->mds = -1;
146 frag->ndist = 0;
147
148 rb_link_node(&frag->node, parent, p);
149 rb_insert_color(&frag->node, &ci->i_fragtree);
150
151 dout("get_or_create_frag added %llx.%llx frag %x\n",
152 ceph_vinop(&ci->vfs_inode), f);
153 return frag;
154}
155
156/*
157 * find a specific frag @f
158 */
159struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci, u32 f)
160{
161 struct rb_node *n = ci->i_fragtree.rb_node;
162
163 while (n) {
164 struct ceph_inode_frag *frag =
165 rb_entry(n, struct ceph_inode_frag, node);
166 int c = ceph_frag_compare(f, frag->frag);
167 if (c < 0)
168 n = n->rb_left;
169 else if (c > 0)
170 n = n->rb_right;
171 else
172 return frag;
173 }
174 return NULL;
175}
176
177/*
178 * Choose frag containing the given value @v. If @pfrag is
179 * specified, copy the frag delegation info to the caller if
180 * it is present.
181 */
182static u32 __ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
183 struct ceph_inode_frag *pfrag, int *found)
184{
185 u32 t = ceph_frag_make(0, 0);
186 struct ceph_inode_frag *frag;
187 unsigned nway, i;
188 u32 n;
189
190 if (found)
191 *found = 0;
192
193 while (1) {
194 WARN_ON(!ceph_frag_contains_value(t, v));
195 frag = __ceph_find_frag(ci, t);
196 if (!frag)
197 break; /* t is a leaf */
198 if (frag->split_by == 0) {
199 if (pfrag)
200 memcpy(pfrag, frag, sizeof(*pfrag));
201 if (found)
202 *found = 1;
203 break;
204 }
205
206 /* choose child */
207 nway = 1 << frag->split_by;
208 dout("choose_frag(%x) %x splits by %d (%d ways)\n", v, t,
209 frag->split_by, nway);
210 for (i = 0; i < nway; i++) {
211 n = ceph_frag_make_child(t, frag->split_by, i);
212 if (ceph_frag_contains_value(n, v)) {
213 t = n;
214 break;
215 }
216 }
217 BUG_ON(i == nway);
218 }
219 dout("choose_frag(%x) = %x\n", v, t);
220
221 return t;
222}
223
224u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
225 struct ceph_inode_frag *pfrag, int *found)
226{
227 u32 ret;
228 mutex_lock(&ci->i_fragtree_mutex);
229 ret = __ceph_choose_frag(ci, v, pfrag, found);
230 mutex_unlock(&ci->i_fragtree_mutex);
231 return ret;
232}
233
234/*
235 * Process dirfrag (delegation) info from the mds. Include leaf
236 * fragment in tree ONLY if ndist > 0. Otherwise, only
237 * branches/splits are included in i_fragtree)
238 */
239static int ceph_fill_dirfrag(struct inode *inode,
240 struct ceph_mds_reply_dirfrag *dirinfo)
241{
242 struct ceph_inode_info *ci = ceph_inode(inode);
243 struct ceph_inode_frag *frag;
244 u32 id = le32_to_cpu(dirinfo->frag);
245 int mds = le32_to_cpu(dirinfo->auth);
246 int ndist = le32_to_cpu(dirinfo->ndist);
247 int diri_auth = -1;
248 int i;
249 int err = 0;
250
251 spin_lock(&ci->i_ceph_lock);
252 if (ci->i_auth_cap)
253 diri_auth = ci->i_auth_cap->mds;
254 spin_unlock(&ci->i_ceph_lock);
255
256 mutex_lock(&ci->i_fragtree_mutex);
257 if (ndist == 0 && mds == diri_auth) {
258 /* no delegation info needed. */
259 frag = __ceph_find_frag(ci, id);
260 if (!frag)
261 goto out;
262 if (frag->split_by == 0) {
263 /* tree leaf, remove */
264 dout("fill_dirfrag removed %llx.%llx frag %x"
265 " (no ref)\n", ceph_vinop(inode), id);
266 rb_erase(&frag->node, &ci->i_fragtree);
267 kfree(frag);
268 } else {
269 /* tree branch, keep and clear */
270 dout("fill_dirfrag cleared %llx.%llx frag %x"
271 " referral\n", ceph_vinop(inode), id);
272 frag->mds = -1;
273 frag->ndist = 0;
274 }
275 goto out;
276 }
277
278
279 /* find/add this frag to store mds delegation info */
280 frag = __get_or_create_frag(ci, id);
281 if (IS_ERR(frag)) {
282 /* this is not the end of the world; we can continue
283 with bad/inaccurate delegation info */
284 pr_err("fill_dirfrag ENOMEM on mds ref %llx.%llx fg %x\n",
285 ceph_vinop(inode), le32_to_cpu(dirinfo->frag));
286 err = -ENOMEM;
287 goto out;
288 }
289
290 frag->mds = mds;
291 frag->ndist = min_t(u32, ndist, CEPH_MAX_DIRFRAG_REP);
292 for (i = 0; i < frag->ndist; i++)
293 frag->dist[i] = le32_to_cpu(dirinfo->dist[i]);
294 dout("fill_dirfrag %llx.%llx frag %x ndist=%d\n",
295 ceph_vinop(inode), frag->frag, frag->ndist);
296
297out:
298 mutex_unlock(&ci->i_fragtree_mutex);
299 return err;
300}
301
302static int ceph_fill_fragtree(struct inode *inode,
303 struct ceph_frag_tree_head *fragtree,
304 struct ceph_mds_reply_dirfrag *dirinfo)
305{
306 struct ceph_inode_info *ci = ceph_inode(inode);
307 struct ceph_inode_frag *frag;
308 struct rb_node *rb_node;
309 int i;
310 u32 id, nsplits;
311 bool update = false;
312
313 mutex_lock(&ci->i_fragtree_mutex);
314 nsplits = le32_to_cpu(fragtree->nsplits);
315 if (nsplits) {
316 i = prandom_u32() % nsplits;
317 id = le32_to_cpu(fragtree->splits[i].frag);
318 if (!__ceph_find_frag(ci, id))
319 update = true;
320 } else if (!RB_EMPTY_ROOT(&ci->i_fragtree)) {
321 rb_node = rb_first(&ci->i_fragtree);
322 frag = rb_entry(rb_node, struct ceph_inode_frag, node);
323 if (frag->frag != ceph_frag_make(0, 0) || rb_next(rb_node))
324 update = true;
325 }
326 if (!update && dirinfo) {
327 id = le32_to_cpu(dirinfo->frag);
328 if (id != __ceph_choose_frag(ci, id, NULL, NULL))
329 update = true;
330 }
331 if (!update)
332 goto out_unlock;
333
334 dout("fill_fragtree %llx.%llx\n", ceph_vinop(inode));
335 rb_node = rb_first(&ci->i_fragtree);
336 for (i = 0; i < nsplits; i++) {
337 id = le32_to_cpu(fragtree->splits[i].frag);
338 frag = NULL;
339 while (rb_node) {
340 frag = rb_entry(rb_node, struct ceph_inode_frag, node);
341 if (ceph_frag_compare(frag->frag, id) >= 0) {
342 if (frag->frag != id)
343 frag = NULL;
344 else
345 rb_node = rb_next(rb_node);
346 break;
347 }
348 rb_node = rb_next(rb_node);
349 rb_erase(&frag->node, &ci->i_fragtree);
350 kfree(frag);
351 frag = NULL;
352 }
353 if (!frag) {
354 frag = __get_or_create_frag(ci, id);
355 if (IS_ERR(frag))
356 continue;
357 }
358 frag->split_by = le32_to_cpu(fragtree->splits[i].by);
359 dout(" frag %x split by %d\n", frag->frag, frag->split_by);
360 }
361 while (rb_node) {
362 frag = rb_entry(rb_node, struct ceph_inode_frag, node);
363 rb_node = rb_next(rb_node);
364 rb_erase(&frag->node, &ci->i_fragtree);
365 kfree(frag);
366 }
367out_unlock:
368 mutex_unlock(&ci->i_fragtree_mutex);
369 return 0;
370}
371
372/*
373 * initialize a newly allocated inode.
374 */
375struct inode *ceph_alloc_inode(struct super_block *sb)
376{
377 struct ceph_inode_info *ci;
378 int i;
379
380 ci = kmem_cache_alloc(ceph_inode_cachep, GFP_NOFS);
381 if (!ci)
382 return NULL;
383
384 dout("alloc_inode %p\n", &ci->vfs_inode);
385
386 spin_lock_init(&ci->i_ceph_lock);
387
388 ci->i_version = 0;
389 ci->i_inline_version = 0;
390 ci->i_time_warp_seq = 0;
391 ci->i_ceph_flags = 0;
392 atomic64_set(&ci->i_ordered_count, 1);
393 atomic64_set(&ci->i_release_count, 1);
394 atomic64_set(&ci->i_complete_seq[0], 0);
395 atomic64_set(&ci->i_complete_seq[1], 0);
396 ci->i_symlink = NULL;
397
398 memset(&ci->i_dir_layout, 0, sizeof(ci->i_dir_layout));
399 ci->i_pool_ns_len = 0;
400
401 ci->i_fragtree = RB_ROOT;
402 mutex_init(&ci->i_fragtree_mutex);
403
404 ci->i_xattrs.blob = NULL;
405 ci->i_xattrs.prealloc_blob = NULL;
406 ci->i_xattrs.dirty = false;
407 ci->i_xattrs.index = RB_ROOT;
408 ci->i_xattrs.count = 0;
409 ci->i_xattrs.names_size = 0;
410 ci->i_xattrs.vals_size = 0;
411 ci->i_xattrs.version = 0;
412 ci->i_xattrs.index_version = 0;
413
414 ci->i_caps = RB_ROOT;
415 ci->i_auth_cap = NULL;
416 ci->i_dirty_caps = 0;
417 ci->i_flushing_caps = 0;
418 INIT_LIST_HEAD(&ci->i_dirty_item);
419 INIT_LIST_HEAD(&ci->i_flushing_item);
420 ci->i_prealloc_cap_flush = NULL;
421 ci->i_cap_flush_tree = RB_ROOT;
422 init_waitqueue_head(&ci->i_cap_wq);
423 ci->i_hold_caps_min = 0;
424 ci->i_hold_caps_max = 0;
425 INIT_LIST_HEAD(&ci->i_cap_delay_list);
426 INIT_LIST_HEAD(&ci->i_cap_snaps);
427 ci->i_head_snapc = NULL;
428 ci->i_snap_caps = 0;
429
430 for (i = 0; i < CEPH_FILE_MODE_NUM; i++)
431 ci->i_nr_by_mode[i] = 0;
432
433 mutex_init(&ci->i_truncate_mutex);
434 ci->i_truncate_seq = 0;
435 ci->i_truncate_size = 0;
436 ci->i_truncate_pending = 0;
437
438 ci->i_max_size = 0;
439 ci->i_reported_size = 0;
440 ci->i_wanted_max_size = 0;
441 ci->i_requested_max_size = 0;
442
443 ci->i_pin_ref = 0;
444 ci->i_rd_ref = 0;
445 ci->i_rdcache_ref = 0;
446 ci->i_wr_ref = 0;
447 ci->i_wb_ref = 0;
448 ci->i_wrbuffer_ref = 0;
449 ci->i_wrbuffer_ref_head = 0;
450 ci->i_shared_gen = 0;
451 ci->i_rdcache_gen = 0;
452 ci->i_rdcache_revoking = 0;
453
454 INIT_LIST_HEAD(&ci->i_unsafe_writes);
455 INIT_LIST_HEAD(&ci->i_unsafe_dirops);
456 INIT_LIST_HEAD(&ci->i_unsafe_iops);
457 spin_lock_init(&ci->i_unsafe_lock);
458
459 ci->i_snap_realm = NULL;
460 INIT_LIST_HEAD(&ci->i_snap_realm_item);
461 INIT_LIST_HEAD(&ci->i_snap_flush_item);
462
463 INIT_WORK(&ci->i_wb_work, ceph_writeback_work);
464 INIT_WORK(&ci->i_pg_inv_work, ceph_invalidate_work);
465
466 INIT_WORK(&ci->i_vmtruncate_work, ceph_vmtruncate_work);
467
468 ceph_fscache_inode_init(ci);
469
470 return &ci->vfs_inode;
471}
472
473static void ceph_i_callback(struct rcu_head *head)
474{
475 struct inode *inode = container_of(head, struct inode, i_rcu);
476 struct ceph_inode_info *ci = ceph_inode(inode);
477
478 kmem_cache_free(ceph_inode_cachep, ci);
479}
480
481void ceph_destroy_inode(struct inode *inode)
482{
483 struct ceph_inode_info *ci = ceph_inode(inode);
484 struct ceph_inode_frag *frag;
485 struct rb_node *n;
486
487 dout("destroy_inode %p ino %llx.%llx\n", inode, ceph_vinop(inode));
488
489 ceph_fscache_unregister_inode_cookie(ci);
490
491 ceph_queue_caps_release(inode);
492
493 /*
494 * we may still have a snap_realm reference if there are stray
495 * caps in i_snap_caps.
496 */
497 if (ci->i_snap_realm) {
498 struct ceph_mds_client *mdsc =
499 ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
500 struct ceph_snap_realm *realm = ci->i_snap_realm;
501
502 dout(" dropping residual ref to snap realm %p\n", realm);
503 spin_lock(&realm->inodes_with_caps_lock);
504 list_del_init(&ci->i_snap_realm_item);
505 spin_unlock(&realm->inodes_with_caps_lock);
506 ceph_put_snap_realm(mdsc, realm);
507 }
508
509 kfree(ci->i_symlink);
510 while ((n = rb_first(&ci->i_fragtree)) != NULL) {
511 frag = rb_entry(n, struct ceph_inode_frag, node);
512 rb_erase(n, &ci->i_fragtree);
513 kfree(frag);
514 }
515
516 __ceph_destroy_xattrs(ci);
517 if (ci->i_xattrs.blob)
518 ceph_buffer_put(ci->i_xattrs.blob);
519 if (ci->i_xattrs.prealloc_blob)
520 ceph_buffer_put(ci->i_xattrs.prealloc_blob);
521
522 call_rcu(&inode->i_rcu, ceph_i_callback);
523}
524
525int ceph_drop_inode(struct inode *inode)
526{
527 /*
528 * Positve dentry and corresponding inode are always accompanied
529 * in MDS reply. So no need to keep inode in the cache after
530 * dropping all its aliases.
531 */
532 return 1;
533}
534
535/*
536 * Helpers to fill in size, ctime, mtime, and atime. We have to be
537 * careful because either the client or MDS may have more up to date
538 * info, depending on which capabilities are held, and whether
539 * time_warp_seq or truncate_seq have increased. (Ordinarily, mtime
540 * and size are monotonically increasing, except when utimes() or
541 * truncate() increments the corresponding _seq values.)
542 */
543int ceph_fill_file_size(struct inode *inode, int issued,
544 u32 truncate_seq, u64 truncate_size, u64 size)
545{
546 struct ceph_inode_info *ci = ceph_inode(inode);
547 int queue_trunc = 0;
548
549 if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) > 0 ||
550 (truncate_seq == ci->i_truncate_seq && size > inode->i_size)) {
551 dout("size %lld -> %llu\n", inode->i_size, size);
552 if (size > 0 && S_ISDIR(inode->i_mode)) {
553 pr_err("fill_file_size non-zero size for directory\n");
554 size = 0;
555 }
556 i_size_write(inode, size);
557 inode->i_blocks = (size + (1<<9) - 1) >> 9;
558 ci->i_reported_size = size;
559 if (truncate_seq != ci->i_truncate_seq) {
560 dout("truncate_seq %u -> %u\n",
561 ci->i_truncate_seq, truncate_seq);
562 ci->i_truncate_seq = truncate_seq;
563
564 /* the MDS should have revoked these caps */
565 WARN_ON_ONCE(issued & (CEPH_CAP_FILE_EXCL |
566 CEPH_CAP_FILE_RD |
567 CEPH_CAP_FILE_WR |
568 CEPH_CAP_FILE_LAZYIO));
569 /*
570 * If we hold relevant caps, or in the case where we're
571 * not the only client referencing this file and we
572 * don't hold those caps, then we need to check whether
573 * the file is either opened or mmaped
574 */
575 if ((issued & (CEPH_CAP_FILE_CACHE|
576 CEPH_CAP_FILE_BUFFER)) ||
577 mapping_mapped(inode->i_mapping) ||
578 __ceph_caps_file_wanted(ci)) {
579 ci->i_truncate_pending++;
580 queue_trunc = 1;
581 }
582 }
583 }
584 if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) >= 0 &&
585 ci->i_truncate_size != truncate_size) {
586 dout("truncate_size %lld -> %llu\n", ci->i_truncate_size,
587 truncate_size);
588 ci->i_truncate_size = truncate_size;
589 }
590
591 if (queue_trunc)
592 ceph_fscache_invalidate(inode);
593
594 return queue_trunc;
595}
596
597void ceph_fill_file_time(struct inode *inode, int issued,
598 u64 time_warp_seq, struct timespec *ctime,
599 struct timespec *mtime, struct timespec *atime)
600{
601 struct ceph_inode_info *ci = ceph_inode(inode);
602 int warn = 0;
603
604 if (issued & (CEPH_CAP_FILE_EXCL|
605 CEPH_CAP_FILE_WR|
606 CEPH_CAP_FILE_BUFFER|
607 CEPH_CAP_AUTH_EXCL|
608 CEPH_CAP_XATTR_EXCL)) {
609 if (timespec_compare(ctime, &inode->i_ctime) > 0) {
610 dout("ctime %ld.%09ld -> %ld.%09ld inc w/ cap\n",
611 inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
612 ctime->tv_sec, ctime->tv_nsec);
613 inode->i_ctime = *ctime;
614 }
615 if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) {
616 /* the MDS did a utimes() */
617 dout("mtime %ld.%09ld -> %ld.%09ld "
618 "tw %d -> %d\n",
619 inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
620 mtime->tv_sec, mtime->tv_nsec,
621 ci->i_time_warp_seq, (int)time_warp_seq);
622
623 inode->i_mtime = *mtime;
624 inode->i_atime = *atime;
625 ci->i_time_warp_seq = time_warp_seq;
626 } else if (time_warp_seq == ci->i_time_warp_seq) {
627 /* nobody did utimes(); take the max */
628 if (timespec_compare(mtime, &inode->i_mtime) > 0) {
629 dout("mtime %ld.%09ld -> %ld.%09ld inc\n",
630 inode->i_mtime.tv_sec,
631 inode->i_mtime.tv_nsec,
632 mtime->tv_sec, mtime->tv_nsec);
633 inode->i_mtime = *mtime;
634 }
635 if (timespec_compare(atime, &inode->i_atime) > 0) {
636 dout("atime %ld.%09ld -> %ld.%09ld inc\n",
637 inode->i_atime.tv_sec,
638 inode->i_atime.tv_nsec,
639 atime->tv_sec, atime->tv_nsec);
640 inode->i_atime = *atime;
641 }
642 } else if (issued & CEPH_CAP_FILE_EXCL) {
643 /* we did a utimes(); ignore mds values */
644 } else {
645 warn = 1;
646 }
647 } else {
648 /* we have no write|excl caps; whatever the MDS says is true */
649 if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) >= 0) {
650 inode->i_ctime = *ctime;
651 inode->i_mtime = *mtime;
652 inode->i_atime = *atime;
653 ci->i_time_warp_seq = time_warp_seq;
654 } else {
655 warn = 1;
656 }
657 }
658 if (warn) /* time_warp_seq shouldn't go backwards */
659 dout("%p mds time_warp_seq %llu < %u\n",
660 inode, time_warp_seq, ci->i_time_warp_seq);
661}
662
663/*
664 * Populate an inode based on info from mds. May be called on new or
665 * existing inodes.
666 */
667static int fill_inode(struct inode *inode, struct page *locked_page,
668 struct ceph_mds_reply_info_in *iinfo,
669 struct ceph_mds_reply_dirfrag *dirinfo,
670 struct ceph_mds_session *session,
671 unsigned long ttl_from, int cap_fmode,
672 struct ceph_cap_reservation *caps_reservation)
673{
674 struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
675 struct ceph_mds_reply_inode *info = iinfo->in;
676 struct ceph_inode_info *ci = ceph_inode(inode);
677 int issued = 0, implemented, new_issued;
678 struct timespec mtime, atime, ctime;
679 struct ceph_buffer *xattr_blob = NULL;
680 struct ceph_cap *new_cap = NULL;
681 int err = 0;
682 bool wake = false;
683 bool queue_trunc = false;
684 bool new_version = false;
685 bool fill_inline = false;
686
687 dout("fill_inode %p ino %llx.%llx v %llu had %llu\n",
688 inode, ceph_vinop(inode), le64_to_cpu(info->version),
689 ci->i_version);
690
691 /* prealloc new cap struct */
692 if (info->cap.caps && ceph_snap(inode) == CEPH_NOSNAP)
693 new_cap = ceph_get_cap(mdsc, caps_reservation);
694
695 /*
696 * prealloc xattr data, if it looks like we'll need it. only
697 * if len > 4 (meaning there are actually xattrs; the first 4
698 * bytes are the xattr count).
699 */
700 if (iinfo->xattr_len > 4) {
701 xattr_blob = ceph_buffer_new(iinfo->xattr_len, GFP_NOFS);
702 if (!xattr_blob)
703 pr_err("fill_inode ENOMEM xattr blob %d bytes\n",
704 iinfo->xattr_len);
705 }
706
707 spin_lock(&ci->i_ceph_lock);
708
709 /*
710 * provided version will be odd if inode value is projected,
711 * even if stable. skip the update if we have newer stable
712 * info (ours>=theirs, e.g. due to racing mds replies), unless
713 * we are getting projected (unstable) info (in which case the
714 * version is odd, and we want ours>theirs).
715 * us them
716 * 2 2 skip
717 * 3 2 skip
718 * 3 3 update
719 */
720 if (ci->i_version == 0 ||
721 ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
722 le64_to_cpu(info->version) > (ci->i_version & ~1)))
723 new_version = true;
724
725 issued = __ceph_caps_issued(ci, &implemented);
726 issued |= implemented | __ceph_caps_dirty(ci);
727 new_issued = ~issued & le32_to_cpu(info->cap.caps);
728
729 /* update inode */
730 ci->i_version = le64_to_cpu(info->version);
731 inode->i_version++;
732 inode->i_rdev = le32_to_cpu(info->rdev);
733 inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;
734
735 if ((new_version || (new_issued & CEPH_CAP_AUTH_SHARED)) &&
736 (issued & CEPH_CAP_AUTH_EXCL) == 0) {
737 inode->i_mode = le32_to_cpu(info->mode);
738 inode->i_uid = make_kuid(&init_user_ns, le32_to_cpu(info->uid));
739 inode->i_gid = make_kgid(&init_user_ns, le32_to_cpu(info->gid));
740 dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
741 from_kuid(&init_user_ns, inode->i_uid),
742 from_kgid(&init_user_ns, inode->i_gid));
743 }
744
745 if ((new_version || (new_issued & CEPH_CAP_LINK_SHARED)) &&
746 (issued & CEPH_CAP_LINK_EXCL) == 0)
747 set_nlink(inode, le32_to_cpu(info->nlink));
748
749 if (new_version || (new_issued & CEPH_CAP_ANY_RD)) {
750 /* be careful with mtime, atime, size */
751 ceph_decode_timespec(&atime, &info->atime);
752 ceph_decode_timespec(&mtime, &info->mtime);
753 ceph_decode_timespec(&ctime, &info->ctime);
754 ceph_fill_file_time(inode, issued,
755 le32_to_cpu(info->time_warp_seq),
756 &ctime, &mtime, &atime);
757 }
758
759 if (new_version ||
760 (new_issued & (CEPH_CAP_ANY_FILE_RD | CEPH_CAP_ANY_FILE_WR))) {
761 if (ci->i_layout.fl_pg_pool != info->layout.fl_pg_pool)
762 ci->i_ceph_flags &= ~CEPH_I_POOL_PERM;
763 ci->i_layout = info->layout;
764 ci->i_pool_ns_len = iinfo->pool_ns_len;
765
766 queue_trunc = ceph_fill_file_size(inode, issued,
767 le32_to_cpu(info->truncate_seq),
768 le64_to_cpu(info->truncate_size),
769 le64_to_cpu(info->size));
770 /* only update max_size on auth cap */
771 if ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
772 ci->i_max_size != le64_to_cpu(info->max_size)) {
773 dout("max_size %lld -> %llu\n", ci->i_max_size,
774 le64_to_cpu(info->max_size));
775 ci->i_max_size = le64_to_cpu(info->max_size);
776 }
777 }
778
779 /* xattrs */
780 /* note that if i_xattrs.len <= 4, i_xattrs.data will still be NULL. */
781 if ((ci->i_xattrs.version == 0 || !(issued & CEPH_CAP_XATTR_EXCL)) &&
782 le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) {
783 if (ci->i_xattrs.blob)
784 ceph_buffer_put(ci->i_xattrs.blob);
785 ci->i_xattrs.blob = xattr_blob;
786 if (xattr_blob)
787 memcpy(ci->i_xattrs.blob->vec.iov_base,
788 iinfo->xattr_data, iinfo->xattr_len);
789 ci->i_xattrs.version = le64_to_cpu(info->xattr_version);
790 ceph_forget_all_cached_acls(inode);
791 xattr_blob = NULL;
792 }
793
794 inode->i_mapping->a_ops = &ceph_aops;
795
796 switch (inode->i_mode & S_IFMT) {
797 case S_IFIFO:
798 case S_IFBLK:
799 case S_IFCHR:
800 case S_IFSOCK:
801 init_special_inode(inode, inode->i_mode, inode->i_rdev);
802 inode->i_op = &ceph_file_iops;
803 break;
804 case S_IFREG:
805 inode->i_op = &ceph_file_iops;
806 inode->i_fop = &ceph_file_fops;
807 break;
808 case S_IFLNK:
809 inode->i_op = &ceph_symlink_iops;
810 if (!ci->i_symlink) {
811 u32 symlen = iinfo->symlink_len;
812 char *sym;
813
814 spin_unlock(&ci->i_ceph_lock);
815
816 err = -EINVAL;
817 if (WARN_ON(symlen != i_size_read(inode)))
818 goto out;
819
820 err = -ENOMEM;
821 sym = kstrndup(iinfo->symlink, symlen, GFP_NOFS);
822 if (!sym)
823 goto out;
824
825 spin_lock(&ci->i_ceph_lock);
826 if (!ci->i_symlink)
827 ci->i_symlink = sym;
828 else
829 kfree(sym); /* lost a race */
830 }
831 inode->i_link = ci->i_symlink;
832 break;
833 case S_IFDIR:
834 inode->i_op = &ceph_dir_iops;
835 inode->i_fop = &ceph_dir_fops;
836
837 ci->i_dir_layout = iinfo->dir_layout;
838
839 ci->i_files = le64_to_cpu(info->files);
840 ci->i_subdirs = le64_to_cpu(info->subdirs);
841 ci->i_rbytes = le64_to_cpu(info->rbytes);
842 ci->i_rfiles = le64_to_cpu(info->rfiles);
843 ci->i_rsubdirs = le64_to_cpu(info->rsubdirs);
844 ceph_decode_timespec(&ci->i_rctime, &info->rctime);
845 break;
846 default:
847 pr_err("fill_inode %llx.%llx BAD mode 0%o\n",
848 ceph_vinop(inode), inode->i_mode);
849 }
850
851 /* were we issued a capability? */
852 if (info->cap.caps) {
853 if (ceph_snap(inode) == CEPH_NOSNAP) {
854 unsigned caps = le32_to_cpu(info->cap.caps);
855 ceph_add_cap(inode, session,
856 le64_to_cpu(info->cap.cap_id),
857 cap_fmode, caps,
858 le32_to_cpu(info->cap.wanted),
859 le32_to_cpu(info->cap.seq),
860 le32_to_cpu(info->cap.mseq),
861 le64_to_cpu(info->cap.realm),
862 info->cap.flags, &new_cap);
863
864 /* set dir completion flag? */
865 if (S_ISDIR(inode->i_mode) &&
866 ci->i_files == 0 && ci->i_subdirs == 0 &&
867 (caps & CEPH_CAP_FILE_SHARED) &&
868 (issued & CEPH_CAP_FILE_EXCL) == 0 &&
869 !__ceph_dir_is_complete(ci)) {
870 dout(" marking %p complete (empty)\n", inode);
871 i_size_write(inode, 0);
872 __ceph_dir_set_complete(ci,
873 atomic64_read(&ci->i_release_count),
874 atomic64_read(&ci->i_ordered_count));
875 }
876
877 wake = true;
878 } else {
879 dout(" %p got snap_caps %s\n", inode,
880 ceph_cap_string(le32_to_cpu(info->cap.caps)));
881 ci->i_snap_caps |= le32_to_cpu(info->cap.caps);
882 if (cap_fmode >= 0)
883 __ceph_get_fmode(ci, cap_fmode);
884 }
885 } else if (cap_fmode >= 0) {
886 pr_warn("mds issued no caps on %llx.%llx\n",
887 ceph_vinop(inode));
888 __ceph_get_fmode(ci, cap_fmode);
889 }
890
891 if (iinfo->inline_version > 0 &&
892 iinfo->inline_version >= ci->i_inline_version) {
893 int cache_caps = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
894 ci->i_inline_version = iinfo->inline_version;
895 if (ci->i_inline_version != CEPH_INLINE_NONE &&
896 (locked_page ||
897 (le32_to_cpu(info->cap.caps) & cache_caps)))
898 fill_inline = true;
899 }
900
901 spin_unlock(&ci->i_ceph_lock);
902
903 if (fill_inline)
904 ceph_fill_inline_data(inode, locked_page,
905 iinfo->inline_data, iinfo->inline_len);
906
907 if (wake)
908 wake_up_all(&ci->i_cap_wq);
909
910 /* queue truncate if we saw i_size decrease */
911 if (queue_trunc)
912 ceph_queue_vmtruncate(inode);
913
914 /* populate frag tree */
915 if (S_ISDIR(inode->i_mode))
916 ceph_fill_fragtree(inode, &info->fragtree, dirinfo);
917
918 /* update delegation info? */
919 if (dirinfo)
920 ceph_fill_dirfrag(inode, dirinfo);
921
922 err = 0;
923out:
924 if (new_cap)
925 ceph_put_cap(mdsc, new_cap);
926 if (xattr_blob)
927 ceph_buffer_put(xattr_blob);
928 return err;
929}
930
931/*
932 * caller should hold session s_mutex.
933 */
934static void update_dentry_lease(struct dentry *dentry,
935 struct ceph_mds_reply_lease *lease,
936 struct ceph_mds_session *session,
937 unsigned long from_time)
938{
939 struct ceph_dentry_info *di = ceph_dentry(dentry);
940 long unsigned duration = le32_to_cpu(lease->duration_ms);
941 long unsigned ttl = from_time + (duration * HZ) / 1000;
942 long unsigned half_ttl = from_time + (duration * HZ / 2) / 1000;
943 struct inode *dir;
944
945 /* only track leases on regular dentries */
946 if (dentry->d_op != &ceph_dentry_ops)
947 return;
948
949 spin_lock(&dentry->d_lock);
950 dout("update_dentry_lease %p duration %lu ms ttl %lu\n",
951 dentry, duration, ttl);
952
953 /* make lease_rdcache_gen match directory */
954 dir = d_inode(dentry->d_parent);
955 di->lease_shared_gen = ceph_inode(dir)->i_shared_gen;
956
957 if (duration == 0)
958 goto out_unlock;
959
960 if (di->lease_gen == session->s_cap_gen &&
961 time_before(ttl, dentry->d_time))
962 goto out_unlock; /* we already have a newer lease. */
963
964 if (di->lease_session && di->lease_session != session)
965 goto out_unlock;
966
967 ceph_dentry_lru_touch(dentry);
968
969 if (!di->lease_session)
970 di->lease_session = ceph_get_mds_session(session);
971 di->lease_gen = session->s_cap_gen;
972 di->lease_seq = le32_to_cpu(lease->seq);
973 di->lease_renew_after = half_ttl;
974 di->lease_renew_from = 0;
975 dentry->d_time = ttl;
976out_unlock:
977 spin_unlock(&dentry->d_lock);
978 return;
979}
980
981/*
982 * splice a dentry to an inode.
983 * caller must hold directory i_mutex for this to be safe.
984 */
985static struct dentry *splice_dentry(struct dentry *dn, struct inode *in)
986{
987 struct dentry *realdn;
988
989 BUG_ON(d_inode(dn));
990
991 /* dn must be unhashed */
992 if (!d_unhashed(dn))
993 d_drop(dn);
994 realdn = d_splice_alias(in, dn);
995 if (IS_ERR(realdn)) {
996 pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n",
997 PTR_ERR(realdn), dn, in, ceph_vinop(in));
998 dn = realdn; /* note realdn contains the error */
999 goto out;
1000 } else if (realdn) {
1001 dout("dn %p (%d) spliced with %p (%d) "
1002 "inode %p ino %llx.%llx\n",
1003 dn, d_count(dn),
1004 realdn, d_count(realdn),
1005 d_inode(realdn), ceph_vinop(d_inode(realdn)));
1006 dput(dn);
1007 dn = realdn;
1008 } else {
1009 BUG_ON(!ceph_dentry(dn));
1010 dout("dn %p attached to %p ino %llx.%llx\n",
1011 dn, d_inode(dn), ceph_vinop(d_inode(dn)));
1012 }
1013out:
1014 return dn;
1015}
1016
1017/*
1018 * Incorporate results into the local cache. This is either just
1019 * one inode, or a directory, dentry, and possibly linked-to inode (e.g.,
1020 * after a lookup).
1021 *
1022 * A reply may contain
1023 * a directory inode along with a dentry.
1024 * and/or a target inode
1025 *
1026 * Called with snap_rwsem (read).
1027 */
1028int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
1029 struct ceph_mds_session *session)
1030{
1031 struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
1032 struct inode *in = NULL;
1033 struct ceph_vino vino;
1034 struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
1035 int err = 0;
1036
1037 dout("fill_trace %p is_dentry %d is_target %d\n", req,
1038 rinfo->head->is_dentry, rinfo->head->is_target);
1039
1040#if 0
1041 /*
1042 * Debugging hook:
1043 *
1044 * If we resend completed ops to a recovering mds, we get no
1045 * trace. Since that is very rare, pretend this is the case
1046 * to ensure the 'no trace' handlers in the callers behave.
1047 *
1048 * Fill in inodes unconditionally to avoid breaking cap
1049 * invariants.
1050 */
1051 if (rinfo->head->op & CEPH_MDS_OP_WRITE) {
1052 pr_info("fill_trace faking empty trace on %lld %s\n",
1053 req->r_tid, ceph_mds_op_name(rinfo->head->op));
1054 if (rinfo->head->is_dentry) {
1055 rinfo->head->is_dentry = 0;
1056 err = fill_inode(req->r_locked_dir,
1057 &rinfo->diri, rinfo->dirfrag,
1058 session, req->r_request_started, -1);
1059 }
1060 if (rinfo->head->is_target) {
1061 rinfo->head->is_target = 0;
1062 ininfo = rinfo->targeti.in;
1063 vino.ino = le64_to_cpu(ininfo->ino);
1064 vino.snap = le64_to_cpu(ininfo->snapid);
1065 in = ceph_get_inode(sb, vino);
1066 err = fill_inode(in, &rinfo->targeti, NULL,
1067 session, req->r_request_started,
1068 req->r_fmode);
1069 iput(in);
1070 }
1071 }
1072#endif
1073
1074 if (!rinfo->head->is_target && !rinfo->head->is_dentry) {
1075 dout("fill_trace reply is empty!\n");
1076 if (rinfo->head->result == 0 && req->r_locked_dir)
1077 ceph_invalidate_dir_request(req);
1078 return 0;
1079 }
1080
1081 if (rinfo->head->is_dentry) {
1082 struct inode *dir = req->r_locked_dir;
1083
1084 if (dir) {
1085 err = fill_inode(dir, NULL,
1086 &rinfo->diri, rinfo->dirfrag,
1087 session, req->r_request_started, -1,
1088 &req->r_caps_reservation);
1089 if (err < 0)
1090 goto done;
1091 } else {
1092 WARN_ON_ONCE(1);
1093 }
1094
1095 if (dir && req->r_op == CEPH_MDS_OP_LOOKUPNAME) {
1096 struct qstr dname;
1097 struct dentry *dn, *parent;
1098
1099 BUG_ON(!rinfo->head->is_target);
1100 BUG_ON(req->r_dentry);
1101
1102 parent = d_find_any_alias(dir);
1103 BUG_ON(!parent);
1104
1105 dname.name = rinfo->dname;
1106 dname.len = rinfo->dname_len;
1107 dname.hash = full_name_hash(dname.name, dname.len);
1108 vino.ino = le64_to_cpu(rinfo->targeti.in->ino);
1109 vino.snap = le64_to_cpu(rinfo->targeti.in->snapid);
1110retry_lookup:
1111 dn = d_lookup(parent, &dname);
1112 dout("d_lookup on parent=%p name=%.*s got %p\n",
1113 parent, dname.len, dname.name, dn);
1114
1115 if (!dn) {
1116 dn = d_alloc(parent, &dname);
1117 dout("d_alloc %p '%.*s' = %p\n", parent,
1118 dname.len, dname.name, dn);
1119 if (dn == NULL) {
1120 dput(parent);
1121 err = -ENOMEM;
1122 goto done;
1123 }
1124 err = ceph_init_dentry(dn);
1125 if (err < 0) {
1126 dput(dn);
1127 dput(parent);
1128 goto done;
1129 }
1130 } else if (d_really_is_positive(dn) &&
1131 (ceph_ino(d_inode(dn)) != vino.ino ||
1132 ceph_snap(d_inode(dn)) != vino.snap)) {
1133 dout(" dn %p points to wrong inode %p\n",
1134 dn, d_inode(dn));
1135 d_delete(dn);
1136 dput(dn);
1137 goto retry_lookup;
1138 }
1139
1140 req->r_dentry = dn;
1141 dput(parent);
1142 }
1143 }
1144
1145 if (rinfo->head->is_target) {
1146 vino.ino = le64_to_cpu(rinfo->targeti.in->ino);
1147 vino.snap = le64_to_cpu(rinfo->targeti.in->snapid);
1148
1149 in = ceph_get_inode(sb, vino);
1150 if (IS_ERR(in)) {
1151 err = PTR_ERR(in);
1152 goto done;
1153 }
1154 req->r_target_inode = in;
1155
1156 err = fill_inode(in, req->r_locked_page, &rinfo->targeti, NULL,
1157 session, req->r_request_started,
1158 (!req->r_aborted && rinfo->head->result == 0) ?
1159 req->r_fmode : -1,
1160 &req->r_caps_reservation);
1161 if (err < 0) {
1162 pr_err("fill_inode badness %p %llx.%llx\n",
1163 in, ceph_vinop(in));
1164 goto done;
1165 }
1166 }
1167
1168 /*
1169 * ignore null lease/binding on snapdir ENOENT, or else we
1170 * will have trouble splicing in the virtual snapdir later
1171 */
1172 if (rinfo->head->is_dentry && !req->r_aborted &&
1173 req->r_locked_dir &&
1174 (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name,
1175 fsc->mount_options->snapdir_name,
1176 req->r_dentry->d_name.len))) {
1177 /*
1178 * lookup link rename : null -> possibly existing inode
1179 * mknod symlink mkdir : null -> new inode
1180 * unlink : linked -> null
1181 */
1182 struct inode *dir = req->r_locked_dir;
1183 struct dentry *dn = req->r_dentry;
1184 bool have_dir_cap, have_lease;
1185
1186 BUG_ON(!dn);
1187 BUG_ON(!dir);
1188 BUG_ON(d_inode(dn->d_parent) != dir);
1189 BUG_ON(ceph_ino(dir) !=
1190 le64_to_cpu(rinfo->diri.in->ino));
1191 BUG_ON(ceph_snap(dir) !=
1192 le64_to_cpu(rinfo->diri.in->snapid));
1193
1194 /* do we have a lease on the whole dir? */
1195 have_dir_cap =
1196 (le32_to_cpu(rinfo->diri.in->cap.caps) &
1197 CEPH_CAP_FILE_SHARED);
1198
1199 /* do we have a dn lease? */
1200 have_lease = have_dir_cap ||
1201 le32_to_cpu(rinfo->dlease->duration_ms);
1202 if (!have_lease)
1203 dout("fill_trace no dentry lease or dir cap\n");
1204
1205 /* rename? */
1206 if (req->r_old_dentry && req->r_op == CEPH_MDS_OP_RENAME) {
1207 struct inode *olddir = req->r_old_dentry_dir;
1208 BUG_ON(!olddir);
1209
1210 dout(" src %p '%pd' dst %p '%pd'\n",
1211 req->r_old_dentry,
1212 req->r_old_dentry,
1213 dn, dn);
1214 dout("fill_trace doing d_move %p -> %p\n",
1215 req->r_old_dentry, dn);
1216
1217 /* d_move screws up sibling dentries' offsets */
1218 ceph_dir_clear_ordered(dir);
1219 ceph_dir_clear_ordered(olddir);
1220
1221 d_move(req->r_old_dentry, dn);
1222 dout(" src %p '%pd' dst %p '%pd'\n",
1223 req->r_old_dentry,
1224 req->r_old_dentry,
1225 dn, dn);
1226
1227 /* ensure target dentry is invalidated, despite
1228 rehashing bug in vfs_rename_dir */
1229 ceph_invalidate_dentry_lease(dn);
1230
1231 dout("dn %p gets new offset %lld\n", req->r_old_dentry,
1232 ceph_dentry(req->r_old_dentry)->offset);
1233
1234 dn = req->r_old_dentry; /* use old_dentry */
1235 }
1236
1237 /* null dentry? */
1238 if (!rinfo->head->is_target) {
1239 dout("fill_trace null dentry\n");
1240 if (d_really_is_positive(dn)) {
1241 ceph_dir_clear_ordered(dir);
1242 dout("d_delete %p\n", dn);
1243 d_delete(dn);
1244 } else {
1245 if (have_lease && d_unhashed(dn))
1246 d_add(dn, NULL);
1247 update_dentry_lease(dn, rinfo->dlease,
1248 session,
1249 req->r_request_started);
1250 }
1251 goto done;
1252 }
1253
1254 /* attach proper inode */
1255 if (d_really_is_negative(dn)) {
1256 ceph_dir_clear_ordered(dir);
1257 ihold(in);
1258 dn = splice_dentry(dn, in);
1259 if (IS_ERR(dn)) {
1260 err = PTR_ERR(dn);
1261 goto done;
1262 }
1263 req->r_dentry = dn; /* may have spliced */
1264 } else if (d_really_is_positive(dn) && d_inode(dn) != in) {
1265 dout(" %p links to %p %llx.%llx, not %llx.%llx\n",
1266 dn, d_inode(dn), ceph_vinop(d_inode(dn)),
1267 ceph_vinop(in));
1268 d_invalidate(dn);
1269 have_lease = false;
1270 }
1271
1272 if (have_lease)
1273 update_dentry_lease(dn, rinfo->dlease, session,
1274 req->r_request_started);
1275 dout(" final dn %p\n", dn);
1276 } else if (!req->r_aborted &&
1277 (req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
1278 req->r_op == CEPH_MDS_OP_MKSNAP)) {
1279 struct dentry *dn = req->r_dentry;
1280 struct inode *dir = req->r_locked_dir;
1281
1282 /* fill out a snapdir LOOKUPSNAP dentry */
1283 BUG_ON(!dn);
1284 BUG_ON(!dir);
1285 BUG_ON(ceph_snap(dir) != CEPH_SNAPDIR);
1286 dout(" linking snapped dir %p to dn %p\n", in, dn);
1287 ceph_dir_clear_ordered(dir);
1288 ihold(in);
1289 dn = splice_dentry(dn, in);
1290 if (IS_ERR(dn)) {
1291 err = PTR_ERR(dn);
1292 goto done;
1293 }
1294 req->r_dentry = dn; /* may have spliced */
1295 }
1296done:
1297 dout("fill_trace done err=%d\n", err);
1298 return err;
1299}
1300
1301/*
1302 * Prepopulate our cache with readdir results, leases, etc.
1303 */
1304static int readdir_prepopulate_inodes_only(struct ceph_mds_request *req,
1305 struct ceph_mds_session *session)
1306{
1307 struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
1308 int i, err = 0;
1309
1310 for (i = 0; i < rinfo->dir_nr; i++) {
1311 struct ceph_vino vino;
1312 struct inode *in;
1313 int rc;
1314
1315 vino.ino = le64_to_cpu(rinfo->dir_in[i].in->ino);
1316 vino.snap = le64_to_cpu(rinfo->dir_in[i].in->snapid);
1317
1318 in = ceph_get_inode(req->r_dentry->d_sb, vino);
1319 if (IS_ERR(in)) {
1320 err = PTR_ERR(in);
1321 dout("new_inode badness got %d\n", err);
1322 continue;
1323 }
1324 rc = fill_inode(in, NULL, &rinfo->dir_in[i], NULL, session,
1325 req->r_request_started, -1,
1326 &req->r_caps_reservation);
1327 if (rc < 0) {
1328 pr_err("fill_inode badness on %p got %d\n", in, rc);
1329 err = rc;
1330 continue;
1331 }
1332 }
1333
1334 return err;
1335}
1336
1337void ceph_readdir_cache_release(struct ceph_readdir_cache_control *ctl)
1338{
1339 if (ctl->page) {
1340 kunmap(ctl->page);
1341 put_page(ctl->page);
1342 ctl->page = NULL;
1343 }
1344}
1345
1346static int fill_readdir_cache(struct inode *dir, struct dentry *dn,
1347 struct ceph_readdir_cache_control *ctl,
1348 struct ceph_mds_request *req)
1349{
1350 struct ceph_inode_info *ci = ceph_inode(dir);
1351 unsigned nsize = PAGE_SIZE / sizeof(struct dentry*);
1352 unsigned idx = ctl->index % nsize;
1353 pgoff_t pgoff = ctl->index / nsize;
1354
1355 if (!ctl->page || pgoff != page_index(ctl->page)) {
1356 ceph_readdir_cache_release(ctl);
1357 if (idx == 0)
1358 ctl->page = grab_cache_page(&dir->i_data, pgoff);
1359 else
1360 ctl->page = find_lock_page(&dir->i_data, pgoff);
1361 if (!ctl->page) {
1362 ctl->index = -1;
1363 return idx == 0 ? -ENOMEM : 0;
1364 }
1365 /* reading/filling the cache are serialized by
1366 * i_mutex, no need to use page lock */
1367 unlock_page(ctl->page);
1368 ctl->dentries = kmap(ctl->page);
1369 if (idx == 0)
1370 memset(ctl->dentries, 0, PAGE_SIZE);
1371 }
1372
1373 if (req->r_dir_release_cnt == atomic64_read(&ci->i_release_count) &&
1374 req->r_dir_ordered_cnt == atomic64_read(&ci->i_ordered_count)) {
1375 dout("readdir cache dn %p idx %d\n", dn, ctl->index);
1376 ctl->dentries[idx] = dn;
1377 ctl->index++;
1378 } else {
1379 dout("disable readdir cache\n");
1380 ctl->index = -1;
1381 }
1382 return 0;
1383}
1384
1385int ceph_readdir_prepopulate(struct ceph_mds_request *req,
1386 struct ceph_mds_session *session)
1387{
1388 struct dentry *parent = req->r_dentry;
1389 struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
1390 struct qstr dname;
1391 struct dentry *dn;
1392 struct inode *in;
1393 int err = 0, skipped = 0, ret, i;
1394 struct inode *snapdir = NULL;
1395 struct ceph_mds_request_head *rhead = req->r_request->front.iov_base;
1396 struct ceph_dentry_info *di;
1397 u32 frag = le32_to_cpu(rhead->args.readdir.frag);
1398 struct ceph_readdir_cache_control cache_ctl = {};
1399
1400 if (req->r_aborted)
1401 return readdir_prepopulate_inodes_only(req, session);
1402
1403 if (rinfo->dir_dir &&
1404 le32_to_cpu(rinfo->dir_dir->frag) != frag) {
1405 dout("readdir_prepopulate got new frag %x -> %x\n",
1406 frag, le32_to_cpu(rinfo->dir_dir->frag));
1407 frag = le32_to_cpu(rinfo->dir_dir->frag);
1408 if (ceph_frag_is_leftmost(frag))
1409 req->r_readdir_offset = 2;
1410 else
1411 req->r_readdir_offset = 0;
1412 }
1413
1414 if (le32_to_cpu(rinfo->head->op) == CEPH_MDS_OP_LSSNAP) {
1415 snapdir = ceph_get_snapdir(d_inode(parent));
1416 parent = d_find_alias(snapdir);
1417 dout("readdir_prepopulate %d items under SNAPDIR dn %p\n",
1418 rinfo->dir_nr, parent);
1419 } else {
1420 dout("readdir_prepopulate %d items under dn %p\n",
1421 rinfo->dir_nr, parent);
1422 if (rinfo->dir_dir)
1423 ceph_fill_dirfrag(d_inode(parent), rinfo->dir_dir);
1424 }
1425
1426 if (ceph_frag_is_leftmost(frag) && req->r_readdir_offset == 2) {
1427 /* note dir version at start of readdir so we can tell
1428 * if any dentries get dropped */
1429 struct ceph_inode_info *ci = ceph_inode(d_inode(parent));
1430 req->r_dir_release_cnt = atomic64_read(&ci->i_release_count);
1431 req->r_dir_ordered_cnt = atomic64_read(&ci->i_ordered_count);
1432 req->r_readdir_cache_idx = 0;
1433 }
1434
1435 cache_ctl.index = req->r_readdir_cache_idx;
1436
1437 /* FIXME: release caps/leases if error occurs */
1438 for (i = 0; i < rinfo->dir_nr; i++) {
1439 struct ceph_vino vino;
1440
1441 dname.name = rinfo->dir_dname[i];
1442 dname.len = rinfo->dir_dname_len[i];
1443 dname.hash = full_name_hash(dname.name, dname.len);
1444
1445 vino.ino = le64_to_cpu(rinfo->dir_in[i].in->ino);
1446 vino.snap = le64_to_cpu(rinfo->dir_in[i].in->snapid);
1447
1448retry_lookup:
1449 dn = d_lookup(parent, &dname);
1450 dout("d_lookup on parent=%p name=%.*s got %p\n",
1451 parent, dname.len, dname.name, dn);
1452
1453 if (!dn) {
1454 dn = d_alloc(parent, &dname);
1455 dout("d_alloc %p '%.*s' = %p\n", parent,
1456 dname.len, dname.name, dn);
1457 if (dn == NULL) {
1458 dout("d_alloc badness\n");
1459 err = -ENOMEM;
1460 goto out;
1461 }
1462 ret = ceph_init_dentry(dn);
1463 if (ret < 0) {
1464 dput(dn);
1465 err = ret;
1466 goto out;
1467 }
1468 } else if (d_really_is_positive(dn) &&
1469 (ceph_ino(d_inode(dn)) != vino.ino ||
1470 ceph_snap(d_inode(dn)) != vino.snap)) {
1471 dout(" dn %p points to wrong inode %p\n",
1472 dn, d_inode(dn));
1473 d_delete(dn);
1474 dput(dn);
1475 goto retry_lookup;
1476 }
1477
1478 /* inode */
1479 if (d_really_is_positive(dn)) {
1480 in = d_inode(dn);
1481 } else {
1482 in = ceph_get_inode(parent->d_sb, vino);
1483 if (IS_ERR(in)) {
1484 dout("new_inode badness\n");
1485 d_drop(dn);
1486 dput(dn);
1487 err = PTR_ERR(in);
1488 goto out;
1489 }
1490 }
1491
1492 ret = fill_inode(in, NULL, &rinfo->dir_in[i], NULL, session,
1493 req->r_request_started, -1,
1494 &req->r_caps_reservation);
1495 if (ret < 0) {
1496 pr_err("fill_inode badness on %p\n", in);
1497 if (d_really_is_negative(dn))
1498 iput(in);
1499 d_drop(dn);
1500 err = ret;
1501 goto next_item;
1502 }
1503
1504 if (d_really_is_negative(dn)) {
1505 struct dentry *realdn;
1506
1507 if (ceph_security_xattr_deadlock(in)) {
1508 dout(" skip splicing dn %p to inode %p"
1509 " (security xattr deadlock)\n", dn, in);
1510 iput(in);
1511 skipped++;
1512 goto next_item;
1513 }
1514
1515 realdn = splice_dentry(dn, in);
1516 if (IS_ERR(realdn)) {
1517 err = PTR_ERR(realdn);
1518 d_drop(dn);
1519 dn = NULL;
1520 goto next_item;
1521 }
1522 dn = realdn;
1523 }
1524
1525 di = dn->d_fsdata;
1526 di->offset = ceph_make_fpos(frag, i + req->r_readdir_offset);
1527
1528 update_dentry_lease(dn, rinfo->dir_dlease[i],
1529 req->r_session,
1530 req->r_request_started);
1531
1532 if (err == 0 && skipped == 0 && cache_ctl.index >= 0) {
1533 ret = fill_readdir_cache(d_inode(parent), dn,
1534 &cache_ctl, req);
1535 if (ret < 0)
1536 err = ret;
1537 }
1538next_item:
1539 if (dn)
1540 dput(dn);
1541 }
1542out:
1543 if (err == 0 && skipped == 0) {
1544 req->r_did_prepopulate = true;
1545 req->r_readdir_cache_idx = cache_ctl.index;
1546 }
1547 ceph_readdir_cache_release(&cache_ctl);
1548 if (snapdir) {
1549 iput(snapdir);
1550 dput(parent);
1551 }
1552 dout("readdir_prepopulate done\n");
1553 return err;
1554}
1555
1556int ceph_inode_set_size(struct inode *inode, loff_t size)
1557{
1558 struct ceph_inode_info *ci = ceph_inode(inode);
1559 int ret = 0;
1560
1561 spin_lock(&ci->i_ceph_lock);
1562 dout("set_size %p %llu -> %llu\n", inode, inode->i_size, size);
1563 i_size_write(inode, size);
1564 inode->i_blocks = (size + (1 << 9) - 1) >> 9;
1565
1566 /* tell the MDS if we are approaching max_size */
1567 if ((size << 1) >= ci->i_max_size &&
1568 (ci->i_reported_size << 1) < ci->i_max_size)
1569 ret = 1;
1570
1571 spin_unlock(&ci->i_ceph_lock);
1572 return ret;
1573}
1574
1575/*
1576 * Write back inode data in a worker thread. (This can't be done
1577 * in the message handler context.)
1578 */
1579void ceph_queue_writeback(struct inode *inode)
1580{
1581 ihold(inode);
1582 if (queue_work(ceph_inode_to_client(inode)->wb_wq,
1583 &ceph_inode(inode)->i_wb_work)) {
1584 dout("ceph_queue_writeback %p\n", inode);
1585 } else {
1586 dout("ceph_queue_writeback %p failed\n", inode);
1587 iput(inode);
1588 }
1589}
1590
1591static void ceph_writeback_work(struct work_struct *work)
1592{
1593 struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
1594 i_wb_work);
1595 struct inode *inode = &ci->vfs_inode;
1596
1597 dout("writeback %p\n", inode);
1598 filemap_fdatawrite(&inode->i_data);
1599 iput(inode);
1600}
1601
1602/*
1603 * queue an async invalidation
1604 */
1605void ceph_queue_invalidate(struct inode *inode)
1606{
1607 ihold(inode);
1608 if (queue_work(ceph_inode_to_client(inode)->pg_inv_wq,
1609 &ceph_inode(inode)->i_pg_inv_work)) {
1610 dout("ceph_queue_invalidate %p\n", inode);
1611 } else {
1612 dout("ceph_queue_invalidate %p failed\n", inode);
1613 iput(inode);
1614 }
1615}

/*
 * Invalidate inode pages in a worker thread.  (This can't be done
 * in the message handler context.)
 */
static void ceph_invalidate_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_pg_inv_work);
	struct inode *inode = &ci->vfs_inode;
	u32 orig_gen;
	int check = 0;

	mutex_lock(&ci->i_truncate_mutex);
	spin_lock(&ci->i_ceph_lock);
	dout("invalidate_pages %p gen %d revoking %d\n", inode,
	     ci->i_rdcache_gen, ci->i_rdcache_revoking);
	if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
		if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE))
			check = 1;
		spin_unlock(&ci->i_ceph_lock);
		mutex_unlock(&ci->i_truncate_mutex);
		goto out;
	}
	orig_gen = ci->i_rdcache_gen;
	spin_unlock(&ci->i_ceph_lock);

	truncate_pagecache(inode, 0);

	spin_lock(&ci->i_ceph_lock);
	if (orig_gen == ci->i_rdcache_gen &&
	    orig_gen == ci->i_rdcache_revoking) {
		dout("invalidate_pages %p gen %d successful\n", inode,
		     ci->i_rdcache_gen);
		ci->i_rdcache_revoking--;
		check = 1;
	} else {
		dout("invalidate_pages %p gen %d raced, now %d revoking %d\n",
		     inode, orig_gen, ci->i_rdcache_gen,
		     ci->i_rdcache_revoking);
		if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE))
			check = 1;
	}
	spin_unlock(&ci->i_ceph_lock);
	mutex_unlock(&ci->i_truncate_mutex);
out:
	if (check)
		ceph_check_caps(ci, 0, NULL);
	iput(inode);
}
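
/*
 * A note on the generation counters above (a sketch of the protocol;
 * the cap-handling code is authoritative): the caps code sets
 * i_rdcache_revoking = i_rdcache_gen when it queues an invalidation
 * because CEPH_CAP_FILE_CACHE is being revoked, and i_rdcache_gen
 * moves again once caching resumes.  The invalidation above therefore
 * only "counts" if both values still match the one we sampled before
 * dropping i_ceph_lock to call truncate_pagecache(); otherwise we
 * raced and leave the work to a later pass.
 */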

/*
 * Called via trunc_wq: like writeback and invalidation, truncation is
 * deferred to a separate worker thread so the message handler path
 * never blocks on it.
 */
static void ceph_vmtruncate_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_vmtruncate_work);
	struct inode *inode = &ci->vfs_inode;

	dout("vmtruncate_work %p\n", inode);
	__ceph_do_pending_vmtruncate(inode);
	iput(inode);
}

/*
 * Queue an async vmtruncate.  If we fail to queue work, we will handle
 * the truncation the next time we call __ceph_do_pending_vmtruncate.
 */
void ceph_queue_vmtruncate(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	ihold(inode);

	if (queue_work(ceph_sb_to_client(inode->i_sb)->trunc_wq,
		       &ci->i_vmtruncate_work)) {
		dout("ceph_queue_vmtruncate %p\n", inode);
	} else {
		dout("ceph_queue_vmtruncate %p failed, pending=%d\n",
		     inode, ci->i_truncate_pending);
		iput(inode);
	}
}
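
/*
 * Example (sketch) of how the two truncate entry points cooperate:
 * the non-blocking cap-message path bumps i_truncate_pending and then
 * calls ceph_queue_vmtruncate(inode), while any path that is allowed
 * to block (read, write, setattr) synchronizes first via
 * __ceph_do_pending_vmtruncate(inode) before touching data.  That is
 * why a failed queue_work() above is harmless: the next blocking
 * caller picks the truncation up.
 */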

/*
 * Make sure any pending truncation is applied before doing anything
 * that may depend on it.
 */
void __ceph_do_pending_vmtruncate(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 to;
	int wrbuffer_refs, finish = 0;

	mutex_lock(&ci->i_truncate_mutex);
retry:
	spin_lock(&ci->i_ceph_lock);
	if (ci->i_truncate_pending == 0) {
		dout("__do_pending_vmtruncate %p none pending\n", inode);
		spin_unlock(&ci->i_ceph_lock);
		mutex_unlock(&ci->i_truncate_mutex);
		return;
	}

	/*
	 * make sure any dirty snapped pages are flushed before we
	 * possibly truncate them... so write AND block!
	 */
	if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) {
		dout("__do_pending_vmtruncate %p flushing snaps first\n",
		     inode);
		spin_unlock(&ci->i_ceph_lock);
		filemap_write_and_wait_range(&inode->i_data, 0,
					     inode->i_sb->s_maxbytes);
		goto retry;
	}

	/* there should be no reader or writer */
	WARN_ON_ONCE(ci->i_rd_ref || ci->i_wr_ref);

	to = ci->i_truncate_size;
	wrbuffer_refs = ci->i_wrbuffer_ref;
	dout("__do_pending_vmtruncate %p (%d) to %lld\n", inode,
	     ci->i_truncate_pending, to);
	spin_unlock(&ci->i_ceph_lock);

	truncate_pagecache(inode, to);

	spin_lock(&ci->i_ceph_lock);
	if (to == ci->i_truncate_size) {
		ci->i_truncate_pending = 0;
		finish = 1;
	}
	spin_unlock(&ci->i_ceph_lock);
	if (!finish)
		goto retry;

	mutex_unlock(&ci->i_truncate_mutex);

	if (wrbuffer_refs == 0)
		ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);

	wake_up_all(&ci->i_cap_wq);
}
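
/*
 * Note the recheck after truncate_pagecache() above: i_truncate_size
 * may have been moved again by the MDS while we were truncating with
 * i_ceph_lock dropped, in which case we loop and truncate to the new
 * target before clearing i_truncate_pending.
 */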

/*
 * symlinks
 */
static const struct inode_operations ceph_symlink_iops = {
	.readlink = generic_readlink,
	.get_link = simple_get_link,
	.setattr = ceph_setattr,
	.getattr = ceph_getattr,
	.setxattr = ceph_setxattr,
	.getxattr = ceph_getxattr,
	.listxattr = ceph_listxattr,
	.removexattr = ceph_removexattr,
};

/*
 * setattr
 */
int ceph_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct ceph_inode_info *ci = ceph_inode(inode);
	const unsigned int ia_valid = attr->ia_valid;
	struct ceph_mds_request *req;
	struct ceph_mds_client *mdsc = ceph_sb_to_client(dentry->d_sb)->mdsc;
	struct ceph_cap_flush *prealloc_cf;
	int issued;
	int release = 0, dirtied = 0;
	int mask = 0;
	int err = 0;
	int inode_dirty_flags = 0;
	bool lock_snap_rwsem = false;

	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;

	err = inode_change_ok(inode, attr);
	if (err != 0)
		return err;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return -ENOMEM;

	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETATTR,
				       USE_AUTH_MDS);
	if (IS_ERR(req)) {
		ceph_free_cap_flush(prealloc_cf);
		return PTR_ERR(req);
	}

	spin_lock(&ci->i_ceph_lock);
	issued = __ceph_caps_issued(ci, NULL);

	if (!ci->i_head_snapc &&
	    (issued & (CEPH_CAP_ANY_EXCL | CEPH_CAP_FILE_WR))) {
		lock_snap_rwsem = true;
		if (!down_read_trylock(&mdsc->snap_rwsem)) {
			spin_unlock(&ci->i_ceph_lock);
			down_read(&mdsc->snap_rwsem);
			spin_lock(&ci->i_ceph_lock);
			issued = __ceph_caps_issued(ci, NULL);
		}
	}
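
	/*
	 * If the trylock failed we had to drop i_ceph_lock to take
	 * snap_rwsem, so the issued caps may have changed underneath
	 * us -- hence the second __ceph_caps_issued() above.
	 */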

	dout("setattr %p issued %s\n", inode, ceph_cap_string(issued));

	if (ia_valid & ATTR_UID) {
		dout("setattr %p uid %d -> %d\n", inode,
		     from_kuid(&init_user_ns, inode->i_uid),
		     from_kuid(&init_user_ns, attr->ia_uid));
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_uid = attr->ia_uid;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   !uid_eq(attr->ia_uid, inode->i_uid)) {
			req->r_args.setattr.uid = cpu_to_le32(
				from_kuid(&init_user_ns, attr->ia_uid));
			mask |= CEPH_SETATTR_UID;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}
	if (ia_valid & ATTR_GID) {
		dout("setattr %p gid %d -> %d\n", inode,
		     from_kgid(&init_user_ns, inode->i_gid),
		     from_kgid(&init_user_ns, attr->ia_gid));
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_gid = attr->ia_gid;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   !gid_eq(attr->ia_gid, inode->i_gid)) {
			req->r_args.setattr.gid = cpu_to_le32(
				from_kgid(&init_user_ns, attr->ia_gid));
			mask |= CEPH_SETATTR_GID;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}
	if (ia_valid & ATTR_MODE) {
		dout("setattr %p mode 0%o -> 0%o\n", inode, inode->i_mode,
		     attr->ia_mode);
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_mode = attr->ia_mode;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   attr->ia_mode != inode->i_mode) {
			inode->i_mode = attr->ia_mode;
			req->r_args.setattr.mode = cpu_to_le32(attr->ia_mode);
			mask |= CEPH_SETATTR_MODE;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}

	if (ia_valid & ATTR_ATIME) {
		dout("setattr %p atime %ld.%ld -> %ld.%ld\n", inode,
		     inode->i_atime.tv_sec, inode->i_atime.tv_nsec,
		     attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec);
		if (issued & CEPH_CAP_FILE_EXCL) {
			ci->i_time_warp_seq++;
			inode->i_atime = attr->ia_atime;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_WR) &&
			   timespec_compare(&inode->i_atime,
					    &attr->ia_atime) < 0) {
			inode->i_atime = attr->ia_atime;
			dirtied |= CEPH_CAP_FILE_WR;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   !timespec_equal(&inode->i_atime, &attr->ia_atime)) {
			ceph_encode_timespec(&req->r_args.setattr.atime,
					     &attr->ia_atime);
			mask |= CEPH_SETATTR_ATIME;
			release |= CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}
	if (ia_valid & ATTR_MTIME) {
		dout("setattr %p mtime %ld.%ld -> %ld.%ld\n", inode,
		     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
		     attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec);
		if (issued & CEPH_CAP_FILE_EXCL) {
			ci->i_time_warp_seq++;
			inode->i_mtime = attr->ia_mtime;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_WR) &&
			   timespec_compare(&inode->i_mtime,
					    &attr->ia_mtime) < 0) {
			inode->i_mtime = attr->ia_mtime;
			dirtied |= CEPH_CAP_FILE_WR;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   !timespec_equal(&inode->i_mtime, &attr->ia_mtime)) {
			ceph_encode_timespec(&req->r_args.setattr.mtime,
					     &attr->ia_mtime);
			mask |= CEPH_SETATTR_MTIME;
			release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}
	if (ia_valid & ATTR_SIZE) {
		dout("setattr %p size %lld -> %lld\n", inode,
		     inode->i_size, attr->ia_size);
		if ((issued & CEPH_CAP_FILE_EXCL) &&
		    attr->ia_size > inode->i_size) {
			i_size_write(inode, attr->ia_size);
			inode->i_blocks =
				(attr->ia_size + (1 << 9) - 1) >> 9;
			inode->i_ctime = attr->ia_ctime;
			ci->i_reported_size = attr->ia_size;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   attr->ia_size != inode->i_size) {
			req->r_args.setattr.size = cpu_to_le64(attr->ia_size);
			req->r_args.setattr.old_size =
				cpu_to_le64(inode->i_size);
			mask |= CEPH_SETATTR_SIZE;
			release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}
	/* these do nothing */
	if (ia_valid & ATTR_CTIME) {
		bool only = (ia_valid & (ATTR_SIZE|ATTR_MTIME|ATTR_ATIME|
					 ATTR_MODE|ATTR_UID|ATTR_GID)) == 0;
		dout("setattr %p ctime %ld.%ld -> %ld.%ld (%s)\n", inode,
		     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
		     attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec,
		     only ? "ctime only" : "ignored");
		inode->i_ctime = attr->ia_ctime;
		if (only) {
			/*
			 * If the kernel wants to dirty ctime but nothing
			 * else, we need to choose a cap to dirty under, or
			 * do an almost-no-op setattr.
			 */
			if (issued & CEPH_CAP_AUTH_EXCL)
				dirtied |= CEPH_CAP_AUTH_EXCL;
			else if (issued & CEPH_CAP_FILE_EXCL)
				dirtied |= CEPH_CAP_FILE_EXCL;
			else if (issued & CEPH_CAP_XATTR_EXCL)
				dirtied |= CEPH_CAP_XATTR_EXCL;
			else
				mask |= CEPH_SETATTR_CTIME;
		}
	}
	if (ia_valid & ATTR_FILE)
		dout("setattr %p ATTR_FILE ... hrm!\n", inode);

	if (dirtied) {
		inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied,
							   &prealloc_cf);
		inode->i_ctime = current_fs_time(inode->i_sb);
	}

	release &= issued;
	spin_unlock(&ci->i_ceph_lock);
	if (lock_snap_rwsem)
		up_read(&mdsc->snap_rwsem);

	if (inode_dirty_flags)
		__mark_inode_dirty(inode, inode_dirty_flags);

	if (ia_valid & ATTR_MODE) {
		err = posix_acl_chmod(inode, attr->ia_mode);
		if (err)
			goto out_put;
	}

	if (mask) {
		req->r_inode = inode;
		ihold(inode);
		req->r_inode_drop = release;
		req->r_args.setattr.mask = cpu_to_le32(mask);
		req->r_num_caps = 1;
		err = ceph_mdsc_do_request(mdsc, NULL, req);
	}
	dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err,
	     ceph_cap_string(dirtied), mask);

	ceph_mdsc_put_request(req);
	if (mask & CEPH_SETATTR_SIZE)
		__ceph_do_pending_vmtruncate(inode);
	ceph_free_cap_flush(prealloc_cf);
	return err;
out_put:
	ceph_mdsc_put_request(req);
	ceph_free_cap_flush(prealloc_cf);
	return err;
}
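
/*
 * Example (sketch): how the two halves of ceph_setattr() play out for
 * a chmod(2).  If we hold CEPH_CAP_AUTH_EXCL, the new mode is applied
 * locally, "dirtied" marks the caps to flush later, and no request is
 * sent here; otherwise CEPH_SETATTR_MODE ends up in "mask" and
 * ceph_mdsc_do_request() performs a synchronous SETATTR round trip to
 * the MDS.  posix_acl_chmod() runs locally in either case.
 */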

/*
 * Verify that we have a lease on the given mask.  If not,
 * do a getattr against an mds.
 */
int __ceph_do_getattr(struct inode *inode, struct page *locked_page,
		      int mask, bool force)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(inode) == CEPH_SNAPDIR) {
		dout("do_getattr inode %p SNAPDIR\n", inode);
		return 0;
	}

	dout("do_getattr inode %p mask %s mode 0%o\n",
	     inode, ceph_cap_string(mask), inode->i_mode);
	if (!force && ceph_caps_issued_mask(ceph_inode(inode), mask, 1))
		return 0;

	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->r_inode = inode;
	ihold(inode);
	req->r_num_caps = 1;
	req->r_args.getattr.mask = cpu_to_le32(mask);
	req->r_locked_page = locked_page;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	if (locked_page && err == 0) {
		u64 inline_version = req->r_reply_info.targeti.inline_version;
		if (inline_version == 0) {
			/* the reply is supposed to contain inline data */
			err = -EINVAL;
		} else if (inline_version == CEPH_INLINE_NONE) {
			err = -ENODATA;
		} else {
			err = req->r_reply_info.targeti.inline_len;
		}
	}
	ceph_mdsc_put_request(req);
	dout("do_getattr result=%d\n", err);
	return err;
}
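
/*
 * Example (sketch): most callers go through the ceph_do_getattr()
 * wrapper with a NULL locked_page and only want cap revalidation,
 * e.g.:
 *
 *	err = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
 *
 * Passing a locked_page additionally asks the MDS for inline file
 * data, in which case a non-negative return is the inline length
 * rather than simply 0 on success.
 */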

/*
 * Check inode permissions.  We verify we have a valid value for
 * the AUTH cap, then call the generic handler.
 */
int ceph_permission(struct inode *inode, int mask)
{
	int err;

	if (mask & MAY_NOT_BLOCK)
		return -ECHILD;

	err = ceph_do_getattr(inode, CEPH_CAP_AUTH_SHARED, false);

	if (!err)
		err = generic_permission(inode, mask);
	return err;
}

/*
 * Get all attributes.  Hopefully someday we'll have a statlite()
 * and can limit the fields we require to be accurate.
 */
int ceph_getattr(struct vfsmount *mnt, struct dentry *dentry,
		 struct kstat *stat)
{
	struct inode *inode = d_inode(dentry);
	struct ceph_inode_info *ci = ceph_inode(inode);
	int err;

	err = ceph_do_getattr(inode, CEPH_STAT_CAP_INODE_ALL, false);
	if (!err) {
		generic_fillattr(inode, stat);
		stat->ino = ceph_translate_ino(inode->i_sb, inode->i_ino);
		if (ceph_snap(inode) != CEPH_NOSNAP)
			stat->dev = ceph_snap(inode);
		else
			stat->dev = 0;
		if (S_ISDIR(inode->i_mode)) {
			if (ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb),
						RBYTES))
				stat->size = ci->i_rbytes;
			else
				stat->size = ci->i_files + ci->i_subdirs;
			stat->blocks = 0;
			stat->blksize = 65536;
		}
	}
	return err;
}
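
/*
 * Note on directory sizes above: with the rbytes mount option,
 * stat(2) on a directory reports the recursive byte count of
 * everything beneath it (i_rbytes); otherwise it reports the entry
 * count (files + subdirectories), so st_size on a ceph directory is
 * not a byte count in that configuration.
 */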