#include <linux/ceph/ceph_debug.h>

#include <linux/spinlock.h>
#include <linux/fs_struct.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "super.h"
#include "mds_client.h"

/*
 * Directory operations: readdir, lookup, create, link, unlink,
 * rename, etc.
 */

/*
 * Ceph MDS operations are specified in terms of a base ino and
 * relative path.  Thus, the client can specify an operation on a
 * specific inode (e.g., a getattr due to fstat(2)), or as a path
 * relative to, say, the root directory.
 *
 * Normally, we limit ourselves to strict inode ops (no path component)
 * or dentry operations (a single path component relative to an ino).  The
 * exception to this is open_root_dentry(), which will open the mount
 * point by name.
 */

const struct inode_operations ceph_dir_iops;
const struct file_operations ceph_dir_fops;
const struct dentry_operations ceph_dentry_ops;

/*
 * Initialize ceph dentry state.
 */
int ceph_init_dentry(struct dentry *dentry)
{
	struct ceph_dentry_info *di;

	if (dentry->d_fsdata)
		return 0;

	di = kmem_cache_alloc(ceph_dentry_cachep, GFP_NOFS | __GFP_ZERO);
	if (!di)
		return -ENOMEM;	/* oh well */

	spin_lock(&dentry->d_lock);
	if (dentry->d_fsdata) {
		/* lost a race */
		kmem_cache_free(ceph_dentry_cachep, di);
		goto out_unlock;
	}

	if (ceph_snap(dentry->d_parent->d_inode) == CEPH_NOSNAP)
		d_set_d_op(dentry, &ceph_dentry_ops);
	else if (ceph_snap(dentry->d_parent->d_inode) == CEPH_SNAPDIR)
		d_set_d_op(dentry, &ceph_snapdir_dentry_ops);
	else
		d_set_d_op(dentry, &ceph_snap_dentry_ops);

	di->dentry = dentry;
	di->lease_session = NULL;
	dentry->d_time = jiffies;
	/* avoid reordering d_fsdata setup so that the check above is safe */
	smp_mb();
	dentry->d_fsdata = di;
	ceph_dentry_lru_add(dentry);
out_unlock:
	spin_unlock(&dentry->d_lock);
	return 0;
}
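/*
 * Note the allocate-then-recheck pattern in ceph_init_dentry() above:
 * the ceph_dentry_info is allocated before taking d_lock (allocation
 * may sleep), then d_fsdata is rechecked under the lock and our copy
 * is freed if another thread won the race.
 */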

struct inode *ceph_get_dentry_parent_inode(struct dentry *dentry)
{
	struct inode *inode = NULL;

	if (!dentry)
		return NULL;

	spin_lock(&dentry->d_lock);
	if (!IS_ROOT(dentry)) {
		inode = dentry->d_parent->d_inode;
		ihold(inode);
	}
	spin_unlock(&dentry->d_lock);
	return inode;
}


/*
 * for readdir, we encode the directory frag and offset within that
 * frag into f_pos.
 */
static unsigned fpos_frag(loff_t p)
{
	return p >> 32;
}
static unsigned fpos_off(loff_t p)
{
	return p & 0xffffffff;
}

static int fpos_cmp(loff_t l, loff_t r)
{
	int v = ceph_frag_compare(fpos_frag(l), fpos_frag(r));
	if (v)
		return v;
	return (int)(fpos_off(l) - fpos_off(r));
}
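/*
 * A minimal sketch of the encoding these helpers assume: the frag sits
 * in the upper 32 bits of f_pos and the intra-frag offset in the lower
 * 32, i.e. roughly
 *
 *	f_pos = ((loff_t)frag << 32) | off;
 *
 * so fpos_frag(ceph_make_fpos(f, o)) == f and fpos_off() recovers o.
 */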

/*
 * When possible, we try to satisfy a readdir by peeking at the
 * dcache.  We make this work by carefully ordering dentries on
 * d_u.d_child when we initially get results back from the MDS, and
 * falling back to a "normal" sync readdir if any dentries in the dir
 * are dropped.
 *
 * Complete dir indicates that we have all dentries in the dir.  It is
 * defined IFF we hold CEPH_CAP_FILE_SHARED (which will be revoked by
 * the MDS if/when the directory is modified).
 */
static int __dcache_readdir(struct file *file, struct dir_context *ctx,
			    u32 shared_gen)
{
	struct ceph_file_info *fi = file->private_data;
	struct dentry *parent = file->f_dentry;
	struct inode *dir = parent->d_inode;
	struct list_head *p;
	struct dentry *dentry, *last;
	struct ceph_dentry_info *di;
	int err = 0;

	/* claim ref on last dentry we returned */
	last = fi->dentry;
	fi->dentry = NULL;

	dout("__dcache_readdir %p v%u at %llu (last %p)\n",
	     dir, shared_gen, ctx->pos, last);

	spin_lock(&parent->d_lock);

	/* start at beginning? */
	if (ctx->pos == 2 || last == NULL ||
	    fpos_cmp(ctx->pos, ceph_dentry(last)->offset) < 0) {
		if (list_empty(&parent->d_subdirs))
			goto out_unlock;
		p = parent->d_subdirs.prev;
		dout(" initial p %p/%p\n", p->prev, p->next);
	} else {
		p = last->d_u.d_child.prev;
	}

more:
	dentry = list_entry(p, struct dentry, d_u.d_child);
	di = ceph_dentry(dentry);
	while (1) {
		dout(" p %p/%p %s d_subdirs %p/%p\n", p->prev, p->next,
		     d_unhashed(dentry) ? "!hashed" : "hashed",
		     parent->d_subdirs.prev, parent->d_subdirs.next);
		if (p == &parent->d_subdirs) {
			fi->flags |= CEPH_F_ATEND;
			goto out_unlock;
		}
		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
		if (di->lease_shared_gen == shared_gen &&
		    !d_unhashed(dentry) && dentry->d_inode &&
		    ceph_snap(dentry->d_inode) != CEPH_SNAPDIR &&
		    ceph_ino(dentry->d_inode) != CEPH_INO_CEPH &&
		    fpos_cmp(ctx->pos, di->offset) <= 0)
			break;
		dout(" skipping %p %.*s at %llu (%llu)%s%s\n", dentry,
		     dentry->d_name.len, dentry->d_name.name, di->offset,
		     ctx->pos, d_unhashed(dentry) ? " unhashed" : "",
		     !dentry->d_inode ? " null" : "");
		spin_unlock(&dentry->d_lock);
		p = p->prev;
		dentry = list_entry(p, struct dentry, d_u.d_child);
		di = ceph_dentry(dentry);
	}

	dget_dlock(dentry);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&parent->d_lock);

	/* make sure a dentry wasn't dropped while we didn't have parent lock */
	if (!ceph_dir_is_complete(dir)) {
		dout(" lost dir complete on %p; falling back to mds\n", dir);
		dput(dentry);
		err = -EAGAIN;
		goto out;
	}

	dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, ctx->pos,
	     dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
	if (!dir_emit(ctx, dentry->d_name.name,
		      dentry->d_name.len,
		      ceph_translate_ino(dentry->d_sb, dentry->d_inode->i_ino),
		      dentry->d_inode->i_mode >> 12)) {
		if (last) {
			/* remember our position */
			fi->dentry = last;
			fi->next_offset = fpos_off(di->offset);
		}
		dput(dentry);
		return 0;
	}

	ctx->pos = di->offset + 1;

	if (last)
		dput(last);
	last = dentry;

	spin_lock(&parent->d_lock);
	p = p->prev;	/* advance to next dentry */
	goto more;

out_unlock:
	spin_unlock(&parent->d_lock);
out:
	if (last)
		dput(last);
	return err;
}
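/*
 * Return convention for __dcache_readdir(): 0 means we either hit the
 * end of the directory or the caller's buffer filled up; -EAGAIN means
 * the directory lost its "complete" state mid-walk, and ceph_readdir()
 * falls back to a sync readdir from the MDS.
 */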

/*
 * make note of the last dentry we read, so we can
 * continue at the same lexicographical point,
 * regardless of what dir changes take place on the
 * server.
 */
static int note_last_dentry(struct ceph_file_info *fi, const char *name,
			    int len)
{
	kfree(fi->last_name);
	fi->last_name = kmalloc(len+1, GFP_NOFS);
	if (!fi->last_name)
		return -ENOMEM;
	memcpy(fi->last_name, name, len);
	fi->last_name[len] = 0;
	dout("note_last_dentry '%s'\n", fi->last_name);
	return 0;
}

static int ceph_readdir(struct file *file, struct dir_context *ctx)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	unsigned frag = fpos_frag(ctx->pos);
	int off = fpos_off(ctx->pos);
	int err;
	u32 ftype;
	struct ceph_mds_reply_info_parsed *rinfo;

	dout("readdir %p file %p frag %u off %u\n", inode, file, frag, off);
	if (fi->flags & CEPH_F_ATEND)
		return 0;

	/* always start with . and .. */
	if (ctx->pos == 0) {
		/* note dir version at start of readdir so we can tell
		 * if any dentries get dropped */
		fi->dir_release_count = atomic_read(&ci->i_release_count);

		dout("readdir off 0 -> '.'\n");
		if (!dir_emit(ctx, ".", 1,
			    ceph_translate_ino(inode->i_sb, inode->i_ino),
			    inode->i_mode >> 12))
			return 0;
		ctx->pos = 1;
		off = 1;
	}
	if (ctx->pos == 1) {
		ino_t ino = parent_ino(file->f_dentry);
		dout("readdir off 1 -> '..'\n");
		if (!dir_emit(ctx, "..", 2,
			    ceph_translate_ino(inode->i_sb, ino),
			    inode->i_mode >> 12))
			return 0;
		ctx->pos = 2;
		off = 2;
	}

	/* can we use the dcache? */
	spin_lock(&ci->i_ceph_lock);
	if ((ctx->pos == 2 || fi->dentry) &&
	    !ceph_test_mount_opt(fsc, NOASYNCREADDIR) &&
	    ceph_snap(inode) != CEPH_SNAPDIR &&
	    __ceph_dir_is_complete(ci) &&
	    __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) {
		u32 shared_gen = ci->i_shared_gen;
		spin_unlock(&ci->i_ceph_lock);
		err = __dcache_readdir(file, ctx, shared_gen);
		if (err != -EAGAIN)
			return err;
		frag = fpos_frag(ctx->pos);
		off = fpos_off(ctx->pos);
	} else {
		spin_unlock(&ci->i_ceph_lock);
	}
	if (fi->dentry) {
		err = note_last_dentry(fi, fi->dentry->d_name.name,
				       fi->dentry->d_name.len);
		if (err)
			return err;
		dput(fi->dentry);
		fi->dentry = NULL;
	}

	/* proceed with a normal readdir */

more:
	/* do we have the correct frag content buffered? */
	if (fi->frag != frag || fi->last_readdir == NULL) {
		struct ceph_mds_request *req;
		int op = ceph_snap(inode) == CEPH_SNAPDIR ?
			CEPH_MDS_OP_LSSNAP : CEPH_MDS_OP_READDIR;

		/* discard old result, if any */
		if (fi->last_readdir) {
			ceph_mdsc_put_request(fi->last_readdir);
			fi->last_readdir = NULL;
		}

		dout("readdir fetching %llx.%llx frag %x offset '%s'\n",
		     ceph_vinop(inode), frag, fi->last_name);
		req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
		if (IS_ERR(req))
			return PTR_ERR(req);
		err = ceph_alloc_readdir_reply_buffer(req, inode);
		if (err) {
			ceph_mdsc_put_request(req);
			return err;
		}
		req->r_inode = inode;
		ihold(inode);
		req->r_dentry = dget(file->f_dentry);
		/* hints to request -> mds selection code */
		req->r_direct_mode = USE_AUTH_MDS;
		req->r_direct_hash = ceph_frag_value(frag);
		req->r_direct_is_hash = true;
		req->r_path2 = kstrdup(fi->last_name, GFP_NOFS);
		req->r_readdir_offset = fi->next_offset;
		req->r_args.readdir.frag = cpu_to_le32(frag);
		err = ceph_mdsc_do_request(mdsc, NULL, req);
		if (err < 0) {
			ceph_mdsc_put_request(req);
			return err;
		}
		dout("readdir got and parsed readdir result=%d"
		     " on frag %x, end=%d, complete=%d\n", err, frag,
		     (int)req->r_reply_info.dir_end,
		     (int)req->r_reply_info.dir_complete);

		if (!req->r_did_prepopulate) {
			dout("readdir !did_prepopulate");
			/* preclude from marking dir complete */
			fi->dir_release_count--;
		}

		/* note next offset and last dentry name */
		rinfo = &req->r_reply_info;
		if (le32_to_cpu(rinfo->dir_dir->frag) != frag) {
			frag = le32_to_cpu(rinfo->dir_dir->frag);
			if (ceph_frag_is_leftmost(frag))
				fi->next_offset = 2;
			else
				fi->next_offset = 0;
			off = fi->next_offset;
		}
		fi->frag = frag;
		fi->offset = fi->next_offset;
		fi->last_readdir = req;

		if (req->r_reply_info.dir_end) {
			kfree(fi->last_name);
			fi->last_name = NULL;
			if (ceph_frag_is_rightmost(frag))
				fi->next_offset = 2;
			else
				fi->next_offset = 0;
		} else {
			err = note_last_dentry(fi,
				       rinfo->dir_dname[rinfo->dir_nr-1],
				       rinfo->dir_dname_len[rinfo->dir_nr-1]);
			if (err)
				return err;
			fi->next_offset += rinfo->dir_nr;
		}
	}

	rinfo = &fi->last_readdir->r_reply_info;
	dout("readdir frag %x num %d off %d chunkoff %d\n", frag,
	     rinfo->dir_nr, off, fi->offset);

	ctx->pos = ceph_make_fpos(frag, off);
	while (off >= fi->offset && off - fi->offset < rinfo->dir_nr) {
		struct ceph_mds_reply_inode *in =
			rinfo->dir_in[off - fi->offset].in;
		struct ceph_vino vino;
		ino_t ino;

		dout("readdir off %d (%d/%d) -> %lld '%.*s' %p\n",
		     off, off - fi->offset, rinfo->dir_nr, ctx->pos,
		     rinfo->dir_dname_len[off - fi->offset],
		     rinfo->dir_dname[off - fi->offset], in);
		BUG_ON(!in);
		ftype = le32_to_cpu(in->mode) >> 12;
		vino.ino = le64_to_cpu(in->ino);
		vino.snap = le64_to_cpu(in->snapid);
		ino = ceph_vino_to_ino(vino);
		if (!dir_emit(ctx,
			    rinfo->dir_dname[off - fi->offset],
			    rinfo->dir_dname_len[off - fi->offset],
			    ceph_translate_ino(inode->i_sb, ino), ftype)) {
			dout("filldir stopping us...\n");
			return 0;
		}
		off++;
		ctx->pos++;
	}

	if (fi->last_name) {
		ceph_mdsc_put_request(fi->last_readdir);
		fi->last_readdir = NULL;
		goto more;
	}

	/* more frags? */
	if (!ceph_frag_is_rightmost(frag)) {
		frag = ceph_frag_next(frag);
		off = 0;
		ctx->pos = ceph_make_fpos(frag, off);
		dout("readdir next frag is %x\n", frag);
		goto more;
	}
	fi->flags |= CEPH_F_ATEND;

	/*
	 * if dir_release_count still matches the dir, no dentries
	 * were released during the whole readdir, and we should have
	 * the complete dir contents in our cache.
	 */
	spin_lock(&ci->i_ceph_lock);
	if (atomic_read(&ci->i_release_count) == fi->dir_release_count) {
		dout(" marking %p complete\n", inode);
		__ceph_dir_set_complete(ci, fi->dir_release_count);
	}
	spin_unlock(&ci->i_ceph_lock);

	dout("readdir %p file %p done.\n", inode, file);
	return 0;
}

static void reset_readdir(struct ceph_file_info *fi, unsigned frag)
{
	if (fi->last_readdir) {
		ceph_mdsc_put_request(fi->last_readdir);
		fi->last_readdir = NULL;
	}
	kfree(fi->last_name);
	fi->last_name = NULL;
	if (ceph_frag_is_leftmost(frag))
		fi->next_offset = 2;	/* compensate for . and .. */
	else
		fi->next_offset = 0;
	if (fi->dentry) {
		dput(fi->dentry);
		fi->dentry = NULL;
	}
	fi->flags &= ~CEPH_F_ATEND;
}

static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int whence)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file->f_mapping->host;
	loff_t old_offset = ceph_make_fpos(fi->frag, fi->next_offset);
	loff_t retval;

	mutex_lock(&inode->i_mutex);
	retval = -EINVAL;
	switch (whence) {
	case SEEK_END:
		offset += inode->i_size + 2;	/* FIXME */
		break;
	case SEEK_CUR:
		offset += file->f_pos;
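		/* fall through: SEEK_CUR adjusts offset, then shares SEEK_SET's path */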
	case SEEK_SET:
		break;
	default:
		goto out;
	}

	if (offset >= 0) {
		if (offset != file->f_pos) {
			file->f_pos = offset;
			file->f_version = 0;
			fi->flags &= ~CEPH_F_ATEND;
		}
		retval = offset;

		/*
		 * discard buffered readdir content on seekdir(0), or
		 * seek to new frag, or seek prior to current chunk.
		 */
		if (offset == 0 ||
		    fpos_frag(offset) != fi->frag ||
		    fpos_off(offset) < fi->offset) {
			dout("dir_llseek dropping %p content\n", file);
			reset_readdir(fi, fpos_frag(offset));
		}

		/* bump dir_release_count if we did a forward seek */
		if (fpos_cmp(offset, old_offset) > 0)
			fi->dir_release_count--;
	}
out:
	mutex_unlock(&inode->i_mutex);
	return retval;
}

/*
 * Handle lookups for the hidden .snap directory.
 */
int ceph_handle_snapdir(struct ceph_mds_request *req,
			struct dentry *dentry, int err)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
	struct inode *parent = dentry->d_parent->d_inode; /* we hold i_mutex */

	/* .snap dir? */
	if (err == -ENOENT &&
	    ceph_snap(parent) == CEPH_NOSNAP &&
	    strcmp(dentry->d_name.name,
		   fsc->mount_options->snapdir_name) == 0) {
		struct inode *inode = ceph_get_snapdir(parent);
		dout("ENOENT on snapdir %p '%.*s', linking to snapdir %p\n",
		     dentry, dentry->d_name.len, dentry->d_name.name, inode);
		BUG_ON(!d_unhashed(dentry));
		d_add(dentry, inode);
		err = 0;
	}
	return err;
}

/*
 * Figure out final result of a lookup/open request.
 *
 * Mainly, make sure we return the final req->r_dentry (if it already
 * existed) in place of the original VFS-provided dentry when they
 * differ.
 *
 * Gracefully handle the case where the MDS replies with -ENOENT and
 * no trace (which it may do, at its discretion, e.g., if it doesn't
 * care to issue a lease on the negative dentry).
 */
struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
				  struct dentry *dentry, int err)
{
	if (err == -ENOENT) {
		/* no trace? */
		err = 0;
		if (!req->r_reply_info.head->is_dentry) {
			dout("ENOENT and no trace, dentry %p inode %p\n",
			     dentry, dentry->d_inode);
			if (dentry->d_inode) {
				d_drop(dentry);
				err = -ENOENT;
			} else {
				d_add(dentry, NULL);
			}
		}
	}
	if (err)
		dentry = ERR_PTR(err);
	else if (dentry != req->r_dentry)
		dentry = dget(req->r_dentry);	/* we got spliced */
	else
		dentry = NULL;
	return dentry;
}

static int is_root_ceph_dentry(struct inode *inode, struct dentry *dentry)
{
	return ceph_ino(inode) == CEPH_INO_ROOT &&
		strncmp(dentry->d_name.name, ".ceph", 5) == 0;
}

/*
 * Look up a single dir entry.  If there is a lookup intent, inform
 * the MDS so that it gets our 'caps wanted' value in a single op.
 */
static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
				  unsigned int flags)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int op;
	int err;

	dout("lookup %p dentry %p '%.*s'\n",
	     dir, dentry, dentry->d_name.len, dentry->d_name.name);

	if (dentry->d_name.len > NAME_MAX)
		return ERR_PTR(-ENAMETOOLONG);

	err = ceph_init_dentry(dentry);
	if (err < 0)
		return ERR_PTR(err);

	/* can we conclude ENOENT locally? */
	if (dentry->d_inode == NULL) {
		struct ceph_inode_info *ci = ceph_inode(dir);
		struct ceph_dentry_info *di = ceph_dentry(dentry);

		spin_lock(&ci->i_ceph_lock);
		dout(" dir %p flags are %d\n", dir, ci->i_ceph_flags);
		if (strncmp(dentry->d_name.name,
			    fsc->mount_options->snapdir_name,
			    dentry->d_name.len) &&
		    !is_root_ceph_dentry(dir, dentry) &&
		    __ceph_dir_is_complete(ci) &&
		    (__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1))) {
			spin_unlock(&ci->i_ceph_lock);
			dout(" dir %p complete, -ENOENT\n", dir);
			d_add(dentry, NULL);
			di->lease_shared_gen = ci->i_shared_gen;
			return NULL;
		}
		spin_unlock(&ci->i_ceph_lock);
	}

	op = ceph_snap(dir) == CEPH_SNAPDIR ?
		CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_LOOKUP;
	req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS);
	if (IS_ERR(req))
		return ERR_CAST(req);
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	/* we only need inode linkage */
	req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE);
	req->r_locked_dir = dir;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	err = ceph_handle_snapdir(req, dentry, err);
	dentry = ceph_finish_lookup(req, dentry, err);
	ceph_mdsc_put_request(req);	/* will dput(dentry) */
	dout("lookup result=%p\n", dentry);
	return dentry;
}

/*
 * If we do a create but get no trace back from the MDS, follow up with
 * a lookup (the VFS expects us to link up the provided dentry).
 */
int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry)
{
	struct dentry *result = ceph_lookup(dir, dentry, 0);

	if (result && !IS_ERR(result)) {
		/*
		 * We created the item, then did a lookup, and found
		 * it was already linked to another inode we already
		 * had in our cache (and thus got spliced).  Link our
		 * dentry to that inode, but don't hash it, just in
		 * case the VFS wants to dereference it.
		 */
		BUG_ON(!result->d_inode);
		d_instantiate(dentry, result->d_inode);
		return 0;
	}
	return PTR_ERR(result);
}

static int ceph_mknod(struct inode *dir, struct dentry *dentry,
		      umode_t mode, dev_t rdev)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("mknod in dir %p dentry %p mode 0%ho rdev %d\n",
	     dir, dentry, mode, rdev);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_MKNOD, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		d_drop(dentry);
		return PTR_ERR(req);
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_args.mknod.mode = cpu_to_le32(mode);
	req->r_args.mknod.rdev = cpu_to_le32(rdev);
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);

	if (!err)
		ceph_init_acl(dentry, dentry->d_inode, dir);
	else
		d_drop(dentry);
	return err;
}

static int ceph_create(struct inode *dir, struct dentry *dentry, umode_t mode,
		       bool excl)
{
	return ceph_mknod(dir, dentry, mode, 0);
}

static int ceph_symlink(struct inode *dir, struct dentry *dentry,
			const char *dest)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("symlink in dir %p dentry %p to '%s'\n", dir, dentry, dest);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SYMLINK, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		d_drop(dentry);
		return PTR_ERR(req);
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_path2 = kstrdup(dest, GFP_NOFS);
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
	if (!err)
		ceph_init_acl(dentry, dentry->d_inode, dir);
	else
		d_drop(dentry);
	return err;
}

static int ceph_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err = -EROFS;
	int op;

	if (ceph_snap(dir) == CEPH_SNAPDIR) {
		/* mkdir .snap/foo is a MKSNAP */
		op = CEPH_MDS_OP_MKSNAP;
		dout("mksnap dir %p snap '%.*s' dn %p\n", dir,
		     dentry->d_name.len, dentry->d_name.name, dentry);
	} else if (ceph_snap(dir) == CEPH_NOSNAP) {
		dout("mkdir dir %p dn %p mode 0%ho\n", dir, dentry, mode);
		op = CEPH_MDS_OP_MKDIR;
	} else {
		goto out;
	}
	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_args.mkdir.mode = cpu_to_le32(mode);
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
out:
	if (!err)
		ceph_init_acl(dentry, dentry->d_inode, dir);
	else
		d_drop(dentry);
	return err;
}

static int ceph_link(struct dentry *old_dentry, struct inode *dir,
		     struct dentry *dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("link in dir %p old_dentry %p dentry %p\n", dir,
	     old_dentry, dentry);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LINK, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		d_drop(dentry);
		return PTR_ERR(req);
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_old_dentry = dget(old_dentry);
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	/* release LINK_SHARED on source inode (mds will lock it) */
	req->r_old_inode_drop = CEPH_CAP_LINK_SHARED;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (err) {
		d_drop(dentry);
	} else if (!req->r_reply_info.head->is_dentry) {
		ihold(old_dentry->d_inode);
		d_instantiate(dentry, old_dentry->d_inode);
	}
	ceph_mdsc_put_request(req);
	return err;
}

/*
 * For a soon-to-be unlinked file, drop the LINK caps.  If it
 * looks like the link count will hit 0, drop any other caps (other
 * than PIN) we don't specifically want (due to the file still being
 * open).
 */
static int drop_caps_for_unlink(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;

	spin_lock(&ci->i_ceph_lock);
	if (inode->i_nlink == 1) {
		drop |= ~(__ceph_caps_wanted(ci) | CEPH_CAP_PIN);
		ci->i_ceph_flags |= CEPH_I_NODELAY;
	}
	spin_unlock(&ci->i_ceph_lock);
	return drop;
}
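/*
 * A worked example (a sketch, not exhaustive): unlinking a file whose
 * i_nlink is 1 while it is still open for read leaves the FILE caps
 * the open file wants, but asks the MDS to drop the LINK caps and
 * everything else not wanted and not CEPH_CAP_PIN.
 */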

/*
 * rmdir and unlink differ only in the metadata op code
 */
static int ceph_unlink(struct inode *dir, struct dentry *dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct inode *inode = dentry->d_inode;
	struct ceph_mds_request *req;
	int err = -EROFS;
	int op;

	if (ceph_snap(dir) == CEPH_SNAPDIR) {
		/* rmdir .snap/foo is RMSNAP */
		dout("rmsnap dir %p '%.*s' dn %p\n", dir, dentry->d_name.len,
		     dentry->d_name.name, dentry);
		op = CEPH_MDS_OP_RMSNAP;
	} else if (ceph_snap(dir) == CEPH_NOSNAP) {
		dout("unlink/rmdir dir %p dn %p inode %p\n",
		     dir, dentry, inode);
		op = S_ISDIR(dentry->d_inode->i_mode) ?
			CEPH_MDS_OP_RMDIR : CEPH_MDS_OP_UNLINK;
	} else
		goto out;
	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	req->r_inode_drop = drop_caps_for_unlink(inode);
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		d_delete(dentry);
	ceph_mdsc_put_request(req);
out:
	return err;
}

static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry,
		       struct inode *new_dir, struct dentry *new_dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(old_dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(old_dir) != ceph_snap(new_dir))
		return -EXDEV;
	if (ceph_snap(old_dir) != CEPH_NOSNAP ||
	    ceph_snap(new_dir) != CEPH_NOSNAP)
		return -EROFS;
	dout("rename dir %p dentry %p to dir %p dentry %p\n",
	     old_dir, old_dentry, new_dir, new_dentry);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_RENAME, USE_AUTH_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);
	ihold(old_dir);
	req->r_dentry = dget(new_dentry);
	req->r_num_caps = 2;
	req->r_old_dentry = dget(old_dentry);
	req->r_old_dentry_dir = old_dir;
	req->r_locked_dir = new_dir;
	req->r_old_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_old_dentry_unless = CEPH_CAP_FILE_EXCL;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	/* release LINK_SHARED on source inode (mds will lock it) */
	req->r_old_inode_drop = CEPH_CAP_LINK_SHARED;
	if (new_dentry->d_inode)
		req->r_inode_drop = drop_caps_for_unlink(new_dentry->d_inode);
	err = ceph_mdsc_do_request(mdsc, old_dir, req);
	if (!err && !req->r_reply_info.head->is_dentry) {
		/*
		 * Normally d_move() is done by fill_trace (called by
		 * do_request, above).  If there is no trace, we need
		 * to do it here.
		 */

		d_move(old_dentry, new_dentry);

		/* ensure target dentry is invalidated, despite
		   rehashing bug in vfs_rename_dir */
		ceph_invalidate_dentry_lease(new_dentry);

		/* d_move screws up sibling dentries' offsets */
		ceph_dir_clear_complete(old_dir);
		ceph_dir_clear_complete(new_dir);

	}
	ceph_mdsc_put_request(req);
	return err;
}

/*
 * Ensure a dentry lease will no longer revalidate.
 */
void ceph_invalidate_dentry_lease(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	dentry->d_time = jiffies;
	ceph_dentry(dentry)->lease_shared_gen = 0;
	spin_unlock(&dentry->d_lock);
}

/*
 * Check if dentry lease is valid.  If not, delete the lease.  Try to
 * renew if the lease is more than half up.
 */
static int dentry_lease_is_valid(struct dentry *dentry)
{
	struct ceph_dentry_info *di;
	struct ceph_mds_session *s;
	int valid = 0;
	u32 gen;
	unsigned long ttl;
	struct ceph_mds_session *session = NULL;
	struct inode *dir = NULL;
	u32 seq = 0;

	spin_lock(&dentry->d_lock);
	di = ceph_dentry(dentry);
	if (di->lease_session) {
		s = di->lease_session;
		spin_lock(&s->s_gen_ttl_lock);
		gen = s->s_cap_gen;
		ttl = s->s_cap_ttl;
		spin_unlock(&s->s_gen_ttl_lock);

		if (di->lease_gen == gen &&
		    time_before(jiffies, dentry->d_time) &&
		    time_before(jiffies, ttl)) {
			valid = 1;
			if (di->lease_renew_after &&
			    time_after(jiffies, di->lease_renew_after)) {
				/* we should renew */
				dir = dentry->d_parent->d_inode;
				session = ceph_get_mds_session(s);
				seq = di->lease_seq;
				di->lease_renew_after = 0;
				di->lease_renew_from = jiffies;
			}
		}
	}
	spin_unlock(&dentry->d_lock);

	if (session) {
		ceph_mdsc_lease_send_msg(session, dir, dentry,
					 CEPH_MDS_LEASE_RENEW, seq);
		ceph_put_mds_session(session);
	}
	dout("dentry_lease_is_valid - dentry %p = %d\n", dentry, valid);
	return valid;
}
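/*
 * Note on the renewal above: lease_renew_after is armed when the lease
 * is installed (roughly at its half-life); once jiffies passes it we
 * send a single CEPH_MDS_LEASE_RENEW and zero it, so we won't renew
 * again until a fresh lease arrives from the MDS.
 */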

/*
 * Check if directory-wide content lease/cap is valid.
 */
static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry)
{
	struct ceph_inode_info *ci = ceph_inode(dir);
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	int valid = 0;

	spin_lock(&ci->i_ceph_lock);
	if (ci->i_shared_gen == di->lease_shared_gen)
		valid = __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1);
	spin_unlock(&ci->i_ceph_lock);
	dout("dir_lease_is_valid dir %p v%u dentry %p v%u = %d\n",
	     dir, (unsigned)ci->i_shared_gen, dentry,
	     (unsigned)di->lease_shared_gen, valid);
	return valid;
}

/*
 * Check if cached dentry can be trusted.
 */
static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
{
	int valid = 0;
	struct inode *dir;

	if (flags & LOOKUP_RCU)
		return -ECHILD;

	dout("d_revalidate %p '%.*s' inode %p offset %lld\n", dentry,
	     dentry->d_name.len, dentry->d_name.name, dentry->d_inode,
	     ceph_dentry(dentry)->offset);

	dir = ceph_get_dentry_parent_inode(dentry);

	/* always trust cached snapped dentries, snapdir dentry */
	if (ceph_snap(dir) != CEPH_NOSNAP) {
		dout("d_revalidate %p '%.*s' inode %p is SNAPPED\n", dentry,
		     dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
		valid = 1;
	} else if (dentry->d_inode &&
		   ceph_snap(dentry->d_inode) == CEPH_SNAPDIR) {
		valid = 1;
	} else if (dentry_lease_is_valid(dentry) ||
		   dir_lease_is_valid(dir, dentry)) {
		if (dentry->d_inode)
			valid = ceph_is_any_caps(dentry->d_inode);
		else
			valid = 1;
	}

	dout("d_revalidate %p %s\n", dentry, valid ? "valid" : "invalid");
	if (valid) {
		ceph_dentry_lru_touch(dentry);
	} else {
		ceph_dir_clear_complete(dir);
		d_drop(dentry);
	}
	iput(dir);
	return valid;
}

/*
 * Release our ceph_dentry_info.
 */
static void ceph_d_release(struct dentry *dentry)
{
	struct ceph_dentry_info *di = ceph_dentry(dentry);

	dout("d_release %p\n", dentry);
	ceph_dentry_lru_del(dentry);
	if (di->lease_session)
		ceph_put_mds_session(di->lease_session);
	kmem_cache_free(ceph_dentry_cachep, di);
	dentry->d_fsdata = NULL;
}

static int ceph_snapdir_d_revalidate(struct dentry *dentry,
				     unsigned int flags)
{
	/*
	 * Eventually, we'll want to revalidate snapped metadata
	 * too... probably...
	 */
	return 1;
}

/*
 * When the VFS prunes a dentry from the cache, we need to clear the
 * complete flag on the parent directory.
 *
 * Called under dentry->d_lock.
 */
static void ceph_d_prune(struct dentry *dentry)
{
	dout("ceph_d_prune %p\n", dentry);

	/* do we have a valid parent? */
	if (IS_ROOT(dentry))
		return;

	/* if we are not hashed, we don't affect dir's completeness */
	if (d_unhashed(dentry))
		return;

	/*
	 * we hold d_lock, so d_parent is stable, and d_fsdata is never
	 * cleared until d_release
	 */
	ceph_dir_clear_complete(dentry->d_parent->d_inode);
}

/*
 * read() on a dir.  This weird interface hack only works if mounted
 * with '-o dirstat'.
 */
static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
			     loff_t *ppos)
{
	struct ceph_file_info *cf = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	int left;
	const int bufsize = 1024;

	if (!ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), DIRSTAT))
		return -EISDIR;

	if (!cf->dir_info) {
		cf->dir_info = kmalloc(bufsize, GFP_NOFS);
		if (!cf->dir_info)
			return -ENOMEM;
		cf->dir_info_len =
			snprintf(cf->dir_info, bufsize,
				 "entries:   %20lld\n"
				 " files:    %20lld\n"
				 " subdirs:  %20lld\n"
				 "rentries:  %20lld\n"
				 " rfiles:   %20lld\n"
				 " rsubdirs: %20lld\n"
				 "rbytes:    %20lld\n"
				 "rctime:    %10ld.%09ld\n",
				 ci->i_files + ci->i_subdirs,
				 ci->i_files,
				 ci->i_subdirs,
				 ci->i_rfiles + ci->i_rsubdirs,
				 ci->i_rfiles,
				 ci->i_rsubdirs,
				 ci->i_rbytes,
				 (long)ci->i_rctime.tv_sec,
				 (long)ci->i_rctime.tv_nsec);
	}

	if (*ppos >= cf->dir_info_len)
		return 0;
	size = min_t(unsigned, size, cf->dir_info_len-*ppos);
	left = copy_to_user(buf, cf->dir_info + *ppos, size);
	if (left == size)
		return -EFAULT;
	*ppos += (size - left);
	return size - left;
}

/*
 * an fsync() on a dir will wait for any uncommitted directory
 * operations to commit.
 */
static int ceph_dir_fsync(struct file *file, loff_t start, loff_t end,
			  int datasync)
{
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct list_head *head = &ci->i_unsafe_dirops;
	struct ceph_mds_request *req;
	u64 last_tid;
	int ret = 0;

	dout("dir_fsync %p\n", inode);
	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (ret)
		return ret;
	mutex_lock(&inode->i_mutex);

	spin_lock(&ci->i_unsafe_lock);
	if (list_empty(head))
		goto out;

	req = list_entry(head->prev,
			 struct ceph_mds_request, r_unsafe_dir_item);
	last_tid = req->r_tid;

	do {
		ceph_mdsc_get_request(req);
		spin_unlock(&ci->i_unsafe_lock);

		dout("dir_fsync %p wait on tid %llu (until %llu)\n",
		     inode, req->r_tid, last_tid);
		if (req->r_timeout) {
			ret = wait_for_completion_timeout(
				&req->r_safe_completion, req->r_timeout);
			if (ret > 0)
				ret = 0;
			else if (ret == 0)
				ret = -EIO;	/* timed out */
		} else {
			wait_for_completion(&req->r_safe_completion);
		}
		ceph_mdsc_put_request(req);

		spin_lock(&ci->i_unsafe_lock);
		if (ret || list_empty(head))
			break;
		req = list_entry(head->next,
				 struct ceph_mds_request, r_unsafe_dir_item);
	} while (req->r_tid < last_tid);
out:
	spin_unlock(&ci->i_unsafe_lock);
	mutex_unlock(&inode->i_mutex);

	return ret;
}

/*
 * We maintain a private dentry LRU.
 *
 * FIXME: this needs to be changed to a per-mds lru to be useful.
 */
void ceph_dentry_lru_add(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_add %p %p '%.*s'\n", di, dn,
	     dn->d_name.len, dn->d_name.name);
	mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
	spin_lock(&mdsc->dentry_lru_lock);
	list_add_tail(&di->lru, &mdsc->dentry_lru);
	mdsc->num_dentry++;
	spin_unlock(&mdsc->dentry_lru_lock);
}

void ceph_dentry_lru_touch(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_touch %p %p '%.*s' (offset %lld)\n", di, dn,
	     dn->d_name.len, dn->d_name.name, di->offset);
	mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
	spin_lock(&mdsc->dentry_lru_lock);
	list_move_tail(&di->lru, &mdsc->dentry_lru);
	spin_unlock(&mdsc->dentry_lru_lock);
}

void ceph_dentry_lru_del(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_del %p %p '%.*s'\n", di, dn,
	     dn->d_name.len, dn->d_name.name);
	mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
	spin_lock(&mdsc->dentry_lru_lock);
	list_del_init(&di->lru);
	mdsc->num_dentry--;
	spin_unlock(&mdsc->dentry_lru_lock);
}

/*
 * Return name hash for a given dentry.  This is dependent on
 * the parent directory's hash function.
 */
unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn)
{
	struct ceph_inode_info *dci = ceph_inode(dir);

	switch (dci->i_dir_layout.dl_dir_hash) {
	case 0:	/* for backward compat */
	case CEPH_STR_HASH_LINUX:
		return dn->d_name.hash;

	default:
		return ceph_str_hash(dci->i_dir_layout.dl_dir_hash,
				     dn->d_name.name, dn->d_name.len);
	}
}
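/*
 * For example: a directory whose layout names CEPH_STR_HASH_RJENKINS
 * (one of the hashes Ceph supports) gets ceph_str_hash() applied to
 * the name, while a layout of 0 or CEPH_STR_HASH_LINUX simply reuses
 * the VFS-computed d_name.hash.
 */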

const struct file_operations ceph_dir_fops = {
	.read = ceph_read_dir,
	.iterate = ceph_readdir,
	.llseek = ceph_dir_llseek,
	.open = ceph_open,
	.release = ceph_release,
	.unlocked_ioctl = ceph_ioctl,
	.fsync = ceph_dir_fsync,
};

const struct inode_operations ceph_dir_iops = {
	.lookup = ceph_lookup,
	.permission = ceph_permission,
	.getattr = ceph_getattr,
	.setattr = ceph_setattr,
	.setxattr = ceph_setxattr,
	.getxattr = ceph_getxattr,
	.listxattr = ceph_listxattr,
	.removexattr = ceph_removexattr,
	.get_acl = ceph_get_acl,
	.set_acl = ceph_set_acl,
	.mknod = ceph_mknod,
	.symlink = ceph_symlink,
	.mkdir = ceph_mkdir,
	.link = ceph_link,
	.unlink = ceph_unlink,
	.rmdir = ceph_unlink,
	.rename = ceph_rename,
	.create = ceph_create,
	.atomic_open = ceph_atomic_open,
};

const struct dentry_operations ceph_dentry_ops = {
	.d_revalidate = ceph_d_revalidate,
	.d_release = ceph_d_release,
	.d_prune = ceph_d_prune,
};

const struct dentry_operations ceph_snapdir_dentry_ops = {
	.d_revalidate = ceph_snapdir_d_revalidate,
	.d_release = ceph_d_release,
};

const struct dentry_operations ceph_snap_dentry_ops = {
	.d_release = ceph_d_release,
	.d_prune = ceph_d_prune,
};
1#include <linux/ceph/ceph_debug.h>
2
3#include <linux/spinlock.h>
4#include <linux/fs_struct.h>
5#include <linux/namei.h>
6#include <linux/slab.h>
7#include <linux/sched.h>
8#include <linux/xattr.h>
9
10#include "super.h"
11#include "mds_client.h"
12
13/*
14 * Directory operations: readdir, lookup, create, link, unlink,
15 * rename, etc.
16 */
17
18/*
19 * Ceph MDS operations are specified in terms of a base ino and
20 * relative path. Thus, the client can specify an operation on a
21 * specific inode (e.g., a getattr due to fstat(2)), or as a path
22 * relative to, say, the root directory.
23 *
24 * Normally, we limit ourselves to strict inode ops (no path component)
25 * or dentry operations (a single path component relative to an ino). The
26 * exception to this is open_root_dentry(), which will open the mount
27 * point by name.
28 */
29
30const struct dentry_operations ceph_dentry_ops;
31
32/*
33 * Initialize ceph dentry state.
34 */
35static int ceph_d_init(struct dentry *dentry)
36{
37 struct ceph_dentry_info *di;
38
39 di = kmem_cache_zalloc(ceph_dentry_cachep, GFP_KERNEL);
40 if (!di)
41 return -ENOMEM; /* oh well */
42
43 di->dentry = dentry;
44 di->lease_session = NULL;
45 di->time = jiffies;
46 dentry->d_fsdata = di;
47 ceph_dentry_lru_add(dentry);
48 return 0;
49}
50
51/*
52 * for f_pos for readdir:
53 * - hash order:
54 * (0xff << 52) | ((24 bits hash) << 28) |
55 * (the nth entry has hash collision);
56 * - frag+name order;
57 * ((frag value) << 28) | (the nth entry in frag);
58 */
59#define OFFSET_BITS 28
60#define OFFSET_MASK ((1 << OFFSET_BITS) - 1)
61#define HASH_ORDER (0xffull << (OFFSET_BITS + 24))
62loff_t ceph_make_fpos(unsigned high, unsigned off, bool hash_order)
63{
64 loff_t fpos = ((loff_t)high << 28) | (loff_t)off;
65 if (hash_order)
66 fpos |= HASH_ORDER;
67 return fpos;
68}
69
70static bool is_hash_order(loff_t p)
71{
72 return (p & HASH_ORDER) == HASH_ORDER;
73}
74
75static unsigned fpos_frag(loff_t p)
76{
77 return p >> OFFSET_BITS;
78}
79
80static unsigned fpos_hash(loff_t p)
81{
82 return ceph_frag_value(fpos_frag(p));
83}
84
85static unsigned fpos_off(loff_t p)
86{
87 return p & OFFSET_MASK;
88}
89
90static int fpos_cmp(loff_t l, loff_t r)
91{
92 int v = ceph_frag_compare(fpos_frag(l), fpos_frag(r));
93 if (v)
94 return v;
95 return (int)(fpos_off(l) - fpos_off(r));
96}
97
98/*
99 * make note of the last dentry we read, so we can
100 * continue at the same lexicographical point,
101 * regardless of what dir changes take place on the
102 * server.
103 */
104static int note_last_dentry(struct ceph_file_info *fi, const char *name,
105 int len, unsigned next_offset)
106{
107 char *buf = kmalloc(len+1, GFP_KERNEL);
108 if (!buf)
109 return -ENOMEM;
110 kfree(fi->last_name);
111 fi->last_name = buf;
112 memcpy(fi->last_name, name, len);
113 fi->last_name[len] = 0;
114 fi->next_offset = next_offset;
115 dout("note_last_dentry '%s'\n", fi->last_name);
116 return 0;
117}
118
119
120static struct dentry *
121__dcache_find_get_entry(struct dentry *parent, u64 idx,
122 struct ceph_readdir_cache_control *cache_ctl)
123{
124 struct inode *dir = d_inode(parent);
125 struct dentry *dentry;
126 unsigned idx_mask = (PAGE_SIZE / sizeof(struct dentry *)) - 1;
127 loff_t ptr_pos = idx * sizeof(struct dentry *);
128 pgoff_t ptr_pgoff = ptr_pos >> PAGE_SHIFT;
129
130 if (ptr_pos >= i_size_read(dir))
131 return NULL;
132
133 if (!cache_ctl->page || ptr_pgoff != page_index(cache_ctl->page)) {
134 ceph_readdir_cache_release(cache_ctl);
135 cache_ctl->page = find_lock_page(&dir->i_data, ptr_pgoff);
136 if (!cache_ctl->page) {
137 dout(" page %lu not found\n", ptr_pgoff);
138 return ERR_PTR(-EAGAIN);
139 }
140 /* reading/filling the cache are serialized by
141 i_mutex, no need to use page lock */
142 unlock_page(cache_ctl->page);
143 cache_ctl->dentries = kmap(cache_ctl->page);
144 }
145
146 cache_ctl->index = idx & idx_mask;
147
148 rcu_read_lock();
149 spin_lock(&parent->d_lock);
150 /* check i_size again here, because empty directory can be
151 * marked as complete while not holding the i_mutex. */
152 if (ceph_dir_is_complete_ordered(dir) && ptr_pos < i_size_read(dir))
153 dentry = cache_ctl->dentries[cache_ctl->index];
154 else
155 dentry = NULL;
156 spin_unlock(&parent->d_lock);
157 if (dentry && !lockref_get_not_dead(&dentry->d_lockref))
158 dentry = NULL;
159 rcu_read_unlock();
160 return dentry ? : ERR_PTR(-EAGAIN);
161}
162
163/*
164 * When possible, we try to satisfy a readdir by peeking at the
165 * dcache. We make this work by carefully ordering dentries on
166 * d_child when we initially get results back from the MDS, and
167 * falling back to a "normal" sync readdir if any dentries in the dir
168 * are dropped.
169 *
170 * Complete dir indicates that we have all dentries in the dir. It is
171 * defined IFF we hold CEPH_CAP_FILE_SHARED (which will be revoked by
172 * the MDS if/when the directory is modified).
173 */
174static int __dcache_readdir(struct file *file, struct dir_context *ctx,
175 u32 shared_gen)
176{
177 struct ceph_file_info *fi = file->private_data;
178 struct dentry *parent = file->f_path.dentry;
179 struct inode *dir = d_inode(parent);
180 struct dentry *dentry, *last = NULL;
181 struct ceph_dentry_info *di;
182 struct ceph_readdir_cache_control cache_ctl = {};
183 u64 idx = 0;
184 int err = 0;
185
186 dout("__dcache_readdir %p v%u at %llx\n", dir, shared_gen, ctx->pos);
187
188 /* search start position */
189 if (ctx->pos > 2) {
190 u64 count = div_u64(i_size_read(dir), sizeof(struct dentry *));
191 while (count > 0) {
192 u64 step = count >> 1;
193 dentry = __dcache_find_get_entry(parent, idx + step,
194 &cache_ctl);
195 if (!dentry) {
196 /* use linar search */
197 idx = 0;
198 break;
199 }
200 if (IS_ERR(dentry)) {
201 err = PTR_ERR(dentry);
202 goto out;
203 }
204 di = ceph_dentry(dentry);
205 spin_lock(&dentry->d_lock);
206 if (fpos_cmp(di->offset, ctx->pos) < 0) {
207 idx += step + 1;
208 count -= step + 1;
209 } else {
210 count = step;
211 }
212 spin_unlock(&dentry->d_lock);
213 dput(dentry);
214 }
215
216 dout("__dcache_readdir %p cache idx %llu\n", dir, idx);
217 }
218
219
220 for (;;) {
221 bool emit_dentry = false;
222 dentry = __dcache_find_get_entry(parent, idx++, &cache_ctl);
223 if (!dentry) {
224 fi->flags |= CEPH_F_ATEND;
225 err = 0;
226 break;
227 }
228 if (IS_ERR(dentry)) {
229 err = PTR_ERR(dentry);
230 goto out;
231 }
232
233 di = ceph_dentry(dentry);
234 spin_lock(&dentry->d_lock);
235 if (di->lease_shared_gen == shared_gen &&
236 d_really_is_positive(dentry) &&
237 fpos_cmp(ctx->pos, di->offset) <= 0) {
238 emit_dentry = true;
239 }
240 spin_unlock(&dentry->d_lock);
241
242 if (emit_dentry) {
243 dout(" %llx dentry %p %pd %p\n", di->offset,
244 dentry, dentry, d_inode(dentry));
245 ctx->pos = di->offset;
246 if (!dir_emit(ctx, dentry->d_name.name,
247 dentry->d_name.len,
248 ceph_translate_ino(dentry->d_sb,
249 d_inode(dentry)->i_ino),
250 d_inode(dentry)->i_mode >> 12)) {
251 dput(dentry);
252 err = 0;
253 break;
254 }
255 ctx->pos++;
256
257 if (last)
258 dput(last);
259 last = dentry;
260 } else {
261 dput(dentry);
262 }
263 }
264out:
265 ceph_readdir_cache_release(&cache_ctl);
266 if (last) {
267 int ret;
268 di = ceph_dentry(last);
269 ret = note_last_dentry(fi, last->d_name.name, last->d_name.len,
270 fpos_off(di->offset) + 1);
271 if (ret < 0)
272 err = ret;
273 dput(last);
274 }
275 return err;
276}
277
278static bool need_send_readdir(struct ceph_file_info *fi, loff_t pos)
279{
280 if (!fi->last_readdir)
281 return true;
282 if (is_hash_order(pos))
283 return !ceph_frag_contains_value(fi->frag, fpos_hash(pos));
284 else
285 return fi->frag != fpos_frag(pos);
286}
287
288static int ceph_readdir(struct file *file, struct dir_context *ctx)
289{
290 struct ceph_file_info *fi = file->private_data;
291 struct inode *inode = file_inode(file);
292 struct ceph_inode_info *ci = ceph_inode(inode);
293 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
294 struct ceph_mds_client *mdsc = fsc->mdsc;
295 int i;
296 int err;
297 u32 ftype;
298 struct ceph_mds_reply_info_parsed *rinfo;
299
300 dout("readdir %p file %p pos %llx\n", inode, file, ctx->pos);
301 if (fi->flags & CEPH_F_ATEND)
302 return 0;
303
304 /* always start with . and .. */
305 if (ctx->pos == 0) {
306 dout("readdir off 0 -> '.'\n");
307 if (!dir_emit(ctx, ".", 1,
308 ceph_translate_ino(inode->i_sb, inode->i_ino),
309 inode->i_mode >> 12))
310 return 0;
311 ctx->pos = 1;
312 }
313 if (ctx->pos == 1) {
314 ino_t ino = parent_ino(file->f_path.dentry);
315 dout("readdir off 1 -> '..'\n");
316 if (!dir_emit(ctx, "..", 2,
317 ceph_translate_ino(inode->i_sb, ino),
318 inode->i_mode >> 12))
319 return 0;
320 ctx->pos = 2;
321 }
322
323 /* can we use the dcache? */
324 spin_lock(&ci->i_ceph_lock);
325 if (ceph_test_mount_opt(fsc, DCACHE) &&
326 !ceph_test_mount_opt(fsc, NOASYNCREADDIR) &&
327 ceph_snap(inode) != CEPH_SNAPDIR &&
328 __ceph_dir_is_complete_ordered(ci) &&
329 __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) {
330 u32 shared_gen = ci->i_shared_gen;
331 spin_unlock(&ci->i_ceph_lock);
332 err = __dcache_readdir(file, ctx, shared_gen);
333 if (err != -EAGAIN)
334 return err;
335 } else {
336 spin_unlock(&ci->i_ceph_lock);
337 }
338
339 /* proceed with a normal readdir */
340more:
341 /* do we have the correct frag content buffered? */
342 if (need_send_readdir(fi, ctx->pos)) {
343 struct ceph_mds_request *req;
344 unsigned frag;
345 int op = ceph_snap(inode) == CEPH_SNAPDIR ?
346 CEPH_MDS_OP_LSSNAP : CEPH_MDS_OP_READDIR;
347
348 /* discard old result, if any */
349 if (fi->last_readdir) {
350 ceph_mdsc_put_request(fi->last_readdir);
351 fi->last_readdir = NULL;
352 }
353
354 if (is_hash_order(ctx->pos)) {
355 frag = ceph_choose_frag(ci, fpos_hash(ctx->pos),
356 NULL, NULL);
357 } else {
358 frag = fpos_frag(ctx->pos);
359 }
360
361 dout("readdir fetching %llx.%llx frag %x offset '%s'\n",
362 ceph_vinop(inode), frag, fi->last_name);
363 req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
364 if (IS_ERR(req))
365 return PTR_ERR(req);
366 err = ceph_alloc_readdir_reply_buffer(req, inode);
367 if (err) {
368 ceph_mdsc_put_request(req);
369 return err;
370 }
371 /* hints to request -> mds selection code */
372 req->r_direct_mode = USE_AUTH_MDS;
373 req->r_direct_hash = ceph_frag_value(frag);
374 req->r_direct_is_hash = true;
375 if (fi->last_name) {
376 req->r_path2 = kstrdup(fi->last_name, GFP_KERNEL);
377 if (!req->r_path2) {
378 ceph_mdsc_put_request(req);
379 return -ENOMEM;
380 }
381 }
382 req->r_dir_release_cnt = fi->dir_release_count;
383 req->r_dir_ordered_cnt = fi->dir_ordered_count;
384 req->r_readdir_cache_idx = fi->readdir_cache_idx;
385 req->r_readdir_offset = fi->next_offset;
386 req->r_args.readdir.frag = cpu_to_le32(frag);
387 req->r_args.readdir.flags =
388 cpu_to_le16(CEPH_READDIR_REPLY_BITFLAGS);
389
390 req->r_inode = inode;
391 ihold(inode);
392 req->r_dentry = dget(file->f_path.dentry);
393 err = ceph_mdsc_do_request(mdsc, NULL, req);
394 if (err < 0) {
395 ceph_mdsc_put_request(req);
396 return err;
397 }
398 dout("readdir got and parsed readdir result=%d on "
399 "frag %x, end=%d, complete=%d, hash_order=%d\n",
400 err, frag,
401 (int)req->r_reply_info.dir_end,
402 (int)req->r_reply_info.dir_complete,
403 (int)req->r_reply_info.hash_order);
404
405 rinfo = &req->r_reply_info;
406 if (le32_to_cpu(rinfo->dir_dir->frag) != frag) {
407 frag = le32_to_cpu(rinfo->dir_dir->frag);
408 if (!rinfo->hash_order) {
409 fi->next_offset = req->r_readdir_offset;
410 /* adjust ctx->pos to beginning of frag */
411 ctx->pos = ceph_make_fpos(frag,
412 fi->next_offset,
413 false);
414 }
415 }
416
417 fi->frag = frag;
418 fi->last_readdir = req;
419
420 if (req->r_did_prepopulate) {
421 fi->readdir_cache_idx = req->r_readdir_cache_idx;
422 if (fi->readdir_cache_idx < 0) {
423 /* preclude from marking dir ordered */
424 fi->dir_ordered_count = 0;
425 } else if (ceph_frag_is_leftmost(frag) &&
426 fi->next_offset == 2) {
427 /* note dir version at start of readdir so
428 * we can tell if any dentries get dropped */
429 fi->dir_release_count = req->r_dir_release_cnt;
430 fi->dir_ordered_count = req->r_dir_ordered_cnt;
431 }
432 } else {
433 dout("readdir !did_prepopulate");
434 /* disable readdir cache */
435 fi->readdir_cache_idx = -1;
436 /* preclude from marking dir complete */
437 fi->dir_release_count = 0;
438 }
439
440 /* note next offset and last dentry name */
441 if (rinfo->dir_nr > 0) {
442 struct ceph_mds_reply_dir_entry *rde =
443 rinfo->dir_entries + (rinfo->dir_nr-1);
444 unsigned next_offset = req->r_reply_info.dir_end ?
445 2 : (fpos_off(rde->offset) + 1);
446 err = note_last_dentry(fi, rde->name, rde->name_len,
447 next_offset);
448 if (err)
449 return err;
450 } else if (req->r_reply_info.dir_end) {
451 fi->next_offset = 2;
452 /* keep last name */
453 }
454 }
455
456 rinfo = &fi->last_readdir->r_reply_info;
457 dout("readdir frag %x num %d pos %llx chunk first %llx\n",
458 fi->frag, rinfo->dir_nr, ctx->pos,
459 rinfo->dir_nr ? rinfo->dir_entries[0].offset : 0LL);
460
461 i = 0;
462 /* search start position */
463 if (rinfo->dir_nr > 0) {
464 int step, nr = rinfo->dir_nr;
465 while (nr > 0) {
466 step = nr >> 1;
467 if (rinfo->dir_entries[i + step].offset < ctx->pos) {
468 i += step + 1;
469 nr -= step + 1;
470 } else {
471 nr = step;
472 }
473 }
474 }
475 for (; i < rinfo->dir_nr; i++) {
476 struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i;
477 struct ceph_vino vino;
478 ino_t ino;
479
480 BUG_ON(rde->offset < ctx->pos);
481
482 ctx->pos = rde->offset;
483 dout("readdir (%d/%d) -> %llx '%.*s' %p\n",
484 i, rinfo->dir_nr, ctx->pos,
485 rde->name_len, rde->name, &rde->inode.in);
486
487 BUG_ON(!rde->inode.in);
488 ftype = le32_to_cpu(rde->inode.in->mode) >> 12;
489 vino.ino = le64_to_cpu(rde->inode.in->ino);
490 vino.snap = le64_to_cpu(rde->inode.in->snapid);
491 ino = ceph_vino_to_ino(vino);
492
493 if (!dir_emit(ctx, rde->name, rde->name_len,
494 ceph_translate_ino(inode->i_sb, ino), ftype)) {
495 dout("filldir stopping us...\n");
496 return 0;
497 }
498 ctx->pos++;
499 }
500
501 if (fi->next_offset > 2) {
502 ceph_mdsc_put_request(fi->last_readdir);
503 fi->last_readdir = NULL;
504 goto more;
505 }
506
507 /* more frags? */
508 if (!ceph_frag_is_rightmost(fi->frag)) {
509 unsigned frag = ceph_frag_next(fi->frag);
510 if (is_hash_order(ctx->pos)) {
511 loff_t new_pos = ceph_make_fpos(ceph_frag_value(frag),
512 fi->next_offset, true);
513 if (new_pos > ctx->pos)
514 ctx->pos = new_pos;
515 /* keep last_name */
516 } else {
517 ctx->pos = ceph_make_fpos(frag, fi->next_offset, false);
518 kfree(fi->last_name);
519 fi->last_name = NULL;
520 }
521 dout("readdir next frag is %x\n", frag);
522 goto more;
523 }
524 fi->flags |= CEPH_F_ATEND;
525
526 /*
527 * if dir_release_count still matches the dir, no dentries
528 * were released during the whole readdir, and we should have
529 * the complete dir contents in our cache.
530 */
531 if (atomic64_read(&ci->i_release_count) == fi->dir_release_count) {
532 spin_lock(&ci->i_ceph_lock);
533 if (fi->dir_ordered_count == atomic64_read(&ci->i_ordered_count)) {
534 dout(" marking %p complete and ordered\n", inode);
535 /* use i_size to track number of entries in
536 * readdir cache */
537 BUG_ON(fi->readdir_cache_idx < 0);
538 i_size_write(inode, fi->readdir_cache_idx *
539 sizeof(struct dentry*));
540 } else {
541 dout(" marking %p complete\n", inode);
542 }
543 __ceph_dir_set_complete(ci, fi->dir_release_count,
544 fi->dir_ordered_count);
545 spin_unlock(&ci->i_ceph_lock);
546 }
547
548 dout("readdir %p file %p done.\n", inode, file);
549 return 0;
550}
551
552static void reset_readdir(struct ceph_file_info *fi)
553{
554 if (fi->last_readdir) {
555 ceph_mdsc_put_request(fi->last_readdir);
556 fi->last_readdir = NULL;
557 }
558 kfree(fi->last_name);
559 fi->last_name = NULL;
560 fi->dir_release_count = 0;
561 fi->readdir_cache_idx = -1;
562 fi->next_offset = 2; /* compensate for . and .. */
563 fi->flags &= ~CEPH_F_ATEND;
564}
565
566/*
567 * discard buffered readdir content on seekdir(0), or seek to new frag,
568 * or seek prior to current chunk
569 */
570static bool need_reset_readdir(struct ceph_file_info *fi, loff_t new_pos)
571{
572 struct ceph_mds_reply_info_parsed *rinfo;
573 loff_t chunk_offset;
574 if (new_pos == 0)
575 return true;
576 if (is_hash_order(new_pos)) {
577 /* no need to reset last_name for a forward seek when
578 * dentries are sotred in hash order */
579 } else if (fi->frag != fpos_frag(new_pos)) {
580 return true;
581 }
582 rinfo = fi->last_readdir ? &fi->last_readdir->r_reply_info : NULL;
583 if (!rinfo || !rinfo->dir_nr)
584 return true;
585 chunk_offset = rinfo->dir_entries[0].offset;
586 return new_pos < chunk_offset ||
587 is_hash_order(new_pos) != is_hash_order(chunk_offset);
588}

static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int whence)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file->f_mapping->host;
	loff_t retval;

	inode_lock(inode);
	retval = -EINVAL;
	switch (whence) {
	case SEEK_CUR:
		offset += file->f_pos;
		/* fall through */
	case SEEK_SET:
		break;
	case SEEK_END:
		retval = -EOPNOTSUPP;
		/* fall through */
	default:
		goto out;
	}

	if (offset >= 0) {
		if (need_reset_readdir(fi, offset)) {
			dout("dir_llseek dropping %p content\n", file);
			reset_readdir(fi);
		} else if (is_hash_order(offset) && offset > file->f_pos) {
			/* for hash offset, we don't know if a forward seek
			 * is within same frag */
			fi->dir_release_count = 0;
			fi->readdir_cache_idx = -1;
		}

		if (offset != file->f_pos) {
			file->f_pos = offset;
			file->f_version = 0;
			fi->flags &= ~CEPH_F_ATEND;
		}
		retval = offset;
	}
out:
	inode_unlock(inode);
	return retval;
}
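
/*
 * Illustrative userspace sketch (hypothetical path): SEEK_END is
 * rejected above, so an explicit lseek(2) to the end of a ceph
 * directory fails with EOPNOTSUPP, while SEEK_SET/SEEK_CUR behave as
 * seekdir(3) would.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/mnt/ceph/somedir", O_RDONLY | O_DIRECTORY);
 *
 *		if (fd < 0)
 *			return 1;
 *		if (lseek(fd, 0, SEEK_END) < 0)
 *			perror("lseek");	// expect EOPNOTSUPP
 *		return close(fd);
 *	}
 */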

/*
 * Handle lookups for the hidden .snap directory.
 */
int ceph_handle_snapdir(struct ceph_mds_request *req,
			struct dentry *dentry, int err)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
	struct inode *parent = d_inode(dentry->d_parent); /* we hold i_mutex */

	/* .snap dir? */
	if (err == -ENOENT &&
	    ceph_snap(parent) == CEPH_NOSNAP &&
	    strcmp(dentry->d_name.name,
		   fsc->mount_options->snapdir_name) == 0) {
		struct inode *inode = ceph_get_snapdir(parent);
		dout("ENOENT on snapdir %p '%pd', linking to snapdir %p\n",
		     dentry, dentry, inode);
		BUG_ON(!d_unhashed(dentry));
		d_add(dentry, inode);
		err = 0;
	}
	return err;
}
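
/*
 * Illustrative userspace sketch (hypothetical path; ".snap" is the
 * default snapdir name and is configurable with the snapdir mount
 * option): the MDS itself knows nothing about ".snap", so the -ENOENT
 * it returns for the lookup is rewritten above into a synthetic
 * snapdir inode.
 *
 *	#include <sys/stat.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		struct stat st;
 *
 *		if (stat("/mnt/ceph/somedir/.snap", &st) != 0)
 *			return 1;
 *		printf("snapdir ino %llu\n", (unsigned long long)st.st_ino);
 *		return 0;
 *	}
 */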

/*
 * Figure out final result of a lookup/open request.
 *
 * Mainly, make sure we return the final req->r_dentry (if it already
 * existed) in place of the original VFS-provided dentry when they
 * differ.
 *
 * Gracefully handle the case where the MDS replies with -ENOENT and
 * no trace (which it may do, at its discretion, e.g., if it doesn't
 * care to issue a lease on the negative dentry).
 */
struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
				  struct dentry *dentry, int err)
{
	if (err == -ENOENT) {
		/* no trace? */
		err = 0;
		if (!req->r_reply_info.head->is_dentry) {
			dout("ENOENT and no trace, dentry %p inode %p\n",
			     dentry, d_inode(dentry));
			if (d_really_is_positive(dentry)) {
				d_drop(dentry);
				err = -ENOENT;
			} else {
				d_add(dentry, NULL);
			}
		}
	}
	if (err)
		dentry = ERR_PTR(err);
	else if (dentry != req->r_dentry)
		dentry = dget(req->r_dentry);	/* we got spliced */
	else
		dentry = NULL;
	return dentry;
}

static bool is_root_ceph_dentry(struct inode *inode, struct dentry *dentry)
{
	return ceph_ino(inode) == CEPH_INO_ROOT &&
	       strncmp(dentry->d_name.name, ".ceph", 5) == 0;
}

/*
 * Look up a single dir entry.  If there is a lookup intent, inform
 * the MDS so that it gets our 'caps wanted' value in a single op.
 */
static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
				  unsigned int flags)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int op;
	int mask;
	int err;

	dout("lookup %p dentry %p '%pd'\n",
	     dir, dentry, dentry);

	if (dentry->d_name.len > NAME_MAX)
		return ERR_PTR(-ENAMETOOLONG);

	/* can we conclude ENOENT locally? */
	if (d_really_is_negative(dentry)) {
		struct ceph_inode_info *ci = ceph_inode(dir);
		struct ceph_dentry_info *di = ceph_dentry(dentry);

		spin_lock(&ci->i_ceph_lock);
		dout(" dir %p flags are %d\n", dir, ci->i_ceph_flags);
		if (strncmp(dentry->d_name.name,
			    fsc->mount_options->snapdir_name,
			    dentry->d_name.len) &&
		    !is_root_ceph_dentry(dir, dentry) &&
		    ceph_test_mount_opt(fsc, DCACHE) &&
		    __ceph_dir_is_complete(ci) &&
		    (__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1))) {
			spin_unlock(&ci->i_ceph_lock);
			dout(" dir %p complete, -ENOENT\n", dir);
			d_add(dentry, NULL);
			di->lease_shared_gen = ci->i_shared_gen;
			return NULL;
		}
		spin_unlock(&ci->i_ceph_lock);
	}

	op = ceph_snap(dir) == CEPH_SNAPDIR ?
		CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_LOOKUP;
	req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS);
	if (IS_ERR(req))
		return ERR_CAST(req);
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;

	mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
	if (ceph_security_xattr_wanted(dir))
		mask |= CEPH_CAP_XATTR_SHARED;
	req->r_args.getattr.mask = cpu_to_le32(mask);

	req->r_locked_dir = dir;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	err = ceph_handle_snapdir(req, dentry, err);
	dentry = ceph_finish_lookup(req, dentry, err);
	ceph_mdsc_put_request(req);  /* will dput(dentry) */
	dout("lookup result=%p\n", dentry);
	return dentry;
}

/*
 * If we do a create but get no trace back from the MDS, follow up with
 * a lookup (the VFS expects us to link up the provided dentry).
 */
int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry)
{
	struct dentry *result = ceph_lookup(dir, dentry, 0);

	if (result && !IS_ERR(result)) {
		/*
		 * We created the item, then did a lookup, and found
		 * it was already linked to another inode we already
		 * had in our cache (and thus got spliced).  To avoid
		 * confusing the VFS (especially when the inode is a
		 * directory), we don't link our dentry to that inode;
		 * we return an error instead.
		 *
		 * This should be rare: it happens only when we talk
		 * to an old MDS.  Recent MDS versions do not send a
		 * traceless reply for a request that creates a new
		 * inode.
		 */
		d_drop(result);
		return -ESTALE;
	}
	return PTR_ERR(result);
}

static int ceph_mknod(struct inode *dir, struct dentry *dentry,
		      umode_t mode, dev_t rdev)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct ceph_acls_info acls = {};
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	err = ceph_pre_init_acls(dir, &mode, &acls);
	if (err < 0)
		return err;

	dout("mknod in dir %p dentry %p mode 0%ho rdev %d\n",
	     dir, dentry, mode, rdev);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_MKNOD, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_args.mknod.mode = cpu_to_le32(mode);
	req->r_args.mknod.rdev = cpu_to_le32(rdev);
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	if (acls.pagelist) {
		req->r_pagelist = acls.pagelist;
		acls.pagelist = NULL;
	}
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
out:
	if (!err)
		ceph_init_inode_acls(d_inode(dentry), &acls);
	else
		d_drop(dentry);
	ceph_release_acls_info(&acls);
	return err;
}

static int ceph_create(struct inode *dir, struct dentry *dentry, umode_t mode,
		       bool excl)
{
	return ceph_mknod(dir, dentry, mode, 0);
}

static int ceph_symlink(struct inode *dir, struct dentry *dentry,
			const char *dest)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("symlink in dir %p dentry %p to '%s'\n", dir, dentry, dest);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SYMLINK, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_path2 = kstrdup(dest, GFP_KERNEL);
	if (!req->r_path2) {
		err = -ENOMEM;
		ceph_mdsc_put_request(req);
		goto out;
	}
	req->r_locked_dir = dir;
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
out:
	if (err)
		d_drop(dentry);
	return err;
}

static int ceph_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct ceph_acls_info acls = {};
	int err = -EROFS;
	int op;

	if (ceph_snap(dir) == CEPH_SNAPDIR) {
		/* mkdir .snap/foo is a MKSNAP */
		op = CEPH_MDS_OP_MKSNAP;
		dout("mksnap dir %p snap '%pd' dn %p\n", dir,
		     dentry, dentry);
	} else if (ceph_snap(dir) == CEPH_NOSNAP) {
		dout("mkdir dir %p dn %p mode 0%ho\n", dir, dentry, mode);
		op = CEPH_MDS_OP_MKDIR;
	} else {
		goto out;
	}

	mode |= S_IFDIR;
	err = ceph_pre_init_acls(dir, &mode, &acls);
	if (err < 0)
		goto out;

	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_args.mkdir.mode = cpu_to_le32(mode);
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	if (acls.pagelist) {
		req->r_pagelist = acls.pagelist;
		acls.pagelist = NULL;
	}
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err &&
	    !req->r_reply_info.head->is_target &&
	    !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
out:
	if (!err)
		ceph_init_inode_acls(d_inode(dentry), &acls);
	else
		d_drop(dentry);
	ceph_release_acls_info(&acls);
	return err;
}
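
/*
 * Illustrative userspace sketch (hypothetical paths): because mkdir
 * inside the snapdir is translated to CEPH_MDS_OP_MKSNAP above,
 * creating a snapshot is just mkdir(2).
 *
 *	#include <sys/types.h>
 *	#include <sys/stat.h>
 *
 *	int main(void)
 *	{
 *		// becomes MKSNAP, not a regular MKDIR
 *		return mkdir("/mnt/ceph/somedir/.snap/mysnap", 0755);
 *	}
 */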

static int ceph_link(struct dentry *old_dentry, struct inode *dir,
		     struct dentry *dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("link in dir %p old_dentry %p dentry %p\n", dir,
	     old_dentry, dentry);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LINK, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		d_drop(dentry);
		return PTR_ERR(req);
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_old_dentry = dget(old_dentry);
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	/* release LINK_SHARED on source inode (mds will lock it) */
	req->r_old_inode_drop = CEPH_CAP_LINK_SHARED;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (err) {
		d_drop(dentry);
	} else if (!req->r_reply_info.head->is_dentry) {
		ihold(d_inode(old_dentry));
		d_instantiate(dentry, d_inode(old_dentry));
	}
	ceph_mdsc_put_request(req);
	return err;
}

/*
 * For a soon-to-be unlinked file, drop the LINK caps.  If it
 * looks like the link count will hit 0, drop any other caps (other
 * than PIN) we don't specifically want (due to the file still being
 * open).
 */
static int drop_caps_for_unlink(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;

	spin_lock(&ci->i_ceph_lock);
	if (inode->i_nlink == 1) {
		drop |= ~(__ceph_caps_wanted(ci) | CEPH_CAP_PIN);
		ci->i_ceph_flags |= CEPH_I_NODELAY;
	}
	spin_unlock(&ci->i_ceph_lock);
	return drop;
}
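
/*
 * Worked example (a sketch, not normative): with i_nlink > 1 only the
 * LINK caps are dropped,
 *
 *	drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;
 *
 * but when the last link is going away and, say, the file is still
 * wanted for read (__ceph_caps_wanted() == CEPH_CAP_FILE_RD), the
 * final mask releases everything except what is wanted plus PIN:
 *
 *	drop |= ~(CEPH_CAP_FILE_RD | CEPH_CAP_PIN);
 */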

/*
 * rmdir and unlink differ only in the metadata op code.
 */
static int ceph_unlink(struct inode *dir, struct dentry *dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct inode *inode = d_inode(dentry);
	struct ceph_mds_request *req;
	int err = -EROFS;
	int op;

	if (ceph_snap(dir) == CEPH_SNAPDIR) {
		/* rmdir .snap/foo is RMSNAP */
		dout("rmsnap dir %p '%pd' dn %p\n", dir, dentry, dentry);
		op = CEPH_MDS_OP_RMSNAP;
	} else if (ceph_snap(dir) == CEPH_NOSNAP) {
		dout("unlink/rmdir dir %p dn %p inode %p\n",
		     dir, dentry, inode);
		op = d_is_dir(dentry) ?
			CEPH_MDS_OP_RMDIR : CEPH_MDS_OP_UNLINK;
	} else
		goto out;
	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	req->r_inode_drop = drop_caps_for_unlink(inode);
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		d_delete(dentry);
	ceph_mdsc_put_request(req);
out:
	return err;
}
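
/*
 * Illustrative userspace sketch (hypothetical path): rmdir inside the
 * snapdir becomes CEPH_MDS_OP_RMSNAP above, so a snapshot is deleted
 * with plain rmdir(2).
 *
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		// becomes RMSNAP, not a regular RMDIR
 *		return rmdir("/mnt/ceph/somedir/.snap/mysnap");
 *	}
 */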

static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry,
		       struct inode *new_dir, struct dentry *new_dentry,
		       unsigned int flags)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(old_dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int op = CEPH_MDS_OP_RENAME;
	int err;

	if (flags)
		return -EINVAL;

	if (ceph_snap(old_dir) != ceph_snap(new_dir))
		return -EXDEV;
	if (ceph_snap(old_dir) != CEPH_NOSNAP) {
		if (old_dir == new_dir && ceph_snap(old_dir) == CEPH_SNAPDIR)
			op = CEPH_MDS_OP_RENAMESNAP;
		else
			return -EROFS;
	}
	dout("rename dir %p dentry %p to dir %p dentry %p\n",
	     old_dir, old_dentry, new_dir, new_dentry);
	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);
	ihold(old_dir);
	req->r_dentry = dget(new_dentry);
	req->r_num_caps = 2;
	req->r_old_dentry = dget(old_dentry);
	req->r_old_dentry_dir = old_dir;
	req->r_locked_dir = new_dir;
	req->r_old_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_old_dentry_unless = CEPH_CAP_FILE_EXCL;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	/* release LINK_SHARED on source inode (mds will lock it) */
	req->r_old_inode_drop = CEPH_CAP_LINK_SHARED;
	if (d_really_is_positive(new_dentry))
		req->r_inode_drop = drop_caps_for_unlink(d_inode(new_dentry));
	err = ceph_mdsc_do_request(mdsc, old_dir, req);
	if (!err && !req->r_reply_info.head->is_dentry) {
		/*
		 * Normally d_move() is done by fill_trace (called by
		 * do_request, above).  If there is no trace, we need
		 * to do it here.
		 */

		/* d_move screws up sibling dentries' offsets */
		ceph_dir_clear_complete(old_dir);
		ceph_dir_clear_complete(new_dir);

		d_move(old_dentry, new_dentry);

		/* ensure target dentry is invalidated, despite
		   rehashing bug in vfs_rename_dir */
		ceph_invalidate_dentry_lease(new_dentry);
	}
	ceph_mdsc_put_request(req);
	return err;
}
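
/*
 * Illustrative userspace sketch (hypothetical paths): a rename within
 * the same snapdir is translated to CEPH_MDS_OP_RENAMESNAP above, so
 * snapshots are renamed with plain rename(2); renaming across
 * snapshot boundaries fails with EXDEV or EROFS as coded above.
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		return rename("/mnt/ceph/somedir/.snap/old-name",
 *			      "/mnt/ceph/somedir/.snap/new-name");
 *	}
 */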

/*
 * Ensure a dentry lease will no longer revalidate.
 */
void ceph_invalidate_dentry_lease(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	ceph_dentry(dentry)->time = jiffies;
	ceph_dentry(dentry)->lease_shared_gen = 0;
	spin_unlock(&dentry->d_lock);
}

/*
 * Check if dentry lease is valid.  If not, delete the lease.  Try to
 * renew if the lease is more than half up.
 */
static int dentry_lease_is_valid(struct dentry *dentry, unsigned int flags,
				 struct inode *dir)
{
	struct ceph_dentry_info *di;
	struct ceph_mds_session *s;
	int valid = 0;
	u32 gen;
	unsigned long ttl;
	struct ceph_mds_session *session = NULL;
	u32 seq = 0;

	spin_lock(&dentry->d_lock);
	di = ceph_dentry(dentry);
	if (di && di->lease_session) {
		s = di->lease_session;
		spin_lock(&s->s_gen_ttl_lock);
		gen = s->s_cap_gen;
		ttl = s->s_cap_ttl;
		spin_unlock(&s->s_gen_ttl_lock);

		if (di->lease_gen == gen &&
		    time_before(jiffies, di->time) &&
		    time_before(jiffies, ttl)) {
			valid = 1;
			if (di->lease_renew_after &&
			    time_after(jiffies, di->lease_renew_after)) {
				/*
				 * We should renew.  If we're in RCU walk mode
				 * though, we can't do that so just return
				 * -ECHILD.
				 */
				if (flags & LOOKUP_RCU) {
					valid = -ECHILD;
				} else {
					session = ceph_get_mds_session(s);
					seq = di->lease_seq;
					di->lease_renew_after = 0;
					di->lease_renew_from = jiffies;
				}
			}
		}
	}
	spin_unlock(&dentry->d_lock);

	if (session) {
		ceph_mdsc_lease_send_msg(session, dir, dentry,
					 CEPH_MDS_LEASE_RENEW, seq);
		ceph_put_mds_session(session);
	}
	dout("dentry_lease_is_valid - dentry %p = %d\n", dentry, valid);
	return valid;
}

/*
 * Check if directory-wide content lease/cap is valid.
 */
static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry)
{
	struct ceph_inode_info *ci = ceph_inode(dir);
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	int valid = 0;

	spin_lock(&ci->i_ceph_lock);
	if (ci->i_shared_gen == di->lease_shared_gen)
		valid = __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1);
	spin_unlock(&ci->i_ceph_lock);
	dout("dir_lease_is_valid dir %p v%u dentry %p v%u = %d\n",
	     dir, (unsigned)ci->i_shared_gen, dentry,
	     (unsigned)di->lease_shared_gen, valid);
	return valid;
}

/*
 * Check if cached dentry can be trusted.
 */
static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
{
	int valid = 0;
	struct dentry *parent;
	struct inode *dir;

	if (flags & LOOKUP_RCU) {
		parent = ACCESS_ONCE(dentry->d_parent);
		dir = d_inode_rcu(parent);
		if (!dir)
			return -ECHILD;
	} else {
		parent = dget_parent(dentry);
		dir = d_inode(parent);
	}

	dout("d_revalidate %p '%pd' inode %p offset %lld\n", dentry,
	     dentry, d_inode(dentry), ceph_dentry(dentry)->offset);

	/* always trust cached snapped dentries, snapdir dentry */
	if (ceph_snap(dir) != CEPH_NOSNAP) {
		dout("d_revalidate %p '%pd' inode %p is SNAPPED\n", dentry,
		     dentry, d_inode(dentry));
		valid = 1;
	} else if (d_really_is_positive(dentry) &&
		   ceph_snap(d_inode(dentry)) == CEPH_SNAPDIR) {
		valid = 1;
	} else {
		valid = dentry_lease_is_valid(dentry, flags, dir);
		if (valid == -ECHILD)
			return valid;
		if (valid || dir_lease_is_valid(dir, dentry)) {
			if (d_really_is_positive(dentry))
				valid = ceph_is_any_caps(d_inode(dentry));
			else
				valid = 1;
		}
	}

	if (!valid) {
		struct ceph_mds_client *mdsc =
			ceph_sb_to_client(dir->i_sb)->mdsc;
		struct ceph_mds_request *req;
		int op, err;
		u32 mask;

		if (flags & LOOKUP_RCU)
			return -ECHILD;

		op = ceph_snap(dir) == CEPH_SNAPDIR ?
			CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_GETATTR;
		req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS);
		if (!IS_ERR(req)) {
			req->r_dentry = dget(dentry);
			req->r_num_caps = op == CEPH_MDS_OP_GETATTR ? 1 : 2;

			mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
			if (ceph_security_xattr_wanted(dir))
				mask |= CEPH_CAP_XATTR_SHARED;
			req->r_args.getattr.mask = cpu_to_le32(mask);

			err = ceph_mdsc_do_request(mdsc, NULL, req);
			switch (err) {
			case 0:
				if (d_really_is_positive(dentry) &&
				    d_inode(dentry) == req->r_target_inode)
					valid = 1;
				break;
			case -ENOENT:
				if (d_really_is_negative(dentry))
					valid = 1;
				/* Fallthrough */
			default:
				break;
			}
			ceph_mdsc_put_request(req);
			dout("d_revalidate %p lookup result=%d\n",
			     dentry, err);
		}
	}

	dout("d_revalidate %p %s\n", dentry, valid ? "valid" : "invalid");
	if (valid) {
		ceph_dentry_lru_touch(dentry);
	} else {
		ceph_dir_clear_complete(dir);
	}

	if (!(flags & LOOKUP_RCU))
		dput(parent);
	return valid;
}

/*
 * Release our ceph_dentry_info.
 */
static void ceph_d_release(struct dentry *dentry)
{
	struct ceph_dentry_info *di = ceph_dentry(dentry);

	dout("d_release %p\n", dentry);
	ceph_dentry_lru_del(dentry);

	spin_lock(&dentry->d_lock);
	dentry->d_fsdata = NULL;
	spin_unlock(&dentry->d_lock);

	if (di->lease_session)
		ceph_put_mds_session(di->lease_session);
	kmem_cache_free(ceph_dentry_cachep, di);
}

/*
 * When the VFS prunes a dentry from the cache, we need to clear the
 * complete flag on the parent directory.
 *
 * Called under dentry->d_lock.
 */
static void ceph_d_prune(struct dentry *dentry)
{
	dout("ceph_d_prune %p\n", dentry);

	/* do we have a valid parent? */
	if (IS_ROOT(dentry))
		return;

	/* if we are not hashed, we don't affect dir's completeness */
	if (d_unhashed(dentry))
		return;

	if (ceph_snap(d_inode(dentry->d_parent)) == CEPH_SNAPDIR)
		return;

	/*
	 * we hold d_lock, so d_parent is stable, and d_fsdata is never
	 * cleared until d_release
	 */
	ceph_dir_clear_complete(d_inode(dentry->d_parent));
}

/*
 * read() on a dir.  This weird interface hack only works if mounted
 * with '-o dirstat'.
 */
static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
			     loff_t *ppos)
{
	struct ceph_file_info *cf = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	int left;
	const int bufsize = 1024;

	if (!ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), DIRSTAT))
		return -EISDIR;

	if (!cf->dir_info) {
		cf->dir_info = kmalloc(bufsize, GFP_KERNEL);
		if (!cf->dir_info)
			return -ENOMEM;
		cf->dir_info_len =
			snprintf(cf->dir_info, bufsize,
				 "entries:   %20lld\n"
				 " files:    %20lld\n"
				 " subdirs:  %20lld\n"
				 "rentries:  %20lld\n"
				 " rfiles:   %20lld\n"
				 " rsubdirs: %20lld\n"
				 "rbytes:    %20lld\n"
				 "rctime:    %10ld.%09ld\n",
				 ci->i_files + ci->i_subdirs,
				 ci->i_files,
				 ci->i_subdirs,
				 ci->i_rfiles + ci->i_rsubdirs,
				 ci->i_rfiles,
				 ci->i_rsubdirs,
				 ci->i_rbytes,
				 (long)ci->i_rctime.tv_sec,
				 (long)ci->i_rctime.tv_nsec);
	}

	if (*ppos >= cf->dir_info_len)
		return 0;
	size = min_t(unsigned, size, cf->dir_info_len-*ppos);
	left = copy_to_user(buf, cf->dir_info + *ppos, size);
	if (left == size)
		return -EFAULT;
	*ppos += (size - left);
	return size - left;
}
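
/*
 * Illustrative userspace sketch (hypothetical path; requires a mount
 * with -o dirstat): read(2) on a directory fd returns the formatted
 * stats blob built above instead of failing with EISDIR.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		char buf[1024];
 *		ssize_t n;
 *		int fd = open("/mnt/ceph/somedir", O_RDONLY | O_DIRECTORY);
 *
 *		if (fd < 0)
 *			return 1;
 *		n = read(fd, buf, sizeof(buf) - 1);
 *		if (n > 0) {
 *			buf[n] = '\0';
 *			fputs(buf, stdout);
 *		}
 *		return close(fd);
 *	}
 */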

/*
 * We maintain a private dentry LRU.
 *
 * FIXME: this needs to be changed to a per-mds lru to be useful.
 */
void ceph_dentry_lru_add(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_add %p %p '%pd'\n", di, dn, dn);
	mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
	spin_lock(&mdsc->dentry_lru_lock);
	list_add_tail(&di->lru, &mdsc->dentry_lru);
	mdsc->num_dentry++;
	spin_unlock(&mdsc->dentry_lru_lock);
}

void ceph_dentry_lru_touch(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_touch %p %p '%pd' (offset %lld)\n", di, dn, dn,
	     di->offset);
	mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
	spin_lock(&mdsc->dentry_lru_lock);
	list_move_tail(&di->lru, &mdsc->dentry_lru);
	spin_unlock(&mdsc->dentry_lru_lock);
}

void ceph_dentry_lru_del(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_del %p %p '%pd'\n", di, dn, dn);
	mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
	spin_lock(&mdsc->dentry_lru_lock);
	list_del_init(&di->lru);
	mdsc->num_dentry--;
	spin_unlock(&mdsc->dentry_lru_lock);
}

/*
 * Return name hash for a given dentry.  This is dependent on
 * the parent directory's hash function.
 */
unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn)
{
	struct ceph_inode_info *dci = ceph_inode(dir);

	switch (dci->i_dir_layout.dl_dir_hash) {
	case 0:	/* for backward compat */
	case CEPH_STR_HASH_LINUX:
		return dn->d_name.hash;

	default:
		return ceph_str_hash(dci->i_dir_layout.dl_dir_hash,
				     dn->d_name.name, dn->d_name.len);
	}
}

const struct file_operations ceph_dir_fops = {
	.read = ceph_read_dir,
	.iterate = ceph_readdir,
	.llseek = ceph_dir_llseek,
	.open = ceph_open,
	.release = ceph_release,
	.unlocked_ioctl = ceph_ioctl,
	.fsync = ceph_fsync,
};

const struct file_operations ceph_snapdir_fops = {
	.iterate = ceph_readdir,
	.llseek = ceph_dir_llseek,
	.open = ceph_open,
	.release = ceph_release,
};

const struct inode_operations ceph_dir_iops = {
	.lookup = ceph_lookup,
	.permission = ceph_permission,
	.getattr = ceph_getattr,
	.setattr = ceph_setattr,
	.listxattr = ceph_listxattr,
	.get_acl = ceph_get_acl,
	.set_acl = ceph_set_acl,
	.mknod = ceph_mknod,
	.symlink = ceph_symlink,
	.mkdir = ceph_mkdir,
	.link = ceph_link,
	.unlink = ceph_unlink,
	.rmdir = ceph_unlink,
	.rename = ceph_rename,
	.create = ceph_create,
	.atomic_open = ceph_atomic_open,
};

const struct inode_operations ceph_snapdir_iops = {
	.lookup = ceph_lookup,
	.permission = ceph_permission,
	.getattr = ceph_getattr,
	.mkdir = ceph_mkdir,
	.rmdir = ceph_unlink,
	.rename = ceph_rename,
};

const struct dentry_operations ceph_dentry_ops = {
	.d_revalidate = ceph_d_revalidate,
	.d_release = ceph_d_release,
	.d_prune = ceph_d_prune,
	.d_init = ceph_d_init,
};