1
2#include <linux/ceph/ceph_debug.h>
3
4#include <linux/backing-dev.h>
5#include <linux/ctype.h>
6#include <linux/fs.h>
7#include <linux/inet.h>
8#include <linux/in6.h>
9#include <linux/module.h>
10#include <linux/mount.h>
11#include <linux/parser.h>
12#include <linux/sched.h>
13#include <linux/seq_file.h>
14#include <linux/slab.h>
15#include <linux/statfs.h>
16#include <linux/string.h>
17
18#include "super.h"
19#include "mds_client.h"
20
21#include <linux/ceph/decode.h>
22#include <linux/ceph/mon_client.h>
23#include <linux/ceph/auth.h>
24#include <linux/ceph/debugfs.h>
25
26/*
27 * Ceph superblock operations
28 *
29 * Handle the basics of mounting, unmounting.
30 */
31
32/*
33 * super ops
34 */
35static void ceph_put_super(struct super_block *s)
36{
37 struct ceph_fs_client *fsc = ceph_sb_to_client(s);
38
39 dout("put_super\n");
40 ceph_mdsc_close_sessions(fsc->mdsc);
41
42 /*
43 * ensure we release the bdi before put_anon_super releases
44 * the device name.
45 */
46 if (s->s_bdi == &fsc->backing_dev_info) {
47 bdi_unregister(&fsc->backing_dev_info);
48 s->s_bdi = NULL;
49 }
50
51 return;
52}
53
54static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
55{
56 struct ceph_fs_client *fsc = ceph_inode_to_client(dentry->d_inode);
57 struct ceph_monmap *monmap = fsc->client->monc.monmap;
58 struct ceph_statfs st;
59 u64 fsid;
60 int err;
61
62 dout("statfs\n");
63 err = ceph_monc_do_statfs(&fsc->client->monc, &st);
64 if (err < 0)
65 return err;
66
67 /* fill in kstatfs */
68 buf->f_type = CEPH_SUPER_MAGIC; /* ?? */
69
70 /*
71 * express utilization in terms of large blocks to avoid
72 * overflow on 32-bit machines.
73 */
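/*
 * Illustrative arithmetic (assuming CEPH_BLOCK_SHIFT == 20, i.e. 1 MB
 * blocks, as defined in super.h): a cluster reporting st.kb == 2097152
 * (2 GiB worth of KB) maps to f_blocks == 2097152 >> 10 == 2048 blocks.
 */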
74 buf->f_bsize = 1 << CEPH_BLOCK_SHIFT;
75 buf->f_blocks = le64_to_cpu(st.kb) >> (CEPH_BLOCK_SHIFT-10);
76 buf->f_bfree = le64_to_cpu(st.kb_avail) >> (CEPH_BLOCK_SHIFT-10);
77 buf->f_bavail = le64_to_cpu(st.kb_avail) >> (CEPH_BLOCK_SHIFT-10);
78
79 buf->f_files = le64_to_cpu(st.num_objects);
80 buf->f_ffree = -1;
81 buf->f_namelen = NAME_MAX;
82 buf->f_frsize = PAGE_CACHE_SIZE;
83
84 /* leave fsid little-endian, regardless of host endianness */
85 fsid = *(u64 *)(&monmap->fsid) ^ *((u64 *)&monmap->fsid + 1);
86 buf->f_fsid.val[0] = fsid & 0xffffffff;
87 buf->f_fsid.val[1] = fsid >> 32;
88
89 return 0;
90}
91
92
93static int ceph_sync_fs(struct super_block *sb, int wait)
94{
95 struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
96
97 if (!wait) {
98 dout("sync_fs (non-blocking)\n");
99 ceph_flush_dirty_caps(fsc->mdsc);
100 dout("sync_fs (non-blocking) done\n");
101 return 0;
102 }
103
104 dout("sync_fs (blocking)\n");
105 ceph_osdc_sync(&fsc->client->osdc);
106 ceph_mdsc_sync(fsc->mdsc);
107 dout("sync_fs (blocking) done\n");
108 return 0;
109}
110
111/*
112 * mount options
113 */
114enum {
115 Opt_wsize,
116 Opt_rsize,
117 Opt_caps_wanted_delay_min,
118 Opt_caps_wanted_delay_max,
119 Opt_cap_release_safety,
120 Opt_readdir_max_entries,
121 Opt_readdir_max_bytes,
122 Opt_congestion_kb,
123 Opt_last_int,
124 /* int args above */
125 Opt_snapdirname,
126 Opt_last_string,
127 /* string args above */
128 Opt_dirstat,
129 Opt_nodirstat,
130 Opt_rbytes,
131 Opt_norbytes,
132 Opt_noasyncreaddir,
133 Opt_ino32,
134};
135
136static match_table_t fsopt_tokens = {
137 {Opt_wsize, "wsize=%d"},
138 {Opt_rsize, "rsize=%d"},
139 {Opt_caps_wanted_delay_min, "caps_wanted_delay_min=%d"},
140 {Opt_caps_wanted_delay_max, "caps_wanted_delay_max=%d"},
141 {Opt_cap_release_safety, "cap_release_safety=%d"},
142 {Opt_readdir_max_entries, "readdir_max_entries=%d"},
143 {Opt_readdir_max_bytes, "readdir_max_bytes=%d"},
144 {Opt_congestion_kb, "write_congestion_kb=%d"},
145 /* int args above */
146 {Opt_snapdirname, "snapdirname=%s"},
147 /* string args above */
148 {Opt_dirstat, "dirstat"},
149 {Opt_nodirstat, "nodirstat"},
150 {Opt_rbytes, "rbytes"},
151 {Opt_norbytes, "norbytes"},
152 {Opt_noasyncreaddir, "noasyncreaddir"},
153 {Opt_ino32, "ino32"},
154 {-1, NULL}
155};
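/*
 * Illustrative example: an option string such as
 * "rsize=131072,noasyncreaddir,snapdirname=.snapshot" is split on commas
 * by ceph_parse_options(), which hands each token it does not handle
 * itself to parse_fsopt_token() below.
 */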
156
157static int parse_fsopt_token(char *c, void *private)
158{
159 struct ceph_mount_options *fsopt = private;
160 substring_t argstr[MAX_OPT_ARGS];
161 int token, intval, ret;
162
163 token = match_token((char *)c, fsopt_tokens, argstr);
164 if (token < 0)
165 return -EINVAL;
166
167 if (token < Opt_last_int) {
168 ret = match_int(&argstr[0], &intval);
169 if (ret < 0) {
170 pr_err("bad mount option arg (not int) "
171 "at '%s'\n", c);
172 return ret;
173 }
174 dout("got int token %d val %d\n", token, intval);
175 } else if (token > Opt_last_int && token < Opt_last_string) {
176 dout("got string token %d val %s\n", token,
177 argstr[0].from);
178 } else {
179 dout("got token %d\n", token);
180 }
181
182 switch (token) {
183 case Opt_snapdirname:
184 kfree(fsopt->snapdir_name);
185 fsopt->snapdir_name = kstrndup(argstr[0].from,
186 argstr[0].to-argstr[0].from,
187 GFP_KERNEL);
188 if (!fsopt->snapdir_name)
189 return -ENOMEM;
190 break;
191
192 /* misc */
193 case Opt_wsize:
194 fsopt->wsize = intval;
195 break;
196 case Opt_rsize:
197 fsopt->rsize = intval;
198 break;
199 case Opt_caps_wanted_delay_min:
200 fsopt->caps_wanted_delay_min = intval;
201 break;
202 case Opt_caps_wanted_delay_max:
203 fsopt->caps_wanted_delay_max = intval;
204 break;
205 case Opt_readdir_max_entries:
206 fsopt->max_readdir = intval;
207 break;
208 case Opt_readdir_max_bytes:
209 fsopt->max_readdir_bytes = intval;
210 break;
211 case Opt_congestion_kb:
212 fsopt->congestion_kb = intval;
213 break;
214 case Opt_dirstat:
215 fsopt->flags |= CEPH_MOUNT_OPT_DIRSTAT;
216 break;
217 case Opt_nodirstat:
218 fsopt->flags &= ~CEPH_MOUNT_OPT_DIRSTAT;
219 break;
220 case Opt_rbytes:
221 fsopt->flags |= CEPH_MOUNT_OPT_RBYTES;
222 break;
223 case Opt_norbytes:
224 fsopt->flags &= ~CEPH_MOUNT_OPT_RBYTES;
225 break;
226 case Opt_noasyncreaddir:
227 fsopt->flags |= CEPH_MOUNT_OPT_NOASYNCREADDIR;
228 break;
229 case Opt_ino32:
230 fsopt->flags |= CEPH_MOUNT_OPT_INO32;
231 break;
232 default:
233 BUG_ON(token);
234 }
235 return 0;
236}
237
238static void destroy_mount_options(struct ceph_mount_options *args)
239{
240 dout("destroy_mount_options %p\n", args);
241 kfree(args->snapdir_name);
242 kfree(args);
243}
244
245static int strcmp_null(const char *s1, const char *s2)
246{
247 if (!s1 && !s2)
248 return 0;
249 if (s1 && !s2)
250 return -1;
251 if (!s1 && s2)
252 return 1;
253 return strcmp(s1, s2);
254}
255
256static int compare_mount_options(struct ceph_mount_options *new_fsopt,
257 struct ceph_options *new_opt,
258 struct ceph_fs_client *fsc)
259{
260 struct ceph_mount_options *fsopt1 = new_fsopt;
261 struct ceph_mount_options *fsopt2 = fsc->mount_options;
262 int ofs = offsetof(struct ceph_mount_options, snapdir_name);
263 int ret;
264
265 ret = memcmp(fsopt1, fsopt2, ofs);
266 if (ret)
267 return ret;
268
269 ret = strcmp_null(fsopt1->snapdir_name, fsopt2->snapdir_name);
270 if (ret)
271 return ret;
272
273 return ceph_compare_options(new_opt, fsc->client);
274}
275
276static int parse_mount_options(struct ceph_mount_options **pfsopt,
277 struct ceph_options **popt,
278 int flags, char *options,
279 const char *dev_name,
280 const char **path)
281{
282 struct ceph_mount_options *fsopt;
283 const char *dev_name_end;
284 int err = -ENOMEM;
285
286 fsopt = kzalloc(sizeof(*fsopt), GFP_KERNEL);
287 if (!fsopt)
288 return -ENOMEM;
289
290 dout("parse_mount_options %p, dev_name '%s'\n", fsopt, dev_name);
291
292 fsopt->sb_flags = flags;
293 fsopt->flags = CEPH_MOUNT_OPT_DEFAULT;
294
295 fsopt->rsize = CEPH_RSIZE_DEFAULT;
296 fsopt->snapdir_name = kstrdup(CEPH_SNAPDIRNAME_DEFAULT, GFP_KERNEL);
297 fsopt->caps_wanted_delay_min = CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT;
298 fsopt->caps_wanted_delay_max = CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT;
299 fsopt->cap_release_safety = CEPH_CAP_RELEASE_SAFETY_DEFAULT;
300 fsopt->max_readdir = CEPH_MAX_READDIR_DEFAULT;
301 fsopt->max_readdir_bytes = CEPH_MAX_READDIR_BYTES_DEFAULT;
302 fsopt->congestion_kb = default_congestion_kb();
303
304 /* ip1[:port1][,ip2[:port2]...]:/subdir/in/fs */
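 /*
 * E.g. a dev_name of "192.168.0.1:6789,192.168.0.2:6789:/export/dir"
 * (made-up addresses) splits into the monitor list before the ":/" and
 * the path on the server after it.
 */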
305 err = -EINVAL;
306 if (!dev_name)
307 goto out;
308 *path = strstr(dev_name, ":/");
309 if (*path == NULL) {
310 pr_err("device name is missing path (no :/ in %s)\n",
311 dev_name);
312 goto out;
313 }
314 dev_name_end = *path;
315 dout("device name '%.*s'\n", (int)(dev_name_end - dev_name), dev_name);
316
317 /* path on server */
318 *path += 2;
319 dout("server path '%s'\n", *path);
320
321 err = ceph_parse_options(popt, options, dev_name, dev_name_end,
322 parse_fsopt_token, (void *)fsopt);
323 if (err)
324 goto out;
325
326 /* success */
327 *pfsopt = fsopt;
328 return 0;
329
330out:
331 destroy_mount_options(fsopt);
332 return err;
333}
334
335/**
336 * ceph_show_options - Show mount options in /proc/mounts
337 * @m: seq_file to write to
338 * @mnt: mount descriptor
339 */
340static int ceph_show_options(struct seq_file *m, struct vfsmount *mnt)
341{
342 struct ceph_fs_client *fsc = ceph_sb_to_client(mnt->mnt_sb);
343 struct ceph_mount_options *fsopt = fsc->mount_options;
344 struct ceph_options *opt = fsc->client->options;
345
346 if (opt->flags & CEPH_OPT_FSID)
347 seq_printf(m, ",fsid=%pU", &opt->fsid);
348 if (opt->flags & CEPH_OPT_NOSHARE)
349 seq_puts(m, ",noshare");
350 if (opt->flags & CEPH_OPT_NOCRC)
351 seq_puts(m, ",nocrc");
352
353 if (opt->name)
354 seq_printf(m, ",name=%s", opt->name);
355 if (opt->key)
356 seq_puts(m, ",secret=<hidden>");
357
358 if (opt->mount_timeout != CEPH_MOUNT_TIMEOUT_DEFAULT)
359 seq_printf(m, ",mount_timeout=%d", opt->mount_timeout);
360 if (opt->osd_idle_ttl != CEPH_OSD_IDLE_TTL_DEFAULT)
361 seq_printf(m, ",osd_idle_ttl=%d", opt->osd_idle_ttl);
362 if (opt->osd_timeout != CEPH_OSD_TIMEOUT_DEFAULT)
363 seq_printf(m, ",osdtimeout=%d", opt->osd_timeout);
364 if (opt->osd_keepalive_timeout != CEPH_OSD_KEEPALIVE_DEFAULT)
365 seq_printf(m, ",osdkeepalivetimeout=%d",
366 opt->osd_keepalive_timeout);
367
368 if (fsopt->flags & CEPH_MOUNT_OPT_DIRSTAT)
369 seq_puts(m, ",dirstat");
370 if ((fsopt->flags & CEPH_MOUNT_OPT_RBYTES) == 0)
371 seq_puts(m, ",norbytes");
372 if (fsopt->flags & CEPH_MOUNT_OPT_NOASYNCREADDIR)
373 seq_puts(m, ",noasyncreaddir");
374
375 if (fsopt->wsize)
376 seq_printf(m, ",wsize=%d", fsopt->wsize);
377 if (fsopt->rsize != CEPH_RSIZE_DEFAULT)
378 seq_printf(m, ",rsize=%d", fsopt->rsize);
379 if (fsopt->congestion_kb != default_congestion_kb())
380 seq_printf(m, ",write_congestion_kb=%d", fsopt->congestion_kb);
381 if (fsopt->caps_wanted_delay_min != CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT)
382 seq_printf(m, ",caps_wanted_delay_min=%d",
383 fsopt->caps_wanted_delay_min);
384 if (fsopt->caps_wanted_delay_max != CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT)
385 seq_printf(m, ",caps_wanted_delay_max=%d",
386 fsopt->caps_wanted_delay_max);
387 if (fsopt->cap_release_safety != CEPH_CAP_RELEASE_SAFETY_DEFAULT)
388 seq_printf(m, ",cap_release_safety=%d",
389 fsopt->cap_release_safety);
390 if (fsopt->max_readdir != CEPH_MAX_READDIR_DEFAULT)
391 seq_printf(m, ",readdir_max_entries=%d", fsopt->max_readdir);
392 if (fsopt->max_readdir_bytes != CEPH_MAX_READDIR_BYTES_DEFAULT)
393 seq_printf(m, ",readdir_max_bytes=%d", fsopt->max_readdir_bytes);
394 if (strcmp(fsopt->snapdir_name, CEPH_SNAPDIRNAME_DEFAULT))
395 seq_printf(m, ",snapdirname=%s", fsopt->snapdir_name);
396 return 0;
397}
398
399/*
400 * handle any mon messages the standard library doesn't understand.
401 * return error if we don't either.
402 */
403static int extra_mon_dispatch(struct ceph_client *client, struct ceph_msg *msg)
404{
405 struct ceph_fs_client *fsc = client->private;
406 int type = le16_to_cpu(msg->hdr.type);
407
408 switch (type) {
409 case CEPH_MSG_MDS_MAP:
410 ceph_mdsc_handle_map(fsc->mdsc, msg);
411 return 0;
412
413 default:
414 return -1;
415 }
416}
417
418/*
419 * create a new fs client
420 */
421struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt,
422 struct ceph_options *opt)
423{
424 struct ceph_fs_client *fsc;
425 int err = -ENOMEM;
426
427 fsc = kzalloc(sizeof(*fsc), GFP_KERNEL);
428 if (!fsc)
429 return ERR_PTR(-ENOMEM);
430
431 fsc->client = ceph_create_client(opt, fsc);
432 if (IS_ERR(fsc->client)) {
433 err = PTR_ERR(fsc->client);
434 goto fail;
435 }
436 fsc->client->extra_mon_dispatch = extra_mon_dispatch;
437 fsc->client->supported_features |= CEPH_FEATURE_FLOCK |
438 CEPH_FEATURE_DIRLAYOUTHASH;
439 fsc->client->monc.want_mdsmap = 1;
440
441 fsc->mount_options = fsopt;
442
443 fsc->sb = NULL;
444 fsc->mount_state = CEPH_MOUNT_MOUNTING;
445
446 atomic_long_set(&fsc->writeback_count, 0);
447
448 err = bdi_init(&fsc->backing_dev_info);
449 if (err < 0)
450 goto fail_client;
451
452 err = -ENOMEM;
453 /*
454 * The number of concurrent work items can be high, but they don't
455 * need to be processed in parallel, so limit concurrency.
456 */
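 /*
 * Note: alloc_workqueue()'s third argument is max_active; a value of 1
 * allows at most one work item in flight per CPU on each of these
 * queues.
 */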
457 fsc->wb_wq = alloc_workqueue("ceph-writeback", 0, 1);
458 if (fsc->wb_wq == NULL)
459 goto fail_bdi;
460 fsc->pg_inv_wq = alloc_workqueue("ceph-pg-invalid", 0, 1);
461 if (fsc->pg_inv_wq == NULL)
462 goto fail_wb_wq;
463 fsc->trunc_wq = alloc_workqueue("ceph-trunc", 0, 1);
464 if (fsc->trunc_wq == NULL)
465 goto fail_pg_inv_wq;
466
467 /* set up mempools */
468 err = -ENOMEM;
469 fsc->wb_pagevec_pool = mempool_create_kmalloc_pool(10,
470 fsc->mount_options->wsize >> PAGE_CACHE_SHIFT);
471 if (!fsc->wb_pagevec_pool)
472 goto fail_trunc_wq;
473
474 /* caps */
475 fsc->min_caps = fsopt->max_readdir;
476
477 return fsc;
478
479fail_trunc_wq:
480 destroy_workqueue(fsc->trunc_wq);
481fail_pg_inv_wq:
482 destroy_workqueue(fsc->pg_inv_wq);
483fail_wb_wq:
484 destroy_workqueue(fsc->wb_wq);
485fail_bdi:
486 bdi_destroy(&fsc->backing_dev_info);
487fail_client:
488 ceph_destroy_client(fsc->client);
489fail:
490 kfree(fsc);
491 return ERR_PTR(err);
492}
493
494void destroy_fs_client(struct ceph_fs_client *fsc)
495{
496 dout("destroy_fs_client %p\n", fsc);
497
498 destroy_workqueue(fsc->wb_wq);
499 destroy_workqueue(fsc->pg_inv_wq);
500 destroy_workqueue(fsc->trunc_wq);
501
502 bdi_destroy(&fsc->backing_dev_info);
503
504 mempool_destroy(fsc->wb_pagevec_pool);
505
506 destroy_mount_options(fsc->mount_options);
507
508 ceph_fs_debugfs_cleanup(fsc);
509
510 ceph_destroy_client(fsc->client);
511
512 kfree(fsc);
513 dout("destroy_fs_client %p done\n", fsc);
514}
515
516/*
517 * caches
518 */
519struct kmem_cache *ceph_inode_cachep;
520struct kmem_cache *ceph_cap_cachep;
521struct kmem_cache *ceph_dentry_cachep;
522struct kmem_cache *ceph_file_cachep;
523
524static void ceph_inode_init_once(void *foo)
525{
526 struct ceph_inode_info *ci = foo;
527 inode_init_once(&ci->vfs_inode);
528}
529
530static int __init init_caches(void)
531{
532 ceph_inode_cachep = kmem_cache_create("ceph_inode_info",
533 sizeof(struct ceph_inode_info),
534 __alignof__(struct ceph_inode_info),
535 (SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD),
536 ceph_inode_init_once);
537 if (ceph_inode_cachep == NULL)
538 return -ENOMEM;
539
540 ceph_cap_cachep = KMEM_CACHE(ceph_cap,
541 SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
542 if (ceph_cap_cachep == NULL)
543 goto bad_cap;
544
545 ceph_dentry_cachep = KMEM_CACHE(ceph_dentry_info,
546 SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
547 if (ceph_dentry_cachep == NULL)
548 goto bad_dentry;
549
550 ceph_file_cachep = KMEM_CACHE(ceph_file_info,
551 SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
552 if (ceph_file_cachep == NULL)
553 goto bad_file;
554
555 return 0;
556
557bad_file:
558 kmem_cache_destroy(ceph_dentry_cachep);
559bad_dentry:
560 kmem_cache_destroy(ceph_cap_cachep);
561bad_cap:
562 kmem_cache_destroy(ceph_inode_cachep);
563 return -ENOMEM;
564}
565
566static void destroy_caches(void)
567{
568 kmem_cache_destroy(ceph_inode_cachep);
569 kmem_cache_destroy(ceph_cap_cachep);
570 kmem_cache_destroy(ceph_dentry_cachep);
571 kmem_cache_destroy(ceph_file_cachep);
572}
573
574
575/*
576 * ceph_umount_begin - initiate forced umount. Tear down the
577 * mount, skipping steps that may hang while waiting for server(s).
578 */
579static void ceph_umount_begin(struct super_block *sb)
580{
581 struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
582
583 dout("ceph_umount_begin - starting forced umount\n");
584 if (!fsc)
585 return;
586 fsc->mount_state = CEPH_MOUNT_SHUTDOWN;
587 return;
588}
589
590static const struct super_operations ceph_super_ops = {
591 .alloc_inode = ceph_alloc_inode,
592 .destroy_inode = ceph_destroy_inode,
593 .write_inode = ceph_write_inode,
594 .sync_fs = ceph_sync_fs,
595 .put_super = ceph_put_super,
596 .show_options = ceph_show_options,
597 .statfs = ceph_statfs,
598 .umount_begin = ceph_umount_begin,
599};
600
601/*
602 * Bootstrap mount by opening the root directory. Note the mount
603 * @started time from caller, and time out if this takes too long.
604 */
605static struct dentry *open_root_dentry(struct ceph_fs_client *fsc,
606 const char *path,
607 unsigned long started)
608{
609 struct ceph_mds_client *mdsc = fsc->mdsc;
610 struct ceph_mds_request *req = NULL;
611 int err;
612 struct dentry *root;
613
614 /* open dir */
615 dout("open_root_inode opening '%s'\n", path);
616 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS);
617 if (IS_ERR(req))
618 return ERR_CAST(req);
619 req->r_path1 = kstrdup(path, GFP_NOFS);
620 req->r_ino1.ino = CEPH_INO_ROOT;
621 req->r_ino1.snap = CEPH_NOSNAP;
622 req->r_started = started;
623 req->r_timeout = fsc->client->options->mount_timeout * HZ;
624 req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE);
625 req->r_num_caps = 2;
626 err = ceph_mdsc_do_request(mdsc, NULL, req);
627 if (err == 0) {
628 dout("open_root_inode success\n");
629 if (ceph_ino(req->r_target_inode) == CEPH_INO_ROOT &&
630 fsc->sb->s_root == NULL)
631 root = d_alloc_root(req->r_target_inode);
632 else
633 root = d_obtain_alias(req->r_target_inode);
634 req->r_target_inode = NULL;
635 dout("open_root_inode success, root dentry is %p\n", root);
636 } else {
637 root = ERR_PTR(err);
638 }
639 ceph_mdsc_put_request(req);
640 return root;
641}
642
643
644
645
646/*
647 * mount: join the ceph cluster, and open root directory.
648 */
649static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc,
650 const char *path)
651{
652 int err;
653 unsigned long started = jiffies; /* note the start time */
654 struct dentry *root;
655 int first = 0; /* first vfsmount for this super_block */
656
657 dout("mount start\n");
658 mutex_lock(&fsc->client->mount_mutex);
659
660 err = __ceph_open_session(fsc->client, started);
661 if (err < 0)
662 goto out;
663
664 dout("mount opening root\n");
665 root = open_root_dentry(fsc, "", started);
666 if (IS_ERR(root)) {
667 err = PTR_ERR(root);
668 goto out;
669 }
670 if (fsc->sb->s_root) {
671 dput(root);
672 } else {
673 fsc->sb->s_root = root;
674 first = 1;
675
676 err = ceph_fs_debugfs_init(fsc);
677 if (err < 0)
678 goto fail;
679 }
680
681 if (path[0] == 0) {
682 dget(root);
683 } else {
684 dout("mount opening base mountpoint\n");
685 root = open_root_dentry(fsc, path, started);
686 if (IS_ERR(root)) {
687 err = PTR_ERR(root);
688 goto fail;
689 }
690 }
691
692 fsc->mount_state = CEPH_MOUNT_MOUNTED;
693 dout("mount success\n");
694 mutex_unlock(&fsc->client->mount_mutex);
695 return root;
696
697out:
698 mutex_unlock(&fsc->client->mount_mutex);
699 return ERR_PTR(err);
700
701fail:
702 if (first) {
703 dput(fsc->sb->s_root);
704 fsc->sb->s_root = NULL;
705 }
706 goto out;
707}
708
709static int ceph_set_super(struct super_block *s, void *data)
710{
711 struct ceph_fs_client *fsc = data;
712 int ret;
713
714 dout("set_super %p data %p\n", s, data);
715
716 s->s_flags = fsc->mount_options->sb_flags;
717 s->s_maxbytes = 1ULL << 40; /* temp value until we get mdsmap */
718
719 s->s_fs_info = fsc;
720 fsc->sb = s;
721
722 s->s_op = &ceph_super_ops;
723 s->s_export_op = &ceph_export_ops;
724
725 s->s_time_gran = 1000; /* 1000 ns == 1 us */
726
727 ret = set_anon_super(s, NULL); /* what is that second arg for? */
728 if (ret != 0)
729 goto fail;
730
731 return ret;
732
733fail:
734 s->s_fs_info = NULL;
735 fsc->sb = NULL;
736 return ret;
737}
738
739/*
740 * share superblock if same fs AND options
741 */
742static int ceph_compare_super(struct super_block *sb, void *data)
743{
744 struct ceph_fs_client *new = data;
745 struct ceph_mount_options *fsopt = new->mount_options;
746 struct ceph_options *opt = new->client->options;
747 struct ceph_fs_client *other = ceph_sb_to_client(sb);
748
749 dout("ceph_compare_super %p\n", sb);
750
751 if (compare_mount_options(fsopt, opt, other)) {
752 dout("monitor(s)/mount options don't match\n");
753 return 0;
754 }
755 if ((opt->flags & CEPH_OPT_FSID) &&
756 ceph_fsid_compare(&opt->fsid, &other->client->fsid)) {
757 dout("fsid doesn't match\n");
758 return 0;
759 }
760 if (fsopt->sb_flags != other->mount_options->sb_flags) {
761 dout("flags differ\n");
762 return 0;
763 }
764 return 1;
765}
766
767/*
768 * construct our own bdi so we can control readahead, etc.
769 */
770static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
771
772static int ceph_register_bdi(struct super_block *sb,
773 struct ceph_fs_client *fsc)
774{
775 int err;
776
777 /* set ra_pages based on rsize mount option? */
778 if (fsc->mount_options->rsize >= PAGE_CACHE_SIZE)
779 fsc->backing_dev_info.ra_pages =
780 (fsc->mount_options->rsize + PAGE_CACHE_SIZE - 1)
781 >> PAGE_SHIFT;
782 else
783 fsc->backing_dev_info.ra_pages =
784 default_backing_dev_info.ra_pages;
785
786 err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%d",
787 atomic_long_inc_return(&bdi_seq));
788 if (!err)
789 sb->s_bdi = &fsc->backing_dev_info;
790 return err;
791}
792
793static struct dentry *ceph_mount(struct file_system_type *fs_type,
794 int flags, const char *dev_name, void *data)
795{
796 struct super_block *sb;
797 struct ceph_fs_client *fsc;
798 struct dentry *res;
799 int err;
800 int (*compare_super)(struct super_block *, void *) = ceph_compare_super;
801 const char *path = NULL;
802 struct ceph_mount_options *fsopt = NULL;
803 struct ceph_options *opt = NULL;
804
805 dout("ceph_mount\n");
806 err = parse_mount_options(&fsopt, &opt, flags, data, dev_name, &path);
807 if (err < 0) {
808 res = ERR_PTR(err);
809 goto out_final;
810 }
811
812 /* create client (which we may/may not use) */
813 fsc = create_fs_client(fsopt, opt);
814 if (IS_ERR(fsc)) {
815 res = ERR_CAST(fsc);
816 destroy_mount_options(fsopt);
817 ceph_destroy_options(opt);
818 goto out_final;
819 }
820
821 err = ceph_mdsc_init(fsc);
822 if (err < 0) {
823 res = ERR_PTR(err);
824 goto out;
825 }
826
827 if (ceph_test_opt(fsc->client, NOSHARE))
828 compare_super = NULL;
829 sb = sget(fs_type, compare_super, ceph_set_super, fsc);
830 if (IS_ERR(sb)) {
831 res = ERR_CAST(sb);
832 goto out;
833 }
834
835 if (ceph_sb_to_client(sb) != fsc) {
836 ceph_mdsc_destroy(fsc);
837 destroy_fs_client(fsc);
838 fsc = ceph_sb_to_client(sb);
839 dout("get_sb got existing client %p\n", fsc);
840 } else {
841 dout("get_sb using new client %p\n", fsc);
842 err = ceph_register_bdi(sb, fsc);
843 if (err < 0) {
844 res = ERR_PTR(err);
845 goto out_splat;
846 }
847 }
848
849 res = ceph_real_mount(fsc, path);
850 if (IS_ERR(res))
851 goto out_splat;
852 dout("root %p inode %p ino %llx.%llx\n", res,
853 res->d_inode, ceph_vinop(res->d_inode));
854 return res;
855
856out_splat:
857 ceph_mdsc_close_sessions(fsc->mdsc);
858 deactivate_locked_super(sb);
859 goto out_final;
860
861out:
862 ceph_mdsc_destroy(fsc);
863 destroy_fs_client(fsc);
864out_final:
865 dout("ceph_mount fail %ld\n", PTR_ERR(res));
866 return res;
867}
868
869static void ceph_kill_sb(struct super_block *s)
870{
871 struct ceph_fs_client *fsc = ceph_sb_to_client(s);
872 dout("kill_sb %p\n", s);
873 ceph_mdsc_pre_umount(fsc->mdsc);
874 kill_anon_super(s); /* will call put_super after sb is r/o */
875 ceph_mdsc_destroy(fsc);
876 destroy_fs_client(fsc);
877}
878
879static struct file_system_type ceph_fs_type = {
880 .owner = THIS_MODULE,
881 .name = "ceph",
882 .mount = ceph_mount,
883 .kill_sb = ceph_kill_sb,
884 .fs_flags = FS_RENAME_DOES_D_MOVE,
885};
886
887#define _STRINGIFY(x) #x
888#define STRINGIFY(x) _STRINGIFY(x)
889
890static int __init init_ceph(void)
891{
892 int ret = init_caches();
893 if (ret)
894 goto out;
895
896 ret = register_filesystem(&ceph_fs_type);
897 if (ret)
898 goto out_icache;
899
900 pr_info("loaded (mds proto %d)\n", CEPH_MDSC_PROTOCOL);
901
902 return 0;
903
904out_icache:
905 destroy_caches();
906out:
907 return ret;
908}
909
910static void __exit exit_ceph(void)
911{
912 dout("exit_ceph\n");
913 unregister_filesystem(&ceph_fs_type);
914 destroy_caches();
915}
916
917module_init(init_ceph);
918module_exit(exit_ceph);
919
920MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
921MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
922MODULE_AUTHOR("Patience Warnick <patience@newdream.net>");
923MODULE_DESCRIPTION("Ceph filesystem for Linux");
924MODULE_LICENSE("GPL");
1// SPDX-License-Identifier: GPL-2.0-only
2
3#include <linux/ceph/ceph_debug.h>
4
5#include <linux/backing-dev.h>
6#include <linux/ctype.h>
7#include <linux/fs.h>
8#include <linux/inet.h>
9#include <linux/in6.h>
10#include <linux/module.h>
11#include <linux/mount.h>
12#include <linux/fs_context.h>
13#include <linux/fs_parser.h>
14#include <linux/sched.h>
15#include <linux/seq_file.h>
16#include <linux/slab.h>
17#include <linux/statfs.h>
18#include <linux/string.h>
19
20#include "super.h"
21#include "mds_client.h"
22#include "cache.h"
23
24#include <linux/ceph/ceph_features.h>
25#include <linux/ceph/decode.h>
26#include <linux/ceph/mon_client.h>
27#include <linux/ceph/auth.h>
28#include <linux/ceph/debugfs.h>
29
30static DEFINE_SPINLOCK(ceph_fsc_lock);
31static LIST_HEAD(ceph_fsc_list);
32
33/*
34 * Ceph superblock operations
35 *
36 * Handle the basics of mounting, unmounting.
37 */
38
39/*
40 * super ops
41 */
42static void ceph_put_super(struct super_block *s)
43{
44 struct ceph_fs_client *fsc = ceph_sb_to_client(s);
45
46 dout("put_super\n");
47 ceph_mdsc_close_sessions(fsc->mdsc);
48}
49
50static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
51{
52 struct ceph_fs_client *fsc = ceph_inode_to_client(d_inode(dentry));
53 struct ceph_mon_client *monc = &fsc->client->monc;
54 struct ceph_statfs st;
55 u64 fsid;
56 int err;
57 u64 data_pool;
58
59 if (fsc->mdsc->mdsmap->m_num_data_pg_pools == 1) {
60 data_pool = fsc->mdsc->mdsmap->m_data_pg_pools[0];
61 } else {
62 data_pool = CEPH_NOPOOL;
63 }
64
65 dout("statfs\n");
66 err = ceph_monc_do_statfs(monc, data_pool, &st);
67 if (err < 0)
68 return err;
69
70 /* fill in kstatfs */
71 buf->f_type = CEPH_SUPER_MAGIC; /* ?? */
72
73 /*
74 * express utilization in terms of large blocks to avoid
75 * overflow on 32-bit machines.
76 *
77 * NOTE: for the time being, we make bsize == frsize to humor
78 * not-yet-ancient versions of glibc that are broken.
79 * Someday, we will probably want to report a real block
80 * size... whatever that may mean for a network file system!
81 */
82 buf->f_bsize = 1 << CEPH_BLOCK_SHIFT;
83 buf->f_frsize = 1 << CEPH_BLOCK_SHIFT;
84
85 /*
86 * By default use root quota for stats; fallback to overall filesystem
87 * usage if using 'noquotadf' mount option or if the root dir doesn't
88 * have max_bytes quota set.
89 */
90 if (ceph_test_mount_opt(fsc, NOQUOTADF) ||
91 !ceph_quota_update_statfs(fsc, buf)) {
92 buf->f_blocks = le64_to_cpu(st.kb) >> (CEPH_BLOCK_SHIFT-10);
93 buf->f_bfree = le64_to_cpu(st.kb_avail) >> (CEPH_BLOCK_SHIFT-10);
94 buf->f_bavail = le64_to_cpu(st.kb_avail) >> (CEPH_BLOCK_SHIFT-10);
95 }
96
97 buf->f_files = le64_to_cpu(st.num_objects);
98 buf->f_ffree = -1;
99 buf->f_namelen = NAME_MAX;
100
101 /* Must convert the fsid, for consistent values across arches */
102 mutex_lock(&monc->mutex);
103 fsid = le64_to_cpu(*(__le64 *)(&monc->monmap->fsid)) ^
104 le64_to_cpu(*((__le64 *)&monc->monmap->fsid + 1));
105 mutex_unlock(&monc->mutex);
106
107 buf->f_fsid.val[0] = fsid & 0xffffffff;
108 buf->f_fsid.val[1] = fsid >> 32;
109
110 return 0;
111}
112
113static int ceph_sync_fs(struct super_block *sb, int wait)
114{
115 struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
116
117 if (!wait) {
118 dout("sync_fs (non-blocking)\n");
119 ceph_flush_dirty_caps(fsc->mdsc);
120 dout("sync_fs (non-blocking) done\n");
121 return 0;
122 }
123
124 dout("sync_fs (blocking)\n");
125 ceph_osdc_sync(&fsc->client->osdc);
126 ceph_mdsc_sync(fsc->mdsc);
127 dout("sync_fs (blocking) done\n");
128 return 0;
129}
130
131/*
132 * mount options
133 */
134enum {
135 Opt_wsize,
136 Opt_rsize,
137 Opt_rasize,
138 Opt_caps_wanted_delay_min,
139 Opt_caps_wanted_delay_max,
140 Opt_caps_max,
141 Opt_readdir_max_entries,
142 Opt_readdir_max_bytes,
143 Opt_congestion_kb,
144 /* int args above */
145 Opt_snapdirname,
146 Opt_mds_namespace,
147 Opt_recover_session,
148 Opt_source,
149 /* string args above */
150 Opt_dirstat,
151 Opt_rbytes,
152 Opt_asyncreaddir,
153 Opt_dcache,
154 Opt_ino32,
155 Opt_fscache,
156 Opt_poolperm,
157 Opt_require_active_mds,
158 Opt_acl,
159 Opt_quotadf,
160 Opt_copyfrom,
161 Opt_wsync,
162};
163
164enum ceph_recover_session_mode {
165 ceph_recover_session_no,
166 ceph_recover_session_clean
167};
168
169static const struct constant_table ceph_param_recover[] = {
170 { "no", ceph_recover_session_no },
171 { "clean", ceph_recover_session_clean },
172 {}
173};
174
175static const struct fs_parameter_spec ceph_mount_parameters[] = {
176 fsparam_flag_no ("acl", Opt_acl),
177 fsparam_flag_no ("asyncreaddir", Opt_asyncreaddir),
178 fsparam_s32 ("caps_max", Opt_caps_max),
179 fsparam_u32 ("caps_wanted_delay_max", Opt_caps_wanted_delay_max),
180 fsparam_u32 ("caps_wanted_delay_min", Opt_caps_wanted_delay_min),
181 fsparam_u32 ("write_congestion_kb", Opt_congestion_kb),
182 fsparam_flag_no ("copyfrom", Opt_copyfrom),
183 fsparam_flag_no ("dcache", Opt_dcache),
184 fsparam_flag_no ("dirstat", Opt_dirstat),
185 fsparam_flag_no ("fsc", Opt_fscache), // fsc|nofsc
186 fsparam_string ("fsc", Opt_fscache), // fsc=...
187 fsparam_flag_no ("ino32", Opt_ino32),
188 fsparam_string ("mds_namespace", Opt_mds_namespace),
189 fsparam_flag_no ("poolperm", Opt_poolperm),
190 fsparam_flag_no ("quotadf", Opt_quotadf),
191 fsparam_u32 ("rasize", Opt_rasize),
192 fsparam_flag_no ("rbytes", Opt_rbytes),
193 fsparam_u32 ("readdir_max_bytes", Opt_readdir_max_bytes),
194 fsparam_u32 ("readdir_max_entries", Opt_readdir_max_entries),
195 fsparam_enum ("recover_session", Opt_recover_session, ceph_param_recover),
196 fsparam_flag_no ("require_active_mds", Opt_require_active_mds),
197 fsparam_u32 ("rsize", Opt_rsize),
198 fsparam_string ("snapdirname", Opt_snapdirname),
199 fsparam_string ("source", Opt_source),
200 fsparam_u32 ("wsize", Opt_wsize),
201 fsparam_flag_no ("wsync", Opt_wsync),
202 {}
203};
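/*
 * Illustrative example: the table above covers option strings such as
 * "rasize=8388608,nowsync,recover_session=clean" as passed via
 * "mount -t ceph <mon-list>:/ /mnt -o ..." (values here are made up).
 */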
204
205struct ceph_parse_opts_ctx {
206 struct ceph_options *copts;
207 struct ceph_mount_options *opts;
208};
209
210/*
211 * Remove adjacent slashes and then the trailing slash, unless it is
212 * the only remaining character.
213 *
214 * E.g. "//dir1////dir2///" --> "/dir1/dir2", "///" --> "/".
215 */
216static void canonicalize_path(char *path)
217{
218 int i, j = 0;
219
220 for (i = 0; path[i] != '\0'; i++) {
221 if (path[i] != '/' || j < 1 || path[j - 1] != '/')
222 path[j++] = path[i];
223 }
224
225 if (j > 1 && path[j - 1] == '/')
226 j--;
227 path[j] = '\0';
228}
229
230/*
231 * Parse the source parameter. Distinguish the server list from the path.
232 *
233 * The source will look like:
234 * <server_spec>[,<server_spec>...]:[<path>]
235 * where
236 * <server_spec> is <ip>[:<port>]
237 * <path> is optional, but if present must begin with '/'
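 *
 * E.g. "192.168.201.100:6789,192.168.201.101:6789:/mydir" (illustrative
 * addresses) yields two monitor specs and a server_path of "/mydir".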
238 */
239static int ceph_parse_source(struct fs_parameter *param, struct fs_context *fc)
240{
241 struct ceph_parse_opts_ctx *pctx = fc->fs_private;
242 struct ceph_mount_options *fsopt = pctx->opts;
243 char *dev_name = param->string, *dev_name_end;
244 int ret;
245
246 dout("%s '%s'\n", __func__, dev_name);
247 if (!dev_name || !*dev_name)
248 return invalfc(fc, "Empty source");
249
250 dev_name_end = strchr(dev_name, '/');
251 if (dev_name_end) {
252 /*
253 * The server_path will include the whole chars from userland
254 * including the leading '/'.
255 */
256 kfree(fsopt->server_path);
257 fsopt->server_path = kstrdup(dev_name_end, GFP_KERNEL);
258 if (!fsopt->server_path)
259 return -ENOMEM;
260
261 canonicalize_path(fsopt->server_path);
262 } else {
263 dev_name_end = dev_name + strlen(dev_name);
264 }
265
266 dev_name_end--; /* back up to ':' separator */
267 if (dev_name_end < dev_name || *dev_name_end != ':')
268 return invalfc(fc, "No path or : separator in source");
269
270 dout("device name '%.*s'\n", (int)(dev_name_end - dev_name), dev_name);
271 if (fsopt->server_path)
272 dout("server path '%s'\n", fsopt->server_path);
273
274 ret = ceph_parse_mon_ips(param->string, dev_name_end - dev_name,
275 pctx->copts, fc->log.log);
276 if (ret)
277 return ret;
278
279 fc->source = param->string;
280 param->string = NULL;
281 return 0;
282}
283
284static int ceph_parse_mount_param(struct fs_context *fc,
285 struct fs_parameter *param)
286{
287 struct ceph_parse_opts_ctx *pctx = fc->fs_private;
288 struct ceph_mount_options *fsopt = pctx->opts;
289 struct fs_parse_result result;
290 unsigned int mode;
291 int token, ret;
292
293 ret = ceph_parse_param(param, pctx->copts, fc->log.log);
294 if (ret != -ENOPARAM)
295 return ret;
296
297 token = fs_parse(fc, ceph_mount_parameters, param, &result);
298 dout("%s fs_parse '%s' token %d\n", __func__, param->key, token);
299 if (token < 0)
300 return token;
301
302 switch (token) {
303 case Opt_snapdirname:
304 kfree(fsopt->snapdir_name);
305 fsopt->snapdir_name = param->string;
306 param->string = NULL;
307 break;
308 case Opt_mds_namespace:
309 kfree(fsopt->mds_namespace);
310 fsopt->mds_namespace = param->string;
311 param->string = NULL;
312 break;
313 case Opt_recover_session:
314 mode = result.uint_32;
315 if (mode == ceph_recover_session_no)
316 fsopt->flags &= ~CEPH_MOUNT_OPT_CLEANRECOVER;
317 else if (mode == ceph_recover_session_clean)
318 fsopt->flags |= CEPH_MOUNT_OPT_CLEANRECOVER;
319 else
320 BUG();
321 break;
322 case Opt_source:
323 if (fc->source)
324 return invalfc(fc, "Multiple sources specified");
325 return ceph_parse_source(param, fc);
326 case Opt_wsize:
327 if (result.uint_32 < PAGE_SIZE ||
328 result.uint_32 > CEPH_MAX_WRITE_SIZE)
329 goto out_of_range;
330 fsopt->wsize = ALIGN(result.uint_32, PAGE_SIZE);
331 break;
332 case Opt_rsize:
333 if (result.uint_32 < PAGE_SIZE ||
334 result.uint_32 > CEPH_MAX_READ_SIZE)
335 goto out_of_range;
336 fsopt->rsize = ALIGN(result.uint_32, PAGE_SIZE);
337 break;
338 case Opt_rasize:
339 fsopt->rasize = ALIGN(result.uint_32, PAGE_SIZE);
340 break;
341 case Opt_caps_wanted_delay_min:
342 if (result.uint_32 < 1)
343 goto out_of_range;
344 fsopt->caps_wanted_delay_min = result.uint_32;
345 break;
346 case Opt_caps_wanted_delay_max:
347 if (result.uint_32 < 1)
348 goto out_of_range;
349 fsopt->caps_wanted_delay_max = result.uint_32;
350 break;
351 case Opt_caps_max:
352 if (result.int_32 < 0)
353 goto out_of_range;
354 fsopt->caps_max = result.int_32;
355 break;
356 case Opt_readdir_max_entries:
357 if (result.uint_32 < 1)
358 goto out_of_range;
359 fsopt->max_readdir = result.uint_32;
360 break;
361 case Opt_readdir_max_bytes:
362 if (result.uint_32 < PAGE_SIZE && result.uint_32 != 0)
363 goto out_of_range;
364 fsopt->max_readdir_bytes = result.uint_32;
365 break;
366 case Opt_congestion_kb:
367 if (result.uint_32 < 1024) /* at least 1M */
368 goto out_of_range;
369 fsopt->congestion_kb = result.uint_32;
370 break;
371 case Opt_dirstat:
372 if (!result.negated)
373 fsopt->flags |= CEPH_MOUNT_OPT_DIRSTAT;
374 else
375 fsopt->flags &= ~CEPH_MOUNT_OPT_DIRSTAT;
376 break;
377 case Opt_rbytes:
378 if (!result.negated)
379 fsopt->flags |= CEPH_MOUNT_OPT_RBYTES;
380 else
381 fsopt->flags &= ~CEPH_MOUNT_OPT_RBYTES;
382 break;
383 case Opt_asyncreaddir:
384 if (!result.negated)
385 fsopt->flags &= ~CEPH_MOUNT_OPT_NOASYNCREADDIR;
386 else
387 fsopt->flags |= CEPH_MOUNT_OPT_NOASYNCREADDIR;
388 break;
389 case Opt_dcache:
390 if (!result.negated)
391 fsopt->flags |= CEPH_MOUNT_OPT_DCACHE;
392 else
393 fsopt->flags &= ~CEPH_MOUNT_OPT_DCACHE;
394 break;
395 case Opt_ino32:
396 if (!result.negated)
397 fsopt->flags |= CEPH_MOUNT_OPT_INO32;
398 else
399 fsopt->flags &= ~CEPH_MOUNT_OPT_INO32;
400 break;
401
402 case Opt_fscache:
403#ifdef CONFIG_CEPH_FSCACHE
404 kfree(fsopt->fscache_uniq);
405 fsopt->fscache_uniq = NULL;
406 if (result.negated) {
407 fsopt->flags &= ~CEPH_MOUNT_OPT_FSCACHE;
408 } else {
409 fsopt->flags |= CEPH_MOUNT_OPT_FSCACHE;
410 fsopt->fscache_uniq = param->string;
411 param->string = NULL;
412 }
413 break;
414#else
415 return invalfc(fc, "fscache support is disabled");
416#endif
417 case Opt_poolperm:
418 if (!result.negated)
419 fsopt->flags &= ~CEPH_MOUNT_OPT_NOPOOLPERM;
420 else
421 fsopt->flags |= CEPH_MOUNT_OPT_NOPOOLPERM;
422 break;
423 case Opt_require_active_mds:
424 if (!result.negated)
425 fsopt->flags &= ~CEPH_MOUNT_OPT_MOUNTWAIT;
426 else
427 fsopt->flags |= CEPH_MOUNT_OPT_MOUNTWAIT;
428 break;
429 case Opt_quotadf:
430 if (!result.negated)
431 fsopt->flags &= ~CEPH_MOUNT_OPT_NOQUOTADF;
432 else
433 fsopt->flags |= CEPH_MOUNT_OPT_NOQUOTADF;
434 break;
435 case Opt_copyfrom:
436 if (!result.negated)
437 fsopt->flags &= ~CEPH_MOUNT_OPT_NOCOPYFROM;
438 else
439 fsopt->flags |= CEPH_MOUNT_OPT_NOCOPYFROM;
440 break;
441 case Opt_acl:
442 if (!result.negated) {
443#ifdef CONFIG_CEPH_FS_POSIX_ACL
444 fc->sb_flags |= SB_POSIXACL;
445#else
446 return invalfc(fc, "POSIX ACL support is disabled");
447#endif
448 } else {
449 fc->sb_flags &= ~SB_POSIXACL;
450 }
451 break;
452 case Opt_wsync:
453 if (!result.negated)
454 fsopt->flags &= ~CEPH_MOUNT_OPT_ASYNC_DIROPS;
455 else
456 fsopt->flags |= CEPH_MOUNT_OPT_ASYNC_DIROPS;
457 break;
458 default:
459 BUG();
460 }
461 return 0;
462
463out_of_range:
464 return invalfc(fc, "%s out of range", param->key);
465}
466
467static void destroy_mount_options(struct ceph_mount_options *args)
468{
469 dout("destroy_mount_options %p\n", args);
470 if (!args)
471 return;
472
473 kfree(args->snapdir_name);
474 kfree(args->mds_namespace);
475 kfree(args->server_path);
476 kfree(args->fscache_uniq);
477 kfree(args);
478}
479
480static int strcmp_null(const char *s1, const char *s2)
481{
482 if (!s1 && !s2)
483 return 0;
484 if (s1 && !s2)
485 return -1;
486 if (!s1 && s2)
487 return 1;
488 return strcmp(s1, s2);
489}
490
491static int compare_mount_options(struct ceph_mount_options *new_fsopt,
492 struct ceph_options *new_opt,
493 struct ceph_fs_client *fsc)
494{
495 struct ceph_mount_options *fsopt1 = new_fsopt;
496 struct ceph_mount_options *fsopt2 = fsc->mount_options;
497 int ofs = offsetof(struct ceph_mount_options, snapdir_name);
498 int ret;
499
500 ret = memcmp(fsopt1, fsopt2, ofs);
501 if (ret)
502 return ret;
503
504 ret = strcmp_null(fsopt1->snapdir_name, fsopt2->snapdir_name);
505 if (ret)
506 return ret;
507
508 ret = strcmp_null(fsopt1->mds_namespace, fsopt2->mds_namespace);
509 if (ret)
510 return ret;
511
512 ret = strcmp_null(fsopt1->server_path, fsopt2->server_path);
513 if (ret)
514 return ret;
515
516 ret = strcmp_null(fsopt1->fscache_uniq, fsopt2->fscache_uniq);
517 if (ret)
518 return ret;
519
520 return ceph_compare_options(new_opt, fsc->client);
521}
522
523/**
524 * ceph_show_options - Show mount options in /proc/mounts
525 * @m: seq_file to write to
526 * @root: root of that (sub)tree
527 */
528static int ceph_show_options(struct seq_file *m, struct dentry *root)
529{
530 struct ceph_fs_client *fsc = ceph_sb_to_client(root->d_sb);
531 struct ceph_mount_options *fsopt = fsc->mount_options;
532 size_t pos;
533 int ret;
534
535 /* a comma between MNT/MS and client options */
536 seq_putc(m, ',');
537 pos = m->count;
538
539 ret = ceph_print_client_options(m, fsc->client, false);
540 if (ret)
541 return ret;
542
543 /* retract our comma if no client options */
544 if (m->count == pos)
545 m->count--;
546
547 if (fsopt->flags & CEPH_MOUNT_OPT_DIRSTAT)
548 seq_puts(m, ",dirstat");
549 if ((fsopt->flags & CEPH_MOUNT_OPT_RBYTES))
550 seq_puts(m, ",rbytes");
551 if (fsopt->flags & CEPH_MOUNT_OPT_NOASYNCREADDIR)
552 seq_puts(m, ",noasyncreaddir");
553 if ((fsopt->flags & CEPH_MOUNT_OPT_DCACHE) == 0)
554 seq_puts(m, ",nodcache");
555 if (fsopt->flags & CEPH_MOUNT_OPT_INO32)
556 seq_puts(m, ",ino32");
557 if (fsopt->flags & CEPH_MOUNT_OPT_FSCACHE) {
558 seq_show_option(m, "fsc", fsopt->fscache_uniq);
559 }
560 if (fsopt->flags & CEPH_MOUNT_OPT_NOPOOLPERM)
561 seq_puts(m, ",nopoolperm");
562 if (fsopt->flags & CEPH_MOUNT_OPT_NOQUOTADF)
563 seq_puts(m, ",noquotadf");
564
565#ifdef CONFIG_CEPH_FS_POSIX_ACL
566 if (root->d_sb->s_flags & SB_POSIXACL)
567 seq_puts(m, ",acl");
568 else
569 seq_puts(m, ",noacl");
570#endif
571
572 if ((fsopt->flags & CEPH_MOUNT_OPT_NOCOPYFROM) == 0)
573 seq_puts(m, ",copyfrom");
574
575 if (fsopt->mds_namespace)
576 seq_show_option(m, "mds_namespace", fsopt->mds_namespace);
577
578 if (fsopt->flags & CEPH_MOUNT_OPT_CLEANRECOVER)
579 seq_show_option(m, "recover_session", "clean");
580
581 if (fsopt->flags & CEPH_MOUNT_OPT_ASYNC_DIROPS)
582 seq_puts(m, ",nowsync");
583
584 if (fsopt->wsize != CEPH_MAX_WRITE_SIZE)
585 seq_printf(m, ",wsize=%u", fsopt->wsize);
586 if (fsopt->rsize != CEPH_MAX_READ_SIZE)
587 seq_printf(m, ",rsize=%u", fsopt->rsize);
588 if (fsopt->rasize != CEPH_RASIZE_DEFAULT)
589 seq_printf(m, ",rasize=%u", fsopt->rasize);
590 if (fsopt->congestion_kb != default_congestion_kb())
591 seq_printf(m, ",write_congestion_kb=%u", fsopt->congestion_kb);
592 if (fsopt->caps_max)
593 seq_printf(m, ",caps_max=%d", fsopt->caps_max);
594 if (fsopt->caps_wanted_delay_min != CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT)
595 seq_printf(m, ",caps_wanted_delay_min=%u",
596 fsopt->caps_wanted_delay_min);
597 if (fsopt->caps_wanted_delay_max != CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT)
598 seq_printf(m, ",caps_wanted_delay_max=%u",
599 fsopt->caps_wanted_delay_max);
600 if (fsopt->max_readdir != CEPH_MAX_READDIR_DEFAULT)
601 seq_printf(m, ",readdir_max_entries=%u", fsopt->max_readdir);
602 if (fsopt->max_readdir_bytes != CEPH_MAX_READDIR_BYTES_DEFAULT)
603 seq_printf(m, ",readdir_max_bytes=%u", fsopt->max_readdir_bytes);
604 if (strcmp(fsopt->snapdir_name, CEPH_SNAPDIRNAME_DEFAULT))
605 seq_show_option(m, "snapdirname", fsopt->snapdir_name);
606
607 return 0;
608}
609
610/*
611 * handle any mon messages the standard library doesn't understand.
612 * return error if we don't either.
613 */
614static int extra_mon_dispatch(struct ceph_client *client, struct ceph_msg *msg)
615{
616 struct ceph_fs_client *fsc = client->private;
617 int type = le16_to_cpu(msg->hdr.type);
618
619 switch (type) {
620 case CEPH_MSG_MDS_MAP:
621 ceph_mdsc_handle_mdsmap(fsc->mdsc, msg);
622 return 0;
623 case CEPH_MSG_FS_MAP_USER:
624 ceph_mdsc_handle_fsmap(fsc->mdsc, msg);
625 return 0;
626 default:
627 return -1;
628 }
629}
630
631/*
632 * create a new fs client
633 *
634 * Success or not, this function consumes @fsopt and @opt.
635 */
636static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt,
637 struct ceph_options *opt)
638{
639 struct ceph_fs_client *fsc;
640 int err;
641
642 fsc = kzalloc(sizeof(*fsc), GFP_KERNEL);
643 if (!fsc) {
644 err = -ENOMEM;
645 goto fail;
646 }
647
648 fsc->client = ceph_create_client(opt, fsc);
649 if (IS_ERR(fsc->client)) {
650 err = PTR_ERR(fsc->client);
651 goto fail;
652 }
653 opt = NULL; /* fsc->client now owns this */
654
655 fsc->client->extra_mon_dispatch = extra_mon_dispatch;
656 ceph_set_opt(fsc->client, ABORT_ON_FULL);
657
658 if (!fsopt->mds_namespace) {
659 ceph_monc_want_map(&fsc->client->monc, CEPH_SUB_MDSMAP,
660 0, true);
661 } else {
662 ceph_monc_want_map(&fsc->client->monc, CEPH_SUB_FSMAP,
663 0, false);
664 }
665
666 fsc->mount_options = fsopt;
667
668 fsc->sb = NULL;
669 fsc->mount_state = CEPH_MOUNT_MOUNTING;
670 fsc->filp_gen = 1;
671 fsc->have_copy_from2 = true;
672
673 atomic_long_set(&fsc->writeback_count, 0);
674
675 err = -ENOMEM;
676 /*
677 * The number of concurrent work items can be high, but they don't
678 * need to be processed in parallel, so limit concurrency.
679 */
680 fsc->inode_wq = alloc_workqueue("ceph-inode", WQ_UNBOUND, 0);
681 if (!fsc->inode_wq)
682 goto fail_client;
683 fsc->cap_wq = alloc_workqueue("ceph-cap", 0, 1);
684 if (!fsc->cap_wq)
685 goto fail_inode_wq;
686
687 spin_lock(&ceph_fsc_lock);
688 list_add_tail(&fsc->metric_wakeup, &ceph_fsc_list);
689 spin_unlock(&ceph_fsc_lock);
690
691 return fsc;
692
693fail_inode_wq:
694 destroy_workqueue(fsc->inode_wq);
695fail_client:
696 ceph_destroy_client(fsc->client);
697fail:
698 kfree(fsc);
699 if (opt)
700 ceph_destroy_options(opt);
701 destroy_mount_options(fsopt);
702 return ERR_PTR(err);
703}
704
705static void flush_fs_workqueues(struct ceph_fs_client *fsc)
706{
707 flush_workqueue(fsc->inode_wq);
708 flush_workqueue(fsc->cap_wq);
709}
710
711static void destroy_fs_client(struct ceph_fs_client *fsc)
712{
713 dout("destroy_fs_client %p\n", fsc);
714
715 spin_lock(&ceph_fsc_lock);
716 list_del(&fsc->metric_wakeup);
717 spin_unlock(&ceph_fsc_lock);
718
719 ceph_mdsc_destroy(fsc);
720 destroy_workqueue(fsc->inode_wq);
721 destroy_workqueue(fsc->cap_wq);
722
723 destroy_mount_options(fsc->mount_options);
724
725 ceph_destroy_client(fsc->client);
726
727 kfree(fsc);
728 dout("destroy_fs_client %p done\n", fsc);
729}
730
731/*
732 * caches
733 */
734struct kmem_cache *ceph_inode_cachep;
735struct kmem_cache *ceph_cap_cachep;
736struct kmem_cache *ceph_cap_flush_cachep;
737struct kmem_cache *ceph_dentry_cachep;
738struct kmem_cache *ceph_file_cachep;
739struct kmem_cache *ceph_dir_file_cachep;
740struct kmem_cache *ceph_mds_request_cachep;
741mempool_t *ceph_wb_pagevec_pool;
742
743static void ceph_inode_init_once(void *foo)
744{
745 struct ceph_inode_info *ci = foo;
746 inode_init_once(&ci->vfs_inode);
747}
748
749static int __init init_caches(void)
750{
751 int error = -ENOMEM;
752
753 ceph_inode_cachep = kmem_cache_create("ceph_inode_info",
754 sizeof(struct ceph_inode_info),
755 __alignof__(struct ceph_inode_info),
756 SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|
757 SLAB_ACCOUNT, ceph_inode_init_once);
758 if (!ceph_inode_cachep)
759 return -ENOMEM;
760
761 ceph_cap_cachep = KMEM_CACHE(ceph_cap, SLAB_MEM_SPREAD);
762 if (!ceph_cap_cachep)
763 goto bad_cap;
764 ceph_cap_flush_cachep = KMEM_CACHE(ceph_cap_flush,
765 SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
766 if (!ceph_cap_flush_cachep)
767 goto bad_cap_flush;
768
769 ceph_dentry_cachep = KMEM_CACHE(ceph_dentry_info,
770 SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
771 if (!ceph_dentry_cachep)
772 goto bad_dentry;
773
774 ceph_file_cachep = KMEM_CACHE(ceph_file_info, SLAB_MEM_SPREAD);
775 if (!ceph_file_cachep)
776 goto bad_file;
777
778 ceph_dir_file_cachep = KMEM_CACHE(ceph_dir_file_info, SLAB_MEM_SPREAD);
779 if (!ceph_dir_file_cachep)
780 goto bad_dir_file;
781
782 ceph_mds_request_cachep = KMEM_CACHE(ceph_mds_request, SLAB_MEM_SPREAD);
783 if (!ceph_mds_request_cachep)
784 goto bad_mds_req;
785
786 ceph_wb_pagevec_pool = mempool_create_kmalloc_pool(10, CEPH_MAX_WRITE_SIZE >> PAGE_SHIFT);
787 if (!ceph_wb_pagevec_pool)
788 goto bad_pagevec_pool;
789
790 error = ceph_fscache_register();
791 if (error)
792 goto bad_fscache;
793
794 return 0;
795
796bad_fscache:
797 kmem_cache_destroy(ceph_mds_request_cachep);
798bad_pagevec_pool:
799 mempool_destroy(ceph_wb_pagevec_pool);
800bad_mds_req:
801 kmem_cache_destroy(ceph_dir_file_cachep);
802bad_dir_file:
803 kmem_cache_destroy(ceph_file_cachep);
804bad_file:
805 kmem_cache_destroy(ceph_dentry_cachep);
806bad_dentry:
807 kmem_cache_destroy(ceph_cap_flush_cachep);
808bad_cap_flush:
809 kmem_cache_destroy(ceph_cap_cachep);
810bad_cap:
811 kmem_cache_destroy(ceph_inode_cachep);
812 return error;
813}
814
815static void destroy_caches(void)
816{
817 /*
818 * Make sure all delayed rcu free inodes are flushed before we
819 * destroy cache.
820 */
821 rcu_barrier();
822
823 kmem_cache_destroy(ceph_inode_cachep);
824 kmem_cache_destroy(ceph_cap_cachep);
825 kmem_cache_destroy(ceph_cap_flush_cachep);
826 kmem_cache_destroy(ceph_dentry_cachep);
827 kmem_cache_destroy(ceph_file_cachep);
828 kmem_cache_destroy(ceph_dir_file_cachep);
829 kmem_cache_destroy(ceph_mds_request_cachep);
830 mempool_destroy(ceph_wb_pagevec_pool);
831
832 ceph_fscache_unregister();
833}
834
835/*
836 * ceph_umount_begin - initiate forced umount. Tear down the
837 * mount, skipping steps that may hang while waiting for server(s).
838 */
839static void ceph_umount_begin(struct super_block *sb)
840{
841 struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
842
843 dout("ceph_umount_begin - starting forced umount\n");
844 if (!fsc)
845 return;
846 fsc->mount_state = CEPH_MOUNT_SHUTDOWN;
847 ceph_osdc_abort_requests(&fsc->client->osdc, -EIO);
848 ceph_mdsc_force_umount(fsc->mdsc);
849 fsc->filp_gen++; // invalidate open files
850}
851
852static const struct super_operations ceph_super_ops = {
853 .alloc_inode = ceph_alloc_inode,
854 .free_inode = ceph_free_inode,
855 .write_inode = ceph_write_inode,
856 .drop_inode = generic_delete_inode,
857 .evict_inode = ceph_evict_inode,
858 .sync_fs = ceph_sync_fs,
859 .put_super = ceph_put_super,
860 .show_options = ceph_show_options,
861 .statfs = ceph_statfs,
862 .umount_begin = ceph_umount_begin,
863};
864
865/*
866 * Bootstrap mount by opening the root directory. Note the mount
867 * @started time from caller, and time out if this takes too long.
868 */
869static struct dentry *open_root_dentry(struct ceph_fs_client *fsc,
870 const char *path,
871 unsigned long started)
872{
873 struct ceph_mds_client *mdsc = fsc->mdsc;
874 struct ceph_mds_request *req = NULL;
875 int err;
876 struct dentry *root;
877
878 /* open dir */
879 dout("open_root_inode opening '%s'\n", path);
880 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS);
881 if (IS_ERR(req))
882 return ERR_CAST(req);
883 req->r_path1 = kstrdup(path, GFP_NOFS);
884 if (!req->r_path1) {
885 root = ERR_PTR(-ENOMEM);
886 goto out;
887 }
888
889 req->r_ino1.ino = CEPH_INO_ROOT;
890 req->r_ino1.snap = CEPH_NOSNAP;
891 req->r_started = started;
892 req->r_timeout = fsc->client->options->mount_timeout;
893 req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE);
894 req->r_num_caps = 2;
895 err = ceph_mdsc_do_request(mdsc, NULL, req);
896 if (err == 0) {
897 struct inode *inode = req->r_target_inode;
898 req->r_target_inode = NULL;
899 dout("open_root_inode success\n");
900 root = d_make_root(inode);
901 if (!root) {
902 root = ERR_PTR(-ENOMEM);
903 goto out;
904 }
905 dout("open_root_inode success, root dentry is %p\n", root);
906 } else {
907 root = ERR_PTR(err);
908 }
909out:
910 ceph_mdsc_put_request(req);
911 return root;
912}
913
914/*
915 * mount: join the ceph cluster, and open root directory.
916 */
917static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc,
918 struct fs_context *fc)
919{
920 int err;
921 unsigned long started = jiffies; /* note the start time */
922 struct dentry *root;
923
924 dout("mount start %p\n", fsc);
925 mutex_lock(&fsc->client->mount_mutex);
926
927 if (!fsc->sb->s_root) {
928 const char *path = fsc->mount_options->server_path ?
929 fsc->mount_options->server_path + 1 : "";
930
931 err = __ceph_open_session(fsc->client, started);
932 if (err < 0)
933 goto out;
934
935 /* setup fscache */
936 if (fsc->mount_options->flags & CEPH_MOUNT_OPT_FSCACHE) {
937 err = ceph_fscache_register_fs(fsc, fc);
938 if (err < 0)
939 goto out;
940 }
941
942 dout("mount opening path '%s'\n", path);
943
944 ceph_fs_debugfs_init(fsc);
945
946 root = open_root_dentry(fsc, path, started);
947 if (IS_ERR(root)) {
948 err = PTR_ERR(root);
949 goto out;
950 }
951 fsc->sb->s_root = dget(root);
952 } else {
953 root = dget(fsc->sb->s_root);
954 }
955
956 fsc->mount_state = CEPH_MOUNT_MOUNTED;
957 dout("mount success\n");
958 mutex_unlock(&fsc->client->mount_mutex);
959 return root;
960
961out:
962 mutex_unlock(&fsc->client->mount_mutex);
963 return ERR_PTR(err);
964}
965
966static int ceph_set_super(struct super_block *s, struct fs_context *fc)
967{
968 struct ceph_fs_client *fsc = s->s_fs_info;
969 int ret;
970
971 dout("set_super %p\n", s);
972
973 s->s_maxbytes = MAX_LFS_FILESIZE;
974
975 s->s_xattr = ceph_xattr_handlers;
976 fsc->sb = s;
977 fsc->max_file_size = 1ULL << 40; /* temp value until we get mdsmap */
978
979 s->s_op = &ceph_super_ops;
980 s->s_d_op = &ceph_dentry_ops;
981 s->s_export_op = &ceph_export_ops;
982
983 s->s_time_gran = 1;
984 s->s_time_min = 0;
985 s->s_time_max = U32_MAX;
986
987 ret = set_anon_super_fc(s, fc);
988 if (ret != 0)
989 fsc->sb = NULL;
990 return ret;
991}
992
993/*
994 * share superblock if same fs AND options
995 */
996static int ceph_compare_super(struct super_block *sb, struct fs_context *fc)
997{
998 struct ceph_fs_client *new = fc->s_fs_info;
999 struct ceph_mount_options *fsopt = new->mount_options;
1000 struct ceph_options *opt = new->client->options;
1001 struct ceph_fs_client *other = ceph_sb_to_client(sb);
1002
1003 dout("ceph_compare_super %p\n", sb);
1004
1005 if (compare_mount_options(fsopt, opt, other)) {
1006 dout("monitor(s)/mount options don't match\n");
1007 return 0;
1008 }
1009 if ((opt->flags & CEPH_OPT_FSID) &&
1010 ceph_fsid_compare(&opt->fsid, &other->client->fsid)) {
1011 dout("fsid doesn't match\n");
1012 return 0;
1013 }
1014 if (fc->sb_flags != (sb->s_flags & ~SB_BORN)) {
1015 dout("flags differ\n");
1016 return 0;
1017 }
1018 return 1;
1019}
1020
1021/*
1022 * construct our own bdi so we can control readahead, etc.
1023 */
1024static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
1025
1026static int ceph_setup_bdi(struct super_block *sb, struct ceph_fs_client *fsc)
1027{
1028 int err;
1029
1030 err = super_setup_bdi_name(sb, "ceph-%ld",
1031 atomic_long_inc_return(&bdi_seq));
1032 if (err)
1033 return err;
1034
1035 /* set ra_pages based on rasize mount option? */
1036 sb->s_bdi->ra_pages = fsc->mount_options->rasize >> PAGE_SHIFT;
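 /* e.g. the default 8 MB rasize gives 2048 ra_pages with 4 KB pages
 * (illustrative; CEPH_RASIZE_DEFAULT is defined in super.h) */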
1037
1038 /* set io_pages based on max osd read size */
1039 sb->s_bdi->io_pages = fsc->mount_options->rsize >> PAGE_SHIFT;
1040
1041 return 0;
1042}
1043
1044static int ceph_get_tree(struct fs_context *fc)
1045{
1046 struct ceph_parse_opts_ctx *pctx = fc->fs_private;
1047 struct super_block *sb;
1048 struct ceph_fs_client *fsc;
1049 struct dentry *res;
1050 int (*compare_super)(struct super_block *, struct fs_context *) =
1051 ceph_compare_super;
1052 int err;
1053
1054 dout("ceph_get_tree\n");
1055
1056 if (!fc->source)
1057 return invalfc(fc, "No source");
1058
1059 /* create client (which we may/may not use) */
1060 fsc = create_fs_client(pctx->opts, pctx->copts);
1061 pctx->opts = NULL;
1062 pctx->copts = NULL;
1063 if (IS_ERR(fsc)) {
1064 err = PTR_ERR(fsc);
1065 goto out_final;
1066 }
1067
1068 err = ceph_mdsc_init(fsc);
1069 if (err < 0)
1070 goto out;
1071
1072 if (ceph_test_opt(fsc->client, NOSHARE))
1073 compare_super = NULL;
1074
1075 fc->s_fs_info = fsc;
1076 sb = sget_fc(fc, compare_super, ceph_set_super);
1077 fc->s_fs_info = NULL;
1078 if (IS_ERR(sb)) {
1079 err = PTR_ERR(sb);
1080 goto out;
1081 }
1082
1083 if (ceph_sb_to_client(sb) != fsc) {
1084 destroy_fs_client(fsc);
1085 fsc = ceph_sb_to_client(sb);
1086 dout("get_sb got existing client %p\n", fsc);
1087 } else {
1088 dout("get_sb using new client %p\n", fsc);
1089 err = ceph_setup_bdi(sb, fsc);
1090 if (err < 0)
1091 goto out_splat;
1092 }
1093
1094 res = ceph_real_mount(fsc, fc);
1095 if (IS_ERR(res)) {
1096 err = PTR_ERR(res);
1097 goto out_splat;
1098 }
1099 dout("root %p inode %p ino %llx.%llx\n", res,
1100 d_inode(res), ceph_vinop(d_inode(res)));
1101 fc->root = fsc->sb->s_root;
1102 return 0;
1103
1104out_splat:
1105 if (!ceph_mdsmap_is_cluster_available(fsc->mdsc->mdsmap)) {
1106 pr_info("No mds server is up or the cluster is laggy\n");
1107 err = -EHOSTUNREACH;
1108 }
1109
1110 ceph_mdsc_close_sessions(fsc->mdsc);
1111 deactivate_locked_super(sb);
1112 goto out_final;
1113
1114out:
1115 destroy_fs_client(fsc);
1116out_final:
1117 dout("ceph_get_tree fail %d\n", err);
1118 return err;
1119}
1120
1121static void ceph_free_fc(struct fs_context *fc)
1122{
1123 struct ceph_parse_opts_ctx *pctx = fc->fs_private;
1124
1125 if (pctx) {
1126 destroy_mount_options(pctx->opts);
1127 ceph_destroy_options(pctx->copts);
1128 kfree(pctx);
1129 }
1130}
1131
1132static int ceph_reconfigure_fc(struct fs_context *fc)
1133{
1134 struct ceph_parse_opts_ctx *pctx = fc->fs_private;
1135 struct ceph_mount_options *fsopt = pctx->opts;
1136 struct ceph_fs_client *fsc = ceph_sb_to_client(fc->root->d_sb);
1137
1138 if (fsopt->flags & CEPH_MOUNT_OPT_ASYNC_DIROPS)
1139 ceph_set_mount_opt(fsc, ASYNC_DIROPS);
1140 else
1141 ceph_clear_mount_opt(fsc, ASYNC_DIROPS);
1142
1143 sync_filesystem(fc->root->d_sb);
1144 return 0;
1145}
1146
1147static const struct fs_context_operations ceph_context_ops = {
1148 .free = ceph_free_fc,
1149 .parse_param = ceph_parse_mount_param,
1150 .get_tree = ceph_get_tree,
1151 .reconfigure = ceph_reconfigure_fc,
1152};
1153
1154/*
1155 * Set up the filesystem mount context.
1156 */
1157static int ceph_init_fs_context(struct fs_context *fc)
1158{
1159 struct ceph_parse_opts_ctx *pctx;
1160 struct ceph_mount_options *fsopt;
1161
1162 pctx = kzalloc(sizeof(*pctx), GFP_KERNEL);
1163 if (!pctx)
1164 return -ENOMEM;
1165
1166 pctx->copts = ceph_alloc_options();
1167 if (!pctx->copts)
1168 goto nomem;
1169
1170 pctx->opts = kzalloc(sizeof(*pctx->opts), GFP_KERNEL);
1171 if (!pctx->opts)
1172 goto nomem;
1173
1174 fsopt = pctx->opts;
1175 fsopt->flags = CEPH_MOUNT_OPT_DEFAULT;
1176
1177 fsopt->wsize = CEPH_MAX_WRITE_SIZE;
1178 fsopt->rsize = CEPH_MAX_READ_SIZE;
1179 fsopt->rasize = CEPH_RASIZE_DEFAULT;
1180 fsopt->snapdir_name = kstrdup(CEPH_SNAPDIRNAME_DEFAULT, GFP_KERNEL);
1181 if (!fsopt->snapdir_name)
1182 goto nomem;
1183
1184 fsopt->caps_wanted_delay_min = CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT;
1185 fsopt->caps_wanted_delay_max = CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT;
1186 fsopt->max_readdir = CEPH_MAX_READDIR_DEFAULT;
1187 fsopt->max_readdir_bytes = CEPH_MAX_READDIR_BYTES_DEFAULT;
1188 fsopt->congestion_kb = default_congestion_kb();
1189
1190#ifdef CONFIG_CEPH_FS_POSIX_ACL
1191 fc->sb_flags |= SB_POSIXACL;
1192#endif
1193
1194 fc->fs_private = pctx;
1195 fc->ops = &ceph_context_ops;
1196 return 0;
1197
1198nomem:
1199 destroy_mount_options(pctx->opts);
1200 ceph_destroy_options(pctx->copts);
1201 kfree(pctx);
1202 return -ENOMEM;
1203}
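/*
 * With the new mount API the VFS calls ceph_init_fs_context() once per
 * mount attempt, ceph_parse_mount_param() for each option, and finally
 * ceph_get_tree() (via ceph_context_ops) to create or reuse a superblock.
 */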
1204
1205static void ceph_kill_sb(struct super_block *s)
1206{
1207 struct ceph_fs_client *fsc = ceph_sb_to_client(s);
1208 dev_t dev = s->s_dev;
1209
1210 dout("kill_sb %p\n", s);
1211
1212 ceph_mdsc_pre_umount(fsc->mdsc);
1213 flush_fs_workqueues(fsc);
1214
1215 generic_shutdown_super(s);
1216
1217 fsc->client->extra_mon_dispatch = NULL;
1218 ceph_fs_debugfs_cleanup(fsc);
1219
1220 ceph_fscache_unregister_fs(fsc);
1221
1222 destroy_fs_client(fsc);
1223 free_anon_bdev(dev);
1224}
1225
1226static struct file_system_type ceph_fs_type = {
1227 .owner = THIS_MODULE,
1228 .name = "ceph",
1229 .init_fs_context = ceph_init_fs_context,
1230 .kill_sb = ceph_kill_sb,
1231 .fs_flags = FS_RENAME_DOES_D_MOVE,
1232};
1233MODULE_ALIAS_FS("ceph");
1234
1235int ceph_force_reconnect(struct super_block *sb)
1236{
1237 struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
1238 int err = 0;
1239
1240 ceph_umount_begin(sb);
1241
1242 /* Make sure all page caches get invalidated.
1243 * see remove_session_caps_cb() */
1244 flush_workqueue(fsc->inode_wq);
1245
1246 /* In case we were blacklisted. This also resets
1247 * all mon/osd connections. */
1248 ceph_reset_client_addr(fsc->client);
1249
1250 ceph_osdc_clear_abort_err(&fsc->client->osdc);
1251
1252 fsc->blacklisted = false;
1253 fsc->mount_state = CEPH_MOUNT_MOUNTED;
1254
1255 if (sb->s_root) {
1256 err = __ceph_do_getattr(d_inode(sb->s_root), NULL,
1257 CEPH_STAT_CAP_INODE, true);
1258 }
1259 return err;
1260}
1261
1262static int __init init_ceph(void)
1263{
1264 int ret = init_caches();
1265 if (ret)
1266 goto out;
1267
1268 ceph_flock_init();
1269 ret = register_filesystem(&ceph_fs_type);
1270 if (ret)
1271 goto out_caches;
1272
1273 pr_info("loaded (mds proto %d)\n", CEPH_MDSC_PROTOCOL);
1274
1275 return 0;
1276
1277out_caches:
1278 destroy_caches();
1279out:
1280 return ret;
1281}
1282
1283static void __exit exit_ceph(void)
1284{
1285 dout("exit_ceph\n");
1286 unregister_filesystem(&ceph_fs_type);
1287 destroy_caches();
1288}
1289
1290static int param_set_metrics(const char *val, const struct kernel_param *kp)
1291{
1292 struct ceph_fs_client *fsc;
1293 int ret;
1294
1295 ret = param_set_bool(val, kp);
1296 if (ret) {
1297 pr_err("Failed to parse sending metrics switch value '%s'\n",
1298 val);
1299 return ret;
1300 } else if (!disable_send_metrics) {
1301 // wake up all the mds clients
1302 spin_lock(&ceph_fsc_lock);
1303 list_for_each_entry(fsc, &ceph_fsc_list, metric_wakeup) {
1304 metric_schedule_delayed(&fsc->mdsc->metric);
1305 }
1306 spin_unlock(&ceph_fsc_lock);
1307 }
1308
1309 return 0;
1310}
1311
1312static const struct kernel_param_ops param_ops_metrics = {
1313 .set = param_set_metrics,
1314 .get = param_get_bool,
1315};
1316
1317bool disable_send_metrics = false;
1318module_param_cb(disable_send_metrics, &param_ops_metrics, &disable_send_metrics, 0644);
1319MODULE_PARM_DESC(disable_send_metrics, "Enable sending perf metrics to ceph cluster (default: on)");
1320
1321module_init(init_ceph);
1322module_exit(exit_ceph);
1323
1324MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
1325MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
1326MODULE_AUTHOR("Patience Warnick <patience@newdream.net>");
1327MODULE_DESCRIPTION("Ceph filesystem for Linux");
1328MODULE_LICENSE("GPL");