1
2#include <linux/ceph/ceph_debug.h>
3
4#include <linux/backing-dev.h>
5#include <linux/ctype.h>
6#include <linux/fs.h>
7#include <linux/inet.h>
8#include <linux/in6.h>
9#include <linux/module.h>
10#include <linux/mount.h>
11#include <linux/parser.h>
12#include <linux/sched.h>
13#include <linux/seq_file.h>
14#include <linux/slab.h>
15#include <linux/statfs.h>
16#include <linux/string.h>
17
18#include "super.h"
19#include "mds_client.h"
20#include "cache.h"
21
22#include <linux/ceph/ceph_features.h>
23#include <linux/ceph/decode.h>
24#include <linux/ceph/mon_client.h>
25#include <linux/ceph/auth.h>
26#include <linux/ceph/debugfs.h>
27
28/*
29 * Ceph superblock operations
30 *
31 * Handle the basics of mounting and unmounting.
32 */
33
34/*
35 * super ops
36 */
37static void ceph_put_super(struct super_block *s)
38{
39 struct ceph_fs_client *fsc = ceph_sb_to_client(s);
40
41 dout("put_super\n");
42 ceph_mdsc_close_sessions(fsc->mdsc);
43
44 /*
45 * ensure we release the bdi before put_anon_super releases
46 * the device name.
47 */
48 if (s->s_bdi == &fsc->backing_dev_info) {
49 bdi_unregister(&fsc->backing_dev_info);
50 s->s_bdi = NULL;
51 }
52
53 return;
54}
55
56static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
57{
58 struct ceph_fs_client *fsc = ceph_inode_to_client(dentry->d_inode);
59 struct ceph_monmap *monmap = fsc->client->monc.monmap;
60 struct ceph_statfs st;
61 u64 fsid;
62 int err;
63
64 dout("statfs\n");
65 err = ceph_monc_do_statfs(&fsc->client->monc, &st);
66 if (err < 0)
67 return err;
68
69 /* fill in kstatfs */
70 buf->f_type = CEPH_SUPER_MAGIC; /* ?? */
71
72 /*
73 * express utilization in terms of large blocks to avoid
74 * overflow on 32-bit machines.
75 *
76 * NOTE: for the time being, we make bsize == frsize to humor
77 * not-yet-ancient versions of glibc that are broken.
78 * Someday, we will probably want to report a real block
79 * size... whatever that may mean for a network file system!
80 */
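/*
 * Worked example of the arithmetic (CEPH_BLOCK_SHIFT is 22, i.e. a
 * "block" here is 4 MB): st.kb counts KB (2^10 bytes), so shifting
 * right by (22 - 10) = 12 converts KB into 4 MB blocks, e.g.
 * 8388608 KB (8 GB) becomes 2048 blocks.
 */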
81 buf->f_bsize = 1 << CEPH_BLOCK_SHIFT;
82 buf->f_frsize = 1 << CEPH_BLOCK_SHIFT;
83 buf->f_blocks = le64_to_cpu(st.kb) >> (CEPH_BLOCK_SHIFT-10);
84 buf->f_bfree = le64_to_cpu(st.kb_avail) >> (CEPH_BLOCK_SHIFT-10);
85 buf->f_bavail = le64_to_cpu(st.kb_avail) >> (CEPH_BLOCK_SHIFT-10);
86
87 buf->f_files = le64_to_cpu(st.num_objects);
88 buf->f_ffree = -1;
89 buf->f_namelen = NAME_MAX;
90
91 /* fold the 128-bit fsid into 64 bits by xoring its two halves */
92 fsid = *(u64 *)(&monmap->fsid) ^ *((u64 *)&monmap->fsid + 1);
93 buf->f_fsid.val[0] = fsid & 0xffffffff;
94 buf->f_fsid.val[1] = fsid >> 32;
95
96 return 0;
97}
98
99
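/*
 * sync_fs: with @wait == 0 just kick off writeback of dirty caps
 * (metadata) and return; with @wait != 0 block until the OSDs have
 * committed all in-flight file data and the MDS has committed all
 * metadata we have sent.
 */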
100static int ceph_sync_fs(struct super_block *sb, int wait)
101{
102 struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
103
104 if (!wait) {
105 dout("sync_fs (non-blocking)\n");
106 ceph_flush_dirty_caps(fsc->mdsc);
107 dout("sync_fs (non-blocking) done\n");
108 return 0;
109 }
110
111 dout("sync_fs (blocking)\n");
112 ceph_osdc_sync(&fsc->client->osdc);
113 ceph_mdsc_sync(fsc->mdsc);
114 dout("sync_fs (blocking) done\n");
115 return 0;
116}
117
118/*
119 * mount options
120 */
121enum {
122 Opt_wsize,
123 Opt_rsize,
124 Opt_rasize,
125 Opt_caps_wanted_delay_min,
126 Opt_caps_wanted_delay_max,
127 Opt_cap_release_safety,
128 Opt_readdir_max_entries,
129 Opt_readdir_max_bytes,
130 Opt_congestion_kb,
131 Opt_last_int,
132 /* int args above */
133 Opt_snapdirname,
134 Opt_last_string,
135 /* string args above */
136 Opt_dirstat,
137 Opt_nodirstat,
138 Opt_rbytes,
139 Opt_norbytes,
140 Opt_asyncreaddir,
141 Opt_noasyncreaddir,
142 Opt_dcache,
143 Opt_nodcache,
144 Opt_ino32,
145 Opt_noino32,
146 Opt_fscache,
147 Opt_nofscache,
148#ifdef CONFIG_CEPH_FS_POSIX_ACL
149 Opt_acl,
150#endif
151 Opt_noacl
152};
153
154static match_table_t fsopt_tokens = {
155 {Opt_wsize, "wsize=%d"},
156 {Opt_rsize, "rsize=%d"},
157 {Opt_rasize, "rasize=%d"},
158 {Opt_caps_wanted_delay_min, "caps_wanted_delay_min=%d"},
159 {Opt_caps_wanted_delay_max, "caps_wanted_delay_max=%d"},
160 {Opt_cap_release_safety, "cap_release_safety=%d"},
161 {Opt_readdir_max_entries, "readdir_max_entries=%d"},
162 {Opt_readdir_max_bytes, "readdir_max_bytes=%d"},
163 {Opt_congestion_kb, "write_congestion_kb=%d"},
164 /* int args above */
165 {Opt_snapdirname, "snapdirname=%s"},
166 /* string args above */
167 {Opt_dirstat, "dirstat"},
168 {Opt_nodirstat, "nodirstat"},
169 {Opt_rbytes, "rbytes"},
170 {Opt_norbytes, "norbytes"},
171 {Opt_asyncreaddir, "asyncreaddir"},
172 {Opt_noasyncreaddir, "noasyncreaddir"},
173 {Opt_dcache, "dcache"},
174 {Opt_nodcache, "nodcache"},
175 {Opt_ino32, "ino32"},
176 {Opt_noino32, "noino32"},
177 {Opt_fscache, "fsc"},
178 {Opt_nofscache, "nofsc"},
179#ifdef CONFIG_CEPH_FS_POSIX_ACL
180 {Opt_acl, "acl"},
181#endif
182 {Opt_noacl, "noacl"},
183 {-1, NULL}
184};
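/*
 * Example usage (hypothetical monitor address and credentials):
 *
 *   mount -t ceph 192.168.0.1:6789:/ /mnt/ceph \
 *         -o name=admin,secret=<key>,rsize=524288,noasyncreaddir
 *
 * The whole -o string is passed to ceph_parse_options(); options that
 * libceph itself does not recognize (rsize, noasyncreaddir, ...) are
 * handed to parse_fsopt_token() below, one token at a time.
 */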
185
186static int parse_fsopt_token(char *c, void *private)
187{
188 struct ceph_mount_options *fsopt = private;
189 substring_t argstr[MAX_OPT_ARGS];
190 int token, intval, ret;
191
192 token = match_token((char *)c, fsopt_tokens, argstr);
193 if (token < 0)
194 return -EINVAL;
195
196 if (token < Opt_last_int) {
197 ret = match_int(&argstr[0], &intval);
198 if (ret < 0) {
199 pr_err("bad mount option arg (not int) "
200 "at '%s'\n", c);
201 return ret;
202 }
203 dout("got int token %d val %d\n", token, intval);
204 } else if (token > Opt_last_int && token < Opt_last_string) {
205 dout("got string token %d val %s\n", token,
206 argstr[0].from);
207 } else {
208 dout("got token %d\n", token);
209 }
210
211 switch (token) {
212 case Opt_snapdirname:
213 kfree(fsopt->snapdir_name);
214 fsopt->snapdir_name = kstrndup(argstr[0].from,
215 argstr[0].to-argstr[0].from,
216 GFP_KERNEL);
217 if (!fsopt->snapdir_name)
218 return -ENOMEM;
219 break;
220
221 /* misc */
222 case Opt_wsize:
223 fsopt->wsize = intval;
224 break;
225 case Opt_rsize:
226 fsopt->rsize = intval;
227 break;
228 case Opt_rasize:
229 fsopt->rasize = intval;
230 break;
231 case Opt_caps_wanted_delay_min:
232 fsopt->caps_wanted_delay_min = intval;
233 break;
234 case Opt_caps_wanted_delay_max:
235 fsopt->caps_wanted_delay_max = intval;
236 break;
237 case Opt_readdir_max_entries:
238 fsopt->max_readdir = intval;
239 break;
240 case Opt_readdir_max_bytes:
241 fsopt->max_readdir_bytes = intval;
242 break;
243 case Opt_congestion_kb:
244 fsopt->congestion_kb = intval;
245 break;
246 case Opt_dirstat:
247 fsopt->flags |= CEPH_MOUNT_OPT_DIRSTAT;
248 break;
249 case Opt_nodirstat:
250 fsopt->flags &= ~CEPH_MOUNT_OPT_DIRSTAT;
251 break;
252 case Opt_rbytes:
253 fsopt->flags |= CEPH_MOUNT_OPT_RBYTES;
254 break;
255 case Opt_norbytes:
256 fsopt->flags &= ~CEPH_MOUNT_OPT_RBYTES;
257 break;
258 case Opt_asyncreaddir:
259 fsopt->flags &= ~CEPH_MOUNT_OPT_NOASYNCREADDIR;
260 break;
261 case Opt_noasyncreaddir:
262 fsopt->flags |= CEPH_MOUNT_OPT_NOASYNCREADDIR;
263 break;
264 case Opt_dcache:
265 fsopt->flags |= CEPH_MOUNT_OPT_DCACHE;
266 break;
267 case Opt_nodcache:
268 fsopt->flags &= ~CEPH_MOUNT_OPT_DCACHE;
269 break;
270 case Opt_ino32:
271 fsopt->flags |= CEPH_MOUNT_OPT_INO32;
272 break;
273 case Opt_noino32:
274 fsopt->flags &= ~CEPH_MOUNT_OPT_INO32;
275 break;
276 case Opt_fscache:
277 fsopt->flags |= CEPH_MOUNT_OPT_FSCACHE;
278 break;
279 case Opt_nofscache:
280 fsopt->flags &= ~CEPH_MOUNT_OPT_FSCACHE;
281 break;
282#ifdef CONFIG_CEPH_FS_POSIX_ACL
283 case Opt_acl:
284 fsopt->sb_flags |= MS_POSIXACL;
285 break;
286#endif
287 case Opt_noacl:
288 fsopt->sb_flags &= ~MS_POSIXACL;
289 break;
290 default:
291 BUG_ON(token);
292 }
293 return 0;
294}
295
296static void destroy_mount_options(struct ceph_mount_options *args)
297{
298 dout("destroy_mount_options %p\n", args);
299 kfree(args->snapdir_name);
300 kfree(args);
301}
302
303static int strcmp_null(const char *s1, const char *s2)
304{
305 if (!s1 && !s2)
306 return 0;
307 if (s1 && !s2)
308 return -1;
309 if (!s1 && s2)
310 return 1;
311 return strcmp(s1, s2);
312}
313
314static int compare_mount_options(struct ceph_mount_options *new_fsopt,
315 struct ceph_options *new_opt,
316 struct ceph_fs_client *fsc)
317{
318 struct ceph_mount_options *fsopt1 = new_fsopt;
319 struct ceph_mount_options *fsopt2 = fsc->mount_options;
320 int ofs = offsetof(struct ceph_mount_options, snapdir_name);
321 int ret;
322
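	/*
	 * The integer/flag option fields are laid out ahead of snapdir_name
	 * in struct ceph_mount_options, so a single memcmp() over that
	 * leading region compares all of them at once; the string options
	 * are compared separately below.
	 */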
323 ret = memcmp(fsopt1, fsopt2, ofs);
324 if (ret)
325 return ret;
326
327 ret = strcmp_null(fsopt1->snapdir_name, fsopt2->snapdir_name);
328 if (ret)
329 return ret;
330
331 return ceph_compare_options(new_opt, fsc->client);
332}
333
334static int parse_mount_options(struct ceph_mount_options **pfsopt,
335 struct ceph_options **popt,
336 int flags, char *options,
337 const char *dev_name,
338 const char **path)
339{
340 struct ceph_mount_options *fsopt;
341 const char *dev_name_end;
342 int err;
343
344 if (!dev_name || !*dev_name)
345 return -EINVAL;
346
347 fsopt = kzalloc(sizeof(*fsopt), GFP_KERNEL);
348 if (!fsopt)
349 return -ENOMEM;
350
351 dout("parse_mount_options %p, dev_name '%s'\n", fsopt, dev_name);
352
353 fsopt->sb_flags = flags;
354 fsopt->flags = CEPH_MOUNT_OPT_DEFAULT;
355
356 fsopt->rsize = CEPH_RSIZE_DEFAULT;
357 fsopt->rasize = CEPH_RASIZE_DEFAULT;
358 fsopt->snapdir_name = kstrdup(CEPH_SNAPDIRNAME_DEFAULT, GFP_KERNEL);
359 fsopt->caps_wanted_delay_min = CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT;
360 fsopt->caps_wanted_delay_max = CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT;
361 fsopt->cap_release_safety = CEPH_CAP_RELEASE_SAFETY_DEFAULT;
362 fsopt->max_readdir = CEPH_MAX_READDIR_DEFAULT;
363 fsopt->max_readdir_bytes = CEPH_MAX_READDIR_BYTES_DEFAULT;
364 fsopt->congestion_kb = default_congestion_kb();
365
366 /*
367 * Distinguish the server list from the path in "dev_name".
368 * Internally we do not include the leading '/' in the path.
369 *
370 * "dev_name" will look like:
371 * <server_spec>[,<server_spec>...]:[<path>]
372 * where
373 * <server_spec> is <ip>[:<port>]
374 * <path> is optional, but if present must begin with '/'
375 */
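	/*
	 * Example (hypothetical addresses): for
	 *   dev_name = "192.168.0.1:6789,192.168.0.2:6789:/export"
	 * the monitor list is everything before the ':' that precedes the
	 * first '/', and *path ends up pointing at "export".
	 */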
376 dev_name_end = strchr(dev_name, '/');
377 if (dev_name_end) {
378 /* skip over leading '/' for path */
379 *path = dev_name_end + 1;
380 } else {
381 /* path is empty */
382 dev_name_end = dev_name + strlen(dev_name);
383 *path = dev_name_end;
384 }
385 err = -EINVAL;
386 dev_name_end--; /* back up to ':' separator */
387 if (dev_name_end < dev_name || *dev_name_end != ':') {
388 pr_err("device name is missing path (no : separator in %s)\n",
389 dev_name);
390 goto out;
391 }
392 dout("device name '%.*s'\n", (int)(dev_name_end - dev_name), dev_name);
393 dout("server path '%s'\n", *path);
394
395 *popt = ceph_parse_options(options, dev_name, dev_name_end,
396 parse_fsopt_token, (void *)fsopt);
397 if (IS_ERR(*popt)) {
398 err = PTR_ERR(*popt);
399 goto out;
400 }
401
402 /* success */
403 *pfsopt = fsopt;
404 return 0;
405
406out:
407 destroy_mount_options(fsopt);
408 return err;
409}
410
411/**
412 * ceph_show_options - Show mount options in /proc/mounts
413 * @m: seq_file to write to
414 * @root: root of that (sub)tree
415 */
416static int ceph_show_options(struct seq_file *m, struct dentry *root)
417{
418 struct ceph_fs_client *fsc = ceph_sb_to_client(root->d_sb);
419 struct ceph_mount_options *fsopt = fsc->mount_options;
420 struct ceph_options *opt = fsc->client->options;
421
422 if (opt->flags & CEPH_OPT_FSID)
423 seq_printf(m, ",fsid=%pU", &opt->fsid);
424 if (opt->flags & CEPH_OPT_NOSHARE)
425 seq_puts(m, ",noshare");
426 if (opt->flags & CEPH_OPT_NOCRC)
427 seq_puts(m, ",nocrc");
428
429 if (opt->name)
430 seq_printf(m, ",name=%s", opt->name);
431 if (opt->key)
432 seq_puts(m, ",secret=<hidden>");
433
434 if (opt->mount_timeout != CEPH_MOUNT_TIMEOUT_DEFAULT)
435 seq_printf(m, ",mount_timeout=%d", opt->mount_timeout);
436 if (opt->osd_idle_ttl != CEPH_OSD_IDLE_TTL_DEFAULT)
437 seq_printf(m, ",osd_idle_ttl=%d", opt->osd_idle_ttl);
438 if (opt->osd_keepalive_timeout != CEPH_OSD_KEEPALIVE_DEFAULT)
439 seq_printf(m, ",osdkeepalivetimeout=%d",
440 opt->osd_keepalive_timeout);
441
442 if (fsopt->flags & CEPH_MOUNT_OPT_DIRSTAT)
443 seq_puts(m, ",dirstat");
444 if ((fsopt->flags & CEPH_MOUNT_OPT_RBYTES) == 0)
445 seq_puts(m, ",norbytes");
446 if (fsopt->flags & CEPH_MOUNT_OPT_NOASYNCREADDIR)
447 seq_puts(m, ",noasyncreaddir");
448 if (fsopt->flags & CEPH_MOUNT_OPT_DCACHE)
449 seq_puts(m, ",dcache");
450 else
451 seq_puts(m, ",nodcache");
452 if (fsopt->flags & CEPH_MOUNT_OPT_FSCACHE)
453 seq_puts(m, ",fsc");
454 else
455 seq_puts(m, ",nofsc");
456
457#ifdef CONFIG_CEPH_FS_POSIX_ACL
458 if (fsopt->sb_flags & MS_POSIXACL)
459 seq_puts(m, ",acl");
460 else
461 seq_puts(m, ",noacl");
462#endif
463
464 if (fsopt->wsize)
465 seq_printf(m, ",wsize=%d", fsopt->wsize);
466 if (fsopt->rsize != CEPH_RSIZE_DEFAULT)
467 seq_printf(m, ",rsize=%d", fsopt->rsize);
468 if (fsopt->rasize != CEPH_RASIZE_DEFAULT)
469 seq_printf(m, ",rasize=%d", fsopt->rasize);
470 if (fsopt->congestion_kb != default_congestion_kb())
471 seq_printf(m, ",write_congestion_kb=%d", fsopt->congestion_kb);
472 if (fsopt->caps_wanted_delay_min != CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT)
473 seq_printf(m, ",caps_wanted_delay_min=%d",
474 fsopt->caps_wanted_delay_min);
475 if (fsopt->caps_wanted_delay_max != CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT)
476 seq_printf(m, ",caps_wanted_delay_max=%d",
477 fsopt->caps_wanted_delay_max);
478 if (fsopt->cap_release_safety != CEPH_CAP_RELEASE_SAFETY_DEFAULT)
479 seq_printf(m, ",cap_release_safety=%d",
480 fsopt->cap_release_safety);
481 if (fsopt->max_readdir != CEPH_MAX_READDIR_DEFAULT)
482 seq_printf(m, ",readdir_max_entries=%d", fsopt->max_readdir);
483 if (fsopt->max_readdir_bytes != CEPH_MAX_READDIR_BYTES_DEFAULT)
484 seq_printf(m, ",readdir_max_bytes=%d", fsopt->max_readdir_bytes);
485 if (strcmp(fsopt->snapdir_name, CEPH_SNAPDIRNAME_DEFAULT))
486 seq_printf(m, ",snapdirname=%s", fsopt->snapdir_name);
487 return 0;
488}
489
490/*
491 * Handle any monitor messages the standard library doesn't understand;
492 * return an error if we don't understand them either.
493 */
494static int extra_mon_dispatch(struct ceph_client *client, struct ceph_msg *msg)
495{
496 struct ceph_fs_client *fsc = client->private;
497 int type = le16_to_cpu(msg->hdr.type);
498
499 switch (type) {
500 case CEPH_MSG_MDS_MAP:
501 ceph_mdsc_handle_map(fsc->mdsc, msg);
502 return 0;
503
504 default:
505 return -1;
506 }
507}
508
509/*
510 * create a new fs client
511 */
512static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt,
513 struct ceph_options *opt)
514{
515 struct ceph_fs_client *fsc;
516 const u64 supported_features =
517 CEPH_FEATURE_FLOCK |
518 CEPH_FEATURE_DIRLAYOUTHASH;
519 const u64 required_features = 0;
520 int page_count;
521 size_t size;
522 int err = -ENOMEM;
523
524 fsc = kzalloc(sizeof(*fsc), GFP_KERNEL);
525 if (!fsc)
526 return ERR_PTR(-ENOMEM);
527
528 fsc->client = ceph_create_client(opt, fsc, supported_features,
529 required_features);
530 if (IS_ERR(fsc->client)) {
531 err = PTR_ERR(fsc->client);
532 goto fail;
533 }
534 fsc->client->extra_mon_dispatch = extra_mon_dispatch;
535 fsc->client->monc.want_mdsmap = 1;
536
537 fsc->mount_options = fsopt;
538
539 fsc->sb = NULL;
540 fsc->mount_state = CEPH_MOUNT_MOUNTING;
541
542 atomic_long_set(&fsc->writeback_count, 0);
543
544 err = bdi_init(&fsc->backing_dev_info);
545 if (err < 0)
546 goto fail_client;
547
548 err = -ENOMEM;
549 /*
550 * The number of concurrent works can be high but they don't need
551 * to be processed in parallel, limit concurrency.
552 */
553 fsc->wb_wq = alloc_workqueue("ceph-writeback", 0, 1);
554 if (fsc->wb_wq == NULL)
555 goto fail_bdi;
556 fsc->pg_inv_wq = alloc_workqueue("ceph-pg-invalid", 0, 1);
557 if (fsc->pg_inv_wq == NULL)
558 goto fail_wb_wq;
559 fsc->trunc_wq = alloc_workqueue("ceph-trunc", 0, 1);
560 if (fsc->trunc_wq == NULL)
561 goto fail_pg_inv_wq;
562
563 /* set up mempools */
564 err = -ENOMEM;
565 page_count = fsc->mount_options->wsize >> PAGE_CACHE_SHIFT;
566 size = sizeof (struct page *) * (page_count ? page_count : 1);
567 fsc->wb_pagevec_pool = mempool_create_kmalloc_pool(10, size);
568 if (!fsc->wb_pagevec_pool)
569 goto fail_trunc_wq;
570
571 /* setup fscache */
572 if ((fsopt->flags & CEPH_MOUNT_OPT_FSCACHE) &&
573 (ceph_fscache_register_fs(fsc) != 0))
574 goto fail_fscache;
575
576 /* caps */
577 fsc->min_caps = fsopt->max_readdir;
578
579 return fsc;
580
581fail_fscache:
582 ceph_fscache_unregister_fs(fsc);
583fail_trunc_wq:
584 destroy_workqueue(fsc->trunc_wq);
585fail_pg_inv_wq:
586 destroy_workqueue(fsc->pg_inv_wq);
587fail_wb_wq:
588 destroy_workqueue(fsc->wb_wq);
589fail_bdi:
590 bdi_destroy(&fsc->backing_dev_info);
591fail_client:
592 ceph_destroy_client(fsc->client);
593fail:
594 kfree(fsc);
595 return ERR_PTR(err);
596}
597
598static void destroy_fs_client(struct ceph_fs_client *fsc)
599{
600 dout("destroy_fs_client %p\n", fsc);
601
602 ceph_fscache_unregister_fs(fsc);
603
604 destroy_workqueue(fsc->wb_wq);
605 destroy_workqueue(fsc->pg_inv_wq);
606 destroy_workqueue(fsc->trunc_wq);
607
608 bdi_destroy(&fsc->backing_dev_info);
609
610 mempool_destroy(fsc->wb_pagevec_pool);
611
612 destroy_mount_options(fsc->mount_options);
613
614 ceph_fs_debugfs_cleanup(fsc);
615
616 ceph_destroy_client(fsc->client);
617
618 kfree(fsc);
619 dout("destroy_fs_client %p done\n", fsc);
620}
621
622/*
623 * caches
624 */
625struct kmem_cache *ceph_inode_cachep;
626struct kmem_cache *ceph_cap_cachep;
627struct kmem_cache *ceph_dentry_cachep;
628struct kmem_cache *ceph_file_cachep;
629
630static void ceph_inode_init_once(void *foo)
631{
632 struct ceph_inode_info *ci = foo;
633 inode_init_once(&ci->vfs_inode);
634}
635
636static int __init init_caches(void)
637{
638 int error = -ENOMEM;
639
640 ceph_inode_cachep = kmem_cache_create("ceph_inode_info",
641 sizeof(struct ceph_inode_info),
642 __alignof__(struct ceph_inode_info),
643 (SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD),
644 ceph_inode_init_once);
645 if (ceph_inode_cachep == NULL)
646 return -ENOMEM;
647
648 ceph_cap_cachep = KMEM_CACHE(ceph_cap,
649 SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
650 if (ceph_cap_cachep == NULL)
651 goto bad_cap;
652
653 ceph_dentry_cachep = KMEM_CACHE(ceph_dentry_info,
654 SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
655 if (ceph_dentry_cachep == NULL)
656 goto bad_dentry;
657
658 ceph_file_cachep = KMEM_CACHE(ceph_file_info,
659 SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
660 if (ceph_file_cachep == NULL)
661 goto bad_file;
662
663 if ((error = ceph_fscache_register()))
664 goto bad_file;
665
666 return 0;
667bad_file:
668 kmem_cache_destroy(ceph_dentry_cachep);
669bad_dentry:
670 kmem_cache_destroy(ceph_cap_cachep);
671bad_cap:
672 kmem_cache_destroy(ceph_inode_cachep);
673 return error;
674}
675
676static void destroy_caches(void)
677{
678 /*
679 * Make sure all delayed rcu free inodes are flushed before we
680 * destroy cache.
681 */
682 rcu_barrier();
683
684 kmem_cache_destroy(ceph_inode_cachep);
685 kmem_cache_destroy(ceph_cap_cachep);
686 kmem_cache_destroy(ceph_dentry_cachep);
687 kmem_cache_destroy(ceph_file_cachep);
688
689 ceph_fscache_unregister();
690}
691
692
693/*
694 * ceph_umount_begin - initiate forced umount. Tear down the
695 * mount, skipping steps that may hang while waiting for server(s).
696 */
697static void ceph_umount_begin(struct super_block *sb)
698{
699 struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
700
701 dout("ceph_umount_begin - starting forced umount\n");
702 if (!fsc)
703 return;
704 fsc->mount_state = CEPH_MOUNT_SHUTDOWN;
705 return;
706}
707
708static const struct super_operations ceph_super_ops = {
709 .alloc_inode = ceph_alloc_inode,
710 .destroy_inode = ceph_destroy_inode,
711 .write_inode = ceph_write_inode,
712 .drop_inode = ceph_drop_inode,
713 .sync_fs = ceph_sync_fs,
714 .put_super = ceph_put_super,
715 .show_options = ceph_show_options,
716 .statfs = ceph_statfs,
717 .umount_begin = ceph_umount_begin,
718};
719
720/*
721 * Bootstrap mount by opening the root directory. Note the mount
722 * @started time from caller, and time out if this takes too long.
723 */
724static struct dentry *open_root_dentry(struct ceph_fs_client *fsc,
725 const char *path,
726 unsigned long started)
727{
728 struct ceph_mds_client *mdsc = fsc->mdsc;
729 struct ceph_mds_request *req = NULL;
730 int err;
731 struct dentry *root;
732
733 /* open dir */
734 dout("open_root_inode opening '%s'\n", path);
735 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS);
736 if (IS_ERR(req))
737 return ERR_CAST(req);
738 req->r_path1 = kstrdup(path, GFP_NOFS);
739 req->r_ino1.ino = CEPH_INO_ROOT;
740 req->r_ino1.snap = CEPH_NOSNAP;
741 req->r_started = started;
742 req->r_timeout = fsc->client->options->mount_timeout * HZ;
743 req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE);
744 req->r_num_caps = 2;
745 err = ceph_mdsc_do_request(mdsc, NULL, req);
746 if (err == 0) {
747 struct inode *inode = req->r_target_inode;
748 req->r_target_inode = NULL;
749 dout("open_root_inode success\n");
750 if (ceph_ino(inode) == CEPH_INO_ROOT &&
751 fsc->sb->s_root == NULL) {
752 root = d_make_root(inode);
753 if (!root) {
754 root = ERR_PTR(-ENOMEM);
755 goto out;
756 }
757 } else {
758 root = d_obtain_alias(inode);
759 }
760 ceph_init_dentry(root);
761 dout("open_root_inode success, root dentry is %p\n", root);
762 } else {
763 root = ERR_PTR(err);
764 }
765out:
766 ceph_mdsc_put_request(req);
767 return root;
768}
769
770
771
772
773/*
774 * mount: join the ceph cluster, and open root directory.
775 */
776static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc,
777 const char *path)
778{
779 int err;
780 unsigned long started = jiffies; /* note the start time */
781 struct dentry *root;
782 int first = 0; /* first vfsmount for this super_block */
783
784 dout("mount start\n");
785 mutex_lock(&fsc->client->mount_mutex);
786
787 err = __ceph_open_session(fsc->client, started);
788 if (err < 0)
789 goto out;
790
791 dout("mount opening root\n");
792 root = open_root_dentry(fsc, "", started);
793 if (IS_ERR(root)) {
794 err = PTR_ERR(root);
795 goto out;
796 }
797 if (fsc->sb->s_root) {
798 dput(root);
799 } else {
800 fsc->sb->s_root = root;
801 first = 1;
802
803 err = ceph_fs_debugfs_init(fsc);
804 if (err < 0)
805 goto fail;
806 }
807
808 if (path[0] == 0) {
809 dget(root);
810 } else {
811 dout("mount opening base mountpoint\n");
812 root = open_root_dentry(fsc, path, started);
813 if (IS_ERR(root)) {
814 err = PTR_ERR(root);
815 goto fail;
816 }
817 }
818
819 fsc->mount_state = CEPH_MOUNT_MOUNTED;
820 dout("mount success\n");
821 mutex_unlock(&fsc->client->mount_mutex);
822 return root;
823
824out:
825 mutex_unlock(&fsc->client->mount_mutex);
826 return ERR_PTR(err);
827
828fail:
829 if (first) {
830 dput(fsc->sb->s_root);
831 fsc->sb->s_root = NULL;
832 }
833 goto out;
834}
835
836static int ceph_set_super(struct super_block *s, void *data)
837{
838 struct ceph_fs_client *fsc = data;
839 int ret;
840
841 dout("set_super %p data %p\n", s, data);
842
843 s->s_flags = fsc->mount_options->sb_flags;
844 s->s_maxbytes = 1ULL << 40; /* temp value until we get mdsmap */
845
846 s->s_xattr = ceph_xattr_handlers;
847 s->s_fs_info = fsc;
848 fsc->sb = s;
849
850 s->s_op = &ceph_super_ops;
851 s->s_export_op = &ceph_export_ops;
852
853 s->s_time_gran = 1000; /* 1000 ns == 1 us */
854
855 ret = set_anon_super(s, NULL); /* what is that second arg for? */
856 if (ret != 0)
857 goto fail;
858
859 return ret;
860
861fail:
862 s->s_fs_info = NULL;
863 fsc->sb = NULL;
864 return ret;
865}
866
867/*
868 * share superblock if same fs AND options
869 */
870static int ceph_compare_super(struct super_block *sb, void *data)
871{
872 struct ceph_fs_client *new = data;
873 struct ceph_mount_options *fsopt = new->mount_options;
874 struct ceph_options *opt = new->client->options;
875 struct ceph_fs_client *other = ceph_sb_to_client(sb);
876
877 dout("ceph_compare_super %p\n", sb);
878
879 if (compare_mount_options(fsopt, opt, other)) {
880 dout("monitor(s)/mount options don't match\n");
881 return 0;
882 }
883 if ((opt->flags & CEPH_OPT_FSID) &&
884 ceph_fsid_compare(&opt->fsid, &other->client->fsid)) {
885 dout("fsid doesn't match\n");
886 return 0;
887 }
888 if (fsopt->sb_flags != other->mount_options->sb_flags) {
889 dout("flags differ\n");
890 return 0;
891 }
892 return 1;
893}
894
895/*
896 * construct our own bdi so we can control readahead, etc.
897 */
898static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
899
900static int ceph_register_bdi(struct super_block *sb,
901 struct ceph_fs_client *fsc)
902{
903 int err;
904
905 /* set ra_pages based on rasize mount option? */
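	/*
	 * e.g. assuming the default 8 MB rasize (CEPH_RASIZE_DEFAULT) and
	 * 4 KB pages, this rounds up to ra_pages = 2048.
	 */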
906 if (fsc->mount_options->rasize >= PAGE_CACHE_SIZE)
907 fsc->backing_dev_info.ra_pages =
908 (fsc->mount_options->rasize + PAGE_CACHE_SIZE - 1)
909 >> PAGE_SHIFT;
910 else
911 fsc->backing_dev_info.ra_pages =
912 default_backing_dev_info.ra_pages;
913
914 err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%ld",
915 atomic_long_inc_return(&bdi_seq));
916 if (!err)
917 sb->s_bdi = &fsc->backing_dev_info;
918 return err;
919}
920
921static struct dentry *ceph_mount(struct file_system_type *fs_type,
922 int flags, const char *dev_name, void *data)
923{
924 struct super_block *sb;
925 struct ceph_fs_client *fsc;
926 struct dentry *res;
927 int err;
928 int (*compare_super)(struct super_block *, void *) = ceph_compare_super;
929 const char *path = NULL;
930 struct ceph_mount_options *fsopt = NULL;
931 struct ceph_options *opt = NULL;
932
933 dout("ceph_mount\n");
934
935#ifdef CONFIG_CEPH_FS_POSIX_ACL
936 flags |= MS_POSIXACL;
937#endif
938 err = parse_mount_options(&fsopt, &opt, flags, data, dev_name, &path);
939 if (err < 0) {
940 res = ERR_PTR(err);
941 goto out_final;
942 }
943
944 /* create client (which we may/may not use) */
945 fsc = create_fs_client(fsopt, opt);
946 if (IS_ERR(fsc)) {
947 res = ERR_CAST(fsc);
948 destroy_mount_options(fsopt);
949 ceph_destroy_options(opt);
950 goto out_final;
951 }
952
953 err = ceph_mdsc_init(fsc);
954 if (err < 0) {
955 res = ERR_PTR(err);
956 goto out;
957 }
958
959 if (ceph_test_opt(fsc->client, NOSHARE))
960 compare_super = NULL;
961 sb = sget(fs_type, compare_super, ceph_set_super, flags, fsc);
962 if (IS_ERR(sb)) {
963 res = ERR_CAST(sb);
964 goto out;
965 }
966
967 if (ceph_sb_to_client(sb) != fsc) {
968 ceph_mdsc_destroy(fsc);
969 destroy_fs_client(fsc);
970 fsc = ceph_sb_to_client(sb);
971 dout("get_sb got existing client %p\n", fsc);
972 } else {
973 dout("get_sb using new client %p\n", fsc);
974 err = ceph_register_bdi(sb, fsc);
975 if (err < 0) {
976 res = ERR_PTR(err);
977 goto out_splat;
978 }
979 }
980
981 res = ceph_real_mount(fsc, path);
982 if (IS_ERR(res))
983 goto out_splat;
984 dout("root %p inode %p ino %llx.%llx\n", res,
985 res->d_inode, ceph_vinop(res->d_inode));
986 return res;
987
988out_splat:
989 ceph_mdsc_close_sessions(fsc->mdsc);
990 deactivate_locked_super(sb);
991 goto out_final;
992
993out:
994 ceph_mdsc_destroy(fsc);
995 destroy_fs_client(fsc);
996out_final:
997 dout("ceph_mount fail %ld\n", PTR_ERR(res));
998 return res;
999}
1000
1001static void ceph_kill_sb(struct super_block *s)
1002{
1003 struct ceph_fs_client *fsc = ceph_sb_to_client(s);
1004 dout("kill_sb %p\n", s);
1005 ceph_mdsc_pre_umount(fsc->mdsc);
1006 kill_anon_super(s); /* will call put_super after sb is r/o */
1007 ceph_mdsc_destroy(fsc);
1008 destroy_fs_client(fsc);
1009}
1010
1011static struct file_system_type ceph_fs_type = {
1012 .owner = THIS_MODULE,
1013 .name = "ceph",
1014 .mount = ceph_mount,
1015 .kill_sb = ceph_kill_sb,
1016 .fs_flags = FS_RENAME_DOES_D_MOVE,
1017};
1018MODULE_ALIAS_FS("ceph");
1019
1020#define _STRINGIFY(x) #x
1021#define STRINGIFY(x) _STRINGIFY(x)
1022
1023static int __init init_ceph(void)
1024{
1025 int ret = init_caches();
1026 if (ret)
1027 goto out;
1028
1029 ceph_flock_init();
1030 ceph_xattr_init();
1031 ret = register_filesystem(&ceph_fs_type);
1032 if (ret)
1033 goto out_icache;
1034
1035 pr_info("loaded (mds proto %d)\n", CEPH_MDSC_PROTOCOL);
1036
1037 return 0;
1038
1039out_icache:
1040 ceph_xattr_exit();
1041 destroy_caches();
1042out:
1043 return ret;
1044}
1045
1046static void __exit exit_ceph(void)
1047{
1048 dout("exit_ceph\n");
1049 unregister_filesystem(&ceph_fs_type);
1050 ceph_xattr_exit();
1051 destroy_caches();
1052}
1053
1054module_init(init_ceph);
1055module_exit(exit_ceph);
1056
1057MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
1058MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
1059MODULE_AUTHOR("Patience Warnick <patience@newdream.net>");
1060MODULE_DESCRIPTION("Ceph filesystem for Linux");
1061MODULE_LICENSE("GPL");
1
2#include <linux/ceph/ceph_debug.h>
3
4#include <linux/backing-dev.h>
5#include <linux/ctype.h>
6#include <linux/fs.h>
7#include <linux/inet.h>
8#include <linux/in6.h>
9#include <linux/module.h>
10#include <linux/mount.h>
11#include <linux/parser.h>
12#include <linux/sched.h>
13#include <linux/seq_file.h>
14#include <linux/slab.h>
15#include <linux/statfs.h>
16#include <linux/string.h>
17
18#include "super.h"
19#include "mds_client.h"
20#include "cache.h"
21
22#include <linux/ceph/ceph_features.h>
23#include <linux/ceph/decode.h>
24#include <linux/ceph/mon_client.h>
25#include <linux/ceph/auth.h>
26#include <linux/ceph/debugfs.h>
27
28/*
29 * Ceph superblock operations
30 *
31 * Handle the basics of mounting and unmounting.
32 */
33
34/*
35 * super ops
36 */
37static void ceph_put_super(struct super_block *s)
38{
39 struct ceph_fs_client *fsc = ceph_sb_to_client(s);
40
41 dout("put_super\n");
42 ceph_mdsc_close_sessions(fsc->mdsc);
43}
44
45static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
46{
47 struct ceph_fs_client *fsc = ceph_inode_to_client(d_inode(dentry));
48 struct ceph_monmap *monmap = fsc->client->monc.monmap;
49 struct ceph_statfs st;
50 u64 fsid;
51 int err;
52 u64 data_pool;
53
54 if (fsc->mdsc->mdsmap->m_num_data_pg_pools == 1) {
55 data_pool = fsc->mdsc->mdsmap->m_data_pg_pools[0];
56 } else {
57 data_pool = CEPH_NOPOOL;
58 }
59
60 dout("statfs\n");
61 err = ceph_monc_do_statfs(&fsc->client->monc, data_pool, &st);
62 if (err < 0)
63 return err;
64
65 /* fill in kstatfs */
66 buf->f_type = CEPH_SUPER_MAGIC; /* ?? */
67
68 /*
69 * express utilization in terms of large blocks to avoid
70 * overflow on 32-bit machines.
71 *
72 * NOTE: for the time being, we make bsize == frsize to humor
73 * not-yet-ancient versions of glibc that are broken.
74 * Someday, we will probably want to report a real block
75 * size... whatever that may mean for a network file system!
76 */
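/*
 * Worked example of the arithmetic (CEPH_BLOCK_SHIFT is 22, i.e. a
 * "block" here is 4 MB): st.kb counts KB (2^10 bytes), so shifting
 * right by (22 - 10) = 12 converts KB into 4 MB blocks, e.g.
 * 8388608 KB (8 GB) becomes 2048 blocks.
 */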
77 buf->f_bsize = 1 << CEPH_BLOCK_SHIFT;
78 buf->f_frsize = 1 << CEPH_BLOCK_SHIFT;
79
80 /*
81 * By default use root quota for stats; fallback to overall filesystem
82 * usage if using 'noquotadf' mount option or if the root dir doesn't
83 * have max_bytes quota set.
84 */
85 if (ceph_test_mount_opt(fsc, NOQUOTADF) ||
86 !ceph_quota_update_statfs(fsc, buf)) {
87 buf->f_blocks = le64_to_cpu(st.kb) >> (CEPH_BLOCK_SHIFT-10);
88 buf->f_bfree = le64_to_cpu(st.kb_avail) >> (CEPH_BLOCK_SHIFT-10);
89 buf->f_bavail = le64_to_cpu(st.kb_avail) >> (CEPH_BLOCK_SHIFT-10);
90 }
91
92 buf->f_files = le64_to_cpu(st.num_objects);
93 buf->f_ffree = -1;
94 buf->f_namelen = NAME_MAX;
95
96 /* Must convert the fsid, for consistent values across arches */
97 fsid = le64_to_cpu(*(__le64 *)(&monmap->fsid)) ^
98 le64_to_cpu(*((__le64 *)&monmap->fsid + 1));
99 buf->f_fsid.val[0] = fsid & 0xffffffff;
100 buf->f_fsid.val[1] = fsid >> 32;
101
102 return 0;
103}
104
105
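/*
 * sync_fs: with @wait == 0 just kick off writeback of dirty caps
 * (metadata) and return; with @wait != 0 block until the OSDs have
 * committed all in-flight file data and the MDS has committed all
 * metadata we have sent.
 */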
106static int ceph_sync_fs(struct super_block *sb, int wait)
107{
108 struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
109
110 if (!wait) {
111 dout("sync_fs (non-blocking)\n");
112 ceph_flush_dirty_caps(fsc->mdsc);
113 dout("sync_fs (non-blocking) done\n");
114 return 0;
115 }
116
117 dout("sync_fs (blocking)\n");
118 ceph_osdc_sync(&fsc->client->osdc);
119 ceph_mdsc_sync(fsc->mdsc);
120 dout("sync_fs (blocking) done\n");
121 return 0;
122}
123
124/*
125 * mount options
126 */
127enum {
128 Opt_wsize,
129 Opt_rsize,
130 Opt_rasize,
131 Opt_caps_wanted_delay_min,
132 Opt_caps_wanted_delay_max,
133 Opt_readdir_max_entries,
134 Opt_readdir_max_bytes,
135 Opt_congestion_kb,
136 Opt_last_int,
137 /* int args above */
138 Opt_snapdirname,
139 Opt_mds_namespace,
140 Opt_fscache_uniq,
141 Opt_last_string,
142 /* string args above */
143 Opt_dirstat,
144 Opt_nodirstat,
145 Opt_rbytes,
146 Opt_norbytes,
147 Opt_asyncreaddir,
148 Opt_noasyncreaddir,
149 Opt_dcache,
150 Opt_nodcache,
151 Opt_ino32,
152 Opt_noino32,
153 Opt_fscache,
154 Opt_nofscache,
155 Opt_poolperm,
156 Opt_nopoolperm,
157 Opt_require_active_mds,
158 Opt_norequire_active_mds,
159#ifdef CONFIG_CEPH_FS_POSIX_ACL
160 Opt_acl,
161#endif
162 Opt_noacl,
163 Opt_quotadf,
164 Opt_noquotadf,
165};
166
167static match_table_t fsopt_tokens = {
168 {Opt_wsize, "wsize=%d"},
169 {Opt_rsize, "rsize=%d"},
170 {Opt_rasize, "rasize=%d"},
171 {Opt_caps_wanted_delay_min, "caps_wanted_delay_min=%d"},
172 {Opt_caps_wanted_delay_max, "caps_wanted_delay_max=%d"},
173 {Opt_readdir_max_entries, "readdir_max_entries=%d"},
174 {Opt_readdir_max_bytes, "readdir_max_bytes=%d"},
175 {Opt_congestion_kb, "write_congestion_kb=%d"},
176 /* int args above */
177 {Opt_snapdirname, "snapdirname=%s"},
178 {Opt_mds_namespace, "mds_namespace=%s"},
179 {Opt_fscache_uniq, "fsc=%s"},
180 /* string args above */
181 {Opt_dirstat, "dirstat"},
182 {Opt_nodirstat, "nodirstat"},
183 {Opt_rbytes, "rbytes"},
184 {Opt_norbytes, "norbytes"},
185 {Opt_asyncreaddir, "asyncreaddir"},
186 {Opt_noasyncreaddir, "noasyncreaddir"},
187 {Opt_dcache, "dcache"},
188 {Opt_nodcache, "nodcache"},
189 {Opt_ino32, "ino32"},
190 {Opt_noino32, "noino32"},
191 {Opt_fscache, "fsc"},
192 {Opt_nofscache, "nofsc"},
193 {Opt_poolperm, "poolperm"},
194 {Opt_nopoolperm, "nopoolperm"},
195 {Opt_require_active_mds, "require_active_mds"},
196 {Opt_norequire_active_mds, "norequire_active_mds"},
197#ifdef CONFIG_CEPH_FS_POSIX_ACL
198 {Opt_acl, "acl"},
199#endif
200 {Opt_noacl, "noacl"},
201 {Opt_quotadf, "quotadf"},
202 {Opt_noquotadf, "noquotadf"},
203 {-1, NULL}
204};
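/*
 * Example usage (hypothetical monitor address and file system name):
 *
 *   mount -t ceph 192.168.0.1:6789:/ /mnt/ceph \
 *         -o name=admin,mds_namespace=cephfs,rsize=1048576,noasyncreaddir
 *
 * ceph_parse_options() consumes the generic libceph options and hands
 * anything it does not recognize (mds_namespace, rsize, ...) to
 * parse_fsopt_token() below, one token at a time.
 */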
205
206static int parse_fsopt_token(char *c, void *private)
207{
208 struct ceph_mount_options *fsopt = private;
209 substring_t argstr[MAX_OPT_ARGS];
210 int token, intval, ret;
211
212 token = match_token((char *)c, fsopt_tokens, argstr);
213 if (token < 0)
214 return -EINVAL;
215
216 if (token < Opt_last_int) {
217 ret = match_int(&argstr[0], &intval);
218 if (ret < 0) {
219 pr_err("bad mount option arg (not int) "
220 "at '%s'\n", c);
221 return ret;
222 }
223 dout("got int token %d val %d\n", token, intval);
224 } else if (token > Opt_last_int && token < Opt_last_string) {
225 dout("got string token %d val %s\n", token,
226 argstr[0].from);
227 } else {
228 dout("got token %d\n", token);
229 }
230
231 switch (token) {
232 case Opt_snapdirname:
233 kfree(fsopt->snapdir_name);
234 fsopt->snapdir_name = kstrndup(argstr[0].from,
235 argstr[0].to-argstr[0].from,
236 GFP_KERNEL);
237 if (!fsopt->snapdir_name)
238 return -ENOMEM;
239 break;
240 case Opt_mds_namespace:
241 kfree(fsopt->mds_namespace);
242 fsopt->mds_namespace = kstrndup(argstr[0].from,
243 argstr[0].to-argstr[0].from,
244 GFP_KERNEL);
245 if (!fsopt->mds_namespace)
246 return -ENOMEM;
247 break;
248 case Opt_fscache_uniq:
249 kfree(fsopt->fscache_uniq);
250 fsopt->fscache_uniq = kstrndup(argstr[0].from,
251 argstr[0].to-argstr[0].from,
252 GFP_KERNEL);
253 if (!fsopt->fscache_uniq)
254 return -ENOMEM;
255 fsopt->flags |= CEPH_MOUNT_OPT_FSCACHE;
256 break;
257 /* misc */
258 case Opt_wsize:
259 if (intval < PAGE_SIZE || intval > CEPH_MAX_WRITE_SIZE)
260 return -EINVAL;
261 fsopt->wsize = ALIGN(intval, PAGE_SIZE);
262 break;
263 case Opt_rsize:
264 if (intval < PAGE_SIZE || intval > CEPH_MAX_READ_SIZE)
265 return -EINVAL;
266 fsopt->rsize = ALIGN(intval, PAGE_SIZE);
267 break;
268 case Opt_rasize:
269 if (intval < 0)
270 return -EINVAL;
271 fsopt->rasize = ALIGN(intval + PAGE_SIZE - 1, PAGE_SIZE);
272 break;
273 case Opt_caps_wanted_delay_min:
274 if (intval < 1)
275 return -EINVAL;
276 fsopt->caps_wanted_delay_min = intval;
277 break;
278 case Opt_caps_wanted_delay_max:
279 if (intval < 1)
280 return -EINVAL;
281 fsopt->caps_wanted_delay_max = intval;
282 break;
283 case Opt_readdir_max_entries:
284 if (intval < 1)
285 return -EINVAL;
286 fsopt->max_readdir = intval;
287 break;
288 case Opt_readdir_max_bytes:
289 if (intval < PAGE_SIZE && intval != 0)
290 return -EINVAL;
291 fsopt->max_readdir_bytes = intval;
292 break;
293 case Opt_congestion_kb:
294 if (intval < 1024) /* at least 1M */
295 return -EINVAL;
296 fsopt->congestion_kb = intval;
297 break;
298 case Opt_dirstat:
299 fsopt->flags |= CEPH_MOUNT_OPT_DIRSTAT;
300 break;
301 case Opt_nodirstat:
302 fsopt->flags &= ~CEPH_MOUNT_OPT_DIRSTAT;
303 break;
304 case Opt_rbytes:
305 fsopt->flags |= CEPH_MOUNT_OPT_RBYTES;
306 break;
307 case Opt_norbytes:
308 fsopt->flags &= ~CEPH_MOUNT_OPT_RBYTES;
309 break;
310 case Opt_asyncreaddir:
311 fsopt->flags &= ~CEPH_MOUNT_OPT_NOASYNCREADDIR;
312 break;
313 case Opt_noasyncreaddir:
314 fsopt->flags |= CEPH_MOUNT_OPT_NOASYNCREADDIR;
315 break;
316 case Opt_dcache:
317 fsopt->flags |= CEPH_MOUNT_OPT_DCACHE;
318 break;
319 case Opt_nodcache:
320 fsopt->flags &= ~CEPH_MOUNT_OPT_DCACHE;
321 break;
322 case Opt_ino32:
323 fsopt->flags |= CEPH_MOUNT_OPT_INO32;
324 break;
325 case Opt_noino32:
326 fsopt->flags &= ~CEPH_MOUNT_OPT_INO32;
327 break;
328 case Opt_fscache:
329 fsopt->flags |= CEPH_MOUNT_OPT_FSCACHE;
330 kfree(fsopt->fscache_uniq);
331 fsopt->fscache_uniq = NULL;
332 break;
333 case Opt_nofscache:
334 fsopt->flags &= ~CEPH_MOUNT_OPT_FSCACHE;
335 kfree(fsopt->fscache_uniq);
336 fsopt->fscache_uniq = NULL;
337 break;
338 case Opt_poolperm:
339 fsopt->flags &= ~CEPH_MOUNT_OPT_NOPOOLPERM;
340 break;
341 case Opt_nopoolperm:
342 fsopt->flags |= CEPH_MOUNT_OPT_NOPOOLPERM;
343 break;
344 case Opt_require_active_mds:
345 fsopt->flags &= ~CEPH_MOUNT_OPT_MOUNTWAIT;
346 break;
347 case Opt_norequire_active_mds:
348 fsopt->flags |= CEPH_MOUNT_OPT_MOUNTWAIT;
349 break;
350 case Opt_quotadf:
351 fsopt->flags &= ~CEPH_MOUNT_OPT_NOQUOTADF;
352 break;
353 case Opt_noquotadf:
354 fsopt->flags |= CEPH_MOUNT_OPT_NOQUOTADF;
355 break;
356#ifdef CONFIG_CEPH_FS_POSIX_ACL
357 case Opt_acl:
358 fsopt->sb_flags |= SB_POSIXACL;
359 break;
360#endif
361 case Opt_noacl:
362 fsopt->sb_flags &= ~SB_POSIXACL;
363 break;
364 default:
365 BUG_ON(token);
366 }
367 return 0;
368}
369
370static void destroy_mount_options(struct ceph_mount_options *args)
371{
372 dout("destroy_mount_options %p\n", args);
373 kfree(args->snapdir_name);
374 kfree(args->mds_namespace);
375 kfree(args->server_path);
376 kfree(args->fscache_uniq);
377 kfree(args);
378}
379
380static int strcmp_null(const char *s1, const char *s2)
381{
382 if (!s1 && !s2)
383 return 0;
384 if (s1 && !s2)
385 return -1;
386 if (!s1 && s2)
387 return 1;
388 return strcmp(s1, s2);
389}
390
391static int compare_mount_options(struct ceph_mount_options *new_fsopt,
392 struct ceph_options *new_opt,
393 struct ceph_fs_client *fsc)
394{
395 struct ceph_mount_options *fsopt1 = new_fsopt;
396 struct ceph_mount_options *fsopt2 = fsc->mount_options;
397 int ofs = offsetof(struct ceph_mount_options, snapdir_name);
398 int ret;
399
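	/*
	 * The integer/flag option fields are laid out ahead of snapdir_name
	 * in struct ceph_mount_options, so a single memcmp() over that
	 * leading region compares all of them at once; the string options
	 * (snapdir_name, mds_namespace, server_path, fscache_uniq) are
	 * compared individually below.
	 */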
400 ret = memcmp(fsopt1, fsopt2, ofs);
401 if (ret)
402 return ret;
403
404 ret = strcmp_null(fsopt1->snapdir_name, fsopt2->snapdir_name);
405 if (ret)
406 return ret;
407 ret = strcmp_null(fsopt1->mds_namespace, fsopt2->mds_namespace);
408 if (ret)
409 return ret;
410 ret = strcmp_null(fsopt1->server_path, fsopt2->server_path);
411 if (ret)
412 return ret;
413 ret = strcmp_null(fsopt1->fscache_uniq, fsopt2->fscache_uniq);
414 if (ret)
415 return ret;
416
417 return ceph_compare_options(new_opt, fsc->client);
418}
419
420static int parse_mount_options(struct ceph_mount_options **pfsopt,
421 struct ceph_options **popt,
422 int flags, char *options,
423 const char *dev_name)
424{
425 struct ceph_mount_options *fsopt;
426 const char *dev_name_end;
427 int err;
428
429 if (!dev_name || !*dev_name)
430 return -EINVAL;
431
432 fsopt = kzalloc(sizeof(*fsopt), GFP_KERNEL);
433 if (!fsopt)
434 return -ENOMEM;
435
436 dout("parse_mount_options %p, dev_name '%s'\n", fsopt, dev_name);
437
438 fsopt->sb_flags = flags;
439 fsopt->flags = CEPH_MOUNT_OPT_DEFAULT;
440
441 fsopt->wsize = CEPH_MAX_WRITE_SIZE;
442 fsopt->rsize = CEPH_MAX_READ_SIZE;
443 fsopt->rasize = CEPH_RASIZE_DEFAULT;
444 fsopt->snapdir_name = kstrdup(CEPH_SNAPDIRNAME_DEFAULT, GFP_KERNEL);
445 if (!fsopt->snapdir_name) {
446 err = -ENOMEM;
447 goto out;
448 }
449
450 fsopt->caps_wanted_delay_min = CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT;
451 fsopt->caps_wanted_delay_max = CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT;
452 fsopt->max_readdir = CEPH_MAX_READDIR_DEFAULT;
453 fsopt->max_readdir_bytes = CEPH_MAX_READDIR_BYTES_DEFAULT;
454 fsopt->congestion_kb = default_congestion_kb();
455
456 /*
457 * Distinguish the server list from the path in "dev_name".
458 * Internally we do not include the leading '/' in the path.
459 *
460 * "dev_name" will look like:
461 * <server_spec>[,<server_spec>...]:[<path>]
462 * where
463 * <server_spec> is <ip>[:<port>]
464 * <path> is optional, but if present must begin with '/'
465 */
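	/*
	 * Example (hypothetical addresses): for
	 *   dev_name = "192.168.0.1:6789,192.168.0.2:6789:/export"
	 * the monitor list is everything before the ':' that precedes the
	 * first '/', and fsopt->server_path is set to "/export".
	 */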
466 dev_name_end = strchr(dev_name, '/');
467 if (dev_name_end) {
468 if (strlen(dev_name_end) > 1) {
469 fsopt->server_path = kstrdup(dev_name_end, GFP_KERNEL);
470 if (!fsopt->server_path) {
471 err = -ENOMEM;
472 goto out;
473 }
474 }
475 } else {
476 dev_name_end = dev_name + strlen(dev_name);
477 }
478 err = -EINVAL;
479 dev_name_end--; /* back up to ':' separator */
480 if (dev_name_end < dev_name || *dev_name_end != ':') {
481 pr_err("device name is missing path (no : separator in %s)\n",
482 dev_name);
483 goto out;
484 }
485 dout("device name '%.*s'\n", (int)(dev_name_end - dev_name), dev_name);
486 if (fsopt->server_path)
487 dout("server path '%s'\n", fsopt->server_path);
488
489 *popt = ceph_parse_options(options, dev_name, dev_name_end,
490 parse_fsopt_token, (void *)fsopt);
491 if (IS_ERR(*popt)) {
492 err = PTR_ERR(*popt);
493 goto out;
494 }
495
496 /* success */
497 *pfsopt = fsopt;
498 return 0;
499
500out:
501 destroy_mount_options(fsopt);
502 return err;
503}
504
505/**
506 * ceph_show_options - Show mount options in /proc/mounts
507 * @m: seq_file to write to
508 * @root: root of that (sub)tree
509 */
510static int ceph_show_options(struct seq_file *m, struct dentry *root)
511{
512 struct ceph_fs_client *fsc = ceph_sb_to_client(root->d_sb);
513 struct ceph_mount_options *fsopt = fsc->mount_options;
514 size_t pos;
515 int ret;
516
517 /* a comma between MNT/MS and client options */
518 seq_putc(m, ',');
519 pos = m->count;
520
521 ret = ceph_print_client_options(m, fsc->client);
522 if (ret)
523 return ret;
524
525 /* retract our comma if no client options */
526 if (m->count == pos)
527 m->count--;
528
529 if (fsopt->flags & CEPH_MOUNT_OPT_DIRSTAT)
530 seq_puts(m, ",dirstat");
531 if ((fsopt->flags & CEPH_MOUNT_OPT_RBYTES))
532 seq_puts(m, ",rbytes");
533 if (fsopt->flags & CEPH_MOUNT_OPT_NOASYNCREADDIR)
534 seq_puts(m, ",noasyncreaddir");
535 if ((fsopt->flags & CEPH_MOUNT_OPT_DCACHE) == 0)
536 seq_puts(m, ",nodcache");
537 if (fsopt->flags & CEPH_MOUNT_OPT_FSCACHE) {
538 seq_show_option(m, "fsc", fsopt->fscache_uniq);
539 }
540 if (fsopt->flags & CEPH_MOUNT_OPT_NOPOOLPERM)
541 seq_puts(m, ",nopoolperm");
542 if (fsopt->flags & CEPH_MOUNT_OPT_NOQUOTADF)
543 seq_puts(m, ",noquotadf");
544
545#ifdef CONFIG_CEPH_FS_POSIX_ACL
546 if (fsopt->sb_flags & SB_POSIXACL)
547 seq_puts(m, ",acl");
548 else
549 seq_puts(m, ",noacl");
550#endif
551
552 if (fsopt->mds_namespace)
553 seq_show_option(m, "mds_namespace", fsopt->mds_namespace);
554 if (fsopt->wsize)
555 seq_printf(m, ",wsize=%d", fsopt->wsize);
556 if (fsopt->rsize != CEPH_MAX_READ_SIZE)
557 seq_printf(m, ",rsize=%d", fsopt->rsize);
558 if (fsopt->rasize != CEPH_RASIZE_DEFAULT)
559 seq_printf(m, ",rasize=%d", fsopt->rasize);
560 if (fsopt->congestion_kb != default_congestion_kb())
561 seq_printf(m, ",write_congestion_kb=%d", fsopt->congestion_kb);
562 if (fsopt->caps_wanted_delay_min != CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT)
563 seq_printf(m, ",caps_wanted_delay_min=%d",
564 fsopt->caps_wanted_delay_min);
565 if (fsopt->caps_wanted_delay_max != CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT)
566 seq_printf(m, ",caps_wanted_delay_max=%d",
567 fsopt->caps_wanted_delay_max);
568 if (fsopt->max_readdir != CEPH_MAX_READDIR_DEFAULT)
569 seq_printf(m, ",readdir_max_entries=%d", fsopt->max_readdir);
570 if (fsopt->max_readdir_bytes != CEPH_MAX_READDIR_BYTES_DEFAULT)
571 seq_printf(m, ",readdir_max_bytes=%d", fsopt->max_readdir_bytes);
572 if (strcmp(fsopt->snapdir_name, CEPH_SNAPDIRNAME_DEFAULT))
573 seq_show_option(m, "snapdirname", fsopt->snapdir_name);
574
575 return 0;
576}
577
578/*
579 * Handle any monitor messages the standard library doesn't understand;
580 * return an error if we don't understand them either.
581 */
582static int extra_mon_dispatch(struct ceph_client *client, struct ceph_msg *msg)
583{
584 struct ceph_fs_client *fsc = client->private;
585 int type = le16_to_cpu(msg->hdr.type);
586
587 switch (type) {
588 case CEPH_MSG_MDS_MAP:
589 ceph_mdsc_handle_mdsmap(fsc->mdsc, msg);
590 return 0;
591 case CEPH_MSG_FS_MAP_USER:
592 ceph_mdsc_handle_fsmap(fsc->mdsc, msg);
593 return 0;
594 default:
595 return -1;
596 }
597}
598
599/*
600 * create a new fs client
601 */
602static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt,
603 struct ceph_options *opt)
604{
605 struct ceph_fs_client *fsc;
606 int page_count;
607 size_t size;
608 int err = -ENOMEM;
609
610 fsc = kzalloc(sizeof(*fsc), GFP_KERNEL);
611 if (!fsc)
612 return ERR_PTR(-ENOMEM);
613
614 fsc->client = ceph_create_client(opt, fsc);
615 if (IS_ERR(fsc->client)) {
616 err = PTR_ERR(fsc->client);
617 goto fail;
618 }
619 fsc->client->extra_mon_dispatch = extra_mon_dispatch;
620
621 if (!fsopt->mds_namespace) {
622 ceph_monc_want_map(&fsc->client->monc, CEPH_SUB_MDSMAP,
623 0, true);
624 } else {
625 ceph_monc_want_map(&fsc->client->monc, CEPH_SUB_FSMAP,
626 0, false);
627 }
628
629 fsc->mount_options = fsopt;
630
631 fsc->sb = NULL;
632 fsc->mount_state = CEPH_MOUNT_MOUNTING;
633
634 atomic_long_set(&fsc->writeback_count, 0);
635
636 err = -ENOMEM;
637 /*
638 * The number of concurrent works can be high but they don't need
639 * to be processed in parallel, limit concurrency.
640 */
641 fsc->wb_wq = alloc_workqueue("ceph-writeback", 0, 1);
642 if (!fsc->wb_wq)
643 goto fail_client;
644 fsc->pg_inv_wq = alloc_workqueue("ceph-pg-invalid", 0, 1);
645 if (!fsc->pg_inv_wq)
646 goto fail_wb_wq;
647 fsc->trunc_wq = alloc_workqueue("ceph-trunc", 0, 1);
648 if (!fsc->trunc_wq)
649 goto fail_pg_inv_wq;
650
651 /* set up mempools */
652 err = -ENOMEM;
653 page_count = fsc->mount_options->wsize >> PAGE_SHIFT;
654 size = sizeof (struct page *) * (page_count ? page_count : 1);
655 fsc->wb_pagevec_pool = mempool_create_kmalloc_pool(10, size);
656 if (!fsc->wb_pagevec_pool)
657 goto fail_trunc_wq;
658
659 /* caps */
660 fsc->min_caps = fsopt->max_readdir;
661
662 return fsc;
663
664fail_trunc_wq:
665 destroy_workqueue(fsc->trunc_wq);
666fail_pg_inv_wq:
667 destroy_workqueue(fsc->pg_inv_wq);
668fail_wb_wq:
669 destroy_workqueue(fsc->wb_wq);
670fail_client:
671 ceph_destroy_client(fsc->client);
672fail:
673 kfree(fsc);
674 return ERR_PTR(err);
675}
676
677static void destroy_fs_client(struct ceph_fs_client *fsc)
678{
679 dout("destroy_fs_client %p\n", fsc);
680
681 destroy_workqueue(fsc->wb_wq);
682 destroy_workqueue(fsc->pg_inv_wq);
683 destroy_workqueue(fsc->trunc_wq);
684
685 mempool_destroy(fsc->wb_pagevec_pool);
686
687 destroy_mount_options(fsc->mount_options);
688
689 ceph_destroy_client(fsc->client);
690
691 kfree(fsc);
692 dout("destroy_fs_client %p done\n", fsc);
693}
694
695/*
696 * caches
697 */
698struct kmem_cache *ceph_inode_cachep;
699struct kmem_cache *ceph_cap_cachep;
700struct kmem_cache *ceph_cap_flush_cachep;
701struct kmem_cache *ceph_dentry_cachep;
702struct kmem_cache *ceph_file_cachep;
703struct kmem_cache *ceph_dir_file_cachep;
704
705static void ceph_inode_init_once(void *foo)
706{
707 struct ceph_inode_info *ci = foo;
708 inode_init_once(&ci->vfs_inode);
709}
710
711static int __init init_caches(void)
712{
713 int error = -ENOMEM;
714
715 ceph_inode_cachep = kmem_cache_create("ceph_inode_info",
716 sizeof(struct ceph_inode_info),
717 __alignof__(struct ceph_inode_info),
718 SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|
719 SLAB_ACCOUNT, ceph_inode_init_once);
720 if (!ceph_inode_cachep)
721 return -ENOMEM;
722
723 ceph_cap_cachep = KMEM_CACHE(ceph_cap, SLAB_MEM_SPREAD);
724 if (!ceph_cap_cachep)
725 goto bad_cap;
726 ceph_cap_flush_cachep = KMEM_CACHE(ceph_cap_flush,
727 SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
728 if (!ceph_cap_flush_cachep)
729 goto bad_cap_flush;
730
731 ceph_dentry_cachep = KMEM_CACHE(ceph_dentry_info,
732 SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
733 if (!ceph_dentry_cachep)
734 goto bad_dentry;
735
736 ceph_file_cachep = KMEM_CACHE(ceph_file_info, SLAB_MEM_SPREAD);
737 if (!ceph_file_cachep)
738 goto bad_file;
739
740 ceph_dir_file_cachep = KMEM_CACHE(ceph_dir_file_info, SLAB_MEM_SPREAD);
741 if (!ceph_dir_file_cachep)
742 goto bad_dir_file;
743
744 error = ceph_fscache_register();
745 if (error)
746 goto bad_fscache;
747
748 return 0;
749
750bad_fscache:
751 kmem_cache_destroy(ceph_dir_file_cachep);
752bad_dir_file:
753 kmem_cache_destroy(ceph_file_cachep);
754bad_file:
755 kmem_cache_destroy(ceph_dentry_cachep);
756bad_dentry:
757 kmem_cache_destroy(ceph_cap_flush_cachep);
758bad_cap_flush:
759 kmem_cache_destroy(ceph_cap_cachep);
760bad_cap:
761 kmem_cache_destroy(ceph_inode_cachep);
762 return error;
763}
764
765static void destroy_caches(void)
766{
767 /*
768 * Make sure all delayed rcu free inodes are flushed before we
769 * destroy cache.
770 */
771 rcu_barrier();
772
773 kmem_cache_destroy(ceph_inode_cachep);
774 kmem_cache_destroy(ceph_cap_cachep);
775 kmem_cache_destroy(ceph_cap_flush_cachep);
776 kmem_cache_destroy(ceph_dentry_cachep);
777 kmem_cache_destroy(ceph_file_cachep);
778 kmem_cache_destroy(ceph_dir_file_cachep);
779
780 ceph_fscache_unregister();
781}
782
783
784/*
785 * ceph_umount_begin - initiate forced umount. Tear down the
786 * mount, skipping steps that may hang while waiting for server(s).
787 */
788static void ceph_umount_begin(struct super_block *sb)
789{
790 struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
791
792 dout("ceph_umount_begin - starting forced umount\n");
793 if (!fsc)
794 return;
795 fsc->mount_state = CEPH_MOUNT_SHUTDOWN;
796 ceph_mdsc_force_umount(fsc->mdsc);
797 return;
798}
799
800static const struct super_operations ceph_super_ops = {
801 .alloc_inode = ceph_alloc_inode,
802 .destroy_inode = ceph_destroy_inode,
803 .write_inode = ceph_write_inode,
804 .drop_inode = ceph_drop_inode,
805 .sync_fs = ceph_sync_fs,
806 .put_super = ceph_put_super,
807 .show_options = ceph_show_options,
808 .statfs = ceph_statfs,
809 .umount_begin = ceph_umount_begin,
810};
811
812/*
813 * Bootstrap mount by opening the root directory. Note the mount
814 * @started time from caller, and time out if this takes too long.
815 */
816static struct dentry *open_root_dentry(struct ceph_fs_client *fsc,
817 const char *path,
818 unsigned long started)
819{
820 struct ceph_mds_client *mdsc = fsc->mdsc;
821 struct ceph_mds_request *req = NULL;
822 int err;
823 struct dentry *root;
824
825 /* open dir */
826 dout("open_root_inode opening '%s'\n", path);
827 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS);
828 if (IS_ERR(req))
829 return ERR_CAST(req);
830 req->r_path1 = kstrdup(path, GFP_NOFS);
831 if (!req->r_path1) {
832 root = ERR_PTR(-ENOMEM);
833 goto out;
834 }
835
836 req->r_ino1.ino = CEPH_INO_ROOT;
837 req->r_ino1.snap = CEPH_NOSNAP;
838 req->r_started = started;
839 req->r_timeout = fsc->client->options->mount_timeout;
840 req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE);
841 req->r_num_caps = 2;
842 err = ceph_mdsc_do_request(mdsc, NULL, req);
843 if (err == 0) {
844 struct inode *inode = req->r_target_inode;
845 req->r_target_inode = NULL;
846 dout("open_root_inode success\n");
847 root = d_make_root(inode);
848 if (!root) {
849 root = ERR_PTR(-ENOMEM);
850 goto out;
851 }
852 dout("open_root_inode success, root dentry is %p\n", root);
853 } else {
854 root = ERR_PTR(err);
855 }
856out:
857 ceph_mdsc_put_request(req);
858 return root;
859}
860
861
862
863
864/*
865 * mount: join the ceph cluster, and open root directory.
866 */
867static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc)
868{
869 int err;
870 unsigned long started = jiffies; /* note the start time */
871 struct dentry *root;
872
873 dout("mount start %p\n", fsc);
874 mutex_lock(&fsc->client->mount_mutex);
875
876 if (!fsc->sb->s_root) {
877 const char *path;
878 err = __ceph_open_session(fsc->client, started);
879 if (err < 0)
880 goto out;
881
882 /* setup fscache */
883 if (fsc->mount_options->flags & CEPH_MOUNT_OPT_FSCACHE) {
884 err = ceph_fscache_register_fs(fsc);
885 if (err < 0)
886 goto out;
887 }
888
889 if (!fsc->mount_options->server_path) {
890 path = "";
891 dout("mount opening path \\t\n");
892 } else {
893 path = fsc->mount_options->server_path + 1;
894 dout("mount opening path %s\n", path);
895 }
896
897 err = ceph_fs_debugfs_init(fsc);
898 if (err < 0)
899 goto out;
900
901 root = open_root_dentry(fsc, path, started);
902 if (IS_ERR(root)) {
903 err = PTR_ERR(root);
904 goto out;
905 }
906 fsc->sb->s_root = dget(root);
907 } else {
908 root = dget(fsc->sb->s_root);
909 }
910
911 fsc->mount_state = CEPH_MOUNT_MOUNTED;
912 dout("mount success\n");
913 mutex_unlock(&fsc->client->mount_mutex);
914 return root;
915
916out:
917 mutex_unlock(&fsc->client->mount_mutex);
918 return ERR_PTR(err);
919}
920
921static int ceph_set_super(struct super_block *s, void *data)
922{
923 struct ceph_fs_client *fsc = data;
924 int ret;
925
926 dout("set_super %p data %p\n", s, data);
927
928 s->s_flags = fsc->mount_options->sb_flags;
929 s->s_maxbytes = 1ULL << 40; /* temp value until we get mdsmap */
930
931 s->s_xattr = ceph_xattr_handlers;
932 s->s_fs_info = fsc;
933 fsc->sb = s;
934
935 s->s_op = &ceph_super_ops;
936 s->s_d_op = &ceph_dentry_ops;
937 s->s_export_op = &ceph_export_ops;
938
939 s->s_time_gran = 1000; /* 1000 ns == 1 us */
940
941 ret = set_anon_super(s, NULL); /* what is that second arg for? */
942 if (ret != 0)
943 goto fail;
944
945 return ret;
946
947fail:
948 s->s_fs_info = NULL;
949 fsc->sb = NULL;
950 return ret;
951}
952
953/*
954 * share superblock if same fs AND options
955 */
956static int ceph_compare_super(struct super_block *sb, void *data)
957{
958 struct ceph_fs_client *new = data;
959 struct ceph_mount_options *fsopt = new->mount_options;
960 struct ceph_options *opt = new->client->options;
961 struct ceph_fs_client *other = ceph_sb_to_client(sb);
962
963 dout("ceph_compare_super %p\n", sb);
964
965 if (compare_mount_options(fsopt, opt, other)) {
966 dout("monitor(s)/mount options don't match\n");
967 return 0;
968 }
969 if ((opt->flags & CEPH_OPT_FSID) &&
970 ceph_fsid_compare(&opt->fsid, &other->client->fsid)) {
971 dout("fsid doesn't match\n");
972 return 0;
973 }
974 if (fsopt->sb_flags != other->mount_options->sb_flags) {
975 dout("flags differ\n");
976 return 0;
977 }
978 return 1;
979}
980
981/*
982 * construct our own bdi so we can control readahead, etc.
983 */
984static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
985
986static int ceph_setup_bdi(struct super_block *sb, struct ceph_fs_client *fsc)
987{
988 int err;
989
990 err = super_setup_bdi_name(sb, "ceph-%ld",
991 atomic_long_inc_return(&bdi_seq));
992 if (err)
993 return err;
994
995 /* set ra_pages based on rasize mount option? */
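	/*
	 * e.g. assuming the default 8 MB rasize (CEPH_RASIZE_DEFAULT) and
	 * 4 KB pages, this yields ra_pages = 2048 (8388608 >> 12).
	 */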
996 sb->s_bdi->ra_pages = fsc->mount_options->rasize >> PAGE_SHIFT;
997
998 /* set io_pages based on max osd read size */
999 sb->s_bdi->io_pages = fsc->mount_options->rsize >> PAGE_SHIFT;
1000
1001 return 0;
1002}
1003
1004static struct dentry *ceph_mount(struct file_system_type *fs_type,
1005 int flags, const char *dev_name, void *data)
1006{
1007 struct super_block *sb;
1008 struct ceph_fs_client *fsc;
1009 struct dentry *res;
1010 int err;
1011 int (*compare_super)(struct super_block *, void *) = ceph_compare_super;
1012 struct ceph_mount_options *fsopt = NULL;
1013 struct ceph_options *opt = NULL;
1014
1015 dout("ceph_mount\n");
1016
1017#ifdef CONFIG_CEPH_FS_POSIX_ACL
1018 flags |= SB_POSIXACL;
1019#endif
1020 err = parse_mount_options(&fsopt, &opt, flags, data, dev_name);
1021 if (err < 0) {
1022 res = ERR_PTR(err);
1023 goto out_final;
1024 }
1025
1026 /* create client (which we may/may not use) */
1027 fsc = create_fs_client(fsopt, opt);
1028 if (IS_ERR(fsc)) {
1029 res = ERR_CAST(fsc);
1030 destroy_mount_options(fsopt);
1031 ceph_destroy_options(opt);
1032 goto out_final;
1033 }
1034
1035 err = ceph_mdsc_init(fsc);
1036 if (err < 0) {
1037 res = ERR_PTR(err);
1038 goto out;
1039 }
1040
1041 if (ceph_test_opt(fsc->client, NOSHARE))
1042 compare_super = NULL;
1043 sb = sget(fs_type, compare_super, ceph_set_super, flags, fsc);
1044 if (IS_ERR(sb)) {
1045 res = ERR_CAST(sb);
1046 goto out;
1047 }
1048
1049 if (ceph_sb_to_client(sb) != fsc) {
1050 ceph_mdsc_destroy(fsc);
1051 destroy_fs_client(fsc);
1052 fsc = ceph_sb_to_client(sb);
1053 dout("get_sb got existing client %p\n", fsc);
1054 } else {
1055 dout("get_sb using new client %p\n", fsc);
1056 err = ceph_setup_bdi(sb, fsc);
1057 if (err < 0) {
1058 res = ERR_PTR(err);
1059 goto out_splat;
1060 }
1061 }
1062
1063 res = ceph_real_mount(fsc);
1064 if (IS_ERR(res))
1065 goto out_splat;
1066 dout("root %p inode %p ino %llx.%llx\n", res,
1067 d_inode(res), ceph_vinop(d_inode(res)));
1068 return res;
1069
1070out_splat:
1071 ceph_mdsc_close_sessions(fsc->mdsc);
1072 deactivate_locked_super(sb);
1073 goto out_final;
1074
1075out:
1076 ceph_mdsc_destroy(fsc);
1077 destroy_fs_client(fsc);
1078out_final:
1079 dout("ceph_mount fail %ld\n", PTR_ERR(res));
1080 return res;
1081}
1082
1083static void ceph_kill_sb(struct super_block *s)
1084{
1085 struct ceph_fs_client *fsc = ceph_sb_to_client(s);
1086 dev_t dev = s->s_dev;
1087
1088 dout("kill_sb %p\n", s);
1089
1090 ceph_mdsc_pre_umount(fsc->mdsc);
1091 generic_shutdown_super(s);
1092
1093 fsc->client->extra_mon_dispatch = NULL;
1094 ceph_fs_debugfs_cleanup(fsc);
1095
1096 ceph_fscache_unregister_fs(fsc);
1097
1098 ceph_mdsc_destroy(fsc);
1099
1100 destroy_fs_client(fsc);
1101 free_anon_bdev(dev);
1102}
1103
1104static struct file_system_type ceph_fs_type = {
1105 .owner = THIS_MODULE,
1106 .name = "ceph",
1107 .mount = ceph_mount,
1108 .kill_sb = ceph_kill_sb,
1109 .fs_flags = FS_RENAME_DOES_D_MOVE,
1110};
1111MODULE_ALIAS_FS("ceph");
1112
1113static int __init init_ceph(void)
1114{
1115 int ret = init_caches();
1116 if (ret)
1117 goto out;
1118
1119 ceph_flock_init();
1120 ceph_xattr_init();
1121 ret = register_filesystem(&ceph_fs_type);
1122 if (ret)
1123 goto out_xattr;
1124
1125 pr_info("loaded (mds proto %d)\n", CEPH_MDSC_PROTOCOL);
1126
1127 return 0;
1128
1129out_xattr:
1130 ceph_xattr_exit();
1131 destroy_caches();
1132out:
1133 return ret;
1134}
1135
1136static void __exit exit_ceph(void)
1137{
1138 dout("exit_ceph\n");
1139 unregister_filesystem(&ceph_fs_type);
1140 ceph_xattr_exit();
1141 destroy_caches();
1142}
1143
1144module_init(init_ceph);
1145module_exit(exit_ceph);
1146
1147MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
1148MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
1149MODULE_AUTHOR("Patience Warnick <patience@newdream.net>");
1150MODULE_DESCRIPTION("Ceph filesystem for Linux");
1151MODULE_LICENSE("GPL");