1
2#include <linux/ceph/ceph_debug.h>
3
4#include <linux/backing-dev.h>
5#include <linux/ctype.h>
6#include <linux/fs.h>
7#include <linux/inet.h>
8#include <linux/in6.h>
9#include <linux/module.h>
10#include <linux/mount.h>
11#include <linux/parser.h>
12#include <linux/sched.h>
13#include <linux/seq_file.h>
14#include <linux/slab.h>
15#include <linux/statfs.h>
16#include <linux/string.h>
17
18#include "super.h"
19#include "mds_client.h"
20#include "cache.h"
21
22#include <linux/ceph/ceph_features.h>
23#include <linux/ceph/decode.h>
24#include <linux/ceph/mon_client.h>
25#include <linux/ceph/auth.h>
26#include <linux/ceph/debugfs.h>
27
28/*
29 * Ceph superblock operations
30 *
31 * Handle the basics of mounting, unmounting.
32 */
33
34/*
35 * super ops
36 */
37static void ceph_put_super(struct super_block *s)
38{
39 struct ceph_fs_client *fsc = ceph_sb_to_client(s);
40
41 dout("put_super\n");
42 ceph_mdsc_close_sessions(fsc->mdsc);
43
44 /*
45 * ensure we release the bdi before put_anon_super releases
46 * the device name.
47 */
48 if (s->s_bdi == &fsc->backing_dev_info) {
49 bdi_unregister(&fsc->backing_dev_info);
50 s->s_bdi = NULL;
51 }
52
53 return;
54}
55
56static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
57{
58 struct ceph_fs_client *fsc = ceph_inode_to_client(dentry->d_inode);
59 struct ceph_monmap *monmap = fsc->client->monc.monmap;
60 struct ceph_statfs st;
61 u64 fsid;
62 int err;
63
64 dout("statfs\n");
65 err = ceph_monc_do_statfs(&fsc->client->monc, &st);
66 if (err < 0)
67 return err;
68
69 /* fill in kstatfs */
70 buf->f_type = CEPH_SUPER_MAGIC; /* ?? */
71
72 /*
73 * express utilization in terms of large blocks to avoid
74 * overflow on 32-bit machines.
75 *
76 * NOTE: for the time being, we make bsize == frsize to humor
77 * not-yet-ancient versions of glibc that are broken.
78 * Someday, we will probably want to report a real block
79 * size... whatever that may mean for a network file system!
80 */
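 /*
  * Worked example (illustrative figures, assuming CEPH_BLOCK_SHIFT is 22,
  * i.e. 4 MB blocks, as in the shared libceph headers): st.kb is reported
  * in kilobytes, so shifting right by (22 - 10) = 12 converts KB into 4 MB
  * blocks.  An 8 TB cluster is 2^33 KB, which becomes 2^21 blocks -- small
  * enough to survive a 32-bit statfs ABI.
  */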
81 buf->f_bsize = 1 << CEPH_BLOCK_SHIFT;
82 buf->f_frsize = 1 << CEPH_BLOCK_SHIFT;
83 buf->f_blocks = le64_to_cpu(st.kb) >> (CEPH_BLOCK_SHIFT-10);
84 buf->f_bfree = le64_to_cpu(st.kb_avail) >> (CEPH_BLOCK_SHIFT-10);
85 buf->f_bavail = le64_to_cpu(st.kb_avail) >> (CEPH_BLOCK_SHIFT-10);
86
87 buf->f_files = le64_to_cpu(st.num_objects);
88 buf->f_ffree = -1;
89 buf->f_namelen = NAME_MAX;
90
91 /* leave fsid little-endian, regardless of host endianness */
92 fsid = *(u64 *)(&monmap->fsid) ^ *((u64 *)&monmap->fsid + 1);
93 buf->f_fsid.val[0] = fsid & 0xffffffff;
94 buf->f_fsid.val[1] = fsid >> 32;
95
96 return 0;
97}
98
99
100static int ceph_sync_fs(struct super_block *sb, int wait)
101{
102 struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
103
104 if (!wait) {
105 dout("sync_fs (non-blocking)\n");
106 ceph_flush_dirty_caps(fsc->mdsc);
107 dout("sync_fs (non-blocking) done\n");
108 return 0;
109 }
110
111 dout("sync_fs (blocking)\n");
112 ceph_osdc_sync(&fsc->client->osdc);
113 ceph_mdsc_sync(fsc->mdsc);
114 dout("sync_fs (blocking) done\n");
115 return 0;
116}
117
118/*
119 * mount options
120 */
121enum {
122 Opt_wsize,
123 Opt_rsize,
124 Opt_rasize,
125 Opt_caps_wanted_delay_min,
126 Opt_caps_wanted_delay_max,
127 Opt_cap_release_safety,
128 Opt_readdir_max_entries,
129 Opt_readdir_max_bytes,
130 Opt_congestion_kb,
131 Opt_last_int,
132 /* int args above */
133 Opt_snapdirname,
134 Opt_last_string,
135 /* string args above */
136 Opt_dirstat,
137 Opt_nodirstat,
138 Opt_rbytes,
139 Opt_norbytes,
140 Opt_asyncreaddir,
141 Opt_noasyncreaddir,
142 Opt_dcache,
143 Opt_nodcache,
144 Opt_ino32,
145 Opt_noino32,
146 Opt_fscache,
147 Opt_nofscache,
148#ifdef CONFIG_CEPH_FS_POSIX_ACL
149 Opt_acl,
150#endif
151 Opt_noacl
152};
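
/*
 * Descriptive note: parse_fsopt_token() below depends on this enum's
 * ordering -- tokens declared before Opt_last_int take an integer argument,
 * tokens between Opt_last_int and Opt_last_string take a string argument,
 * and everything after Opt_last_string is a bare flag.
 */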
153
154static match_table_t fsopt_tokens = {
155 {Opt_wsize, "wsize=%d"},
156 {Opt_rsize, "rsize=%d"},
157 {Opt_rasize, "rasize=%d"},
158 {Opt_caps_wanted_delay_min, "caps_wanted_delay_min=%d"},
159 {Opt_caps_wanted_delay_max, "caps_wanted_delay_max=%d"},
160 {Opt_cap_release_safety, "cap_release_safety=%d"},
161 {Opt_readdir_max_entries, "readdir_max_entries=%d"},
162 {Opt_readdir_max_bytes, "readdir_max_bytes=%d"},
163 {Opt_congestion_kb, "write_congestion_kb=%d"},
164 /* int args above */
165 {Opt_snapdirname, "snapdirname=%s"},
166 /* string args above */
167 {Opt_dirstat, "dirstat"},
168 {Opt_nodirstat, "nodirstat"},
169 {Opt_rbytes, "rbytes"},
170 {Opt_norbytes, "norbytes"},
171 {Opt_asyncreaddir, "asyncreaddir"},
172 {Opt_noasyncreaddir, "noasyncreaddir"},
173 {Opt_dcache, "dcache"},
174 {Opt_nodcache, "nodcache"},
175 {Opt_ino32, "ino32"},
176 {Opt_noino32, "noino32"},
177 {Opt_fscache, "fsc"},
178 {Opt_nofscache, "nofsc"},
179#ifdef CONFIG_CEPH_FS_POSIX_ACL
180 {Opt_acl, "acl"},
181#endif
182 {Opt_noacl, "noacl"},
183 {-1, NULL}
184};
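
/*
 * Illustration only (hypothetical values): a mount might pass an option
 * string such as "rsize=524288,noasyncreaddir,snapdirname=.snapshot".
 * ceph_parse_options() consumes the options libceph itself understands
 * (name=, secret=, and friends) and hands anything it does not recognize
 * to parse_fsopt_token() below through the private callback.
 */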
185
186static int parse_fsopt_token(char *c, void *private)
187{
188 struct ceph_mount_options *fsopt = private;
189 substring_t argstr[MAX_OPT_ARGS];
190 int token, intval, ret;
191
192 token = match_token((char *)c, fsopt_tokens, argstr);
193 if (token < 0)
194 return -EINVAL;
195
196 if (token < Opt_last_int) {
197 ret = match_int(&argstr[0], &intval);
198 if (ret < 0) {
199 pr_err("bad mount option arg (not int) "
200 "at '%s'\n", c);
201 return ret;
202 }
203 dout("got int token %d val %d\n", token, intval);
204 } else if (token > Opt_last_int && token < Opt_last_string) {
205 dout("got string token %d val %s\n", token,
206 argstr[0].from);
207 } else {
208 dout("got token %d\n", token);
209 }
210
211 switch (token) {
212 case Opt_snapdirname:
213 kfree(fsopt->snapdir_name);
214 fsopt->snapdir_name = kstrndup(argstr[0].from,
215 argstr[0].to-argstr[0].from,
216 GFP_KERNEL);
217 if (!fsopt->snapdir_name)
218 return -ENOMEM;
219 break;
220
221 /* misc */
222 case Opt_wsize:
223 fsopt->wsize = intval;
224 break;
225 case Opt_rsize:
226 fsopt->rsize = intval;
227 break;
228 case Opt_rasize:
229 fsopt->rasize = intval;
230 break;
231 case Opt_caps_wanted_delay_min:
232 fsopt->caps_wanted_delay_min = intval;
233 break;
234 case Opt_caps_wanted_delay_max:
235 fsopt->caps_wanted_delay_max = intval;
236 break;
 case Opt_cap_release_safety:
 fsopt->cap_release_safety = intval;
 break;
237 case Opt_readdir_max_entries:
238 fsopt->max_readdir = intval;
239 break;
240 case Opt_readdir_max_bytes:
241 fsopt->max_readdir_bytes = intval;
242 break;
243 case Opt_congestion_kb:
244 fsopt->congestion_kb = intval;
245 break;
246 case Opt_dirstat:
247 fsopt->flags |= CEPH_MOUNT_OPT_DIRSTAT;
248 break;
249 case Opt_nodirstat:
250 fsopt->flags &= ~CEPH_MOUNT_OPT_DIRSTAT;
251 break;
252 case Opt_rbytes:
253 fsopt->flags |= CEPH_MOUNT_OPT_RBYTES;
254 break;
255 case Opt_norbytes:
256 fsopt->flags &= ~CEPH_MOUNT_OPT_RBYTES;
257 break;
258 case Opt_asyncreaddir:
259 fsopt->flags &= ~CEPH_MOUNT_OPT_NOASYNCREADDIR;
260 break;
261 case Opt_noasyncreaddir:
262 fsopt->flags |= CEPH_MOUNT_OPT_NOASYNCREADDIR;
263 break;
264 case Opt_dcache:
265 fsopt->flags |= CEPH_MOUNT_OPT_DCACHE;
266 break;
267 case Opt_nodcache:
268 fsopt->flags &= ~CEPH_MOUNT_OPT_DCACHE;
269 break;
270 case Opt_ino32:
271 fsopt->flags |= CEPH_MOUNT_OPT_INO32;
272 break;
273 case Opt_noino32:
274 fsopt->flags &= ~CEPH_MOUNT_OPT_INO32;
275 break;
276 case Opt_fscache:
277 fsopt->flags |= CEPH_MOUNT_OPT_FSCACHE;
278 break;
279 case Opt_nofscache:
280 fsopt->flags &= ~CEPH_MOUNT_OPT_FSCACHE;
281 break;
282#ifdef CONFIG_CEPH_FS_POSIX_ACL
283 case Opt_acl:
284 fsopt->sb_flags |= MS_POSIXACL;
285 break;
286#endif
287 case Opt_noacl:
288 fsopt->sb_flags &= ~MS_POSIXACL;
289 break;
290 default:
291 BUG_ON(token);
292 }
293 return 0;
294}
295
296static void destroy_mount_options(struct ceph_mount_options *args)
297{
298 dout("destroy_mount_options %p\n", args);
299 kfree(args->snapdir_name);
300 kfree(args);
301}
302
303static int strcmp_null(const char *s1, const char *s2)
304{
305 if (!s1 && !s2)
306 return 0;
307 if (s1 && !s2)
308 return -1;
309 if (!s1 && s2)
310 return 1;
311 return strcmp(s1, s2);
312}
313
314static int compare_mount_options(struct ceph_mount_options *new_fsopt,
315 struct ceph_options *new_opt,
316 struct ceph_fs_client *fsc)
317{
318 struct ceph_mount_options *fsopt1 = new_fsopt;
319 struct ceph_mount_options *fsopt2 = fsc->mount_options;
320 int ofs = offsetof(struct ceph_mount_options, snapdir_name);
321 int ret;
322
323 ret = memcmp(fsopt1, fsopt2, ofs);
324 if (ret)
325 return ret;
326
327 ret = strcmp_null(fsopt1->snapdir_name, fsopt2->snapdir_name);
328 if (ret)
329 return ret;
330
331 return ceph_compare_options(new_opt, fsc->client);
332}
333
334static int parse_mount_options(struct ceph_mount_options **pfsopt,
335 struct ceph_options **popt,
336 int flags, char *options,
337 const char *dev_name,
338 const char **path)
339{
340 struct ceph_mount_options *fsopt;
341 const char *dev_name_end;
342 int err;
343
344 if (!dev_name || !*dev_name)
345 return -EINVAL;
346
347 fsopt = kzalloc(sizeof(*fsopt), GFP_KERNEL);
348 if (!fsopt)
349 return -ENOMEM;
350
351 dout("parse_mount_options %p, dev_name '%s'\n", fsopt, dev_name);
352
353 fsopt->sb_flags = flags;
354 fsopt->flags = CEPH_MOUNT_OPT_DEFAULT;
355
356 fsopt->rsize = CEPH_RSIZE_DEFAULT;
357 fsopt->rasize = CEPH_RASIZE_DEFAULT;
358 fsopt->snapdir_name = kstrdup(CEPH_SNAPDIRNAME_DEFAULT, GFP_KERNEL);
359 fsopt->caps_wanted_delay_min = CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT;
360 fsopt->caps_wanted_delay_max = CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT;
361 fsopt->cap_release_safety = CEPH_CAP_RELEASE_SAFETY_DEFAULT;
362 fsopt->max_readdir = CEPH_MAX_READDIR_DEFAULT;
363 fsopt->max_readdir_bytes = CEPH_MAX_READDIR_BYTES_DEFAULT;
364 fsopt->congestion_kb = default_congestion_kb();
365
366 /*
367 * Distinguish the server list from the path in "dev_name".
368 * Internally we do not include the leading '/' in the path.
369 *
370 * "dev_name" will look like:
371 * <server_spec>[,<server_spec>...]:[<path>]
372 * where
373 * <server_spec> is <ip>[:<port>]
374 * <path> is optional, but if present must begin with '/'
375 */
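 /*
  * For example (hypothetical addresses):
  *   "192.168.0.1:6789,192.168.0.2:6789:/export"
  * splits into the monitor list "192.168.0.1:6789,192.168.0.2:6789" and
  * the path "export" (the leading '/' is dropped here).
  */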
376 dev_name_end = strchr(dev_name, '/');
377 if (dev_name_end) {
378 /* skip over leading '/' for path */
379 *path = dev_name_end + 1;
380 } else {
381 /* path is empty */
382 dev_name_end = dev_name + strlen(dev_name);
383 *path = dev_name_end;
384 }
385 err = -EINVAL;
386 dev_name_end--; /* back up to ':' separator */
387 if (dev_name_end < dev_name || *dev_name_end != ':') {
388 pr_err("device name is missing path (no : separator in %s)\n",
389 dev_name);
390 goto out;
391 }
392 dout("device name '%.*s'\n", (int)(dev_name_end - dev_name), dev_name);
393 dout("server path '%s'\n", *path);
394
395 *popt = ceph_parse_options(options, dev_name, dev_name_end,
396 parse_fsopt_token, (void *)fsopt);
397 if (IS_ERR(*popt)) {
398 err = PTR_ERR(*popt);
399 goto out;
400 }
401
402 /* success */
403 *pfsopt = fsopt;
404 return 0;
405
406out:
407 destroy_mount_options(fsopt);
408 return err;
409}
410
411/**
412 * ceph_show_options - Show mount options in /proc/mounts
413 * @m: seq_file to write to
414 * @root: root of that (sub)tree
415 */
416static int ceph_show_options(struct seq_file *m, struct dentry *root)
417{
418 struct ceph_fs_client *fsc = ceph_sb_to_client(root->d_sb);
419 struct ceph_mount_options *fsopt = fsc->mount_options;
420 struct ceph_options *opt = fsc->client->options;
421
422 if (opt->flags & CEPH_OPT_FSID)
423 seq_printf(m, ",fsid=%pU", &opt->fsid);
424 if (opt->flags & CEPH_OPT_NOSHARE)
425 seq_puts(m, ",noshare");
426 if (opt->flags & CEPH_OPT_NOCRC)
427 seq_puts(m, ",nocrc");
428
429 if (opt->name)
430 seq_printf(m, ",name=%s", opt->name);
431 if (opt->key)
432 seq_puts(m, ",secret=<hidden>");
433
434 if (opt->mount_timeout != CEPH_MOUNT_TIMEOUT_DEFAULT)
435 seq_printf(m, ",mount_timeout=%d", opt->mount_timeout);
436 if (opt->osd_idle_ttl != CEPH_OSD_IDLE_TTL_DEFAULT)
437 seq_printf(m, ",osd_idle_ttl=%d", opt->osd_idle_ttl);
438 if (opt->osd_keepalive_timeout != CEPH_OSD_KEEPALIVE_DEFAULT)
439 seq_printf(m, ",osdkeepalivetimeout=%d",
440 opt->osd_keepalive_timeout);
441
442 if (fsopt->flags & CEPH_MOUNT_OPT_DIRSTAT)
443 seq_puts(m, ",dirstat");
444 if ((fsopt->flags & CEPH_MOUNT_OPT_RBYTES) == 0)
445 seq_puts(m, ",norbytes");
446 if (fsopt->flags & CEPH_MOUNT_OPT_NOASYNCREADDIR)
447 seq_puts(m, ",noasyncreaddir");
448 if (fsopt->flags & CEPH_MOUNT_OPT_DCACHE)
449 seq_puts(m, ",dcache");
450 else
451 seq_puts(m, ",nodcache");
452 if (fsopt->flags & CEPH_MOUNT_OPT_FSCACHE)
453 seq_puts(m, ",fsc");
454 else
455 seq_puts(m, ",nofsc");
456
457#ifdef CONFIG_CEPH_FS_POSIX_ACL
458 if (fsopt->sb_flags & MS_POSIXACL)
459 seq_puts(m, ",acl");
460 else
461 seq_puts(m, ",noacl");
462#endif
463
464 if (fsopt->wsize)
465 seq_printf(m, ",wsize=%d", fsopt->wsize);
466 if (fsopt->rsize != CEPH_RSIZE_DEFAULT)
467 seq_printf(m, ",rsize=%d", fsopt->rsize);
468 if (fsopt->rasize != CEPH_RASIZE_DEFAULT)
469 seq_printf(m, ",rasize=%d", fsopt->rasize);
470 if (fsopt->congestion_kb != default_congestion_kb())
471 seq_printf(m, ",write_congestion_kb=%d", fsopt->congestion_kb);
472 if (fsopt->caps_wanted_delay_min != CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT)
473 seq_printf(m, ",caps_wanted_delay_min=%d",
474 fsopt->caps_wanted_delay_min);
475 if (fsopt->caps_wanted_delay_max != CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT)
476 seq_printf(m, ",caps_wanted_delay_max=%d",
477 fsopt->caps_wanted_delay_max);
478 if (fsopt->cap_release_safety != CEPH_CAP_RELEASE_SAFETY_DEFAULT)
479 seq_printf(m, ",cap_release_safety=%d",
480 fsopt->cap_release_safety);
481 if (fsopt->max_readdir != CEPH_MAX_READDIR_DEFAULT)
482 seq_printf(m, ",readdir_max_entries=%d", fsopt->max_readdir);
483 if (fsopt->max_readdir_bytes != CEPH_MAX_READDIR_BYTES_DEFAULT)
484 seq_printf(m, ",readdir_max_bytes=%d", fsopt->max_readdir_bytes);
485 if (strcmp(fsopt->snapdir_name, CEPH_SNAPDIRNAME_DEFAULT))
486 seq_printf(m, ",snapdirname=%s", fsopt->snapdir_name);
487 return 0;
488}
489
490/*
491 * handle any mon messages the standard library doesn't understand.
492 * return error if we don't either.
493 */
494static int extra_mon_dispatch(struct ceph_client *client, struct ceph_msg *msg)
495{
496 struct ceph_fs_client *fsc = client->private;
497 int type = le16_to_cpu(msg->hdr.type);
498
499 switch (type) {
500 case CEPH_MSG_MDS_MAP:
501 ceph_mdsc_handle_map(fsc->mdsc, msg);
502 return 0;
503
504 default:
505 return -1;
506 }
507}
508
509/*
510 * create a new fs client
511 */
512static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt,
513 struct ceph_options *opt)
514{
515 struct ceph_fs_client *fsc;
516 const u64 supported_features =
517 CEPH_FEATURE_FLOCK |
518 CEPH_FEATURE_DIRLAYOUTHASH;
519 const u64 required_features = 0;
520 int page_count;
521 size_t size;
522 int err = -ENOMEM;
523
524 fsc = kzalloc(sizeof(*fsc), GFP_KERNEL);
525 if (!fsc)
526 return ERR_PTR(-ENOMEM);
527
528 fsc->client = ceph_create_client(opt, fsc, supported_features,
529 required_features);
530 if (IS_ERR(fsc->client)) {
531 err = PTR_ERR(fsc->client);
532 goto fail;
533 }
534 fsc->client->extra_mon_dispatch = extra_mon_dispatch;
535 fsc->client->monc.want_mdsmap = 1;
536
537 fsc->mount_options = fsopt;
538
539 fsc->sb = NULL;
540 fsc->mount_state = CEPH_MOUNT_MOUNTING;
541
542 atomic_long_set(&fsc->writeback_count, 0);
543
544 err = bdi_init(&fsc->backing_dev_info);
545 if (err < 0)
546 goto fail_client;
547
548 err = -ENOMEM;
549 /*
550 * The number of concurrent works can be high but they don't need
551 * to be processed in parallel, limit concurrency.
552 */
553 fsc->wb_wq = alloc_workqueue("ceph-writeback", 0, 1);
554 if (fsc->wb_wq == NULL)
555 goto fail_bdi;
556 fsc->pg_inv_wq = alloc_workqueue("ceph-pg-invalid", 0, 1);
557 if (fsc->pg_inv_wq == NULL)
558 goto fail_wb_wq;
559 fsc->trunc_wq = alloc_workqueue("ceph-trunc", 0, 1);
560 if (fsc->trunc_wq == NULL)
561 goto fail_pg_inv_wq;
562
563 /* set up mempools */
564 err = -ENOMEM;
565 page_count = fsc->mount_options->wsize >> PAGE_CACHE_SHIFT;
566 size = sizeof (struct page *) * (page_count ? page_count : 1);
567 fsc->wb_pagevec_pool = mempool_create_kmalloc_pool(10, size);
568 if (!fsc->wb_pagevec_pool)
569 goto fail_trunc_wq;
570
571 /* setup fscache */
572 if ((fsopt->flags & CEPH_MOUNT_OPT_FSCACHE) &&
573 (ceph_fscache_register_fs(fsc) != 0))
574 goto fail_fscache;
575
576 /* caps */
577 fsc->min_caps = fsopt->max_readdir;
578
579 return fsc;
580
581fail_fscache:
582 ceph_fscache_unregister_fs(fsc);
583fail_trunc_wq:
584 destroy_workqueue(fsc->trunc_wq);
585fail_pg_inv_wq:
586 destroy_workqueue(fsc->pg_inv_wq);
587fail_wb_wq:
588 destroy_workqueue(fsc->wb_wq);
589fail_bdi:
590 bdi_destroy(&fsc->backing_dev_info);
591fail_client:
592 ceph_destroy_client(fsc->client);
593fail:
594 kfree(fsc);
595 return ERR_PTR(err);
596}
597
598static void destroy_fs_client(struct ceph_fs_client *fsc)
599{
600 dout("destroy_fs_client %p\n", fsc);
601
602 ceph_fscache_unregister_fs(fsc);
603
604 destroy_workqueue(fsc->wb_wq);
605 destroy_workqueue(fsc->pg_inv_wq);
606 destroy_workqueue(fsc->trunc_wq);
607
608 bdi_destroy(&fsc->backing_dev_info);
609
610 mempool_destroy(fsc->wb_pagevec_pool);
611
612 destroy_mount_options(fsc->mount_options);
613
614 ceph_fs_debugfs_cleanup(fsc);
615
616 ceph_destroy_client(fsc->client);
617
618 kfree(fsc);
619 dout("destroy_fs_client %p done\n", fsc);
620}
621
622/*
623 * caches
624 */
625struct kmem_cache *ceph_inode_cachep;
626struct kmem_cache *ceph_cap_cachep;
627struct kmem_cache *ceph_dentry_cachep;
628struct kmem_cache *ceph_file_cachep;
629
630static void ceph_inode_init_once(void *foo)
631{
632 struct ceph_inode_info *ci = foo;
633 inode_init_once(&ci->vfs_inode);
634}
635
636static int __init init_caches(void)
637{
638 int error = -ENOMEM;
639
640 ceph_inode_cachep = kmem_cache_create("ceph_inode_info",
641 sizeof(struct ceph_inode_info),
642 __alignof__(struct ceph_inode_info),
643 (SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD),
644 ceph_inode_init_once);
645 if (ceph_inode_cachep == NULL)
646 return -ENOMEM;
647
648 ceph_cap_cachep = KMEM_CACHE(ceph_cap,
649 SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
650 if (ceph_cap_cachep == NULL)
651 goto bad_cap;
652
653 ceph_dentry_cachep = KMEM_CACHE(ceph_dentry_info,
654 SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
655 if (ceph_dentry_cachep == NULL)
656 goto bad_dentry;
657
658 ceph_file_cachep = KMEM_CACHE(ceph_file_info,
659 SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
660 if (ceph_file_cachep == NULL)
661 goto bad_file;
662
663 if ((error = ceph_fscache_register()))
664 goto bad_fscache;
665
666 return 0;
667bad_fscache:
 kmem_cache_destroy(ceph_file_cachep);
bad_file:
668 kmem_cache_destroy(ceph_dentry_cachep);
669bad_dentry:
670 kmem_cache_destroy(ceph_cap_cachep);
671bad_cap:
672 kmem_cache_destroy(ceph_inode_cachep);
673 return error;
674}
675
676static void destroy_caches(void)
677{
678 /*
679 * Make sure all delayed rcu free inodes are flushed before we
680 * destroy cache.
681 */
682 rcu_barrier();
683
684 kmem_cache_destroy(ceph_inode_cachep);
685 kmem_cache_destroy(ceph_cap_cachep);
686 kmem_cache_destroy(ceph_dentry_cachep);
687 kmem_cache_destroy(ceph_file_cachep);
688
689 ceph_fscache_unregister();
690}
691
692
693/*
694 * ceph_umount_begin - initiate forced umount. Tear down the
695 * mount, skipping steps that may hang while waiting for server(s).
696 */
697static void ceph_umount_begin(struct super_block *sb)
698{
699 struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
700
701 dout("ceph_umount_begin - starting forced umount\n");
702 if (!fsc)
703 return;
704 fsc->mount_state = CEPH_MOUNT_SHUTDOWN;
705 return;
706}
707
708static const struct super_operations ceph_super_ops = {
709 .alloc_inode = ceph_alloc_inode,
710 .destroy_inode = ceph_destroy_inode,
711 .write_inode = ceph_write_inode,
712 .drop_inode = ceph_drop_inode,
713 .sync_fs = ceph_sync_fs,
714 .put_super = ceph_put_super,
715 .show_options = ceph_show_options,
716 .statfs = ceph_statfs,
717 .umount_begin = ceph_umount_begin,
718};
719
720/*
721 * Bootstrap mount by opening the root directory. Note the mount
722 * @started time from caller, and time out if this takes too long.
723 */
724static struct dentry *open_root_dentry(struct ceph_fs_client *fsc,
725 const char *path,
726 unsigned long started)
727{
728 struct ceph_mds_client *mdsc = fsc->mdsc;
729 struct ceph_mds_request *req = NULL;
730 int err;
731 struct dentry *root;
732
733 /* open dir */
734 dout("open_root_inode opening '%s'\n", path);
735 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS);
736 if (IS_ERR(req))
737 return ERR_CAST(req);
738 req->r_path1 = kstrdup(path, GFP_NOFS);
739 req->r_ino1.ino = CEPH_INO_ROOT;
740 req->r_ino1.snap = CEPH_NOSNAP;
741 req->r_started = started;
742 req->r_timeout = fsc->client->options->mount_timeout * HZ;
743 req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE);
744 req->r_num_caps = 2;
745 err = ceph_mdsc_do_request(mdsc, NULL, req);
746 if (err == 0) {
747 struct inode *inode = req->r_target_inode;
748 req->r_target_inode = NULL;
749 dout("open_root_inode success\n");
750 if (ceph_ino(inode) == CEPH_INO_ROOT &&
751 fsc->sb->s_root == NULL) {
752 root = d_make_root(inode);
753 if (!root) {
754 root = ERR_PTR(-ENOMEM);
755 goto out;
756 }
757 } else {
758 root = d_obtain_alias(inode);
759 }
760 ceph_init_dentry(root);
761 dout("open_root_inode success, root dentry is %p\n", root);
762 } else {
763 root = ERR_PTR(err);
764 }
765out:
766 ceph_mdsc_put_request(req);
767 return root;
768}
769
770
771
772
773/*
774 * mount: join the ceph cluster, and open root directory.
775 */
776static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc,
777 const char *path)
778{
779 int err;
780 unsigned long started = jiffies; /* note the start time */
781 struct dentry *root;
782 int first = 0; /* first vfsmount for this super_block */
783
784 dout("mount start\n");
785 mutex_lock(&fsc->client->mount_mutex);
786
787 err = __ceph_open_session(fsc->client, started);
788 if (err < 0)
789 goto out;
790
791 dout("mount opening root\n");
792 root = open_root_dentry(fsc, "", started);
793 if (IS_ERR(root)) {
794 err = PTR_ERR(root);
795 goto out;
796 }
797 if (fsc->sb->s_root) {
798 dput(root);
799 } else {
800 fsc->sb->s_root = root;
801 first = 1;
802
803 err = ceph_fs_debugfs_init(fsc);
804 if (err < 0)
805 goto fail;
806 }
807
808 if (path[0] == 0) {
809 dget(root);
810 } else {
811 dout("mount opening base mountpoint\n");
812 root = open_root_dentry(fsc, path, started);
813 if (IS_ERR(root)) {
814 err = PTR_ERR(root);
815 goto fail;
816 }
817 }
818
819 fsc->mount_state = CEPH_MOUNT_MOUNTED;
820 dout("mount success\n");
821 mutex_unlock(&fsc->client->mount_mutex);
822 return root;
823
824out:
825 mutex_unlock(&fsc->client->mount_mutex);
826 return ERR_PTR(err);
827
828fail:
829 if (first) {
830 dput(fsc->sb->s_root);
831 fsc->sb->s_root = NULL;
832 }
833 goto out;
834}
835
836static int ceph_set_super(struct super_block *s, void *data)
837{
838 struct ceph_fs_client *fsc = data;
839 int ret;
840
841 dout("set_super %p data %p\n", s, data);
842
843 s->s_flags = fsc->mount_options->sb_flags;
844 s->s_maxbytes = 1ULL << 40; /* temp value until we get mdsmap */
845
846 s->s_xattr = ceph_xattr_handlers;
847 s->s_fs_info = fsc;
848 fsc->sb = s;
849
850 s->s_op = &ceph_super_ops;
851 s->s_export_op = &ceph_export_ops;
852
853 s->s_time_gran = 1000; /* 1000 ns == 1 us */
854
855 ret = set_anon_super(s, NULL); /* what is that second arg for? */
856 if (ret != 0)
857 goto fail;
858
859 return ret;
860
861fail:
862 s->s_fs_info = NULL;
863 fsc->sb = NULL;
864 return ret;
865}
866
867/*
868 * share superblock if same fs AND options
869 */
870static int ceph_compare_super(struct super_block *sb, void *data)
871{
872 struct ceph_fs_client *new = data;
873 struct ceph_mount_options *fsopt = new->mount_options;
874 struct ceph_options *opt = new->client->options;
875 struct ceph_fs_client *other = ceph_sb_to_client(sb);
876
877 dout("ceph_compare_super %p\n", sb);
878
879 if (compare_mount_options(fsopt, opt, other)) {
880 dout("monitor(s)/mount options don't match\n");
881 return 0;
882 }
883 if ((opt->flags & CEPH_OPT_FSID) &&
884 ceph_fsid_compare(&opt->fsid, &other->client->fsid)) {
885 dout("fsid doesn't match\n");
886 return 0;
887 }
888 if (fsopt->sb_flags != other->mount_options->sb_flags) {
889 dout("flags differ\n");
890 return 0;
891 }
892 return 1;
893}
894
895/*
896 * construct our own bdi so we can control readahead, etc.
897 */
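/*
 * Rough sketch of the effect (illustrative numbers): with rasize=8388608
 * (8 MB) and 4 KB pages, ceph_register_bdi() below sets ra_pages to about
 * 8388608 / 4096 = 2048 pages of readahead; a rasize smaller than one page
 * falls back to default_backing_dev_info.ra_pages.
 */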
898static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
899
900static int ceph_register_bdi(struct super_block *sb,
901 struct ceph_fs_client *fsc)
902{
903 int err;
904
905 /* set ra_pages based on rasize mount option? */
906 if (fsc->mount_options->rasize >= PAGE_CACHE_SIZE)
907 fsc->backing_dev_info.ra_pages =
908 (fsc->mount_options->rasize + PAGE_CACHE_SIZE - 1)
909 >> PAGE_SHIFT;
910 else
911 fsc->backing_dev_info.ra_pages =
912 default_backing_dev_info.ra_pages;
913
914 err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%ld",
915 atomic_long_inc_return(&bdi_seq));
916 if (!err)
917 sb->s_bdi = &fsc->backing_dev_info;
918 return err;
919}
920
921static struct dentry *ceph_mount(struct file_system_type *fs_type,
922 int flags, const char *dev_name, void *data)
923{
924 struct super_block *sb;
925 struct ceph_fs_client *fsc;
926 struct dentry *res;
927 int err;
928 int (*compare_super)(struct super_block *, void *) = ceph_compare_super;
929 const char *path = NULL;
930 struct ceph_mount_options *fsopt = NULL;
931 struct ceph_options *opt = NULL;
932
933 dout("ceph_mount\n");
934
935#ifdef CONFIG_CEPH_FS_POSIX_ACL
936 flags |= MS_POSIXACL;
937#endif
938 err = parse_mount_options(&fsopt, &opt, flags, data, dev_name, &path);
939 if (err < 0) {
940 res = ERR_PTR(err);
941 goto out_final;
942 }
943
944 /* create client (which we may/may not use) */
945 fsc = create_fs_client(fsopt, opt);
946 if (IS_ERR(fsc)) {
947 res = ERR_CAST(fsc);
948 destroy_mount_options(fsopt);
949 ceph_destroy_options(opt);
950 goto out_final;
951 }
952
953 err = ceph_mdsc_init(fsc);
954 if (err < 0) {
955 res = ERR_PTR(err);
956 goto out;
957 }
958
959 if (ceph_test_opt(fsc->client, NOSHARE))
960 compare_super = NULL;
961 sb = sget(fs_type, compare_super, ceph_set_super, flags, fsc);
962 if (IS_ERR(sb)) {
963 res = ERR_CAST(sb);
964 goto out;
965 }
966
967 if (ceph_sb_to_client(sb) != fsc) {
968 ceph_mdsc_destroy(fsc);
969 destroy_fs_client(fsc);
970 fsc = ceph_sb_to_client(sb);
971 dout("get_sb got existing client %p\n", fsc);
972 } else {
973 dout("get_sb using new client %p\n", fsc);
974 err = ceph_register_bdi(sb, fsc);
975 if (err < 0) {
976 res = ERR_PTR(err);
977 goto out_splat;
978 }
979 }
980
981 res = ceph_real_mount(fsc, path);
982 if (IS_ERR(res))
983 goto out_splat;
984 dout("root %p inode %p ino %llx.%llx\n", res,
985 res->d_inode, ceph_vinop(res->d_inode));
986 return res;
987
988out_splat:
989 ceph_mdsc_close_sessions(fsc->mdsc);
990 deactivate_locked_super(sb);
991 goto out_final;
992
993out:
994 ceph_mdsc_destroy(fsc);
995 destroy_fs_client(fsc);
996out_final:
997 dout("ceph_mount fail %ld\n", PTR_ERR(res));
998 return res;
999}
1000
1001static void ceph_kill_sb(struct super_block *s)
1002{
1003 struct ceph_fs_client *fsc = ceph_sb_to_client(s);
1004 dout("kill_sb %p\n", s);
1005 ceph_mdsc_pre_umount(fsc->mdsc);
1006 kill_anon_super(s); /* will call put_super after sb is r/o */
1007 ceph_mdsc_destroy(fsc);
1008 destroy_fs_client(fsc);
1009}
1010
1011static struct file_system_type ceph_fs_type = {
1012 .owner = THIS_MODULE,
1013 .name = "ceph",
1014 .mount = ceph_mount,
1015 .kill_sb = ceph_kill_sb,
1016 .fs_flags = FS_RENAME_DOES_D_MOVE,
1017};
1018MODULE_ALIAS_FS("ceph");
1019
1020#define _STRINGIFY(x) #x
1021#define STRINGIFY(x) _STRINGIFY(x)
1022
1023static int __init init_ceph(void)
1024{
1025 int ret = init_caches();
1026 if (ret)
1027 goto out;
1028
1029 ceph_flock_init();
1030 ceph_xattr_init();
1031 ret = register_filesystem(&ceph_fs_type);
1032 if (ret)
1033 goto out_icache;
1034
1035 pr_info("loaded (mds proto %d)\n", CEPH_MDSC_PROTOCOL);
1036
1037 return 0;
1038
1039out_icache:
1040 ceph_xattr_exit();
1041 destroy_caches();
1042out:
1043 return ret;
1044}
1045
1046static void __exit exit_ceph(void)
1047{
1048 dout("exit_ceph\n");
1049 unregister_filesystem(&ceph_fs_type);
1050 ceph_xattr_exit();
1051 destroy_caches();
1052}
1053
1054module_init(init_ceph);
1055module_exit(exit_ceph);
1056
1057MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
1058MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
1059MODULE_AUTHOR("Patience Warnick <patience@newdream.net>");
1060MODULE_DESCRIPTION("Ceph filesystem for Linux");
1061MODULE_LICENSE("GPL");
1// SPDX-License-Identifier: GPL-2.0-only
2
3#include <linux/ceph/ceph_debug.h>
4
5#include <linux/backing-dev.h>
6#include <linux/ctype.h>
7#include <linux/fs.h>
8#include <linux/inet.h>
9#include <linux/in6.h>
10#include <linux/module.h>
11#include <linux/mount.h>
12#include <linux/fs_context.h>
13#include <linux/fs_parser.h>
14#include <linux/sched.h>
15#include <linux/seq_file.h>
16#include <linux/slab.h>
17#include <linux/statfs.h>
18#include <linux/string.h>
19
20#include "super.h"
21#include "mds_client.h"
22#include "cache.h"
23
24#include <linux/ceph/ceph_features.h>
25#include <linux/ceph/decode.h>
26#include <linux/ceph/mon_client.h>
27#include <linux/ceph/auth.h>
28#include <linux/ceph/debugfs.h>
29
30static DEFINE_SPINLOCK(ceph_fsc_lock);
31static LIST_HEAD(ceph_fsc_list);
32
33/*
34 * Ceph superblock operations
35 *
36 * Handle the basics of mounting, unmounting.
37 */
38
39/*
40 * super ops
41 */
42static void ceph_put_super(struct super_block *s)
43{
44 struct ceph_fs_client *fsc = ceph_sb_to_client(s);
45
46 dout("put_super\n");
47 ceph_mdsc_close_sessions(fsc->mdsc);
48}
49
50static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
51{
52 struct ceph_fs_client *fsc = ceph_inode_to_client(d_inode(dentry));
53 struct ceph_mon_client *monc = &fsc->client->monc;
54 struct ceph_statfs st;
55 u64 fsid;
56 int err;
57 u64 data_pool;
58
59 if (fsc->mdsc->mdsmap->m_num_data_pg_pools == 1) {
60 data_pool = fsc->mdsc->mdsmap->m_data_pg_pools[0];
61 } else {
62 data_pool = CEPH_NOPOOL;
63 }
64
65 dout("statfs\n");
66 err = ceph_monc_do_statfs(monc, data_pool, &st);
67 if (err < 0)
68 return err;
69
70 /* fill in kstatfs */
71 buf->f_type = CEPH_SUPER_MAGIC; /* ?? */
72
73 /*
74 * express utilization in terms of large blocks to avoid
75 * overflow on 32-bit machines.
76 *
77 * NOTE: for the time being, we make bsize == frsize to humor
78 * not-yet-ancient versions of glibc that are broken.
79 * Someday, we will probably want to report a real block
80 * size... whatever that may mean for a network file system!
81 */
82 buf->f_bsize = 1 << CEPH_BLOCK_SHIFT;
83 buf->f_frsize = 1 << CEPH_BLOCK_SHIFT;
84
85 /*
86 * By default use root quota for stats; fall back to overall filesystem
87 * usage if using 'noquotadf' mount option or if the root dir doesn't
88 * have max_bytes quota set.
89 */
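 /*
  * Illustrative example (hypothetical values): with a 10 GB quota on the
  * root directory, e.g. set through the ceph.quota.max_bytes vxattr,
  * ceph_quota_update_statfs() makes df(1) report roughly 10 GB of capacity
  * instead of the whole cluster's.
  */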
90 if (ceph_test_mount_opt(fsc, NOQUOTADF) ||
91 !ceph_quota_update_statfs(fsc, buf)) {
92 buf->f_blocks = le64_to_cpu(st.kb) >> (CEPH_BLOCK_SHIFT-10);
93 buf->f_bfree = le64_to_cpu(st.kb_avail) >> (CEPH_BLOCK_SHIFT-10);
94 buf->f_bavail = le64_to_cpu(st.kb_avail) >> (CEPH_BLOCK_SHIFT-10);
95 }
96
97 buf->f_files = le64_to_cpu(st.num_objects);
98 buf->f_ffree = -1;
99 buf->f_namelen = NAME_MAX;
100
101 /* Must convert the fsid, for consistent values across arches */
102 mutex_lock(&monc->mutex);
103 fsid = le64_to_cpu(*(__le64 *)(&monc->monmap->fsid)) ^
104 le64_to_cpu(*((__le64 *)&monc->monmap->fsid + 1));
105 mutex_unlock(&monc->mutex);
106
107 buf->f_fsid.val[0] = fsid & 0xffffffff;
108 buf->f_fsid.val[1] = fsid >> 32;
109
110 return 0;
111}
112
113static int ceph_sync_fs(struct super_block *sb, int wait)
114{
115 struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
116
117 if (!wait) {
118 dout("sync_fs (non-blocking)\n");
119 ceph_flush_dirty_caps(fsc->mdsc);
120 dout("sync_fs (non-blocking) done\n");
121 return 0;
122 }
123
124 dout("sync_fs (blocking)\n");
125 ceph_osdc_sync(&fsc->client->osdc);
126 ceph_mdsc_sync(fsc->mdsc);
127 dout("sync_fs (blocking) done\n");
128 return 0;
129}
130
131/*
132 * mount options
133 */
134enum {
135 Opt_wsize,
136 Opt_rsize,
137 Opt_rasize,
138 Opt_caps_wanted_delay_min,
139 Opt_caps_wanted_delay_max,
140 Opt_caps_max,
141 Opt_readdir_max_entries,
142 Opt_readdir_max_bytes,
143 Opt_congestion_kb,
144 /* int args above */
145 Opt_snapdirname,
146 Opt_mds_namespace,
147 Opt_recover_session,
148 Opt_source,
149 /* string args above */
150 Opt_dirstat,
151 Opt_rbytes,
152 Opt_asyncreaddir,
153 Opt_dcache,
154 Opt_ino32,
155 Opt_fscache,
156 Opt_poolperm,
157 Opt_require_active_mds,
158 Opt_acl,
159 Opt_quotadf,
160 Opt_copyfrom,
161 Opt_wsync,
162};
163
164enum ceph_recover_session_mode {
165 ceph_recover_session_no,
166 ceph_recover_session_clean
167};
168
169static const struct constant_table ceph_param_recover[] = {
170 { "no", ceph_recover_session_no },
171 { "clean", ceph_recover_session_clean },
172 {}
173};
174
175static const struct fs_parameter_spec ceph_mount_parameters[] = {
176 fsparam_flag_no ("acl", Opt_acl),
177 fsparam_flag_no ("asyncreaddir", Opt_asyncreaddir),
178 fsparam_s32 ("caps_max", Opt_caps_max),
179 fsparam_u32 ("caps_wanted_delay_max", Opt_caps_wanted_delay_max),
180 fsparam_u32 ("caps_wanted_delay_min", Opt_caps_wanted_delay_min),
181 fsparam_u32 ("write_congestion_kb", Opt_congestion_kb),
182 fsparam_flag_no ("copyfrom", Opt_copyfrom),
183 fsparam_flag_no ("dcache", Opt_dcache),
184 fsparam_flag_no ("dirstat", Opt_dirstat),
185 fsparam_flag_no ("fsc", Opt_fscache), // fsc|nofsc
186 fsparam_string ("fsc", Opt_fscache), // fsc=...
187 fsparam_flag_no ("ino32", Opt_ino32),
188 fsparam_string ("mds_namespace", Opt_mds_namespace),
189 fsparam_flag_no ("poolperm", Opt_poolperm),
190 fsparam_flag_no ("quotadf", Opt_quotadf),
191 fsparam_u32 ("rasize", Opt_rasize),
192 fsparam_flag_no ("rbytes", Opt_rbytes),
193 fsparam_u32 ("readdir_max_bytes", Opt_readdir_max_bytes),
194 fsparam_u32 ("readdir_max_entries", Opt_readdir_max_entries),
195 fsparam_enum ("recover_session", Opt_recover_session, ceph_param_recover),
196 fsparam_flag_no ("require_active_mds", Opt_require_active_mds),
197 fsparam_u32 ("rsize", Opt_rsize),
198 fsparam_string ("snapdirname", Opt_snapdirname),
199 fsparam_string ("source", Opt_source),
200 fsparam_u32 ("wsize", Opt_wsize),
201 fsparam_flag_no ("wsync", Opt_wsync),
202 {}
203};
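
/*
 * Illustration only (hypothetical cluster and paths): an invocation such as
 *   mount -t ceph 192.168.0.1:6789,192.168.0.2:6789:/dir /mnt \
 *         -o name=admin,recover_session=clean,nowsync
 * arrives here one parameter at a time; libceph options (name=, secret=,
 * ...) are consumed by ceph_parse_param(), everything else is matched by
 * fs_parse() against the table above.
 */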
204
205struct ceph_parse_opts_ctx {
206 struct ceph_options *copts;
207 struct ceph_mount_options *opts;
208};
209
210/*
211 * Remove adjacent slashes and then the trailing slash, unless it is
212 * the only remaining character.
213 *
214 * E.g. "//dir1////dir2///" --> "/dir1/dir2", "///" --> "/".
215 */
216static void canonicalize_path(char *path)
217{
218 int i, j = 0;
219
220 for (i = 0; path[i] != '\0'; i++) {
221 if (path[i] != '/' || j < 1 || path[j - 1] != '/')
222 path[j++] = path[i];
223 }
224
225 if (j > 1 && path[j - 1] == '/')
226 j--;
227 path[j] = '\0';
228}
229
230/*
231 * Parse the source parameter. Distinguish the server list from the path.
232 *
233 * The source will look like:
234 * <server_spec>[,<server_spec>...]:[<path>]
235 * where
236 * <server_spec> is <ip>[:<port>]
237 * <path> is optional, but if present must begin with '/'
238 */
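/*
 * For example (hypothetical addresses), a source of
 * "192.168.0.1:6789,192.168.0.2:6789:/dir1//dir2/" yields the monitor list
 * "192.168.0.1:6789,192.168.0.2:6789", while server_path keeps the leading
 * '/' and is reduced by canonicalize_path() to "/dir1/dir2".
 */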
239static int ceph_parse_source(struct fs_parameter *param, struct fs_context *fc)
240{
241 struct ceph_parse_opts_ctx *pctx = fc->fs_private;
242 struct ceph_mount_options *fsopt = pctx->opts;
243 char *dev_name = param->string, *dev_name_end;
244 int ret;
245
246 dout("%s '%s'\n", __func__, dev_name);
247 if (!dev_name || !*dev_name)
248 return invalfc(fc, "Empty source");
249
250 dev_name_end = strchr(dev_name, '/');
251 if (dev_name_end) {
252 /*
253 * The server_path keeps the whole path string given by userland,
254 * including the leading '/'.
255 */
256 kfree(fsopt->server_path);
257 fsopt->server_path = kstrdup(dev_name_end, GFP_KERNEL);
258 if (!fsopt->server_path)
259 return -ENOMEM;
260
261 canonicalize_path(fsopt->server_path);
262 } else {
263 dev_name_end = dev_name + strlen(dev_name);
264 }
265
266 dev_name_end--; /* back up to ':' separator */
267 if (dev_name_end < dev_name || *dev_name_end != ':')
268 return invalfc(fc, "No path or : separator in source");
269
270 dout("device name '%.*s'\n", (int)(dev_name_end - dev_name), dev_name);
271 if (fsopt->server_path)
272 dout("server path '%s'\n", fsopt->server_path);
273
274 ret = ceph_parse_mon_ips(param->string, dev_name_end - dev_name,
275 pctx->copts, fc->log.log);
276 if (ret)
277 return ret;
278
279 fc->source = param->string;
280 param->string = NULL;
281 return 0;
282}
283
284static int ceph_parse_mount_param(struct fs_context *fc,
285 struct fs_parameter *param)
286{
287 struct ceph_parse_opts_ctx *pctx = fc->fs_private;
288 struct ceph_mount_options *fsopt = pctx->opts;
289 struct fs_parse_result result;
290 unsigned int mode;
291 int token, ret;
292
293 ret = ceph_parse_param(param, pctx->copts, fc->log.log);
294 if (ret != -ENOPARAM)
295 return ret;
296
297 token = fs_parse(fc, ceph_mount_parameters, param, &result);
298 dout("%s fs_parse '%s' token %d\n", __func__, param->key, token);
299 if (token < 0)
300 return token;
301
302 switch (token) {
303 case Opt_snapdirname:
304 kfree(fsopt->snapdir_name);
305 fsopt->snapdir_name = param->string;
306 param->string = NULL;
307 break;
308 case Opt_mds_namespace:
309 kfree(fsopt->mds_namespace);
310 fsopt->mds_namespace = param->string;
311 param->string = NULL;
312 break;
313 case Opt_recover_session:
314 mode = result.uint_32;
315 if (mode == ceph_recover_session_no)
316 fsopt->flags &= ~CEPH_MOUNT_OPT_CLEANRECOVER;
317 else if (mode == ceph_recover_session_clean)
318 fsopt->flags |= CEPH_MOUNT_OPT_CLEANRECOVER;
319 else
320 BUG();
321 break;
322 case Opt_source:
323 if (fc->source)
324 return invalfc(fc, "Multiple sources specified");
325 return ceph_parse_source(param, fc);
326 case Opt_wsize:
327 if (result.uint_32 < PAGE_SIZE ||
328 result.uint_32 > CEPH_MAX_WRITE_SIZE)
329 goto out_of_range;
330 fsopt->wsize = ALIGN(result.uint_32, PAGE_SIZE);
331 break;
332 case Opt_rsize:
333 if (result.uint_32 < PAGE_SIZE ||
334 result.uint_32 > CEPH_MAX_READ_SIZE)
335 goto out_of_range;
336 fsopt->rsize = ALIGN(result.uint_32, PAGE_SIZE);
337 break;
338 case Opt_rasize:
339 fsopt->rasize = ALIGN(result.uint_32, PAGE_SIZE);
340 break;
341 case Opt_caps_wanted_delay_min:
342 if (result.uint_32 < 1)
343 goto out_of_range;
344 fsopt->caps_wanted_delay_min = result.uint_32;
345 break;
346 case Opt_caps_wanted_delay_max:
347 if (result.uint_32 < 1)
348 goto out_of_range;
349 fsopt->caps_wanted_delay_max = result.uint_32;
350 break;
351 case Opt_caps_max:
352 if (result.int_32 < 0)
353 goto out_of_range;
354 fsopt->caps_max = result.int_32;
355 break;
356 case Opt_readdir_max_entries:
357 if (result.uint_32 < 1)
358 goto out_of_range;
359 fsopt->max_readdir = result.uint_32;
360 break;
361 case Opt_readdir_max_bytes:
362 if (result.uint_32 < PAGE_SIZE && result.uint_32 != 0)
363 goto out_of_range;
364 fsopt->max_readdir_bytes = result.uint_32;
365 break;
366 case Opt_congestion_kb:
367 if (result.uint_32 < 1024) /* at least 1M */
368 goto out_of_range;
369 fsopt->congestion_kb = result.uint_32;
370 break;
371 case Opt_dirstat:
372 if (!result.negated)
373 fsopt->flags |= CEPH_MOUNT_OPT_DIRSTAT;
374 else
375 fsopt->flags &= ~CEPH_MOUNT_OPT_DIRSTAT;
376 break;
377 case Opt_rbytes:
378 if (!result.negated)
379 fsopt->flags |= CEPH_MOUNT_OPT_RBYTES;
380 else
381 fsopt->flags &= ~CEPH_MOUNT_OPT_RBYTES;
382 break;
383 case Opt_asyncreaddir:
384 if (!result.negated)
385 fsopt->flags &= ~CEPH_MOUNT_OPT_NOASYNCREADDIR;
386 else
387 fsopt->flags |= CEPH_MOUNT_OPT_NOASYNCREADDIR;
388 break;
389 case Opt_dcache:
390 if (!result.negated)
391 fsopt->flags |= CEPH_MOUNT_OPT_DCACHE;
392 else
393 fsopt->flags &= ~CEPH_MOUNT_OPT_DCACHE;
394 break;
395 case Opt_ino32:
396 if (!result.negated)
397 fsopt->flags |= CEPH_MOUNT_OPT_INO32;
398 else
399 fsopt->flags &= ~CEPH_MOUNT_OPT_INO32;
400 break;
401
402 case Opt_fscache:
403#ifdef CONFIG_CEPH_FSCACHE
404 kfree(fsopt->fscache_uniq);
405 fsopt->fscache_uniq = NULL;
406 if (result.negated) {
407 fsopt->flags &= ~CEPH_MOUNT_OPT_FSCACHE;
408 } else {
409 fsopt->flags |= CEPH_MOUNT_OPT_FSCACHE;
410 fsopt->fscache_uniq = param->string;
411 param->string = NULL;
412 }
413 break;
414#else
415 return invalfc(fc, "fscache support is disabled");
416#endif
417 case Opt_poolperm:
418 if (!result.negated)
419 fsopt->flags &= ~CEPH_MOUNT_OPT_NOPOOLPERM;
420 else
421 fsopt->flags |= CEPH_MOUNT_OPT_NOPOOLPERM;
422 break;
423 case Opt_require_active_mds:
424 if (!result.negated)
425 fsopt->flags &= ~CEPH_MOUNT_OPT_MOUNTWAIT;
426 else
427 fsopt->flags |= CEPH_MOUNT_OPT_MOUNTWAIT;
428 break;
429 case Opt_quotadf:
430 if (!result.negated)
431 fsopt->flags &= ~CEPH_MOUNT_OPT_NOQUOTADF;
432 else
433 fsopt->flags |= CEPH_MOUNT_OPT_NOQUOTADF;
434 break;
435 case Opt_copyfrom:
436 if (!result.negated)
437 fsopt->flags &= ~CEPH_MOUNT_OPT_NOCOPYFROM;
438 else
439 fsopt->flags |= CEPH_MOUNT_OPT_NOCOPYFROM;
440 break;
441 case Opt_acl:
442 if (!result.negated) {
443#ifdef CONFIG_CEPH_FS_POSIX_ACL
444 fc->sb_flags |= SB_POSIXACL;
445#else
446 return invalfc(fc, "POSIX ACL support is disabled");
447#endif
448 } else {
449 fc->sb_flags &= ~SB_POSIXACL;
450 }
451 break;
452 case Opt_wsync:
453 if (!result.negated)
454 fsopt->flags &= ~CEPH_MOUNT_OPT_ASYNC_DIROPS;
455 else
456 fsopt->flags |= CEPH_MOUNT_OPT_ASYNC_DIROPS;
457 break;
458 default:
459 BUG();
460 }
461 return 0;
462
463out_of_range:
464 return invalfc(fc, "%s out of range", param->key);
465}
466
467static void destroy_mount_options(struct ceph_mount_options *args)
468{
469 dout("destroy_mount_options %p\n", args);
470 if (!args)
471 return;
472
473 kfree(args->snapdir_name);
474 kfree(args->mds_namespace);
475 kfree(args->server_path);
476 kfree(args->fscache_uniq);
477 kfree(args);
478}
479
480static int strcmp_null(const char *s1, const char *s2)
481{
482 if (!s1 && !s2)
483 return 0;
484 if (s1 && !s2)
485 return -1;
486 if (!s1 && s2)
487 return 1;
488 return strcmp(s1, s2);
489}
490
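/*
 * Descriptive note: the memcmp() in compare_mount_options() relies on the
 * scalar members of struct ceph_mount_options being declared before
 * snapdir_name (and the other string pointers) in super.h, so a flat
 * compare up to offsetof(..., snapdir_name) covers every integer/flag
 * option; the strings are then compared one by one with strcmp_null().
 */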
491static int compare_mount_options(struct ceph_mount_options *new_fsopt,
492 struct ceph_options *new_opt,
493 struct ceph_fs_client *fsc)
494{
495 struct ceph_mount_options *fsopt1 = new_fsopt;
496 struct ceph_mount_options *fsopt2 = fsc->mount_options;
497 int ofs = offsetof(struct ceph_mount_options, snapdir_name);
498 int ret;
499
500 ret = memcmp(fsopt1, fsopt2, ofs);
501 if (ret)
502 return ret;
503
504 ret = strcmp_null(fsopt1->snapdir_name, fsopt2->snapdir_name);
505 if (ret)
506 return ret;
507
508 ret = strcmp_null(fsopt1->mds_namespace, fsopt2->mds_namespace);
509 if (ret)
510 return ret;
511
512 ret = strcmp_null(fsopt1->server_path, fsopt2->server_path);
513 if (ret)
514 return ret;
515
516 ret = strcmp_null(fsopt1->fscache_uniq, fsopt2->fscache_uniq);
517 if (ret)
518 return ret;
519
520 return ceph_compare_options(new_opt, fsc->client);
521}
522
523/**
524 * ceph_show_options - Show mount options in /proc/mounts
525 * @m: seq_file to write to
526 * @root: root of that (sub)tree
527 */
528static int ceph_show_options(struct seq_file *m, struct dentry *root)
529{
530 struct ceph_fs_client *fsc = ceph_sb_to_client(root->d_sb);
531 struct ceph_mount_options *fsopt = fsc->mount_options;
532 size_t pos;
533 int ret;
534
535 /* a comma between MNT/MS and client options */
536 seq_putc(m, ',');
537 pos = m->count;
538
539 ret = ceph_print_client_options(m, fsc->client, false);
540 if (ret)
541 return ret;
542
543 /* retract our comma if no client options */
544 if (m->count == pos)
545 m->count--;
546
547 if (fsopt->flags & CEPH_MOUNT_OPT_DIRSTAT)
548 seq_puts(m, ",dirstat");
549 if ((fsopt->flags & CEPH_MOUNT_OPT_RBYTES))
550 seq_puts(m, ",rbytes");
551 if (fsopt->flags & CEPH_MOUNT_OPT_NOASYNCREADDIR)
552 seq_puts(m, ",noasyncreaddir");
553 if ((fsopt->flags & CEPH_MOUNT_OPT_DCACHE) == 0)
554 seq_puts(m, ",nodcache");
555 if (fsopt->flags & CEPH_MOUNT_OPT_INO32)
556 seq_puts(m, ",ino32");
557 if (fsopt->flags & CEPH_MOUNT_OPT_FSCACHE) {
558 seq_show_option(m, "fsc", fsopt->fscache_uniq);
559 }
560 if (fsopt->flags & CEPH_MOUNT_OPT_NOPOOLPERM)
561 seq_puts(m, ",nopoolperm");
562 if (fsopt->flags & CEPH_MOUNT_OPT_NOQUOTADF)
563 seq_puts(m, ",noquotadf");
564
565#ifdef CONFIG_CEPH_FS_POSIX_ACL
566 if (root->d_sb->s_flags & SB_POSIXACL)
567 seq_puts(m, ",acl");
568 else
569 seq_puts(m, ",noacl");
570#endif
571
572 if ((fsopt->flags & CEPH_MOUNT_OPT_NOCOPYFROM) == 0)
573 seq_puts(m, ",copyfrom");
574
575 if (fsopt->mds_namespace)
576 seq_show_option(m, "mds_namespace", fsopt->mds_namespace);
577
578 if (fsopt->flags & CEPH_MOUNT_OPT_CLEANRECOVER)
579 seq_show_option(m, "recover_session", "clean");
580
581 if (fsopt->flags & CEPH_MOUNT_OPT_ASYNC_DIROPS)
582 seq_puts(m, ",nowsync");
583
584 if (fsopt->wsize != CEPH_MAX_WRITE_SIZE)
585 seq_printf(m, ",wsize=%u", fsopt->wsize);
586 if (fsopt->rsize != CEPH_MAX_READ_SIZE)
587 seq_printf(m, ",rsize=%u", fsopt->rsize);
588 if (fsopt->rasize != CEPH_RASIZE_DEFAULT)
589 seq_printf(m, ",rasize=%u", fsopt->rasize);
590 if (fsopt->congestion_kb != default_congestion_kb())
591 seq_printf(m, ",write_congestion_kb=%u", fsopt->congestion_kb);
592 if (fsopt->caps_max)
593 seq_printf(m, ",caps_max=%d", fsopt->caps_max);
594 if (fsopt->caps_wanted_delay_min != CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT)
595 seq_printf(m, ",caps_wanted_delay_min=%u",
596 fsopt->caps_wanted_delay_min);
597 if (fsopt->caps_wanted_delay_max != CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT)
598 seq_printf(m, ",caps_wanted_delay_max=%u",
599 fsopt->caps_wanted_delay_max);
600 if (fsopt->max_readdir != CEPH_MAX_READDIR_DEFAULT)
601 seq_printf(m, ",readdir_max_entries=%u", fsopt->max_readdir);
602 if (fsopt->max_readdir_bytes != CEPH_MAX_READDIR_BYTES_DEFAULT)
603 seq_printf(m, ",readdir_max_bytes=%u", fsopt->max_readdir_bytes);
604 if (strcmp(fsopt->snapdir_name, CEPH_SNAPDIRNAME_DEFAULT))
605 seq_show_option(m, "snapdirname", fsopt->snapdir_name);
606
607 return 0;
608}
609
610/*
611 * handle any mon messages the standard library doesn't understand.
612 * return error if we don't either.
613 */
614static int extra_mon_dispatch(struct ceph_client *client, struct ceph_msg *msg)
615{
616 struct ceph_fs_client *fsc = client->private;
617 int type = le16_to_cpu(msg->hdr.type);
618
619 switch (type) {
620 case CEPH_MSG_MDS_MAP:
621 ceph_mdsc_handle_mdsmap(fsc->mdsc, msg);
622 return 0;
623 case CEPH_MSG_FS_MAP_USER:
624 ceph_mdsc_handle_fsmap(fsc->mdsc, msg);
625 return 0;
626 default:
627 return -1;
628 }
629}
630
631/*
632 * create a new fs client
633 *
634 * Success or not, this function consumes @fsopt and @opt.
635 */
636static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt,
637 struct ceph_options *opt)
638{
639 struct ceph_fs_client *fsc;
640 int err;
641
642 fsc = kzalloc(sizeof(*fsc), GFP_KERNEL);
643 if (!fsc) {
644 err = -ENOMEM;
645 goto fail;
646 }
647
648 fsc->client = ceph_create_client(opt, fsc);
649 if (IS_ERR(fsc->client)) {
650 err = PTR_ERR(fsc->client);
651 goto fail;
652 }
653 opt = NULL; /* fsc->client now owns this */
654
655 fsc->client->extra_mon_dispatch = extra_mon_dispatch;
656 ceph_set_opt(fsc->client, ABORT_ON_FULL);
657
658 if (!fsopt->mds_namespace) {
659 ceph_monc_want_map(&fsc->client->monc, CEPH_SUB_MDSMAP,
660 0, true);
661 } else {
662 ceph_monc_want_map(&fsc->client->monc, CEPH_SUB_FSMAP,
663 0, false);
664 }
665
666 fsc->mount_options = fsopt;
667
668 fsc->sb = NULL;
669 fsc->mount_state = CEPH_MOUNT_MOUNTING;
670 fsc->filp_gen = 1;
671 fsc->have_copy_from2 = true;
672
673 atomic_long_set(&fsc->writeback_count, 0);
674
675 err = -ENOMEM;
676 /*
677 * The number of concurrent works can be high but they don't need
678 * to be processed in parallel, limit concurrency.
679 */
680 fsc->inode_wq = alloc_workqueue("ceph-inode", WQ_UNBOUND, 0);
681 if (!fsc->inode_wq)
682 goto fail_client;
683 fsc->cap_wq = alloc_workqueue("ceph-cap", 0, 1);
684 if (!fsc->cap_wq)
685 goto fail_inode_wq;
686
687 spin_lock(&ceph_fsc_lock);
688 list_add_tail(&fsc->metric_wakeup, &ceph_fsc_list);
689 spin_unlock(&ceph_fsc_lock);
690
691 return fsc;
692
693fail_inode_wq:
694 destroy_workqueue(fsc->inode_wq);
695fail_client:
696 ceph_destroy_client(fsc->client);
697fail:
698 kfree(fsc);
699 if (opt)
700 ceph_destroy_options(opt);
701 destroy_mount_options(fsopt);
702 return ERR_PTR(err);
703}
704
705static void flush_fs_workqueues(struct ceph_fs_client *fsc)
706{
707 flush_workqueue(fsc->inode_wq);
708 flush_workqueue(fsc->cap_wq);
709}
710
711static void destroy_fs_client(struct ceph_fs_client *fsc)
712{
713 dout("destroy_fs_client %p\n", fsc);
714
715 spin_lock(&ceph_fsc_lock);
716 list_del(&fsc->metric_wakeup);
717 spin_unlock(&ceph_fsc_lock);
718
719 ceph_mdsc_destroy(fsc);
720 destroy_workqueue(fsc->inode_wq);
721 destroy_workqueue(fsc->cap_wq);
722
723 destroy_mount_options(fsc->mount_options);
724
725 ceph_destroy_client(fsc->client);
726
727 kfree(fsc);
728 dout("destroy_fs_client %p done\n", fsc);
729}
730
731/*
732 * caches
733 */
734struct kmem_cache *ceph_inode_cachep;
735struct kmem_cache *ceph_cap_cachep;
736struct kmem_cache *ceph_cap_flush_cachep;
737struct kmem_cache *ceph_dentry_cachep;
738struct kmem_cache *ceph_file_cachep;
739struct kmem_cache *ceph_dir_file_cachep;
740struct kmem_cache *ceph_mds_request_cachep;
741mempool_t *ceph_wb_pagevec_pool;
742
743static void ceph_inode_init_once(void *foo)
744{
745 struct ceph_inode_info *ci = foo;
746 inode_init_once(&ci->vfs_inode);
747}
748
749static int __init init_caches(void)
750{
751 int error = -ENOMEM;
752
753 ceph_inode_cachep = kmem_cache_create("ceph_inode_info",
754 sizeof(struct ceph_inode_info),
755 __alignof__(struct ceph_inode_info),
756 SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|
757 SLAB_ACCOUNT, ceph_inode_init_once);
758 if (!ceph_inode_cachep)
759 return -ENOMEM;
760
761 ceph_cap_cachep = KMEM_CACHE(ceph_cap, SLAB_MEM_SPREAD);
762 if (!ceph_cap_cachep)
763 goto bad_cap;
764 ceph_cap_flush_cachep = KMEM_CACHE(ceph_cap_flush,
765 SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
766 if (!ceph_cap_flush_cachep)
767 goto bad_cap_flush;
768
769 ceph_dentry_cachep = KMEM_CACHE(ceph_dentry_info,
770 SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
771 if (!ceph_dentry_cachep)
772 goto bad_dentry;
773
774 ceph_file_cachep = KMEM_CACHE(ceph_file_info, SLAB_MEM_SPREAD);
775 if (!ceph_file_cachep)
776 goto bad_file;
777
778 ceph_dir_file_cachep = KMEM_CACHE(ceph_dir_file_info, SLAB_MEM_SPREAD);
779 if (!ceph_dir_file_cachep)
780 goto bad_dir_file;
781
782 ceph_mds_request_cachep = KMEM_CACHE(ceph_mds_request, SLAB_MEM_SPREAD);
783 if (!ceph_mds_request_cachep)
784 goto bad_mds_req;
785
786 ceph_wb_pagevec_pool = mempool_create_kmalloc_pool(10, CEPH_MAX_WRITE_SIZE >> PAGE_SHIFT);
787 if (!ceph_wb_pagevec_pool)
788 goto bad_pagevec_pool;
789
790 error = ceph_fscache_register();
791 if (error)
792 goto bad_fscache;
793
794 return 0;
795
796bad_fscache:
797 mempool_destroy(ceph_wb_pagevec_pool);
798bad_pagevec_pool:
799 kmem_cache_destroy(ceph_mds_request_cachep);
800bad_mds_req:
801 kmem_cache_destroy(ceph_dir_file_cachep);
802bad_dir_file:
803 kmem_cache_destroy(ceph_file_cachep);
804bad_file:
805 kmem_cache_destroy(ceph_dentry_cachep);
806bad_dentry:
807 kmem_cache_destroy(ceph_cap_flush_cachep);
808bad_cap_flush:
809 kmem_cache_destroy(ceph_cap_cachep);
810bad_cap:
811 kmem_cache_destroy(ceph_inode_cachep);
812 return error;
813}
814
815static void destroy_caches(void)
816{
817 /*
818 * Make sure all delayed rcu free inodes are flushed before we
819 * destroy cache.
820 */
821 rcu_barrier();
822
823 kmem_cache_destroy(ceph_inode_cachep);
824 kmem_cache_destroy(ceph_cap_cachep);
825 kmem_cache_destroy(ceph_cap_flush_cachep);
826 kmem_cache_destroy(ceph_dentry_cachep);
827 kmem_cache_destroy(ceph_file_cachep);
828 kmem_cache_destroy(ceph_dir_file_cachep);
829 kmem_cache_destroy(ceph_mds_request_cachep);
830 mempool_destroy(ceph_wb_pagevec_pool);
831
832 ceph_fscache_unregister();
833}
834
835/*
836 * ceph_umount_begin - initiate forced umount. Tear down the
837 * mount, skipping steps that may hang while waiting for server(s).
838 */
839static void ceph_umount_begin(struct super_block *sb)
840{
841 struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
842
843 dout("ceph_umount_begin - starting forced umount\n");
844 if (!fsc)
845 return;
846 fsc->mount_state = CEPH_MOUNT_SHUTDOWN;
847 ceph_osdc_abort_requests(&fsc->client->osdc, -EIO);
848 ceph_mdsc_force_umount(fsc->mdsc);
849 fsc->filp_gen++; // invalidate open files
850}
851
852static const struct super_operations ceph_super_ops = {
853 .alloc_inode = ceph_alloc_inode,
854 .free_inode = ceph_free_inode,
855 .write_inode = ceph_write_inode,
856 .drop_inode = generic_delete_inode,
857 .evict_inode = ceph_evict_inode,
858 .sync_fs = ceph_sync_fs,
859 .put_super = ceph_put_super,
860 .show_options = ceph_show_options,
861 .statfs = ceph_statfs,
862 .umount_begin = ceph_umount_begin,
863};
864
865/*
866 * Bootstrap mount by opening the root directory. Note the mount
867 * @started time from caller, and time out if this takes too long.
868 */
869static struct dentry *open_root_dentry(struct ceph_fs_client *fsc,
870 const char *path,
871 unsigned long started)
872{
873 struct ceph_mds_client *mdsc = fsc->mdsc;
874 struct ceph_mds_request *req = NULL;
875 int err;
876 struct dentry *root;
877
878 /* open dir */
879 dout("open_root_inode opening '%s'\n", path);
880 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS);
881 if (IS_ERR(req))
882 return ERR_CAST(req);
883 req->r_path1 = kstrdup(path, GFP_NOFS);
884 if (!req->r_path1) {
885 root = ERR_PTR(-ENOMEM);
886 goto out;
887 }
888
889 req->r_ino1.ino = CEPH_INO_ROOT;
890 req->r_ino1.snap = CEPH_NOSNAP;
891 req->r_started = started;
892 req->r_timeout = fsc->client->options->mount_timeout;
893 req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE);
894 req->r_num_caps = 2;
895 err = ceph_mdsc_do_request(mdsc, NULL, req);
896 if (err == 0) {
897 struct inode *inode = req->r_target_inode;
898 req->r_target_inode = NULL;
899 dout("open_root_inode success\n");
900 root = d_make_root(inode);
901 if (!root) {
902 root = ERR_PTR(-ENOMEM);
903 goto out;
904 }
905 dout("open_root_inode success, root dentry is %p\n", root);
906 } else {
907 root = ERR_PTR(err);
908 }
909out:
910 ceph_mdsc_put_request(req);
911 return root;
912}
913
914/*
915 * mount: join the ceph cluster, and open root directory.
916 */
917static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc,
918 struct fs_context *fc)
919{
920 int err;
921 unsigned long started = jiffies; /* note the start time */
922 struct dentry *root;
923
924 dout("mount start %p\n", fsc);
925 mutex_lock(&fsc->client->mount_mutex);
926
927 if (!fsc->sb->s_root) {
928 const char *path = fsc->mount_options->server_path ?
929 fsc->mount_options->server_path + 1 : "";
930
931 err = __ceph_open_session(fsc->client, started);
932 if (err < 0)
933 goto out;
934
935 /* setup fscache */
936 if (fsc->mount_options->flags & CEPH_MOUNT_OPT_FSCACHE) {
937 err = ceph_fscache_register_fs(fsc, fc);
938 if (err < 0)
939 goto out;
940 }
941
942 dout("mount opening path '%s'\n", path);
943
944 ceph_fs_debugfs_init(fsc);
945
946 root = open_root_dentry(fsc, path, started);
947 if (IS_ERR(root)) {
948 err = PTR_ERR(root);
949 goto out;
950 }
951 fsc->sb->s_root = dget(root);
952 } else {
953 root = dget(fsc->sb->s_root);
954 }
955
956 fsc->mount_state = CEPH_MOUNT_MOUNTED;
957 dout("mount success\n");
958 mutex_unlock(&fsc->client->mount_mutex);
959 return root;
960
961out:
962 mutex_unlock(&fsc->client->mount_mutex);
963 return ERR_PTR(err);
964}
965
966static int ceph_set_super(struct super_block *s, struct fs_context *fc)
967{
968 struct ceph_fs_client *fsc = s->s_fs_info;
969 int ret;
970
971 dout("set_super %p\n", s);
972
973 s->s_maxbytes = MAX_LFS_FILESIZE;
974
975 s->s_xattr = ceph_xattr_handlers;
976 fsc->sb = s;
977 fsc->max_file_size = 1ULL << 40; /* temp value until we get mdsmap */
978
979 s->s_op = &ceph_super_ops;
980 s->s_d_op = &ceph_dentry_ops;
981 s->s_export_op = &ceph_export_ops;
982
983 s->s_time_gran = 1;
984 s->s_time_min = 0;
985 s->s_time_max = U32_MAX;
986
987 ret = set_anon_super_fc(s, fc);
988 if (ret != 0)
989 fsc->sb = NULL;
990 return ret;
991}
992
993/*
994 * share superblock if same fs AND options
995 */
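/*
 * (If the "noshare" libceph option is given, ceph_get_tree() passes a NULL
 * compare callback to sget_fc() instead, so every mount gets its own
 * superblock and this function is never called.)
 */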
996static int ceph_compare_super(struct super_block *sb, struct fs_context *fc)
997{
998 struct ceph_fs_client *new = fc->s_fs_info;
999 struct ceph_mount_options *fsopt = new->mount_options;
1000 struct ceph_options *opt = new->client->options;
1001 struct ceph_fs_client *other = ceph_sb_to_client(sb);
1002
1003 dout("ceph_compare_super %p\n", sb);
1004
1005 if (compare_mount_options(fsopt, opt, other)) {
1006 dout("monitor(s)/mount options don't match\n");
1007 return 0;
1008 }
1009 if ((opt->flags & CEPH_OPT_FSID) &&
1010 ceph_fsid_compare(&opt->fsid, &other->client->fsid)) {
1011 dout("fsid doesn't match\n");
1012 return 0;
1013 }
1014 if (fc->sb_flags != (sb->s_flags & ~SB_BORN)) {
1015 dout("flags differ\n");
1016 return 0;
1017 }
1018 return 1;
1019}
1020
1021/*
1022 * construct our own bdi so we can control readahead, etc.
1023 */
1024static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
1025
1026static int ceph_setup_bdi(struct super_block *sb, struct ceph_fs_client *fsc)
1027{
1028 int err;
1029
1030 err = super_setup_bdi_name(sb, "ceph-%ld",
1031 atomic_long_inc_return(&bdi_seq));
1032 if (err)
1033 return err;
1034
1035 /* set ra_pages based on rasize mount option? */
1036 sb->s_bdi->ra_pages = fsc->mount_options->rasize >> PAGE_SHIFT;
1037
1038 /* set io_pages based on max osd read size */
1039 sb->s_bdi->io_pages = fsc->mount_options->rsize >> PAGE_SHIFT;
1040
1041 return 0;
1042}
1043
1044static int ceph_get_tree(struct fs_context *fc)
1045{
1046 struct ceph_parse_opts_ctx *pctx = fc->fs_private;
1047 struct super_block *sb;
1048 struct ceph_fs_client *fsc;
1049 struct dentry *res;
1050 int (*compare_super)(struct super_block *, struct fs_context *) =
1051 ceph_compare_super;
1052 int err;
1053
1054 dout("ceph_get_tree\n");
1055
1056 if (!fc->source)
1057 return invalfc(fc, "No source");
1058
1059 /* create client (which we may/may not use) */
1060 fsc = create_fs_client(pctx->opts, pctx->copts);
1061 pctx->opts = NULL;
1062 pctx->copts = NULL;
1063 if (IS_ERR(fsc)) {
1064 err = PTR_ERR(fsc);
1065 goto out_final;
1066 }
1067
1068 err = ceph_mdsc_init(fsc);
1069 if (err < 0)
1070 goto out;
1071
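	/* "noshare": pass a NULL comparator to sget_fc() so an existing
	 * superblock is never reused and a fresh one is always created */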
1072 if (ceph_test_opt(fsc->client, NOSHARE))
1073 compare_super = NULL;
1074
1075 fc->s_fs_info = fsc;
1076 sb = sget_fc(fc, compare_super, ceph_set_super);
1077 fc->s_fs_info = NULL;
1078 if (IS_ERR(sb)) {
1079 err = PTR_ERR(sb);
1080 goto out;
1081 }
1082
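	/* sget_fc() may have matched an existing ceph superblock via
	 * ceph_compare_super(); if so, drop the client we just created
	 * and reuse the one already attached to that superblock */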
1083 if (ceph_sb_to_client(sb) != fsc) {
1084 destroy_fs_client(fsc);
1085 fsc = ceph_sb_to_client(sb);
1086 dout("get_sb got existing client %p\n", fsc);
1087 } else {
1088 dout("get_sb using new client %p\n", fsc);
1089 err = ceph_setup_bdi(sb, fsc);
1090 if (err < 0)
1091 goto out_splat;
1092 }
1093
1094 res = ceph_real_mount(fsc, fc);
1095 if (IS_ERR(res)) {
1096 err = PTR_ERR(res);
1097 goto out_splat;
1098 }
1099 dout("root %p inode %p ino %llx.%llx\n", res,
1100 d_inode(res), ceph_vinop(d_inode(res)));
1101 fc->root = fsc->sb->s_root;
1102 return 0;
1103
1104out_splat:
1105 if (!ceph_mdsmap_is_cluster_available(fsc->mdsc->mdsmap)) {
1106 pr_info("No mds server is up or the cluster is laggy\n");
1107 err = -EHOSTUNREACH;
1108 }
1109
1110 ceph_mdsc_close_sessions(fsc->mdsc);
1111 deactivate_locked_super(sb);
1112 goto out_final;
1113
1114out:
1115 destroy_fs_client(fsc);
1116out_final:
1117 dout("ceph_get_tree fail %d\n", err);
1118 return err;
1119}
1120
1121static void ceph_free_fc(struct fs_context *fc)
1122{
1123 struct ceph_parse_opts_ctx *pctx = fc->fs_private;
1124
1125 if (pctx) {
1126 destroy_mount_options(pctx->opts);
1127 ceph_destroy_options(pctx->copts);
1128 kfree(pctx);
1129 }
1130}
1131
1132static int ceph_reconfigure_fc(struct fs_context *fc)
1133{
1134 struct ceph_parse_opts_ctx *pctx = fc->fs_private;
1135 struct ceph_mount_options *fsopt = pctx->opts;
1136 struct ceph_fs_client *fsc = ceph_sb_to_client(fc->root->d_sb);
1137
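	/* async dirops is the only mount flag this handler re-applies on
	 * remount; all other options keep their original values */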
1138 if (fsopt->flags & CEPH_MOUNT_OPT_ASYNC_DIROPS)
1139 ceph_set_mount_opt(fsc, ASYNC_DIROPS);
1140 else
1141 ceph_clear_mount_opt(fsc, ASYNC_DIROPS);
1142
1143 sync_filesystem(fc->root->d_sb);
1144 return 0;
1145}
1146
1147static const struct fs_context_operations ceph_context_ops = {
1148 .free = ceph_free_fc,
1149 .parse_param = ceph_parse_mount_param,
1150 .get_tree = ceph_get_tree,
1151 .reconfigure = ceph_reconfigure_fc,
1152};
1153
1154/*
1155 * Set up the filesystem mount context.
1156 */
1157static int ceph_init_fs_context(struct fs_context *fc)
1158{
1159 struct ceph_parse_opts_ctx *pctx;
1160 struct ceph_mount_options *fsopt;
1161
1162 pctx = kzalloc(sizeof(*pctx), GFP_KERNEL);
1163 if (!pctx)
1164 return -ENOMEM;
1165
1166 pctx->copts = ceph_alloc_options();
1167 if (!pctx->copts)
1168 goto nomem;
1169
1170 pctx->opts = kzalloc(sizeof(*pctx->opts), GFP_KERNEL);
1171 if (!pctx->opts)
1172 goto nomem;
1173
1174 fsopt = pctx->opts;
1175 fsopt->flags = CEPH_MOUNT_OPT_DEFAULT;
1176
1177 fsopt->wsize = CEPH_MAX_WRITE_SIZE;
1178 fsopt->rsize = CEPH_MAX_READ_SIZE;
1179 fsopt->rasize = CEPH_RASIZE_DEFAULT;
1180 fsopt->snapdir_name = kstrdup(CEPH_SNAPDIRNAME_DEFAULT, GFP_KERNEL);
1181 if (!fsopt->snapdir_name)
1182 goto nomem;
1183
1184 fsopt->caps_wanted_delay_min = CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT;
1185 fsopt->caps_wanted_delay_max = CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT;
1186 fsopt->max_readdir = CEPH_MAX_READDIR_DEFAULT;
1187 fsopt->max_readdir_bytes = CEPH_MAX_READDIR_BYTES_DEFAULT;
1188 fsopt->congestion_kb = default_congestion_kb();
1189
1190#ifdef CONFIG_CEPH_FS_POSIX_ACL
1191 fc->sb_flags |= SB_POSIXACL;
1192#endif
1193
1194 fc->fs_private = pctx;
1195 fc->ops = &ceph_context_ops;
1196 return 0;
1197
1198nomem:
1199 destroy_mount_options(pctx->opts);
1200 ceph_destroy_options(pctx->copts);
1201 kfree(pctx);
1202 return -ENOMEM;
1203}
1204
1205static void ceph_kill_sb(struct super_block *s)
1206{
1207 struct ceph_fs_client *fsc = ceph_sb_to_client(s);
1208 dev_t dev = s->s_dev;
1209
1210 dout("kill_sb %p\n", s);
1211
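	/* quiesce the MDS client while it is still fully set up, before
	 * generic_shutdown_super() starts evicting inodes */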
1212 ceph_mdsc_pre_umount(fsc->mdsc);
1213 flush_fs_workqueues(fsc);
1214
1215 generic_shutdown_super(s);
1216
1217 fsc->client->extra_mon_dispatch = NULL;
1218 ceph_fs_debugfs_cleanup(fsc);
1219
1220 ceph_fscache_unregister_fs(fsc);
1221
1222 destroy_fs_client(fsc);
1223 free_anon_bdev(dev);
1224}
1225
1226static struct file_system_type ceph_fs_type = {
1227 .owner = THIS_MODULE,
1228 .name = "ceph",
1229 .init_fs_context = ceph_init_fs_context,
1230 .kill_sb = ceph_kill_sb,
1231 .fs_flags = FS_RENAME_DOES_D_MOVE,
1232};
1233MODULE_ALIAS_FS("ceph");
1234
1235int ceph_force_reconnect(struct super_block *sb)
1236{
1237 struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
1238 int err = 0;
1239
1240 ceph_umount_begin(sb);
1241
1242 /* Make sure all page caches get invalidated.
1243 * see remove_session_caps_cb() */
1244 flush_workqueue(fsc->inode_wq);
1245
1246 /* In case we were blacklisted. This also resets
1247 * all mon/osd connections. */
1248 ceph_reset_client_addr(fsc->client);
1249
1250 ceph_osdc_clear_abort_err(&fsc->client->osdc);
1251
1252 fsc->blacklisted = false;
1253 fsc->mount_state = CEPH_MOUNT_MOUNTED;
1254
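	/* a getattr on the root inode forces new MDS sessions to be
	 * opened, verifying that the reconnect actually worked */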
1255 if (sb->s_root) {
1256 err = __ceph_do_getattr(d_inode(sb->s_root), NULL,
1257 CEPH_STAT_CAP_INODE, true);
1258 }
1259 return err;
1260}
1261
1262static int __init init_ceph(void)
1263{
1264 int ret = init_caches();
1265 if (ret)
1266 goto out;
1267
1268 ceph_flock_init();
1269 ret = register_filesystem(&ceph_fs_type);
1270 if (ret)
1271 goto out_caches;
1272
1273 pr_info("loaded (mds proto %d)\n", CEPH_MDSC_PROTOCOL);
1274
1275 return 0;
1276
1277out_caches:
1278 destroy_caches();
1279out:
1280 return ret;
1281}
1282
1283static void __exit exit_ceph(void)
1284{
1285 dout("exit_ceph\n");
1286 unregister_filesystem(&ceph_fs_type);
1287 destroy_caches();
1288}
1289
1290static int param_set_metrics(const char *val, const struct kernel_param *kp)
1291{
1292 struct ceph_fs_client *fsc;
1293 int ret;
1294
1295 ret = param_set_bool(val, kp);
1296 if (ret) {
1297 pr_err("Failed to parse sending metrics switch value '%s'\n",
1298 val);
1299 return ret;
1300 } else if (!disable_send_metrics) {
1301 /* wake up all the mds clients */
1302 spin_lock(&ceph_fsc_lock);
1303 list_for_each_entry(fsc, &ceph_fsc_list, metric_wakeup) {
1304 metric_schedule_delayed(&fsc->mdsc->metric);
1305 }
1306 spin_unlock(&ceph_fsc_lock);
1307 }
1308
1309 return 0;
1310}
1311
1312static const struct kernel_param_ops param_ops_metrics = {
1313 .set = param_set_metrics,
1314 .get = param_get_bool,
1315};
1316
1317bool disable_send_metrics = false;
1318module_param_cb(disable_send_metrics, &param_ops_metrics, &disable_send_metrics, 0644);
1319MODULE_PARM_DESC(disable_send_metrics, "Enable sending perf metrics to ceph cluster (default: on)");
1320
1321module_init(init_ceph);
1322module_exit(exit_ceph);
1323
1324MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
1325MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
1326MODULE_AUTHOR("Patience Warnick <patience@newdream.net>");
1327MODULE_DESCRIPTION("Ceph filesystem for Linux");
1328MODULE_LICENSE("GPL");