// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>

#include <linux/fs.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/ratelimit.h>
#include <linux/bits.h>
#include <linux/ktime.h>
#include <linux/bitmap.h>

#include "super.h"
#include "mds_client.h"

#include <linux/ceph/ceph_features.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/pagelist.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/debugfs.h>

#define RECONNECT_MAX_SIZE (INT_MAX - PAGE_SIZE)

/*
 * A cluster of MDS (metadata server) daemons is responsible for
 * managing the file system namespace (the directory hierarchy and
 * inodes) and for coordinating shared access to storage.  Metadata is
 * partitioned hierarchically across a number of servers, and that
 * partition varies over time as the cluster adjusts the distribution
 * in order to balance load.
 *
 * The MDS client is primarily responsible for managing synchronous
 * metadata requests for operations like open, unlink, and so forth.
 * If there is an MDS failure, we find out about it when we (possibly
 * request and) receive a new MDS map, and can resubmit affected
 * requests.
 *
 * For the most part, though, we take advantage of a lossless
 * communications channel to the MDS, and do not need to worry about
 * timing out or resubmitting requests.
 *
 * We maintain a stateful "session" with each MDS we interact with.
 * Within each session, we send periodic heartbeat messages to ensure
 * any capabilities or leases we have been issued remain valid.  If
 * the session times out and goes stale, our leases and capabilities
 * are no longer valid.
 */

struct ceph_reconnect_state {
	struct ceph_mds_session *session;
	int nr_caps, nr_realms;
	struct ceph_pagelist *pagelist;
	unsigned msg_version;
	bool allow_multi;
};

static void __wake_requests(struct ceph_mds_client *mdsc,
			    struct list_head *head);
static void ceph_cap_release_work(struct work_struct *work);
static void ceph_cap_reclaim_work(struct work_struct *work);

static const struct ceph_connection_operations mds_con_ops;


/*
 * mds reply parsing
 */

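/*
 * parse quota info from an inode reply: a versioned struct header
 * followed by the max_bytes/max_files limits
 */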
static int parse_reply_info_quota(void **p, void *end,
				  struct ceph_mds_reply_info_in *info)
{
	u8 struct_v, struct_compat;
	u32 struct_len;

	ceph_decode_8_safe(p, end, struct_v, bad);
	ceph_decode_8_safe(p, end, struct_compat, bad);
	/* struct_v is expected to be >= 1. we only
	 * understand encoding with struct_compat == 1. */
	if (!struct_v || struct_compat != 1)
		goto bad;
	ceph_decode_32_safe(p, end, struct_len, bad);
	ceph_decode_need(p, end, struct_len, bad);
	end = *p + struct_len;
	ceph_decode_64_safe(p, end, info->max_bytes, bad);
	ceph_decode_64_safe(p, end, info->max_files, bad);
	*p = end;
	return 0;
bad:
	return -EIO;
}

/*
 * parse individual inode info
 */
static int parse_reply_info_in(void **p, void *end,
			       struct ceph_mds_reply_info_in *info,
			       u64 features)
{
	int err = 0;
	u8 struct_v = 0;

	if (features == (u64)-1) {
		u32 struct_len;
		u8 struct_compat;
		ceph_decode_8_safe(p, end, struct_v, bad);
		ceph_decode_8_safe(p, end, struct_compat, bad);
		/* struct_v is expected to be >= 1. we only understand
		 * encoding with struct_compat == 1. */
		if (!struct_v || struct_compat != 1)
			goto bad;
		ceph_decode_32_safe(p, end, struct_len, bad);
		ceph_decode_need(p, end, struct_len, bad);
		end = *p + struct_len;
	}

	ceph_decode_need(p, end, sizeof(struct ceph_mds_reply_inode), bad);
	info->in = *p;
	*p += sizeof(struct ceph_mds_reply_inode) +
		sizeof(*info->in->fragtree.splits) *
		le32_to_cpu(info->in->fragtree.nsplits);

	ceph_decode_32_safe(p, end, info->symlink_len, bad);
	ceph_decode_need(p, end, info->symlink_len, bad);
	info->symlink = *p;
	*p += info->symlink_len;

	ceph_decode_copy_safe(p, end, &info->dir_layout,
			      sizeof(info->dir_layout), bad);
	ceph_decode_32_safe(p, end, info->xattr_len, bad);
	ceph_decode_need(p, end, info->xattr_len, bad);
	info->xattr_data = *p;
	*p += info->xattr_len;

	if (features == (u64)-1) {
		/* inline data */
		ceph_decode_64_safe(p, end, info->inline_version, bad);
		ceph_decode_32_safe(p, end, info->inline_len, bad);
		ceph_decode_need(p, end, info->inline_len, bad);
		info->inline_data = *p;
		*p += info->inline_len;
		/* quota */
		err = parse_reply_info_quota(p, end, info);
		if (err < 0)
			goto out_bad;
		/* pool namespace */
		ceph_decode_32_safe(p, end, info->pool_ns_len, bad);
		if (info->pool_ns_len > 0) {
			ceph_decode_need(p, end, info->pool_ns_len, bad);
			info->pool_ns_data = *p;
			*p += info->pool_ns_len;
		}

		/* btime */
		ceph_decode_need(p, end, sizeof(info->btime), bad);
		ceph_decode_copy(p, &info->btime, sizeof(info->btime));

		/* change attribute */
		ceph_decode_64_safe(p, end, info->change_attr, bad);

		/* dir pin */
		if (struct_v >= 2) {
			ceph_decode_32_safe(p, end, info->dir_pin, bad);
		} else {
			info->dir_pin = -ENODATA;
		}

		/* snapshot birth time, remains zero for v<=2 */
		if (struct_v >= 3) {
			ceph_decode_need(p, end, sizeof(info->snap_btime), bad);
			ceph_decode_copy(p, &info->snap_btime,
					 sizeof(info->snap_btime));
		} else {
			memset(&info->snap_btime, 0, sizeof(info->snap_btime));
		}

		/* snapshot count, remains zero for v<=3 */
		if (struct_v >= 4) {
			ceph_decode_64_safe(p, end, info->rsnaps, bad);
		} else {
			info->rsnaps = 0;
		}

		*p = end;
	} else {
		if (features & CEPH_FEATURE_MDS_INLINE_DATA) {
			ceph_decode_64_safe(p, end, info->inline_version, bad);
			ceph_decode_32_safe(p, end, info->inline_len, bad);
			ceph_decode_need(p, end, info->inline_len, bad);
			info->inline_data = *p;
			*p += info->inline_len;
		} else
			info->inline_version = CEPH_INLINE_NONE;

		if (features & CEPH_FEATURE_MDS_QUOTA) {
			err = parse_reply_info_quota(p, end, info);
			if (err < 0)
				goto out_bad;
		} else {
			info->max_bytes = 0;
			info->max_files = 0;
		}

		info->pool_ns_len = 0;
		info->pool_ns_data = NULL;
		if (features & CEPH_FEATURE_FS_FILE_LAYOUT_V2) {
			ceph_decode_32_safe(p, end, info->pool_ns_len, bad);
			if (info->pool_ns_len > 0) {
				ceph_decode_need(p, end, info->pool_ns_len, bad);
				info->pool_ns_data = *p;
				*p += info->pool_ns_len;
			}
		}

		if (features & CEPH_FEATURE_FS_BTIME) {
			ceph_decode_need(p, end, sizeof(info->btime), bad);
			ceph_decode_copy(p, &info->btime, sizeof(info->btime));
			ceph_decode_64_safe(p, end, info->change_attr, bad);
		}

		info->dir_pin = -ENODATA;
		/* info->snap_btime and info->rsnaps remain zero */
	}
	return 0;
bad:
	err = -EIO;
out_bad:
	return err;
}

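/*
 * parse a dirfrag record, including its replica distribution list;
 * newer MDSes (features == -1) wrap it in a versioned struct header
 */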
static int parse_reply_info_dir(void **p, void *end,
				struct ceph_mds_reply_dirfrag **dirfrag,
				u64 features)
{
	if (features == (u64)-1) {
		u8 struct_v, struct_compat;
		u32 struct_len;
		ceph_decode_8_safe(p, end, struct_v, bad);
		ceph_decode_8_safe(p, end, struct_compat, bad);
		/* struct_v is expected to be >= 1. we only understand
		 * encoding whose struct_compat == 1. */
		if (!struct_v || struct_compat != 1)
			goto bad;
		ceph_decode_32_safe(p, end, struct_len, bad);
		ceph_decode_need(p, end, struct_len, bad);
		end = *p + struct_len;
	}

	ceph_decode_need(p, end, sizeof(**dirfrag), bad);
	*dirfrag = *p;
	*p += sizeof(**dirfrag) + sizeof(u32) * le32_to_cpu((*dirfrag)->ndist);
	if (unlikely(*p > end))
		goto bad;
	if (features == (u64)-1)
		*p = end;
	return 0;
bad:
	return -EIO;
}

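/*
 * parse a dentry lease record; newer MDSes (features == -1) wrap it
 * in a versioned struct header
 */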
static int parse_reply_info_lease(void **p, void *end,
				  struct ceph_mds_reply_lease **lease,
				  u64 features)
{
	if (features == (u64)-1) {
		u8 struct_v, struct_compat;
		u32 struct_len;
		ceph_decode_8_safe(p, end, struct_v, bad);
		ceph_decode_8_safe(p, end, struct_compat, bad);
		/* struct_v is expected to be >= 1. we only understand
		 * encoding whose struct_compat == 1. */
		if (!struct_v || struct_compat != 1)
			goto bad;
		ceph_decode_32_safe(p, end, struct_len, bad);
		ceph_decode_need(p, end, struct_len, bad);
		end = *p + struct_len;
	}

	ceph_decode_need(p, end, sizeof(**lease), bad);
	*lease = *p;
	*p += sizeof(**lease);
	if (features == (u64)-1)
		*p = end;
	return 0;
bad:
	return -EIO;
}

/*
 * parse a normal reply, which may contain a (dir+)dentry and/or a
 * target inode.
 */
static int parse_reply_info_trace(void **p, void *end,
				  struct ceph_mds_reply_info_parsed *info,
				  u64 features)
{
	int err;

	if (info->head->is_dentry) {
		err = parse_reply_info_in(p, end, &info->diri, features);
		if (err < 0)
			goto out_bad;

		err = parse_reply_info_dir(p, end, &info->dirfrag, features);
		if (err < 0)
			goto out_bad;

		ceph_decode_32_safe(p, end, info->dname_len, bad);
		ceph_decode_need(p, end, info->dname_len, bad);
		info->dname = *p;
		*p += info->dname_len;

		err = parse_reply_info_lease(p, end, &info->dlease, features);
		if (err < 0)
			goto out_bad;
	}

	if (info->head->is_target) {
		err = parse_reply_info_in(p, end, &info->targeti, features);
		if (err < 0)
			goto out_bad;
	}

	if (unlikely(*p != end))
		goto bad;
	return 0;

bad:
	err = -EIO;
out_bad:
	pr_err("problem parsing mds trace %d\n", err);
	return err;
}

/*
 * parse readdir results
 */
static int parse_reply_info_readdir(void **p, void *end,
				    struct ceph_mds_reply_info_parsed *info,
				    u64 features)
{
	u32 num, i = 0;
	int err;

	err = parse_reply_info_dir(p, end, &info->dir_dir, features);
	if (err < 0)
		goto out_bad;

	ceph_decode_need(p, end, sizeof(num) + 2, bad);
	num = ceph_decode_32(p);
	{
		u16 flags = ceph_decode_16(p);
		info->dir_end = !!(flags & CEPH_READDIR_FRAG_END);
		info->dir_complete = !!(flags & CEPH_READDIR_FRAG_COMPLETE);
		info->hash_order = !!(flags & CEPH_READDIR_HASH_ORDER);
		info->offset_hash = !!(flags & CEPH_READDIR_OFFSET_HASH);
	}
	if (num == 0)
		goto done;

	BUG_ON(!info->dir_entries);
	if ((unsigned long)(info->dir_entries + num) >
	    (unsigned long)info->dir_entries + info->dir_buf_size) {
		pr_err("dir contents are larger than expected\n");
		WARN_ON(1);
		goto bad;
	}

	info->dir_nr = num;
	while (num) {
		struct ceph_mds_reply_dir_entry *rde = info->dir_entries + i;
		/* dentry */
		ceph_decode_32_safe(p, end, rde->name_len, bad);
		ceph_decode_need(p, end, rde->name_len, bad);
		rde->name = *p;
		*p += rde->name_len;
		dout("parsed dir dname '%.*s'\n", rde->name_len, rde->name);

		/* dentry lease */
		err = parse_reply_info_lease(p, end, &rde->lease, features);
		if (err)
			goto out_bad;
		/* inode */
		err = parse_reply_info_in(p, end, &rde->inode, features);
		if (err < 0)
			goto out_bad;
		/* ceph_readdir_prepopulate() will update it */
		rde->offset = 0;
		i++;
		num--;
	}

done:
	/* Skip over any unrecognized fields */
	*p = end;
	return 0;

bad:
	err = -EIO;
out_bad:
	pr_err("problem parsing dir contents %d\n", err);
	return err;
}

/*
 * parse fcntl F_GETLK results
 */
static int parse_reply_info_filelock(void **p, void *end,
				     struct ceph_mds_reply_info_parsed *info,
				     u64 features)
{
	if (*p + sizeof(*info->filelock_reply) > end)
		goto bad;

	info->filelock_reply = *p;

	/* Skip over any unrecognized fields */
	*p = end;
	return 0;
bad:
	return -EIO;
}


#if BITS_PER_LONG == 64

#define DELEGATED_INO_AVAILABLE xa_mk_value(1)

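/*
 * Parse the inode ranges the MDS has delegated to us for async
 * creates, recording each inode in the session's s_delegated_inos
 * xarray.
 */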
static int ceph_parse_deleg_inos(void **p, void *end,
				 struct ceph_mds_session *s)
{
	u32 sets;

	ceph_decode_32_safe(p, end, sets, bad);
	dout("got %u sets of delegated inodes\n", sets);
	while (sets--) {
		u64 start, len;

		ceph_decode_64_safe(p, end, start, bad);
		ceph_decode_64_safe(p, end, len, bad);

		/* Don't accept a delegation of system inodes */
		if (start < CEPH_INO_SYSTEM_BASE) {
			pr_warn_ratelimited("ceph: ignoring reserved inode range delegation (start=0x%llx len=0x%llx)\n",
					    start, len);
			continue;
		}
		while (len--) {
			int err = xa_insert(&s->s_delegated_inos, start++,
					    DELEGATED_INO_AVAILABLE,
					    GFP_KERNEL);
			if (!err) {
				dout("added delegated inode 0x%llx\n",
				     start - 1);
			} else if (err == -EBUSY) {
				pr_warn("MDS delegated inode 0x%llx more than once.\n",
					start - 1);
			} else {
				return err;
			}
		}
	}
	return 0;
bad:
	return -EIO;
}

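/* claim an available delegated inode from the session, or 0 if none */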
u64 ceph_get_deleg_ino(struct ceph_mds_session *s)
{
	unsigned long ino;
	void *val;

	xa_for_each(&s->s_delegated_inos, ino, val) {
		val = xa_erase(&s->s_delegated_inos, ino);
		if (val == DELEGATED_INO_AVAILABLE)
			return ino;
	}
	return 0;
}

int ceph_restore_deleg_ino(struct ceph_mds_session *s, u64 ino)
{
	return xa_insert(&s->s_delegated_inos, ino, DELEGATED_INO_AVAILABLE,
			 GFP_KERNEL);
}
#else /* BITS_PER_LONG == 64 */
/*
 * FIXME: xarrays can't handle 64-bit indexes on a 32-bit arch. For now, just
 * ignore delegated_inos on 32 bit arch. Maybe eventually add xarrays for top
 * and bottom words?
 */
static int ceph_parse_deleg_inos(void **p, void *end,
				 struct ceph_mds_session *s)
{
	u32 sets;

	ceph_decode_32_safe(p, end, sets, bad);
	if (sets)
		ceph_decode_skip_n(p, end, sets * 2 * sizeof(__le64), bad);
	return 0;
bad:
	return -EIO;
}

u64 ceph_get_deleg_ino(struct ceph_mds_session *s)
{
	return 0;
}

int ceph_restore_deleg_ino(struct ceph_mds_session *s, u64 ino)
{
	return 0;
}
#endif /* BITS_PER_LONG == 64 */

/*
 * parse create results
 */
static int parse_reply_info_create(void **p, void *end,
				   struct ceph_mds_reply_info_parsed *info,
				   u64 features, struct ceph_mds_session *s)
{
	int ret;

	if (features == (u64)-1 ||
	    (features & CEPH_FEATURE_REPLY_CREATE_INODE)) {
		if (*p == end) {
			/* Malformed reply? */
			info->has_create_ino = false;
		} else if (test_bit(CEPHFS_FEATURE_DELEG_INO, &s->s_features)) {
			info->has_create_ino = true;
			/* struct_v, struct_compat, and len */
			ceph_decode_skip_n(p, end, 2 + sizeof(u32), bad);
			ceph_decode_64_safe(p, end, info->ino, bad);
			ret = ceph_parse_deleg_inos(p, end, s);
			if (ret)
				return ret;
		} else {
			/* legacy */
			ceph_decode_64_safe(p, end, info->ino, bad);
			info->has_create_ino = true;
		}
	} else {
		if (*p != end)
			goto bad;
	}

	/* Skip over any unrecognized fields */
	*p = end;
	return 0;
bad:
	return -EIO;
}

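/*
 * parse a getvxattr reply: skip the versioned struct header, then
 * record the xattr value; returns the value length on success
 */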
static int parse_reply_info_getvxattr(void **p, void *end,
				      struct ceph_mds_reply_info_parsed *info,
				      u64 features)
{
	u32 value_len;

	ceph_decode_skip_8(p, end, bad); /* skip current version: 1 */
	ceph_decode_skip_8(p, end, bad); /* skip first version: 1 */
	ceph_decode_skip_32(p, end, bad); /* skip payload length */

	ceph_decode_32_safe(p, end, value_len, bad);

	if (value_len == end - *p) {
		info->xattr_info.xattr_value = *p;
		info->xattr_info.xattr_value_len = value_len;
		*p = end;
		return value_len;
	}
bad:
	return -EIO;
}

/*
 * parse extra results
 */
static int parse_reply_info_extra(void **p, void *end,
				  struct ceph_mds_reply_info_parsed *info,
				  u64 features, struct ceph_mds_session *s)
{
	u32 op = le32_to_cpu(info->head->op);

	if (op == CEPH_MDS_OP_GETFILELOCK)
		return parse_reply_info_filelock(p, end, info, features);
	else if (op == CEPH_MDS_OP_READDIR || op == CEPH_MDS_OP_LSSNAP)
		return parse_reply_info_readdir(p, end, info, features);
	else if (op == CEPH_MDS_OP_CREATE)
		return parse_reply_info_create(p, end, info, features, s);
	else if (op == CEPH_MDS_OP_GETVXATTR)
		return parse_reply_info_getvxattr(p, end, info, features);
	else
		return -EIO;
}

/*
 * parse entire mds reply
 */
static int parse_reply_info(struct ceph_mds_session *s, struct ceph_msg *msg,
			    struct ceph_mds_reply_info_parsed *info,
			    u64 features)
{
	void *p, *end;
	u32 len;
	int err;

	info->head = msg->front.iov_base;
	p = msg->front.iov_base + sizeof(struct ceph_mds_reply_head);
	end = p + msg->front.iov_len - sizeof(struct ceph_mds_reply_head);

	/* trace */
	ceph_decode_32_safe(&p, end, len, bad);
	if (len > 0) {
		ceph_decode_need(&p, end, len, bad);
		err = parse_reply_info_trace(&p, p+len, info, features);
		if (err < 0)
			goto out_bad;
	}

	/* extra */
	ceph_decode_32_safe(&p, end, len, bad);
	if (len > 0) {
		ceph_decode_need(&p, end, len, bad);
		err = parse_reply_info_extra(&p, p+len, info, features, s);
		if (err < 0)
			goto out_bad;
	}

	/* snap blob */
	ceph_decode_32_safe(&p, end, len, bad);
	info->snapblob_len = len;
	info->snapblob = p;
	p += len;

	if (p != end)
		goto bad;
	return 0;

bad:
	err = -EIO;
out_bad:
	pr_err("mds parse_reply err %d\n", err);
	return err;
}

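/* free the readdir entry buffer attached to a parsed reply, if any */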
static void destroy_reply_info(struct ceph_mds_reply_info_parsed *info)
{
	if (!info->dir_entries)
		return;
	free_pages((unsigned long)info->dir_entries, get_order(info->dir_buf_size));
}

/*
 * In the async unlink case the kclient won't wait for the first reply
 * from the MDS; it just drops all the links, unhashes the dentry, and
 * then succeeds immediately.
 *
 * For any new create/link/rename, etc. requests that reuse the same
 * file name, we must wait for the first reply of the inflight unlink
 * request, or the MDS may fail these following requests with -EEXIST
 * if the inflight async unlink request was delayed for some reason.
 *
 * And the worst case is that for the non-async openc request it will
 * successfully open the file if the CDentry hasn't been unlinked yet,
 * but later the previously delayed async unlink request will remove
 * the CDentry. That means the just created file may be accidentally
 * deleted later.
 *
 * We need to wait for the inflight async unlink requests to finish
 * when creating new files/directories with the same file names.
 */
int ceph_wait_on_conflict_unlink(struct dentry *dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
	struct dentry *pdentry = dentry->d_parent;
	struct dentry *udentry, *found = NULL;
	struct ceph_dentry_info *di;
	struct qstr dname;
	u32 hash = dentry->d_name.hash;
	int err;

	dname.name = dentry->d_name.name;
	dname.len = dentry->d_name.len;

	rcu_read_lock();
	hash_for_each_possible_rcu(fsc->async_unlink_conflict, di,
				   hnode, hash) {
		udentry = di->dentry;

		spin_lock(&udentry->d_lock);
		if (udentry->d_name.hash != hash)
			goto next;
		if (unlikely(udentry->d_parent != pdentry))
			goto next;
		if (!hash_hashed(&di->hnode))
			goto next;

		if (!test_bit(CEPH_DENTRY_ASYNC_UNLINK_BIT, &di->flags))
			pr_warn("%s dentry %p:%pd async unlink bit is not set\n",
				__func__, dentry, dentry);

		if (!d_same_name(udentry, pdentry, &dname))
			goto next;

		spin_unlock(&udentry->d_lock);
		found = dget(udentry);
		break;
next:
		spin_unlock(&udentry->d_lock);
	}
	rcu_read_unlock();

	if (likely(!found))
		return 0;

	dout("%s dentry %p:%pd conflict with old %p:%pd\n", __func__,
	     dentry, dentry, found, found);

	err = wait_on_bit(&di->flags, CEPH_DENTRY_ASYNC_UNLINK_BIT,
			  TASK_KILLABLE);
	dput(found);
	return err;
}


/*
 * sessions
 */
const char *ceph_session_state_name(int s)
{
	switch (s) {
	case CEPH_MDS_SESSION_NEW: return "new";
	case CEPH_MDS_SESSION_OPENING: return "opening";
	case CEPH_MDS_SESSION_OPEN: return "open";
	case CEPH_MDS_SESSION_HUNG: return "hung";
	case CEPH_MDS_SESSION_CLOSING: return "closing";
	case CEPH_MDS_SESSION_CLOSED: return "closed";
	case CEPH_MDS_SESSION_RESTARTING: return "restarting";
	case CEPH_MDS_SESSION_RECONNECTING: return "reconnecting";
	case CEPH_MDS_SESSION_REJECTED: return "rejected";
	default: return "???";
	}
}

struct ceph_mds_session *ceph_get_mds_session(struct ceph_mds_session *s)
{
	if (refcount_inc_not_zero(&s->s_ref))
		return s;
	return NULL;
}

void ceph_put_mds_session(struct ceph_mds_session *s)
{
	if (IS_ERR_OR_NULL(s))
		return;

	if (refcount_dec_and_test(&s->s_ref)) {
		if (s->s_auth.authorizer)
			ceph_auth_destroy_authorizer(s->s_auth.authorizer);
		WARN_ON(mutex_is_locked(&s->s_mutex));
		xa_destroy(&s->s_delegated_inos);
		kfree(s);
	}
}

/*
 * called under mdsc->mutex
 */
struct ceph_mds_session *__ceph_lookup_mds_session(struct ceph_mds_client *mdsc,
						   int mds)
{
	if (mds >= mdsc->max_sessions || !mdsc->sessions[mds])
		return NULL;
	return ceph_get_mds_session(mdsc->sessions[mds]);
}

static bool __have_session(struct ceph_mds_client *mdsc, int mds)
{
	if (mds >= mdsc->max_sessions || !mdsc->sessions[mds])
		return false;
	else
		return true;
}

static int __verify_registered_session(struct ceph_mds_client *mdsc,
				       struct ceph_mds_session *s)
{
	if (s->s_mds >= mdsc->max_sessions ||
	    mdsc->sessions[s->s_mds] != s)
		return -ENOENT;
	return 0;
}

/*
 * create+register a new session for given mds.
 * called under mdsc->mutex.
 */
static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
						 int mds)
{
	struct ceph_mds_session *s;

	if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_FENCE_IO)
		return ERR_PTR(-EIO);

	if (mds >= mdsc->mdsmap->possible_max_rank)
		return ERR_PTR(-EINVAL);

	s = kzalloc(sizeof(*s), GFP_NOFS);
	if (!s)
		return ERR_PTR(-ENOMEM);

	if (mds >= mdsc->max_sessions) {
		int newmax = 1 << get_count_order(mds + 1);
		struct ceph_mds_session **sa;

		dout("%s: realloc to %d\n", __func__, newmax);
		sa = kcalloc(newmax, sizeof(void *), GFP_NOFS);
		if (!sa)
			goto fail_realloc;
		if (mdsc->sessions) {
			memcpy(sa, mdsc->sessions,
			       mdsc->max_sessions * sizeof(void *));
			kfree(mdsc->sessions);
		}
		mdsc->sessions = sa;
		mdsc->max_sessions = newmax;
	}

	dout("%s: mds%d\n", __func__, mds);
	s->s_mdsc = mdsc;
	s->s_mds = mds;
	s->s_state = CEPH_MDS_SESSION_NEW;
	mutex_init(&s->s_mutex);

	ceph_con_init(&s->s_con, s, &mds_con_ops, &mdsc->fsc->client->msgr);

	atomic_set(&s->s_cap_gen, 1);
	s->s_cap_ttl = jiffies - 1;

	spin_lock_init(&s->s_cap_lock);
	INIT_LIST_HEAD(&s->s_caps);
	refcount_set(&s->s_ref, 1);
	INIT_LIST_HEAD(&s->s_waiting);
	INIT_LIST_HEAD(&s->s_unsafe);
	xa_init(&s->s_delegated_inos);
	INIT_LIST_HEAD(&s->s_cap_releases);
	INIT_WORK(&s->s_cap_release_work, ceph_cap_release_work);

	INIT_LIST_HEAD(&s->s_cap_dirty);
	INIT_LIST_HEAD(&s->s_cap_flushing);

	mdsc->sessions[mds] = s;
	atomic_inc(&mdsc->num_sessions);
	refcount_inc(&s->s_ref);  /* one ref to sessions[], one to caller */

	ceph_con_open(&s->s_con, CEPH_ENTITY_TYPE_MDS, mds,
		      ceph_mdsmap_get_addr(mdsc->mdsmap, mds));

	return s;

fail_realloc:
	kfree(s);
	return ERR_PTR(-ENOMEM);
}

/*
 * called under mdsc->mutex
 */
static void __unregister_session(struct ceph_mds_client *mdsc,
				 struct ceph_mds_session *s)
{
	dout("__unregister_session mds%d %p\n", s->s_mds, s);
	BUG_ON(mdsc->sessions[s->s_mds] != s);
	mdsc->sessions[s->s_mds] = NULL;
	ceph_con_close(&s->s_con);
	ceph_put_mds_session(s);
	atomic_dec(&mdsc->num_sessions);
}

/*
 * drop session refs in request.
 *
 * should be last request ref, or hold mdsc->mutex
 */
static void put_request_session(struct ceph_mds_request *req)
{
	if (req->r_session) {
		ceph_put_mds_session(req->r_session);
		req->r_session = NULL;
	}
}

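/*
 * Iterate over all registered sessions, invoking cb on each one with
 * mdsc->mutex dropped; if check_state is set, sessions in a bad state
 * are skipped.
 */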
void ceph_mdsc_iterate_sessions(struct ceph_mds_client *mdsc,
				void (*cb)(struct ceph_mds_session *),
				bool check_state)
{
	int mds;

	mutex_lock(&mdsc->mutex);
	for (mds = 0; mds < mdsc->max_sessions; ++mds) {
		struct ceph_mds_session *s;

		s = __ceph_lookup_mds_session(mdsc, mds);
		if (!s)
			continue;

		if (check_state && !check_session_state(s)) {
			ceph_put_mds_session(s);
			continue;
		}

		mutex_unlock(&mdsc->mutex);
		cb(s);
		ceph_put_mds_session(s);
		mutex_lock(&mdsc->mutex);
	}
	mutex_unlock(&mdsc->mutex);
}

void ceph_mdsc_release_request(struct kref *kref)
{
	struct ceph_mds_request *req = container_of(kref,
						    struct ceph_mds_request,
						    r_kref);
	ceph_mdsc_release_dir_caps_no_check(req);
	destroy_reply_info(&req->r_reply_info);
	if (req->r_request)
		ceph_msg_put(req->r_request);
	if (req->r_reply)
		ceph_msg_put(req->r_reply);
	if (req->r_inode) {
		ceph_put_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
		iput(req->r_inode);
	}
	if (req->r_parent) {
		ceph_put_cap_refs(ceph_inode(req->r_parent), CEPH_CAP_PIN);
		iput(req->r_parent);
	}
	iput(req->r_target_inode);
	if (req->r_dentry)
		dput(req->r_dentry);
	if (req->r_old_dentry)
		dput(req->r_old_dentry);
	if (req->r_old_dentry_dir) {
		/*
		 * track (and drop pins for) r_old_dentry_dir
		 * separately, since r_old_dentry's d_parent may have
		 * changed between the dir mutex being dropped and
		 * this request being freed.
		 */
		ceph_put_cap_refs(ceph_inode(req->r_old_dentry_dir),
				  CEPH_CAP_PIN);
		iput(req->r_old_dentry_dir);
	}
	kfree(req->r_path1);
	kfree(req->r_path2);
	put_cred(req->r_cred);
	if (req->r_pagelist)
		ceph_pagelist_release(req->r_pagelist);
	put_request_session(req);
	ceph_unreserve_caps(req->r_mdsc, &req->r_caps_reservation);
	WARN_ON_ONCE(!list_empty(&req->r_wait));
	kmem_cache_free(ceph_mds_request_cachep, req);
}

DEFINE_RB_FUNCS(request, struct ceph_mds_request, r_tid, r_node)

/*
 * lookup request, bump ref if found.
 *
 * called under mdsc->mutex.
 */
static struct ceph_mds_request *
lookup_get_request(struct ceph_mds_client *mdsc, u64 tid)
{
	struct ceph_mds_request *req;

	req = lookup_request(&mdsc->request_tree, tid);
	if (req)
		ceph_mdsc_get_request(req);

	return req;
}

/*
 * Register an in-flight request, and assign a tid.  Link to the
 * directory we are modifying (if any).
 *
 * Called under mdsc->mutex.
 */
static void __register_request(struct ceph_mds_client *mdsc,
			       struct ceph_mds_request *req,
			       struct inode *dir)
{
	int ret = 0;

	req->r_tid = ++mdsc->last_tid;
	if (req->r_num_caps) {
		ret = ceph_reserve_caps(mdsc, &req->r_caps_reservation,
					req->r_num_caps);
		if (ret < 0) {
			pr_err("__register_request %p "
			       "failed to reserve caps: %d\n", req, ret);
			/* set req->r_err to fail early from __do_request */
			req->r_err = ret;
			return;
		}
	}
	dout("__register_request %p tid %lld\n", req, req->r_tid);
	ceph_mdsc_get_request(req);
	insert_request(&mdsc->request_tree, req);

	req->r_cred = get_current_cred();

	if (mdsc->oldest_tid == 0 && req->r_op != CEPH_MDS_OP_SETFILELOCK)
		mdsc->oldest_tid = req->r_tid;

	if (dir) {
		struct ceph_inode_info *ci = ceph_inode(dir);

		ihold(dir);
		req->r_unsafe_dir = dir;
		spin_lock(&ci->i_unsafe_lock);
		list_add_tail(&req->r_unsafe_dir_item, &ci->i_unsafe_dirops);
		spin_unlock(&ci->i_unsafe_lock);
	}
}

static void __unregister_request(struct ceph_mds_client *mdsc,
				 struct ceph_mds_request *req)
{
	dout("__unregister_request %p tid %lld\n", req, req->r_tid);

	/* Never leave an unregistered request on an unsafe list! */
	list_del_init(&req->r_unsafe_item);

	if (req->r_tid == mdsc->oldest_tid) {
		struct rb_node *p = rb_next(&req->r_node);
		mdsc->oldest_tid = 0;
		while (p) {
			struct ceph_mds_request *next_req =
				rb_entry(p, struct ceph_mds_request, r_node);
			if (next_req->r_op != CEPH_MDS_OP_SETFILELOCK) {
				mdsc->oldest_tid = next_req->r_tid;
				break;
			}
			p = rb_next(p);
		}
	}

	erase_request(&mdsc->request_tree, req);

	if (req->r_unsafe_dir) {
		struct ceph_inode_info *ci = ceph_inode(req->r_unsafe_dir);
		spin_lock(&ci->i_unsafe_lock);
		list_del_init(&req->r_unsafe_dir_item);
		spin_unlock(&ci->i_unsafe_lock);
	}
	if (req->r_target_inode &&
	    test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
		struct ceph_inode_info *ci = ceph_inode(req->r_target_inode);
		spin_lock(&ci->i_unsafe_lock);
		list_del_init(&req->r_unsafe_target_item);
		spin_unlock(&ci->i_unsafe_lock);
	}

	if (req->r_unsafe_dir) {
		iput(req->r_unsafe_dir);
		req->r_unsafe_dir = NULL;
	}

	complete_all(&req->r_safe_completion);

	ceph_mdsc_put_request(req);
}

/*
 * Walk back up the dentry tree until we hit a dentry representing a
 * non-snapshot inode. We do this using the rcu_read_lock (which must be held
 * when calling this) to ensure that the objects won't disappear while we're
 * working with them. Once we hit a candidate dentry, we attempt to take a
 * reference to it, and return that as the result.
 */
static struct inode *get_nonsnap_parent(struct dentry *dentry)
{
	struct inode *inode = NULL;

	while (dentry && !IS_ROOT(dentry)) {
		inode = d_inode_rcu(dentry);
		if (!inode || ceph_snap(inode) == CEPH_NOSNAP)
			break;
		dentry = dentry->d_parent;
	}
	if (inode)
		inode = igrab(inode);
	return inode;
}

/*
 * Choose mds to send request to next.  If there is a hint set in the
 * request (e.g., due to a prior forward hint from the mds), use that.
 * Otherwise, consult frag tree and/or caps to identify the
 * appropriate mds.  If all else fails, choose randomly.
 *
 * Called under mdsc->mutex.
 */
static int __choose_mds(struct ceph_mds_client *mdsc,
			struct ceph_mds_request *req,
			bool *random)
{
	struct inode *inode;
	struct ceph_inode_info *ci;
	struct ceph_cap *cap;
	int mode = req->r_direct_mode;
	int mds = -1;
	u32 hash = req->r_direct_hash;
	bool is_hash = test_bit(CEPH_MDS_R_DIRECT_IS_HASH, &req->r_req_flags);

	if (random)
		*random = false;

	/*
	 * is there a specific mds we should try?  ignore hint if we have
	 * no session and the mds is not up (active or recovering).
	 */
	if (req->r_resend_mds >= 0 &&
	    (__have_session(mdsc, req->r_resend_mds) ||
	     ceph_mdsmap_get_state(mdsc->mdsmap, req->r_resend_mds) > 0)) {
		dout("%s using resend_mds mds%d\n", __func__,
		     req->r_resend_mds);
		return req->r_resend_mds;
	}

	if (mode == USE_RANDOM_MDS)
		goto random;

	inode = NULL;
	if (req->r_inode) {
		if (ceph_snap(req->r_inode) != CEPH_SNAPDIR) {
			inode = req->r_inode;
			ihold(inode);
		} else {
			/* req->r_dentry is non-null for LSSNAP request */
			rcu_read_lock();
			inode = get_nonsnap_parent(req->r_dentry);
			rcu_read_unlock();
			dout("%s using snapdir's parent %p\n", __func__, inode);
		}
	} else if (req->r_dentry) {
		/* ignore race with rename; old or new d_parent is okay */
		struct dentry *parent;
		struct inode *dir;

		rcu_read_lock();
		parent = READ_ONCE(req->r_dentry->d_parent);
		dir = req->r_parent ? : d_inode_rcu(parent);

		if (!dir || dir->i_sb != mdsc->fsc->sb) {
			/* not this fs or parent went negative */
			inode = d_inode(req->r_dentry);
			if (inode)
				ihold(inode);
		} else if (ceph_snap(dir) != CEPH_NOSNAP) {
			/* direct snapped/virtual snapdir requests
			 * based on parent dir inode */
			inode = get_nonsnap_parent(parent);
			dout("%s using nonsnap parent %p\n", __func__, inode);
		} else {
			/* dentry target */
			inode = d_inode(req->r_dentry);
			if (!inode || mode == USE_AUTH_MDS) {
				/* dir + name */
				inode = igrab(dir);
				hash = ceph_dentry_hash(dir, req->r_dentry);
				is_hash = true;
			} else {
				ihold(inode);
			}
		}
		rcu_read_unlock();
	}

	dout("%s %p is_hash=%d (0x%x) mode %d\n", __func__, inode, (int)is_hash,
	     hash, mode);
	if (!inode)
		goto random;
	ci = ceph_inode(inode);

	if (is_hash && S_ISDIR(inode->i_mode)) {
		struct ceph_inode_frag frag;
		int found;

		ceph_choose_frag(ci, hash, &frag, &found);
		if (found) {
			if (mode == USE_ANY_MDS && frag.ndist > 0) {
				u8 r;

				/* choose a random replica */
				get_random_bytes(&r, 1);
				r %= frag.ndist;
				mds = frag.dist[r];
				dout("%s %p %llx.%llx frag %u mds%d (%d/%d)\n",
				     __func__, inode, ceph_vinop(inode),
				     frag.frag, mds, (int)r, frag.ndist);
				if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
				    CEPH_MDS_STATE_ACTIVE &&
				    !ceph_mdsmap_is_laggy(mdsc->mdsmap, mds))
					goto out;
			}

			/* since this file/dir wasn't known to be
			 * replicated, then we want to look for the
			 * authoritative mds. */
			if (frag.mds >= 0) {
				/* choose auth mds */
				mds = frag.mds;
				dout("%s %p %llx.%llx frag %u mds%d (auth)\n",
				     __func__, inode, ceph_vinop(inode),
				     frag.frag, mds);
				if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
				    CEPH_MDS_STATE_ACTIVE) {
					if (!ceph_mdsmap_is_laggy(mdsc->mdsmap,
								  mds))
						goto out;
				}
			}
			mode = USE_AUTH_MDS;
		}
	}

	spin_lock(&ci->i_ceph_lock);
	cap = NULL;
	if (mode == USE_AUTH_MDS)
		cap = ci->i_auth_cap;
	if (!cap && !RB_EMPTY_ROOT(&ci->i_caps))
		cap = rb_entry(rb_first(&ci->i_caps), struct ceph_cap, ci_node);
	if (!cap) {
		spin_unlock(&ci->i_ceph_lock);
		iput(inode);
		goto random;
	}
	mds = cap->session->s_mds;
	dout("%s %p %llx.%llx mds%d (%scap %p)\n", __func__,
	     inode, ceph_vinop(inode), mds,
	     cap == ci->i_auth_cap ? "auth " : "", cap);
	spin_unlock(&ci->i_ceph_lock);
out:
	iput(inode);
	return mds;

random:
	if (random)
		*random = true;

	mds = ceph_mdsmap_get_random_mds(mdsc->mdsmap);
	dout("%s chose random mds%d\n", __func__, mds);
	return mds;
}


/*
 * session messages
 */
struct ceph_msg *ceph_create_session_msg(u32 op, u64 seq)
{
	struct ceph_msg *msg;
	struct ceph_mds_session_head *h;

	msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h), GFP_NOFS,
			   false);
	if (!msg) {
		pr_err("ENOMEM creating session %s msg\n",
		       ceph_session_op_name(op));
		return NULL;
	}
	h = msg->front.iov_base;
	h->op = cpu_to_le32(op);
	h->seq = cpu_to_le64(seq);

	return msg;
}

static const unsigned char feature_bits[] = CEPHFS_FEATURES_CLIENT_SUPPORTED;
#define FEATURE_BYTES(c) (DIV_ROUND_UP((size_t)feature_bits[c - 1] + 1, 64) * 8)
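/*
 * Encode the client's supported-feature bitmap as a length-prefixed
 * byte array for the session open message.
 */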
static int encode_supported_features(void **p, void *end)
{
	static const size_t count = ARRAY_SIZE(feature_bits);

	if (count > 0) {
		size_t i;
		size_t size = FEATURE_BYTES(count);
		unsigned long bit;

		if (WARN_ON_ONCE(*p + 4 + size > end))
			return -ERANGE;

		ceph_encode_32(p, size);
		memset(*p, 0, size);
		for (i = 0; i < count; i++) {
			bit = feature_bits[i];
			((unsigned char *)(*p))[bit / 8] |= BIT(bit % 8);
		}
		*p += size;
	} else {
		if (WARN_ON_ONCE(*p + 4 > end))
			return -ERANGE;

		ceph_encode_32(p, 0);
	}

	return 0;
}

static const unsigned char metric_bits[] = CEPHFS_METRIC_SPEC_CLIENT_SUPPORTED;
#define METRIC_BYTES(cnt) (DIV_ROUND_UP((size_t)metric_bits[cnt - 1] + 1, 64) * 8)
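/*
 * Encode the metric spec (version/compat header plus a bitmap of the
 * metrics this client can send) for the session open message.
 */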
static int encode_metric_spec(void **p, void *end)
{
	static const size_t count = ARRAY_SIZE(metric_bits);

	/* header */
	if (WARN_ON_ONCE(*p + 2 > end))
		return -ERANGE;

	ceph_encode_8(p, 1); /* version */
	ceph_encode_8(p, 1); /* compat */

	if (count > 0) {
		size_t i;
		size_t size = METRIC_BYTES(count);

		if (WARN_ON_ONCE(*p + 4 + 4 + size > end))
			return -ERANGE;

		/* metric spec info length */
		ceph_encode_32(p, 4 + size);

		/* metric spec */
		ceph_encode_32(p, size);
		memset(*p, 0, size);
		/* index the byte by the bit value, matching
		 * encode_supported_features() */
		for (i = 0; i < count; i++)
			((unsigned char *)(*p))[metric_bits[i] / 8] |=
				BIT(metric_bits[i] % 8);
		*p += size;
	} else {
		if (WARN_ON_ONCE(*p + 4 + 4 > end))
			return -ERANGE;

		/* metric spec info length */
		ceph_encode_32(p, 4);
		/* metric spec */
		ceph_encode_32(p, 0);
	}

	return 0;
}

/*
 * session message, specialization for CEPH_SESSION_REQUEST_OPEN
 * to include additional client metadata fields.
 */
static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u64 seq)
{
	struct ceph_msg *msg;
	struct ceph_mds_session_head *h;
	int i;
	int extra_bytes = 0;
	int metadata_key_count = 0;
	struct ceph_options *opt = mdsc->fsc->client->options;
	struct ceph_mount_options *fsopt = mdsc->fsc->mount_options;
	size_t size, count;
	void *p, *end;
	int ret;

	const char *metadata[][2] = {
		{"hostname", mdsc->nodename},
		{"kernel_version", init_utsname()->release},
		{"entity_id", opt->name ? : ""},
		{"root", fsopt->server_path ? : "/"},
		{NULL, NULL}
	};

	/* Calculate serialized length of metadata */
	extra_bytes = 4;  /* map length */
	for (i = 0; metadata[i][0]; ++i) {
		extra_bytes += 8 + strlen(metadata[i][0]) +
			strlen(metadata[i][1]);
		metadata_key_count++;
	}

	/* supported feature */
	size = 0;
	count = ARRAY_SIZE(feature_bits);
	if (count > 0)
		size = FEATURE_BYTES(count);
	extra_bytes += 4 + size;

	/* metric spec */
	size = 0;
	count = ARRAY_SIZE(metric_bits);
	if (count > 0)
		size = METRIC_BYTES(count);
	extra_bytes += 2 + 4 + 4 + size;

	/* Allocate the message */
	msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h) + extra_bytes,
			   GFP_NOFS, false);
	if (!msg) {
		pr_err("ENOMEM creating session open msg\n");
		return ERR_PTR(-ENOMEM);
	}
	p = msg->front.iov_base;
	end = p + msg->front.iov_len;

	h = p;
	h->op = cpu_to_le32(CEPH_SESSION_REQUEST_OPEN);
	h->seq = cpu_to_le64(seq);

	/*
	 * Serialize client metadata into waiting buffer space, using
	 * the format that userspace expects for map<string, string>
	 *
	 * ClientSession messages with metadata are v4
	 */
	msg->hdr.version = cpu_to_le16(4);
	msg->hdr.compat_version = cpu_to_le16(1);

	/* The write pointer, following the session_head structure */
	p += sizeof(*h);

	/* Number of entries in the map */
	ceph_encode_32(&p, metadata_key_count);

	/* Two length-prefixed strings for each entry in the map */
	for (i = 0; metadata[i][0]; ++i) {
		size_t const key_len = strlen(metadata[i][0]);
		size_t const val_len = strlen(metadata[i][1]);

		ceph_encode_32(&p, key_len);
		memcpy(p, metadata[i][0], key_len);
		p += key_len;
		ceph_encode_32(&p, val_len);
		memcpy(p, metadata[i][1], val_len);
		p += val_len;
	}

	ret = encode_supported_features(&p, end);
	if (ret) {
		pr_err("encode_supported_features failed!\n");
		ceph_msg_put(msg);
		return ERR_PTR(ret);
	}

	ret = encode_metric_spec(&p, end);
	if (ret) {
		pr_err("encode_metric_spec failed!\n");
		ceph_msg_put(msg);
		return ERR_PTR(ret);
	}

	msg->front.iov_len = p - msg->front.iov_base;
	msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);

	return msg;
}

/*
 * send session open request.
 *
 * called under mdsc->mutex
 */
static int __open_session(struct ceph_mds_client *mdsc,
			  struct ceph_mds_session *session)
{
	struct ceph_msg *msg;
	int mstate;
	int mds = session->s_mds;

	if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_FENCE_IO)
		return -EIO;

	/* wait for mds to go active? */
	mstate = ceph_mdsmap_get_state(mdsc->mdsmap, mds);
	dout("open_session to mds%d (%s)\n", mds,
	     ceph_mds_state_name(mstate));
	session->s_state = CEPH_MDS_SESSION_OPENING;
	session->s_renew_requested = jiffies;

	/* send connect message */
	msg = create_session_open_msg(mdsc, session->s_seq);
	if (IS_ERR(msg))
		return PTR_ERR(msg);
	ceph_con_send(&session->s_con, msg);
	return 0;
}

/*
 * open sessions for any export targets for the given mds
 *
 * called under mdsc->mutex
 */
static struct ceph_mds_session *
__open_export_target_session(struct ceph_mds_client *mdsc, int target)
{
	struct ceph_mds_session *session;
	int ret;

	session = __ceph_lookup_mds_session(mdsc, target);
	if (!session) {
		session = register_session(mdsc, target);
		if (IS_ERR(session))
			return session;
	}
	if (session->s_state == CEPH_MDS_SESSION_NEW ||
	    session->s_state == CEPH_MDS_SESSION_CLOSING) {
		ret = __open_session(mdsc, session);
		if (ret)
			return ERR_PTR(ret);
	}

	return session;
}

struct ceph_mds_session *
ceph_mdsc_open_export_target_session(struct ceph_mds_client *mdsc, int target)
{
	struct ceph_mds_session *session;

	dout("open_export_target_session to mds%d\n", target);

	mutex_lock(&mdsc->mutex);
	session = __open_export_target_session(mdsc, target);
	mutex_unlock(&mdsc->mutex);

	return session;
}

static void __open_export_target_sessions(struct ceph_mds_client *mdsc,
					  struct ceph_mds_session *session)
{
	struct ceph_mds_info *mi;
	struct ceph_mds_session *ts;
	int i, mds = session->s_mds;

	if (mds >= mdsc->mdsmap->possible_max_rank)
		return;

	mi = &mdsc->mdsmap->m_info[mds];
	dout("open_export_target_sessions for mds%d (%d targets)\n",
	     session->s_mds, mi->num_export_targets);

	for (i = 0; i < mi->num_export_targets; i++) {
		ts = __open_export_target_session(mdsc, mi->export_targets[i]);
		ceph_put_mds_session(ts);
	}
}

void ceph_mdsc_open_export_target_sessions(struct ceph_mds_client *mdsc,
					   struct ceph_mds_session *session)
{
	mutex_lock(&mdsc->mutex);
	__open_export_target_sessions(mdsc, session);
	mutex_unlock(&mdsc->mutex);
}

/*
 * session caps
 */

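/*
 * Move the session's pending cap releases onto a private list so they
 * can be disposed of after dropping s_cap_lock.
 */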
static void detach_cap_releases(struct ceph_mds_session *session,
				struct list_head *target)
{
	lockdep_assert_held(&session->s_cap_lock);

	list_splice_init(&session->s_cap_releases, target);
	session->s_num_cap_releases = 0;
	dout("dispose_cap_releases mds%d\n", session->s_mds);
}

static void dispose_cap_releases(struct ceph_mds_client *mdsc,
				 struct list_head *dispose)
{
	while (!list_empty(dispose)) {
		struct ceph_cap *cap;
		/* zero out the in-progress message */
		cap = list_first_entry(dispose, struct ceph_cap, session_caps);
		list_del(&cap->session_caps);
		ceph_put_cap(mdsc, cap);
	}
}

static void cleanup_session_requests(struct ceph_mds_client *mdsc,
				     struct ceph_mds_session *session)
{
	struct ceph_mds_request *req;
	struct rb_node *p;

	dout("cleanup_session_requests mds%d\n", session->s_mds);
	mutex_lock(&mdsc->mutex);
	while (!list_empty(&session->s_unsafe)) {
		req = list_first_entry(&session->s_unsafe,
				       struct ceph_mds_request, r_unsafe_item);
		pr_warn_ratelimited(" dropping unsafe request %llu\n",
				    req->r_tid);
		if (req->r_target_inode)
			mapping_set_error(req->r_target_inode->i_mapping, -EIO);
		if (req->r_unsafe_dir)
			mapping_set_error(req->r_unsafe_dir->i_mapping, -EIO);
		__unregister_request(mdsc, req);
	}
	/* zero r_attempts, so kick_requests() will re-send requests */
	p = rb_first(&mdsc->request_tree);
	while (p) {
		req = rb_entry(p, struct ceph_mds_request, r_node);
		p = rb_next(p);
		if (req->r_session &&
		    req->r_session->s_mds == session->s_mds)
			req->r_attempts = 0;
	}
	mutex_unlock(&mdsc->mutex);
}

/*
 * Helper to safely iterate over all caps associated with a session, with
 * special care taken to handle a racing __ceph_remove_cap().
 *
 * Caller must hold session s_mutex.
 */
int ceph_iterate_session_caps(struct ceph_mds_session *session,
			      int (*cb)(struct inode *, struct ceph_cap *,
					void *), void *arg)
{
	struct list_head *p;
	struct ceph_cap *cap;
	struct inode *inode, *last_inode = NULL;
	struct ceph_cap *old_cap = NULL;
	int ret;

	dout("iterate_session_caps %p mds%d\n", session, session->s_mds);
	spin_lock(&session->s_cap_lock);
	p = session->s_caps.next;
	while (p != &session->s_caps) {
		cap = list_entry(p, struct ceph_cap, session_caps);
		inode = igrab(&cap->ci->netfs.inode);
		if (!inode) {
			p = p->next;
			continue;
		}
		session->s_cap_iterator = cap;
		spin_unlock(&session->s_cap_lock);

		if (last_inode) {
			iput(last_inode);
			last_inode = NULL;
		}
		if (old_cap) {
			ceph_put_cap(session->s_mdsc, old_cap);
			old_cap = NULL;
		}

		ret = cb(inode, cap, arg);
		last_inode = inode;

		spin_lock(&session->s_cap_lock);
		p = p->next;
		if (!cap->ci) {
			dout("iterate_session_caps finishing cap %p removal\n",
			     cap);
			BUG_ON(cap->session != session);
			cap->session = NULL;
			list_del_init(&cap->session_caps);
			session->s_nr_caps--;
			atomic64_dec(&session->s_mdsc->metric.total_caps);
			if (cap->queue_release)
				__ceph_queue_cap_release(session, cap);
			else
				old_cap = cap;  /* put_cap it w/o locks held */
		}
		if (ret < 0)
			goto out;
	}
	ret = 0;
out:
	session->s_cap_iterator = NULL;
	spin_unlock(&session->s_cap_lock);

	iput(last_inode);
	if (old_cap)
		ceph_put_cap(session->s_mdsc, old_cap);

	return ret;
}

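/*
 * Per-cap callback for remove_session_caps(): purge the cap from its
 * inode, then carry out any invalidation and iputs that requires.
 */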
static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
				  void *arg)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	bool invalidate = false;
	int iputs;

	dout("removing cap %p, ci is %p, inode is %p\n",
	     cap, ci, &ci->netfs.inode);
	spin_lock(&ci->i_ceph_lock);
	iputs = ceph_purge_inode_cap(inode, cap, &invalidate);
	spin_unlock(&ci->i_ceph_lock);

	wake_up_all(&ci->i_cap_wq);
	if (invalidate)
		ceph_queue_invalidate(inode);
	while (iputs--)
		iput(inode);
	return 0;
}

/*
 * caller must hold session s_mutex
 */
static void remove_session_caps(struct ceph_mds_session *session)
{
	struct ceph_fs_client *fsc = session->s_mdsc->fsc;
	struct super_block *sb = fsc->sb;
	LIST_HEAD(dispose);

	dout("remove_session_caps on %p\n", session);
	ceph_iterate_session_caps(session, remove_session_caps_cb, fsc);

	wake_up_all(&fsc->mdsc->cap_flushing_wq);

	spin_lock(&session->s_cap_lock);
	if (session->s_nr_caps > 0) {
		struct inode *inode;
		struct ceph_cap *cap, *prev = NULL;
		struct ceph_vino vino;
		/*
		 * iterate_session_caps() skips inodes that are being
		 * deleted, we need to wait until deletions are complete.
		 * __wait_on_freeing_inode() is designed for the job,
		 * but it is not exported, so use lookup inode function
		 * to access it.
		 */
		while (!list_empty(&session->s_caps)) {
			cap = list_entry(session->s_caps.next,
					 struct ceph_cap, session_caps);
			if (cap == prev)
				break;
			prev = cap;
			vino = cap->ci->i_vino;
			spin_unlock(&session->s_cap_lock);

			inode = ceph_find_inode(sb, vino);
			iput(inode);

			spin_lock(&session->s_cap_lock);
		}
	}

	// drop cap expires and unlock s_cap_lock
	detach_cap_releases(session, &dispose);

	BUG_ON(session->s_nr_caps > 0);
	BUG_ON(!list_empty(&session->s_cap_flushing));
	spin_unlock(&session->s_cap_lock);
	dispose_cap_releases(session->s_mdsc, &dispose);
}

enum {
	RECONNECT,
	RENEWCAPS,
	FORCE_RO,
};

/*
 * wake up any threads waiting on this session's caps.  if the cap is
 * old (didn't get renewed on the client reconnect), remove it now.
 *
 * caller must hold s_mutex.
 */
static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap,
			      void *arg)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	unsigned long ev = (unsigned long)arg;

	if (ev == RECONNECT) {
		spin_lock(&ci->i_ceph_lock);
		ci->i_wanted_max_size = 0;
		ci->i_requested_max_size = 0;
		spin_unlock(&ci->i_ceph_lock);
	} else if (ev == RENEWCAPS) {
		if (cap->cap_gen < atomic_read(&cap->session->s_cap_gen)) {
			/* mds did not re-issue stale cap */
			spin_lock(&ci->i_ceph_lock);
			cap->issued = cap->implemented = CEPH_CAP_PIN;
			spin_unlock(&ci->i_ceph_lock);
		}
	} else if (ev == FORCE_RO) {
	}
	wake_up_all(&ci->i_cap_wq);
	return 0;
}

static void wake_up_session_caps(struct ceph_mds_session *session, int ev)
{
	dout("wake_up_session_caps %p mds%d\n", session, session->s_mds);
	ceph_iterate_session_caps(session, wake_up_session_cb,
				  (void *)(unsigned long)ev);
}

/*
 * Send periodic message to MDS renewing all currently held caps.  The
 * ack will reset the expiration for all caps from this session.
 *
 * caller holds s_mutex
 */
static int send_renew_caps(struct ceph_mds_client *mdsc,
			   struct ceph_mds_session *session)
{
	struct ceph_msg *msg;
	int state;

	if (time_after_eq(jiffies, session->s_cap_ttl) &&
	    time_after_eq(session->s_cap_ttl, session->s_renew_requested))
		pr_info("mds%d caps stale\n", session->s_mds);
	session->s_renew_requested = jiffies;

	/* do not try to renew caps until a recovering mds has reconnected
	 * with its clients. */
	state = ceph_mdsmap_get_state(mdsc->mdsmap, session->s_mds);
	if (state < CEPH_MDS_STATE_RECONNECT) {
		dout("send_renew_caps ignoring mds%d (%s)\n",
		     session->s_mds, ceph_mds_state_name(state));
		return 0;
	}

	dout("send_renew_caps to mds%d (%s)\n", session->s_mds,
	     ceph_mds_state_name(state));
	msg = ceph_create_session_msg(CEPH_SESSION_REQUEST_RENEWCAPS,
				      ++session->s_renew_seq);
	if (!msg)
		return -ENOMEM;
	ceph_con_send(&session->s_con, msg);
	return 0;
}

static int send_flushmsg_ack(struct ceph_mds_client *mdsc,
			     struct ceph_mds_session *session, u64 seq)
{
	struct ceph_msg *msg;

	dout("send_flushmsg_ack to mds%d (%s) seq %lld\n",
	     session->s_mds, ceph_session_state_name(session->s_state), seq);
	msg = ceph_create_session_msg(CEPH_SESSION_FLUSHMSG_ACK, seq);
	if (!msg)
		return -ENOMEM;
	ceph_con_send(&session->s_con, msg);
	return 0;
}


/*
 * Note new cap ttl, and any transition from stale -> not stale (fresh?).
 *
 * Called under session->s_mutex
 */
static void renewed_caps(struct ceph_mds_client *mdsc,
			 struct ceph_mds_session *session, int is_renew)
{
	int was_stale;
	int wake = 0;

	spin_lock(&session->s_cap_lock);
	was_stale = is_renew && time_after_eq(jiffies, session->s_cap_ttl);

	session->s_cap_ttl = session->s_renew_requested +
		mdsc->mdsmap->m_session_timeout*HZ;

	if (was_stale) {
		if (time_before(jiffies, session->s_cap_ttl)) {
			pr_info("mds%d caps renewed\n", session->s_mds);
			wake = 1;
		} else {
			pr_info("mds%d caps still stale\n", session->s_mds);
		}
	}
	dout("renewed_caps mds%d ttl now %lu, was %s, now %s\n",
	     session->s_mds, session->s_cap_ttl, was_stale ? "stale" : "fresh",
	     time_before(jiffies, session->s_cap_ttl) ? "fresh" : "stale");
	spin_unlock(&session->s_cap_lock);

	if (wake)
		wake_up_session_caps(session, RENEWCAPS);
}

/*
 * send a session close request
 */
static int request_close_session(struct ceph_mds_session *session)
{
	struct ceph_msg *msg;

	dout("request_close_session mds%d state %s seq %lld\n",
	     session->s_mds, ceph_session_state_name(session->s_state),
	     session->s_seq);
	msg = ceph_create_session_msg(CEPH_SESSION_REQUEST_CLOSE,
				      session->s_seq);
	if (!msg)
		return -ENOMEM;
	ceph_con_send(&session->s_con, msg);
	return 1;
}

/*
 * Called with s_mutex held.
 */
static int __close_session(struct ceph_mds_client *mdsc,
			   struct ceph_mds_session *session)
{
	if (session->s_state >= CEPH_MDS_SESSION_CLOSING)
		return 0;
	session->s_state = CEPH_MDS_SESSION_CLOSING;
	return request_close_session(session);
}

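/*
 * If every child of a directory dentry is negative, prune them from
 * the dcache; returns true if only negative children were found.
 */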
static bool drop_negative_children(struct dentry *dentry)
{
	struct dentry *child;
	bool all_negative = true;

	if (!d_is_dir(dentry))
		goto out;

	spin_lock(&dentry->d_lock);
	list_for_each_entry(child, &dentry->d_subdirs, d_child) {
		if (d_really_is_positive(child)) {
			all_negative = false;
			break;
		}
	}
	spin_unlock(&dentry->d_lock);

	if (all_negative)
		shrink_dcache_parent(dentry);
out:
	return all_negative;
}

1952/*
1953 * Trim old(er) caps.
1954 *
1955 * Because we can't cache an inode without one or more caps, we do
1956 * this indirectly: if a cap is unused, we prune its aliases, at which
1957 * point the inode will hopefully get dropped to.
1958 *
1959 * Yes, this is a bit sloppy. Our only real goal here is to respond to
1960 * memory pressure from the MDS, though, so it needn't be perfect.
1961 */
1962static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
1963{
1964 int *remaining = arg;
1965 struct ceph_inode_info *ci = ceph_inode(inode);
1966 int used, wanted, oissued, mine;
1967
1968 if (*remaining <= 0)
1969 return -1;
1970
1971 spin_lock(&ci->i_ceph_lock);
1972 mine = cap->issued | cap->implemented;
1973 used = __ceph_caps_used(ci);
1974 wanted = __ceph_caps_file_wanted(ci);
1975 oissued = __ceph_caps_issued_other(ci, cap);
1976
1977 dout("trim_caps_cb %p cap %p mine %s oissued %s used %s wanted %s\n",
1978 inode, cap, ceph_cap_string(mine), ceph_cap_string(oissued),
1979 ceph_cap_string(used), ceph_cap_string(wanted));
1980 if (cap == ci->i_auth_cap) {
1981 if (ci->i_dirty_caps || ci->i_flushing_caps ||
1982 !list_empty(&ci->i_cap_snaps))
1983 goto out;
1984 if ((used | wanted) & CEPH_CAP_ANY_WR)
1985 goto out;
1986 /* Note: it's possible that i_filelock_ref becomes non-zero
1987 * after dropping auth caps. It doesn't hurt because reply
1988 * of lock mds request will re-add auth caps. */
1989 if (atomic_read(&ci->i_filelock_ref) > 0)
1990 goto out;
1991 }
1992 /* The inode has cached pages, but it's no longer used.
1993 * we can safely drop it */
1994 if (S_ISREG(inode->i_mode) &&
1995 wanted == 0 && used == CEPH_CAP_FILE_CACHE &&
1996 !(oissued & CEPH_CAP_FILE_CACHE)) {
1997 used = 0;
1998 oissued = 0;
1999 }
2000 if ((used | wanted) & ~oissued & mine)
2001 goto out; /* we need these caps */
2002
2003 if (oissued) {
2004 /* we aren't the only cap.. just remove us */
2005 ceph_remove_cap(cap, true);
2006 (*remaining)--;
2007 } else {
2008 struct dentry *dentry;
2009 /* try dropping referring dentries */
2010 spin_unlock(&ci->i_ceph_lock);
2011 dentry = d_find_any_alias(inode);
2012 if (dentry && drop_negative_children(dentry)) {
2013 int count;
2014 dput(dentry);
2015 d_prune_aliases(inode);
2016 count = atomic_read(&inode->i_count);
2017 if (count == 1)
2018 (*remaining)--;
2019 dout("trim_caps_cb %p cap %p pruned, count now %d\n",
2020 inode, cap, count);
2021 } else {
2022 dput(dentry);
2023 }
2024 return 0;
2025 }
2026
2027out:
2028 spin_unlock(&ci->i_ceph_lock);
2029 return 0;
2030}

/*
 * Trim session cap count down to some max number.
 */
int ceph_trim_caps(struct ceph_mds_client *mdsc,
		   struct ceph_mds_session *session,
		   int max_caps)
{
	int trim_caps = session->s_nr_caps - max_caps;

	dout("trim_caps mds%d start: %d / %d, trim %d\n",
	     session->s_mds, session->s_nr_caps, max_caps, trim_caps);
	if (trim_caps > 0) {
		int remaining = trim_caps;

		ceph_iterate_session_caps(session, trim_caps_cb, &remaining);
		dout("trim_caps mds%d done: %d / %d, trimmed %d\n",
		     session->s_mds, session->s_nr_caps, max_caps,
		     trim_caps - remaining);
	}

	ceph_flush_cap_releases(mdsc, session);
	return 0;
}
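
/*
 * Illustrative note: trimming here is driven by the MDS rather than by
 * local memory pressure.  When a CEPH_SESSION_RECALL_STATE message
 * arrives, the session handler (see handle_session() below) simply
 * forwards the advertised limit:
 *
 *	case CEPH_SESSION_RECALL_STATE:
 *		ceph_trim_caps(mdsc, session, le32_to_cpu(h->max_caps));
 *		break;
 *
 * Caps that cannot be trimmed this pass are retried on the next recall;
 * the unconditional 0 return reflects that the recall is best-effort.
 */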

static int check_caps_flush(struct ceph_mds_client *mdsc,
			    u64 want_flush_tid)
{
	int ret = 1;

	spin_lock(&mdsc->cap_dirty_lock);
	if (!list_empty(&mdsc->cap_flush_list)) {
		struct ceph_cap_flush *cf =
			list_first_entry(&mdsc->cap_flush_list,
					 struct ceph_cap_flush, g_list);
		if (cf->tid <= want_flush_tid) {
			dout("check_caps_flush still flushing tid "
			     "%llu <= %llu\n", cf->tid, want_flush_tid);
			ret = 0;
		}
	}
	spin_unlock(&mdsc->cap_dirty_lock);
	return ret;
}

/*
 * flush all dirty inode data to disk.
 *
 * returns only once we've flushed through want_flush_tid
 */
static void wait_caps_flush(struct ceph_mds_client *mdsc,
			    u64 want_flush_tid)
{
	dout("check_caps_flush want %llu\n", want_flush_tid);

	wait_event(mdsc->cap_flushing_wq,
		   check_caps_flush(mdsc, want_flush_tid));

	dout("check_caps_flush ok, flushed thru %llu\n", want_flush_tid);
}
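
/*
 * A minimal sketch of the wait pattern above, assuming (as the sync path
 * in this file does) that the caller kicks off the flushes first and then
 * captures the tid it must wait for:
 *
 *	u64 want_tid;
 *
 *	ceph_flush_dirty_caps(mdsc);			// queue cap flushes
 *	want_tid = mdsc->last_cap_flush_tid;
 *	wait_caps_flush(mdsc, want_tid);		// sleep until done
 *
 * wait_event() re-evaluates check_caps_flush() on each wakeup of
 * mdsc->cap_flushing_wq, so the caller sleeps without polling.
 */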

/*
 * called under s_mutex
 */
static void ceph_send_cap_releases(struct ceph_mds_client *mdsc,
				   struct ceph_mds_session *session)
{
	struct ceph_msg *msg = NULL;
	struct ceph_mds_cap_release *head;
	struct ceph_mds_cap_item *item;
	struct ceph_osd_client *osdc = &mdsc->fsc->client->osdc;
	struct ceph_cap *cap;
	LIST_HEAD(tmp_list);
	int num_cap_releases;
	__le32 barrier, *cap_barrier;

	down_read(&osdc->lock);
	barrier = cpu_to_le32(osdc->epoch_barrier);
	up_read(&osdc->lock);

	spin_lock(&session->s_cap_lock);
again:
	list_splice_init(&session->s_cap_releases, &tmp_list);
	num_cap_releases = session->s_num_cap_releases;
	session->s_num_cap_releases = 0;
	spin_unlock(&session->s_cap_lock);

	while (!list_empty(&tmp_list)) {
		if (!msg) {
			msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPRELEASE,
					   PAGE_SIZE, GFP_NOFS, false);
			if (!msg)
				goto out_err;
			head = msg->front.iov_base;
			head->num = cpu_to_le32(0);
			msg->front.iov_len = sizeof(*head);

			msg->hdr.version = cpu_to_le16(2);
			msg->hdr.compat_version = cpu_to_le16(1);
		}

		cap = list_first_entry(&tmp_list, struct ceph_cap,
				       session_caps);
		list_del(&cap->session_caps);
		num_cap_releases--;

		head = msg->front.iov_base;
		put_unaligned_le32(get_unaligned_le32(&head->num) + 1,
				   &head->num);
		item = msg->front.iov_base + msg->front.iov_len;
		item->ino = cpu_to_le64(cap->cap_ino);
		item->cap_id = cpu_to_le64(cap->cap_id);
		item->migrate_seq = cpu_to_le32(cap->mseq);
		item->seq = cpu_to_le32(cap->issue_seq);
		msg->front.iov_len += sizeof(*item);

		ceph_put_cap(mdsc, cap);

		if (le32_to_cpu(head->num) == CEPH_CAPS_PER_RELEASE) {
			// Append cap_barrier field
			cap_barrier = msg->front.iov_base + msg->front.iov_len;
			*cap_barrier = barrier;
			msg->front.iov_len += sizeof(*cap_barrier);

			msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
			dout("send_cap_releases mds%d %p\n", session->s_mds, msg);
			ceph_con_send(&session->s_con, msg);
			msg = NULL;
		}
	}

	BUG_ON(num_cap_releases != 0);

	spin_lock(&session->s_cap_lock);
	if (!list_empty(&session->s_cap_releases))
		goto again;
	spin_unlock(&session->s_cap_lock);

	if (msg) {
		// Append cap_barrier field
		cap_barrier = msg->front.iov_base + msg->front.iov_len;
		*cap_barrier = barrier;
		msg->front.iov_len += sizeof(*cap_barrier);

		msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
		dout("send_cap_releases mds%d %p\n", session->s_mds, msg);
		ceph_con_send(&session->s_con, msg);
	}
	return;
out_err:
	pr_err("send_cap_releases mds%d, failed to allocate message\n",
	       session->s_mds);
	spin_lock(&session->s_cap_lock);
	list_splice(&tmp_list, &session->s_cap_releases);
	session->s_num_cap_releases += num_cap_releases;
	spin_unlock(&session->s_cap_lock);
}
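
/*
 * For reference, the CEPH_MSG_CLIENT_CAPRELEASE front section assembled
 * above is laid out as (all fields little-endian):
 *
 *	struct ceph_mds_cap_release head;	// head.num = item count
 *	struct ceph_mds_cap_item   item[N];	// ino, cap_id, migrate_seq, seq
 *	__le32                     cap_barrier;	// v2 OSD epoch barrier
 *
 * A message is flushed out every CEPH_CAPS_PER_RELEASE items, so the
 * front never outgrows the single PAGE_SIZE buffer allocated for it.
 */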

static void ceph_cap_release_work(struct work_struct *work)
{
	struct ceph_mds_session *session =
		container_of(work, struct ceph_mds_session, s_cap_release_work);

	mutex_lock(&session->s_mutex);
	if (session->s_state == CEPH_MDS_SESSION_OPEN ||
	    session->s_state == CEPH_MDS_SESSION_HUNG)
		ceph_send_cap_releases(session->s_mdsc, session);
	mutex_unlock(&session->s_mutex);
	ceph_put_mds_session(session);
}

void ceph_flush_cap_releases(struct ceph_mds_client *mdsc,
			     struct ceph_mds_session *session)
{
	if (mdsc->stopping)
		return;

	ceph_get_mds_session(session);
	if (queue_work(mdsc->fsc->cap_wq,
		       &session->s_cap_release_work)) {
		dout("cap release work queued\n");
	} else {
		ceph_put_mds_session(session);
		dout("failed to queue cap release work\n");
	}
}

/*
 * caller holds session->s_cap_lock
 */
void __ceph_queue_cap_release(struct ceph_mds_session *session,
			      struct ceph_cap *cap)
{
	list_add_tail(&cap->session_caps, &session->s_cap_releases);
	session->s_num_cap_releases++;

	if (!(session->s_num_cap_releases % CEPH_CAPS_PER_RELEASE))
		ceph_flush_cap_releases(session->s_mdsc, session);
}
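
/*
 * The modulo test above batches releases rather than flushing one at a
 * time.  Assuming for illustration that CEPH_CAPS_PER_RELEASE were 64:
 *
 *	s_num_cap_releases = 1..63    -> no flush queued
 *	s_num_cap_releases = 64       -> 64 % 64 == 0, flush work queued
 *	s_num_cap_releases = 65..127  -> no flush, next flush at 128
 *
 * i.e. at most one work item per full batch, matching the PAGE_SIZE
 * message built in ceph_send_cap_releases().
 */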

static void ceph_cap_reclaim_work(struct work_struct *work)
{
	struct ceph_mds_client *mdsc =
		container_of(work, struct ceph_mds_client, cap_reclaim_work);
	int ret = ceph_trim_dentries(mdsc);
	if (ret == -EAGAIN)
		ceph_queue_cap_reclaim_work(mdsc);
}

void ceph_queue_cap_reclaim_work(struct ceph_mds_client *mdsc)
{
	if (mdsc->stopping)
		return;

	if (queue_work(mdsc->fsc->cap_wq, &mdsc->cap_reclaim_work)) {
		dout("caps reclaim work queued\n");
	} else {
		dout("failed to queue caps reclaim work\n");
	}
}

void ceph_reclaim_caps_nr(struct ceph_mds_client *mdsc, int nr)
{
	int val;
	if (!nr)
		return;
	val = atomic_add_return(nr, &mdsc->cap_reclaim_pending);
	if ((val % CEPH_CAPS_PER_RELEASE) < nr) {
		atomic_set(&mdsc->cap_reclaim_pending, 0);
		ceph_queue_cap_reclaim_work(mdsc);
	}
}
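
/*
 * The (val % CEPH_CAPS_PER_RELEASE) < nr test above detects that the
 * running counter just crossed a batch boundary, without a second atomic
 * op.  A worked example with a batch size of 64 for illustration:
 *
 *	pending = 60, nr = 3 -> val = 63, 63 % 64 = 63, 63 < 3 is false
 *	pending = 63, nr = 3 -> val = 66, 66 % 64 =  2,  2 < 3 is true
 *
 * so the reclaim work is queued roughly once per CEPH_CAPS_PER_RELEASE
 * newly reclaimable dentries.
 */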

/*
 * requests
 */

int ceph_alloc_readdir_reply_buffer(struct ceph_mds_request *req,
				    struct inode *dir)
{
	struct ceph_inode_info *ci = ceph_inode(dir);
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	struct ceph_mount_options *opt = req->r_mdsc->fsc->mount_options;
	size_t size = sizeof(struct ceph_mds_reply_dir_entry);
	unsigned int num_entries;
	int order;

	spin_lock(&ci->i_ceph_lock);
	num_entries = ci->i_files + ci->i_subdirs;
	spin_unlock(&ci->i_ceph_lock);
	num_entries = max(num_entries, 1U);
	num_entries = min(num_entries, opt->max_readdir);

	order = get_order(size * num_entries);
	while (order >= 0) {
		rinfo->dir_entries = (void*)__get_free_pages(GFP_KERNEL |
							     __GFP_NOWARN |
							     __GFP_ZERO,
							     order);
		if (rinfo->dir_entries)
			break;
		order--;
	}
	if (!rinfo->dir_entries)
		return -ENOMEM;

	num_entries = (PAGE_SIZE << order) / size;
	num_entries = min(num_entries, opt->max_readdir);

	rinfo->dir_buf_size = PAGE_SIZE << order;
	req->r_num_caps = num_entries + 1;
	req->r_args.readdir.max_entries = cpu_to_le32(num_entries);
	req->r_args.readdir.max_bytes = cpu_to_le32(opt->max_readdir_bytes);
	return 0;
}
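
/*
 * A sketch of the sizing logic above: the directory's i_files + i_subdirs
 * count (clamped to [1, max_readdir]) picks a first-guess allocation via
 * get_order(); if that fails the order is walked down, and the entry
 * count is then recomputed from whatever block was actually obtained:
 *
 *	num_entries = (PAGE_SIZE << order) / size;
 *
 * so a smaller buffer simply means more READDIR round trips with fewer
 * entries per reply, not a hard failure.
 */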

/*
 * Create an mds request.
 */
struct ceph_mds_request *
ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode)
{
	struct ceph_mds_request *req;

	req = kmem_cache_zalloc(ceph_mds_request_cachep, GFP_NOFS);
	if (!req)
		return ERR_PTR(-ENOMEM);

	mutex_init(&req->r_fill_mutex);
	req->r_mdsc = mdsc;
	req->r_started = jiffies;
	req->r_start_latency = ktime_get();
	req->r_resend_mds = -1;
	INIT_LIST_HEAD(&req->r_unsafe_dir_item);
	INIT_LIST_HEAD(&req->r_unsafe_target_item);
	req->r_fmode = -1;
	req->r_feature_needed = -1;
	kref_init(&req->r_kref);
	RB_CLEAR_NODE(&req->r_node);
	INIT_LIST_HEAD(&req->r_wait);
	init_completion(&req->r_completion);
	init_completion(&req->r_safe_completion);
	INIT_LIST_HEAD(&req->r_unsafe_item);

	ktime_get_coarse_real_ts64(&req->r_stamp);

	req->r_op = op;
	req->r_direct_mode = mode;
	return req;
}

/*
 * return oldest (lowest) request, tid in request tree, 0 if none.
 *
 * called under mdsc->mutex.
 */
static struct ceph_mds_request *__get_oldest_req(struct ceph_mds_client *mdsc)
{
	if (RB_EMPTY_ROOT(&mdsc->request_tree))
		return NULL;
	return rb_entry(rb_first(&mdsc->request_tree),
			struct ceph_mds_request, r_node);
}

static inline u64 __get_oldest_tid(struct ceph_mds_client *mdsc)
{
	return mdsc->oldest_tid;
}

/*
 * Build a dentry's path.  Allocated on the heap; the caller must free it
 * with ceph_mdsc_free_path().  Based on build_path_from_dentry in
 * fs/cifs/dir.c.
 *
 * If @stop_on_nosnap, generate path relative to the first non-snapped
 * inode.
 *
 * Encode hidden .snap dirs as a double /, i.e.
 *   foo/.snap/bar -> foo//bar
 */
char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *pbase,
			   int stop_on_nosnap)
{
	struct dentry *temp;
	char *path;
	int pos;
	unsigned seq;
	u64 base;

	if (!dentry)
		return ERR_PTR(-EINVAL);

	path = __getname();
	if (!path)
		return ERR_PTR(-ENOMEM);
retry:
	pos = PATH_MAX - 1;
	path[pos] = '\0';

	seq = read_seqbegin(&rename_lock);
	rcu_read_lock();
	temp = dentry;
	for (;;) {
		struct inode *inode;

		spin_lock(&temp->d_lock);
		inode = d_inode(temp);
		if (inode && ceph_snap(inode) == CEPH_SNAPDIR) {
			dout("build_path path+%d: %p SNAPDIR\n",
			     pos, temp);
		} else if (stop_on_nosnap && inode && dentry != temp &&
			   ceph_snap(inode) == CEPH_NOSNAP) {
			spin_unlock(&temp->d_lock);
			pos++; /* get rid of any prepended '/' */
			break;
		} else {
			pos -= temp->d_name.len;
			if (pos < 0) {
				spin_unlock(&temp->d_lock);
				break;
			}
			memcpy(path + pos, temp->d_name.name, temp->d_name.len);
		}
		spin_unlock(&temp->d_lock);
		temp = READ_ONCE(temp->d_parent);

		/* Are we at the root? */
		if (IS_ROOT(temp))
			break;

		/* Are we out of buffer? */
		if (--pos < 0)
			break;

		path[pos] = '/';
	}
	base = ceph_ino(d_inode(temp));
	rcu_read_unlock();

	if (read_seqretry(&rename_lock, seq))
		goto retry;

	if (pos < 0) {
		/*
		 * A rename didn't occur, but somehow we didn't end up where
		 * we thought we would.  Throw a warning and try again.
		 */
		pr_warn("build_path did not end path lookup where "
			"expected, pos is %d\n", pos);
		goto retry;
	}

	*pbase = base;
	*plen = PATH_MAX - 1 - pos;
	dout("build_path on %p %d built %llx '%.*s'\n",
	     dentry, d_count(dentry), base, *plen, path + pos);
	return path + pos;
}
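
/*
 * Example of the encoding described in the header comment: a dentry for
 * "bar" inside the hidden snapshot directory of "foo" walks up through
 * the CEPH_SNAPDIR inode, which contributes no name component, so the
 * two surrounding '/' separators collapse the snapdir into a double
 * slash:
 *
 *	foo/.snap/bar  ->  "foo//bar"
 *
 * And if read_seqretry(&rename_lock, ...) reports a concurrent rename,
 * the whole walk restarts from scratch, so the returned path is always
 * self-consistent even though no dentry locks are held across the loop.
 */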

static int build_dentry_path(struct dentry *dentry, struct inode *dir,
			     const char **ppath, int *ppathlen, u64 *pino,
			     bool *pfreepath, bool parent_locked)
{
	char *path;

	rcu_read_lock();
	if (!dir)
		dir = d_inode_rcu(dentry->d_parent);
	if (dir && parent_locked && ceph_snap(dir) == CEPH_NOSNAP) {
		*pino = ceph_ino(dir);
		rcu_read_unlock();
		*ppath = dentry->d_name.name;
		*ppathlen = dentry->d_name.len;
		return 0;
	}
	rcu_read_unlock();
	path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
	if (IS_ERR(path))
		return PTR_ERR(path);
	*ppath = path;
	*pfreepath = true;
	return 0;
}

static int build_inode_path(struct inode *inode,
			    const char **ppath, int *ppathlen, u64 *pino,
			    bool *pfreepath)
{
	struct dentry *dentry;
	char *path;

	if (ceph_snap(inode) == CEPH_NOSNAP) {
		*pino = ceph_ino(inode);
		*ppathlen = 0;
		return 0;
	}
	dentry = d_find_alias(inode);
	path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
	dput(dentry);
	if (IS_ERR(path))
		return PTR_ERR(path);
	*ppath = path;
	*pfreepath = true;
	return 0;
}

/*
 * request arguments may be specified via an inode *, a dentry *, or
 * an explicit ino+path.
 */
static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
				 struct inode *rdiri, const char *rpath,
				 u64 rino, const char **ppath, int *pathlen,
				 u64 *ino, bool *freepath, bool parent_locked)
{
	int r = 0;

	if (rinode) {
		r = build_inode_path(rinode, ppath, pathlen, ino, freepath);
		dout(" inode %p %llx.%llx\n", rinode, ceph_ino(rinode),
		     ceph_snap(rinode));
	} else if (rdentry) {
		r = build_dentry_path(rdentry, rdiri, ppath, pathlen, ino,
				      freepath, parent_locked);
		dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen,
		     *ppath);
	} else if (rpath || rino) {
		*ino = rino;
		*ppath = rpath;
		*pathlen = rpath ? strlen(rpath) : 0;
		dout(" path %.*s\n", *pathlen, rpath);
	}

	return r;
}

static void encode_timestamp_and_gids(void **p,
				      const struct ceph_mds_request *req)
{
	struct ceph_timespec ts;
	int i;

	ceph_encode_timespec64(&ts, &req->r_stamp);
	ceph_encode_copy(p, &ts, sizeof(ts));

	/* gid_list */
	ceph_encode_32(p, req->r_cred->group_info->ngroups);
	for (i = 0; i < req->r_cred->group_info->ngroups; i++)
		ceph_encode_64(p, from_kgid(&init_user_ns,
					    req->r_cred->group_info->gid[i]));
}

/*
 * called under mdsc->mutex
 */
static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
					       struct ceph_mds_request *req,
					       bool drop_cap_releases)
{
	int mds = session->s_mds;
	struct ceph_mds_client *mdsc = session->s_mdsc;
	struct ceph_msg *msg;
	struct ceph_mds_request_head_old *head;
	const char *path1 = NULL;
	const char *path2 = NULL;
	u64 ino1 = 0, ino2 = 0;
	int pathlen1 = 0, pathlen2 = 0;
	bool freepath1 = false, freepath2 = false;
	int len;
	u16 releases;
	void *p, *end;
	int ret;
	bool legacy = !(session->s_con.peer_features & CEPH_FEATURE_FS_BTIME);

	ret = set_request_path_attr(req->r_inode, req->r_dentry,
				    req->r_parent, req->r_path1, req->r_ino1.ino,
				    &path1, &pathlen1, &ino1, &freepath1,
				    test_bit(CEPH_MDS_R_PARENT_LOCKED,
					     &req->r_req_flags));
	if (ret < 0) {
		msg = ERR_PTR(ret);
		goto out;
	}

	/* If r_old_dentry is set, then assume that its parent is locked */
	ret = set_request_path_attr(NULL, req->r_old_dentry,
				    req->r_old_dentry_dir,
				    req->r_path2, req->r_ino2.ino,
				    &path2, &pathlen2, &ino2, &freepath2, true);
	if (ret < 0) {
		msg = ERR_PTR(ret);
		goto out_free1;
	}

	len = legacy ? sizeof(*head) : sizeof(struct ceph_mds_request_head);
	len += pathlen1 + pathlen2 + 2*(1 + sizeof(u32) + sizeof(u64)) +
		sizeof(struct ceph_timespec);
	len += sizeof(u32) + (sizeof(u64) * req->r_cred->group_info->ngroups);

	/* calculate (max) length for cap releases */
	len += sizeof(struct ceph_mds_request_release) *
		(!!req->r_inode_drop + !!req->r_dentry_drop +
		 !!req->r_old_inode_drop + !!req->r_old_dentry_drop);

	if (req->r_dentry_drop)
		len += pathlen1;
	if (req->r_old_dentry_drop)
		len += pathlen2;

	msg = ceph_msg_new2(CEPH_MSG_CLIENT_REQUEST, len, 1, GFP_NOFS, false);
	if (!msg) {
		msg = ERR_PTR(-ENOMEM);
		goto out_free2;
	}

	msg->hdr.tid = cpu_to_le64(req->r_tid);

	/*
	 * The old ceph_mds_request_head didn't contain a version field, and
	 * one was added when we moved the message version from 3->4.
	 */
	if (legacy) {
		msg->hdr.version = cpu_to_le16(3);
		head = msg->front.iov_base;
		p = msg->front.iov_base + sizeof(*head);
	} else {
		struct ceph_mds_request_head *new_head = msg->front.iov_base;

		msg->hdr.version = cpu_to_le16(4);
		new_head->version = cpu_to_le16(CEPH_MDS_REQUEST_HEAD_VERSION);
		head = (struct ceph_mds_request_head_old *)&new_head->oldest_client_tid;
		p = msg->front.iov_base + sizeof(*new_head);
	}

	end = msg->front.iov_base + msg->front.iov_len;

	head->mdsmap_epoch = cpu_to_le32(mdsc->mdsmap->m_epoch);
	head->op = cpu_to_le32(req->r_op);
	head->caller_uid = cpu_to_le32(from_kuid(&init_user_ns,
						 req->r_cred->fsuid));
	head->caller_gid = cpu_to_le32(from_kgid(&init_user_ns,
						 req->r_cred->fsgid));
	head->ino = cpu_to_le64(req->r_deleg_ino);
	head->args = req->r_args;

	ceph_encode_filepath(&p, end, ino1, path1);
	ceph_encode_filepath(&p, end, ino2, path2);

	/* make note of release offset, in case we need to replay */
	req->r_request_release_offset = p - msg->front.iov_base;

	/* cap releases */
	releases = 0;
	if (req->r_inode_drop)
		releases += ceph_encode_inode_release(&p,
			req->r_inode ? req->r_inode : d_inode(req->r_dentry),
			mds, req->r_inode_drop, req->r_inode_unless,
			req->r_op == CEPH_MDS_OP_READDIR);
	if (req->r_dentry_drop)
		releases += ceph_encode_dentry_release(&p, req->r_dentry,
				req->r_parent, mds, req->r_dentry_drop,
				req->r_dentry_unless);
	if (req->r_old_dentry_drop)
		releases += ceph_encode_dentry_release(&p, req->r_old_dentry,
				req->r_old_dentry_dir, mds,
				req->r_old_dentry_drop,
				req->r_old_dentry_unless);
	if (req->r_old_inode_drop)
		releases += ceph_encode_inode_release(&p,
			d_inode(req->r_old_dentry),
			mds, req->r_old_inode_drop, req->r_old_inode_unless, 0);

	if (drop_cap_releases) {
		releases = 0;
		p = msg->front.iov_base + req->r_request_release_offset;
	}

	head->num_releases = cpu_to_le16(releases);

	encode_timestamp_and_gids(&p, req);

	if (WARN_ON_ONCE(p > end)) {
		ceph_msg_put(msg);
		msg = ERR_PTR(-ERANGE);
		goto out_free2;
	}

	msg->front.iov_len = p - msg->front.iov_base;
	msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);

	if (req->r_pagelist) {
		struct ceph_pagelist *pagelist = req->r_pagelist;
		ceph_msg_data_add_pagelist(msg, pagelist);
		msg->hdr.data_len = cpu_to_le32(pagelist->length);
	} else {
		msg->hdr.data_len = 0;
	}

	msg->hdr.data_off = cpu_to_le16(0);

out_free2:
	if (freepath2)
		ceph_mdsc_free_path((char *)path2, pathlen2);
out_free1:
	if (freepath1)
		ceph_mdsc_free_path((char *)path1, pathlen1);
out:
	return msg;
}
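
/*
 * For reference, the request front assembled above is laid out as:
 *
 *	ceph_mds_request_head (or _old for pre-FS_BTIME peers)
 *	filepath 1  (ino1 + path1)
 *	filepath 2  (ino2 + path2)
 *	cap/dentry releases    <- r_request_release_offset points here
 *	ceph_timespec r_stamp
 *	gid list
 *
 * Recording the release offset lets a later replay rewrite just that
 * tail (see __prepare_send_request()) without rebuilding the paths.
 */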

/*
 * called under mdsc->mutex if error, under no mutex if
 * success.
 */
static void complete_request(struct ceph_mds_client *mdsc,
			     struct ceph_mds_request *req)
{
	req->r_end_latency = ktime_get();

	if (req->r_callback)
		req->r_callback(mdsc, req);
	complete_all(&req->r_completion);
}

static struct ceph_mds_request_head_old *
find_old_request_head(void *p, u64 features)
{
	bool legacy = !(features & CEPH_FEATURE_FS_BTIME);
	struct ceph_mds_request_head *new_head;

	if (legacy)
		return (struct ceph_mds_request_head_old *)p;
	new_head = (struct ceph_mds_request_head *)p;
	return (struct ceph_mds_request_head_old *)&new_head->oldest_client_tid;
}

/*
 * called under mdsc->mutex
 */
static int __prepare_send_request(struct ceph_mds_session *session,
				  struct ceph_mds_request *req,
				  bool drop_cap_releases)
{
	int mds = session->s_mds;
	struct ceph_mds_client *mdsc = session->s_mdsc;
	struct ceph_mds_request_head_old *rhead;
	struct ceph_msg *msg;
	int flags = 0, max_retry;

	/*
	 * The type of 'r_attempts' in kernel 'ceph_mds_request'
	 * is 'int', while in 'ceph_mds_request_head' the type of
	 * 'num_retry' is '__u8'. So if a request is retried more
	 * than 256 times, the MDS will receive an incorrect
	 * retry seq.
	 *
	 * In that case it's usually a bug in the MDS, and continuing
	 * to retry the request makes no sense.
	 *
	 * In future this could be fixed in ceph code, so avoid
	 * hard-coding the limit here.
	 */
	max_retry = sizeof_field(struct ceph_mds_request_head, num_retry);
	max_retry = 1 << (max_retry * BITS_PER_BYTE);
	if (req->r_attempts >= max_retry) {
		pr_warn_ratelimited("%s request tid %llu seq overflow\n",
				    __func__, req->r_tid);
		return -EMULTIHOP;
	}

	req->r_attempts++;
	if (req->r_inode) {
		struct ceph_cap *cap =
			ceph_get_cap_for_mds(ceph_inode(req->r_inode), mds);

		if (cap)
			req->r_sent_on_mseq = cap->mseq;
		else
			req->r_sent_on_mseq = -1;
	}
	dout("%s %p tid %lld %s (attempt %d)\n", __func__, req,
	     req->r_tid, ceph_mds_op_name(req->r_op), req->r_attempts);

	if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
		void *p;

		/*
		 * Replay.  Do not regenerate message (and rebuild
		 * paths, etc.); just use the original message.
		 * Rebuilding paths will break for renames because
		 * d_move mangles the src name.
		 */
		msg = req->r_request;
		rhead = find_old_request_head(msg->front.iov_base,
					      session->s_con.peer_features);

		flags = le32_to_cpu(rhead->flags);
		flags |= CEPH_MDS_FLAG_REPLAY;
		rhead->flags = cpu_to_le32(flags);

		if (req->r_target_inode)
			rhead->ino = cpu_to_le64(ceph_ino(req->r_target_inode));

		rhead->num_retry = req->r_attempts - 1;

		/* remove cap/dentry releases from message */
		rhead->num_releases = 0;

		p = msg->front.iov_base + req->r_request_release_offset;
		encode_timestamp_and_gids(&p, req);

		msg->front.iov_len = p - msg->front.iov_base;
		msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
		return 0;
	}

	if (req->r_request) {
		ceph_msg_put(req->r_request);
		req->r_request = NULL;
	}
	msg = create_request_message(session, req, drop_cap_releases);
	if (IS_ERR(msg)) {
		req->r_err = PTR_ERR(msg);
		return PTR_ERR(msg);
	}
	req->r_request = msg;

	rhead = find_old_request_head(msg->front.iov_base,
				      session->s_con.peer_features);
	rhead->oldest_client_tid = cpu_to_le64(__get_oldest_tid(mdsc));
	if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags))
		flags |= CEPH_MDS_FLAG_REPLAY;
	if (test_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags))
		flags |= CEPH_MDS_FLAG_ASYNC;
	if (req->r_parent)
		flags |= CEPH_MDS_FLAG_WANT_DENTRY;
	rhead->flags = cpu_to_le32(flags);
	rhead->num_fwd = req->r_num_fwd;
	rhead->num_retry = req->r_attempts - 1;

	dout(" r_parent = %p\n", req->r_parent);
	return 0;
}
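
/*
 * Worked example of the overflow guard above: num_retry is a __u8 in
 * struct ceph_mds_request_head, so
 *
 *	max_retry = sizeof_field(..., num_retry)  = 1
 *	max_retry = 1 << (1 * BITS_PER_BYTE)      = 256
 *
 * and a request already attempted 256 times fails with -EMULTIHOP
 * instead of silently wrapping the retry seq sent to the MDS.
 */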

/*
 * called under mdsc->mutex
 */
static int __send_request(struct ceph_mds_session *session,
			  struct ceph_mds_request *req,
			  bool drop_cap_releases)
{
	int err;

	err = __prepare_send_request(session, req, drop_cap_releases);
	if (!err) {
		ceph_msg_get(req->r_request);
		ceph_con_send(&session->s_con, req->r_request);
	}

	return err;
}

/*
 * send request, or put it on the appropriate wait list.
 */
static void __do_request(struct ceph_mds_client *mdsc,
			 struct ceph_mds_request *req)
{
	struct ceph_mds_session *session = NULL;
	int mds = -1;
	int err = 0;
	bool random;

	if (req->r_err || test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) {
		if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags))
			__unregister_request(mdsc, req);
		return;
	}

	if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_FENCE_IO) {
		dout("do_request metadata corrupted\n");
		err = -EIO;
		goto finish;
	}
	if (req->r_timeout &&
	    time_after_eq(jiffies, req->r_started + req->r_timeout)) {
		dout("do_request timed out\n");
		err = -ETIMEDOUT;
		goto finish;
	}
	if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
		dout("do_request forced umount\n");
		err = -EIO;
		goto finish;
	}
	if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_MOUNTING) {
		if (mdsc->mdsmap_err) {
			err = mdsc->mdsmap_err;
			dout("do_request mdsmap err %d\n", err);
			goto finish;
		}
		if (mdsc->mdsmap->m_epoch == 0) {
			dout("do_request no mdsmap, waiting for map\n");
			list_add(&req->r_wait, &mdsc->waiting_for_map);
			return;
		}
		if (!(mdsc->fsc->mount_options->flags &
		      CEPH_MOUNT_OPT_MOUNTWAIT) &&
		    !ceph_mdsmap_is_cluster_available(mdsc->mdsmap)) {
			err = -EHOSTUNREACH;
			goto finish;
		}
	}

	put_request_session(req);

	mds = __choose_mds(mdsc, req, &random);
	if (mds < 0 ||
	    ceph_mdsmap_get_state(mdsc->mdsmap, mds) < CEPH_MDS_STATE_ACTIVE) {
		if (test_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags)) {
			err = -EJUKEBOX;
			goto finish;
		}
		dout("do_request no mds or not active, waiting for map\n");
		list_add(&req->r_wait, &mdsc->waiting_for_map);
		return;
	}

	/* get, open session */
	session = __ceph_lookup_mds_session(mdsc, mds);
	if (!session) {
		session = register_session(mdsc, mds);
		if (IS_ERR(session)) {
			err = PTR_ERR(session);
			goto finish;
		}
	}
	req->r_session = ceph_get_mds_session(session);

	dout("do_request mds%d session %p state %s\n", mds, session,
	     ceph_session_state_name(session->s_state));

	/*
	 * Old ceph MDSs will crash when they see unknown ops.
	 */
	if (req->r_feature_needed > 0 &&
	    !test_bit(req->r_feature_needed, &session->s_features)) {
		err = -EOPNOTSUPP;
		goto out_session;
	}

	if (session->s_state != CEPH_MDS_SESSION_OPEN &&
	    session->s_state != CEPH_MDS_SESSION_HUNG) {
		/*
		 * We cannot queue async requests since the caps and delegated
		 * inodes are bound to the session. Just return -EJUKEBOX and
		 * let the caller retry a sync request in that case.
		 */
		if (test_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags)) {
			err = -EJUKEBOX;
			goto out_session;
		}

		/*
		 * If the session has been REJECTED, then return a hard error,
		 * unless it's a CLEANRECOVER mount, in which case we'll queue
		 * it to the mdsc queue.
		 */
		if (session->s_state == CEPH_MDS_SESSION_REJECTED) {
			if (ceph_test_mount_opt(mdsc->fsc, CLEANRECOVER))
				list_add(&req->r_wait, &mdsc->waiting_for_map);
			else
				err = -EACCES;
			goto out_session;
		}

		if (session->s_state == CEPH_MDS_SESSION_NEW ||
		    session->s_state == CEPH_MDS_SESSION_CLOSING) {
			err = __open_session(mdsc, session);
			if (err)
				goto out_session;
			/* retry the same mds later */
			if (random)
				req->r_resend_mds = mds;
		}
		list_add(&req->r_wait, &session->s_waiting);
		goto out_session;
	}

	/* send request */
	req->r_resend_mds = -1;   /* forget any previous mds hint */

	if (req->r_request_started == 0)   /* note request start time */
		req->r_request_started = jiffies;

	/*
	 * For async create we will choose the auth MDS of the frag in the
	 * parent directory to send the request, and this usually works
	 * fine, but if the directory is migrated to another MDS before it
	 * can handle the request, the request will be forwarded.
	 *
	 * And then the auth cap will be changed.
	 */
	if (test_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags) && req->r_num_fwd) {
		struct ceph_dentry_info *di = ceph_dentry(req->r_dentry);
		struct ceph_inode_info *ci;
		struct ceph_cap *cap;

		/*
		 * The request may be handled very quickly, before the new
		 * inode has been linked to the dentry.  We need to wait
		 * for ceph_finish_async_create(), which shouldn't be stuck
		 * for long or fail in theory, to finish when forwarding
		 * the request.
		 */
		if (!d_inode(req->r_dentry)) {
			err = wait_on_bit(&di->flags, CEPH_DENTRY_ASYNC_CREATE_BIT,
					  TASK_KILLABLE);
			if (err) {
				mutex_lock(&req->r_fill_mutex);
				set_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags);
				mutex_unlock(&req->r_fill_mutex);
				goto out_session;
			}
		}

		ci = ceph_inode(d_inode(req->r_dentry));

		spin_lock(&ci->i_ceph_lock);
		cap = ci->i_auth_cap;
		if (ci->i_ceph_flags & CEPH_I_ASYNC_CREATE && mds != cap->mds) {
			dout("do_request session changed for auth cap %d -> %d\n",
			     cap->session->s_mds, session->s_mds);

			/* Remove the auth cap from the old session */
			spin_lock(&cap->session->s_cap_lock);
			cap->session->s_nr_caps--;
			list_del_init(&cap->session_caps);
			spin_unlock(&cap->session->s_cap_lock);

			/* Add the auth cap to the new session */
			cap->mds = mds;
			cap->session = session;
			spin_lock(&session->s_cap_lock);
			session->s_nr_caps++;
			list_add_tail(&cap->session_caps, &session->s_caps);
			spin_unlock(&session->s_cap_lock);

			change_auth_cap_ses(ci, session);
		}
		spin_unlock(&ci->i_ceph_lock);
	}

	err = __send_request(session, req, false);

out_session:
	ceph_put_mds_session(session);
finish:
	if (err) {
		dout("__do_request early error %d\n", err);
		req->r_err = err;
		complete_request(mdsc, req);
		__unregister_request(mdsc, req);
	}
	return;
}

/*
 * called under mdsc->mutex
 */
static void __wake_requests(struct ceph_mds_client *mdsc,
			    struct list_head *head)
{
	struct ceph_mds_request *req;
	LIST_HEAD(tmp_list);

	list_splice_init(head, &tmp_list);

	while (!list_empty(&tmp_list)) {
		req = list_entry(tmp_list.next,
				 struct ceph_mds_request, r_wait);
		list_del_init(&req->r_wait);
		dout(" wake request %p tid %llu\n", req, req->r_tid);
		__do_request(mdsc, req);
	}
}

/*
 * Wake up threads with requests pending for @mds, so that they can
 * resubmit their requests to a possibly different mds.
 */
static void kick_requests(struct ceph_mds_client *mdsc, int mds)
{
	struct ceph_mds_request *req;
	struct rb_node *p = rb_first(&mdsc->request_tree);

	dout("kick_requests mds%d\n", mds);
	while (p) {
		req = rb_entry(p, struct ceph_mds_request, r_node);
		p = rb_next(p);
		if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags))
			continue;
		if (req->r_attempts > 0)
			continue; /* only new requests */
		if (req->r_session &&
		    req->r_session->s_mds == mds) {
			dout(" kicking tid %llu\n", req->r_tid);
			list_del_init(&req->r_wait);
			__do_request(mdsc, req);
		}
	}
}

int ceph_mdsc_submit_request(struct ceph_mds_client *mdsc, struct inode *dir,
			     struct ceph_mds_request *req)
{
	int err = 0;

	/* take CAP_PIN refs for r_inode, r_parent, r_old_dentry */
	if (req->r_inode)
		ceph_get_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
	if (req->r_parent) {
		struct ceph_inode_info *ci = ceph_inode(req->r_parent);
		int fmode = (req->r_op & CEPH_MDS_OP_WRITE) ?
			    CEPH_FILE_MODE_WR : CEPH_FILE_MODE_RD;
		spin_lock(&ci->i_ceph_lock);
		ceph_take_cap_refs(ci, CEPH_CAP_PIN, false);
		__ceph_touch_fmode(ci, mdsc, fmode);
		spin_unlock(&ci->i_ceph_lock);
	}
	if (req->r_old_dentry_dir)
		ceph_get_cap_refs(ceph_inode(req->r_old_dentry_dir),
				  CEPH_CAP_PIN);

	if (req->r_inode) {
		err = ceph_wait_on_async_create(req->r_inode);
		if (err) {
			dout("%s: wait for async create returned: %d\n",
			     __func__, err);
			return err;
		}
	}

	if (!err && req->r_old_inode) {
		err = ceph_wait_on_async_create(req->r_old_inode);
		if (err) {
			dout("%s: wait for async create returned: %d\n",
			     __func__, err);
			return err;
		}
	}

	dout("submit_request on %p for inode %p\n", req, dir);
	mutex_lock(&mdsc->mutex);
	__register_request(mdsc, req, dir);
	__do_request(mdsc, req);
	err = req->r_err;
	mutex_unlock(&mdsc->mutex);
	return err;
}

int ceph_mdsc_wait_request(struct ceph_mds_client *mdsc,
			   struct ceph_mds_request *req,
			   ceph_mds_request_wait_callback_t wait_func)
{
	int err;

	/* wait */
	dout("do_request waiting\n");
	if (wait_func) {
		err = wait_func(mdsc, req);
	} else {
		long timeleft = wait_for_completion_killable_timeout(
					&req->r_completion,
					ceph_timeout_jiffies(req->r_timeout));
		if (timeleft > 0)
			err = 0;
		else if (!timeleft)
			err = -ETIMEDOUT;  /* timed out */
		else
			err = timeleft;  /* killed */
	}
	dout("do_request waited, got %d\n", err);
	mutex_lock(&mdsc->mutex);

	/* only abort if we didn't race with a real reply */
	if (test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) {
		err = le32_to_cpu(req->r_reply_info.head->result);
	} else if (err < 0) {
		dout("aborted request %lld with %d\n", req->r_tid, err);

		/*
		 * ensure we aren't running concurrently with
		 * ceph_fill_trace or ceph_readdir_prepopulate, which
		 * rely on locks (dir mutex) held by our caller.
		 */
		mutex_lock(&req->r_fill_mutex);
		req->r_err = err;
		set_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags);
		mutex_unlock(&req->r_fill_mutex);

		if (req->r_parent &&
		    (req->r_op & CEPH_MDS_OP_WRITE))
			ceph_invalidate_dir_request(req);
	} else {
		err = req->r_err;
	}

	mutex_unlock(&mdsc->mutex);
	return err;
}

/*
 * Synchronously perform an mds request.  Take care of all of the
 * session setup, forwarding, retry details.
 */
int ceph_mdsc_do_request(struct ceph_mds_client *mdsc,
			 struct inode *dir,
			 struct ceph_mds_request *req)
{
	int err;

	dout("do_request on %p\n", req);

	/* issue */
	err = ceph_mdsc_submit_request(mdsc, dir, req);
	if (!err)
		err = ceph_mdsc_wait_request(mdsc, req, NULL);
	dout("do_request %p done, result %d\n", req, err);
	return err;
}
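
/*
 * A minimal sketch of the request lifecycle from a caller's perspective
 * (error handling trimmed; CEPH_MDS_OP_GETATTR chosen only for
 * illustration):
 *
 *	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR,
 *				       USE_ANY_MDS);
 *	req->r_inode = inode;
 *	ihold(inode);
 *	req->r_num_caps = 1;
 *	err = ceph_mdsc_do_request(mdsc, NULL, req);	// submit + wait
 *	ceph_mdsc_put_request(req);
 *
 * which is essentially the pattern used by __ceph_do_getattr() in inode.c.
 */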

/*
 * Invalidate dir's completeness, dentry lease state on an aborted MDS
 * namespace request.
 */
void ceph_invalidate_dir_request(struct ceph_mds_request *req)
{
	struct inode *dir = req->r_parent;
	struct inode *old_dir = req->r_old_dentry_dir;

	dout("invalidate_dir_request %p %p (complete, lease(s))\n", dir, old_dir);

	ceph_dir_clear_complete(dir);
	if (old_dir)
		ceph_dir_clear_complete(old_dir);
	if (req->r_dentry)
		ceph_invalidate_dentry_lease(req->r_dentry);
	if (req->r_old_dentry)
		ceph_invalidate_dentry_lease(req->r_old_dentry);
}

/*
 * Handle mds reply.
 *
 * We take the session mutex and parse and process the reply immediately.
 * This preserves the logical ordering of replies, capabilities, etc., sent
 * by the MDS as they are applied to our local cache.
 */
static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
{
	struct ceph_mds_client *mdsc = session->s_mdsc;
	struct ceph_mds_request *req;
	struct ceph_mds_reply_head *head = msg->front.iov_base;
	struct ceph_mds_reply_info_parsed *rinfo;  /* parsed reply info */
	struct ceph_snap_realm *realm;
	u64 tid;
	int err, result;
	int mds = session->s_mds;
	bool close_sessions = false;

	if (msg->front.iov_len < sizeof(*head)) {
		pr_err("mdsc_handle_reply got corrupt (short) reply\n");
		ceph_msg_dump(msg);
		return;
	}

	/* get request, session */
	tid = le64_to_cpu(msg->hdr.tid);
	mutex_lock(&mdsc->mutex);
	req = lookup_get_request(mdsc, tid);
	if (!req) {
		dout("handle_reply on unknown tid %llu\n", tid);
		mutex_unlock(&mdsc->mutex);
		return;
	}
	dout("handle_reply %p\n", req);

	/* correct session? */
	if (req->r_session != session) {
		pr_err("mdsc_handle_reply got %llu on session mds%d"
		       " not mds%d\n", tid, session->s_mds,
		       req->r_session ? req->r_session->s_mds : -1);
		mutex_unlock(&mdsc->mutex);
		goto out;
	}

	/* dup? */
	if ((test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags) && !head->safe) ||
	    (test_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags) && head->safe)) {
		pr_warn("got a dup %s reply on %llu from mds%d\n",
			head->safe ? "safe" : "unsafe", tid, mds);
		mutex_unlock(&mdsc->mutex);
		goto out;
	}
	if (test_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags)) {
		pr_warn("got unsafe after safe on %llu from mds%d\n",
			tid, mds);
		mutex_unlock(&mdsc->mutex);
		goto out;
	}

	result = le32_to_cpu(head->result);

	if (head->safe) {
		set_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags);
		__unregister_request(mdsc, req);

		/* last request during umount? */
		if (mdsc->stopping && !__get_oldest_req(mdsc))
			complete_all(&mdsc->safe_umount_waiters);

		if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
			/*
			 * We already handled the unsafe response, now do the
			 * cleanup.  No need to examine the response; the MDS
			 * doesn't include any result info in the safe
			 * response.  And even if it did, there is nothing
			 * useful we could do with a revised return value.
			 */
			dout("got safe reply %llu, mds%d\n", tid, mds);

			mutex_unlock(&mdsc->mutex);
			goto out;
		}
	} else {
		set_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags);
		list_add_tail(&req->r_unsafe_item, &req->r_session->s_unsafe);
	}

	dout("handle_reply tid %lld result %d\n", tid, result);
	rinfo = &req->r_reply_info;
	if (test_bit(CEPHFS_FEATURE_REPLY_ENCODING, &session->s_features))
		err = parse_reply_info(session, msg, rinfo, (u64)-1);
	else
		err = parse_reply_info(session, msg, rinfo, session->s_con.peer_features);
	mutex_unlock(&mdsc->mutex);

	/* Must find target inode outside of mutexes to avoid deadlocks */
	if ((err >= 0) && rinfo->head->is_target) {
		struct inode *in;
		struct ceph_vino tvino = {
			.ino  = le64_to_cpu(rinfo->targeti.in->ino),
			.snap = le64_to_cpu(rinfo->targeti.in->snapid)
		};

		in = ceph_get_inode(mdsc->fsc->sb, tvino);
		if (IS_ERR(in)) {
			err = PTR_ERR(in);
			mutex_lock(&session->s_mutex);
			goto out_err;
		}
		req->r_target_inode = in;
	}

	mutex_lock(&session->s_mutex);
	if (err < 0) {
		pr_err("mdsc_handle_reply got corrupt reply mds%d(tid:%lld)\n", mds, tid);
		ceph_msg_dump(msg);
		goto out_err;
	}

	/* snap trace */
	realm = NULL;
	if (rinfo->snapblob_len) {
		down_write(&mdsc->snap_rwsem);
		err = ceph_update_snap_trace(mdsc, rinfo->snapblob,
				rinfo->snapblob + rinfo->snapblob_len,
				le32_to_cpu(head->op) == CEPH_MDS_OP_RMSNAP,
				&realm);
		if (err) {
			up_write(&mdsc->snap_rwsem);
			close_sessions = true;
			if (err == -EIO)
				ceph_msg_dump(msg);
			goto out_err;
		}
		downgrade_write(&mdsc->snap_rwsem);
	} else {
		down_read(&mdsc->snap_rwsem);
	}

	/* insert trace into our cache */
	mutex_lock(&req->r_fill_mutex);
	current->journal_info = req;
	err = ceph_fill_trace(mdsc->fsc->sb, req);
	if (err == 0) {
		if (result == 0 && (req->r_op == CEPH_MDS_OP_READDIR ||
				    req->r_op == CEPH_MDS_OP_LSSNAP))
			ceph_readdir_prepopulate(req, req->r_session);
	}
	current->journal_info = NULL;
	mutex_unlock(&req->r_fill_mutex);

	up_read(&mdsc->snap_rwsem);
	if (realm)
		ceph_put_snap_realm(mdsc, realm);

	if (err == 0) {
		if (req->r_target_inode &&
		    test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
			struct ceph_inode_info *ci =
				ceph_inode(req->r_target_inode);
			spin_lock(&ci->i_unsafe_lock);
			list_add_tail(&req->r_unsafe_target_item,
				      &ci->i_unsafe_iops);
			spin_unlock(&ci->i_unsafe_lock);
		}

		ceph_unreserve_caps(mdsc, &req->r_caps_reservation);
	}
out_err:
	mutex_lock(&mdsc->mutex);
	if (!test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
		if (err) {
			req->r_err = err;
		} else {
			req->r_reply = ceph_msg_get(msg);
			set_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags);
		}
	} else {
		dout("reply arrived after request %lld was aborted\n", tid);
	}
	mutex_unlock(&mdsc->mutex);

	mutex_unlock(&session->s_mutex);

	/* kick calling process */
	complete_request(mdsc, req);

	ceph_update_metadata_metrics(&mdsc->metric, req->r_start_latency,
				     req->r_end_latency, err);
out:
	ceph_mdsc_put_request(req);

	/* Defer closing the sessions until after the s_mutex lock has been
	 * released */
	if (close_sessions)
		ceph_mdsc_close_sessions(mdsc);
	return;
}


/*
 * handle mds notification that our request has been forwarded.
 */
static void handle_forward(struct ceph_mds_client *mdsc,
			   struct ceph_mds_session *session,
			   struct ceph_msg *msg)
{
	struct ceph_mds_request *req;
	u64 tid = le64_to_cpu(msg->hdr.tid);
	u32 next_mds;
	u32 fwd_seq;
	int err = -EINVAL;
	void *p = msg->front.iov_base;
	void *end = p + msg->front.iov_len;
	bool aborted = false;

	ceph_decode_need(&p, end, 2*sizeof(u32), bad);
	next_mds = ceph_decode_32(&p);
	fwd_seq = ceph_decode_32(&p);

	mutex_lock(&mdsc->mutex);
	req = lookup_get_request(mdsc, tid);
	if (!req) {
		mutex_unlock(&mdsc->mutex);
		dout("forward tid %llu to mds%d - req dne\n", tid, next_mds);
		return;  /* dup reply? */
	}

	if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
		dout("forward tid %llu aborted, unregistering\n", tid);
		__unregister_request(mdsc, req);
	} else if (fwd_seq <= req->r_num_fwd) {
		/*
		 * The type of 'num_fwd' in ceph 'MClientRequestForward'
		 * is 'int32_t', while in 'ceph_mds_request_head' the
		 * type is '__u8'. So if the request bounces between
		 * MDSes more than 256 times, the client will get stuck.
		 *
		 * In that case it's usually a bug in the MDS, and
		 * continuing to bounce the request makes no sense.
		 *
		 * In future this could be fixed in ceph code, so avoid
		 * hard-coding the limit here.
		 */
		int max = sizeof_field(struct ceph_mds_request_head, num_fwd);
		max = 1 << (max * BITS_PER_BYTE);
		if (req->r_num_fwd >= max) {
			mutex_lock(&req->r_fill_mutex);
			req->r_err = -EMULTIHOP;
			set_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags);
			mutex_unlock(&req->r_fill_mutex);
			aborted = true;
			pr_warn_ratelimited("forward tid %llu seq overflow\n",
					    tid);
		} else {
			dout("forward tid %llu to mds%d - old seq %d <= %d\n",
			     tid, next_mds, req->r_num_fwd, fwd_seq);
		}
	} else {
		/* resend. forward race not possible; mds would drop */
		dout("forward tid %llu to mds%d (we resend)\n", tid, next_mds);
		BUG_ON(req->r_err);
		BUG_ON(test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags));
		req->r_attempts = 0;
		req->r_num_fwd = fwd_seq;
		req->r_resend_mds = next_mds;
		put_request_session(req);
		__do_request(mdsc, req);
	}
	mutex_unlock(&mdsc->mutex);

	/* kick calling process */
	if (aborted)
		complete_request(mdsc, req);
	ceph_mdsc_put_request(req);
	return;

bad:
	pr_err("mdsc_handle_forward decode error err=%d\n", err);
}

static int __decode_session_metadata(void **p, void *end,
				     bool *blocklisted)
{
	/* map<string,string> */
	u32 n;
	bool err_str;
	ceph_decode_32_safe(p, end, n, bad);
	while (n-- > 0) {
		u32 len;
		ceph_decode_32_safe(p, end, len, bad);
		ceph_decode_need(p, end, len, bad);
		err_str = !strncmp(*p, "error_string", len);
		*p += len;
		ceph_decode_32_safe(p, end, len, bad);
		ceph_decode_need(p, end, len, bad);
		/*
		 * Match "blocklisted (blacklisted)" from newer MDSes,
		 * or "blacklisted" from older MDSes.
		 */
		if (err_str && strnstr(*p, "blacklisted", len))
			*blocklisted = true;
		*p += len;
	}
	return 0;
bad:
	return -1;
}
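
/*
 * The metadata blob decoded above is a ceph-encoded map<string,string>:
 *
 *	u32 n                                -- number of key/value pairs
 *	n * { u32 klen; char key[klen];
 *	      u32 vlen; char val[vlen]; }
 *
 * Only the "error_string" value is inspected, and only to sniff for a
 * "blacklisted" substring (which also matches the newer
 * "blocklisted (blacklisted)" wording) so the client can flag itself.
 */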

/*
 * handle a mds session control message
 */
static void handle_session(struct ceph_mds_session *session,
			   struct ceph_msg *msg)
{
	struct ceph_mds_client *mdsc = session->s_mdsc;
	int mds = session->s_mds;
	int msg_version = le16_to_cpu(msg->hdr.version);
	void *p = msg->front.iov_base;
	void *end = p + msg->front.iov_len;
	struct ceph_mds_session_head *h;
	u32 op;
	u64 seq, features = 0;
	int wake = 0;
	bool blocklisted = false;

	/* decode */
	ceph_decode_need(&p, end, sizeof(*h), bad);
	h = p;
	p += sizeof(*h);

	op = le32_to_cpu(h->op);
	seq = le64_to_cpu(h->seq);

	if (msg_version >= 3) {
		u32 len;
		/* version >= 2 and < 5, decode metadata, skip otherwise
		 * as it's handled via flags.
		 */
		if (msg_version >= 5)
			ceph_decode_skip_map(&p, end, string, string, bad);
		else if (__decode_session_metadata(&p, end, &blocklisted) < 0)
			goto bad;

		/* version >= 3, feature bits */
		ceph_decode_32_safe(&p, end, len, bad);
		if (len) {
			ceph_decode_64_safe(&p, end, features, bad);
			p += len - sizeof(features);
		}
	}

	if (msg_version >= 5) {
		u32 flags, len;

		/* version >= 4 */
		ceph_decode_skip_16(&p, end, bad); /* struct_v, struct_cv */
		ceph_decode_32_safe(&p, end, len, bad); /* len */
		ceph_decode_skip_n(&p, end, len, bad); /* metric_spec */

		/* version >= 5, flags */
		ceph_decode_32_safe(&p, end, flags, bad);
		if (flags & CEPH_SESSION_BLOCKLISTED) {
			pr_warn("mds%d session blocklisted\n", session->s_mds);
			blocklisted = true;
		}
	}

	mutex_lock(&mdsc->mutex);
	if (op == CEPH_SESSION_CLOSE) {
		ceph_get_mds_session(session);
		__unregister_session(mdsc, session);
	}
	/* FIXME: this ttl calculation is generous */
	session->s_ttl = jiffies + HZ*mdsc->mdsmap->m_session_autoclose;
	mutex_unlock(&mdsc->mutex);

	mutex_lock(&session->s_mutex);

	dout("handle_session mds%d %s %p state %s seq %llu\n",
	     mds, ceph_session_op_name(op), session,
	     ceph_session_state_name(session->s_state), seq);

	if (session->s_state == CEPH_MDS_SESSION_HUNG) {
		session->s_state = CEPH_MDS_SESSION_OPEN;
		pr_info("mds%d came back\n", session->s_mds);
	}

	switch (op) {
	case CEPH_SESSION_OPEN:
		if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
			pr_info("mds%d reconnect success\n", session->s_mds);

		if (session->s_state == CEPH_MDS_SESSION_OPEN) {
			pr_notice("mds%d is already opened\n", session->s_mds);
		} else {
			session->s_state = CEPH_MDS_SESSION_OPEN;
			session->s_features = features;
			renewed_caps(mdsc, session, 0);
			if (test_bit(CEPHFS_FEATURE_METRIC_COLLECT,
				     &session->s_features))
				metric_schedule_delayed(&mdsc->metric);
		}

		/*
		 * The connection may have been broken and the session on
		 * the client side reinitialized, so update the seq anyway.
		 */
		if (!session->s_seq && seq)
			session->s_seq = seq;

		wake = 1;
		if (mdsc->stopping)
			__close_session(mdsc, session);
		break;

	case CEPH_SESSION_RENEWCAPS:
		if (session->s_renew_seq == seq)
			renewed_caps(mdsc, session, 1);
		break;

	case CEPH_SESSION_CLOSE:
		if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
			pr_info("mds%d reconnect denied\n", session->s_mds);
		session->s_state = CEPH_MDS_SESSION_CLOSED;
		cleanup_session_requests(mdsc, session);
		remove_session_caps(session);
		wake = 2; /* for good measure */
		wake_up_all(&mdsc->session_close_wq);
		break;

	case CEPH_SESSION_STALE:
		pr_info("mds%d caps went stale, renewing\n",
			session->s_mds);
		atomic_inc(&session->s_cap_gen);
		session->s_cap_ttl = jiffies - 1;
		send_renew_caps(mdsc, session);
		break;

	case CEPH_SESSION_RECALL_STATE:
		ceph_trim_caps(mdsc, session, le32_to_cpu(h->max_caps));
		break;

	case CEPH_SESSION_FLUSHMSG:
		/* flush cap releases */
		spin_lock(&session->s_cap_lock);
		if (session->s_num_cap_releases)
			ceph_flush_cap_releases(mdsc, session);
		spin_unlock(&session->s_cap_lock);

		send_flushmsg_ack(mdsc, session, seq);
		break;

	case CEPH_SESSION_FORCE_RO:
		dout("force_session_readonly %p\n", session);
		spin_lock(&session->s_cap_lock);
		session->s_readonly = true;
		spin_unlock(&session->s_cap_lock);
		wake_up_session_caps(session, FORCE_RO);
		break;

	case CEPH_SESSION_REJECT:
		WARN_ON(session->s_state != CEPH_MDS_SESSION_OPENING);
		pr_info("mds%d rejected session\n", session->s_mds);
		session->s_state = CEPH_MDS_SESSION_REJECTED;
		cleanup_session_requests(mdsc, session);
		remove_session_caps(session);
		if (blocklisted)
			mdsc->fsc->blocklisted = true;
		wake = 2; /* for good measure */
		break;

	default:
		pr_err("mdsc_handle_session bad op %d mds%d\n", op, mds);
		WARN_ON(1);
	}

	mutex_unlock(&session->s_mutex);
	if (wake) {
		mutex_lock(&mdsc->mutex);
		__wake_requests(mdsc, &session->s_waiting);
		if (wake == 2)
			kick_requests(mdsc, mds);
		mutex_unlock(&mdsc->mutex);
	}
	if (op == CEPH_SESSION_CLOSE)
		ceph_put_mds_session(session);
	return;

bad:
	pr_err("mdsc_handle_session corrupt message mds%d len %d\n", mds,
	       (int)msg->front.iov_len);
	ceph_msg_dump(msg);
	return;
}

void ceph_mdsc_release_dir_caps(struct ceph_mds_request *req)
{
	int dcaps;

	dcaps = xchg(&req->r_dir_caps, 0);
	if (dcaps) {
		dout("releasing r_dir_caps=%s\n", ceph_cap_string(dcaps));
		ceph_put_cap_refs(ceph_inode(req->r_parent), dcaps);
	}
}

void ceph_mdsc_release_dir_caps_no_check(struct ceph_mds_request *req)
{
	int dcaps;

	dcaps = xchg(&req->r_dir_caps, 0);
	if (dcaps) {
		dout("releasing r_dir_caps=%s\n", ceph_cap_string(dcaps));
		ceph_put_cap_refs_no_check_caps(ceph_inode(req->r_parent),
						dcaps);
	}
}

/*
 * called under session->s_mutex.
 */
static void replay_unsafe_requests(struct ceph_mds_client *mdsc,
				   struct ceph_mds_session *session)
{
	struct ceph_mds_request *req, *nreq;
	struct rb_node *p;

	dout("replay_unsafe_requests mds%d\n", session->s_mds);

	mutex_lock(&mdsc->mutex);
	list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item)
		__send_request(session, req, true);

	/*
	 * also re-send old requests when the MDS enters the reconnect
	 * stage, so that the MDS can process completed requests in its
	 * clientreplay stage.
	 */
	p = rb_first(&mdsc->request_tree);
	while (p) {
		req = rb_entry(p, struct ceph_mds_request, r_node);
		p = rb_next(p);
		if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags))
			continue;
		if (req->r_attempts == 0)
			continue; /* only old requests */
		if (!req->r_session)
			continue;
		if (req->r_session->s_mds != session->s_mds)
			continue;

		ceph_mdsc_release_dir_caps_no_check(req);

		__send_request(session, req, true);
	}
	mutex_unlock(&mdsc->mutex);
}

static int send_reconnect_partial(struct ceph_reconnect_state *recon_state)
{
	struct ceph_msg *reply;
	struct ceph_pagelist *_pagelist;
	struct page *page;
	__le32 *addr;
	int err = -ENOMEM;

	if (!recon_state->allow_multi)
		return -ENOSPC;

	/* can't handle message that contains both caps and realm */
	BUG_ON(!recon_state->nr_caps == !recon_state->nr_realms);

	/* pre-allocate new pagelist */
	_pagelist = ceph_pagelist_alloc(GFP_NOFS);
	if (!_pagelist)
		return -ENOMEM;

	reply = ceph_msg_new2(CEPH_MSG_CLIENT_RECONNECT, 0, 1, GFP_NOFS, false);
	if (!reply)
		goto fail_msg;

	/* placeholder for nr_caps */
	err = ceph_pagelist_encode_32(_pagelist, 0);
	if (err < 0)
		goto fail;

	if (recon_state->nr_caps) {
		/* currently encoding caps */
		err = ceph_pagelist_encode_32(recon_state->pagelist, 0);
		if (err)
			goto fail;
	} else {
		/* placeholder for nr_realms (currently encoding realms) */
		err = ceph_pagelist_encode_32(_pagelist, 0);
		if (err < 0)
			goto fail;
	}

	err = ceph_pagelist_encode_8(recon_state->pagelist, 1);
	if (err)
		goto fail;

	page = list_first_entry(&recon_state->pagelist->head, struct page, lru);
	addr = kmap_atomic(page);
	if (recon_state->nr_caps) {
		/* currently encoding caps */
		*addr = cpu_to_le32(recon_state->nr_caps);
	} else {
		/* currently encoding realms */
		*(addr + 1) = cpu_to_le32(recon_state->nr_realms);
	}
	kunmap_atomic(addr);

	reply->hdr.version = cpu_to_le16(5);
	reply->hdr.compat_version = cpu_to_le16(4);

	reply->hdr.data_len = cpu_to_le32(recon_state->pagelist->length);
	ceph_msg_data_add_pagelist(reply, recon_state->pagelist);

	ceph_con_send(&recon_state->session->s_con, reply);
	ceph_pagelist_release(recon_state->pagelist);

	recon_state->pagelist = _pagelist;
	recon_state->nr_caps = 0;
	recon_state->nr_realms = 0;
	recon_state->msg_version = 5;
	return 0;
fail:
	ceph_msg_put(reply);
fail_msg:
	ceph_pagelist_release(_pagelist);
	return err;
}
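
/*
 * The kmap dance above back-patches counts that were unknown when the
 * pagelist was started: the first page of the outgoing pagelist begins
 * with
 *
 *	__le32 nr_caps;		// addr[0], patched when encoding caps
 *	__le32 nr_realms;	// addr[1], patched when encoding realms
 *
 * so a partially filled pagelist can be sent as a v5 reconnect fragment
 * while a fresh pagelist (with zeroed placeholders) takes its place.
 */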

static struct dentry* d_find_primary(struct inode *inode)
{
	struct dentry *alias, *dn = NULL;

	if (hlist_empty(&inode->i_dentry))
		return NULL;

	spin_lock(&inode->i_lock);
	if (hlist_empty(&inode->i_dentry))
		goto out_unlock;

	if (S_ISDIR(inode->i_mode)) {
		alias = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
		if (!IS_ROOT(alias))
			dn = dget(alias);
		goto out_unlock;
	}

	hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
		spin_lock(&alias->d_lock);
		if (!d_unhashed(alias) &&
		    (ceph_dentry(alias)->flags & CEPH_DENTRY_PRIMARY_LINK)) {
			dn = dget_dlock(alias);
		}
		spin_unlock(&alias->d_lock);
		if (dn)
			break;
	}
out_unlock:
	spin_unlock(&inode->i_lock);
	return dn;
}

/*
 * Encode information about a cap for a reconnect with the MDS.
 */
static int reconnect_caps_cb(struct inode *inode, struct ceph_cap *cap,
			     void *arg)
{
	union {
		struct ceph_mds_cap_reconnect v2;
		struct ceph_mds_cap_reconnect_v1 v1;
	} rec;
	struct ceph_inode_info *ci = cap->ci;
	struct ceph_reconnect_state *recon_state = arg;
	struct ceph_pagelist *pagelist = recon_state->pagelist;
	struct dentry *dentry;
	char *path;
	int pathlen = 0, err;
	u64 pathbase;
	u64 snap_follows;

	dout(" adding %p ino %llx.%llx cap %p %lld %s\n",
	     inode, ceph_vinop(inode), cap, cap->cap_id,
	     ceph_cap_string(cap->issued));

	dentry = d_find_primary(inode);
	if (dentry) {
		/* set pathbase to parent dir when msg_version >= 2 */
		path = ceph_mdsc_build_path(dentry, &pathlen, &pathbase,
					    recon_state->msg_version >= 2);
		dput(dentry);
		if (IS_ERR(path)) {
			err = PTR_ERR(path);
			goto out_err;
		}
	} else {
		path = NULL;
		pathbase = 0;
	}

	spin_lock(&ci->i_ceph_lock);
	cap->seq = 0;        /* reset cap seq */
	cap->issue_seq = 0;  /* and issue_seq */
	cap->mseq = 0;       /* and migrate_seq */
	cap->cap_gen = atomic_read(&cap->session->s_cap_gen);

	/* These are lost when the session goes away */
	if (S_ISDIR(inode->i_mode)) {
		if (cap->issued & CEPH_CAP_DIR_CREATE) {
			ceph_put_string(rcu_dereference_raw(ci->i_cached_layout.pool_ns));
			memset(&ci->i_cached_layout, 0, sizeof(ci->i_cached_layout));
		}
		cap->issued &= ~CEPH_CAP_ANY_DIR_OPS;
	}

	if (recon_state->msg_version >= 2) {
		rec.v2.cap_id = cpu_to_le64(cap->cap_id);
		rec.v2.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
		rec.v2.issued = cpu_to_le32(cap->issued);
		rec.v2.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
		rec.v2.pathbase = cpu_to_le64(pathbase);
		rec.v2.flock_len = (__force __le32)
			((ci->i_ceph_flags & CEPH_I_ERROR_FILELOCK) ? 0 : 1);
	} else {
		rec.v1.cap_id = cpu_to_le64(cap->cap_id);
		rec.v1.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
		rec.v1.issued = cpu_to_le32(cap->issued);
		rec.v1.size = cpu_to_le64(i_size_read(inode));
		ceph_encode_timespec64(&rec.v1.mtime, &inode->i_mtime);
		ceph_encode_timespec64(&rec.v1.atime, &inode->i_atime);
		rec.v1.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
		rec.v1.pathbase = cpu_to_le64(pathbase);
	}

	if (list_empty(&ci->i_cap_snaps)) {
		snap_follows = ci->i_head_snapc ? ci->i_head_snapc->seq : 0;
	} else {
		struct ceph_cap_snap *capsnap =
			list_first_entry(&ci->i_cap_snaps,
					 struct ceph_cap_snap, ci_item);
		snap_follows = capsnap->follows;
	}
	spin_unlock(&ci->i_ceph_lock);

	if (recon_state->msg_version >= 2) {
		int num_fcntl_locks, num_flock_locks;
		struct ceph_filelock *flocks = NULL;
		size_t struct_len, total_len = sizeof(u64);
		u8 struct_v = 0;

encode_again:
		if (rec.v2.flock_len) {
			ceph_count_locks(inode, &num_fcntl_locks, &num_flock_locks);
		} else {
			num_fcntl_locks = 0;
			num_flock_locks = 0;
		}
		if (num_fcntl_locks + num_flock_locks > 0) {
			flocks = kmalloc_array(num_fcntl_locks + num_flock_locks,
					       sizeof(struct ceph_filelock),
					       GFP_NOFS);
			if (!flocks) {
				err = -ENOMEM;
				goto out_err;
			}
			err = ceph_encode_locks_to_buffer(inode, flocks,
							  num_fcntl_locks,
							  num_flock_locks);
			if (err) {
				kfree(flocks);
				flocks = NULL;
				if (err == -ENOSPC)
					goto encode_again;
4022 goto out_err;
4023 }
4024 } else {
4025 kfree(flocks);
4026 flocks = NULL;
4027 }
4028
4029 if (recon_state->msg_version >= 3) {
4030 /* version, compat_version and struct_len */
4031 total_len += 2 * sizeof(u8) + sizeof(u32);
4032 struct_v = 2;
4033 }
4034 /*
4035 * number of encoded locks is stable, so copy to pagelist
4036 */
4037 struct_len = 2 * sizeof(u32) +
4038 (num_fcntl_locks + num_flock_locks) *
4039 sizeof(struct ceph_filelock);
4040 rec.v2.flock_len = cpu_to_le32(struct_len);
4041
4042 struct_len += sizeof(u32) + pathlen + sizeof(rec.v2);
4043
4044 if (struct_v >= 2)
4045 struct_len += sizeof(u64); /* snap_follows */
4046
4047 total_len += struct_len;
4048
4049 if (pagelist->length + total_len > RECONNECT_MAX_SIZE) {
4050 err = send_reconnect_partial(recon_state);
4051 if (err)
4052 goto out_freeflocks;
4053 pagelist = recon_state->pagelist;
4054 }
4055
4056 err = ceph_pagelist_reserve(pagelist, total_len);
4057 if (err)
4058 goto out_freeflocks;
4059
4060 ceph_pagelist_encode_64(pagelist, ceph_ino(inode));
4061 if (recon_state->msg_version >= 3) {
4062 ceph_pagelist_encode_8(pagelist, struct_v);
4063 ceph_pagelist_encode_8(pagelist, 1);
4064 ceph_pagelist_encode_32(pagelist, struct_len);
4065 }
4066 ceph_pagelist_encode_string(pagelist, path, pathlen);
4067 ceph_pagelist_append(pagelist, &rec, sizeof(rec.v2));
4068 ceph_locks_to_pagelist(flocks, pagelist,
4069 num_fcntl_locks, num_flock_locks);
4070 if (struct_v >= 2)
4071 ceph_pagelist_encode_64(pagelist, snap_follows);
4072out_freeflocks:
4073 kfree(flocks);
4074 } else {
4075 err = ceph_pagelist_reserve(pagelist,
4076 sizeof(u64) + sizeof(u32) +
4077 pathlen + sizeof(rec.v1));
4078 if (err)
4079 goto out_err;
4080
4081 ceph_pagelist_encode_64(pagelist, ceph_ino(inode));
4082 ceph_pagelist_encode_string(pagelist, path, pathlen);
4083 ceph_pagelist_append(pagelist, &rec, sizeof(rec.v1));
4084 }
4085
4086out_err:
4087 ceph_mdsc_free_path(path, pathlen);
4088 if (!err)
4089 recon_state->nr_caps++;
4090 return err;
4091}
4092
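/*
 * Encode one record per known snap realm into the reconnect message,
 * splitting into partial messages if we would exceed
 * RECONNECT_MAX_SIZE.
 */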
static int encode_snap_realms(struct ceph_mds_client *mdsc,
                  struct ceph_reconnect_state *recon_state)
{
    struct rb_node *p;
    struct ceph_pagelist *pagelist = recon_state->pagelist;
    int err = 0;

    if (recon_state->msg_version >= 4) {
        err = ceph_pagelist_encode_32(pagelist, mdsc->num_snap_realms);
        if (err < 0)
            goto fail;
    }

    /*
     * snaprealms. we provide mds with the ino, seq (version), and
     * parent for all of our realms. If the mds has any newer info,
     * it will tell us.
     */
    for (p = rb_first(&mdsc->snap_realms); p; p = rb_next(p)) {
        struct ceph_snap_realm *realm =
            rb_entry(p, struct ceph_snap_realm, node);
        struct ceph_mds_snaprealm_reconnect sr_rec;

        if (recon_state->msg_version >= 4) {
            size_t need = sizeof(u8) * 2 + sizeof(u32) +
                      sizeof(sr_rec);

            if (pagelist->length + need > RECONNECT_MAX_SIZE) {
                err = send_reconnect_partial(recon_state);
                if (err)
                    goto fail;
                pagelist = recon_state->pagelist;
            }

            err = ceph_pagelist_reserve(pagelist, need);
            if (err)
                goto fail;

            ceph_pagelist_encode_8(pagelist, 1);
            ceph_pagelist_encode_8(pagelist, 1);
            ceph_pagelist_encode_32(pagelist, sizeof(sr_rec));
        }

        dout(" adding snap realm %llx seq %lld parent %llx\n",
             realm->ino, realm->seq, realm->parent_ino);
        sr_rec.ino = cpu_to_le64(realm->ino);
        sr_rec.seq = cpu_to_le64(realm->seq);
        sr_rec.parent = cpu_to_le64(realm->parent_ino);

        err = ceph_pagelist_append(pagelist, &sr_rec, sizeof(sr_rec));
        if (err)
            goto fail;

        recon_state->nr_realms++;
    }
fail:
    return err;
}

/*
 * If an MDS fails and recovers, clients need to reconnect in order to
 * reestablish shared state. This includes all caps issued through
 * this session _and_ the snap_realm hierarchy. Because it's not
 * clear which snap realms the mds cares about, we send everything we
 * know about. That ensures we'll then get any new info the
 * recovering MDS might have.
 *
 * This is a relatively heavyweight operation, but it's rare.
 */
static void send_mds_reconnect(struct ceph_mds_client *mdsc,
                   struct ceph_mds_session *session)
{
    struct ceph_msg *reply;
    int mds = session->s_mds;
    int err = -ENOMEM;
    struct ceph_reconnect_state recon_state = {
        .session = session,
    };
    LIST_HEAD(dispose);

    pr_info("mds%d reconnect start\n", mds);

    recon_state.pagelist = ceph_pagelist_alloc(GFP_NOFS);
    if (!recon_state.pagelist)
        goto fail_nopagelist;

    reply = ceph_msg_new2(CEPH_MSG_CLIENT_RECONNECT, 0, 1, GFP_NOFS, false);
    if (!reply)
        goto fail_nomsg;

    xa_destroy(&session->s_delegated_inos);

    mutex_lock(&session->s_mutex);
    session->s_state = CEPH_MDS_SESSION_RECONNECTING;
    session->s_seq = 0;

    dout("session %p state %s\n", session,
         ceph_session_state_name(session->s_state));

    atomic_inc(&session->s_cap_gen);

    spin_lock(&session->s_cap_lock);
    /* don't know if session is readonly */
    session->s_readonly = 0;
    /*
     * notify __ceph_remove_cap() that we are composing a cap reconnect.
     * If a cap gets released before being added to the cap reconnect,
     * __ceph_remove_cap() should skip queuing the cap release.
     */
    session->s_cap_reconnect = 1;
    /* drop old cap expires; we're about to reestablish that state */
    detach_cap_releases(session, &dispose);
    spin_unlock(&session->s_cap_lock);
    dispose_cap_releases(mdsc, &dispose);

    /* trim unused caps to reduce MDS's cache rejoin time */
    if (mdsc->fsc->sb->s_root)
        shrink_dcache_parent(mdsc->fsc->sb->s_root);

    ceph_con_close(&session->s_con);
    ceph_con_open(&session->s_con,
              CEPH_ENTITY_TYPE_MDS, mds,
              ceph_mdsmap_get_addr(mdsc->mdsmap, mds));

    /* replay unsafe requests */
    replay_unsafe_requests(mdsc, session);

    ceph_early_kick_flushing_caps(mdsc, session);

    down_read(&mdsc->snap_rwsem);

    /* placeholder for nr_caps */
    err = ceph_pagelist_encode_32(recon_state.pagelist, 0);
    if (err)
        goto fail;

    if (test_bit(CEPHFS_FEATURE_MULTI_RECONNECT, &session->s_features)) {
        recon_state.msg_version = 3;
        recon_state.allow_multi = true;
    } else if (session->s_con.peer_features & CEPH_FEATURE_MDSENC) {
        recon_state.msg_version = 3;
    } else {
        recon_state.msg_version = 2;
    }
    /* traverse this session's caps */
    err = ceph_iterate_session_caps(session, reconnect_caps_cb, &recon_state);

    spin_lock(&session->s_cap_lock);
    session->s_cap_reconnect = 0;
    spin_unlock(&session->s_cap_lock);

    if (err < 0)
        goto fail;

    /* check if all realms can be encoded into current message */
    if (mdsc->num_snap_realms) {
        size_t total_len =
            recon_state.pagelist->length +
            mdsc->num_snap_realms *
            sizeof(struct ceph_mds_snaprealm_reconnect);
        if (recon_state.msg_version >= 4) {
            /* number of realms */
            total_len += sizeof(u32);
            /* version, compat_version and struct_len */
            total_len += mdsc->num_snap_realms *
                     (2 * sizeof(u8) + sizeof(u32));
        }
        if (total_len > RECONNECT_MAX_SIZE) {
            if (!recon_state.allow_multi) {
                err = -ENOSPC;
                goto fail;
            }
            if (recon_state.nr_caps) {
                err = send_reconnect_partial(&recon_state);
                if (err)
                    goto fail;
            }
            recon_state.msg_version = 5;
        }
    }

    err = encode_snap_realms(mdsc, &recon_state);
    if (err < 0)
        goto fail;

    if (recon_state.msg_version >= 5) {
        err = ceph_pagelist_encode_8(recon_state.pagelist, 0);
        if (err < 0)
            goto fail;
    }

    if (recon_state.nr_caps || recon_state.nr_realms) {
        struct page *page =
            list_first_entry(&recon_state.pagelist->head,
                     struct page, lru);
        __le32 *addr = kmap_atomic(page);
        if (recon_state.nr_caps) {
            WARN_ON(recon_state.nr_realms != mdsc->num_snap_realms);
            *addr = cpu_to_le32(recon_state.nr_caps);
        } else if (recon_state.msg_version >= 4) {
            *(addr + 1) = cpu_to_le32(recon_state.nr_realms);
        }
        kunmap_atomic(addr);
    }

    reply->hdr.version = cpu_to_le16(recon_state.msg_version);
    if (recon_state.msg_version >= 4)
        reply->hdr.compat_version = cpu_to_le16(4);

    reply->hdr.data_len = cpu_to_le32(recon_state.pagelist->length);
    ceph_msg_data_add_pagelist(reply, recon_state.pagelist);

    ceph_con_send(&session->s_con, reply);

    mutex_unlock(&session->s_mutex);

    mutex_lock(&mdsc->mutex);
    __wake_requests(mdsc, &session->s_waiting);
    mutex_unlock(&mdsc->mutex);

    up_read(&mdsc->snap_rwsem);
    ceph_pagelist_release(recon_state.pagelist);
    return;

fail:
    ceph_msg_put(reply);
    up_read(&mdsc->snap_rwsem);
    mutex_unlock(&session->s_mutex);
fail_nomsg:
    ceph_pagelist_release(recon_state.pagelist);
fail_nopagelist:
    pr_err("error %d preparing reconnect for mds%d\n", err, mds);
    return;
}

/*
 * compare old and new mdsmaps, kicking requests
 * and closing out old connections as necessary
 *
 * called under mdsc->mutex.
 */
static void check_new_map(struct ceph_mds_client *mdsc,
              struct ceph_mdsmap *newmap,
              struct ceph_mdsmap *oldmap)
{
    int i, j, err;
    int oldstate, newstate;
    struct ceph_mds_session *s;
    unsigned long targets[DIV_ROUND_UP(CEPH_MAX_MDS, sizeof(unsigned long))] = {0};

    dout("check_new_map new %u old %u\n",
         newmap->m_epoch, oldmap->m_epoch);

    if (newmap->m_info) {
        for (i = 0; i < newmap->possible_max_rank; i++) {
            for (j = 0; j < newmap->m_info[i].num_export_targets; j++)
                set_bit(newmap->m_info[i].export_targets[j], targets);
        }
    }

    for (i = 0; i < oldmap->possible_max_rank && i < mdsc->max_sessions; i++) {
        if (!mdsc->sessions[i])
            continue;
        s = mdsc->sessions[i];
        oldstate = ceph_mdsmap_get_state(oldmap, i);
        newstate = ceph_mdsmap_get_state(newmap, i);

        dout("check_new_map mds%d state %s%s -> %s%s (session %s)\n",
             i, ceph_mds_state_name(oldstate),
             ceph_mdsmap_is_laggy(oldmap, i) ? " (laggy)" : "",
             ceph_mds_state_name(newstate),
             ceph_mdsmap_is_laggy(newmap, i) ? " (laggy)" : "",
             ceph_session_state_name(s->s_state));

        if (i >= newmap->possible_max_rank) {
            /* force close session for stopped mds */
            ceph_get_mds_session(s);
            __unregister_session(mdsc, s);
            __wake_requests(mdsc, &s->s_waiting);
            mutex_unlock(&mdsc->mutex);

            mutex_lock(&s->s_mutex);
            cleanup_session_requests(mdsc, s);
            remove_session_caps(s);
            mutex_unlock(&s->s_mutex);

            ceph_put_mds_session(s);

            mutex_lock(&mdsc->mutex);
            kick_requests(mdsc, i);
            continue;
        }

        if (memcmp(ceph_mdsmap_get_addr(oldmap, i),
               ceph_mdsmap_get_addr(newmap, i),
               sizeof(struct ceph_entity_addr))) {
            /* just close it */
            mutex_unlock(&mdsc->mutex);
            mutex_lock(&s->s_mutex);
            mutex_lock(&mdsc->mutex);
            ceph_con_close(&s->s_con);
            mutex_unlock(&s->s_mutex);
            s->s_state = CEPH_MDS_SESSION_RESTARTING;
        } else if (oldstate == newstate) {
            continue; /* nothing new with this mds */
        }

        /*
         * send reconnect?
         */
        if (s->s_state == CEPH_MDS_SESSION_RESTARTING &&
            newstate >= CEPH_MDS_STATE_RECONNECT) {
            mutex_unlock(&mdsc->mutex);
            clear_bit(i, targets);
            send_mds_reconnect(mdsc, s);
            mutex_lock(&mdsc->mutex);
        }

        /*
         * kick requests on any mds that has gone active.
         */
        if (oldstate < CEPH_MDS_STATE_ACTIVE &&
            newstate >= CEPH_MDS_STATE_ACTIVE) {
            if (oldstate != CEPH_MDS_STATE_CREATING &&
                oldstate != CEPH_MDS_STATE_STARTING)
                pr_info("mds%d recovery completed\n", s->s_mds);
            kick_requests(mdsc, i);
            mutex_unlock(&mdsc->mutex);
            mutex_lock(&s->s_mutex);
            mutex_lock(&mdsc->mutex);
            ceph_kick_flushing_caps(mdsc, s);
            mutex_unlock(&s->s_mutex);
            wake_up_session_caps(s, RECONNECT);
        }
    }

    /*
     * Only open and reconnect sessions that don't exist yet.
     */
    for (i = 0; i < newmap->possible_max_rank; i++) {
        /*
         * If the importing MDS crashed just after the EImportStart
         * journal was flushed, then when a standby MDS takes over
         * and replays the EImportStart journal, the new MDS daemon
         * will wait for the client to reconnect, even though the
         * client may never have registered/opened the session.
         *
         * Try to reconnect to that MDS daemon if its rank is in the
         * export targets array and it is in the up:reconnect state.
         */
        newstate = ceph_mdsmap_get_state(newmap, i);
        if (!test_bit(i, targets) || newstate != CEPH_MDS_STATE_RECONNECT)
            continue;

        /*
         * In rare cases the session may already have been registered
         * and opened by requests that chose random MDSes during the
         * mdsc->mutex unlock/lock gap below. But the related MDS
         * daemon will just queue those requests and keep waiting for
         * the client's reconnection request in the up:reconnect
         * state.
         */
        s = __ceph_lookup_mds_session(mdsc, i);
        if (likely(!s)) {
            s = __open_export_target_session(mdsc, i);
            if (IS_ERR(s)) {
                err = PTR_ERR(s);
                pr_err("failed to open export target session, err %d\n",
                       err);
                continue;
            }
        }
        dout("send reconnect to export target mds.%d\n", i);
        mutex_unlock(&mdsc->mutex);
        send_mds_reconnect(mdsc, s);
        ceph_put_mds_session(s);
        mutex_lock(&mdsc->mutex);
    }

    for (i = 0; i < newmap->possible_max_rank && i < mdsc->max_sessions; i++) {
        s = mdsc->sessions[i];
        if (!s)
            continue;
        if (!ceph_mdsmap_is_laggy(newmap, i))
            continue;
        if (s->s_state == CEPH_MDS_SESSION_OPEN ||
            s->s_state == CEPH_MDS_SESSION_HUNG ||
            s->s_state == CEPH_MDS_SESSION_CLOSING) {
            dout(" connecting to export targets of laggy mds%d\n",
                 i);
            __open_export_target_sessions(mdsc, s);
        }
    }
}


/*
 * leases
 */

/*
 * caller must hold session s_mutex, dentry->d_lock
 */
void __ceph_mdsc_drop_dentry_lease(struct dentry *dentry)
{
    struct ceph_dentry_info *di = ceph_dentry(dentry);

    ceph_put_mds_session(di->lease_session);
    di->lease_session = NULL;
}

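/*
 * Handle a dentry lease message (revoke or renew) from the MDS. For a
 * revoke we drop the lease and ack; for a renew we extend the lease
 * time if the renew request is still outstanding.
 */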
static void handle_lease(struct ceph_mds_client *mdsc,
             struct ceph_mds_session *session,
             struct ceph_msg *msg)
{
    struct super_block *sb = mdsc->fsc->sb;
    struct inode *inode;
    struct dentry *parent, *dentry;
    struct ceph_dentry_info *di;
    int mds = session->s_mds;
    struct ceph_mds_lease *h = msg->front.iov_base;
    u32 seq;
    struct ceph_vino vino;
    struct qstr dname;
    int release = 0;

    dout("handle_lease from mds%d\n", mds);

    /* decode */
    if (msg->front.iov_len < sizeof(*h) + sizeof(u32))
        goto bad;
    vino.ino = le64_to_cpu(h->ino);
    vino.snap = CEPH_NOSNAP;
    seq = le32_to_cpu(h->seq);
    dname.len = get_unaligned_le32(h + 1);
    if (msg->front.iov_len < sizeof(*h) + sizeof(u32) + dname.len)
        goto bad;
    dname.name = (void *)(h + 1) + sizeof(u32);

    /* lookup inode */
    inode = ceph_find_inode(sb, vino);
    dout("handle_lease %s, ino %llx %p %.*s\n",
         ceph_lease_op_name(h->action), vino.ino, inode,
         dname.len, dname.name);

    mutex_lock(&session->s_mutex);
    inc_session_sequence(session);

    if (!inode) {
        dout("handle_lease no inode %llx\n", vino.ino);
        goto release;
    }

    /* dentry */
    parent = d_find_alias(inode);
    if (!parent) {
        dout("no parent dentry on inode %p\n", inode);
        WARN_ON(1);
        goto release; /* hrm... */
    }
    dname.hash = full_name_hash(parent, dname.name, dname.len);
    dentry = d_lookup(parent, &dname);
    dput(parent);
    if (!dentry)
        goto release;

    spin_lock(&dentry->d_lock);
    di = ceph_dentry(dentry);
    switch (h->action) {
    case CEPH_MDS_LEASE_REVOKE:
        if (di->lease_session == session) {
            if (ceph_seq_cmp(di->lease_seq, seq) > 0)
                h->seq = cpu_to_le32(di->lease_seq);
            __ceph_mdsc_drop_dentry_lease(dentry);
        }
        release = 1;
        break;

    case CEPH_MDS_LEASE_RENEW:
        if (di->lease_session == session &&
            di->lease_gen == atomic_read(&session->s_cap_gen) &&
            di->lease_renew_from &&
            di->lease_renew_after == 0) {
            unsigned long duration =
                msecs_to_jiffies(le32_to_cpu(h->duration_ms));

            di->lease_seq = seq;
            di->time = di->lease_renew_from + duration;
            di->lease_renew_after = di->lease_renew_from +
                (duration >> 1);
            di->lease_renew_from = 0;
        }
        break;
    }
    spin_unlock(&dentry->d_lock);
    dput(dentry);

    if (!release)
        goto out;

release:
    /* let's just reuse the same message */
    h->action = CEPH_MDS_LEASE_REVOKE_ACK;
    ceph_msg_get(msg);
    ceph_con_send(&session->s_con, msg);

out:
    mutex_unlock(&session->s_mutex);
    iput(inode);
    return;

bad:
    pr_err("corrupt lease message\n");
    ceph_msg_dump(msg);
}

void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session,
                  struct dentry *dentry, char action,
                  u32 seq)
{
    struct ceph_msg *msg;
    struct ceph_mds_lease *lease;
    struct inode *dir;
    int len = sizeof(*lease) + sizeof(u32) + NAME_MAX;

    dout("lease_send_msg dentry %p %s to mds%d\n",
         dentry, ceph_lease_op_name(action), session->s_mds);

    msg = ceph_msg_new(CEPH_MSG_CLIENT_LEASE, len, GFP_NOFS, false);
    if (!msg)
        return;
    lease = msg->front.iov_base;
    lease->action = action;
    lease->seq = cpu_to_le32(seq);

    spin_lock(&dentry->d_lock);
    dir = d_inode(dentry->d_parent);
    lease->ino = cpu_to_le64(ceph_ino(dir));
    lease->first = lease->last = cpu_to_le64(ceph_snap(dir));

    put_unaligned_le32(dentry->d_name.len, lease + 1);
    memcpy((void *)(lease + 1) + 4,
           dentry->d_name.name, dentry->d_name.len);
    spin_unlock(&dentry->d_lock);

    ceph_con_send(&session->s_con, msg);
}

/*
 * lock and then unlock the session, to wait for any in-flight session
 * activity to finish
 */
static void lock_unlock_session(struct ceph_mds_session *s)
{
    mutex_lock(&s->s_mutex);
    mutex_unlock(&s->s_mutex);
}

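/*
 * If the client has been blocklisted and the CLEANRECOVER mount
 * option is set, try to reestablish the mount by forcing a reconnect.
 */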
static void maybe_recover_session(struct ceph_mds_client *mdsc)
{
    struct ceph_fs_client *fsc = mdsc->fsc;

    if (!ceph_test_mount_opt(fsc, CLEANRECOVER))
        return;

    if (READ_ONCE(fsc->mount_state) != CEPH_MOUNT_MOUNTED)
        return;

    if (!READ_ONCE(fsc->blocklisted))
        return;

    pr_info("auto reconnect after blocklisted\n");
    ceph_force_reconnect(fsc->sb);
}

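/*
 * Return true if the session is usable for periodic upkeep. An OPEN
 * session whose ttl has expired is marked HUNG but remains usable.
 */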
bool check_session_state(struct ceph_mds_session *s)
{
    switch (s->s_state) {
    case CEPH_MDS_SESSION_OPEN:
        if (s->s_ttl && time_after(jiffies, s->s_ttl)) {
            s->s_state = CEPH_MDS_SESSION_HUNG;
            pr_info("mds%d hung\n", s->s_mds);
        }
        break;
    case CEPH_MDS_SESSION_CLOSING:
    case CEPH_MDS_SESSION_NEW:
    case CEPH_MDS_SESSION_RESTARTING:
    case CEPH_MDS_SESSION_CLOSED:
    case CEPH_MDS_SESSION_REJECTED:
        return false;
    }

    return true;
}

/*
 * If the sequence is incremented while we're waiting on a REQUEST_CLOSE reply,
 * then we need to retransmit that request.
 */
void inc_session_sequence(struct ceph_mds_session *s)
{
    lockdep_assert_held(&s->s_mutex);

    s->s_seq++;

    if (s->s_state == CEPH_MDS_SESSION_CLOSING) {
        int ret;

        dout("resending session close request for mds%d\n", s->s_mds);
        ret = request_close_session(s);
        if (ret < 0)
            pr_err("unable to close session to mds%d: %d\n",
                   s->s_mds, ret);
    }
}

/*
 * delayed work -- periodically trim expired leases, renew caps with mds. If
 * the @delay parameter is set to 0 or if it's more than 5 secs, the default
 * workqueue delay value of 5 secs will be used.
 */
static void schedule_delayed(struct ceph_mds_client *mdsc, unsigned long delay)
{
    unsigned long max_delay = HZ * 5;

    /* 5 secs default delay */
    if (!delay || (delay > max_delay))
        delay = max_delay;
    schedule_delayed_work(&mdsc->delayed_work,
                  round_jiffies_relative(delay));
}

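/*
 * Periodic upkeep: renew caps or send keepalives on each session,
 * send out pending cap releases, flush expired delayed caps, trim
 * the snapid map, possibly recover a blocklisted session, and then
 * re-arm ourselves.
 */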
static void delayed_work(struct work_struct *work)
{
    struct ceph_mds_client *mdsc =
        container_of(work, struct ceph_mds_client, delayed_work.work);
    unsigned long delay;
    int renew_interval;
    int renew_caps;
    int i;

    dout("mdsc delayed_work\n");

    if (mdsc->stopping)
        return;

    mutex_lock(&mdsc->mutex);
    renew_interval = mdsc->mdsmap->m_session_timeout >> 2;
    renew_caps = time_after_eq(jiffies, HZ * renew_interval +
                   mdsc->last_renew_caps);
    if (renew_caps)
        mdsc->last_renew_caps = jiffies;

    for (i = 0; i < mdsc->max_sessions; i++) {
        struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);

        if (!s)
            continue;

        if (!check_session_state(s)) {
            ceph_put_mds_session(s);
            continue;
        }
        mutex_unlock(&mdsc->mutex);

        mutex_lock(&s->s_mutex);
        if (renew_caps)
            send_renew_caps(mdsc, s);
        else
            ceph_con_keepalive(&s->s_con);
        if (s->s_state == CEPH_MDS_SESSION_OPEN ||
            s->s_state == CEPH_MDS_SESSION_HUNG)
            ceph_send_cap_releases(mdsc, s);
        mutex_unlock(&s->s_mutex);
        ceph_put_mds_session(s);

        mutex_lock(&mdsc->mutex);
    }
    mutex_unlock(&mdsc->mutex);

    delay = ceph_check_delayed_caps(mdsc);

    ceph_queue_cap_reclaim_work(mdsc);

    ceph_trim_snapid_map(mdsc);

    maybe_recover_session(mdsc);

    schedule_delayed(mdsc, delay);
}

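/*
 * Allocate and initialize the MDS client state for this fs client.
 */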
int ceph_mdsc_init(struct ceph_fs_client *fsc)
{
    struct ceph_mds_client *mdsc;
    int err;

    mdsc = kzalloc(sizeof(struct ceph_mds_client), GFP_NOFS);
    if (!mdsc)
        return -ENOMEM;
    mdsc->fsc = fsc;
    mutex_init(&mdsc->mutex);
    mdsc->mdsmap = kzalloc(sizeof(*mdsc->mdsmap), GFP_NOFS);
    if (!mdsc->mdsmap) {
        err = -ENOMEM;
        goto err_mdsc;
    }

    init_completion(&mdsc->safe_umount_waiters);
    init_waitqueue_head(&mdsc->session_close_wq);
    INIT_LIST_HEAD(&mdsc->waiting_for_map);
    mdsc->quotarealms_inodes = RB_ROOT;
    mutex_init(&mdsc->quotarealms_inodes_mutex);
    init_rwsem(&mdsc->snap_rwsem);
    mdsc->snap_realms = RB_ROOT;
    INIT_LIST_HEAD(&mdsc->snap_empty);
    spin_lock_init(&mdsc->snap_empty_lock);
    mdsc->request_tree = RB_ROOT;
    INIT_DELAYED_WORK(&mdsc->delayed_work, delayed_work);
    mdsc->last_renew_caps = jiffies;
    INIT_LIST_HEAD(&mdsc->cap_delay_list);
    INIT_LIST_HEAD(&mdsc->cap_wait_list);
    spin_lock_init(&mdsc->cap_delay_lock);
    INIT_LIST_HEAD(&mdsc->snap_flush_list);
    spin_lock_init(&mdsc->snap_flush_lock);
    mdsc->last_cap_flush_tid = 1;
    INIT_LIST_HEAD(&mdsc->cap_flush_list);
    INIT_LIST_HEAD(&mdsc->cap_dirty_migrating);
    spin_lock_init(&mdsc->cap_dirty_lock);
    init_waitqueue_head(&mdsc->cap_flushing_wq);
    INIT_WORK(&mdsc->cap_reclaim_work, ceph_cap_reclaim_work);
    err = ceph_metric_init(&mdsc->metric);
    if (err)
        goto err_mdsmap;

    spin_lock_init(&mdsc->dentry_list_lock);
    INIT_LIST_HEAD(&mdsc->dentry_leases);
    INIT_LIST_HEAD(&mdsc->dentry_dir_leases);

    ceph_caps_init(mdsc);
    ceph_adjust_caps_max_min(mdsc, fsc->mount_options);

    spin_lock_init(&mdsc->snapid_map_lock);
    mdsc->snapid_map_tree = RB_ROOT;
    INIT_LIST_HEAD(&mdsc->snapid_map_lru);

    init_rwsem(&mdsc->pool_perm_rwsem);
    mdsc->pool_perm_tree = RB_ROOT;

    strscpy(mdsc->nodename, utsname()->nodename,
        sizeof(mdsc->nodename));

    fsc->mdsc = mdsc;
    return 0;

err_mdsmap:
    kfree(mdsc->mdsmap);
err_mdsc:
    kfree(mdsc);
    return err;
}

/*
 * Wait for safe replies on open mds requests. If we time out, drop
 * all requests from the tree to avoid dangling dentry refs.
 */
static void wait_requests(struct ceph_mds_client *mdsc)
{
    struct ceph_options *opts = mdsc->fsc->client->options;
    struct ceph_mds_request *req;

    mutex_lock(&mdsc->mutex);
    if (__get_oldest_req(mdsc)) {
        mutex_unlock(&mdsc->mutex);

        dout("wait_requests waiting for requests\n");
        wait_for_completion_timeout(&mdsc->safe_umount_waiters,
                        ceph_timeout_jiffies(opts->mount_timeout));

        /* tear down remaining requests */
        mutex_lock(&mdsc->mutex);
        while ((req = __get_oldest_req(mdsc))) {
            dout("wait_requests timed out on tid %llu\n",
                 req->r_tid);
            list_del_init(&req->r_wait);
            __unregister_request(mdsc, req);
        }
    }
    mutex_unlock(&mdsc->mutex);
    dout("wait_requests done\n");
}

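/*
 * Ask an MDS to flush its journal (mdlog) so that our unsafe requests
 * become safe sooner.
 */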
void send_flush_mdlog(struct ceph_mds_session *s)
{
    struct ceph_msg *msg;

    /*
     * Pre-luminous MDS crashes when it sees an unknown session request
     */
    if (!CEPH_HAVE_FEATURE(s->s_con.peer_features, SERVER_LUMINOUS))
        return;

    mutex_lock(&s->s_mutex);
    dout("request mdlog flush to mds%d (%s) seq %lld\n", s->s_mds,
         ceph_session_state_name(s->s_state), s->s_seq);
    msg = ceph_create_session_msg(CEPH_SESSION_REQUEST_FLUSH_MDLOG,
                      s->s_seq);
    if (!msg) {
        pr_err("failed to request mdlog flush to mds%d (%s) seq %lld\n",
               s->s_mds, ceph_session_state_name(s->s_state), s->s_seq);
    } else {
        ceph_con_send(&s->s_con, msg);
    }
    mutex_unlock(&s->s_mutex);
}

/*
 * called before mount is ro, and before dentries are torn down.
 * (hmm, does this still race with new lookups?)
 */
void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc)
{
    dout("pre_umount\n");
    mdsc->stopping = 1;

    ceph_mdsc_iterate_sessions(mdsc, send_flush_mdlog, true);
    ceph_mdsc_iterate_sessions(mdsc, lock_unlock_session, false);
    ceph_flush_dirty_caps(mdsc);
    wait_requests(mdsc);

    /*
     * wait for reply handlers to drop their request refs and
     * their inode/dcache refs
     */
    ceph_msgr_flush();

    ceph_cleanup_quotarealms_inodes(mdsc);
}

/*
 * flush the mdlog and wait for all write mds requests to flush.
 */
static void flush_mdlog_and_wait_mdsc_unsafe_requests(struct ceph_mds_client *mdsc,
                              u64 want_tid)
{
    struct ceph_mds_request *req = NULL, *nextreq;
    struct ceph_mds_session *last_session = NULL;
    struct rb_node *n;

    mutex_lock(&mdsc->mutex);
    dout("%s want %lld\n", __func__, want_tid);
restart:
    req = __get_oldest_req(mdsc);
    while (req && req->r_tid <= want_tid) {
        /* find next request */
        n = rb_next(&req->r_node);
        if (n)
            nextreq = rb_entry(n, struct ceph_mds_request, r_node);
        else
            nextreq = NULL;
        if (req->r_op != CEPH_MDS_OP_SETFILELOCK &&
            (req->r_op & CEPH_MDS_OP_WRITE)) {
            struct ceph_mds_session *s = req->r_session;

            if (!s) {
                req = nextreq;
                continue;
            }

            /* write op */
            ceph_mdsc_get_request(req);
            if (nextreq)
                ceph_mdsc_get_request(nextreq);
            s = ceph_get_mds_session(s);
            mutex_unlock(&mdsc->mutex);

            /* send flush mdlog request to MDS */
            if (last_session != s) {
                send_flush_mdlog(s);
                ceph_put_mds_session(last_session);
                last_session = s;
            } else {
                ceph_put_mds_session(s);
            }
            dout("%s wait on %llu (want %llu)\n", __func__,
                 req->r_tid, want_tid);
            wait_for_completion(&req->r_safe_completion);

            mutex_lock(&mdsc->mutex);
            ceph_mdsc_put_request(req);
            if (!nextreq)
                break; /* next dne before, so we're done! */
            if (RB_EMPTY_NODE(&nextreq->r_node)) {
                /* next request was removed from tree */
                ceph_mdsc_put_request(nextreq);
                goto restart;
            }
            ceph_mdsc_put_request(nextreq); /* won't go away */
        }
        req = nextreq;
    }
    mutex_unlock(&mdsc->mutex);
    ceph_put_mds_session(last_session);
    dout("%s done\n", __func__);
}

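/*
 * Flush all dirty caps and the MDS journal, then wait until every
 * write request and cap flush issued up to this point has been
 * acknowledged as safe.
 */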
void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
{
    u64 want_tid, want_flush;

    if (READ_ONCE(mdsc->fsc->mount_state) >= CEPH_MOUNT_SHUTDOWN)
        return;

    dout("sync\n");
    mutex_lock(&mdsc->mutex);
    want_tid = mdsc->last_tid;
    mutex_unlock(&mdsc->mutex);

    ceph_flush_dirty_caps(mdsc);
    spin_lock(&mdsc->cap_dirty_lock);
    want_flush = mdsc->last_cap_flush_tid;
    if (!list_empty(&mdsc->cap_flush_list)) {
        struct ceph_cap_flush *cf =
            list_last_entry(&mdsc->cap_flush_list,
                    struct ceph_cap_flush, g_list);
        cf->wake = true;
    }
    spin_unlock(&mdsc->cap_dirty_lock);

    dout("sync want tid %lld flush_seq %lld\n",
         want_tid, want_flush);

    flush_mdlog_and_wait_mdsc_unsafe_requests(mdsc, want_tid);
    wait_caps_flush(mdsc, want_flush);
}

/*
 * true if all sessions are closed, or we force unmount
 */
static bool done_closing_sessions(struct ceph_mds_client *mdsc, int skipped)
{
    if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN)
        return true;
    return atomic_read(&mdsc->num_sessions) <= skipped;
}

/*
 * called after the sb is read-only or when metadata is corrupted.
 */
void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
{
    struct ceph_options *opts = mdsc->fsc->client->options;
    struct ceph_mds_session *session;
    int i;
    int skipped = 0;

    dout("close_sessions\n");

    /* close sessions */
    mutex_lock(&mdsc->mutex);
    for (i = 0; i < mdsc->max_sessions; i++) {
        session = __ceph_lookup_mds_session(mdsc, i);
        if (!session)
            continue;
        mutex_unlock(&mdsc->mutex);
        mutex_lock(&session->s_mutex);
        if (__close_session(mdsc, session) <= 0)
            skipped++;
        mutex_unlock(&session->s_mutex);
        ceph_put_mds_session(session);
        mutex_lock(&mdsc->mutex);
    }
    mutex_unlock(&mdsc->mutex);

    dout("waiting for sessions to close\n");
    wait_event_timeout(mdsc->session_close_wq,
               done_closing_sessions(mdsc, skipped),
               ceph_timeout_jiffies(opts->mount_timeout));

    /* tear down remaining sessions */
    mutex_lock(&mdsc->mutex);
    for (i = 0; i < mdsc->max_sessions; i++) {
        if (mdsc->sessions[i]) {
            session = ceph_get_mds_session(mdsc->sessions[i]);
            __unregister_session(mdsc, session);
            mutex_unlock(&mdsc->mutex);
            mutex_lock(&session->s_mutex);
            remove_session_caps(session);
            mutex_unlock(&session->s_mutex);
            ceph_put_mds_session(session);
            mutex_lock(&mdsc->mutex);
        }
    }
    WARN_ON(!list_empty(&mdsc->cap_delay_list));
    mutex_unlock(&mdsc->mutex);

    ceph_cleanup_snapid_map(mdsc);
    ceph_cleanup_global_and_empty_realms(mdsc);

    cancel_work_sync(&mdsc->cap_reclaim_work);
    cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */

    dout("stopped\n");
}

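/*
 * Forcibly tear down all sessions and wake anything waiting on them,
 * e.g. when a mount is being aborted.
 */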
void ceph_mdsc_force_umount(struct ceph_mds_client *mdsc)
{
    struct ceph_mds_session *session;
    int mds;

    dout("force umount\n");

    mutex_lock(&mdsc->mutex);
    for (mds = 0; mds < mdsc->max_sessions; mds++) {
        session = __ceph_lookup_mds_session(mdsc, mds);
        if (!session)
            continue;

        if (session->s_state == CEPH_MDS_SESSION_REJECTED)
            __unregister_session(mdsc, session);
        __wake_requests(mdsc, &session->s_waiting);
        mutex_unlock(&mdsc->mutex);

        mutex_lock(&session->s_mutex);
        __close_session(mdsc, session);
        if (session->s_state == CEPH_MDS_SESSION_CLOSING) {
            cleanup_session_requests(mdsc, session);
            remove_session_caps(session);
        }
        mutex_unlock(&session->s_mutex);
        ceph_put_mds_session(session);

        mutex_lock(&mdsc->mutex);
        kick_requests(mdsc, mds);
    }
    __wake_requests(mdsc, &mdsc->waiting_for_map);
    mutex_unlock(&mdsc->mutex);
}

static void ceph_mdsc_stop(struct ceph_mds_client *mdsc)
{
    dout("stop\n");
    /*
     * Make sure the delayed work has stopped before releasing
     * resources: cancel_delayed_work_sync() only guarantees that
     * the work finishes executing, and the delayed work may re-arm
     * itself again after that.
     */
    flush_delayed_work(&mdsc->delayed_work);

    if (mdsc->mdsmap)
        ceph_mdsmap_destroy(mdsc->mdsmap);
    kfree(mdsc->sessions);
    ceph_caps_finalize(mdsc);
    ceph_pool_perm_destroy(mdsc);
}

void ceph_mdsc_destroy(struct ceph_fs_client *fsc)
{
    struct ceph_mds_client *mdsc = fsc->mdsc;

    dout("mdsc_destroy %p\n", mdsc);

    if (!mdsc)
        return;

    /* flush out any connection work with references to us */
    ceph_msgr_flush();

    ceph_mdsc_stop(mdsc);

    ceph_metric_destroy(&mdsc->metric);

    fsc->mdsc = NULL;
    kfree(mdsc);
    dout("mdsc_destroy %p done\n", mdsc);
}

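/*
 * Handle an fsmap update: find the fscid of the file system named by
 * the mds_namespace mount option and then request the corresponding
 * mdsmap from the monitors.
 */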
void ceph_mdsc_handle_fsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
{
    struct ceph_fs_client *fsc = mdsc->fsc;
    const char *mds_namespace = fsc->mount_options->mds_namespace;
    void *p = msg->front.iov_base;
    void *end = p + msg->front.iov_len;
    u32 epoch;
    u32 num_fs;
    u32 mount_fscid = (u32)-1;
    int err = -EINVAL;

    ceph_decode_need(&p, end, sizeof(u32), bad);
    epoch = ceph_decode_32(&p);

    dout("handle_fsmap epoch %u\n", epoch);

    /* struct_v, struct_cv, map_len, epoch, legacy_client_fscid */
    ceph_decode_skip_n(&p, end, 2 + sizeof(u32) * 3, bad);

    ceph_decode_32_safe(&p, end, num_fs, bad);
    while (num_fs-- > 0) {
        void *info_p, *info_end;
        u32 info_len;
        u32 fscid, namelen;

        ceph_decode_need(&p, end, 2 + sizeof(u32), bad);
        p += 2;  // info_v, info_cv
        info_len = ceph_decode_32(&p);
        ceph_decode_need(&p, end, info_len, bad);
        info_p = p;
        info_end = p + info_len;
        p = info_end;

        ceph_decode_need(&info_p, info_end, sizeof(u32) * 2, bad);
        fscid = ceph_decode_32(&info_p);
        namelen = ceph_decode_32(&info_p);
        ceph_decode_need(&info_p, info_end, namelen, bad);

        if (mds_namespace &&
            strlen(mds_namespace) == namelen &&
            !strncmp(mds_namespace, (char *)info_p, namelen)) {
            mount_fscid = fscid;
            break;
        }
    }

    ceph_monc_got_map(&fsc->client->monc, CEPH_SUB_FSMAP, epoch);
    if (mount_fscid != (u32)-1) {
        fsc->client->monc.fs_cluster_id = mount_fscid;
        ceph_monc_want_map(&fsc->client->monc, CEPH_SUB_MDSMAP,
                   0, true);
        ceph_monc_renew_subs(&fsc->client->monc);
    } else {
        err = -ENOENT;
        goto err_out;
    }
    return;

bad:
    pr_err("error decoding fsmap %d. Shutting down mount.\n", err);
    ceph_umount_begin(mdsc->fsc->sb);
err_out:
    mutex_lock(&mdsc->mutex);
    mdsc->mdsmap_err = err;
    __wake_requests(mdsc, &mdsc->waiting_for_map);
    mutex_unlock(&mdsc->mutex);
}

/*
 * handle mds map update.
 */
void ceph_mdsc_handle_mdsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
{
    u32 epoch;
    u32 maplen;
    void *p = msg->front.iov_base;
    void *end = p + msg->front.iov_len;
    struct ceph_mdsmap *newmap, *oldmap;
    struct ceph_fsid fsid;
    int err = -EINVAL;

    ceph_decode_need(&p, end, sizeof(fsid) + 2 * sizeof(u32), bad);
    ceph_decode_copy(&p, &fsid, sizeof(fsid));
    if (ceph_check_fsid(mdsc->fsc->client, &fsid) < 0)
        return;
    epoch = ceph_decode_32(&p);
    maplen = ceph_decode_32(&p);
    dout("handle_map epoch %u len %d\n", epoch, (int)maplen);

    /* do we need it? */
    mutex_lock(&mdsc->mutex);
    if (mdsc->mdsmap && epoch <= mdsc->mdsmap->m_epoch) {
        dout("handle_map epoch %u <= our %u\n",
             epoch, mdsc->mdsmap->m_epoch);
        mutex_unlock(&mdsc->mutex);
        return;
    }

    newmap = ceph_mdsmap_decode(&p, end, ceph_msgr2(mdsc->fsc->client));
    if (IS_ERR(newmap)) {
        err = PTR_ERR(newmap);
        goto bad_unlock;
    }

    /* swap into place */
    if (mdsc->mdsmap) {
        oldmap = mdsc->mdsmap;
        mdsc->mdsmap = newmap;
        check_new_map(mdsc, newmap, oldmap);
        ceph_mdsmap_destroy(oldmap);
    } else {
        mdsc->mdsmap = newmap;  /* first mds map */
    }
    mdsc->fsc->max_file_size = min((loff_t)mdsc->mdsmap->m_max_file_size,
                       MAX_LFS_FILESIZE);

    __wake_requests(mdsc, &mdsc->waiting_for_map);
    ceph_monc_got_map(&mdsc->fsc->client->monc, CEPH_SUB_MDSMAP,
              mdsc->mdsmap->m_epoch);

    mutex_unlock(&mdsc->mutex);
    schedule_delayed(mdsc, 0);
    return;

bad_unlock:
    mutex_unlock(&mdsc->mutex);
bad:
    pr_err("error decoding mdsmap %d. Shutting down mount.\n", err);
    ceph_umount_begin(mdsc->fsc->sb);
    return;
}

static struct ceph_connection *mds_get_con(struct ceph_connection *con)
{
    struct ceph_mds_session *s = con->private;

    if (ceph_get_mds_session(s))
        return con;
    return NULL;
}

static void mds_put_con(struct ceph_connection *con)
{
    struct ceph_mds_session *s = con->private;

    ceph_put_mds_session(s);
}

/*
 * if the client is unresponsive for long enough, the mds will kill
 * the session entirely.
 */
static void mds_peer_reset(struct ceph_connection *con)
{
    struct ceph_mds_session *s = con->private;
    struct ceph_mds_client *mdsc = s->s_mdsc;

    pr_warn("mds%d closed our session\n", s->s_mds);
    if (READ_ONCE(mdsc->fsc->mount_state) != CEPH_MOUNT_FENCE_IO)
        send_mds_reconnect(mdsc, s);
}

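/*
 * Dispatch an incoming message from an MDS to the appropriate
 * handler, dropping messages that arrive on unregistered sessions.
 */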
static void mds_dispatch(struct ceph_connection *con, struct ceph_msg *msg)
{
    struct ceph_mds_session *s = con->private;
    struct ceph_mds_client *mdsc = s->s_mdsc;
    int type = le16_to_cpu(msg->hdr.type);

    mutex_lock(&mdsc->mutex);
    if (__verify_registered_session(mdsc, s) < 0) {
        mutex_unlock(&mdsc->mutex);
        goto out;
    }
    mutex_unlock(&mdsc->mutex);

    switch (type) {
    case CEPH_MSG_MDS_MAP:
        ceph_mdsc_handle_mdsmap(mdsc, msg);
        break;
    case CEPH_MSG_FS_MAP_USER:
        ceph_mdsc_handle_fsmap(mdsc, msg);
        break;
    case CEPH_MSG_CLIENT_SESSION:
        handle_session(s, msg);
        break;
    case CEPH_MSG_CLIENT_REPLY:
        handle_reply(s, msg);
        break;
    case CEPH_MSG_CLIENT_REQUEST_FORWARD:
        handle_forward(mdsc, s, msg);
        break;
    case CEPH_MSG_CLIENT_CAPS:
        ceph_handle_caps(s, msg);
        break;
    case CEPH_MSG_CLIENT_SNAP:
        ceph_handle_snap(mdsc, s, msg);
        break;
    case CEPH_MSG_CLIENT_LEASE:
        handle_lease(mdsc, s, msg);
        break;
    case CEPH_MSG_CLIENT_QUOTA:
        ceph_handle_quota(mdsc, s, msg);
        break;

    default:
        pr_err("received unknown message type %d %s\n", type,
               ceph_msg_type_name(type));
    }
out:
    ceph_msg_put(msg);
}

/*
 * authentication
 */

/*
 * Note: returned pointer is the address of a structure that's
 * managed separately. Caller must *not* attempt to free it.
 */
static struct ceph_auth_handshake *
mds_get_authorizer(struct ceph_connection *con, int *proto, int force_new)
{
    struct ceph_mds_session *s = con->private;
    struct ceph_mds_client *mdsc = s->s_mdsc;
    struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
    struct ceph_auth_handshake *auth = &s->s_auth;
    int ret;

    ret = __ceph_auth_get_authorizer(ac, auth, CEPH_ENTITY_TYPE_MDS,
                     force_new, proto, NULL, NULL);
    if (ret)
        return ERR_PTR(ret);

    return auth;
}

static int mds_add_authorizer_challenge(struct ceph_connection *con,
                    void *challenge_buf, int challenge_buf_len)
{
    struct ceph_mds_session *s = con->private;
    struct ceph_mds_client *mdsc = s->s_mdsc;
    struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;

    return ceph_auth_add_authorizer_challenge(ac, s->s_auth.authorizer,
                          challenge_buf, challenge_buf_len);
}

static int mds_verify_authorizer_reply(struct ceph_connection *con)
{
    struct ceph_mds_session *s = con->private;
    struct ceph_mds_client *mdsc = s->s_mdsc;
    struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
    struct ceph_auth_handshake *auth = &s->s_auth;

    return ceph_auth_verify_authorizer_reply(ac, auth->authorizer,
        auth->authorizer_reply_buf, auth->authorizer_reply_buf_len,
        NULL, NULL, NULL, NULL);
}

static int mds_invalidate_authorizer(struct ceph_connection *con)
{
    struct ceph_mds_session *s = con->private;
    struct ceph_mds_client *mdsc = s->s_mdsc;
    struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;

    ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_MDS);

    return ceph_monc_validate_auth(&mdsc->fsc->client->monc);
}

static int mds_get_auth_request(struct ceph_connection *con,
                void *buf, int *buf_len,
                void **authorizer, int *authorizer_len)
{
    struct ceph_mds_session *s = con->private;
    struct ceph_auth_client *ac = s->s_mdsc->fsc->client->monc.auth;
    struct ceph_auth_handshake *auth = &s->s_auth;
    int ret;

    ret = ceph_auth_get_authorizer(ac, auth, CEPH_ENTITY_TYPE_MDS,
                       buf, buf_len);
    if (ret)
        return ret;

    *authorizer = auth->authorizer_buf;
    *authorizer_len = auth->authorizer_buf_len;
    return 0;
}

static int mds_handle_auth_reply_more(struct ceph_connection *con,
                      void *reply, int reply_len,
                      void *buf, int *buf_len,
                      void **authorizer, int *authorizer_len)
{
    struct ceph_mds_session *s = con->private;
    struct ceph_auth_client *ac = s->s_mdsc->fsc->client->monc.auth;
    struct ceph_auth_handshake *auth = &s->s_auth;
    int ret;

    ret = ceph_auth_handle_svc_reply_more(ac, auth, reply, reply_len,
                          buf, buf_len);
    if (ret)
        return ret;

    *authorizer = auth->authorizer_buf;
    *authorizer_len = auth->authorizer_buf_len;
    return 0;
}

static int mds_handle_auth_done(struct ceph_connection *con,
                u64 global_id, void *reply, int reply_len,
                u8 *session_key, int *session_key_len,
                u8 *con_secret, int *con_secret_len)
{
    struct ceph_mds_session *s = con->private;
    struct ceph_auth_client *ac = s->s_mdsc->fsc->client->monc.auth;
    struct ceph_auth_handshake *auth = &s->s_auth;

    return ceph_auth_handle_svc_reply_done(ac, auth, reply, reply_len,
                           session_key, session_key_len,
                           con_secret, con_secret_len);
}

static int mds_handle_auth_bad_method(struct ceph_connection *con,
                      int used_proto, int result,
                      const int *allowed_protos, int proto_cnt,
                      const int *allowed_modes, int mode_cnt)
{
    struct ceph_mds_session *s = con->private;
    struct ceph_mon_client *monc = &s->s_mdsc->fsc->client->monc;
    int ret;

    if (ceph_auth_handle_bad_authorizer(monc->auth, CEPH_ENTITY_TYPE_MDS,
                        used_proto, result,
                        allowed_protos, proto_cnt,
                        allowed_modes, mode_cnt)) {
        ret = ceph_monc_validate_auth(monc);
        if (ret)
            return ret;
    }

    return -EACCES;
}

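/*
 * Allocate a message for an incoming frame on this connection, or
 * reuse the one already in progress.
 */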
static struct ceph_msg *mds_alloc_msg(struct ceph_connection *con,
                      struct ceph_msg_header *hdr, int *skip)
{
    struct ceph_msg *msg;
    int type = (int)le16_to_cpu(hdr->type);
    int front_len = (int)le32_to_cpu(hdr->front_len);

    if (con->in_msg)
        return con->in_msg;

    *skip = 0;
    msg = ceph_msg_new(type, front_len, GFP_NOFS, false);
    if (!msg) {
        pr_err("unable to allocate msg type %d len %d\n",
               type, front_len);
        return NULL;
    }

    return msg;
}

static int mds_sign_message(struct ceph_msg *msg)
{
    struct ceph_mds_session *s = msg->con->private;
    struct ceph_auth_handshake *auth = &s->s_auth;

    return ceph_auth_sign_message(auth, msg);
}

static int mds_check_message_signature(struct ceph_msg *msg)
{
    struct ceph_mds_session *s = msg->con->private;
    struct ceph_auth_handshake *auth = &s->s_auth;

    return ceph_auth_check_message_signature(auth, msg);
}

static const struct ceph_connection_operations mds_con_ops = {
    .get = mds_get_con,
    .put = mds_put_con,
    .alloc_msg = mds_alloc_msg,
    .dispatch = mds_dispatch,
    .peer_reset = mds_peer_reset,
    .get_authorizer = mds_get_authorizer,
    .add_authorizer_challenge = mds_add_authorizer_challenge,
    .verify_authorizer_reply = mds_verify_authorizer_reply,
    .invalidate_authorizer = mds_invalidate_authorizer,
    .sign_message = mds_sign_message,
    .check_message_signature = mds_check_message_signature,
    .get_auth_request = mds_get_auth_request,
    .handle_auth_reply_more = mds_handle_auth_reply_more,
    .handle_auth_done = mds_handle_auth_done,
    .handle_auth_bad_method = mds_handle_auth_bad_method,
};

/* eof */
1// SPDX-License-Identifier: GPL-2.0
2#include <linux/ceph/ceph_debug.h>
3
4#include <linux/fs.h>
5#include <linux/wait.h>
6#include <linux/slab.h>
7#include <linux/gfp.h>
8#include <linux/sched.h>
9#include <linux/debugfs.h>
10#include <linux/seq_file.h>
11#include <linux/ratelimit.h>
12#include <linux/bits.h>
13#include <linux/ktime.h>
14
15#include "super.h"
16#include "mds_client.h"
17
18#include <linux/ceph/ceph_features.h>
19#include <linux/ceph/messenger.h>
20#include <linux/ceph/decode.h>
21#include <linux/ceph/pagelist.h>
22#include <linux/ceph/auth.h>
23#include <linux/ceph/debugfs.h>
24
25#define RECONNECT_MAX_SIZE (INT_MAX - PAGE_SIZE)
26
27/*
28 * A cluster of MDS (metadata server) daemons is responsible for
29 * managing the file system namespace (the directory hierarchy and
30 * inodes) and for coordinating shared access to storage. Metadata is
31 * partitioning hierarchically across a number of servers, and that
32 * partition varies over time as the cluster adjusts the distribution
33 * in order to balance load.
34 *
35 * The MDS client is primarily responsible to managing synchronous
36 * metadata requests for operations like open, unlink, and so forth.
37 * If there is a MDS failure, we find out about it when we (possibly
38 * request and) receive a new MDS map, and can resubmit affected
39 * requests.
40 *
41 * For the most part, though, we take advantage of a lossless
42 * communications channel to the MDS, and do not need to worry about
43 * timing out or resubmitting requests.
44 *
45 * We maintain a stateful "session" with each MDS we interact with.
46 * Within each session, we sent periodic heartbeat messages to ensure
47 * any capabilities or leases we have been issues remain valid. If
48 * the session times out and goes stale, our leases and capabilities
49 * are no longer valid.
50 */
51
52struct ceph_reconnect_state {
53 struct ceph_mds_session *session;
54 int nr_caps, nr_realms;
55 struct ceph_pagelist *pagelist;
56 unsigned msg_version;
57 bool allow_multi;
58};
59
60static void __wake_requests(struct ceph_mds_client *mdsc,
61 struct list_head *head);
62static void ceph_cap_release_work(struct work_struct *work);
63static void ceph_cap_reclaim_work(struct work_struct *work);
64
65static const struct ceph_connection_operations mds_con_ops;
66
67
68/*
69 * mds reply parsing
70 */
71
72static int parse_reply_info_quota(void **p, void *end,
73 struct ceph_mds_reply_info_in *info)
74{
75 u8 struct_v, struct_compat;
76 u32 struct_len;
77
78 ceph_decode_8_safe(p, end, struct_v, bad);
79 ceph_decode_8_safe(p, end, struct_compat, bad);
80 /* struct_v is expected to be >= 1. we only
81 * understand encoding with struct_compat == 1. */
82 if (!struct_v || struct_compat != 1)
83 goto bad;
84 ceph_decode_32_safe(p, end, struct_len, bad);
85 ceph_decode_need(p, end, struct_len, bad);
86 end = *p + struct_len;
87 ceph_decode_64_safe(p, end, info->max_bytes, bad);
88 ceph_decode_64_safe(p, end, info->max_files, bad);
89 *p = end;
90 return 0;
91bad:
92 return -EIO;
93}
94
95/*
96 * parse individual inode info
97 */
98static int parse_reply_info_in(void **p, void *end,
99 struct ceph_mds_reply_info_in *info,
100 u64 features)
101{
102 int err = 0;
103 u8 struct_v = 0;
104
105 if (features == (u64)-1) {
106 u32 struct_len;
107 u8 struct_compat;
108 ceph_decode_8_safe(p, end, struct_v, bad);
109 ceph_decode_8_safe(p, end, struct_compat, bad);
110 /* struct_v is expected to be >= 1. we only understand
111 * encoding with struct_compat == 1. */
112 if (!struct_v || struct_compat != 1)
113 goto bad;
114 ceph_decode_32_safe(p, end, struct_len, bad);
115 ceph_decode_need(p, end, struct_len, bad);
116 end = *p + struct_len;
117 }
118
119 ceph_decode_need(p, end, sizeof(struct ceph_mds_reply_inode), bad);
120 info->in = *p;
121 *p += sizeof(struct ceph_mds_reply_inode) +
122 sizeof(*info->in->fragtree.splits) *
123 le32_to_cpu(info->in->fragtree.nsplits);
124
125 ceph_decode_32_safe(p, end, info->symlink_len, bad);
126 ceph_decode_need(p, end, info->symlink_len, bad);
127 info->symlink = *p;
128 *p += info->symlink_len;
129
130 ceph_decode_copy_safe(p, end, &info->dir_layout,
131 sizeof(info->dir_layout), bad);
132 ceph_decode_32_safe(p, end, info->xattr_len, bad);
133 ceph_decode_need(p, end, info->xattr_len, bad);
134 info->xattr_data = *p;
135 *p += info->xattr_len;
136
137 if (features == (u64)-1) {
138 /* inline data */
139 ceph_decode_64_safe(p, end, info->inline_version, bad);
140 ceph_decode_32_safe(p, end, info->inline_len, bad);
141 ceph_decode_need(p, end, info->inline_len, bad);
142 info->inline_data = *p;
143 *p += info->inline_len;
144 /* quota */
145 err = parse_reply_info_quota(p, end, info);
146 if (err < 0)
147 goto out_bad;
148 /* pool namespace */
149 ceph_decode_32_safe(p, end, info->pool_ns_len, bad);
150 if (info->pool_ns_len > 0) {
151 ceph_decode_need(p, end, info->pool_ns_len, bad);
152 info->pool_ns_data = *p;
153 *p += info->pool_ns_len;
154 }
155
156 /* btime */
157 ceph_decode_need(p, end, sizeof(info->btime), bad);
158 ceph_decode_copy(p, &info->btime, sizeof(info->btime));
159
160 /* change attribute */
161 ceph_decode_64_safe(p, end, info->change_attr, bad);
162
163 /* dir pin */
164 if (struct_v >= 2) {
165 ceph_decode_32_safe(p, end, info->dir_pin, bad);
166 } else {
167 info->dir_pin = -ENODATA;
168 }
169
170 /* snapshot birth time, remains zero for v<=2 */
171 if (struct_v >= 3) {
172 ceph_decode_need(p, end, sizeof(info->snap_btime), bad);
173 ceph_decode_copy(p, &info->snap_btime,
174 sizeof(info->snap_btime));
175 } else {
176 memset(&info->snap_btime, 0, sizeof(info->snap_btime));
177 }
178
179 /* snapshot count, remains zero for v<=3 */
180 if (struct_v >= 4) {
181 ceph_decode_64_safe(p, end, info->rsnaps, bad);
182 } else {
183 info->rsnaps = 0;
184 }
185
186 *p = end;
187 } else {
188 if (features & CEPH_FEATURE_MDS_INLINE_DATA) {
189 ceph_decode_64_safe(p, end, info->inline_version, bad);
190 ceph_decode_32_safe(p, end, info->inline_len, bad);
191 ceph_decode_need(p, end, info->inline_len, bad);
192 info->inline_data = *p;
193 *p += info->inline_len;
194 } else
195 info->inline_version = CEPH_INLINE_NONE;
196
197 if (features & CEPH_FEATURE_MDS_QUOTA) {
198 err = parse_reply_info_quota(p, end, info);
199 if (err < 0)
200 goto out_bad;
201 } else {
202 info->max_bytes = 0;
203 info->max_files = 0;
204 }
205
206 info->pool_ns_len = 0;
207 info->pool_ns_data = NULL;
208 if (features & CEPH_FEATURE_FS_FILE_LAYOUT_V2) {
209 ceph_decode_32_safe(p, end, info->pool_ns_len, bad);
210 if (info->pool_ns_len > 0) {
211 ceph_decode_need(p, end, info->pool_ns_len, bad);
212 info->pool_ns_data = *p;
213 *p += info->pool_ns_len;
214 }
215 }
216
217 if (features & CEPH_FEATURE_FS_BTIME) {
218 ceph_decode_need(p, end, sizeof(info->btime), bad);
219 ceph_decode_copy(p, &info->btime, sizeof(info->btime));
220 ceph_decode_64_safe(p, end, info->change_attr, bad);
221 }
222
223 info->dir_pin = -ENODATA;
224 /* info->snap_btime and info->rsnaps remain zero */
225 }
226 return 0;
227bad:
228 err = -EIO;
229out_bad:
230 return err;
231}

static int parse_reply_info_dir(void **p, void *end,
				struct ceph_mds_reply_dirfrag **dirfrag,
				u64 features)
{
	if (features == (u64)-1) {
		u8 struct_v, struct_compat;
		u32 struct_len;
		ceph_decode_8_safe(p, end, struct_v, bad);
		ceph_decode_8_safe(p, end, struct_compat, bad);
		/* struct_v is expected to be >= 1. we only understand
		 * encoding with struct_compat == 1. */
		if (!struct_v || struct_compat != 1)
			goto bad;
		ceph_decode_32_safe(p, end, struct_len, bad);
		ceph_decode_need(p, end, struct_len, bad);
		end = *p + struct_len;
	}

	ceph_decode_need(p, end, sizeof(**dirfrag), bad);
	*dirfrag = *p;
	*p += sizeof(**dirfrag) + sizeof(u32) * le32_to_cpu((*dirfrag)->ndist);
	if (unlikely(*p > end))
		goto bad;
	if (features == (u64)-1)
		*p = end;
	return 0;
bad:
	return -EIO;
}

static int parse_reply_info_lease(void **p, void *end,
				  struct ceph_mds_reply_lease **lease,
				  u64 features)
{
	if (features == (u64)-1) {
		u8 struct_v, struct_compat;
		u32 struct_len;
		ceph_decode_8_safe(p, end, struct_v, bad);
		ceph_decode_8_safe(p, end, struct_compat, bad);
		/* struct_v is expected to be >= 1. we only understand
		 * encoding with struct_compat == 1. */
		if (!struct_v || struct_compat != 1)
			goto bad;
		ceph_decode_32_safe(p, end, struct_len, bad);
		ceph_decode_need(p, end, struct_len, bad);
		end = *p + struct_len;
	}

	ceph_decode_need(p, end, sizeof(**lease), bad);
	*lease = *p;
	*p += sizeof(**lease);
	if (features == (u64)-1)
		*p = end;
	return 0;
bad:
	return -EIO;
}

/*
 * parse a normal reply, which may contain a (dir+)dentry and/or a
 * target inode.
 */
static int parse_reply_info_trace(void **p, void *end,
				  struct ceph_mds_reply_info_parsed *info,
				  u64 features)
{
	int err;

	if (info->head->is_dentry) {
		err = parse_reply_info_in(p, end, &info->diri, features);
		if (err < 0)
			goto out_bad;

		err = parse_reply_info_dir(p, end, &info->dirfrag, features);
		if (err < 0)
			goto out_bad;

		ceph_decode_32_safe(p, end, info->dname_len, bad);
		ceph_decode_need(p, end, info->dname_len, bad);
		info->dname = *p;
		*p += info->dname_len;

		err = parse_reply_info_lease(p, end, &info->dlease, features);
		if (err < 0)
			goto out_bad;
	}

	if (info->head->is_target) {
		err = parse_reply_info_in(p, end, &info->targeti, features);
		if (err < 0)
			goto out_bad;
	}

	if (unlikely(*p != end))
		goto bad;
	return 0;

bad:
	err = -EIO;
out_bad:
	pr_err("problem parsing mds trace %d\n", err);
	return err;
}

/*
 * parse readdir results
 */
static int parse_reply_info_readdir(void **p, void *end,
				    struct ceph_mds_reply_info_parsed *info,
				    u64 features)
{
	u32 num, i = 0;
	int err;

	err = parse_reply_info_dir(p, end, &info->dir_dir, features);
	if (err < 0)
		goto out_bad;

	ceph_decode_need(p, end, sizeof(num) + 2, bad);
	num = ceph_decode_32(p);
	{
		u16 flags = ceph_decode_16(p);
		info->dir_end = !!(flags & CEPH_READDIR_FRAG_END);
		info->dir_complete = !!(flags & CEPH_READDIR_FRAG_COMPLETE);
		info->hash_order = !!(flags & CEPH_READDIR_HASH_ORDER);
		info->offset_hash = !!(flags & CEPH_READDIR_OFFSET_HASH);
	}
	if (num == 0)
		goto done;

	BUG_ON(!info->dir_entries);
	if ((unsigned long)(info->dir_entries + num) >
	    (unsigned long)info->dir_entries + info->dir_buf_size) {
		pr_err("dir contents are larger than expected\n");
		WARN_ON(1);
		goto bad;
	}

	info->dir_nr = num;
	while (num) {
		struct ceph_mds_reply_dir_entry *rde = info->dir_entries + i;
		/* dentry */
		ceph_decode_32_safe(p, end, rde->name_len, bad);
		ceph_decode_need(p, end, rde->name_len, bad);
		rde->name = *p;
		*p += rde->name_len;
		dout("parsed dir dname '%.*s'\n", rde->name_len, rde->name);

		/* dentry lease */
		err = parse_reply_info_lease(p, end, &rde->lease, features);
		if (err)
			goto out_bad;
		/* inode */
		err = parse_reply_info_in(p, end, &rde->inode, features);
		if (err < 0)
			goto out_bad;
		/* ceph_readdir_prepopulate() will update it */
		rde->offset = 0;
		i++;
		num--;
	}

done:
	/* Skip over any unrecognized fields */
	*p = end;
	return 0;

bad:
	err = -EIO;
out_bad:
	pr_err("problem parsing dir contents %d\n", err);
	return err;
}

/*
 * parse fcntl F_GETLK results
 */
static int parse_reply_info_filelock(void **p, void *end,
				     struct ceph_mds_reply_info_parsed *info,
				     u64 features)
{
	if (*p + sizeof(*info->filelock_reply) > end)
		goto bad;

	info->filelock_reply = *p;

	/* Skip over any unrecognized fields */
	*p = end;
	return 0;
bad:
	return -EIO;
}


#if BITS_PER_LONG == 64

#define DELEGATED_INO_AVAILABLE xa_mk_value(1)

static int ceph_parse_deleg_inos(void **p, void *end,
				 struct ceph_mds_session *s)
{
	u32 sets;

	ceph_decode_32_safe(p, end, sets, bad);
	dout("got %u sets of delegated inodes\n", sets);
	while (sets--) {
		u64 start, len, ino;

		ceph_decode_64_safe(p, end, start, bad);
		ceph_decode_64_safe(p, end, len, bad);

		/* Don't accept a delegation of system inodes */
		if (start < CEPH_INO_SYSTEM_BASE) {
			pr_warn_ratelimited("ceph: ignoring reserved inode range delegation (start=0x%llx len=0x%llx)\n",
					    start, len);
			continue;
		}
		while (len--) {
			int err = xa_insert(&s->s_delegated_inos, ino = start++,
					    DELEGATED_INO_AVAILABLE,
					    GFP_KERNEL);
			if (!err) {
				dout("added delegated inode 0x%llx\n",
				     start - 1);
			} else if (err == -EBUSY) {
				pr_warn("ceph: MDS delegated inode 0x%llx more than once.\n",
					start - 1);
			} else {
				return err;
			}
		}
	}
	return 0;
bad:
	return -EIO;
}

u64 ceph_get_deleg_ino(struct ceph_mds_session *s)
{
	unsigned long ino;
	void *val;

	xa_for_each(&s->s_delegated_inos, ino, val) {
		val = xa_erase(&s->s_delegated_inos, ino);
		if (val == DELEGATED_INO_AVAILABLE)
			return ino;
	}
	return 0;
}

int ceph_restore_deleg_ino(struct ceph_mds_session *s, u64 ino)
{
	return xa_insert(&s->s_delegated_inos, ino, DELEGATED_INO_AVAILABLE,
			 GFP_KERNEL);
}
#else /* BITS_PER_LONG == 64 */
/*
 * FIXME: xarrays can't handle 64-bit indexes on a 32-bit arch. For now, just
 * ignore delegated_inos on 32 bit arch. Maybe eventually add xarrays for top
 * and bottom words?
 */
static int ceph_parse_deleg_inos(void **p, void *end,
				 struct ceph_mds_session *s)
{
	u32 sets;

	ceph_decode_32_safe(p, end, sets, bad);
	if (sets)
		ceph_decode_skip_n(p, end, sets * 2 * sizeof(__le64), bad);
	return 0;
bad:
	return -EIO;
}

u64 ceph_get_deleg_ino(struct ceph_mds_session *s)
{
	return 0;
}

int ceph_restore_deleg_ino(struct ceph_mds_session *s, u64 ino)
{
	return 0;
}
#endif /* BITS_PER_LONG == 64 */
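
/*
 * Illustrative sketch (not compiled): how a create path might consume a
 * delegated inode and give it back on failure, using the helpers above.
 * demo_try_async_create() and demo_send_async_create() are hypothetical
 * names, not functions in this file.
 */
#if 0
static int demo_try_async_create(struct ceph_mds_session *s)
{
	u64 ino = ceph_get_deleg_ino(s);	/* claim one; 0 if none left */

	if (!ino)
		return -EAGAIN;			/* fall back to a sync create */

	if (demo_send_async_create(s, ino)) {	/* hypothetical send helper */
		/* on failure, return the ino to the delegated pool */
		WARN_ON(ceph_restore_deleg_ino(s, ino));
		return -EAGAIN;
	}
	return 0;
}
#endif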

/*
 * parse create results
 */
static int parse_reply_info_create(void **p, void *end,
				   struct ceph_mds_reply_info_parsed *info,
				   u64 features, struct ceph_mds_session *s)
{
	int ret;

	if (features == (u64)-1 ||
	    (features & CEPH_FEATURE_REPLY_CREATE_INODE)) {
		if (*p == end) {
			/* Malformed reply? */
			info->has_create_ino = false;
		} else if (test_bit(CEPHFS_FEATURE_DELEG_INO, &s->s_features)) {
			info->has_create_ino = true;
			/* struct_v, struct_compat, and len */
			ceph_decode_skip_n(p, end, 2 + sizeof(u32), bad);
			ceph_decode_64_safe(p, end, info->ino, bad);
			ret = ceph_parse_deleg_inos(p, end, s);
			if (ret)
				return ret;
		} else {
			/* legacy */
			ceph_decode_64_safe(p, end, info->ino, bad);
			info->has_create_ino = true;
		}
	} else {
		if (*p != end)
			goto bad;
	}

	/* Skip over any unrecognized fields */
	*p = end;
	return 0;
bad:
	return -EIO;
}

/*
 * parse extra results
 */
static int parse_reply_info_extra(void **p, void *end,
				  struct ceph_mds_reply_info_parsed *info,
				  u64 features, struct ceph_mds_session *s)
{
	u32 op = le32_to_cpu(info->head->op);

	if (op == CEPH_MDS_OP_GETFILELOCK)
		return parse_reply_info_filelock(p, end, info, features);
	else if (op == CEPH_MDS_OP_READDIR || op == CEPH_MDS_OP_LSSNAP)
		return parse_reply_info_readdir(p, end, info, features);
	else if (op == CEPH_MDS_OP_CREATE)
		return parse_reply_info_create(p, end, info, features, s);
	else
		return -EIO;
}

/*
 * parse entire mds reply
 */
static int parse_reply_info(struct ceph_mds_session *s, struct ceph_msg *msg,
			    struct ceph_mds_reply_info_parsed *info,
			    u64 features)
{
	void *p, *end;
	u32 len;
	int err;

	info->head = msg->front.iov_base;
	p = msg->front.iov_base + sizeof(struct ceph_mds_reply_head);
	end = p + msg->front.iov_len - sizeof(struct ceph_mds_reply_head);

	/* trace */
	ceph_decode_32_safe(&p, end, len, bad);
	if (len > 0) {
		ceph_decode_need(&p, end, len, bad);
		err = parse_reply_info_trace(&p, p+len, info, features);
		if (err < 0)
			goto out_bad;
	}

	/* extra */
	ceph_decode_32_safe(&p, end, len, bad);
	if (len > 0) {
		ceph_decode_need(&p, end, len, bad);
		err = parse_reply_info_extra(&p, p+len, info, features, s);
		if (err < 0)
			goto out_bad;
	}

	/* snap blob */
	ceph_decode_32_safe(&p, end, len, bad);
	info->snapblob_len = len;
	info->snapblob = p;
	p += len;

	if (p != end)
		goto bad;
	return 0;

bad:
	err = -EIO;
out_bad:
	pr_err("mds parse_reply err %d\n", err);
	return err;
}
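
/*
 * Illustrative sketch (not compiled): the reply payload above is three
 * length-prefixed blocks laid end to end -- trace, extra, snap blob --
 * so a minimal walker only needs the u32-length-then-skip pattern.
 * demo_walk_reply() is hypothetical; endianness is ignored for brevity.
 */
#if 0
static int demo_walk_reply(u8 *p, u8 *end)
{
	int i;

	for (i = 0; i < 3; i++) {		/* trace, extra, snap blob */
		u32 len;

		if ((size_t)(end - p) < sizeof(len))
			return -EIO;
		memcpy(&len, p, sizeof(len));	/* wire is le32 in reality */
		p += sizeof(len);
		if ((size_t)(end - p) < len)
			return -EIO;		/* block overruns the message */
		p += len;			/* the real code parses here */
	}
	return p == end ? 0 : -EIO;		/* no trailing bytes allowed */
}
#endif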

static void destroy_reply_info(struct ceph_mds_reply_info_parsed *info)
{
	if (!info->dir_entries)
		return;
	free_pages((unsigned long)info->dir_entries, get_order(info->dir_buf_size));
}


/*
 * sessions
 */
const char *ceph_session_state_name(int s)
{
	switch (s) {
	case CEPH_MDS_SESSION_NEW: return "new";
	case CEPH_MDS_SESSION_OPENING: return "opening";
	case CEPH_MDS_SESSION_OPEN: return "open";
	case CEPH_MDS_SESSION_HUNG: return "hung";
	case CEPH_MDS_SESSION_CLOSING: return "closing";
	case CEPH_MDS_SESSION_CLOSED: return "closed";
	case CEPH_MDS_SESSION_RESTARTING: return "restarting";
	case CEPH_MDS_SESSION_RECONNECTING: return "reconnecting";
	case CEPH_MDS_SESSION_REJECTED: return "rejected";
	default: return "???";
	}
}

struct ceph_mds_session *ceph_get_mds_session(struct ceph_mds_session *s)
{
	if (refcount_inc_not_zero(&s->s_ref)) {
		dout("mdsc get_session %p %d -> %d\n", s,
		     refcount_read(&s->s_ref)-1, refcount_read(&s->s_ref));
		return s;
	} else {
		dout("mdsc get_session %p 0 -- FAIL\n", s);
		return NULL;
	}
}

void ceph_put_mds_session(struct ceph_mds_session *s)
{
	if (IS_ERR_OR_NULL(s))
		return;

	dout("mdsc put_session %p %d -> %d\n", s,
	     refcount_read(&s->s_ref), refcount_read(&s->s_ref)-1);
	if (refcount_dec_and_test(&s->s_ref)) {
		if (s->s_auth.authorizer)
			ceph_auth_destroy_authorizer(s->s_auth.authorizer);
		WARN_ON(mutex_is_locked(&s->s_mutex));
		xa_destroy(&s->s_delegated_inos);
		kfree(s);
	}
}
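
/*
 * Illustrative sketch (not compiled): why lookups use
 * refcount_inc_not_zero() above. A finder racing with the final put must
 * not resurrect a session whose count already reached zero; it fails the
 * lookup instead. demo_find_session() is a hypothetical stand-in.
 */
#if 0
static struct ceph_mds_session *demo_find_session(struct ceph_mds_client *mdsc,
						  int mds)
{
	struct ceph_mds_session *s;

	mutex_lock(&mdsc->mutex);	/* protects the sessions[] slot */
	s = (mds < mdsc->max_sessions) ? mdsc->sessions[mds] : NULL;
	/* inc-not-zero: fails rather than reviving a dying session */
	if (s && !refcount_inc_not_zero(&s->s_ref))
		s = NULL;
	mutex_unlock(&mdsc->mutex);
	return s;			/* caller must ceph_put_mds_session() */
}
#endif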

/*
 * called under mdsc->mutex
 */
struct ceph_mds_session *__ceph_lookup_mds_session(struct ceph_mds_client *mdsc,
						   int mds)
{
	if (mds >= mdsc->max_sessions || !mdsc->sessions[mds])
		return NULL;
	return ceph_get_mds_session(mdsc->sessions[mds]);
}

static bool __have_session(struct ceph_mds_client *mdsc, int mds)
{
	if (mds >= mdsc->max_sessions || !mdsc->sessions[mds])
		return false;
	else
		return true;
}

static int __verify_registered_session(struct ceph_mds_client *mdsc,
				       struct ceph_mds_session *s)
{
	if (s->s_mds >= mdsc->max_sessions ||
	    mdsc->sessions[s->s_mds] != s)
		return -ENOENT;
	return 0;
}

/*
 * create+register a new session for given mds.
 * called under mdsc->mutex.
 */
static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
						 int mds)
{
	struct ceph_mds_session *s;

	if (mds >= mdsc->mdsmap->possible_max_rank)
		return ERR_PTR(-EINVAL);

	s = kzalloc(sizeof(*s), GFP_NOFS);
	if (!s)
		return ERR_PTR(-ENOMEM);

	if (mds >= mdsc->max_sessions) {
		int newmax = 1 << get_count_order(mds + 1);
		struct ceph_mds_session **sa;

		dout("%s: realloc to %d\n", __func__, newmax);
		sa = kcalloc(newmax, sizeof(void *), GFP_NOFS);
		if (!sa)
			goto fail_realloc;
		if (mdsc->sessions) {
			memcpy(sa, mdsc->sessions,
			       mdsc->max_sessions * sizeof(void *));
			kfree(mdsc->sessions);
		}
		mdsc->sessions = sa;
		mdsc->max_sessions = newmax;
	}

	dout("%s: mds%d\n", __func__, mds);
	s->s_mdsc = mdsc;
	s->s_mds = mds;
	s->s_state = CEPH_MDS_SESSION_NEW;
	s->s_ttl = 0;
	s->s_seq = 0;
	mutex_init(&s->s_mutex);

	ceph_con_init(&s->s_con, s, &mds_con_ops, &mdsc->fsc->client->msgr);

	atomic_set(&s->s_cap_gen, 1);
	s->s_cap_ttl = jiffies - 1;

	spin_lock_init(&s->s_cap_lock);
	s->s_renew_requested = 0;
	s->s_renew_seq = 0;
	INIT_LIST_HEAD(&s->s_caps);
	s->s_nr_caps = 0;
	refcount_set(&s->s_ref, 1);
	INIT_LIST_HEAD(&s->s_waiting);
	INIT_LIST_HEAD(&s->s_unsafe);
	xa_init(&s->s_delegated_inos);
	s->s_num_cap_releases = 0;
	s->s_cap_reconnect = 0;
	s->s_cap_iterator = NULL;
	INIT_LIST_HEAD(&s->s_cap_releases);
	INIT_WORK(&s->s_cap_release_work, ceph_cap_release_work);

	INIT_LIST_HEAD(&s->s_cap_dirty);
	INIT_LIST_HEAD(&s->s_cap_flushing);

	mdsc->sessions[mds] = s;
	atomic_inc(&mdsc->num_sessions);
	refcount_inc(&s->s_ref);  /* one ref to sessions[], one to caller */

	ceph_con_open(&s->s_con, CEPH_ENTITY_TYPE_MDS, mds,
		      ceph_mdsmap_get_addr(mdsc->mdsmap, mds));

	return s;

fail_realloc:
	kfree(s);
	return ERR_PTR(-ENOMEM);
}
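
/*
 * Illustrative sketch (not compiled): the power-of-two growth used by
 * register_session() above. get_count_order(n) is the real kernel helper
 * (order of the next power of two), so inserting mds rank 5 grows the
 * array to 1 << get_count_order(6) == 8 slots, and ranks up to 7 then
 * need no realloc. Example values only.
 */
#if 0
static int demo_newmax(int mds)
{
	return 1 << get_count_order(mds + 1);	/* 0->1, 1->2, 5->8, 8->16 */
}
#endif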

/*
 * called under mdsc->mutex
 */
static void __unregister_session(struct ceph_mds_client *mdsc,
				 struct ceph_mds_session *s)
{
	dout("__unregister_session mds%d %p\n", s->s_mds, s);
	BUG_ON(mdsc->sessions[s->s_mds] != s);
	mdsc->sessions[s->s_mds] = NULL;
	ceph_con_close(&s->s_con);
	ceph_put_mds_session(s);
	atomic_dec(&mdsc->num_sessions);
}

/*
 * drop session refs in request.
 *
 * should be last request ref, or hold mdsc->mutex
 */
static void put_request_session(struct ceph_mds_request *req)
{
	if (req->r_session) {
		ceph_put_mds_session(req->r_session);
		req->r_session = NULL;
	}
}

void ceph_mdsc_release_request(struct kref *kref)
{
	struct ceph_mds_request *req = container_of(kref,
						    struct ceph_mds_request,
						    r_kref);
	ceph_mdsc_release_dir_caps_no_check(req);
	destroy_reply_info(&req->r_reply_info);
	if (req->r_request)
		ceph_msg_put(req->r_request);
	if (req->r_reply)
		ceph_msg_put(req->r_reply);
	if (req->r_inode) {
		ceph_put_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
		iput(req->r_inode);
	}
	if (req->r_parent) {
		ceph_put_cap_refs(ceph_inode(req->r_parent), CEPH_CAP_PIN);
		iput(req->r_parent);
	}
	iput(req->r_target_inode);
	if (req->r_dentry)
		dput(req->r_dentry);
	if (req->r_old_dentry)
		dput(req->r_old_dentry);
	if (req->r_old_dentry_dir) {
		/*
		 * track (and drop pins for) r_old_dentry_dir
		 * separately, since r_old_dentry's d_parent may have
		 * changed between the dir mutex being dropped and
		 * this request being freed.
		 */
		ceph_put_cap_refs(ceph_inode(req->r_old_dentry_dir),
				  CEPH_CAP_PIN);
		iput(req->r_old_dentry_dir);
	}
	kfree(req->r_path1);
	kfree(req->r_path2);
	put_cred(req->r_cred);
	if (req->r_pagelist)
		ceph_pagelist_release(req->r_pagelist);
	put_request_session(req);
	ceph_unreserve_caps(req->r_mdsc, &req->r_caps_reservation);
	WARN_ON_ONCE(!list_empty(&req->r_wait));
	kmem_cache_free(ceph_mds_request_cachep, req);
}

DEFINE_RB_FUNCS(request, struct ceph_mds_request, r_tid, r_node)

/*
 * lookup request, bump ref if found.
 *
 * called under mdsc->mutex.
 */
static struct ceph_mds_request *
lookup_get_request(struct ceph_mds_client *mdsc, u64 tid)
{
	struct ceph_mds_request *req;

	req = lookup_request(&mdsc->request_tree, tid);
	if (req)
		ceph_mdsc_get_request(req);

	return req;
}

/*
 * Register an in-flight request, and assign a tid. Link to the
 * directory we are modifying (if any).
 *
 * Called under mdsc->mutex.
 */
static void __register_request(struct ceph_mds_client *mdsc,
			       struct ceph_mds_request *req,
			       struct inode *dir)
{
	int ret = 0;

	req->r_tid = ++mdsc->last_tid;
	if (req->r_num_caps) {
		ret = ceph_reserve_caps(mdsc, &req->r_caps_reservation,
					req->r_num_caps);
		if (ret < 0) {
			pr_err("__register_request %p "
			       "failed to reserve caps: %d\n", req, ret);
			/* set req->r_err to fail early from __do_request */
			req->r_err = ret;
			return;
		}
	}
	dout("__register_request %p tid %lld\n", req, req->r_tid);
	ceph_mdsc_get_request(req);
	insert_request(&mdsc->request_tree, req);

	req->r_cred = get_current_cred();

	if (mdsc->oldest_tid == 0 && req->r_op != CEPH_MDS_OP_SETFILELOCK)
		mdsc->oldest_tid = req->r_tid;

	if (dir) {
		struct ceph_inode_info *ci = ceph_inode(dir);

		ihold(dir);
		req->r_unsafe_dir = dir;
		spin_lock(&ci->i_unsafe_lock);
		list_add_tail(&req->r_unsafe_dir_item, &ci->i_unsafe_dirops);
		spin_unlock(&ci->i_unsafe_lock);
	}
}

static void __unregister_request(struct ceph_mds_client *mdsc,
				 struct ceph_mds_request *req)
{
	dout("__unregister_request %p tid %lld\n", req, req->r_tid);

	/* Never leave an unregistered request on an unsafe list! */
	list_del_init(&req->r_unsafe_item);

	if (req->r_tid == mdsc->oldest_tid) {
		struct rb_node *p = rb_next(&req->r_node);
		mdsc->oldest_tid = 0;
		while (p) {
			struct ceph_mds_request *next_req =
				rb_entry(p, struct ceph_mds_request, r_node);
			if (next_req->r_op != CEPH_MDS_OP_SETFILELOCK) {
				mdsc->oldest_tid = next_req->r_tid;
				break;
			}
			p = rb_next(p);
		}
	}

	erase_request(&mdsc->request_tree, req);

	if (req->r_unsafe_dir) {
		struct ceph_inode_info *ci = ceph_inode(req->r_unsafe_dir);
		spin_lock(&ci->i_unsafe_lock);
		list_del_init(&req->r_unsafe_dir_item);
		spin_unlock(&ci->i_unsafe_lock);
	}
	if (req->r_target_inode &&
	    test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
		struct ceph_inode_info *ci = ceph_inode(req->r_target_inode);
		spin_lock(&ci->i_unsafe_lock);
		list_del_init(&req->r_unsafe_target_item);
		spin_unlock(&ci->i_unsafe_lock);
	}

	if (req->r_unsafe_dir) {
		iput(req->r_unsafe_dir);
		req->r_unsafe_dir = NULL;
	}

	complete_all(&req->r_safe_completion);

	ceph_mdsc_put_request(req);
}

/*
 * Walk back up the dentry tree until we hit a dentry representing a
 * non-snapshot inode. We do this using the rcu_read_lock (which must be held
 * when calling this) to ensure that the objects won't disappear while we're
 * working with them. Once we hit a candidate dentry, we attempt to take a
 * reference to it, and return that as the result.
 */
static struct inode *get_nonsnap_parent(struct dentry *dentry)
{
	struct inode *inode = NULL;

	while (dentry && !IS_ROOT(dentry)) {
		inode = d_inode_rcu(dentry);
		if (!inode || ceph_snap(inode) == CEPH_NOSNAP)
			break;
		dentry = dentry->d_parent;
	}
	if (inode)
		inode = igrab(inode);
	return inode;
}

/*
 * Choose mds to send request to next. If there is a hint set in the
 * request (e.g., due to a prior forward hint from the mds), use that.
 * Otherwise, consult frag tree and/or caps to identify the
 * appropriate mds. If all else fails, choose randomly.
 *
 * Called under mdsc->mutex.
 */
static int __choose_mds(struct ceph_mds_client *mdsc,
			struct ceph_mds_request *req,
			bool *random)
{
	struct inode *inode;
	struct ceph_inode_info *ci;
	struct ceph_cap *cap;
	int mode = req->r_direct_mode;
	int mds = -1;
	u32 hash = req->r_direct_hash;
	bool is_hash = test_bit(CEPH_MDS_R_DIRECT_IS_HASH, &req->r_req_flags);

	if (random)
		*random = false;

	/*
	 * is there a specific mds we should try?  ignore hint if we have
	 * no session and the mds is not up (active or recovering).
	 */
	if (req->r_resend_mds >= 0 &&
	    (__have_session(mdsc, req->r_resend_mds) ||
	     ceph_mdsmap_get_state(mdsc->mdsmap, req->r_resend_mds) > 0)) {
		dout("%s using resend_mds mds%d\n", __func__,
		     req->r_resend_mds);
		return req->r_resend_mds;
	}

	if (mode == USE_RANDOM_MDS)
		goto random;

	inode = NULL;
	if (req->r_inode) {
		if (ceph_snap(req->r_inode) != CEPH_SNAPDIR) {
			inode = req->r_inode;
			ihold(inode);
		} else {
			/* req->r_dentry is non-null for LSSNAP request */
			rcu_read_lock();
			inode = get_nonsnap_parent(req->r_dentry);
			rcu_read_unlock();
			dout("%s using snapdir's parent %p\n", __func__, inode);
		}
	} else if (req->r_dentry) {
		/* ignore race with rename; old or new d_parent is okay */
		struct dentry *parent;
		struct inode *dir;

		rcu_read_lock();
		parent = READ_ONCE(req->r_dentry->d_parent);
		dir = req->r_parent ? : d_inode_rcu(parent);

		if (!dir || dir->i_sb != mdsc->fsc->sb) {
			/* not this fs or parent went negative */
			inode = d_inode(req->r_dentry);
			if (inode)
				ihold(inode);
		} else if (ceph_snap(dir) != CEPH_NOSNAP) {
			/* direct snapped/virtual snapdir requests
			 * based on parent dir inode */
			inode = get_nonsnap_parent(parent);
			dout("%s using nonsnap parent %p\n", __func__, inode);
		} else {
			/* dentry target */
			inode = d_inode(req->r_dentry);
			if (!inode || mode == USE_AUTH_MDS) {
				/* dir + name */
				inode = igrab(dir);
				hash = ceph_dentry_hash(dir, req->r_dentry);
				is_hash = true;
			} else {
				ihold(inode);
			}
		}
		rcu_read_unlock();
	}

	dout("%s %p is_hash=%d (0x%x) mode %d\n", __func__, inode, (int)is_hash,
	     hash, mode);
	if (!inode)
		goto random;
	ci = ceph_inode(inode);

	if (is_hash && S_ISDIR(inode->i_mode)) {
		struct ceph_inode_frag frag;
		int found;

		ceph_choose_frag(ci, hash, &frag, &found);
		if (found) {
			if (mode == USE_ANY_MDS && frag.ndist > 0) {
				u8 r;

				/* choose a random replica */
				get_random_bytes(&r, 1);
				r %= frag.ndist;
				mds = frag.dist[r];
				dout("%s %p %llx.%llx frag %u mds%d (%d/%d)\n",
				     __func__, inode, ceph_vinop(inode),
				     frag.frag, mds, (int)r, frag.ndist);
				if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
				    CEPH_MDS_STATE_ACTIVE &&
				    !ceph_mdsmap_is_laggy(mdsc->mdsmap, mds))
					goto out;
			}

			/* since this file/dir wasn't known to be
			 * replicated, then we want to look for the
			 * authoritative mds. */
			if (frag.mds >= 0) {
				/* choose auth mds */
				mds = frag.mds;
				dout("%s %p %llx.%llx frag %u mds%d (auth)\n",
				     __func__, inode, ceph_vinop(inode),
				     frag.frag, mds);
				if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
				    CEPH_MDS_STATE_ACTIVE) {
					if (!ceph_mdsmap_is_laggy(mdsc->mdsmap,
								  mds))
						goto out;
				}
			}
			mode = USE_AUTH_MDS;
		}
	}

	spin_lock(&ci->i_ceph_lock);
	cap = NULL;
	if (mode == USE_AUTH_MDS)
		cap = ci->i_auth_cap;
	if (!cap && !RB_EMPTY_ROOT(&ci->i_caps))
		cap = rb_entry(rb_first(&ci->i_caps), struct ceph_cap, ci_node);
	if (!cap) {
		spin_unlock(&ci->i_ceph_lock);
		iput(inode);
		goto random;
	}
	mds = cap->session->s_mds;
	dout("%s %p %llx.%llx mds%d (%scap %p)\n", __func__,
	     inode, ceph_vinop(inode), mds,
	     cap == ci->i_auth_cap ? "auth " : "", cap);
	spin_unlock(&ci->i_ceph_lock);
out:
	iput(inode);
	return mds;

random:
	if (random)
		*random = true;

	mds = ceph_mdsmap_get_random_mds(mdsc->mdsmap);
	dout("%s chose random mds%d\n", __func__, mds);
	return mds;
}


/*
 * session messages
 */
static struct ceph_msg *create_session_msg(u32 op, u64 seq)
{
	struct ceph_msg *msg;
	struct ceph_mds_session_head *h;

	msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h), GFP_NOFS,
			   false);
	if (!msg) {
		pr_err("create_session_msg ENOMEM creating msg\n");
		return NULL;
	}
	h = msg->front.iov_base;
	h->op = cpu_to_le32(op);
	h->seq = cpu_to_le64(seq);

	return msg;
}

static const unsigned char feature_bits[] = CEPHFS_FEATURES_CLIENT_SUPPORTED;
#define FEATURE_BYTES(c) (DIV_ROUND_UP((size_t)feature_bits[c - 1] + 1, 64) * 8)
static int encode_supported_features(void **p, void *end)
{
	static const size_t count = ARRAY_SIZE(feature_bits);

	if (count > 0) {
		size_t i;
		size_t size = FEATURE_BYTES(count);

		if (WARN_ON_ONCE(*p + 4 + size > end))
			return -ERANGE;

		ceph_encode_32(p, size);
		memset(*p, 0, size);
		for (i = 0; i < count; i++)
			((unsigned char *)(*p))[i / 8] |= BIT(feature_bits[i] % 8);
		*p += size;
	} else {
		if (WARN_ON_ONCE(*p + 4 > end))
			return -ERANGE;

		ceph_encode_32(p, 0);
	}

	return 0;
}
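
/*
 * Illustrative sketch (not compiled): what FEATURE_BYTES computes. The
 * bitmap is sized in whole 64-bit words (expressed in bytes), based on
 * the highest feature bit advertised. The numbers below are examples,
 * not the real CEPHFS_FEATURES_CLIENT_SUPPORTED contents.
 */
#if 0
/* highest bit 17 -> DIV_ROUND_UP(18, 64) = 1 word  -> 8 bytes  */
/* highest bit 70 -> DIV_ROUND_UP(71, 64) = 2 words -> 16 bytes */
static size_t demo_feature_bytes(unsigned int highest_bit)
{
	return DIV_ROUND_UP((size_t)highest_bit + 1, 64) * 8;
}
#endif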

static const unsigned char metric_bits[] = CEPHFS_METRIC_SPEC_CLIENT_SUPPORTED;
#define METRIC_BYTES(cnt) (DIV_ROUND_UP((size_t)metric_bits[cnt - 1] + 1, 64) * 8)
static int encode_metric_spec(void **p, void *end)
{
	static const size_t count = ARRAY_SIZE(metric_bits);

	/* header */
	if (WARN_ON_ONCE(*p + 2 > end))
		return -ERANGE;

	ceph_encode_8(p, 1); /* version */
	ceph_encode_8(p, 1); /* compat */

	if (count > 0) {
		size_t i;
		size_t size = METRIC_BYTES(count);

		if (WARN_ON_ONCE(*p + 4 + 4 + size > end))
			return -ERANGE;

		/* metric spec info length */
		ceph_encode_32(p, 4 + size);

		/* metric spec */
		ceph_encode_32(p, size);
		memset(*p, 0, size);
		for (i = 0; i < count; i++)
			((unsigned char *)(*p))[i / 8] |= BIT(metric_bits[i] % 8);
		*p += size;
	} else {
		if (WARN_ON_ONCE(*p + 4 + 4 > end))
			return -ERANGE;

		/* metric spec info length */
		ceph_encode_32(p, 4);
		/* metric spec */
		ceph_encode_32(p, 0);
	}

	return 0;
}

/*
 * session message, specialization for CEPH_SESSION_REQUEST_OPEN
 * to include additional client metadata fields.
 */
static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u64 seq)
{
	struct ceph_msg *msg;
	struct ceph_mds_session_head *h;
	int i;
	int extra_bytes = 0;
	int metadata_key_count = 0;
	struct ceph_options *opt = mdsc->fsc->client->options;
	struct ceph_mount_options *fsopt = mdsc->fsc->mount_options;
	size_t size, count;
	void *p, *end;
	int ret;

	const char *metadata[][2] = {
		{"hostname", mdsc->nodename},
		{"kernel_version", init_utsname()->release},
		{"entity_id", opt->name ? : ""},
		{"root", fsopt->server_path ? : "/"},
		{NULL, NULL}
	};

	/* Calculate serialized length of metadata */
	extra_bytes = 4;  /* map length */
	for (i = 0; metadata[i][0]; ++i) {
		extra_bytes += 8 + strlen(metadata[i][0]) +
			strlen(metadata[i][1]);
		metadata_key_count++;
	}

	/* supported feature */
	size = 0;
	count = ARRAY_SIZE(feature_bits);
	if (count > 0)
		size = FEATURE_BYTES(count);
	extra_bytes += 4 + size;

	/* metric spec */
	size = 0;
	count = ARRAY_SIZE(metric_bits);
	if (count > 0)
		size = METRIC_BYTES(count);
	extra_bytes += 2 + 4 + 4 + size;

	/* Allocate the message */
	msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h) + extra_bytes,
			   GFP_NOFS, false);
	if (!msg) {
		pr_err("create_session_msg ENOMEM creating msg\n");
		return ERR_PTR(-ENOMEM);
	}
	p = msg->front.iov_base;
	end = p + msg->front.iov_len;

	h = p;
	h->op = cpu_to_le32(CEPH_SESSION_REQUEST_OPEN);
	h->seq = cpu_to_le64(seq);

	/*
	 * Serialize client metadata into waiting buffer space, using
	 * the format that userspace expects for map<string, string>
	 *
	 * ClientSession messages with metadata are v4
	 */
	msg->hdr.version = cpu_to_le16(4);
	msg->hdr.compat_version = cpu_to_le16(1);

	/* The write pointer, following the session_head structure */
	p += sizeof(*h);

	/* Number of entries in the map */
	ceph_encode_32(&p, metadata_key_count);

	/* Two length-prefixed strings for each entry in the map */
	for (i = 0; metadata[i][0]; ++i) {
		size_t const key_len = strlen(metadata[i][0]);
		size_t const val_len = strlen(metadata[i][1]);

		ceph_encode_32(&p, key_len);
		memcpy(p, metadata[i][0], key_len);
		p += key_len;
		ceph_encode_32(&p, val_len);
		memcpy(p, metadata[i][1], val_len);
		p += val_len;
	}

	ret = encode_supported_features(&p, end);
	if (ret) {
		pr_err("encode_supported_features failed!\n");
		ceph_msg_put(msg);
		return ERR_PTR(ret);
	}

	ret = encode_metric_spec(&p, end);
	if (ret) {
		pr_err("encode_metric_spec failed!\n");
		ceph_msg_put(msg);
		return ERR_PTR(ret);
	}

	msg->front.iov_len = p - msg->front.iov_base;
	msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);

	return msg;
}
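
/*
 * Illustrative sketch (not compiled): the wire format produced above for
 * one map<string, string> entry. Each key and value is a u32 length
 * followed by raw bytes, which is why extra_bytes counts 8 bytes of
 * per-entry overhead. demo_encode_kv() is a hypothetical helper built on
 * the real ceph_encode_32()/ceph_encode_copy().
 */
#if 0
static void demo_encode_kv(void **p, const char *key, const char *val)
{
	u32 len = strlen(key);

	ceph_encode_32(p, len);			/* key length */
	ceph_encode_copy(p, key, len);		/* key bytes, no NUL */
	len = strlen(val);
	ceph_encode_32(p, len);			/* value length */
	ceph_encode_copy(p, val, len);		/* value bytes, no NUL */
}
#endif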

/*
 * send session open request.
 *
 * called under mdsc->mutex
 */
static int __open_session(struct ceph_mds_client *mdsc,
			  struct ceph_mds_session *session)
{
	struct ceph_msg *msg;
	int mstate;
	int mds = session->s_mds;

	/* wait for mds to go active? */
	mstate = ceph_mdsmap_get_state(mdsc->mdsmap, mds);
	dout("open_session to mds%d (%s)\n", mds,
	     ceph_mds_state_name(mstate));
	session->s_state = CEPH_MDS_SESSION_OPENING;
	session->s_renew_requested = jiffies;

	/* send connect message */
	msg = create_session_open_msg(mdsc, session->s_seq);
	if (IS_ERR(msg))
		return PTR_ERR(msg);
	ceph_con_send(&session->s_con, msg);
	return 0;
}

/*
 * open sessions for any export targets for the given mds
 *
 * called under mdsc->mutex
 */
static struct ceph_mds_session *
__open_export_target_session(struct ceph_mds_client *mdsc, int target)
{
	struct ceph_mds_session *session;
	int ret;

	session = __ceph_lookup_mds_session(mdsc, target);
	if (!session) {
		session = register_session(mdsc, target);
		if (IS_ERR(session))
			return session;
	}
	if (session->s_state == CEPH_MDS_SESSION_NEW ||
	    session->s_state == CEPH_MDS_SESSION_CLOSING) {
		ret = __open_session(mdsc, session);
		if (ret)
			return ERR_PTR(ret);
	}

	return session;
}

struct ceph_mds_session *
ceph_mdsc_open_export_target_session(struct ceph_mds_client *mdsc, int target)
{
	struct ceph_mds_session *session;

	dout("open_export_target_session to mds%d\n", target);

	mutex_lock(&mdsc->mutex);
	session = __open_export_target_session(mdsc, target);
	mutex_unlock(&mdsc->mutex);

	return session;
}

static void __open_export_target_sessions(struct ceph_mds_client *mdsc,
					  struct ceph_mds_session *session)
{
	struct ceph_mds_info *mi;
	struct ceph_mds_session *ts;
	int i, mds = session->s_mds;

	if (mds >= mdsc->mdsmap->possible_max_rank)
		return;

	mi = &mdsc->mdsmap->m_info[mds];
	dout("open_export_target_sessions for mds%d (%d targets)\n",
	     session->s_mds, mi->num_export_targets);

	for (i = 0; i < mi->num_export_targets; i++) {
		ts = __open_export_target_session(mdsc, mi->export_targets[i]);
		ceph_put_mds_session(ts);
	}
}

void ceph_mdsc_open_export_target_sessions(struct ceph_mds_client *mdsc,
					   struct ceph_mds_session *session)
{
	mutex_lock(&mdsc->mutex);
	__open_export_target_sessions(mdsc, session);
	mutex_unlock(&mdsc->mutex);
}

/*
 * session caps
 */

static void detach_cap_releases(struct ceph_mds_session *session,
				struct list_head *target)
{
	lockdep_assert_held(&session->s_cap_lock);

	list_splice_init(&session->s_cap_releases, target);
	session->s_num_cap_releases = 0;
	dout("dispose_cap_releases mds%d\n", session->s_mds);
}

static void dispose_cap_releases(struct ceph_mds_client *mdsc,
				 struct list_head *dispose)
{
	while (!list_empty(dispose)) {
		struct ceph_cap *cap;
		/* put each cap that was queued for release */
		cap = list_first_entry(dispose, struct ceph_cap, session_caps);
		list_del(&cap->session_caps);
		ceph_put_cap(mdsc, cap);
	}
}

static void cleanup_session_requests(struct ceph_mds_client *mdsc,
				     struct ceph_mds_session *session)
{
	struct ceph_mds_request *req;
	struct rb_node *p;

	dout("cleanup_session_requests mds%d\n", session->s_mds);
	mutex_lock(&mdsc->mutex);
	while (!list_empty(&session->s_unsafe)) {
		req = list_first_entry(&session->s_unsafe,
				       struct ceph_mds_request, r_unsafe_item);
		pr_warn_ratelimited(" dropping unsafe request %llu\n",
				    req->r_tid);
		if (req->r_target_inode)
			mapping_set_error(req->r_target_inode->i_mapping, -EIO);
		if (req->r_unsafe_dir)
			mapping_set_error(req->r_unsafe_dir->i_mapping, -EIO);
		__unregister_request(mdsc, req);
	}
	/* zero r_attempts, so kick_requests() will re-send requests */
	p = rb_first(&mdsc->request_tree);
	while (p) {
		req = rb_entry(p, struct ceph_mds_request, r_node);
		p = rb_next(p);
		if (req->r_session &&
		    req->r_session->s_mds == session->s_mds)
			req->r_attempts = 0;
	}
	mutex_unlock(&mdsc->mutex);
}

/*
 * Helper to safely iterate over all caps associated with a session, with
 * special care taken to handle a racing __ceph_remove_cap().
 *
 * Caller must hold session s_mutex.
 */
int ceph_iterate_session_caps(struct ceph_mds_session *session,
			      int (*cb)(struct inode *, struct ceph_cap *,
					void *), void *arg)
{
	struct list_head *p;
	struct ceph_cap *cap;
	struct inode *inode, *last_inode = NULL;
	struct ceph_cap *old_cap = NULL;
	int ret;

	dout("iterate_session_caps %p mds%d\n", session, session->s_mds);
	spin_lock(&session->s_cap_lock);
	p = session->s_caps.next;
	while (p != &session->s_caps) {
		cap = list_entry(p, struct ceph_cap, session_caps);
		inode = igrab(&cap->ci->vfs_inode);
		if (!inode) {
			p = p->next;
			continue;
		}
		session->s_cap_iterator = cap;
		spin_unlock(&session->s_cap_lock);

		if (last_inode) {
			iput(last_inode);
			last_inode = NULL;
		}
		if (old_cap) {
			ceph_put_cap(session->s_mdsc, old_cap);
			old_cap = NULL;
		}

		ret = cb(inode, cap, arg);
		last_inode = inode;

		spin_lock(&session->s_cap_lock);
		p = p->next;
		if (!cap->ci) {
			dout("iterate_session_caps finishing cap %p removal\n",
			     cap);
			BUG_ON(cap->session != session);
			cap->session = NULL;
			list_del_init(&cap->session_caps);
			session->s_nr_caps--;
			atomic64_dec(&session->s_mdsc->metric.total_caps);
			if (cap->queue_release)
				__ceph_queue_cap_release(session, cap);
			else
				old_cap = cap;  /* put_cap it w/o locks held */
		}
		if (ret < 0)
			goto out;
	}
	ret = 0;
out:
	session->s_cap_iterator = NULL;
	spin_unlock(&session->s_cap_lock);

	iput(last_inode);
	if (old_cap)
		ceph_put_cap(session->s_mdsc, old_cap);

	return ret;
}
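
/*
 * Illustrative sketch (not compiled): the lock-drop iteration skeleton
 * used above, with demo_* types and callbacks as hypothetical stand-ins.
 * The list lock cannot be held across a callback that may sleep, so the
 * lock is dropped around it and the cursor is re-read once it is
 * retaken; the real code additionally pins the inode (igrab) and parks
 * a cursor (s_cap_iterator) so the current node cannot be freed.
 */
#if 0
struct demo_elem {
	struct list_head node;
	/* ... payload ... */
};

static void demo_iterate(struct list_head *head, spinlock_t *lock,
			 void (*cb)(struct demo_elem *))
{
	struct list_head *p;

	spin_lock(lock);
	p = head->next;
	while (p != head) {
		struct demo_elem *e = list_entry(p, struct demo_elem, node);

		spin_unlock(lock);	/* callback may sleep */
		cb(e);
		spin_lock(lock);
		p = p->next;		/* re-read under the lock */
	}
	spin_unlock(lock);
}
#endif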

static int remove_capsnaps(struct ceph_mds_client *mdsc, struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_cap_snap *capsnap;
	int capsnap_release = 0;

	lockdep_assert_held(&ci->i_ceph_lock);

	dout("removing capsnaps, ci is %p, inode is %p\n", ci, inode);

	while (!list_empty(&ci->i_cap_snaps)) {
		capsnap = list_first_entry(&ci->i_cap_snaps,
					   struct ceph_cap_snap, ci_item);
		__ceph_remove_capsnap(inode, capsnap, NULL, NULL);
		ceph_put_snap_context(capsnap->context);
		ceph_put_cap_snap(capsnap);
		capsnap_release++;
	}
	wake_up_all(&ci->i_cap_wq);
	wake_up_all(&mdsc->cap_flushing_wq);
	return capsnap_release;
}

static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
				  void *arg)
{
	struct ceph_fs_client *fsc = (struct ceph_fs_client *)arg;
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_inode_info *ci = ceph_inode(inode);
	LIST_HEAD(to_remove);
	bool dirty_dropped = false;
	bool invalidate = false;
	int capsnap_release = 0;

	dout("removing cap %p, ci is %p, inode is %p\n",
	     cap, ci, &ci->vfs_inode);
	spin_lock(&ci->i_ceph_lock);
	__ceph_remove_cap(cap, false);
	if (!ci->i_auth_cap) {
		struct ceph_cap_flush *cf;

		if (READ_ONCE(fsc->mount_state) >= CEPH_MOUNT_SHUTDOWN) {
			if (inode->i_data.nrpages > 0)
				invalidate = true;
			if (ci->i_wrbuffer_ref > 0)
				mapping_set_error(&inode->i_data, -EIO);
		}

		while (!list_empty(&ci->i_cap_flush_list)) {
			cf = list_first_entry(&ci->i_cap_flush_list,
					      struct ceph_cap_flush, i_list);
			list_move(&cf->i_list, &to_remove);
		}

		spin_lock(&mdsc->cap_dirty_lock);

		list_for_each_entry(cf, &to_remove, i_list)
			list_del_init(&cf->g_list);

		if (!list_empty(&ci->i_dirty_item)) {
			pr_warn_ratelimited(
				" dropping dirty %s state for %p %lld\n",
				ceph_cap_string(ci->i_dirty_caps),
				inode, ceph_ino(inode));
			ci->i_dirty_caps = 0;
			list_del_init(&ci->i_dirty_item);
			dirty_dropped = true;
		}
		if (!list_empty(&ci->i_flushing_item)) {
			pr_warn_ratelimited(
				" dropping dirty+flushing %s state for %p %lld\n",
				ceph_cap_string(ci->i_flushing_caps),
				inode, ceph_ino(inode));
			ci->i_flushing_caps = 0;
			list_del_init(&ci->i_flushing_item);
			mdsc->num_cap_flushing--;
			dirty_dropped = true;
		}
		spin_unlock(&mdsc->cap_dirty_lock);

		if (dirty_dropped) {
			mapping_set_error(inode->i_mapping, -EIO);

			if (ci->i_wrbuffer_ref_head == 0 &&
			    ci->i_wr_ref == 0 &&
			    ci->i_dirty_caps == 0 &&
			    ci->i_flushing_caps == 0) {
				ceph_put_snap_context(ci->i_head_snapc);
				ci->i_head_snapc = NULL;
			}
		}

		if (atomic_read(&ci->i_filelock_ref) > 0) {
			/* make further file lock syscall return -EIO */
			ci->i_ceph_flags |= CEPH_I_ERROR_FILELOCK;
			pr_warn_ratelimited(" dropping file locks for %p %lld\n",
					    inode, ceph_ino(inode));
		}

		if (!ci->i_dirty_caps && ci->i_prealloc_cap_flush) {
			list_add(&ci->i_prealloc_cap_flush->i_list, &to_remove);
			ci->i_prealloc_cap_flush = NULL;
		}

		if (!list_empty(&ci->i_cap_snaps))
			capsnap_release = remove_capsnaps(mdsc, inode);
	}
	spin_unlock(&ci->i_ceph_lock);
	while (!list_empty(&to_remove)) {
		struct ceph_cap_flush *cf;
		cf = list_first_entry(&to_remove,
				      struct ceph_cap_flush, i_list);
		list_del_init(&cf->i_list);
		if (!cf->is_capsnap)
			ceph_free_cap_flush(cf);
	}

	wake_up_all(&ci->i_cap_wq);
	if (invalidate)
		ceph_queue_invalidate(inode);
	if (dirty_dropped)
		iput(inode);
	while (capsnap_release--)
		iput(inode);
	return 0;
}

/*
 * caller must hold session s_mutex
 */
static void remove_session_caps(struct ceph_mds_session *session)
{
	struct ceph_fs_client *fsc = session->s_mdsc->fsc;
	struct super_block *sb = fsc->sb;
	LIST_HEAD(dispose);

	dout("remove_session_caps on %p\n", session);
	ceph_iterate_session_caps(session, remove_session_caps_cb, fsc);

	wake_up_all(&fsc->mdsc->cap_flushing_wq);

	spin_lock(&session->s_cap_lock);
	if (session->s_nr_caps > 0) {
		struct inode *inode;
		struct ceph_cap *cap, *prev = NULL;
		struct ceph_vino vino;
		/*
		 * iterate_session_caps() skips inodes that are being
		 * deleted, we need to wait until deletions are complete.
		 * __wait_on_freeing_inode() is designed for the job,
		 * but it is not exported, so use lookup inode function
		 * to access it.
		 */
		while (!list_empty(&session->s_caps)) {
			cap = list_entry(session->s_caps.next,
					 struct ceph_cap, session_caps);
			if (cap == prev)
				break;
			prev = cap;
			vino = cap->ci->i_vino;
			spin_unlock(&session->s_cap_lock);

			inode = ceph_find_inode(sb, vino);
			iput(inode);

			spin_lock(&session->s_cap_lock);
		}
	}

	/* detach any queued cap releases; s_cap_lock is dropped below */
	detach_cap_releases(session, &dispose);

	BUG_ON(session->s_nr_caps > 0);
	BUG_ON(!list_empty(&session->s_cap_flushing));
	spin_unlock(&session->s_cap_lock);
	dispose_cap_releases(session->s_mdsc, &dispose);
}

enum {
	RECONNECT,
	RENEWCAPS,
	FORCE_RO,
};

/*
 * wake up any threads waiting on this session's caps.  if the cap is
 * old (didn't get renewed on the client reconnect), remove it now.
 *
 * caller must hold s_mutex.
 */
static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap,
			      void *arg)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	unsigned long ev = (unsigned long)arg;

	if (ev == RECONNECT) {
		spin_lock(&ci->i_ceph_lock);
		ci->i_wanted_max_size = 0;
		ci->i_requested_max_size = 0;
		spin_unlock(&ci->i_ceph_lock);
	} else if (ev == RENEWCAPS) {
		if (cap->cap_gen < atomic_read(&cap->session->s_cap_gen)) {
			/* mds did not re-issue stale cap */
			spin_lock(&ci->i_ceph_lock);
			cap->issued = cap->implemented = CEPH_CAP_PIN;
			spin_unlock(&ci->i_ceph_lock);
		}
	} else if (ev == FORCE_RO) {
	}
	wake_up_all(&ci->i_cap_wq);
	return 0;
}

static void wake_up_session_caps(struct ceph_mds_session *session, int ev)
{
	dout("wake_up_session_caps %p mds%d\n", session, session->s_mds);
	ceph_iterate_session_caps(session, wake_up_session_cb,
				  (void *)(unsigned long)ev);
}

/*
 * Send periodic message to MDS renewing all currently held caps.  The
 * ack will reset the expiration for all caps from this session.
 *
 * caller holds s_mutex
 */
static int send_renew_caps(struct ceph_mds_client *mdsc,
			   struct ceph_mds_session *session)
{
	struct ceph_msg *msg;
	int state;

	if (time_after_eq(jiffies, session->s_cap_ttl) &&
	    time_after_eq(session->s_cap_ttl, session->s_renew_requested))
		pr_info("mds%d caps stale\n", session->s_mds);
	session->s_renew_requested = jiffies;

	/* do not try to renew caps until a recovering mds has reconnected
	 * with its clients. */
	state = ceph_mdsmap_get_state(mdsc->mdsmap, session->s_mds);
	if (state < CEPH_MDS_STATE_RECONNECT) {
		dout("send_renew_caps ignoring mds%d (%s)\n",
		     session->s_mds, ceph_mds_state_name(state));
		return 0;
	}

	dout("send_renew_caps to mds%d (%s)\n", session->s_mds,
	     ceph_mds_state_name(state));
	msg = create_session_msg(CEPH_SESSION_REQUEST_RENEWCAPS,
				 ++session->s_renew_seq);
	if (!msg)
		return -ENOMEM;
	ceph_con_send(&session->s_con, msg);
	return 0;
}

static int send_flushmsg_ack(struct ceph_mds_client *mdsc,
			     struct ceph_mds_session *session, u64 seq)
{
	struct ceph_msg *msg;

	dout("send_flushmsg_ack to mds%d (%s) seq %lld\n",
	     session->s_mds, ceph_session_state_name(session->s_state), seq);
	msg = create_session_msg(CEPH_SESSION_FLUSHMSG_ACK, seq);
	if (!msg)
		return -ENOMEM;
	ceph_con_send(&session->s_con, msg);
	return 0;
}


/*
 * Note new cap ttl, and any transition from stale -> not stale (fresh?).
 *
 * Called under session->s_mutex
 */
static void renewed_caps(struct ceph_mds_client *mdsc,
			 struct ceph_mds_session *session, int is_renew)
{
	int was_stale;
	int wake = 0;

	spin_lock(&session->s_cap_lock);
	was_stale = is_renew && time_after_eq(jiffies, session->s_cap_ttl);

	session->s_cap_ttl = session->s_renew_requested +
		mdsc->mdsmap->m_session_timeout * HZ;

	if (was_stale) {
		if (time_before(jiffies, session->s_cap_ttl)) {
			pr_info("mds%d caps renewed\n", session->s_mds);
			wake = 1;
		} else {
			pr_info("mds%d caps still stale\n", session->s_mds);
		}
	}
	dout("renewed_caps mds%d ttl now %lu, was %s, now %s\n",
	     session->s_mds, session->s_cap_ttl, was_stale ? "stale" : "fresh",
	     time_before(jiffies, session->s_cap_ttl) ? "fresh" : "stale");
	spin_unlock(&session->s_cap_lock);

	if (wake)
		wake_up_session_caps(session, RENEWCAPS);
}
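
/*
 * Illustrative sketch (not compiled): the jiffies arithmetic above. The
 * ttl is anchored to when the renew was *requested*, not when the ack
 * arrived, so a slow ack shrinks the usable window and can leave the
 * caps still stale. demo_ttl_still_valid() is hypothetical.
 */
#if 0
static bool demo_ttl_still_valid(unsigned long requested,
				 unsigned long timeout_secs)
{
	unsigned long ttl = requested + timeout_secs * HZ;

	/* false if the ack took longer than the whole timeout */
	return time_before(jiffies, ttl);
}
#endif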

/*
 * send a session close request
 */
static int request_close_session(struct ceph_mds_session *session)
{
	struct ceph_msg *msg;

	dout("request_close_session mds%d state %s seq %lld\n",
	     session->s_mds, ceph_session_state_name(session->s_state),
	     session->s_seq);
	msg = create_session_msg(CEPH_SESSION_REQUEST_CLOSE, session->s_seq);
	if (!msg)
		return -ENOMEM;
	ceph_con_send(&session->s_con, msg);
	return 1;
}

/*
 * Called with s_mutex held.
 */
static int __close_session(struct ceph_mds_client *mdsc,
			   struct ceph_mds_session *session)
{
	if (session->s_state >= CEPH_MDS_SESSION_CLOSING)
		return 0;
	session->s_state = CEPH_MDS_SESSION_CLOSING;
	return request_close_session(session);
}

static bool drop_negative_children(struct dentry *dentry)
{
	struct dentry *child;
	bool all_negative = true;

	if (!d_is_dir(dentry))
		goto out;

	spin_lock(&dentry->d_lock);
	list_for_each_entry(child, &dentry->d_subdirs, d_child) {
		if (d_really_is_positive(child)) {
			all_negative = false;
			break;
		}
	}
	spin_unlock(&dentry->d_lock);

	if (all_negative)
		shrink_dcache_parent(dentry);
out:
	return all_negative;
}

/*
 * Trim old(er) caps.
 *
 * Because we can't cache an inode without one or more caps, we do
 * this indirectly: if a cap is unused, we prune its aliases, at which
 * point the inode will hopefully get dropped too.
 *
 * Yes, this is a bit sloppy.  Our only real goal here is to respond to
 * memory pressure from the MDS, though, so it needn't be perfect.
 */
static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
{
	int *remaining = arg;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int used, wanted, oissued, mine;

	if (*remaining <= 0)
		return -1;

	spin_lock(&ci->i_ceph_lock);
	mine = cap->issued | cap->implemented;
	used = __ceph_caps_used(ci);
	wanted = __ceph_caps_file_wanted(ci);
	oissued = __ceph_caps_issued_other(ci, cap);

	dout("trim_caps_cb %p cap %p mine %s oissued %s used %s wanted %s\n",
	     inode, cap, ceph_cap_string(mine), ceph_cap_string(oissued),
	     ceph_cap_string(used), ceph_cap_string(wanted));
	if (cap == ci->i_auth_cap) {
		if (ci->i_dirty_caps || ci->i_flushing_caps ||
		    !list_empty(&ci->i_cap_snaps))
			goto out;
		if ((used | wanted) & CEPH_CAP_ANY_WR)
			goto out;
		/* Note: it's possible that i_filelock_ref becomes non-zero
		 * after dropping auth caps. It doesn't hurt because reply
		 * of lock mds request will re-add auth caps. */
		if (atomic_read(&ci->i_filelock_ref) > 0)
			goto out;
	}
	/* The inode has cached pages, but it's no longer used.
	 * We can safely drop it. */
	if (S_ISREG(inode->i_mode) &&
	    wanted == 0 && used == CEPH_CAP_FILE_CACHE &&
	    !(oissued & CEPH_CAP_FILE_CACHE)) {
		used = 0;
		oissued = 0;
	}
	if ((used | wanted) & ~oissued & mine)
		goto out;   /* we need these caps */

	if (oissued) {
		/* we aren't the only cap.. just remove us */
		__ceph_remove_cap(cap, true);
		(*remaining)--;
	} else {
		struct dentry *dentry;
		/* try dropping referring dentries */
		spin_unlock(&ci->i_ceph_lock);
		dentry = d_find_any_alias(inode);
		if (dentry && drop_negative_children(dentry)) {
			int count;
			dput(dentry);
			d_prune_aliases(inode);
			count = atomic_read(&inode->i_count);
			if (count == 1)
				(*remaining)--;
			dout("trim_caps_cb %p cap %p pruned, count now %d\n",
			     inode, cap, count);
		} else {
			dput(dentry);
		}
		return 0;
	}

out:
	spin_unlock(&ci->i_ceph_lock);
	return 0;
}

/*
 * Trim session cap count down to some max number.
 */
int ceph_trim_caps(struct ceph_mds_client *mdsc,
		   struct ceph_mds_session *session,
		   int max_caps)
{
	int trim_caps = session->s_nr_caps - max_caps;

	dout("trim_caps mds%d start: %d / %d, trim %d\n",
	     session->s_mds, session->s_nr_caps, max_caps, trim_caps);
	if (trim_caps > 0) {
		int remaining = trim_caps;

		ceph_iterate_session_caps(session, trim_caps_cb, &remaining);
		dout("trim_caps mds%d done: %d / %d, trimmed %d\n",
		     session->s_mds, session->s_nr_caps, max_caps,
		     trim_caps - remaining);
	}

	ceph_flush_cap_releases(mdsc, session);
	return 0;
}

static int check_caps_flush(struct ceph_mds_client *mdsc,
			    u64 want_flush_tid)
{
	int ret = 1;

	spin_lock(&mdsc->cap_dirty_lock);
	if (!list_empty(&mdsc->cap_flush_list)) {
		struct ceph_cap_flush *cf =
			list_first_entry(&mdsc->cap_flush_list,
					 struct ceph_cap_flush, g_list);
		if (cf->tid <= want_flush_tid) {
			dout("check_caps_flush still flushing tid "
			     "%llu <= %llu\n", cf->tid, want_flush_tid);
			ret = 0;
		}
	}
	spin_unlock(&mdsc->cap_dirty_lock);
	return ret;
}

/*
 * flush all dirty inode data to disk.
 *
 * returns once we've flushed through want_flush_tid
 */
static void wait_caps_flush(struct ceph_mds_client *mdsc,
			    u64 want_flush_tid)
{
	dout("check_caps_flush want %llu\n", want_flush_tid);

	wait_event(mdsc->cap_flushing_wq,
		   check_caps_flush(mdsc, want_flush_tid));

	dout("check_caps_flush ok, flushed thru %llu\n", want_flush_tid);
}

/*
 * called under s_mutex
 */
static void ceph_send_cap_releases(struct ceph_mds_client *mdsc,
				   struct ceph_mds_session *session)
{
	struct ceph_msg *msg = NULL;
	struct ceph_mds_cap_release *head;
	struct ceph_mds_cap_item *item;
	struct ceph_osd_client *osdc = &mdsc->fsc->client->osdc;
	struct ceph_cap *cap;
	LIST_HEAD(tmp_list);
	int num_cap_releases;
	__le32 barrier, *cap_barrier;

	down_read(&osdc->lock);
	barrier = cpu_to_le32(osdc->epoch_barrier);
	up_read(&osdc->lock);

	spin_lock(&session->s_cap_lock);
again:
	list_splice_init(&session->s_cap_releases, &tmp_list);
	num_cap_releases = session->s_num_cap_releases;
	session->s_num_cap_releases = 0;
	spin_unlock(&session->s_cap_lock);

	while (!list_empty(&tmp_list)) {
		if (!msg) {
			msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPRELEASE,
					   PAGE_SIZE, GFP_NOFS, false);
			if (!msg)
				goto out_err;
			head = msg->front.iov_base;
			head->num = cpu_to_le32(0);
			msg->front.iov_len = sizeof(*head);

			msg->hdr.version = cpu_to_le16(2);
			msg->hdr.compat_version = cpu_to_le16(1);
		}

		cap = list_first_entry(&tmp_list, struct ceph_cap,
				       session_caps);
		list_del(&cap->session_caps);
		num_cap_releases--;

		head = msg->front.iov_base;
		put_unaligned_le32(get_unaligned_le32(&head->num) + 1,
				   &head->num);
		item = msg->front.iov_base + msg->front.iov_len;
		item->ino = cpu_to_le64(cap->cap_ino);
		item->cap_id = cpu_to_le64(cap->cap_id);
		item->migrate_seq = cpu_to_le32(cap->mseq);
		item->seq = cpu_to_le32(cap->issue_seq);
		msg->front.iov_len += sizeof(*item);

		ceph_put_cap(mdsc, cap);

		if (le32_to_cpu(head->num) == CEPH_CAPS_PER_RELEASE) {
			/* append the cap_barrier field and send */
			cap_barrier = msg->front.iov_base + msg->front.iov_len;
			*cap_barrier = barrier;
			msg->front.iov_len += sizeof(*cap_barrier);

			msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
			dout("send_cap_releases mds%d %p\n", session->s_mds, msg);
			ceph_con_send(&session->s_con, msg);
			msg = NULL;
		}
	}

	BUG_ON(num_cap_releases != 0);

	spin_lock(&session->s_cap_lock);
	if (!list_empty(&session->s_cap_releases))
		goto again;
	spin_unlock(&session->s_cap_lock);

	if (msg) {
		/* append the cap_barrier field and send */
		cap_barrier = msg->front.iov_base + msg->front.iov_len;
		*cap_barrier = barrier;
		msg->front.iov_len += sizeof(*cap_barrier);

		msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
		dout("send_cap_releases mds%d %p\n", session->s_mds, msg);
		ceph_con_send(&session->s_con, msg);
	}
	return;
out_err:
	pr_err("send_cap_releases mds%d, failed to allocate message\n",
	       session->s_mds);
	spin_lock(&session->s_cap_lock);
	list_splice(&tmp_list, &session->s_cap_releases);
	session->s_num_cap_releases += num_cap_releases;
	spin_unlock(&session->s_cap_lock);
}

static void ceph_cap_release_work(struct work_struct *work)
{
	struct ceph_mds_session *session =
		container_of(work, struct ceph_mds_session, s_cap_release_work);

	mutex_lock(&session->s_mutex);
	if (session->s_state == CEPH_MDS_SESSION_OPEN ||
	    session->s_state == CEPH_MDS_SESSION_HUNG)
		ceph_send_cap_releases(session->s_mdsc, session);
	mutex_unlock(&session->s_mutex);
	ceph_put_mds_session(session);
}

void ceph_flush_cap_releases(struct ceph_mds_client *mdsc,
			     struct ceph_mds_session *session)
{
	if (mdsc->stopping)
		return;

	ceph_get_mds_session(session);
	if (queue_work(mdsc->fsc->cap_wq,
		       &session->s_cap_release_work)) {
		dout("cap release work queued\n");
	} else {
		ceph_put_mds_session(session);
		dout("failed to queue cap release work\n");
	}
}

/*
 * caller holds session->s_cap_lock
 */
void __ceph_queue_cap_release(struct ceph_mds_session *session,
			      struct ceph_cap *cap)
{
	list_add_tail(&cap->session_caps, &session->s_cap_releases);
	session->s_num_cap_releases++;

	if (!(session->s_num_cap_releases % CEPH_CAPS_PER_RELEASE))
		ceph_flush_cap_releases(session->s_mdsc, session);
}

static void ceph_cap_reclaim_work(struct work_struct *work)
{
	struct ceph_mds_client *mdsc =
		container_of(work, struct ceph_mds_client, cap_reclaim_work);
	int ret = ceph_trim_dentries(mdsc);
	if (ret == -EAGAIN)
		ceph_queue_cap_reclaim_work(mdsc);
}

void ceph_queue_cap_reclaim_work(struct ceph_mds_client *mdsc)
{
	if (mdsc->stopping)
		return;

	if (queue_work(mdsc->fsc->cap_wq, &mdsc->cap_reclaim_work)) {
		dout("caps reclaim work queued\n");
	} else {
		dout("failed to queue caps reclaim work\n");
	}
}
2236
2237void ceph_reclaim_caps_nr(struct ceph_mds_client *mdsc, int nr)
2238{
2239 int val;
2240 if (!nr)
2241 return;
2242 val = atomic_add_return(nr, &mdsc->cap_reclaim_pending);
2243 if ((val % CEPH_CAPS_PER_RELEASE) < nr) {
2244 atomic_set(&mdsc->cap_reclaim_pending, 0);
2245 ceph_queue_cap_reclaim_work(mdsc);
2246 }
2247}
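
/*
 * A worked example of the trigger above (illustrative only; the real
 * CEPH_CAPS_PER_RELEASE is derived from the page size): suppose it were
 * 100 and cap_reclaim_pending held 98.  A call with nr == 5 makes
 * val == 103, and (103 % 100) == 3 < 5 -- the counter just crossed a
 * multiple of the batch size -- so the pending count is reset and the
 * reclaim work is queued.  Starting from pending == 10 instead,
 * val == 15 and (15 % 100) == 15 is not < 5, so nothing is queued yet.
 */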
2248
2249/*
2250 * requests
2251 */
2252
2253int ceph_alloc_readdir_reply_buffer(struct ceph_mds_request *req,
2254 struct inode *dir)
2255{
2256 struct ceph_inode_info *ci = ceph_inode(dir);
2257 struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
2258 struct ceph_mount_options *opt = req->r_mdsc->fsc->mount_options;
2259 size_t size = sizeof(struct ceph_mds_reply_dir_entry);
2260 unsigned int num_entries;
2261 int order;
2262
2263 spin_lock(&ci->i_ceph_lock);
2264 num_entries = ci->i_files + ci->i_subdirs;
2265 spin_unlock(&ci->i_ceph_lock);
2266 num_entries = max(num_entries, 1U);
2267 num_entries = min(num_entries, opt->max_readdir);
2268
2269 order = get_order(size * num_entries);
2270 while (order >= 0) {
2271 rinfo->dir_entries = (void*)__get_free_pages(GFP_KERNEL |
2272 __GFP_NOWARN,
2273 order);
2274 if (rinfo->dir_entries)
2275 break;
2276 order--;
2277 }
2278 if (!rinfo->dir_entries)
2279 return -ENOMEM;
2280
2281 num_entries = (PAGE_SIZE << order) / size;
2282 num_entries = min(num_entries, opt->max_readdir);
2283
2284 rinfo->dir_buf_size = PAGE_SIZE << order;
2285 req->r_num_caps = num_entries + 1;
2286 req->r_args.readdir.max_entries = cpu_to_le32(num_entries);
2287 req->r_args.readdir.max_bytes = cpu_to_le32(opt->max_readdir_bytes);
2288 return 0;
2289}
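
/*
 * Sizing sketch for the allocation above (numbers illustrative): with
 * 4 KiB pages and, say, a 32-byte ceph_mds_reply_dir_entry, a directory
 * reporting i_files + i_subdirs == 1000 asks get_order(32000) for an
 * order-3 (32 KiB) block.  If that allocation fails, the order is
 * stepped down until one succeeds, and num_entries is recomputed from
 * what actually fit -- e.g. (PAGE_SIZE << 2) / 32 == 512 entries for an
 * order-2 fallback -- before being clamped to max_readdir again.
 */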
2290
2291/*
2292 * Create an mds request.
2293 */
2294struct ceph_mds_request *
2295ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode)
2296{
2297 struct ceph_mds_request *req;
2298
2299 req = kmem_cache_zalloc(ceph_mds_request_cachep, GFP_NOFS);
2300 if (!req)
2301 return ERR_PTR(-ENOMEM);
2302
2303 mutex_init(&req->r_fill_mutex);
2304 req->r_mdsc = mdsc;
2305 req->r_started = jiffies;
2306 req->r_start_latency = ktime_get();
2307 req->r_resend_mds = -1;
2308 INIT_LIST_HEAD(&req->r_unsafe_dir_item);
2309 INIT_LIST_HEAD(&req->r_unsafe_target_item);
2310 req->r_fmode = -1;
2311 kref_init(&req->r_kref);
2312 RB_CLEAR_NODE(&req->r_node);
2313 INIT_LIST_HEAD(&req->r_wait);
2314 init_completion(&req->r_completion);
2315 init_completion(&req->r_safe_completion);
2316 INIT_LIST_HEAD(&req->r_unsafe_item);
2317
2318 ktime_get_coarse_real_ts64(&req->r_stamp);
2319
2320 req->r_op = op;
2321 req->r_direct_mode = mode;
2322 return req;
2323}
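
/*
 * A minimal usage sketch (error handling elided; the op and mode below
 * are just example arguments -- compare the real callers in
 * fs/ceph/inode.c and fs/ceph/dir.c):
 *
 *	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR,
 *				       USE_ANY_MDS);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	req->r_inode = inode;
 *	ihold(inode);
 *	req->r_num_caps = 1;
 *	err = ceph_mdsc_do_request(mdsc, NULL, req);
 *	ceph_mdsc_put_request(req);
 */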
2324
2325/*
2326 * return the oldest (lowest tid) request in the request tree, or NULL if none.
2327 *
2328 * called under mdsc->mutex.
2329 */
2330static struct ceph_mds_request *__get_oldest_req(struct ceph_mds_client *mdsc)
2331{
2332 if (RB_EMPTY_ROOT(&mdsc->request_tree))
2333 return NULL;
2334 return rb_entry(rb_first(&mdsc->request_tree),
2335 struct ceph_mds_request, r_node);
2336}
2337
2338static inline u64 __get_oldest_tid(struct ceph_mds_client *mdsc)
2339{
2340 return mdsc->oldest_tid;
2341}
2342
2343/*
2344 * Build a dentry's path. Allocate on heap; caller must kfree. Based
2345 * on build_path_from_dentry in fs/cifs/dir.c.
2346 *
2347 * If @stop_on_nosnap, generate path relative to the first non-snapped
2348 * inode.
2349 *
2350 * Encode hidden .snap dirs as a double /, i.e.
2351 * foo/.snap/bar -> foo//bar
2352 */
2353char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *pbase,
2354 int stop_on_nosnap)
2355{
2356 struct dentry *temp;
2357 char *path;
2358 int pos;
2359 unsigned seq;
2360 u64 base;
2361
2362 if (!dentry)
2363 return ERR_PTR(-EINVAL);
2364
2365 path = __getname();
2366 if (!path)
2367 return ERR_PTR(-ENOMEM);
2368retry:
2369 pos = PATH_MAX - 1;
2370 path[pos] = '\0';
2371
2372 seq = read_seqbegin(&rename_lock);
2373 rcu_read_lock();
2374 temp = dentry;
2375 for (;;) {
2376 struct inode *inode;
2377
2378 spin_lock(&temp->d_lock);
2379 inode = d_inode(temp);
2380 if (inode && ceph_snap(inode) == CEPH_SNAPDIR) {
2381 dout("build_path path+%d: %p SNAPDIR\n",
2382 pos, temp);
2383 } else if (stop_on_nosnap && inode && dentry != temp &&
2384 ceph_snap(inode) == CEPH_NOSNAP) {
2385 spin_unlock(&temp->d_lock);
2386 pos++; /* get rid of any prepended '/' */
2387 break;
2388 } else {
2389 pos -= temp->d_name.len;
2390 if (pos < 0) {
2391 spin_unlock(&temp->d_lock);
2392 break;
2393 }
2394 memcpy(path + pos, temp->d_name.name, temp->d_name.len);
2395 }
2396 spin_unlock(&temp->d_lock);
2397 temp = READ_ONCE(temp->d_parent);
2398
2399 /* Are we at the root? */
2400 if (IS_ROOT(temp))
2401 break;
2402
2403 /* Are we out of buffer? */
2404 if (--pos < 0)
2405 break;
2406
2407 path[pos] = '/';
2408 }
2409 base = ceph_ino(d_inode(temp));
2410 rcu_read_unlock();
2411
2412 if (read_seqretry(&rename_lock, seq))
2413 goto retry;
2414
2415 if (pos < 0) {
2416 /*
2417 * A rename didn't occur, but somehow we didn't end up where
2418 * we thought we would. Throw a warning and try again.
2419 */
2420 pr_warn("build_path did not end path lookup where "
2421 "expected, pos is %d\n", pos);
2422 goto retry;
2423 }
2424
2425 *pbase = base;
2426 *plen = PATH_MAX - 1 - pos;
2427 dout("build_path on %p %d built %llx '%.*s'\n",
2428 dentry, d_count(dentry), base, *plen, path + pos);
2429 return path + pos;
2430}
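
/*
 * Usage sketch (illustrative): for a dentry under foo/.snap/bar,
 *
 *	path = ceph_mdsc_build_path(dentry, &pathlen, &pathbase, 1);
 *
 * yields "foo//bar" (the double slash standing in for the snapdir),
 * with pathbase set to the ino the path is relative to.  The pointer
 * aims into a __getname() buffer, so it is released with
 * ceph_mdsc_free_path(path, pathlen) rather than kfree().
 */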
2431
2432static int build_dentry_path(struct dentry *dentry, struct inode *dir,
2433 const char **ppath, int *ppathlen, u64 *pino,
2434 bool *pfreepath, bool parent_locked)
2435{
2436 char *path;
2437
2438 rcu_read_lock();
2439 if (!dir)
2440 dir = d_inode_rcu(dentry->d_parent);
2441 if (dir && parent_locked && ceph_snap(dir) == CEPH_NOSNAP) {
2442 *pino = ceph_ino(dir);
2443 rcu_read_unlock();
2444 *ppath = dentry->d_name.name;
2445 *ppathlen = dentry->d_name.len;
2446 return 0;
2447 }
2448 rcu_read_unlock();
2449 path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
2450 if (IS_ERR(path))
2451 return PTR_ERR(path);
2452 *ppath = path;
2453 *pfreepath = true;
2454 return 0;
2455}
2456
2457static int build_inode_path(struct inode *inode,
2458 const char **ppath, int *ppathlen, u64 *pino,
2459 bool *pfreepath)
2460{
2461 struct dentry *dentry;
2462 char *path;
2463
2464 if (ceph_snap(inode) == CEPH_NOSNAP) {
2465 *pino = ceph_ino(inode);
2466 *ppathlen = 0;
2467 return 0;
2468 }
2469 dentry = d_find_alias(inode);
2470 path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
2471 dput(dentry);
2472 if (IS_ERR(path))
2473 return PTR_ERR(path);
2474 *ppath = path;
2475 *pfreepath = true;
2476 return 0;
2477}
2478
2479/*
2480 * request arguments may be specified via an inode *, a dentry *, or
2481 * an explicit ino+path.
2482 */
2483static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
2484 struct inode *rdiri, const char *rpath,
2485 u64 rino, const char **ppath, int *pathlen,
2486 u64 *ino, bool *freepath, bool parent_locked)
2487{
2488 int r = 0;
2489
2490 if (rinode) {
2491 r = build_inode_path(rinode, ppath, pathlen, ino, freepath);
2492 dout(" inode %p %llx.%llx\n", rinode, ceph_ino(rinode),
2493 ceph_snap(rinode));
2494 } else if (rdentry) {
2495 r = build_dentry_path(rdentry, rdiri, ppath, pathlen, ino,
2496 freepath, parent_locked);
2497 dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen,
2498 *ppath);
2499 } else if (rpath || rino) {
2500 *ino = rino;
2501 *ppath = rpath;
2502 *pathlen = rpath ? strlen(rpath) : 0;
2503 dout(" path %.*s\n", *pathlen, rpath);
2504 }
2505
2506 return r;
2507}
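
/*
 * How the three cases above play out (a sketch; values illustrative):
 *
 *	set_request_path_attr(inode, NULL, ...)
 *		-> *ino = ceph_ino(inode), empty path (NOSNAP inode)
 *	set_request_path_attr(NULL, dentry, dir, NULL, 0, ...)
 *		-> parent ino plus dentry name, or a fully rebuilt path
 *		   when snaps or locking make the short form unusable
 *	set_request_path_attr(NULL, NULL, NULL, "a/b", 1099, ...)
 *		-> *ino = 1099, *ppath = "a/b"
 */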
2508
2509static void encode_timestamp_and_gids(void **p,
2510 const struct ceph_mds_request *req)
2511{
2512 struct ceph_timespec ts;
2513 int i;
2514
2515 ceph_encode_timespec64(&ts, &req->r_stamp);
2516 ceph_encode_copy(p, &ts, sizeof(ts));
2517
2518 /* gid_list */
2519 ceph_encode_32(p, req->r_cred->group_info->ngroups);
2520 for (i = 0; i < req->r_cred->group_info->ngroups; i++)
2521 ceph_encode_64(p, from_kgid(&init_user_ns,
2522 req->r_cred->group_info->gid[i]));
2523}
2524
2525/*
2526 * called under mdsc->mutex
2527 */
2528static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
2529 struct ceph_mds_request *req,
2530 bool drop_cap_releases)
2531{
2532 int mds = session->s_mds;
2533 struct ceph_mds_client *mdsc = session->s_mdsc;
2534 struct ceph_msg *msg;
2535 struct ceph_mds_request_head_old *head;
2536 const char *path1 = NULL;
2537 const char *path2 = NULL;
2538 u64 ino1 = 0, ino2 = 0;
2539 int pathlen1 = 0, pathlen2 = 0;
2540 bool freepath1 = false, freepath2 = false;
2541 int len;
2542 u16 releases;
2543 void *p, *end;
2544 int ret;
2545 bool legacy = !(session->s_con.peer_features & CEPH_FEATURE_FS_BTIME);
2546
2547 ret = set_request_path_attr(req->r_inode, req->r_dentry,
2548 req->r_parent, req->r_path1, req->r_ino1.ino,
2549 &path1, &pathlen1, &ino1, &freepath1,
2550 test_bit(CEPH_MDS_R_PARENT_LOCKED,
2551 &req->r_req_flags));
2552 if (ret < 0) {
2553 msg = ERR_PTR(ret);
2554 goto out;
2555 }
2556
2557 /* If r_old_dentry is set, then assume that its parent is locked */
2558 ret = set_request_path_attr(NULL, req->r_old_dentry,
2559 req->r_old_dentry_dir,
2560 req->r_path2, req->r_ino2.ino,
2561 &path2, &pathlen2, &ino2, &freepath2, true);
2562 if (ret < 0) {
2563 msg = ERR_PTR(ret);
2564 goto out_free1;
2565 }
2566
2567 len = legacy ? sizeof(*head) : sizeof(struct ceph_mds_request_head);
2568 len += pathlen1 + pathlen2 + 2*(1 + sizeof(u32) + sizeof(u64)) +
2569 sizeof(struct ceph_timespec);
2570 len += sizeof(u32) + (sizeof(u64) * req->r_cred->group_info->ngroups);
2571
2572 /* calculate (max) length for cap releases */
2573 len += sizeof(struct ceph_mds_request_release) *
2574 (!!req->r_inode_drop + !!req->r_dentry_drop +
2575 !!req->r_old_inode_drop + !!req->r_old_dentry_drop);
2576
2577 if (req->r_dentry_drop)
2578 len += pathlen1;
2579 if (req->r_old_dentry_drop)
2580 len += pathlen2;
2581
2582 msg = ceph_msg_new2(CEPH_MSG_CLIENT_REQUEST, len, 1, GFP_NOFS, false);
2583 if (!msg) {
2584 msg = ERR_PTR(-ENOMEM);
2585 goto out_free2;
2586 }
2587
2588 msg->hdr.tid = cpu_to_le64(req->r_tid);
2589
2590 /*
2591 * The old ceph_mds_request_head didn't contain a version field, and
2592 * one was added when we moved the message version from 3->4.
2593 */
2594 if (legacy) {
2595 msg->hdr.version = cpu_to_le16(3);
2596 head = msg->front.iov_base;
2597 p = msg->front.iov_base + sizeof(*head);
2598 } else {
2599 struct ceph_mds_request_head *new_head = msg->front.iov_base;
2600
2601 msg->hdr.version = cpu_to_le16(4);
2602 new_head->version = cpu_to_le16(CEPH_MDS_REQUEST_HEAD_VERSION);
2603 head = (struct ceph_mds_request_head_old *)&new_head->oldest_client_tid;
2604 p = msg->front.iov_base + sizeof(*new_head);
2605 }
2606
2607 end = msg->front.iov_base + msg->front.iov_len;
2608
2609 head->mdsmap_epoch = cpu_to_le32(mdsc->mdsmap->m_epoch);
2610 head->op = cpu_to_le32(req->r_op);
2611 head->caller_uid = cpu_to_le32(from_kuid(&init_user_ns,
2612 req->r_cred->fsuid));
2613 head->caller_gid = cpu_to_le32(from_kgid(&init_user_ns,
2614 req->r_cred->fsgid));
2615 head->ino = cpu_to_le64(req->r_deleg_ino);
2616 head->args = req->r_args;
2617
2618 ceph_encode_filepath(&p, end, ino1, path1);
2619 ceph_encode_filepath(&p, end, ino2, path2);
2620
2621 /* make note of release offset, in case we need to replay */
2622 req->r_request_release_offset = p - msg->front.iov_base;
2623
2624 /* cap releases */
2625 releases = 0;
2626 if (req->r_inode_drop)
2627 releases += ceph_encode_inode_release(&p,
2628 req->r_inode ? req->r_inode : d_inode(req->r_dentry),
2629 mds, req->r_inode_drop, req->r_inode_unless,
2630 req->r_op == CEPH_MDS_OP_READDIR);
2631 if (req->r_dentry_drop)
2632 releases += ceph_encode_dentry_release(&p, req->r_dentry,
2633 req->r_parent, mds, req->r_dentry_drop,
2634 req->r_dentry_unless);
2635 if (req->r_old_dentry_drop)
2636 releases += ceph_encode_dentry_release(&p, req->r_old_dentry,
2637 req->r_old_dentry_dir, mds,
2638 req->r_old_dentry_drop,
2639 req->r_old_dentry_unless);
2640 if (req->r_old_inode_drop)
2641 releases += ceph_encode_inode_release(&p,
2642 d_inode(req->r_old_dentry),
2643 mds, req->r_old_inode_drop, req->r_old_inode_unless, 0);
2644
2645 if (drop_cap_releases) {
2646 releases = 0;
2647 p = msg->front.iov_base + req->r_request_release_offset;
2648 }
2649
2650 head->num_releases = cpu_to_le16(releases);
2651
2652 encode_timestamp_and_gids(&p, req);
2653
2654 if (WARN_ON_ONCE(p > end)) {
2655 ceph_msg_put(msg);
2656 msg = ERR_PTR(-ERANGE);
2657 goto out_free2;
2658 }
2659
2660 msg->front.iov_len = p - msg->front.iov_base;
2661 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
2662
2663 if (req->r_pagelist) {
2664 struct ceph_pagelist *pagelist = req->r_pagelist;
2665 ceph_msg_data_add_pagelist(msg, pagelist);
2666 msg->hdr.data_len = cpu_to_le32(pagelist->length);
2667 } else {
2668 msg->hdr.data_len = 0;
2669 }
2670
2671 msg->hdr.data_off = cpu_to_le16(0);
2672
2673out_free2:
2674 if (freepath2)
2675 ceph_mdsc_free_path((char *)path2, pathlen2);
2676out_free1:
2677 if (freepath1)
2678 ceph_mdsc_free_path((char *)path1, pathlen1);
2679out:
2680 return msg;
2681}
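
/*
 * Resulting front-section layout (a sketch; see the length calculation
 * above for the exact sizes):
 *
 *	request head (old layout, or versioned layout when !legacy)
 *	filepath1:  u8 version, u64 ino1, u32 len, path1 bytes
 *	filepath2:  u8 version, u64 ino2, u32 len, path2 bytes
 *	0-4 cap release records		<- r_request_release_offset
 *	ceph_timespec (r_stamp)
 *	u32 ngroups, u64 gid[ngroups]
 */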
2682
2683/*
2684 * called under mdsc->mutex if error, under no mutex if
2685 * success.
2686 */
2687static void complete_request(struct ceph_mds_client *mdsc,
2688 struct ceph_mds_request *req)
2689{
2690 req->r_end_latency = ktime_get();
2691
2692 if (req->r_callback)
2693 req->r_callback(mdsc, req);
2694 complete_all(&req->r_completion);
2695}
2696
2697static struct ceph_mds_request_head_old *
2698find_old_request_head(void *p, u64 features)
2699{
2700 bool legacy = !(features & CEPH_FEATURE_FS_BTIME);
2701 struct ceph_mds_request_head *new_head;
2702
2703 if (legacy)
2704 return (struct ceph_mds_request_head_old *)p;
2705 new_head = (struct ceph_mds_request_head *)p;
2706 return (struct ceph_mds_request_head_old *)&new_head->oldest_client_tid;
2707}
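
/*
 * Equivalent view of the cast above (a conceptual sketch, not the
 * actual definition): the new-style head is simply a __le16 version
 * prepended to the old fields, so the old head begins at
 * oldest_client_tid:
 *
 *	struct ceph_mds_request_head {
 *		__le16 version;
 *		struct ceph_mds_request_head_old old;	(conceptually)
 *	};
 */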
2708
2709/*
2710 * called under mdsc->mutex
2711 */
2712static int __prepare_send_request(struct ceph_mds_session *session,
2713 struct ceph_mds_request *req,
2714 bool drop_cap_releases)
2715{
2716 int mds = session->s_mds;
2717 struct ceph_mds_client *mdsc = session->s_mdsc;
2718 struct ceph_mds_request_head_old *rhead;
2719 struct ceph_msg *msg;
2720 int flags = 0;
2721
2722 req->r_attempts++;
2723 if (req->r_inode) {
2724 struct ceph_cap *cap =
2725 ceph_get_cap_for_mds(ceph_inode(req->r_inode), mds);
2726
2727 if (cap)
2728 req->r_sent_on_mseq = cap->mseq;
2729 else
2730 req->r_sent_on_mseq = -1;
2731 }
2732 dout("prepare_send_request %p tid %lld %s (attempt %d)\n", req,
2733 req->r_tid, ceph_mds_op_name(req->r_op), req->r_attempts);
2734
2735 if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
2736 void *p;
2737
2738 /*
2739 * Replay. Do not regenerate message (and rebuild
2740 * paths, etc.); just use the original message.
2741 * Rebuilding paths will break for renames because
2742 * d_move mangles the src name.
2743 */
2744 msg = req->r_request;
2745 rhead = find_old_request_head(msg->front.iov_base,
2746 session->s_con.peer_features);
2747
2748 flags = le32_to_cpu(rhead->flags);
2749 flags |= CEPH_MDS_FLAG_REPLAY;
2750 rhead->flags = cpu_to_le32(flags);
2751
2752 if (req->r_target_inode)
2753 rhead->ino = cpu_to_le64(ceph_ino(req->r_target_inode));
2754
2755 rhead->num_retry = req->r_attempts - 1;
2756
2757 /* remove cap/dentry releases from message */
2758 rhead->num_releases = 0;
2759
2760 p = msg->front.iov_base + req->r_request_release_offset;
2761 encode_timestamp_and_gids(&p, req);
2762
2763 msg->front.iov_len = p - msg->front.iov_base;
2764 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
2765 return 0;
2766 }
2767
2768 if (req->r_request) {
2769 ceph_msg_put(req->r_request);
2770 req->r_request = NULL;
2771 }
2772 msg = create_request_message(session, req, drop_cap_releases);
2773 if (IS_ERR(msg)) {
2774 req->r_err = PTR_ERR(msg);
2775 return PTR_ERR(msg);
2776 }
2777 req->r_request = msg;
2778
2779 rhead = find_old_request_head(msg->front.iov_base,
2780 session->s_con.peer_features);
2781 rhead->oldest_client_tid = cpu_to_le64(__get_oldest_tid(mdsc));
2782 if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags))
2783 flags |= CEPH_MDS_FLAG_REPLAY;
2784 if (test_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags))
2785 flags |= CEPH_MDS_FLAG_ASYNC;
2786 if (req->r_parent)
2787 flags |= CEPH_MDS_FLAG_WANT_DENTRY;
2788 rhead->flags = cpu_to_le32(flags);
2789 rhead->num_fwd = req->r_num_fwd;
2790 rhead->num_retry = req->r_attempts - 1;
2791
2792 dout(" r_parent = %p\n", req->r_parent);
2793 return 0;
2794}
2795
2796/*
2797 * called under mdsc->mutex
2798 */
2799static int __send_request(struct ceph_mds_session *session,
2800 struct ceph_mds_request *req,
2801 bool drop_cap_releases)
2802{
2803 int err;
2804
2805 err = __prepare_send_request(session, req, drop_cap_releases);
2806 if (!err) {
2807 ceph_msg_get(req->r_request);
2808 ceph_con_send(&session->s_con, req->r_request);
2809 }
2810
2811 return err;
2812}
2813
2814/*
2815 * send request, or put it on the appropriate wait list.
2816 */
2817static void __do_request(struct ceph_mds_client *mdsc,
2818 struct ceph_mds_request *req)
2819{
2820 struct ceph_mds_session *session = NULL;
2821 int mds = -1;
2822 int err = 0;
2823 bool random;
2824
2825 if (req->r_err || test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) {
2826 if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags))
2827 __unregister_request(mdsc, req);
2828 return;
2829 }
2830
2831 if (req->r_timeout &&
2832 time_after_eq(jiffies, req->r_started + req->r_timeout)) {
2833 dout("do_request timed out\n");
2834 err = -ETIMEDOUT;
2835 goto finish;
2836 }
2837 if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
2838 dout("do_request forced umount\n");
2839 err = -EIO;
2840 goto finish;
2841 }
2842 if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_MOUNTING) {
2843 if (mdsc->mdsmap_err) {
2844 err = mdsc->mdsmap_err;
2845 dout("do_request mdsmap err %d\n", err);
2846 goto finish;
2847 }
2848 if (mdsc->mdsmap->m_epoch == 0) {
2849 dout("do_request no mdsmap, waiting for map\n");
2850 list_add(&req->r_wait, &mdsc->waiting_for_map);
2851 return;
2852 }
2853 if (!(mdsc->fsc->mount_options->flags &
2854 CEPH_MOUNT_OPT_MOUNTWAIT) &&
2855 !ceph_mdsmap_is_cluster_available(mdsc->mdsmap)) {
2856 err = -EHOSTUNREACH;
2857 goto finish;
2858 }
2859 }
2860
2861 put_request_session(req);
2862
2863 mds = __choose_mds(mdsc, req, &random);
2864 if (mds < 0 ||
2865 ceph_mdsmap_get_state(mdsc->mdsmap, mds) < CEPH_MDS_STATE_ACTIVE) {
2866 if (test_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags)) {
2867 err = -EJUKEBOX;
2868 goto finish;
2869 }
2870 dout("do_request no mds or not active, waiting for map\n");
2871 list_add(&req->r_wait, &mdsc->waiting_for_map);
2872 return;
2873 }
2874
2875 /* get, open session */
2876 session = __ceph_lookup_mds_session(mdsc, mds);
2877 if (!session) {
2878 session = register_session(mdsc, mds);
2879 if (IS_ERR(session)) {
2880 err = PTR_ERR(session);
2881 goto finish;
2882 }
2883 }
2884 req->r_session = ceph_get_mds_session(session);
2885
2886 dout("do_request mds%d session %p state %s\n", mds, session,
2887 ceph_session_state_name(session->s_state));
2888 if (session->s_state != CEPH_MDS_SESSION_OPEN &&
2889 session->s_state != CEPH_MDS_SESSION_HUNG) {
2890 /*
2891 * We cannot queue async requests since the caps and delegated
2892 * inodes are bound to the session. Just return -EJUKEBOX and
2893 * let the caller retry a sync request in that case.
2894 */
2895 if (test_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags)) {
2896 err = -EJUKEBOX;
2897 goto out_session;
2898 }
2899
2900 /*
2901 * If the session has been REJECTED, then return a hard error,
2902 * unless it's a CLEANRECOVER mount, in which case we'll queue
2903 * it to the mdsc queue.
2904 */
2905 if (session->s_state == CEPH_MDS_SESSION_REJECTED) {
2906 if (ceph_test_mount_opt(mdsc->fsc, CLEANRECOVER))
2907 list_add(&req->r_wait, &mdsc->waiting_for_map);
2908 else
2909 err = -EACCES;
2910 goto out_session;
2911 }
2912
2913 if (session->s_state == CEPH_MDS_SESSION_NEW ||
2914 session->s_state == CEPH_MDS_SESSION_CLOSING) {
2915 err = __open_session(mdsc, session);
2916 if (err)
2917 goto out_session;
2918 /* retry the same mds later */
2919 if (random)
2920 req->r_resend_mds = mds;
2921 }
2922 list_add(&req->r_wait, &session->s_waiting);
2923 goto out_session;
2924 }
2925
2926 /* send request */
2927 req->r_resend_mds = -1; /* forget any previous mds hint */
2928
2929 if (req->r_request_started == 0) /* note request start time */
2930 req->r_request_started = jiffies;
2931
2932 err = __send_request(session, req, false);
2933
2934out_session:
2935 ceph_put_mds_session(session);
2936finish:
2937 if (err) {
2938 dout("__do_request early error %d\n", err);
2939 req->r_err = err;
2940 complete_request(mdsc, req);
2941 __unregister_request(mdsc, req);
2942 }
2943 return;
2944}
2945
2946/*
2947 * called under mdsc->mutex
2948 */
2949static void __wake_requests(struct ceph_mds_client *mdsc,
2950 struct list_head *head)
2951{
2952 struct ceph_mds_request *req;
2953 LIST_HEAD(tmp_list);
2954
2955 list_splice_init(head, &tmp_list);
2956
2957 while (!list_empty(&tmp_list)) {
2958 req = list_entry(tmp_list.next,
2959 struct ceph_mds_request, r_wait);
2960 list_del_init(&req->r_wait);
2961 dout(" wake request %p tid %llu\n", req, req->r_tid);
2962 __do_request(mdsc, req);
2963 }
2964}
2965
2966/*
2967 * Wake up threads with requests pending for @mds, so that they can
2968 * resubmit their requests to a possibly different mds.
2969 */
2970static void kick_requests(struct ceph_mds_client *mdsc, int mds)
2971{
2972 struct ceph_mds_request *req;
2973 struct rb_node *p = rb_first(&mdsc->request_tree);
2974
2975 dout("kick_requests mds%d\n", mds);
2976 while (p) {
2977 req = rb_entry(p, struct ceph_mds_request, r_node);
2978 p = rb_next(p);
2979 if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags))
2980 continue;
2981 if (req->r_attempts > 0)
2982 continue; /* only new requests */
2983 if (req->r_session &&
2984 req->r_session->s_mds == mds) {
2985 dout(" kicking tid %llu\n", req->r_tid);
2986 list_del_init(&req->r_wait);
2987 __do_request(mdsc, req);
2988 }
2989 }
2990}
2991
2992int ceph_mdsc_submit_request(struct ceph_mds_client *mdsc, struct inode *dir,
2993 struct ceph_mds_request *req)
2994{
2995 int err = 0;
2996
2997 /* take CAP_PIN refs for r_inode, r_parent, r_old_dentry */
2998 if (req->r_inode)
2999 ceph_get_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
3000 if (req->r_parent) {
3001 struct ceph_inode_info *ci = ceph_inode(req->r_parent);
3002 int fmode = (req->r_op & CEPH_MDS_OP_WRITE) ?
3003 CEPH_FILE_MODE_WR : CEPH_FILE_MODE_RD;
3004 spin_lock(&ci->i_ceph_lock);
3005 ceph_take_cap_refs(ci, CEPH_CAP_PIN, false);
3006 __ceph_touch_fmode(ci, mdsc, fmode);
3007 spin_unlock(&ci->i_ceph_lock);
3008 }
3009 if (req->r_old_dentry_dir)
3010 ceph_get_cap_refs(ceph_inode(req->r_old_dentry_dir),
3011 CEPH_CAP_PIN);
3012
3013 if (req->r_inode) {
3014 err = ceph_wait_on_async_create(req->r_inode);
3015 if (err) {
3016 dout("%s: wait for async create returned: %d\n",
3017 __func__, err);
3018 return err;
3019 }
3020 }
3021
3022 if (!err && req->r_old_inode) {
3023 err = ceph_wait_on_async_create(req->r_old_inode);
3024 if (err) {
3025 dout("%s: wait for async create returned: %d\n",
3026 __func__, err);
3027 return err;
3028 }
3029 }
3030
3031 dout("submit_request on %p for inode %p\n", req, dir);
3032 mutex_lock(&mdsc->mutex);
3033 __register_request(mdsc, req, dir);
3034 __do_request(mdsc, req);
3035 err = req->r_err;
3036 mutex_unlock(&mdsc->mutex);
3037 return err;
3038}
3039
3040static int ceph_mdsc_wait_request(struct ceph_mds_client *mdsc,
3041 struct ceph_mds_request *req)
3042{
3043 int err;
3044
3045 /* wait */
3046 dout("do_request waiting\n");
3047 if (!req->r_timeout && req->r_wait_for_completion) {
3048 err = req->r_wait_for_completion(mdsc, req);
3049 } else {
3050 long timeleft = wait_for_completion_killable_timeout(
3051 &req->r_completion,
3052 ceph_timeout_jiffies(req->r_timeout));
3053 if (timeleft > 0)
3054 err = 0;
3055 else if (!timeleft)
3056 err = -ETIMEDOUT; /* timed out */
3057 else
3058 err = timeleft; /* killed */
3059 }
3060 dout("do_request waited, got %d\n", err);
3061 mutex_lock(&mdsc->mutex);
3062
3063 /* only abort if we didn't race with a real reply */
3064 if (test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) {
3065 err = le32_to_cpu(req->r_reply_info.head->result);
3066 } else if (err < 0) {
3067 dout("aborted request %lld with %d\n", req->r_tid, err);
3068
3069 /*
3070 * ensure we aren't running concurrently with
3071 * ceph_fill_trace or ceph_readdir_prepopulate, which
3072 * rely on locks (dir mutex) held by our caller.
3073 */
3074 mutex_lock(&req->r_fill_mutex);
3075 req->r_err = err;
3076 set_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags);
3077 mutex_unlock(&req->r_fill_mutex);
3078
3079 if (req->r_parent &&
3080 (req->r_op & CEPH_MDS_OP_WRITE))
3081 ceph_invalidate_dir_request(req);
3082 } else {
3083 err = req->r_err;
3084 }
3085
3086 mutex_unlock(&mdsc->mutex);
3087 return err;
3088}
3089
3090/*
3091 * Synchronously perform an mds request.  Take care of all of the
3092 * session setup, forwarding, and retry details.
3093 */
3094int ceph_mdsc_do_request(struct ceph_mds_client *mdsc,
3095 struct inode *dir,
3096 struct ceph_mds_request *req)
3097{
3098 int err;
3099
3100 dout("do_request on %p\n", req);
3101
3102 /* issue */
3103 err = ceph_mdsc_submit_request(mdsc, dir, req);
3104 if (!err)
3105 err = ceph_mdsc_wait_request(mdsc, req);
3106 dout("do_request %p done, result %d\n", req, err);
3107 return err;
3108}
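
/*
 * A minimal sketch of consuming the result (names illustrative):
 *
 *	err = ceph_mdsc_do_request(mdsc, dir, req);
 *	if (!err)
 *		... parsed trace is in req->r_reply_info ...
 *	else if (err == -ETIMEDOUT)
 *		... r_timeout elapsed and the request was aborted ...
 *	else if (err == -ERESTARTSYS)
 *		... the waiting task was fatally signalled ...
 *	ceph_mdsc_put_request(req);
 */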
3109
3110/*
3111 * Invalidate dir's completeness, dentry lease state on an aborted MDS
3112 * namespace request.
3113 */
3114void ceph_invalidate_dir_request(struct ceph_mds_request *req)
3115{
3116 struct inode *dir = req->r_parent;
3117 struct inode *old_dir = req->r_old_dentry_dir;
3118
3119 dout("invalidate_dir_request %p %p (complete, lease(s))\n", dir, old_dir);
3120
3121 ceph_dir_clear_complete(dir);
3122 if (old_dir)
3123 ceph_dir_clear_complete(old_dir);
3124 if (req->r_dentry)
3125 ceph_invalidate_dentry_lease(req->r_dentry);
3126 if (req->r_old_dentry)
3127 ceph_invalidate_dentry_lease(req->r_old_dentry);
3128}
3129
3130/*
3131 * Handle mds reply.
3132 *
3133 * We take the session mutex and parse and process the reply immediately.
3134 * This preserves the logical ordering of replies, capabilities, etc., sent
3135 * by the MDS as they are applied to our local cache.
3136 */
3137static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
3138{
3139 struct ceph_mds_client *mdsc = session->s_mdsc;
3140 struct ceph_mds_request *req;
3141 struct ceph_mds_reply_head *head = msg->front.iov_base;
3142 struct ceph_mds_reply_info_parsed *rinfo; /* parsed reply info */
3143 struct ceph_snap_realm *realm;
3144 u64 tid;
3145 int err, result;
3146 int mds = session->s_mds;
3147
3148 if (msg->front.iov_len < sizeof(*head)) {
3149 pr_err("mdsc_handle_reply got corrupt (short) reply\n");
3150 ceph_msg_dump(msg);
3151 return;
3152 }
3153
3154 /* get request, session */
3155 tid = le64_to_cpu(msg->hdr.tid);
3156 mutex_lock(&mdsc->mutex);
3157 req = lookup_get_request(mdsc, tid);
3158 if (!req) {
3159 dout("handle_reply on unknown tid %llu\n", tid);
3160 mutex_unlock(&mdsc->mutex);
3161 return;
3162 }
3163 dout("handle_reply %p\n", req);
3164
3165 /* correct session? */
3166 if (req->r_session != session) {
3167 pr_err("mdsc_handle_reply got %llu on session mds%d"
3168 " not mds%d\n", tid, session->s_mds,
3169 req->r_session ? req->r_session->s_mds : -1);
3170 mutex_unlock(&mdsc->mutex);
3171 goto out;
3172 }
3173
3174 /* dup? */
3175 if ((test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags) && !head->safe) ||
3176 (test_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags) && head->safe)) {
3177 pr_warn("got a dup %s reply on %llu from mds%d\n",
3178 head->safe ? "safe" : "unsafe", tid, mds);
3179 mutex_unlock(&mdsc->mutex);
3180 goto out;
3181 }
3182 if (test_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags)) {
3183 pr_warn("got unsafe after safe on %llu from mds%d\n",
3184 tid, mds);
3185 mutex_unlock(&mdsc->mutex);
3186 goto out;
3187 }
3188
3189 result = le32_to_cpu(head->result);
3190
3191 /*
3192 * Handle an ESTALE:
3193 * if we're not talking to the authority, send the request there;
3194 * if the authority has changed while we weren't looking,
3195 * send it to the new authority.
3196 * Otherwise we just have to return an ESTALE.
3197 */
3198 if (result == -ESTALE) {
3199 dout("got ESTALE on request %llu\n", req->r_tid);
3200 req->r_resend_mds = -1;
3201 if (req->r_direct_mode != USE_AUTH_MDS) {
3202 dout("not using auth, setting for that now\n");
3203 req->r_direct_mode = USE_AUTH_MDS;
3204 __do_request(mdsc, req);
3205 mutex_unlock(&mdsc->mutex);
3206 goto out;
3207 } else {
3208 int mds = __choose_mds(mdsc, req, NULL);
3209 if (mds >= 0 && mds != req->r_session->s_mds) {
3210 dout("but auth changed, so resending\n");
3211 __do_request(mdsc, req);
3212 mutex_unlock(&mdsc->mutex);
3213 goto out;
3214 }
3215 }
3216 dout("have to return ESTALE on request %llu\n", req->r_tid);
3217 }
3218
3219
3220 if (head->safe) {
3221 set_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags);
3222 __unregister_request(mdsc, req);
3223
3224 /* last request during umount? */
3225 if (mdsc->stopping && !__get_oldest_req(mdsc))
3226 complete_all(&mdsc->safe_umount_waiters);
3227
3228 if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
3229 /*
3230 * We already handled the unsafe response, now do the
3231 * cleanup. No need to examine the response; the MDS
3232 * doesn't include any result info in the safe
3233 * response. And even if it did, there is nothing
3234 * useful we could do with a revised return value.
3235 */
3236 dout("got safe reply %llu, mds%d\n", tid, mds);
3237
3238 mutex_unlock(&mdsc->mutex);
3239 goto out;
3240 }
3241 } else {
3242 set_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags);
3243 list_add_tail(&req->r_unsafe_item, &req->r_session->s_unsafe);
3244 }
3245
3246 dout("handle_reply tid %lld result %d\n", tid, result);
3247 rinfo = &req->r_reply_info;
3248 if (test_bit(CEPHFS_FEATURE_REPLY_ENCODING, &session->s_features))
3249 err = parse_reply_info(session, msg, rinfo, (u64)-1);
3250 else
3251 err = parse_reply_info(session, msg, rinfo, session->s_con.peer_features);
3252 mutex_unlock(&mdsc->mutex);
3253
3254 /* Must find target inode outside of mutexes to avoid deadlocks */
3255 if ((err >= 0) && rinfo->head->is_target) {
3256 struct inode *in;
3257 struct ceph_vino tvino = {
3258 .ino = le64_to_cpu(rinfo->targeti.in->ino),
3259 .snap = le64_to_cpu(rinfo->targeti.in->snapid)
3260 };
3261
3262 in = ceph_get_inode(mdsc->fsc->sb, tvino);
3263 if (IS_ERR(in)) {
3264 err = PTR_ERR(in);
3265 mutex_lock(&session->s_mutex);
3266 goto out_err;
3267 }
3268 req->r_target_inode = in;
3269 }
3270
3271 mutex_lock(&session->s_mutex);
3272 if (err < 0) {
3273 pr_err("mdsc_handle_reply got corrupt reply mds%d(tid:%lld)\n", mds, tid);
3274 ceph_msg_dump(msg);
3275 goto out_err;
3276 }
3277
3278 /* snap trace */
3279 realm = NULL;
3280 if (rinfo->snapblob_len) {
3281 down_write(&mdsc->snap_rwsem);
3282 ceph_update_snap_trace(mdsc, rinfo->snapblob,
3283 rinfo->snapblob + rinfo->snapblob_len,
3284 le32_to_cpu(head->op) == CEPH_MDS_OP_RMSNAP,
3285 &realm);
3286 downgrade_write(&mdsc->snap_rwsem);
3287 } else {
3288 down_read(&mdsc->snap_rwsem);
3289 }
3290
3291 /* insert trace into our cache */
3292 mutex_lock(&req->r_fill_mutex);
3293 current->journal_info = req;
3294 err = ceph_fill_trace(mdsc->fsc->sb, req);
3295 if (err == 0) {
3296 if (result == 0 && (req->r_op == CEPH_MDS_OP_READDIR ||
3297 req->r_op == CEPH_MDS_OP_LSSNAP))
3298 ceph_readdir_prepopulate(req, req->r_session);
3299 }
3300 current->journal_info = NULL;
3301 mutex_unlock(&req->r_fill_mutex);
3302
3303 up_read(&mdsc->snap_rwsem);
3304 if (realm)
3305 ceph_put_snap_realm(mdsc, realm);
3306
3307 if (err == 0) {
3308 if (req->r_target_inode &&
3309 test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
3310 struct ceph_inode_info *ci =
3311 ceph_inode(req->r_target_inode);
3312 spin_lock(&ci->i_unsafe_lock);
3313 list_add_tail(&req->r_unsafe_target_item,
3314 &ci->i_unsafe_iops);
3315 spin_unlock(&ci->i_unsafe_lock);
3316 }
3317
3318 ceph_unreserve_caps(mdsc, &req->r_caps_reservation);
3319 }
3320out_err:
3321 mutex_lock(&mdsc->mutex);
3322 if (!test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
3323 if (err) {
3324 req->r_err = err;
3325 } else {
3326 req->r_reply = ceph_msg_get(msg);
3327 set_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags);
3328 }
3329 } else {
3330 dout("reply arrived after request %lld was aborted\n", tid);
3331 }
3332 mutex_unlock(&mdsc->mutex);
3333
3334 mutex_unlock(&session->s_mutex);
3335
3336 /* kick calling process */
3337 complete_request(mdsc, req);
3338
3339 ceph_update_metadata_metrics(&mdsc->metric, req->r_start_latency,
3340 req->r_end_latency, err);
3341out:
3342 ceph_mdsc_put_request(req);
3343 return;
3344}
3345
3346
3347
3348/*
3349 * handle mds notification that our request has been forwarded.
3350 */
3351static void handle_forward(struct ceph_mds_client *mdsc,
3352 struct ceph_mds_session *session,
3353 struct ceph_msg *msg)
3354{
3355 struct ceph_mds_request *req;
3356 u64 tid = le64_to_cpu(msg->hdr.tid);
3357 u32 next_mds;
3358 u32 fwd_seq;
3359 int err = -EINVAL;
3360 void *p = msg->front.iov_base;
3361 void *end = p + msg->front.iov_len;
3362
3363 ceph_decode_need(&p, end, 2*sizeof(u32), bad);
3364 next_mds = ceph_decode_32(&p);
3365 fwd_seq = ceph_decode_32(&p);
3366
3367 mutex_lock(&mdsc->mutex);
3368 req = lookup_get_request(mdsc, tid);
3369 if (!req) {
3370 dout("forward tid %llu to mds%d - req dne\n", tid, next_mds);
3371 goto out; /* dup reply? */
3372 }
3373
3374 if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
3375 dout("forward tid %llu aborted, unregistering\n", tid);
3376 __unregister_request(mdsc, req);
3377 } else if (fwd_seq <= req->r_num_fwd) {
3378 dout("forward tid %llu to mds%d - old seq %d <= %d\n",
3379 tid, next_mds, req->r_num_fwd, fwd_seq);
3380 } else {
3381 /* resend. forward race not possible; mds would drop */
3382 dout("forward tid %llu to mds%d (we resend)\n", tid, next_mds);
3383 BUG_ON(req->r_err);
3384 BUG_ON(test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags));
3385 req->r_attempts = 0;
3386 req->r_num_fwd = fwd_seq;
3387 req->r_resend_mds = next_mds;
3388 put_request_session(req);
3389 __do_request(mdsc, req);
3390 }
3391 ceph_mdsc_put_request(req);
3392out:
3393 mutex_unlock(&mdsc->mutex);
3394 return;
3395
3396bad:
3397 pr_err("mdsc_handle_forward decode error err=%d\n", err);
3398}
3399
3400static int __decode_session_metadata(void **p, void *end,
3401 bool *blocklisted)
3402{
3403 /* map<string,string> */
3404 u32 n;
3405 bool err_str;
3406 ceph_decode_32_safe(p, end, n, bad);
3407 while (n-- > 0) {
3408 u32 len;
3409 ceph_decode_32_safe(p, end, len, bad);
3410 ceph_decode_need(p, end, len, bad);
3411 err_str = !strncmp(*p, "error_string", len);
3412 *p += len;
3413 ceph_decode_32_safe(p, end, len, bad);
3414 ceph_decode_need(p, end, len, bad);
3415 /*
3416 * Match "blocklisted (blacklisted)" from newer MDSes,
3417 * or "blacklisted" from older MDSes.
3418 */
3419 if (err_str && strnstr(*p, "blacklisted", len))
3420 *blocklisted = true;
3421 *p += len;
3422 }
3423 return 0;
3424bad:
3425 return -1;
3426}
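
/*
 * On-wire shape consumed above (a sketch):
 *
 *	u32 n				number of key/value pairs
 *	n x {
 *		u32 klen, char key[klen]
 *		u32 vlen, char val[vlen]
 *	}
 *
 * e.g. a rejecting MDS may send { "error_string": "...blacklisted..." },
 * which is what flips *blocklisted here.
 */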
3427
3428/*
3429 * handle a mds session control message
3430 */
3431static void handle_session(struct ceph_mds_session *session,
3432 struct ceph_msg *msg)
3433{
3434 struct ceph_mds_client *mdsc = session->s_mdsc;
3435 int mds = session->s_mds;
3436 int msg_version = le16_to_cpu(msg->hdr.version);
3437 void *p = msg->front.iov_base;
3438 void *end = p + msg->front.iov_len;
3439 struct ceph_mds_session_head *h;
3440 u32 op;
3441 u64 seq, features = 0;
3442 int wake = 0;
3443 bool blocklisted = false;
3444
3445 /* decode */
3446 ceph_decode_need(&p, end, sizeof(*h), bad);
3447 h = p;
3448 p += sizeof(*h);
3449
3450 op = le32_to_cpu(h->op);
3451 seq = le64_to_cpu(h->seq);
3452
3453 if (msg_version >= 3) {
3454 u32 len;
3455 /* version >= 2, metadata */
3456 if (__decode_session_metadata(&p, end, &blocklisted) < 0)
3457 goto bad;
3458 /* version >= 3, feature bits */
3459 ceph_decode_32_safe(&p, end, len, bad);
3460 if (len) {
3461 ceph_decode_64_safe(&p, end, features, bad);
3462 p += len - sizeof(features);
3463 }
3464 }
3465
3466 mutex_lock(&mdsc->mutex);
3467 if (op == CEPH_SESSION_CLOSE) {
3468 ceph_get_mds_session(session);
3469 __unregister_session(mdsc, session);
3470 }
3471 /* FIXME: this ttl calculation is generous */
3472 session->s_ttl = jiffies + HZ*mdsc->mdsmap->m_session_autoclose;
3473 mutex_unlock(&mdsc->mutex);
3474
3475 mutex_lock(&session->s_mutex);
3476
3477 dout("handle_session mds%d %s %p state %s seq %llu\n",
3478 mds, ceph_session_op_name(op), session,
3479 ceph_session_state_name(session->s_state), seq);
3480
3481 if (session->s_state == CEPH_MDS_SESSION_HUNG) {
3482 session->s_state = CEPH_MDS_SESSION_OPEN;
3483 pr_info("mds%d came back\n", session->s_mds);
3484 }
3485
3486 switch (op) {
3487 case CEPH_SESSION_OPEN:
3488 if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
3489 pr_info("mds%d reconnect success\n", session->s_mds);
3490 session->s_state = CEPH_MDS_SESSION_OPEN;
3491 session->s_features = features;
3492 renewed_caps(mdsc, session, 0);
3493 if (test_bit(CEPHFS_FEATURE_METRIC_COLLECT, &session->s_features))
3494 metric_schedule_delayed(&mdsc->metric);
3495 wake = 1;
3496 if (mdsc->stopping)
3497 __close_session(mdsc, session);
3498 break;
3499
3500 case CEPH_SESSION_RENEWCAPS:
3501 if (session->s_renew_seq == seq)
3502 renewed_caps(mdsc, session, 1);
3503 break;
3504
3505 case CEPH_SESSION_CLOSE:
3506 if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
3507 pr_info("mds%d reconnect denied\n", session->s_mds);
3508 session->s_state = CEPH_MDS_SESSION_CLOSED;
3509 cleanup_session_requests(mdsc, session);
3510 remove_session_caps(session);
3511 wake = 2; /* for good measure */
3512 wake_up_all(&mdsc->session_close_wq);
3513 break;
3514
3515 case CEPH_SESSION_STALE:
3516 pr_info("mds%d caps went stale, renewing\n",
3517 session->s_mds);
3518 atomic_inc(&session->s_cap_gen);
3519 session->s_cap_ttl = jiffies - 1;
3520 send_renew_caps(mdsc, session);
3521 break;
3522
3523 case CEPH_SESSION_RECALL_STATE:
3524 ceph_trim_caps(mdsc, session, le32_to_cpu(h->max_caps));
3525 break;
3526
3527 case CEPH_SESSION_FLUSHMSG:
3528 send_flushmsg_ack(mdsc, session, seq);
3529 break;
3530
3531 case CEPH_SESSION_FORCE_RO:
3532 dout("force_session_readonly %p\n", session);
3533 spin_lock(&session->s_cap_lock);
3534 session->s_readonly = true;
3535 spin_unlock(&session->s_cap_lock);
3536 wake_up_session_caps(session, FORCE_RO);
3537 break;
3538
3539 case CEPH_SESSION_REJECT:
3540 WARN_ON(session->s_state != CEPH_MDS_SESSION_OPENING);
3541 pr_info("mds%d rejected session\n", session->s_mds);
3542 session->s_state = CEPH_MDS_SESSION_REJECTED;
3543 cleanup_session_requests(mdsc, session);
3544 remove_session_caps(session);
3545 if (blocklisted)
3546 mdsc->fsc->blocklisted = true;
3547 wake = 2; /* for good measure */
3548 break;
3549
3550 default:
3551 pr_err("mdsc_handle_session bad op %d mds%d\n", op, mds);
3552 WARN_ON(1);
3553 }
3554
3555 mutex_unlock(&session->s_mutex);
3556 if (wake) {
3557 mutex_lock(&mdsc->mutex);
3558 __wake_requests(mdsc, &session->s_waiting);
3559 if (wake == 2)
3560 kick_requests(mdsc, mds);
3561 mutex_unlock(&mdsc->mutex);
3562 }
3563 if (op == CEPH_SESSION_CLOSE)
3564 ceph_put_mds_session(session);
3565 return;
3566
3567bad:
3568 pr_err("mdsc_handle_session corrupt message mds%d len %d\n", mds,
3569 (int)msg->front.iov_len);
3570 ceph_msg_dump(msg);
3571 return;
3572}
3573
3574void ceph_mdsc_release_dir_caps(struct ceph_mds_request *req)
3575{
3576 int dcaps;
3577
3578 dcaps = xchg(&req->r_dir_caps, 0);
3579 if (dcaps) {
3580 dout("releasing r_dir_caps=%s\n", ceph_cap_string(dcaps));
3581 ceph_put_cap_refs(ceph_inode(req->r_parent), dcaps);
3582 }
3583}
3584
3585void ceph_mdsc_release_dir_caps_no_check(struct ceph_mds_request *req)
3586{
3587 int dcaps;
3588
3589 dcaps = xchg(&req->r_dir_caps, 0);
3590 if (dcaps) {
3591 dout("releasing r_dir_caps=%s\n", ceph_cap_string(dcaps));
3592 ceph_put_cap_refs_no_check_caps(ceph_inode(req->r_parent),
3593 dcaps);
3594 }
3595}
3596
3597/*
3598 * called under session->s_mutex.
3599 */
3600static void replay_unsafe_requests(struct ceph_mds_client *mdsc,
3601 struct ceph_mds_session *session)
3602{
3603 struct ceph_mds_request *req, *nreq;
3604 struct rb_node *p;
3605
3606 dout("replay_unsafe_requests mds%d\n", session->s_mds);
3607
3608 mutex_lock(&mdsc->mutex);
3609 list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item)
3610 __send_request(session, req, true);
3611
3612 /*
3613 * Also re-send old requests when the MDS enters the reconnect stage,
3614 * so that the MDS can process completed requests in the clientreplay stage.
3615 */
3616 p = rb_first(&mdsc->request_tree);
3617 while (p) {
3618 req = rb_entry(p, struct ceph_mds_request, r_node);
3619 p = rb_next(p);
3620 if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags))
3621 continue;
3622 if (req->r_attempts == 0)
3623 continue; /* only old requests */
3624 if (!req->r_session)
3625 continue;
3626 if (req->r_session->s_mds != session->s_mds)
3627 continue;
3628
3629 ceph_mdsc_release_dir_caps_no_check(req);
3630
3631 __send_request(session, req, true);
3632 }
3633 mutex_unlock(&mdsc->mutex);
3634}
3635
3636static int send_reconnect_partial(struct ceph_reconnect_state *recon_state)
3637{
3638 struct ceph_msg *reply;
3639 struct ceph_pagelist *_pagelist;
3640 struct page *page;
3641 __le32 *addr;
3642 int err = -ENOMEM;
3643
3644 if (!recon_state->allow_multi)
3645 return -ENOSPC;
3646
3647 /* can't handle a message that contains both caps and realms (or neither) */
3648 BUG_ON(!recon_state->nr_caps == !recon_state->nr_realms);
3649
3650 /* pre-allocate new pagelist */
3651 _pagelist = ceph_pagelist_alloc(GFP_NOFS);
3652 if (!_pagelist)
3653 return -ENOMEM;
3654
3655 reply = ceph_msg_new2(CEPH_MSG_CLIENT_RECONNECT, 0, 1, GFP_NOFS, false);
3656 if (!reply)
3657 goto fail_msg;
3658
3659 /* placeholder for nr_caps */
3660 err = ceph_pagelist_encode_32(_pagelist, 0);
3661 if (err < 0)
3662 goto fail;
3663
3664 if (recon_state->nr_caps) {
3665 /* currently encoding caps */
3666 err = ceph_pagelist_encode_32(recon_state->pagelist, 0);
3667 if (err)
3668 goto fail;
3669 } else {
3670 /* placeholder for nr_realms (currently encoding realms) */
3671 err = ceph_pagelist_encode_32(_pagelist, 0);
3672 if (err < 0)
3673 goto fail;
3674 }
3675
3676 err = ceph_pagelist_encode_8(recon_state->pagelist, 1);
3677 if (err)
3678 goto fail;
3679
3680 page = list_first_entry(&recon_state->pagelist->head, struct page, lru);
3681 addr = kmap_atomic(page);
3682 if (recon_state->nr_caps) {
3683 /* currently encoding caps */
3684 *addr = cpu_to_le32(recon_state->nr_caps);
3685 } else {
3686 /* currently encoding realms */
3687 *(addr + 1) = cpu_to_le32(recon_state->nr_realms);
3688 }
3689 kunmap_atomic(addr);
3690
3691 reply->hdr.version = cpu_to_le16(5);
3692 reply->hdr.compat_version = cpu_to_le16(4);
3693
3694 reply->hdr.data_len = cpu_to_le32(recon_state->pagelist->length);
3695 ceph_msg_data_add_pagelist(reply, recon_state->pagelist);
3696
3697 ceph_con_send(&recon_state->session->s_con, reply);
3698 ceph_pagelist_release(recon_state->pagelist);
3699
3700 recon_state->pagelist = _pagelist;
3701 recon_state->nr_caps = 0;
3702 recon_state->nr_realms = 0;
3703 recon_state->msg_version = 5;
3704 return 0;
3705fail:
3706 ceph_msg_put(reply);
3707fail_msg:
3708 ceph_pagelist_release(_pagelist);
3709 return err;
3710}
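
/*
 * Shape of the partial message sent above (a sketch):
 *
 *	u32 nr_caps		patched in via kmap_atomic() above
 *	[u32 nr_realms]		present once caps are done (v5 layout)
 *	... cap/realm records accumulated in the pagelist ...
 *	u8  1			"more fragments follow" flag
 *
 * The freshly allocated _pagelist then restarts the same layout for the
 * next fragment, with both counters reset to zero.
 */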
3711
3712static struct dentry* d_find_primary(struct inode *inode)
3713{
3714 struct dentry *alias, *dn = NULL;
3715
3716 if (hlist_empty(&inode->i_dentry))
3717 return NULL;
3718
3719 spin_lock(&inode->i_lock);
3720 if (hlist_empty(&inode->i_dentry))
3721 goto out_unlock;
3722
3723 if (S_ISDIR(inode->i_mode)) {
3724 alias = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
3725 if (!IS_ROOT(alias))
3726 dn = dget(alias);
3727 goto out_unlock;
3728 }
3729
3730 hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
3731 spin_lock(&alias->d_lock);
3732 if (!d_unhashed(alias) &&
3733 (ceph_dentry(alias)->flags & CEPH_DENTRY_PRIMARY_LINK)) {
3734 dn = dget_dlock(alias);
3735 }
3736 spin_unlock(&alias->d_lock);
3737 if (dn)
3738 break;
3739 }
3740out_unlock:
3741 spin_unlock(&inode->i_lock);
3742 return dn;
3743}
3744
3745/*
3746 * Encode information about a cap for a reconnect with the MDS.
3747 */
3748static int reconnect_caps_cb(struct inode *inode, struct ceph_cap *cap,
3749 void *arg)
3750{
3751 union {
3752 struct ceph_mds_cap_reconnect v2;
3753 struct ceph_mds_cap_reconnect_v1 v1;
3754 } rec;
3755 struct ceph_inode_info *ci = cap->ci;
3756 struct ceph_reconnect_state *recon_state = arg;
3757 struct ceph_pagelist *pagelist = recon_state->pagelist;
3758 struct dentry *dentry;
3759 char *path;
3760 int pathlen, err;
3761 u64 pathbase;
3762 u64 snap_follows;
3763
3764 dout(" adding %p ino %llx.%llx cap %p %lld %s\n",
3765 inode, ceph_vinop(inode), cap, cap->cap_id,
3766 ceph_cap_string(cap->issued));
3767
3768 dentry = d_find_primary(inode);
3769 if (dentry) {
3770 /* set pathbase to parent dir when msg_version >= 2 */
3771 path = ceph_mdsc_build_path(dentry, &pathlen, &pathbase,
3772 recon_state->msg_version >= 2);
3773 dput(dentry);
3774 if (IS_ERR(path)) {
3775 err = PTR_ERR(path);
3776 goto out_err;
3777 }
3778 } else {
3779 path = NULL;
3780 pathlen = 0;
3781 pathbase = 0;
3782 }
3783
3784 spin_lock(&ci->i_ceph_lock);
3785 cap->seq = 0; /* reset cap seq */
3786 cap->issue_seq = 0; /* and issue_seq */
3787 cap->mseq = 0; /* and migrate_seq */
3788 cap->cap_gen = atomic_read(&cap->session->s_cap_gen);
3789
3790 /* These are lost when the session goes away */
3791 if (S_ISDIR(inode->i_mode)) {
3792 if (cap->issued & CEPH_CAP_DIR_CREATE) {
3793 ceph_put_string(rcu_dereference_raw(ci->i_cached_layout.pool_ns));
3794 memset(&ci->i_cached_layout, 0, sizeof(ci->i_cached_layout));
3795 }
3796 cap->issued &= ~CEPH_CAP_ANY_DIR_OPS;
3797 }
3798
3799 if (recon_state->msg_version >= 2) {
3800 rec.v2.cap_id = cpu_to_le64(cap->cap_id);
3801 rec.v2.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
3802 rec.v2.issued = cpu_to_le32(cap->issued);
3803 rec.v2.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
3804 rec.v2.pathbase = cpu_to_le64(pathbase);
3805 rec.v2.flock_len = (__force __le32)
3806 ((ci->i_ceph_flags & CEPH_I_ERROR_FILELOCK) ? 0 : 1);
3807 } else {
3808 rec.v1.cap_id = cpu_to_le64(cap->cap_id);
3809 rec.v1.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
3810 rec.v1.issued = cpu_to_le32(cap->issued);
3811 rec.v1.size = cpu_to_le64(i_size_read(inode));
3812 ceph_encode_timespec64(&rec.v1.mtime, &inode->i_mtime);
3813 ceph_encode_timespec64(&rec.v1.atime, &inode->i_atime);
3814 rec.v1.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
3815 rec.v1.pathbase = cpu_to_le64(pathbase);
3816 }
3817
3818 if (list_empty(&ci->i_cap_snaps)) {
3819 snap_follows = ci->i_head_snapc ? ci->i_head_snapc->seq : 0;
3820 } else {
3821 struct ceph_cap_snap *capsnap =
3822 list_first_entry(&ci->i_cap_snaps,
3823 struct ceph_cap_snap, ci_item);
3824 snap_follows = capsnap->follows;
3825 }
3826 spin_unlock(&ci->i_ceph_lock);
3827
3828 if (recon_state->msg_version >= 2) {
3829 int num_fcntl_locks, num_flock_locks;
3830 struct ceph_filelock *flocks = NULL;
3831 size_t struct_len, total_len = sizeof(u64);
3832 u8 struct_v = 0;
3833
3834encode_again:
3835 if (rec.v2.flock_len) {
3836 ceph_count_locks(inode, &num_fcntl_locks, &num_flock_locks);
3837 } else {
3838 num_fcntl_locks = 0;
3839 num_flock_locks = 0;
3840 }
3841 if (num_fcntl_locks + num_flock_locks > 0) {
3842 flocks = kmalloc_array(num_fcntl_locks + num_flock_locks,
3843 sizeof(struct ceph_filelock),
3844 GFP_NOFS);
3845 if (!flocks) {
3846 err = -ENOMEM;
3847 goto out_err;
3848 }
3849 err = ceph_encode_locks_to_buffer(inode, flocks,
3850 num_fcntl_locks,
3851 num_flock_locks);
3852 if (err) {
3853 kfree(flocks);
3854 flocks = NULL;
3855 if (err == -ENOSPC)
3856 goto encode_again;
3857 goto out_err;
3858 }
3859 } else {
3860 kfree(flocks);
3861 flocks = NULL;
3862 }
3863
3864 if (recon_state->msg_version >= 3) {
3865 /* version, compat_version and struct_len */
3866 total_len += 2 * sizeof(u8) + sizeof(u32);
3867 struct_v = 2;
3868 }
3869 /*
3870 * number of encoded locks is stable, so copy to pagelist
3871 */
3872 struct_len = 2 * sizeof(u32) +
3873 (num_fcntl_locks + num_flock_locks) *
3874 sizeof(struct ceph_filelock);
3875 rec.v2.flock_len = cpu_to_le32(struct_len);
3876
3877 struct_len += sizeof(u32) + pathlen + sizeof(rec.v2);
3878
3879 if (struct_v >= 2)
3880 struct_len += sizeof(u64); /* snap_follows */
3881
3882 total_len += struct_len;
3883
3884 if (pagelist->length + total_len > RECONNECT_MAX_SIZE) {
3885 err = send_reconnect_partial(recon_state);
3886 if (err)
3887 goto out_freeflocks;
3888 pagelist = recon_state->pagelist;
3889 }
3890
3891 err = ceph_pagelist_reserve(pagelist, total_len);
3892 if (err)
3893 goto out_freeflocks;
3894
3895 ceph_pagelist_encode_64(pagelist, ceph_ino(inode));
3896 if (recon_state->msg_version >= 3) {
3897 ceph_pagelist_encode_8(pagelist, struct_v);
3898 ceph_pagelist_encode_8(pagelist, 1);
3899 ceph_pagelist_encode_32(pagelist, struct_len);
3900 }
3901 ceph_pagelist_encode_string(pagelist, path, pathlen);
3902 ceph_pagelist_append(pagelist, &rec, sizeof(rec.v2));
3903 ceph_locks_to_pagelist(flocks, pagelist,
3904 num_fcntl_locks, num_flock_locks);
3905 if (struct_v >= 2)
3906 ceph_pagelist_encode_64(pagelist, snap_follows);
3907out_freeflocks:
3908 kfree(flocks);
3909 } else {
3910 err = ceph_pagelist_reserve(pagelist,
3911 sizeof(u64) + sizeof(u32) +
3912 pathlen + sizeof(rec.v1));
3913 if (err)
3914 goto out_err;
3915
3916 ceph_pagelist_encode_64(pagelist, ceph_ino(inode));
3917 ceph_pagelist_encode_string(pagelist, path, pathlen);
3918 ceph_pagelist_append(pagelist, &rec, sizeof(rec.v1));
3919 }
3920
3921out_err:
3922 ceph_mdsc_free_path(path, pathlen);
3923 if (!err)
3924 recon_state->nr_caps++;
3925 return err;
3926}
3927
3928static int encode_snap_realms(struct ceph_mds_client *mdsc,
3929 struct ceph_reconnect_state *recon_state)
3930{
3931 struct rb_node *p;
3932 struct ceph_pagelist *pagelist = recon_state->pagelist;
3933 int err = 0;
3934
3935 if (recon_state->msg_version >= 4) {
3936 err = ceph_pagelist_encode_32(pagelist, mdsc->num_snap_realms);
3937 if (err < 0)
3938 goto fail;
3939 }
3940
3941 /*
3942 * snaprealms. we provide mds with the ino, seq (version), and
3943 * parent for all of our realms. If the mds has any newer info,
3944 * it will tell us.
3945 */
3946 for (p = rb_first(&mdsc->snap_realms); p; p = rb_next(p)) {
3947 struct ceph_snap_realm *realm =
3948 rb_entry(p, struct ceph_snap_realm, node);
3949 struct ceph_mds_snaprealm_reconnect sr_rec;
3950
3951 if (recon_state->msg_version >= 4) {
3952 size_t need = sizeof(u8) * 2 + sizeof(u32) +
3953 sizeof(sr_rec);
3954
3955 if (pagelist->length + need > RECONNECT_MAX_SIZE) {
3956 err = send_reconnect_partial(recon_state);
3957 if (err)
3958 goto fail;
3959 pagelist = recon_state->pagelist;
3960 }
3961
3962 err = ceph_pagelist_reserve(pagelist, need);
3963 if (err)
3964 goto fail;
3965
3966 ceph_pagelist_encode_8(pagelist, 1);
3967 ceph_pagelist_encode_8(pagelist, 1);
3968 ceph_pagelist_encode_32(pagelist, sizeof(sr_rec));
3969 }
3970
3971 dout(" adding snap realm %llx seq %lld parent %llx\n",
3972 realm->ino, realm->seq, realm->parent_ino);
3973 sr_rec.ino = cpu_to_le64(realm->ino);
3974 sr_rec.seq = cpu_to_le64(realm->seq);
3975 sr_rec.parent = cpu_to_le64(realm->parent_ino);
3976
3977 err = ceph_pagelist_append(pagelist, &sr_rec, sizeof(sr_rec));
3978 if (err)
3979 goto fail;
3980
3981 recon_state->nr_realms++;
3982 }
3983fail:
3984 return err;
3985}
3986
3987
3988/*
3989 * If an MDS fails and recovers, clients need to reconnect in order to
3990 * reestablish shared state. This includes all caps issued through
3991 * this session _and_ the snap_realm hierarchy. Because it's not
3992 * clear which snap realms the mds cares about, we send everything we
3993 * know about, which ensures we'll then get any new info the
3994 * recovering MDS might have.
3995 *
3996 * This is a relatively heavyweight operation, but it's rare.
3997 */
3998static void send_mds_reconnect(struct ceph_mds_client *mdsc,
3999 struct ceph_mds_session *session)
4000{
4001 struct ceph_msg *reply;
4002 int mds = session->s_mds;
4003 int err = -ENOMEM;
4004 struct ceph_reconnect_state recon_state = {
4005 .session = session,
4006 };
4007 LIST_HEAD(dispose);
4008
4009 pr_info("mds%d reconnect start\n", mds);
4010
4011 recon_state.pagelist = ceph_pagelist_alloc(GFP_NOFS);
4012 if (!recon_state.pagelist)
4013 goto fail_nopagelist;
4014
4015 reply = ceph_msg_new2(CEPH_MSG_CLIENT_RECONNECT, 0, 1, GFP_NOFS, false);
4016 if (!reply)
4017 goto fail_nomsg;
4018
4019 xa_destroy(&session->s_delegated_inos);
4020
4021 mutex_lock(&session->s_mutex);
4022 session->s_state = CEPH_MDS_SESSION_RECONNECTING;
4023 session->s_seq = 0;
4024
4025 dout("session %p state %s\n", session,
4026 ceph_session_state_name(session->s_state));
4027
4028 atomic_inc(&session->s_cap_gen);
4029
4030 spin_lock(&session->s_cap_lock);
4031 /* don't know if session is readonly */
4032 session->s_readonly = 0;
4033 /*
4034 * notify __ceph_remove_cap() that we are composing cap reconnect.
4035 * If a cap gets released before being added to the cap reconnect,
4036 * __ceph_remove_cap() should skip queuing cap release.
4037 */
4038 session->s_cap_reconnect = 1;
4039 /* drop old cap expires; we're about to reestablish that state */
4040 detach_cap_releases(session, &dispose);
4041 spin_unlock(&session->s_cap_lock);
4042 dispose_cap_releases(mdsc, &dispose);
4043
4044 /* trim unused caps to reduce MDS's cache rejoin time */
4045 if (mdsc->fsc->sb->s_root)
4046 shrink_dcache_parent(mdsc->fsc->sb->s_root);
4047
4048 ceph_con_close(&session->s_con);
4049 ceph_con_open(&session->s_con,
4050 CEPH_ENTITY_TYPE_MDS, mds,
4051 ceph_mdsmap_get_addr(mdsc->mdsmap, mds));
4052
4053 /* replay unsafe requests */
4054 replay_unsafe_requests(mdsc, session);
4055
4056 ceph_early_kick_flushing_caps(mdsc, session);
4057
4058 down_read(&mdsc->snap_rwsem);
4059
4060 /* placeholder for nr_caps */
4061 err = ceph_pagelist_encode_32(recon_state.pagelist, 0);
4062 if (err)
4063 goto fail;
4064
4065 if (test_bit(CEPHFS_FEATURE_MULTI_RECONNECT, &session->s_features)) {
4066 recon_state.msg_version = 3;
4067 recon_state.allow_multi = true;
4068 } else if (session->s_con.peer_features & CEPH_FEATURE_MDSENC) {
4069 recon_state.msg_version = 3;
4070 } else {
4071 recon_state.msg_version = 2;
4072 }
4073 /* traverse this session's caps */
4074 err = ceph_iterate_session_caps(session, reconnect_caps_cb, &recon_state);
4075
4076 spin_lock(&session->s_cap_lock);
4077 session->s_cap_reconnect = 0;
4078 spin_unlock(&session->s_cap_lock);
4079
4080 if (err < 0)
4081 goto fail;
4082
4083 /* check if all realms can be encoded into current message */
4084 if (mdsc->num_snap_realms) {
4085 size_t total_len =
4086 recon_state.pagelist->length +
4087 mdsc->num_snap_realms *
4088 sizeof(struct ceph_mds_snaprealm_reconnect);
4089 if (recon_state.msg_version >= 4) {
4090 /* number of realms */
4091 total_len += sizeof(u32);
4092 /* version, compat_version and struct_len */
4093 total_len += mdsc->num_snap_realms *
4094 (2 * sizeof(u8) + sizeof(u32));
4095 }
4096 if (total_len > RECONNECT_MAX_SIZE) {
4097 if (!recon_state.allow_multi) {
4098 err = -ENOSPC;
4099 goto fail;
4100 }
4101 if (recon_state.nr_caps) {
4102 err = send_reconnect_partial(&recon_state);
4103 if (err)
4104 goto fail;
4105 }
4106 recon_state.msg_version = 5;
4107 }
4108 }
4109
4110 err = encode_snap_realms(mdsc, &recon_state);
4111 if (err < 0)
4112 goto fail;
4113
4114 if (recon_state.msg_version >= 5) {
4115 err = ceph_pagelist_encode_8(recon_state.pagelist, 0);
4116 if (err < 0)
4117 goto fail;
4118 }
4119
4120 if (recon_state.nr_caps || recon_state.nr_realms) {
4121 struct page *page =
4122 list_first_entry(&recon_state.pagelist->head,
4123 struct page, lru);
4124 __le32 *addr = kmap_atomic(page);
4125 if (recon_state.nr_caps) {
4126 WARN_ON(recon_state.nr_realms != mdsc->num_snap_realms);
4127 *addr = cpu_to_le32(recon_state.nr_caps);
4128 } else if (recon_state.msg_version >= 4) {
4129 *(addr + 1) = cpu_to_le32(recon_state.nr_realms);
4130 }
4131 kunmap_atomic(addr);
4132 }
4133
4134 reply->hdr.version = cpu_to_le16(recon_state.msg_version);
4135 if (recon_state.msg_version >= 4)
4136 reply->hdr.compat_version = cpu_to_le16(4);
4137
4138 reply->hdr.data_len = cpu_to_le32(recon_state.pagelist->length);
4139 ceph_msg_data_add_pagelist(reply, recon_state.pagelist);
4140
4141 ceph_con_send(&session->s_con, reply);
4142
4143 mutex_unlock(&session->s_mutex);
4144
4145 mutex_lock(&mdsc->mutex);
4146 __wake_requests(mdsc, &session->s_waiting);
4147 mutex_unlock(&mdsc->mutex);
4148
4149 up_read(&mdsc->snap_rwsem);
4150 ceph_pagelist_release(recon_state.pagelist);
4151 return;
4152
4153fail:
4154 ceph_msg_put(reply);
4155 up_read(&mdsc->snap_rwsem);
4156 mutex_unlock(&session->s_mutex);
4157fail_nomsg:
4158 ceph_pagelist_release(recon_state.pagelist);
4159fail_nopagelist:
4160 pr_err("error %d preparing reconnect for mds%d\n", err, mds);
4161 return;
4162}
4163
4164
4165/*
4166 * compare old and new mdsmaps, kicking requests
4167 * and closing out old connections as necessary
4168 *
4169 * called under mdsc->mutex.
4170 */
4171static void check_new_map(struct ceph_mds_client *mdsc,
4172 struct ceph_mdsmap *newmap,
4173 struct ceph_mdsmap *oldmap)
4174{
4175 int i;
4176 int oldstate, newstate;
4177 struct ceph_mds_session *s;
4178
4179 dout("check_new_map new %u old %u\n",
4180 newmap->m_epoch, oldmap->m_epoch);
4181
4182 for (i = 0; i < oldmap->possible_max_rank && i < mdsc->max_sessions; i++) {
4183 if (!mdsc->sessions[i])
4184 continue;
4185 s = mdsc->sessions[i];
4186 oldstate = ceph_mdsmap_get_state(oldmap, i);
4187 newstate = ceph_mdsmap_get_state(newmap, i);
4188
4189 dout("check_new_map mds%d state %s%s -> %s%s (session %s)\n",
4190 i, ceph_mds_state_name(oldstate),
4191 ceph_mdsmap_is_laggy(oldmap, i) ? " (laggy)" : "",
4192 ceph_mds_state_name(newstate),
4193 ceph_mdsmap_is_laggy(newmap, i) ? " (laggy)" : "",
4194 ceph_session_state_name(s->s_state));
4195
4196 if (i >= newmap->possible_max_rank) {
4197 /* force close session for stopped mds */
4198 ceph_get_mds_session(s);
4199 __unregister_session(mdsc, s);
4200 __wake_requests(mdsc, &s->s_waiting);
4201 mutex_unlock(&mdsc->mutex);
4202
4203 mutex_lock(&s->s_mutex);
4204 cleanup_session_requests(mdsc, s);
4205 remove_session_caps(s);
4206 mutex_unlock(&s->s_mutex);
4207
4208 ceph_put_mds_session(s);
4209
4210 mutex_lock(&mdsc->mutex);
4211 kick_requests(mdsc, i);
4212 continue;
4213 }
4214
4215 if (memcmp(ceph_mdsmap_get_addr(oldmap, i),
4216 ceph_mdsmap_get_addr(newmap, i),
4217 sizeof(struct ceph_entity_addr))) {
4218 /* just close it */
4219 mutex_unlock(&mdsc->mutex);
4220 mutex_lock(&s->s_mutex);
4221 mutex_lock(&mdsc->mutex);
4222 ceph_con_close(&s->s_con);
4223 mutex_unlock(&s->s_mutex);
4224 s->s_state = CEPH_MDS_SESSION_RESTARTING;
4225 } else if (oldstate == newstate) {
4226 continue; /* nothing new with this mds */
4227 }
4228
4229 /*
4230 * send reconnect?
4231 */
4232 if (s->s_state == CEPH_MDS_SESSION_RESTARTING &&
4233 newstate >= CEPH_MDS_STATE_RECONNECT) {
4234 mutex_unlock(&mdsc->mutex);
4235 send_mds_reconnect(mdsc, s);
4236 mutex_lock(&mdsc->mutex);
4237 }
4238
4239 /*
4240 * kick request on any mds that has gone active.
4241 */
4242 if (oldstate < CEPH_MDS_STATE_ACTIVE &&
4243 newstate >= CEPH_MDS_STATE_ACTIVE) {
4244 if (oldstate != CEPH_MDS_STATE_CREATING &&
4245 oldstate != CEPH_MDS_STATE_STARTING)
4246 pr_info("mds%d recovery completed\n", s->s_mds);
4247 kick_requests(mdsc, i);
4248 mutex_unlock(&mdsc->mutex);
4249 mutex_lock(&s->s_mutex);
4250 mutex_lock(&mdsc->mutex);
4251 ceph_kick_flushing_caps(mdsc, s);
4252 mutex_unlock(&s->s_mutex);
4253 wake_up_session_caps(s, RECONNECT);
4254 }
4255 }
4256
4257 for (i = 0; i < newmap->possible_max_rank && i < mdsc->max_sessions; i++) {
4258 s = mdsc->sessions[i];
4259 if (!s)
4260 continue;
4261 if (!ceph_mdsmap_is_laggy(newmap, i))
4262 continue;
4263 if (s->s_state == CEPH_MDS_SESSION_OPEN ||
4264 s->s_state == CEPH_MDS_SESSION_HUNG ||
4265 s->s_state == CEPH_MDS_SESSION_CLOSING) {
4266 dout(" connecting to export targets of laggy mds%d\n",
4267 i);
4268 __open_export_target_sessions(mdsc, s);
4269 }
4270 }
4271}
4272
4273
4274
4275/*
4276 * leases
4277 */
4278
4279/*
4280 * caller must hold session s_mutex, dentry->d_lock
4281 */
4282void __ceph_mdsc_drop_dentry_lease(struct dentry *dentry)
4283{
4284 struct ceph_dentry_info *di = ceph_dentry(dentry);
4285
4286 ceph_put_mds_session(di->lease_session);
4287 di->lease_session = NULL;
4288}
4289
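/*
 * Handle a CEPH_MSG_CLIENT_LEASE message from the MDS.  The front of
 * the message is a struct ceph_mds_lease followed by a length-prefixed
 * dentry name.  On REVOKE we drop any matching lease and ack the
 * revocation; on RENEW we extend the lease timeout, provided it was
 * issued by this session and a renewal is pending.
 */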
4290static void handle_lease(struct ceph_mds_client *mdsc,
4291 struct ceph_mds_session *session,
4292 struct ceph_msg *msg)
4293{
4294 struct super_block *sb = mdsc->fsc->sb;
4295 struct inode *inode;
4296 struct dentry *parent, *dentry;
4297 struct ceph_dentry_info *di;
4298 int mds = session->s_mds;
4299 struct ceph_mds_lease *h = msg->front.iov_base;
4300 u32 seq;
4301 struct ceph_vino vino;
4302 struct qstr dname;
4303 int release = 0;
4304
4305 dout("handle_lease from mds%d\n", mds);
4306
4307 /* decode */
4308 if (msg->front.iov_len < sizeof(*h) + sizeof(u32))
4309 goto bad;
4310 vino.ino = le64_to_cpu(h->ino);
4311 vino.snap = CEPH_NOSNAP;
4312 seq = le32_to_cpu(h->seq);
4313 dname.len = get_unaligned_le32(h + 1);
4314 if (msg->front.iov_len < sizeof(*h) + sizeof(u32) + dname.len)
4315 goto bad;
4316 dname.name = (void *)(h + 1) + sizeof(u32);
4317
4318 /* lookup inode */
4319 inode = ceph_find_inode(sb, vino);
4320 dout("handle_lease %s, ino %llx %p %.*s\n",
4321 ceph_lease_op_name(h->action), vino.ino, inode,
4322 dname.len, dname.name);
4323
4324 mutex_lock(&session->s_mutex);
4325 inc_session_sequence(session);
4326
4327 if (!inode) {
4328 dout("handle_lease no inode %llx\n", vino.ino);
4329 goto release;
4330 }
4331
4332 /* dentry */
4333 parent = d_find_alias(inode);
4334 if (!parent) {
4335 dout("no parent dentry on inode %p\n", inode);
4336 WARN_ON(1);
4337 goto release; /* hrm... */
4338 }
4339 dname.hash = full_name_hash(parent, dname.name, dname.len);
4340 dentry = d_lookup(parent, &dname);
4341 dput(parent);
4342 if (!dentry)
4343 goto release;
4344
4345 spin_lock(&dentry->d_lock);
4346 di = ceph_dentry(dentry);
4347 switch (h->action) {
4348 case CEPH_MDS_LEASE_REVOKE:
4349 if (di->lease_session == session) {
4350 if (ceph_seq_cmp(di->lease_seq, seq) > 0)
4351 h->seq = cpu_to_le32(di->lease_seq);
4352 __ceph_mdsc_drop_dentry_lease(dentry);
4353 }
4354 release = 1;
4355 break;
4356
4357 case CEPH_MDS_LEASE_RENEW:
4358 if (di->lease_session == session &&
4359 di->lease_gen == atomic_read(&session->s_cap_gen) &&
4360 di->lease_renew_from &&
4361 di->lease_renew_after == 0) {
4362 unsigned long duration =
4363 msecs_to_jiffies(le32_to_cpu(h->duration_ms));
4364
4365 di->lease_seq = seq;
4366 di->time = di->lease_renew_from + duration;
4367 di->lease_renew_after = di->lease_renew_from +
4368 (duration >> 1);
4369 di->lease_renew_from = 0;
4370 }
4371 break;
4372 }
4373 spin_unlock(&dentry->d_lock);
4374 dput(dentry);
4375
4376 if (!release)
4377 goto out;
4378
4379release:
4380 /* let's just reuse the same message */
4381 h->action = CEPH_MDS_LEASE_REVOKE_ACK;
4382 ceph_msg_get(msg);
4383 ceph_con_send(&session->s_con, msg);
4384
4385out:
4386 mutex_unlock(&session->s_mutex);
4387 iput(inode);
4388 return;
4389
4390bad:
4391 pr_err("corrupt lease message\n");
4392 ceph_msg_dump(msg);
4393}
4394
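/*
 * Send a lease message to the MDS for the given dentry.  The message
 * carries the parent directory's ino/snapid plus the length-prefixed
 * dentry name, mirroring the layout of an incoming lease message.
 */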
4395void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session,
4396 struct dentry *dentry, char action,
4397 u32 seq)
4398{
4399 struct ceph_msg *msg;
4400 struct ceph_mds_lease *lease;
4401 struct inode *dir;
4402 int len = sizeof(*lease) + sizeof(u32) + NAME_MAX;
4403
4404 dout("lease_send_msg identry %p %s to mds%d\n",
4405 dentry, ceph_lease_op_name(action), session->s_mds);
4406
4407 msg = ceph_msg_new(CEPH_MSG_CLIENT_LEASE, len, GFP_NOFS, false);
4408 if (!msg)
4409 return;
4410 lease = msg->front.iov_base;
4411 lease->action = action;
4412 lease->seq = cpu_to_le32(seq);
4413
4414 spin_lock(&dentry->d_lock);
4415 dir = d_inode(dentry->d_parent);
4416 lease->ino = cpu_to_le64(ceph_ino(dir));
4417 lease->first = lease->last = cpu_to_le64(ceph_snap(dir));
4418
4419 put_unaligned_le32(dentry->d_name.len, lease + 1);
	memcpy((void *)(lease + 1) + sizeof(u32),
	       dentry->d_name.name, dentry->d_name.len);
4422 spin_unlock(&dentry->d_lock);
4423 /*
4424 * if this is a preemptive lease RELEASE, no need to
4425 * flush request stream, since the actual request will
4426 * soon follow.
4427 */
4428 msg->more_to_follow = (action == CEPH_MDS_LEASE_RELEASE);
4429
4430 ceph_con_send(&session->s_con, msg);
4431}
4432
4433/*
 * lock/unlock each session in turn, to wait for any in-progress
 * session activity to finish
4435 */
4436static void lock_unlock_sessions(struct ceph_mds_client *mdsc)
4437{
4438 int i;
4439
4440 mutex_lock(&mdsc->mutex);
4441 for (i = 0; i < mdsc->max_sessions; i++) {
4442 struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);
4443 if (!s)
4444 continue;
4445 mutex_unlock(&mdsc->mutex);
4446 mutex_lock(&s->s_mutex);
4447 mutex_unlock(&s->s_mutex);
4448 ceph_put_mds_session(s);
4449 mutex_lock(&mdsc->mutex);
4450 }
4451 mutex_unlock(&mdsc->mutex);
4452}
4453
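/*
 * If the client has been blocklisted by the cluster and the mount was
 * created with recover_session=clean, automatically force a reconnect
 * to recover the sessions.
 */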
4454static void maybe_recover_session(struct ceph_mds_client *mdsc)
4455{
4456 struct ceph_fs_client *fsc = mdsc->fsc;
4457
4458 if (!ceph_test_mount_opt(fsc, CLEANRECOVER))
4459 return;
4460
4461 if (READ_ONCE(fsc->mount_state) != CEPH_MOUNT_MOUNTED)
4462 return;
4463
4464 if (!READ_ONCE(fsc->blocklisted))
4465 return;
4466
4467 pr_info("auto reconnect after blocklisted\n");
4468 ceph_force_reconnect(fsc->sb);
4469}
4470
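/*
 * Return false for sessions that are not worth poking (NEW, CLOSING,
 * RESTARTING, CLOSED, REJECTED); flag an OPEN session whose ttl has
 * lapsed as HUNG.
 */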
4471bool check_session_state(struct ceph_mds_session *s)
4472{
4473 switch (s->s_state) {
4474 case CEPH_MDS_SESSION_OPEN:
4475 if (s->s_ttl && time_after(jiffies, s->s_ttl)) {
4476 s->s_state = CEPH_MDS_SESSION_HUNG;
4477 pr_info("mds%d hung\n", s->s_mds);
4478 }
4479 break;
4480 case CEPH_MDS_SESSION_CLOSING:
4481 /* Should never reach this when we're unmounting */
4482 WARN_ON_ONCE(s->s_ttl);
4483 fallthrough;
4484 case CEPH_MDS_SESSION_NEW:
4485 case CEPH_MDS_SESSION_RESTARTING:
4486 case CEPH_MDS_SESSION_CLOSED:
4487 case CEPH_MDS_SESSION_REJECTED:
4488 return false;
4489 }
4490
4491 return true;
4492}
4493
4494/*
4495 * If the sequence is incremented while we're waiting on a REQUEST_CLOSE reply,
4496 * then we need to retransmit that request.
4497 */
4498void inc_session_sequence(struct ceph_mds_session *s)
4499{
4500 lockdep_assert_held(&s->s_mutex);
4501
4502 s->s_seq++;
4503
4504 if (s->s_state == CEPH_MDS_SESSION_CLOSING) {
4505 int ret;
4506
4507 dout("resending session close request for mds%d\n", s->s_mds);
4508 ret = request_close_session(s);
4509 if (ret < 0)
4510 pr_err("unable to close session to mds%d: %d\n",
4511 s->s_mds, ret);
4512 }
4513}
4514
4515/*
4516 * delayed work -- periodically trim expired leases, renew caps with mds. If
4517 * the @delay parameter is set to 0 or if it's more than 5 secs, the default
4518 * workqueue delay value of 5 secs will be used.
4519 */
4520static void schedule_delayed(struct ceph_mds_client *mdsc, unsigned long delay)
4521{
4522 unsigned long max_delay = HZ * 5;
4523
4524 /* 5 secs default delay */
4525 if (!delay || (delay > max_delay))
4526 delay = max_delay;
4527 schedule_delayed_work(&mdsc->delayed_work,
4528 round_jiffies_relative(delay));
4529}
4530
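/*
 * The delayed work timer: periodically renew caps (or at least send a
 * keepalive) on each live session, flush queued cap releases, and run
 * the cap-reclaim, snapid-trim and blocklist-recovery housekeeping
 * before rearming itself.
 */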
4531static void delayed_work(struct work_struct *work)
4532{
4533 struct ceph_mds_client *mdsc =
4534 container_of(work, struct ceph_mds_client, delayed_work.work);
4535 unsigned long delay;
4536 int renew_interval;
4537 int renew_caps;
4538 int i;
4539
4540 dout("mdsc delayed_work\n");
4541
4542 if (mdsc->stopping)
4543 return;
4544
4545 mutex_lock(&mdsc->mutex);
4546 renew_interval = mdsc->mdsmap->m_session_timeout >> 2;
4547 renew_caps = time_after_eq(jiffies, HZ*renew_interval +
4548 mdsc->last_renew_caps);
4549 if (renew_caps)
4550 mdsc->last_renew_caps = jiffies;
4551
4552 for (i = 0; i < mdsc->max_sessions; i++) {
4553 struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);
4554 if (!s)
4555 continue;
4556
4557 if (!check_session_state(s)) {
4558 ceph_put_mds_session(s);
4559 continue;
4560 }
4561 mutex_unlock(&mdsc->mutex);
4562
4563 mutex_lock(&s->s_mutex);
4564 if (renew_caps)
4565 send_renew_caps(mdsc, s);
4566 else
4567 ceph_con_keepalive(&s->s_con);
4568 if (s->s_state == CEPH_MDS_SESSION_OPEN ||
4569 s->s_state == CEPH_MDS_SESSION_HUNG)
4570 ceph_send_cap_releases(mdsc, s);
4571 mutex_unlock(&s->s_mutex);
4572 ceph_put_mds_session(s);
4573
4574 mutex_lock(&mdsc->mutex);
4575 }
4576 mutex_unlock(&mdsc->mutex);
4577
4578 delay = ceph_check_delayed_caps(mdsc);
4579
4580 ceph_queue_cap_reclaim_work(mdsc);
4581
4582 ceph_trim_snapid_map(mdsc);
4583
4584 maybe_recover_session(mdsc);
4585
4586 schedule_delayed(mdsc, delay);
4587}
4588
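/*
 * Allocate the mds client state hanging off the ceph_fs_client and
 * initialize its pieces: an empty mdsmap placeholder, the request and
 * snap realm trees, cap bookkeeping, metrics, and the delayed work.
 */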
int ceph_mdsc_init(struct ceph_fs_client *fsc)
{
4592 struct ceph_mds_client *mdsc;
4593 int err;
4594
4595 mdsc = kzalloc(sizeof(struct ceph_mds_client), GFP_NOFS);
4596 if (!mdsc)
4597 return -ENOMEM;
4598 mdsc->fsc = fsc;
4599 mutex_init(&mdsc->mutex);
4600 mdsc->mdsmap = kzalloc(sizeof(*mdsc->mdsmap), GFP_NOFS);
4601 if (!mdsc->mdsmap) {
4602 err = -ENOMEM;
4603 goto err_mdsc;
4604 }
4605
4606 init_completion(&mdsc->safe_umount_waiters);
4607 init_waitqueue_head(&mdsc->session_close_wq);
4608 INIT_LIST_HEAD(&mdsc->waiting_for_map);
4609 mdsc->sessions = NULL;
4610 atomic_set(&mdsc->num_sessions, 0);
4611 mdsc->max_sessions = 0;
4612 mdsc->stopping = 0;
4613 atomic64_set(&mdsc->quotarealms_count, 0);
4614 mdsc->quotarealms_inodes = RB_ROOT;
4615 mutex_init(&mdsc->quotarealms_inodes_mutex);
4616 mdsc->last_snap_seq = 0;
4617 init_rwsem(&mdsc->snap_rwsem);
4618 mdsc->snap_realms = RB_ROOT;
4619 INIT_LIST_HEAD(&mdsc->snap_empty);
4620 mdsc->num_snap_realms = 0;
4621 spin_lock_init(&mdsc->snap_empty_lock);
4622 mdsc->last_tid = 0;
4623 mdsc->oldest_tid = 0;
4624 mdsc->request_tree = RB_ROOT;
4625 INIT_DELAYED_WORK(&mdsc->delayed_work, delayed_work);
4626 mdsc->last_renew_caps = jiffies;
4627 INIT_LIST_HEAD(&mdsc->cap_delay_list);
4628 INIT_LIST_HEAD(&mdsc->cap_wait_list);
4629 spin_lock_init(&mdsc->cap_delay_lock);
4630 INIT_LIST_HEAD(&mdsc->snap_flush_list);
4631 spin_lock_init(&mdsc->snap_flush_lock);
4632 mdsc->last_cap_flush_tid = 1;
4633 INIT_LIST_HEAD(&mdsc->cap_flush_list);
4634 INIT_LIST_HEAD(&mdsc->cap_dirty_migrating);
4635 mdsc->num_cap_flushing = 0;
4636 spin_lock_init(&mdsc->cap_dirty_lock);
4637 init_waitqueue_head(&mdsc->cap_flushing_wq);
4638 INIT_WORK(&mdsc->cap_reclaim_work, ceph_cap_reclaim_work);
4639 atomic_set(&mdsc->cap_reclaim_pending, 0);
4640 err = ceph_metric_init(&mdsc->metric);
4641 if (err)
4642 goto err_mdsmap;
4643
4644 spin_lock_init(&mdsc->dentry_list_lock);
4645 INIT_LIST_HEAD(&mdsc->dentry_leases);
4646 INIT_LIST_HEAD(&mdsc->dentry_dir_leases);
4647
4648 ceph_caps_init(mdsc);
4649 ceph_adjust_caps_max_min(mdsc, fsc->mount_options);
4650
4651 spin_lock_init(&mdsc->snapid_map_lock);
4652 mdsc->snapid_map_tree = RB_ROOT;
4653 INIT_LIST_HEAD(&mdsc->snapid_map_lru);
4654
4655 init_rwsem(&mdsc->pool_perm_rwsem);
4656 mdsc->pool_perm_tree = RB_ROOT;
4657
4658 strscpy(mdsc->nodename, utsname()->nodename,
4659 sizeof(mdsc->nodename));
4660
4661 fsc->mdsc = mdsc;
4662 return 0;
4663
4664err_mdsmap:
4665 kfree(mdsc->mdsmap);
4666err_mdsc:
4667 kfree(mdsc);
4668 return err;
4669}
4670
4671/*
4672 * Wait for safe replies on open mds requests. If we time out, drop
4673 * all requests from the tree to avoid dangling dentry refs.
4674 */
4675static void wait_requests(struct ceph_mds_client *mdsc)
4676{
4677 struct ceph_options *opts = mdsc->fsc->client->options;
4678 struct ceph_mds_request *req;
4679
4680 mutex_lock(&mdsc->mutex);
4681 if (__get_oldest_req(mdsc)) {
4682 mutex_unlock(&mdsc->mutex);
4683
4684 dout("wait_requests waiting for requests\n");
4685 wait_for_completion_timeout(&mdsc->safe_umount_waiters,
4686 ceph_timeout_jiffies(opts->mount_timeout));
4687
4688 /* tear down remaining requests */
4689 mutex_lock(&mdsc->mutex);
4690 while ((req = __get_oldest_req(mdsc))) {
4691 dout("wait_requests timed out on tid %llu\n",
4692 req->r_tid);
4693 list_del_init(&req->r_wait);
4694 __unregister_request(mdsc, req);
4695 }
4696 }
4697 mutex_unlock(&mdsc->mutex);
4698 dout("wait_requests done\n");
4699}
4700
4701/*
4702 * called before mount is ro, and before dentries are torn down.
4703 * (hmm, does this still race with new lookups?)
4704 */
4705void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc)
4706{
4707 dout("pre_umount\n");
4708 mdsc->stopping = 1;
4709
4710 lock_unlock_sessions(mdsc);
4711 ceph_flush_dirty_caps(mdsc);
4712 wait_requests(mdsc);
4713
4714 /*
4715 * wait for reply handlers to drop their request refs and
4716 * their inode/dcache refs
4717 */
4718 ceph_msgr_flush();
4719
4720 ceph_cleanup_quotarealms_inodes(mdsc);
4721}
4722
4723/*
4724 * wait for all write mds requests to flush.
4725 */
4726static void wait_unsafe_requests(struct ceph_mds_client *mdsc, u64 want_tid)
4727{
4728 struct ceph_mds_request *req = NULL, *nextreq;
4729 struct rb_node *n;
4730
4731 mutex_lock(&mdsc->mutex);
4732 dout("wait_unsafe_requests want %lld\n", want_tid);
4733restart:
4734 req = __get_oldest_req(mdsc);
4735 while (req && req->r_tid <= want_tid) {
4736 /* find next request */
4737 n = rb_next(&req->r_node);
4738 if (n)
4739 nextreq = rb_entry(n, struct ceph_mds_request, r_node);
4740 else
4741 nextreq = NULL;
4742 if (req->r_op != CEPH_MDS_OP_SETFILELOCK &&
4743 (req->r_op & CEPH_MDS_OP_WRITE)) {
4744 /* write op */
4745 ceph_mdsc_get_request(req);
4746 if (nextreq)
4747 ceph_mdsc_get_request(nextreq);
4748 mutex_unlock(&mdsc->mutex);
4749 dout("wait_unsafe_requests wait on %llu (want %llu)\n",
4750 req->r_tid, want_tid);
4751 wait_for_completion(&req->r_safe_completion);
4752 mutex_lock(&mdsc->mutex);
4753 ceph_mdsc_put_request(req);
4754 if (!nextreq)
				break; /* next didn't exist before we waited; we're done */
4756 if (RB_EMPTY_NODE(&nextreq->r_node)) {
4757 /* next request was removed from tree */
4758 ceph_mdsc_put_request(nextreq);
4759 goto restart;
4760 }
4761 ceph_mdsc_put_request(nextreq); /* won't go away */
4762 }
4763 req = nextreq;
4764 }
4765 mutex_unlock(&mdsc->mutex);
4766 dout("wait_unsafe_requests done\n");
4767}
4768
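/*
 * Flush all dirty caps, then wait for (1) in-flight write requests up
 * to the current tid to become safe and (2) the resulting cap flushes
 * to be acked by the MDS.  This backs the sync_fs path.
 */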
4769void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
4770{
4771 u64 want_tid, want_flush;
4772
4773 if (READ_ONCE(mdsc->fsc->mount_state) >= CEPH_MOUNT_SHUTDOWN)
4774 return;
4775
4776 dout("sync\n");
4777 mutex_lock(&mdsc->mutex);
4778 want_tid = mdsc->last_tid;
4779 mutex_unlock(&mdsc->mutex);
4780
4781 ceph_flush_dirty_caps(mdsc);
4782 spin_lock(&mdsc->cap_dirty_lock);
4783 want_flush = mdsc->last_cap_flush_tid;
4784 if (!list_empty(&mdsc->cap_flush_list)) {
4785 struct ceph_cap_flush *cf =
4786 list_last_entry(&mdsc->cap_flush_list,
4787 struct ceph_cap_flush, g_list);
4788 cf->wake = true;
4789 }
4790 spin_unlock(&mdsc->cap_dirty_lock);
4791
4792 dout("sync want tid %lld flush_seq %lld\n",
4793 want_tid, want_flush);
4794
4795 wait_unsafe_requests(mdsc, want_tid);
4796 wait_caps_flush(mdsc, want_flush);
4797}
4798
4799/*
4800 * true if all sessions are closed, or we force unmount
4801 */
4802static bool done_closing_sessions(struct ceph_mds_client *mdsc, int skipped)
4803{
4804 if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN)
4805 return true;
4806 return atomic_read(&mdsc->num_sessions) <= skipped;
4807}
4808
4809/*
4810 * called after sb is ro.
4811 */
4812void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
4813{
4814 struct ceph_options *opts = mdsc->fsc->client->options;
4815 struct ceph_mds_session *session;
4816 int i;
4817 int skipped = 0;
4818
4819 dout("close_sessions\n");
4820
4821 /* close sessions */
4822 mutex_lock(&mdsc->mutex);
4823 for (i = 0; i < mdsc->max_sessions; i++) {
4824 session = __ceph_lookup_mds_session(mdsc, i);
4825 if (!session)
4826 continue;
4827 mutex_unlock(&mdsc->mutex);
4828 mutex_lock(&session->s_mutex);
4829 if (__close_session(mdsc, session) <= 0)
4830 skipped++;
4831 mutex_unlock(&session->s_mutex);
4832 ceph_put_mds_session(session);
4833 mutex_lock(&mdsc->mutex);
4834 }
4835 mutex_unlock(&mdsc->mutex);
4836
4837 dout("waiting for sessions to close\n");
4838 wait_event_timeout(mdsc->session_close_wq,
4839 done_closing_sessions(mdsc, skipped),
4840 ceph_timeout_jiffies(opts->mount_timeout));
4841
4842 /* tear down remaining sessions */
4843 mutex_lock(&mdsc->mutex);
4844 for (i = 0; i < mdsc->max_sessions; i++) {
4845 if (mdsc->sessions[i]) {
4846 session = ceph_get_mds_session(mdsc->sessions[i]);
4847 __unregister_session(mdsc, session);
4848 mutex_unlock(&mdsc->mutex);
4849 mutex_lock(&session->s_mutex);
4850 remove_session_caps(session);
4851 mutex_unlock(&session->s_mutex);
4852 ceph_put_mds_session(session);
4853 mutex_lock(&mdsc->mutex);
4854 }
4855 }
4856 WARN_ON(!list_empty(&mdsc->cap_delay_list));
4857 mutex_unlock(&mdsc->mutex);
4858
4859 ceph_cleanup_snapid_map(mdsc);
4860 ceph_cleanup_empty_realms(mdsc);
4861
4862 cancel_work_sync(&mdsc->cap_reclaim_work);
4863 cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */
4864
4865 dout("stopped\n");
4866}
4867
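/*
 * Forcibly tear down the sessions on a forced umount: close the
 * connections, clean out pending requests and caps, and wake anyone
 * still waiting on a session or on an mdsmap.
 */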
4868void ceph_mdsc_force_umount(struct ceph_mds_client *mdsc)
4869{
4870 struct ceph_mds_session *session;
4871 int mds;
4872
4873 dout("force umount\n");
4874
4875 mutex_lock(&mdsc->mutex);
4876 for (mds = 0; mds < mdsc->max_sessions; mds++) {
4877 session = __ceph_lookup_mds_session(mdsc, mds);
4878 if (!session)
4879 continue;
4880
4881 if (session->s_state == CEPH_MDS_SESSION_REJECTED)
4882 __unregister_session(mdsc, session);
4883 __wake_requests(mdsc, &session->s_waiting);
4884 mutex_unlock(&mdsc->mutex);
4885
4886 mutex_lock(&session->s_mutex);
4887 __close_session(mdsc, session);
4888 if (session->s_state == CEPH_MDS_SESSION_CLOSING) {
4889 cleanup_session_requests(mdsc, session);
4890 remove_session_caps(session);
4891 }
4892 mutex_unlock(&session->s_mutex);
4893 ceph_put_mds_session(session);
4894
4895 mutex_lock(&mdsc->mutex);
4896 kick_requests(mdsc, mds);
4897 }
4898 __wake_requests(mdsc, &mdsc->waiting_for_map);
4899 mutex_unlock(&mdsc->mutex);
4900}
4901
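/*
 * Final teardown of mds client state: quiesce the delayed work, then
 * free the mdsmap, session array, cap pools and pool permission cache.
 */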
4902static void ceph_mdsc_stop(struct ceph_mds_client *mdsc)
4903{
4904 dout("stop\n");
	/*
	 * Make sure the delayed work has stopped before releasing
	 * the resources.
	 *
	 * cancel_delayed_work_sync() only guarantees that the work
	 * finishes executing, but the delayed work re-arms itself
	 * after it runs, so flush it instead.
	 */
4913 flush_delayed_work(&mdsc->delayed_work);
4914
4915 if (mdsc->mdsmap)
4916 ceph_mdsmap_destroy(mdsc->mdsmap);
4917 kfree(mdsc->sessions);
4918 ceph_caps_finalize(mdsc);
4919 ceph_pool_perm_destroy(mdsc);
4920}
4921
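/*
 * Tear down the mds client entirely.  Called on umount, after the
 * sessions have been closed.
 */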
4922void ceph_mdsc_destroy(struct ceph_fs_client *fsc)
4923{
4924 struct ceph_mds_client *mdsc = fsc->mdsc;
4925 dout("mdsc_destroy %p\n", mdsc);
4926
4927 if (!mdsc)
4928 return;
4929
4930 /* flush out any connection work with references to us */
4931 ceph_msgr_flush();
4932
4933 ceph_mdsc_stop(mdsc);
4934
4935 ceph_metric_destroy(&mdsc->metric);
4936
4937 fsc->mdsc = NULL;
4938 kfree(mdsc);
4939 dout("mdsc_destroy %p done\n", mdsc);
4940}
4941
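/*
 * Handle an fsmap message: scan the file systems in the map for the
 * one named by the mds_namespace mount option and, once its fscid is
 * known, subscribe to that file system's mdsmap.
 */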
4942void ceph_mdsc_handle_fsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
4943{
4944 struct ceph_fs_client *fsc = mdsc->fsc;
4945 const char *mds_namespace = fsc->mount_options->mds_namespace;
4946 void *p = msg->front.iov_base;
4947 void *end = p + msg->front.iov_len;
4948 u32 epoch;
4949 u32 num_fs;
4950 u32 mount_fscid = (u32)-1;
4951 int err = -EINVAL;
4952
4953 ceph_decode_need(&p, end, sizeof(u32), bad);
4954 epoch = ceph_decode_32(&p);
4955
4956 dout("handle_fsmap epoch %u\n", epoch);
4957
4958 /* struct_v, struct_cv, map_len, epoch, legacy_client_fscid */
4959 ceph_decode_skip_n(&p, end, 2 + sizeof(u32) * 3, bad);
4960
4961 ceph_decode_32_safe(&p, end, num_fs, bad);
4962 while (num_fs-- > 0) {
4963 void *info_p, *info_end;
4964 u32 info_len;
4965 u32 fscid, namelen;
4966
4967 ceph_decode_need(&p, end, 2 + sizeof(u32), bad);
		p += 2;		/* info_v, info_cv */
4969 info_len = ceph_decode_32(&p);
4970 ceph_decode_need(&p, end, info_len, bad);
4971 info_p = p;
4972 info_end = p + info_len;
4973 p = info_end;
4974
4975 ceph_decode_need(&info_p, info_end, sizeof(u32) * 2, bad);
4976 fscid = ceph_decode_32(&info_p);
4977 namelen = ceph_decode_32(&info_p);
4978 ceph_decode_need(&info_p, info_end, namelen, bad);
4979
4980 if (mds_namespace &&
4981 strlen(mds_namespace) == namelen &&
4982 !strncmp(mds_namespace, (char *)info_p, namelen)) {
4983 mount_fscid = fscid;
4984 break;
4985 }
4986 }
4987
4988 ceph_monc_got_map(&fsc->client->monc, CEPH_SUB_FSMAP, epoch);
4989 if (mount_fscid != (u32)-1) {
4990 fsc->client->monc.fs_cluster_id = mount_fscid;
4991 ceph_monc_want_map(&fsc->client->monc, CEPH_SUB_MDSMAP,
4992 0, true);
4993 ceph_monc_renew_subs(&fsc->client->monc);
4994 } else {
4995 err = -ENOENT;
4996 goto err_out;
4997 }
4998 return;
4999
5000bad:
5001 pr_err("error decoding fsmap\n");
5002err_out:
5003 mutex_lock(&mdsc->mutex);
5004 mdsc->mdsmap_err = err;
5005 __wake_requests(mdsc, &mdsc->waiting_for_map);
5006 mutex_unlock(&mdsc->mutex);
5007}
5008
5009/*
5010 * handle mds map update.
5011 */
5012void ceph_mdsc_handle_mdsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
5013{
5014 u32 epoch;
5015 u32 maplen;
5016 void *p = msg->front.iov_base;
5017 void *end = p + msg->front.iov_len;
5018 struct ceph_mdsmap *newmap, *oldmap;
5019 struct ceph_fsid fsid;
5020 int err = -EINVAL;
5021
5022 ceph_decode_need(&p, end, sizeof(fsid)+2*sizeof(u32), bad);
5023 ceph_decode_copy(&p, &fsid, sizeof(fsid));
5024 if (ceph_check_fsid(mdsc->fsc->client, &fsid) < 0)
5025 return;
5026 epoch = ceph_decode_32(&p);
5027 maplen = ceph_decode_32(&p);
5028 dout("handle_map epoch %u len %d\n", epoch, (int)maplen);
5029
5030 /* do we need it? */
5031 mutex_lock(&mdsc->mutex);
5032 if (mdsc->mdsmap && epoch <= mdsc->mdsmap->m_epoch) {
5033 dout("handle_map epoch %u <= our %u\n",
5034 epoch, mdsc->mdsmap->m_epoch);
5035 mutex_unlock(&mdsc->mutex);
5036 return;
5037 }
5038
5039 newmap = ceph_mdsmap_decode(&p, end, ceph_msgr2(mdsc->fsc->client));
5040 if (IS_ERR(newmap)) {
5041 err = PTR_ERR(newmap);
5042 goto bad_unlock;
5043 }
5044
5045 /* swap into place */
5046 if (mdsc->mdsmap) {
5047 oldmap = mdsc->mdsmap;
5048 mdsc->mdsmap = newmap;
5049 check_new_map(mdsc, newmap, oldmap);
5050 ceph_mdsmap_destroy(oldmap);
5051 } else {
5052 mdsc->mdsmap = newmap; /* first mds map */
5053 }
5054 mdsc->fsc->max_file_size = min((loff_t)mdsc->mdsmap->m_max_file_size,
5055 MAX_LFS_FILESIZE);
5056
5057 __wake_requests(mdsc, &mdsc->waiting_for_map);
5058 ceph_monc_got_map(&mdsc->fsc->client->monc, CEPH_SUB_MDSMAP,
5059 mdsc->mdsmap->m_epoch);
5060
5061 mutex_unlock(&mdsc->mutex);
5062 schedule_delayed(mdsc, 0);
5063 return;
5064
5065bad_unlock:
5066 mutex_unlock(&mdsc->mutex);
5067bad:
5068 pr_err("error decoding mdsmap %d\n", err);
5069 return;
5070}
5071
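/*
 * The connection's private pointer is the mds session; get/put on the
 * connection simply pins and unpins that session.
 */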
5072static struct ceph_connection *mds_get_con(struct ceph_connection *con)
5073{
5074 struct ceph_mds_session *s = con->private;
5075
5076 if (ceph_get_mds_session(s))
5077 return con;
5078 return NULL;
5079}
5080
5081static void mds_put_con(struct ceph_connection *con)
5082{
5083 struct ceph_mds_session *s = con->private;
5084
5085 ceph_put_mds_session(s);
5086}
5087
5088/*
5089 * if the client is unresponsive for long enough, the mds will kill
5090 * the session entirely.
5091 */
5092static void mds_peer_reset(struct ceph_connection *con)
5093{
5094 struct ceph_mds_session *s = con->private;
5095 struct ceph_mds_client *mdsc = s->s_mdsc;
5096
5097 pr_warn("mds%d closed our session\n", s->s_mds);
5098 send_mds_reconnect(mdsc, s);
5099}
5100
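/*
 * Dispatch an incoming message to the appropriate handler, after
 * verifying that the session it arrived on is still registered.
 */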
5101static void mds_dispatch(struct ceph_connection *con, struct ceph_msg *msg)
5102{
5103 struct ceph_mds_session *s = con->private;
5104 struct ceph_mds_client *mdsc = s->s_mdsc;
5105 int type = le16_to_cpu(msg->hdr.type);
5106
5107 mutex_lock(&mdsc->mutex);
5108 if (__verify_registered_session(mdsc, s) < 0) {
5109 mutex_unlock(&mdsc->mutex);
5110 goto out;
5111 }
5112 mutex_unlock(&mdsc->mutex);
5113
5114 switch (type) {
5115 case CEPH_MSG_MDS_MAP:
5116 ceph_mdsc_handle_mdsmap(mdsc, msg);
5117 break;
5118 case CEPH_MSG_FS_MAP_USER:
5119 ceph_mdsc_handle_fsmap(mdsc, msg);
5120 break;
5121 case CEPH_MSG_CLIENT_SESSION:
5122 handle_session(s, msg);
5123 break;
5124 case CEPH_MSG_CLIENT_REPLY:
5125 handle_reply(s, msg);
5126 break;
5127 case CEPH_MSG_CLIENT_REQUEST_FORWARD:
5128 handle_forward(mdsc, s, msg);
5129 break;
5130 case CEPH_MSG_CLIENT_CAPS:
5131 ceph_handle_caps(s, msg);
5132 break;
5133 case CEPH_MSG_CLIENT_SNAP:
5134 ceph_handle_snap(mdsc, s, msg);
5135 break;
5136 case CEPH_MSG_CLIENT_LEASE:
5137 handle_lease(mdsc, s, msg);
5138 break;
5139 case CEPH_MSG_CLIENT_QUOTA:
5140 ceph_handle_quota(mdsc, s, msg);
5141 break;
5142
5143 default:
5144 pr_err("received unknown message type %d %s\n", type,
5145 ceph_msg_type_name(type));
5146 }
5147out:
5148 ceph_msg_put(msg);
5149}
5150
5151/*
5152 * authentication
5153 */
5154
5155/*
5156 * Note: returned pointer is the address of a structure that's
5157 * managed separately. Caller must *not* attempt to free it.
5158 */
5159static struct ceph_auth_handshake *
5160mds_get_authorizer(struct ceph_connection *con, int *proto, int force_new)
5161{
5162 struct ceph_mds_session *s = con->private;
5163 struct ceph_mds_client *mdsc = s->s_mdsc;
5164 struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
5165 struct ceph_auth_handshake *auth = &s->s_auth;
5166 int ret;
5167
5168 ret = __ceph_auth_get_authorizer(ac, auth, CEPH_ENTITY_TYPE_MDS,
5169 force_new, proto, NULL, NULL);
5170 if (ret)
5171 return ERR_PTR(ret);
5172
5173 return auth;
5174}
5175
5176static int mds_add_authorizer_challenge(struct ceph_connection *con,
5177 void *challenge_buf, int challenge_buf_len)
5178{
5179 struct ceph_mds_session *s = con->private;
5180 struct ceph_mds_client *mdsc = s->s_mdsc;
5181 struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
5182
5183 return ceph_auth_add_authorizer_challenge(ac, s->s_auth.authorizer,
5184 challenge_buf, challenge_buf_len);
5185}
5186
5187static int mds_verify_authorizer_reply(struct ceph_connection *con)
5188{
5189 struct ceph_mds_session *s = con->private;
5190 struct ceph_mds_client *mdsc = s->s_mdsc;
5191 struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
5192 struct ceph_auth_handshake *auth = &s->s_auth;
5193
5194 return ceph_auth_verify_authorizer_reply(ac, auth->authorizer,
5195 auth->authorizer_reply_buf, auth->authorizer_reply_buf_len,
5196 NULL, NULL, NULL, NULL);
5197}
5198
5199static int mds_invalidate_authorizer(struct ceph_connection *con)
5200{
5201 struct ceph_mds_session *s = con->private;
5202 struct ceph_mds_client *mdsc = s->s_mdsc;
5203 struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
5204
5205 ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_MDS);
5206
5207 return ceph_monc_validate_auth(&mdsc->fsc->client->monc);
5208}
5209
5210static int mds_get_auth_request(struct ceph_connection *con,
5211 void *buf, int *buf_len,
5212 void **authorizer, int *authorizer_len)
5213{
5214 struct ceph_mds_session *s = con->private;
5215 struct ceph_auth_client *ac = s->s_mdsc->fsc->client->monc.auth;
5216 struct ceph_auth_handshake *auth = &s->s_auth;
5217 int ret;
5218
5219 ret = ceph_auth_get_authorizer(ac, auth, CEPH_ENTITY_TYPE_MDS,
5220 buf, buf_len);
5221 if (ret)
5222 return ret;
5223
5224 *authorizer = auth->authorizer_buf;
5225 *authorizer_len = auth->authorizer_buf_len;
5226 return 0;
5227}
5228
5229static int mds_handle_auth_reply_more(struct ceph_connection *con,
5230 void *reply, int reply_len,
5231 void *buf, int *buf_len,
5232 void **authorizer, int *authorizer_len)
5233{
5234 struct ceph_mds_session *s = con->private;
5235 struct ceph_auth_client *ac = s->s_mdsc->fsc->client->monc.auth;
5236 struct ceph_auth_handshake *auth = &s->s_auth;
5237 int ret;
5238
5239 ret = ceph_auth_handle_svc_reply_more(ac, auth, reply, reply_len,
5240 buf, buf_len);
5241 if (ret)
5242 return ret;
5243
5244 *authorizer = auth->authorizer_buf;
5245 *authorizer_len = auth->authorizer_buf_len;
5246 return 0;
5247}
5248
5249static int mds_handle_auth_done(struct ceph_connection *con,
5250 u64 global_id, void *reply, int reply_len,
5251 u8 *session_key, int *session_key_len,
5252 u8 *con_secret, int *con_secret_len)
5253{
5254 struct ceph_mds_session *s = con->private;
5255 struct ceph_auth_client *ac = s->s_mdsc->fsc->client->monc.auth;
5256 struct ceph_auth_handshake *auth = &s->s_auth;
5257
5258 return ceph_auth_handle_svc_reply_done(ac, auth, reply, reply_len,
5259 session_key, session_key_len,
5260 con_secret, con_secret_len);
5261}
5262
5263static int mds_handle_auth_bad_method(struct ceph_connection *con,
5264 int used_proto, int result,
5265 const int *allowed_protos, int proto_cnt,
5266 const int *allowed_modes, int mode_cnt)
5267{
5268 struct ceph_mds_session *s = con->private;
5269 struct ceph_mon_client *monc = &s->s_mdsc->fsc->client->monc;
5270 int ret;
5271
5272 if (ceph_auth_handle_bad_authorizer(monc->auth, CEPH_ENTITY_TYPE_MDS,
5273 used_proto, result,
5274 allowed_protos, proto_cnt,
5275 allowed_modes, mode_cnt)) {
5276 ret = ceph_monc_validate_auth(monc);
5277 if (ret)
5278 return ret;
5279 }
5280
5281 return -EACCES;
5282}
5283
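/*
 * Allocate a message to receive the incoming frame into: reuse
 * con->in_msg if the messenger already has one attached, otherwise
 * allocate a fresh message sized for the front payload.
 */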
5284static struct ceph_msg *mds_alloc_msg(struct ceph_connection *con,
5285 struct ceph_msg_header *hdr, int *skip)
5286{
5287 struct ceph_msg *msg;
5288 int type = (int) le16_to_cpu(hdr->type);
5289 int front_len = (int) le32_to_cpu(hdr->front_len);
5290
5291 if (con->in_msg)
5292 return con->in_msg;
5293
5294 *skip = 0;
5295 msg = ceph_msg_new(type, front_len, GFP_NOFS, false);
5296 if (!msg) {
5297 pr_err("unable to allocate msg type %d len %d\n",
5298 type, front_len);
5299 return NULL;
5300 }
5301
5302 return msg;
5303}
5304
5305static int mds_sign_message(struct ceph_msg *msg)
5306{
5307 struct ceph_mds_session *s = msg->con->private;
5308 struct ceph_auth_handshake *auth = &s->s_auth;
5309
5310 return ceph_auth_sign_message(auth, msg);
5311}
5312
5313static int mds_check_message_signature(struct ceph_msg *msg)
5314{
5315 struct ceph_mds_session *s = msg->con->private;
5316 struct ceph_auth_handshake *auth = &s->s_auth;
5317
5318 return ceph_auth_check_message_signature(auth, msg);
5319}
5320
5321static const struct ceph_connection_operations mds_con_ops = {
5322 .get = mds_get_con,
5323 .put = mds_put_con,
5324 .alloc_msg = mds_alloc_msg,
5325 .dispatch = mds_dispatch,
5326 .peer_reset = mds_peer_reset,
5327 .get_authorizer = mds_get_authorizer,
5328 .add_authorizer_challenge = mds_add_authorizer_challenge,
5329 .verify_authorizer_reply = mds_verify_authorizer_reply,
5330 .invalidate_authorizer = mds_invalidate_authorizer,
5331 .sign_message = mds_sign_message,
5332 .check_message_signature = mds_check_message_signature,
5333 .get_auth_request = mds_get_auth_request,
5334 .handle_auth_reply_more = mds_handle_auth_reply_more,
5335 .handle_auth_done = mds_handle_auth_done,
5336 .handle_auth_bad_method = mds_handle_auth_bad_method,
5337};
5338
5339/* eof */