1// SPDX-License-Identifier: GPL-2.0
2#include <linux/ceph/ceph_debug.h>
3
4#include <linux/crc32c.h>
5#include <linux/ctype.h>
6#include <linux/highmem.h>
7#include <linux/inet.h>
8#include <linux/kthread.h>
9#include <linux/net.h>
10#include <linux/nsproxy.h>
11#include <linux/sched/mm.h>
12#include <linux/slab.h>
13#include <linux/socket.h>
14#include <linux/string.h>
15#ifdef CONFIG_BLOCK
16#include <linux/bio.h>
17#endif /* CONFIG_BLOCK */
18#include <linux/dns_resolver.h>
19#include <net/tcp.h>
20#include <trace/events/sock.h>
21
22#include <linux/ceph/ceph_features.h>
23#include <linux/ceph/libceph.h>
24#include <linux/ceph/messenger.h>
25#include <linux/ceph/decode.h>
26#include <linux/ceph/pagelist.h>
27#include <linux/export.h>
28
29/*
30 * Ceph uses the messenger to exchange ceph_msg messages with other
31 * hosts in the system. The messenger provides ordered and reliable
32 * delivery. We tolerate TCP disconnects by reconnecting (with
33 * exponential backoff) in the case of a fault (disconnection, bad
34 * crc, protocol error). Acks allow sent messages to be discarded by
35 * the sender.
36 */
37
38/*
39 * We track the state of the socket on a given connection using
40 * values defined below. The transition to a new socket state is
41 * handled by a function which verifies we aren't coming from an
42 * unexpected state.
43 *
 * --------
 * | NEW* |  transient initial state
 * --------
 *     | con_sock_state_init()
 *     v
 * ----------
 * | CLOSED |  initialized, but no socket (and no
 * ----------  TCP connection)
 *  ^      \
 *  |       \ con_sock_state_connecting()
 *  |        ----------------------
 *  |                              \
 *  + con_sock_state_closed()       \
 *  |+---------------------------    \
 *  | \                          \    \
 *  |  -----------                \    \
 *  |  | CLOSING |  socket event;  \    \
 *  |  -----------  await close     \    \
 *  |       ^                        \   |
 *  |       |                         \  |
 *  |       + con_sock_state_closing() \ |
 *  |      / \                         | |
 *  |     /   ---------------          | |
 *  |    /                   \         v v
 *  |   /                    --------------
 *  |  /    -----------------| CONNECTING |  socket created, TCP
 *  |  |   /                 --------------  connect initiated
 *  |  |   | con_sock_state_connected()
 *  |  |   v
 *  -------------
 *  | CONNECTED |  TCP connection established
 *  -------------
76 *
77 * State values for ceph_connection->sock_state; NEW is assumed to be 0.
78 */
79
80#define CON_SOCK_STATE_NEW 0 /* -> CLOSED */
81#define CON_SOCK_STATE_CLOSED 1 /* -> CONNECTING */
82#define CON_SOCK_STATE_CONNECTING 2 /* -> CONNECTED or -> CLOSING */
83#define CON_SOCK_STATE_CONNECTED 3 /* -> CLOSING or -> CLOSED */
84#define CON_SOCK_STATE_CLOSING 4 /* -> CLOSED */
85
86static bool con_flag_valid(unsigned long con_flag)
87{
88 switch (con_flag) {
89 case CEPH_CON_F_LOSSYTX:
90 case CEPH_CON_F_KEEPALIVE_PENDING:
91 case CEPH_CON_F_WRITE_PENDING:
92 case CEPH_CON_F_SOCK_CLOSED:
93 case CEPH_CON_F_BACKOFF:
94 return true;
95 default:
96 return false;
97 }
98}
99
100void ceph_con_flag_clear(struct ceph_connection *con, unsigned long con_flag)
101{
102 BUG_ON(!con_flag_valid(con_flag));
103
104 clear_bit(con_flag, &con->flags);
105}
106
107void ceph_con_flag_set(struct ceph_connection *con, unsigned long con_flag)
108{
109 BUG_ON(!con_flag_valid(con_flag));
110
111 set_bit(con_flag, &con->flags);
112}
113
114bool ceph_con_flag_test(struct ceph_connection *con, unsigned long con_flag)
115{
116 BUG_ON(!con_flag_valid(con_flag));
117
118 return test_bit(con_flag, &con->flags);
119}
120
121bool ceph_con_flag_test_and_clear(struct ceph_connection *con,
122 unsigned long con_flag)
123{
124 BUG_ON(!con_flag_valid(con_flag));
125
126 return test_and_clear_bit(con_flag, &con->flags);
127}
128
129bool ceph_con_flag_test_and_set(struct ceph_connection *con,
130 unsigned long con_flag)
131{
132 BUG_ON(!con_flag_valid(con_flag));
133
134 return test_and_set_bit(con_flag, &con->flags);
135}
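
/*
 * Illustrative usage (taken from ceph_con_send() below): WRITE_PENDING is
 * typically set with test-and-set so that only the first writer queues
 * work on the connection:
 *
 *	if (!ceph_con_flag_test_and_set(con, CEPH_CON_F_WRITE_PENDING))
 *		queue_con(con);
 */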
136
137/* Slab caches for frequently-allocated structures */
138
139static struct kmem_cache *ceph_msg_cache;
140
141#ifdef CONFIG_LOCKDEP
142static struct lock_class_key socket_class;
143#endif
144
145static void queue_con(struct ceph_connection *con);
146static void cancel_con(struct ceph_connection *con);
147static void ceph_con_workfn(struct work_struct *);
148static void con_fault(struct ceph_connection *con);
149
150/*
151 * Nicely render a sockaddr as a string. An array of formatted
152 * strings is used, to approximate reentrancy.
153 */
154#define ADDR_STR_COUNT_LOG 5 /* log2(# address strings in array) */
155#define ADDR_STR_COUNT (1 << ADDR_STR_COUNT_LOG)
156#define ADDR_STR_COUNT_MASK (ADDR_STR_COUNT - 1)
157#define MAX_ADDR_STR_LEN 64 /* 54 is enough */
158
159static char addr_str[ADDR_STR_COUNT][MAX_ADDR_STR_LEN];
160static atomic_t addr_str_seq = ATOMIC_INIT(0);
161
162struct page *ceph_zero_page; /* used in certain error cases */
163
164const char *ceph_pr_addr(const struct ceph_entity_addr *addr)
165{
166 int i;
167 char *s;
168 struct sockaddr_storage ss = addr->in_addr; /* align */
169 struct sockaddr_in *in4 = (struct sockaddr_in *)&ss;
170 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *)&ss;
171
172 i = atomic_inc_return(&addr_str_seq) & ADDR_STR_COUNT_MASK;
173 s = addr_str[i];
174
175 switch (ss.ss_family) {
176 case AF_INET:
177 snprintf(s, MAX_ADDR_STR_LEN, "(%d)%pI4:%hu",
178 le32_to_cpu(addr->type), &in4->sin_addr,
179 ntohs(in4->sin_port));
180 break;
181
182 case AF_INET6:
183 snprintf(s, MAX_ADDR_STR_LEN, "(%d)[%pI6c]:%hu",
184 le32_to_cpu(addr->type), &in6->sin6_addr,
185 ntohs(in6->sin6_port));
186 break;
187
188 default:
189 snprintf(s, MAX_ADDR_STR_LEN, "(unknown sockaddr family %hu)",
190 ss.ss_family);
191 }
192
193 return s;
194}
195EXPORT_SYMBOL(ceph_pr_addr);
196
197void ceph_encode_my_addr(struct ceph_messenger *msgr)
198{
199 if (!ceph_msgr2(from_msgr(msgr))) {
200 memcpy(&msgr->my_enc_addr, &msgr->inst.addr,
201 sizeof(msgr->my_enc_addr));
202 ceph_encode_banner_addr(&msgr->my_enc_addr);
203 }
204}
205
206/*
207 * work queue for all reading and writing to/from the socket.
208 */
209static struct workqueue_struct *ceph_msgr_wq;
210
211static int ceph_msgr_slab_init(void)
212{
213 BUG_ON(ceph_msg_cache);
214 ceph_msg_cache = KMEM_CACHE(ceph_msg, 0);
215 if (!ceph_msg_cache)
216 return -ENOMEM;
217
218 return 0;
219}
220
221static void ceph_msgr_slab_exit(void)
222{
223 BUG_ON(!ceph_msg_cache);
224 kmem_cache_destroy(ceph_msg_cache);
225 ceph_msg_cache = NULL;
226}
227
228static void _ceph_msgr_exit(void)
229{
230 if (ceph_msgr_wq) {
231 destroy_workqueue(ceph_msgr_wq);
232 ceph_msgr_wq = NULL;
233 }
234
235 BUG_ON(!ceph_zero_page);
236 put_page(ceph_zero_page);
237 ceph_zero_page = NULL;
238
239 ceph_msgr_slab_exit();
240}
241
242int __init ceph_msgr_init(void)
243{
244 if (ceph_msgr_slab_init())
245 return -ENOMEM;
246
247 BUG_ON(ceph_zero_page);
248 ceph_zero_page = ZERO_PAGE(0);
249 get_page(ceph_zero_page);
250
251 /*
252 * The number of active work items is limited by the number of
253 * connections, so leave @max_active at default.
254 */
255 ceph_msgr_wq = alloc_workqueue("ceph-msgr", WQ_MEM_RECLAIM, 0);
256 if (ceph_msgr_wq)
257 return 0;
258
259 pr_err("msgr_init failed to create workqueue\n");
260 _ceph_msgr_exit();
261
262 return -ENOMEM;
263}
264
265void ceph_msgr_exit(void)
266{
267 BUG_ON(ceph_msgr_wq == NULL);
268
269 _ceph_msgr_exit();
270}
271
272void ceph_msgr_flush(void)
273{
274 flush_workqueue(ceph_msgr_wq);
275}
276EXPORT_SYMBOL(ceph_msgr_flush);
277
278/* Connection socket state transition functions */
279
280static void con_sock_state_init(struct ceph_connection *con)
281{
282 int old_state;
283
284 old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSED);
285 if (WARN_ON(old_state != CON_SOCK_STATE_NEW))
286 printk("%s: unexpected old state %d\n", __func__, old_state);
287 dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
288 CON_SOCK_STATE_CLOSED);
289}
290
291static void con_sock_state_connecting(struct ceph_connection *con)
292{
293 int old_state;
294
295 old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CONNECTING);
296 if (WARN_ON(old_state != CON_SOCK_STATE_CLOSED))
297 printk("%s: unexpected old state %d\n", __func__, old_state);
298 dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
299 CON_SOCK_STATE_CONNECTING);
300}
301
302static void con_sock_state_connected(struct ceph_connection *con)
303{
304 int old_state;
305
306 old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CONNECTED);
307 if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTING))
308 printk("%s: unexpected old state %d\n", __func__, old_state);
309 dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
310 CON_SOCK_STATE_CONNECTED);
311}
312
313static void con_sock_state_closing(struct ceph_connection *con)
314{
315 int old_state;
316
317 old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSING);
318 if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTING &&
319 old_state != CON_SOCK_STATE_CONNECTED &&
320 old_state != CON_SOCK_STATE_CLOSING))
321 printk("%s: unexpected old state %d\n", __func__, old_state);
322 dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
323 CON_SOCK_STATE_CLOSING);
324}
325
326static void con_sock_state_closed(struct ceph_connection *con)
327{
328 int old_state;
329
330 old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSED);
331 if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTED &&
332 old_state != CON_SOCK_STATE_CLOSING &&
333 old_state != CON_SOCK_STATE_CONNECTING &&
334 old_state != CON_SOCK_STATE_CLOSED))
335 printk("%s: unexpected old state %d\n", __func__, old_state);
336 dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
337 CON_SOCK_STATE_CLOSED);
338}
339
340/*
341 * socket callback functions
342 */
343
344/* data available on socket, or listen socket received a connect */
345static void ceph_sock_data_ready(struct sock *sk)
346{
347 struct ceph_connection *con = sk->sk_user_data;
348
349 trace_sk_data_ready(sk);
350
351 if (atomic_read(&con->msgr->stopping)) {
352 return;
353 }
354
355 if (sk->sk_state != TCP_CLOSE_WAIT) {
356 dout("%s %p state = %d, queueing work\n", __func__,
357 con, con->state);
358 queue_con(con);
359 }
360}
361
362/* socket has buffer space for writing */
363static void ceph_sock_write_space(struct sock *sk)
364{
365 struct ceph_connection *con = sk->sk_user_data;
366
367 /* only queue to workqueue if there is data we want to write,
368 * and there is sufficient space in the socket buffer to accept
369 * more data. clear SOCK_NOSPACE so that ceph_sock_write_space()
370 * doesn't get called again until try_write() fills the socket
371 * buffer. See net/ipv4/tcp_input.c:tcp_check_space()
372 * and net/core/stream.c:sk_stream_write_space().
373 */
374 if (ceph_con_flag_test(con, CEPH_CON_F_WRITE_PENDING)) {
375 if (sk_stream_is_writeable(sk)) {
376 dout("%s %p queueing write work\n", __func__, con);
377 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
378 queue_con(con);
379 }
380 } else {
381 dout("%s %p nothing to write\n", __func__, con);
382 }
383}
384
385/* socket's state has changed */
386static void ceph_sock_state_change(struct sock *sk)
387{
388 struct ceph_connection *con = sk->sk_user_data;
389
390 dout("%s %p state = %d sk_state = %u\n", __func__,
391 con, con->state, sk->sk_state);
392
393 switch (sk->sk_state) {
394 case TCP_CLOSE:
395 dout("%s TCP_CLOSE\n", __func__);
396 fallthrough;
397 case TCP_CLOSE_WAIT:
398 dout("%s TCP_CLOSE_WAIT\n", __func__);
399 con_sock_state_closing(con);
400 ceph_con_flag_set(con, CEPH_CON_F_SOCK_CLOSED);
401 queue_con(con);
402 break;
403 case TCP_ESTABLISHED:
404 dout("%s TCP_ESTABLISHED\n", __func__);
405 con_sock_state_connected(con);
406 queue_con(con);
407 break;
408 default: /* Everything else is uninteresting */
409 break;
410 }
411}
412
413/*
414 * set up socket callbacks
415 */
416static void set_sock_callbacks(struct socket *sock,
417 struct ceph_connection *con)
418{
419 struct sock *sk = sock->sk;
420 sk->sk_user_data = con;
421 sk->sk_data_ready = ceph_sock_data_ready;
422 sk->sk_write_space = ceph_sock_write_space;
423 sk->sk_state_change = ceph_sock_state_change;
424}
425
426
427/*
428 * socket helpers
429 */
430
431/*
432 * initiate connection to a remote socket.
433 */
434int ceph_tcp_connect(struct ceph_connection *con)
435{
436 struct sockaddr_storage ss = con->peer_addr.in_addr; /* align */
437 struct socket *sock;
438 unsigned int noio_flag;
439 int ret;
440
441 dout("%s con %p peer_addr %s\n", __func__, con,
442 ceph_pr_addr(&con->peer_addr));
443 BUG_ON(con->sock);
444
445 /* sock_create_kern() allocates with GFP_KERNEL */
446 noio_flag = memalloc_noio_save();
447 ret = sock_create_kern(read_pnet(&con->msgr->net), ss.ss_family,
448 SOCK_STREAM, IPPROTO_TCP, &sock);
449 memalloc_noio_restore(noio_flag);
450 if (ret)
451 return ret;
452 sock->sk->sk_allocation = GFP_NOFS;
453 sock->sk->sk_use_task_frag = false;
454
455#ifdef CONFIG_LOCKDEP
456 lockdep_set_class(&sock->sk->sk_lock, &socket_class);
457#endif
458
459 set_sock_callbacks(sock, con);
460
461 con_sock_state_connecting(con);
462 ret = kernel_connect(sock, (struct sockaddr *)&ss, sizeof(ss),
463 O_NONBLOCK);
464 if (ret == -EINPROGRESS) {
465 dout("connect %s EINPROGRESS sk_state = %u\n",
466 ceph_pr_addr(&con->peer_addr),
467 sock->sk->sk_state);
468 } else if (ret < 0) {
469 pr_err("connect %s error %d\n",
470 ceph_pr_addr(&con->peer_addr), ret);
471 sock_release(sock);
472 return ret;
473 }
474
475 if (ceph_test_opt(from_msgr(con->msgr), TCP_NODELAY))
476 tcp_sock_set_nodelay(sock->sk);
477
478 con->sock = sock;
479 return 0;
480}
481
482/*
483 * Shutdown/close the socket for the given connection.
484 */
485int ceph_con_close_socket(struct ceph_connection *con)
486{
487 int rc = 0;
488
489 dout("%s con %p sock %p\n", __func__, con, con->sock);
490 if (con->sock) {
491 rc = con->sock->ops->shutdown(con->sock, SHUT_RDWR);
492 sock_release(con->sock);
493 con->sock = NULL;
494 }
495
496 /*
497 * Forcibly clear the SOCK_CLOSED flag. It gets set
 * independently of the connection mutex, and we could have
499 * received a socket close event before we had the chance to
500 * shut the socket down.
501 */
502 ceph_con_flag_clear(con, CEPH_CON_F_SOCK_CLOSED);
503
504 con_sock_state_closed(con);
505 return rc;
506}
507
508static void ceph_con_reset_protocol(struct ceph_connection *con)
509{
510 dout("%s con %p\n", __func__, con);
511
512 ceph_con_close_socket(con);
513 if (con->in_msg) {
514 WARN_ON(con->in_msg->con != con);
515 ceph_msg_put(con->in_msg);
516 con->in_msg = NULL;
517 }
518 if (con->out_msg) {
519 WARN_ON(con->out_msg->con != con);
520 ceph_msg_put(con->out_msg);
521 con->out_msg = NULL;
522 }
523 if (con->bounce_page) {
524 __free_page(con->bounce_page);
525 con->bounce_page = NULL;
526 }
527
528 if (ceph_msgr2(from_msgr(con->msgr)))
529 ceph_con_v2_reset_protocol(con);
530 else
531 ceph_con_v1_reset_protocol(con);
532}
533
534/*
535 * Reset a connection. Discard all incoming and outgoing messages
536 * and clear *_seq state.
537 */
538static void ceph_msg_remove(struct ceph_msg *msg)
539{
540 list_del_init(&msg->list_head);
541
542 ceph_msg_put(msg);
543}
544
545static void ceph_msg_remove_list(struct list_head *head)
546{
547 while (!list_empty(head)) {
548 struct ceph_msg *msg = list_first_entry(head, struct ceph_msg,
549 list_head);
550 ceph_msg_remove(msg);
551 }
552}
553
554void ceph_con_reset_session(struct ceph_connection *con)
555{
556 dout("%s con %p\n", __func__, con);
557
558 WARN_ON(con->in_msg);
559 WARN_ON(con->out_msg);
560 ceph_msg_remove_list(&con->out_queue);
561 ceph_msg_remove_list(&con->out_sent);
562 con->out_seq = 0;
563 con->in_seq = 0;
564 con->in_seq_acked = 0;
565
566 if (ceph_msgr2(from_msgr(con->msgr)))
567 ceph_con_v2_reset_session(con);
568 else
569 ceph_con_v1_reset_session(con);
570}
571
572/*
573 * mark a peer down. drop any open connections.
574 */
575void ceph_con_close(struct ceph_connection *con)
576{
577 mutex_lock(&con->mutex);
578 dout("con_close %p peer %s\n", con, ceph_pr_addr(&con->peer_addr));
579 con->state = CEPH_CON_S_CLOSED;
580
581 ceph_con_flag_clear(con, CEPH_CON_F_LOSSYTX); /* so we retry next
582 connect */
583 ceph_con_flag_clear(con, CEPH_CON_F_KEEPALIVE_PENDING);
584 ceph_con_flag_clear(con, CEPH_CON_F_WRITE_PENDING);
585 ceph_con_flag_clear(con, CEPH_CON_F_BACKOFF);
586
587 ceph_con_reset_protocol(con);
588 ceph_con_reset_session(con);
589 cancel_con(con);
590 mutex_unlock(&con->mutex);
591}
592EXPORT_SYMBOL(ceph_con_close);
593
594/*
595 * Reopen a closed connection, with a new peer address.
596 */
597void ceph_con_open(struct ceph_connection *con,
598 __u8 entity_type, __u64 entity_num,
599 struct ceph_entity_addr *addr)
600{
601 mutex_lock(&con->mutex);
602 dout("con_open %p %s\n", con, ceph_pr_addr(addr));
603
604 WARN_ON(con->state != CEPH_CON_S_CLOSED);
605 con->state = CEPH_CON_S_PREOPEN;
606
607 con->peer_name.type = (__u8) entity_type;
608 con->peer_name.num = cpu_to_le64(entity_num);
609
610 memcpy(&con->peer_addr, addr, sizeof(*addr));
611 con->delay = 0; /* reset backoff memory */
612 mutex_unlock(&con->mutex);
613 queue_con(con);
614}
615EXPORT_SYMBOL(ceph_con_open);
616
617/*
618 * return true if this connection ever successfully opened
619 */
620bool ceph_con_opened(struct ceph_connection *con)
621{
622 if (ceph_msgr2(from_msgr(con->msgr)))
623 return ceph_con_v2_opened(con);
624
625 return ceph_con_v1_opened(con);
626}
627
628/*
629 * initialize a new connection.
630 */
631void ceph_con_init(struct ceph_connection *con, void *private,
632 const struct ceph_connection_operations *ops,
633 struct ceph_messenger *msgr)
634{
635 dout("con_init %p\n", con);
636 memset(con, 0, sizeof(*con));
637 con->private = private;
638 con->ops = ops;
639 con->msgr = msgr;
640
641 con_sock_state_init(con);
642
643 mutex_init(&con->mutex);
644 INIT_LIST_HEAD(&con->out_queue);
645 INIT_LIST_HEAD(&con->out_sent);
646 INIT_DELAYED_WORK(&con->work, ceph_con_workfn);
647
648 con->state = CEPH_CON_S_CLOSED;
649}
650EXPORT_SYMBOL(ceph_con_init);
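
/*
 * Illustrative call sequence (a sketch modelled on in-kernel users such as
 * the OSD and monitor clients; the names below are hypothetical): a
 * connection is initialized once and then opened against a peer address:
 *
 *	ceph_con_init(&s->con, s, &my_con_ops, &client->msgr);
 *	ceph_con_open(&s->con, CEPH_ENTITY_TYPE_OSD, osd_id, &peer_addr);
 *
 * where my_con_ops supplies the get/put/dispatch/alloc_msg callbacks.
 */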
651
652/*
653 * We maintain a global counter to order connection attempts. Get
654 * a unique seq greater than @gt.
655 */
656u32 ceph_get_global_seq(struct ceph_messenger *msgr, u32 gt)
657{
658 u32 ret;
659
660 spin_lock(&msgr->global_seq_lock);
661 if (msgr->global_seq < gt)
662 msgr->global_seq = gt;
663 ret = ++msgr->global_seq;
664 spin_unlock(&msgr->global_seq_lock);
665 return ret;
666}
667
668/*
669 * Discard messages that have been acked by the server.
670 */
671void ceph_con_discard_sent(struct ceph_connection *con, u64 ack_seq)
672{
673 struct ceph_msg *msg;
674 u64 seq;
675
676 dout("%s con %p ack_seq %llu\n", __func__, con, ack_seq);
677 while (!list_empty(&con->out_sent)) {
678 msg = list_first_entry(&con->out_sent, struct ceph_msg,
679 list_head);
680 WARN_ON(msg->needs_out_seq);
681 seq = le64_to_cpu(msg->hdr.seq);
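		/* out_sent is kept in seq order, so stop at the first
		 * message the peer has not acked yet */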
682 if (seq > ack_seq)
683 break;
684
685 dout("%s con %p discarding msg %p seq %llu\n", __func__, con,
686 msg, seq);
687 ceph_msg_remove(msg);
688 }
689}
690
691/*
692 * Discard messages that have been requeued in con_fault(), up to
693 * reconnect_seq. This avoids gratuitously resending messages that
694 * the server had received and handled prior to reconnect.
695 */
696void ceph_con_discard_requeued(struct ceph_connection *con, u64 reconnect_seq)
697{
698 struct ceph_msg *msg;
699 u64 seq;
700
701 dout("%s con %p reconnect_seq %llu\n", __func__, con, reconnect_seq);
702 while (!list_empty(&con->out_queue)) {
703 msg = list_first_entry(&con->out_queue, struct ceph_msg,
704 list_head);
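		/* a message that still needs an out_seq was never sent, so
		 * neither it nor anything after it can have been received */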
705 if (msg->needs_out_seq)
706 break;
707 seq = le64_to_cpu(msg->hdr.seq);
708 if (seq > reconnect_seq)
709 break;
710
711 dout("%s con %p discarding msg %p seq %llu\n", __func__, con,
712 msg, seq);
713 ceph_msg_remove(msg);
714 }
715}
716
717#ifdef CONFIG_BLOCK
718
719/*
720 * For a bio data item, a piece is whatever remains of the next
721 * entry in the current bio iovec, or the first entry in the next
722 * bio in the list.
723 */
724static void ceph_msg_data_bio_cursor_init(struct ceph_msg_data_cursor *cursor,
725 size_t length)
726{
727 struct ceph_msg_data *data = cursor->data;
728 struct ceph_bio_iter *it = &cursor->bio_iter;
729
730 cursor->resid = min_t(size_t, length, data->bio_length);
731 *it = data->bio_pos;
732 if (cursor->resid < it->iter.bi_size)
733 it->iter.bi_size = cursor->resid;
734
735 BUG_ON(cursor->resid < bio_iter_len(it->bio, it->iter));
736}
737
738static struct page *ceph_msg_data_bio_next(struct ceph_msg_data_cursor *cursor,
739 size_t *page_offset,
740 size_t *length)
741{
742 struct bio_vec bv = bio_iter_iovec(cursor->bio_iter.bio,
743 cursor->bio_iter.iter);
744
745 *page_offset = bv.bv_offset;
746 *length = bv.bv_len;
747 return bv.bv_page;
748}
749
750static bool ceph_msg_data_bio_advance(struct ceph_msg_data_cursor *cursor,
751 size_t bytes)
752{
753 struct ceph_bio_iter *it = &cursor->bio_iter;
754 struct page *page = bio_iter_page(it->bio, it->iter);
755
756 BUG_ON(bytes > cursor->resid);
757 BUG_ON(bytes > bio_iter_len(it->bio, it->iter));
758 cursor->resid -= bytes;
759 bio_advance_iter(it->bio, &it->iter, bytes);
760
761 if (!cursor->resid)
762 return false; /* no more data */
763
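	/* bi_bvec_done != 0 with the same page means we are still inside a
	 * partially consumed bvec, i.e. not starting a new piece */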
764 if (!bytes || (it->iter.bi_size && it->iter.bi_bvec_done &&
765 page == bio_iter_page(it->bio, it->iter)))
766 return false; /* more bytes to process in this segment */
767
768 if (!it->iter.bi_size) {
769 it->bio = it->bio->bi_next;
770 it->iter = it->bio->bi_iter;
771 if (cursor->resid < it->iter.bi_size)
772 it->iter.bi_size = cursor->resid;
773 }
774
775 BUG_ON(cursor->resid < bio_iter_len(it->bio, it->iter));
776 return true;
777}
778#endif /* CONFIG_BLOCK */
779
780static void ceph_msg_data_bvecs_cursor_init(struct ceph_msg_data_cursor *cursor,
781 size_t length)
782{
783 struct ceph_msg_data *data = cursor->data;
784 struct bio_vec *bvecs = data->bvec_pos.bvecs;
785
786 cursor->resid = min_t(size_t, length, data->bvec_pos.iter.bi_size);
787 cursor->bvec_iter = data->bvec_pos.iter;
788 cursor->bvec_iter.bi_size = cursor->resid;
789
790 BUG_ON(cursor->resid < bvec_iter_len(bvecs, cursor->bvec_iter));
791}
792
793static struct page *ceph_msg_data_bvecs_next(struct ceph_msg_data_cursor *cursor,
794 size_t *page_offset,
795 size_t *length)
796{
797 struct bio_vec bv = bvec_iter_bvec(cursor->data->bvec_pos.bvecs,
798 cursor->bvec_iter);
799
800 *page_offset = bv.bv_offset;
801 *length = bv.bv_len;
802 return bv.bv_page;
803}
804
805static bool ceph_msg_data_bvecs_advance(struct ceph_msg_data_cursor *cursor,
806 size_t bytes)
807{
808 struct bio_vec *bvecs = cursor->data->bvec_pos.bvecs;
809 struct page *page = bvec_iter_page(bvecs, cursor->bvec_iter);
810
811 BUG_ON(bytes > cursor->resid);
812 BUG_ON(bytes > bvec_iter_len(bvecs, cursor->bvec_iter));
813 cursor->resid -= bytes;
814 bvec_iter_advance(bvecs, &cursor->bvec_iter, bytes);
815
816 if (!cursor->resid)
817 return false; /* no more data */
818
819 if (!bytes || (cursor->bvec_iter.bi_bvec_done &&
820 page == bvec_iter_page(bvecs, cursor->bvec_iter)))
821 return false; /* more bytes to process in this segment */
822
823 BUG_ON(cursor->resid < bvec_iter_len(bvecs, cursor->bvec_iter));
824 return true;
825}
826
827/*
828 * For a page array, a piece comes from the first page in the array
829 * that has not already been fully consumed.
830 */
831static void ceph_msg_data_pages_cursor_init(struct ceph_msg_data_cursor *cursor,
832 size_t length)
833{
834 struct ceph_msg_data *data = cursor->data;
835 int page_count;
836
837 BUG_ON(data->type != CEPH_MSG_DATA_PAGES);
838
839 BUG_ON(!data->pages);
840 BUG_ON(!data->length);
841
842 cursor->resid = min(length, data->length);
843 page_count = calc_pages_for(data->alignment, (u64)data->length);
844 cursor->page_offset = data->alignment & ~PAGE_MASK;
845 cursor->page_index = 0;
846 BUG_ON(page_count > (int)USHRT_MAX);
847 cursor->page_count = (unsigned short)page_count;
848 BUG_ON(length > SIZE_MAX - cursor->page_offset);
849}
850
851static struct page *
852ceph_msg_data_pages_next(struct ceph_msg_data_cursor *cursor,
853 size_t *page_offset, size_t *length)
854{
855 struct ceph_msg_data *data = cursor->data;
856
857 BUG_ON(data->type != CEPH_MSG_DATA_PAGES);
858
859 BUG_ON(cursor->page_index >= cursor->page_count);
860 BUG_ON(cursor->page_offset >= PAGE_SIZE);
861
862 *page_offset = cursor->page_offset;
863 *length = min_t(size_t, cursor->resid, PAGE_SIZE - *page_offset);
864 return data->pages[cursor->page_index];
865}
866
867static bool ceph_msg_data_pages_advance(struct ceph_msg_data_cursor *cursor,
868 size_t bytes)
869{
870 BUG_ON(cursor->data->type != CEPH_MSG_DATA_PAGES);
871
872 BUG_ON(cursor->page_offset + bytes > PAGE_SIZE);
873
874 /* Advance the cursor page offset */
875
876 cursor->resid -= bytes;
877 cursor->page_offset = (cursor->page_offset + bytes) & ~PAGE_MASK;
878 if (!bytes || cursor->page_offset)
879 return false; /* more bytes to process in the current page */
880
881 if (!cursor->resid)
882 return false; /* no more data */
883
884 /* Move on to the next page; offset is already at 0 */
885
886 BUG_ON(cursor->page_index >= cursor->page_count);
887 cursor->page_index++;
888 return true;
889}
890
891/*
892 * For a pagelist, a piece is whatever remains to be consumed in the
893 * first page in the list, or the front of the next page.
894 */
895static void
896ceph_msg_data_pagelist_cursor_init(struct ceph_msg_data_cursor *cursor,
897 size_t length)
898{
899 struct ceph_msg_data *data = cursor->data;
900 struct ceph_pagelist *pagelist;
901 struct page *page;
902
903 BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST);
904
905 pagelist = data->pagelist;
906 BUG_ON(!pagelist);
907
908 if (!length)
909 return; /* pagelist can be assigned but empty */
910
911 BUG_ON(list_empty(&pagelist->head));
912 page = list_first_entry(&pagelist->head, struct page, lru);
913
914 cursor->resid = min(length, pagelist->length);
915 cursor->page = page;
916 cursor->offset = 0;
917}
918
919static struct page *
920ceph_msg_data_pagelist_next(struct ceph_msg_data_cursor *cursor,
921 size_t *page_offset, size_t *length)
922{
923 struct ceph_msg_data *data = cursor->data;
924 struct ceph_pagelist *pagelist;
925
926 BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST);
927
928 pagelist = data->pagelist;
929 BUG_ON(!pagelist);
930
931 BUG_ON(!cursor->page);
932 BUG_ON(cursor->offset + cursor->resid != pagelist->length);
933
934 /* offset of first page in pagelist is always 0 */
935 *page_offset = cursor->offset & ~PAGE_MASK;
936 *length = min_t(size_t, cursor->resid, PAGE_SIZE - *page_offset);
937 return cursor->page;
938}
939
940static bool ceph_msg_data_pagelist_advance(struct ceph_msg_data_cursor *cursor,
941 size_t bytes)
942{
943 struct ceph_msg_data *data = cursor->data;
944 struct ceph_pagelist *pagelist;
945
946 BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST);
947
948 pagelist = data->pagelist;
949 BUG_ON(!pagelist);
950
951 BUG_ON(cursor->offset + cursor->resid != pagelist->length);
952 BUG_ON((cursor->offset & ~PAGE_MASK) + bytes > PAGE_SIZE);
953
954 /* Advance the cursor offset */
955
956 cursor->resid -= bytes;
957 cursor->offset += bytes;
958 /* offset of first page in pagelist is always 0 */
959 if (!bytes || cursor->offset & ~PAGE_MASK)
960 return false; /* more bytes to process in the current page */
961
962 if (!cursor->resid)
963 return false; /* no more data */
964
965 /* Move on to the next page */
966
967 BUG_ON(list_is_last(&cursor->page->lru, &pagelist->head));
968 cursor->page = list_next_entry(cursor->page, lru);
969 return true;
970}
971
972static void ceph_msg_data_iter_cursor_init(struct ceph_msg_data_cursor *cursor,
973 size_t length)
974{
975 struct ceph_msg_data *data = cursor->data;
976
977 cursor->iov_iter = data->iter;
978 cursor->lastlen = 0;
979 iov_iter_truncate(&cursor->iov_iter, length);
980 cursor->resid = iov_iter_count(&cursor->iov_iter);
981}
982
983static struct page *ceph_msg_data_iter_next(struct ceph_msg_data_cursor *cursor,
984 size_t *page_offset, size_t *length)
985{
986 struct page *page;
987 ssize_t len;
988
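	/*
	 * iov_iter_get_pages2() advances the iterator by however much it
	 * returns, which may be more than the caller ends up consuming.
	 * cursor->lastlen records that amount so it can be reverted here
	 * and accounted for in ceph_msg_data_iter_advance().
	 */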
989 if (cursor->lastlen)
990 iov_iter_revert(&cursor->iov_iter, cursor->lastlen);
991
992 len = iov_iter_get_pages2(&cursor->iov_iter, &page, PAGE_SIZE,
993 1, page_offset);
994 BUG_ON(len < 0);
995
996 cursor->lastlen = len;
997
998 /*
999 * FIXME: The assumption is that the pages represented by the iov_iter
1000 * are pinned, with the references held by the upper-level
1001 * callers, or by virtue of being under writeback. Eventually,
1002 * we'll get an iov_iter_get_pages2 variant that doesn't take
1003 * page refs. Until then, just put the page ref.
1004 */
1005 VM_BUG_ON_PAGE(!PageWriteback(page) && page_count(page) < 2, page);
1006 put_page(page);
1007
1008 *length = min_t(size_t, len, cursor->resid);
1009 return page;
1010}
1011
1012static bool ceph_msg_data_iter_advance(struct ceph_msg_data_cursor *cursor,
1013 size_t bytes)
1014{
1015 BUG_ON(bytes > cursor->resid);
1016 cursor->resid -= bytes;
1017
1018 if (bytes < cursor->lastlen) {
1019 cursor->lastlen -= bytes;
1020 } else {
1021 iov_iter_advance(&cursor->iov_iter, bytes - cursor->lastlen);
1022 cursor->lastlen = 0;
1023 }
1024
1025 return cursor->resid;
1026}
1027
1028/*
1029 * Message data is handled (sent or received) in pieces, where each
1030 * piece resides on a single page. The network layer might not
1031 * consume an entire piece at once. A data item's cursor keeps
1032 * track of which piece is next to process and how much remains to
 * be processed in that piece.
1035 */
1036static void __ceph_msg_data_cursor_init(struct ceph_msg_data_cursor *cursor)
1037{
1038 size_t length = cursor->total_resid;
1039
1040 switch (cursor->data->type) {
1041 case CEPH_MSG_DATA_PAGELIST:
1042 ceph_msg_data_pagelist_cursor_init(cursor, length);
1043 break;
1044 case CEPH_MSG_DATA_PAGES:
1045 ceph_msg_data_pages_cursor_init(cursor, length);
1046 break;
1047#ifdef CONFIG_BLOCK
1048 case CEPH_MSG_DATA_BIO:
1049 ceph_msg_data_bio_cursor_init(cursor, length);
1050 break;
1051#endif /* CONFIG_BLOCK */
1052 case CEPH_MSG_DATA_BVECS:
1053 ceph_msg_data_bvecs_cursor_init(cursor, length);
1054 break;
1055 case CEPH_MSG_DATA_ITER:
1056 ceph_msg_data_iter_cursor_init(cursor, length);
1057 break;
1058 case CEPH_MSG_DATA_NONE:
1059 default:
1060 /* BUG(); */
1061 break;
1062 }
1063 cursor->need_crc = true;
1064}
1065
1066void ceph_msg_data_cursor_init(struct ceph_msg_data_cursor *cursor,
1067 struct ceph_msg *msg, size_t length)
1068{
1069 BUG_ON(!length);
1070 BUG_ON(length > msg->data_length);
1071 BUG_ON(!msg->num_data_items);
1072
1073 cursor->total_resid = length;
1074 cursor->data = msg->data;
1075 cursor->sr_resid = 0;
1076
1077 __ceph_msg_data_cursor_init(cursor);
1078}
1079
1080/*
1081 * Return the page containing the next piece to process for a given
1082 * data item, and supply the page offset and length of that piece.
1083 * Indicate whether this is the last piece in this data item.
1084 */
1085struct page *ceph_msg_data_next(struct ceph_msg_data_cursor *cursor,
1086 size_t *page_offset, size_t *length)
1087{
1088 struct page *page;
1089
1090 switch (cursor->data->type) {
1091 case CEPH_MSG_DATA_PAGELIST:
1092 page = ceph_msg_data_pagelist_next(cursor, page_offset, length);
1093 break;
1094 case CEPH_MSG_DATA_PAGES:
1095 page = ceph_msg_data_pages_next(cursor, page_offset, length);
1096 break;
1097#ifdef CONFIG_BLOCK
1098 case CEPH_MSG_DATA_BIO:
1099 page = ceph_msg_data_bio_next(cursor, page_offset, length);
1100 break;
1101#endif /* CONFIG_BLOCK */
1102 case CEPH_MSG_DATA_BVECS:
1103 page = ceph_msg_data_bvecs_next(cursor, page_offset, length);
1104 break;
1105 case CEPH_MSG_DATA_ITER:
1106 page = ceph_msg_data_iter_next(cursor, page_offset, length);
1107 break;
1108 case CEPH_MSG_DATA_NONE:
1109 default:
1110 page = NULL;
1111 break;
1112 }
1113
1114 BUG_ON(!page);
1115 BUG_ON(*page_offset + *length > PAGE_SIZE);
1116 BUG_ON(!*length);
1117 BUG_ON(*length > cursor->resid);
1118
1119 return page;
1120}
1121
1122/*
 * Advance the cursor by @bytes.  Sets cursor->need_crc when the advance
 * moves the cursor on to a new piece of the data item.
1125 */
1126void ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor, size_t bytes)
1127{
1128 bool new_piece;
1129
1130 BUG_ON(bytes > cursor->resid);
1131 switch (cursor->data->type) {
1132 case CEPH_MSG_DATA_PAGELIST:
1133 new_piece = ceph_msg_data_pagelist_advance(cursor, bytes);
1134 break;
1135 case CEPH_MSG_DATA_PAGES:
1136 new_piece = ceph_msg_data_pages_advance(cursor, bytes);
1137 break;
1138#ifdef CONFIG_BLOCK
1139 case CEPH_MSG_DATA_BIO:
1140 new_piece = ceph_msg_data_bio_advance(cursor, bytes);
1141 break;
1142#endif /* CONFIG_BLOCK */
1143 case CEPH_MSG_DATA_BVECS:
1144 new_piece = ceph_msg_data_bvecs_advance(cursor, bytes);
1145 break;
1146 case CEPH_MSG_DATA_ITER:
1147 new_piece = ceph_msg_data_iter_advance(cursor, bytes);
1148 break;
1149 case CEPH_MSG_DATA_NONE:
1150 default:
1151 BUG();
1152 break;
1153 }
1154 cursor->total_resid -= bytes;
1155
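	/* the current data item is exhausted but the message has more data
	 * items: move the cursor on to the next one */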
1156 if (!cursor->resid && cursor->total_resid) {
1157 cursor->data++;
1158 __ceph_msg_data_cursor_init(cursor);
1159 new_piece = true;
1160 }
1161 cursor->need_crc = new_piece;
1162}
1163
1164u32 ceph_crc32c_page(u32 crc, struct page *page, unsigned int page_offset,
1165 unsigned int length)
1166{
1167 char *kaddr;
1168
1169 kaddr = kmap(page);
1170 BUG_ON(kaddr == NULL);
1171 crc = crc32c(crc, kaddr + page_offset, length);
1172 kunmap(page);
1173
1174 return crc;
1175}
1176
1177bool ceph_addr_is_blank(const struct ceph_entity_addr *addr)
1178{
1179 struct sockaddr_storage ss = addr->in_addr; /* align */
1180 struct in_addr *addr4 = &((struct sockaddr_in *)&ss)->sin_addr;
1181 struct in6_addr *addr6 = &((struct sockaddr_in6 *)&ss)->sin6_addr;
1182
1183 switch (ss.ss_family) {
1184 case AF_INET:
1185 return addr4->s_addr == htonl(INADDR_ANY);
1186 case AF_INET6:
1187 return ipv6_addr_any(addr6);
1188 default:
1189 return true;
1190 }
1191}
1192EXPORT_SYMBOL(ceph_addr_is_blank);
1193
1194int ceph_addr_port(const struct ceph_entity_addr *addr)
1195{
1196 switch (get_unaligned(&addr->in_addr.ss_family)) {
1197 case AF_INET:
1198 return ntohs(get_unaligned(&((struct sockaddr_in *)&addr->in_addr)->sin_port));
1199 case AF_INET6:
1200 return ntohs(get_unaligned(&((struct sockaddr_in6 *)&addr->in_addr)->sin6_port));
1201 }
1202 return 0;
1203}
1204
1205void ceph_addr_set_port(struct ceph_entity_addr *addr, int p)
1206{
1207 switch (get_unaligned(&addr->in_addr.ss_family)) {
1208 case AF_INET:
1209 put_unaligned(htons(p), &((struct sockaddr_in *)&addr->in_addr)->sin_port);
1210 break;
1211 case AF_INET6:
1212 put_unaligned(htons(p), &((struct sockaddr_in6 *)&addr->in_addr)->sin6_port);
1213 break;
1214 }
1215}
1216
1217/*
 * Unlike the other *_pton functions, a return value of zero indicates success.
1219 */
1220static int ceph_pton(const char *str, size_t len, struct ceph_entity_addr *addr,
1221 char delim, const char **ipend)
1222{
1223 memset(&addr->in_addr, 0, sizeof(addr->in_addr));
1224
1225 if (in4_pton(str, len, (u8 *)&((struct sockaddr_in *)&addr->in_addr)->sin_addr.s_addr, delim, ipend)) {
1226 put_unaligned(AF_INET, &addr->in_addr.ss_family);
1227 return 0;
1228 }
1229
1230 if (in6_pton(str, len, (u8 *)&((struct sockaddr_in6 *)&addr->in_addr)->sin6_addr.s6_addr, delim, ipend)) {
1231 put_unaligned(AF_INET6, &addr->in_addr.ss_family);
1232 return 0;
1233 }
1234
1235 return -EINVAL;
1236}
1237
1238/*
1239 * Extract hostname string and resolve using kernel DNS facility.
1240 */
1241#ifdef CONFIG_CEPH_LIB_USE_DNS_RESOLVER
1242static int ceph_dns_resolve_name(const char *name, size_t namelen,
1243 struct ceph_entity_addr *addr, char delim, const char **ipend)
1244{
1245 const char *end, *delim_p;
1246 char *colon_p, *ip_addr = NULL;
1247 int ip_len, ret;
1248
1249 /*
1250 * The end of the hostname occurs immediately preceding the delimiter or
1251 * the port marker (':') where the delimiter takes precedence.
1252 */
1253 delim_p = memchr(name, delim, namelen);
1254 colon_p = memchr(name, ':', namelen);
1255
1256 if (delim_p && colon_p)
1257 end = min(delim_p, colon_p);
1258 else if (!delim_p && colon_p)
1259 end = colon_p;
1260 else {
1261 end = delim_p;
1262 if (!end) /* case: hostname:/ */
1263 end = name + namelen;
1264 }
1265
1266 if (end <= name)
1267 return -EINVAL;
1268
1269 /* do dns_resolve upcall */
1270 ip_len = dns_query(current->nsproxy->net_ns,
1271 NULL, name, end - name, NULL, &ip_addr, NULL, false);
1272 if (ip_len > 0)
1273 ret = ceph_pton(ip_addr, ip_len, addr, -1, NULL);
1274 else
1275 ret = -ESRCH;
1276
1277 kfree(ip_addr);
1278
1279 *ipend = end;
1280
1281 pr_info("resolve '%.*s' (ret=%d): %s\n", (int)(end - name), name,
1282 ret, ret ? "failed" : ceph_pr_addr(addr));
1283
1284 return ret;
1285}
1286#else
1287static inline int ceph_dns_resolve_name(const char *name, size_t namelen,
1288 struct ceph_entity_addr *addr, char delim, const char **ipend)
1289{
1290 return -EINVAL;
1291}
1292#endif
1293
1294/*
1295 * Parse a server name (IP or hostname). If a valid IP address is not found
1296 * then try to extract a hostname to resolve using userspace DNS upcall.
1297 */
1298static int ceph_parse_server_name(const char *name, size_t namelen,
1299 struct ceph_entity_addr *addr, char delim, const char **ipend)
1300{
1301 int ret;
1302
1303 ret = ceph_pton(name, namelen, addr, delim, ipend);
1304 if (ret)
1305 ret = ceph_dns_resolve_name(name, namelen, addr, delim, ipend);
1306
1307 return ret;
1308}
1309
1310/*
1311 * Parse an ip[:port] list into an addr array. Use the default
1312 * monitor port if a port isn't specified.
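 *
 * Illustrative example: "1.2.3.4:6789,[2001:db8::1]" parsed with
 * delim ',' yields two entries, the second one using CEPH_MON_PORT.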
1313 */
1314int ceph_parse_ips(const char *c, const char *end,
1315 struct ceph_entity_addr *addr,
1316 int max_count, int *count, char delim)
1317{
1318 int i, ret = -EINVAL;
1319 const char *p = c;
1320
1321 dout("parse_ips on '%.*s'\n", (int)(end-c), c);
1322 for (i = 0; i < max_count; i++) {
1323 char cur_delim = delim;
1324 const char *ipend;
1325 int port;
1326
1327 if (*p == '[') {
1328 cur_delim = ']';
1329 p++;
1330 }
1331
1332 ret = ceph_parse_server_name(p, end - p, &addr[i], cur_delim,
1333 &ipend);
1334 if (ret)
1335 goto bad;
1336 ret = -EINVAL;
1337
1338 p = ipend;
1339
1340 if (cur_delim == ']') {
1341 if (*p != ']') {
1342 dout("missing matching ']'\n");
1343 goto bad;
1344 }
1345 p++;
1346 }
1347
1348 /* port? */
1349 if (p < end && *p == ':') {
1350 port = 0;
1351 p++;
1352 while (p < end && *p >= '0' && *p <= '9') {
1353 port = (port * 10) + (*p - '0');
1354 p++;
1355 }
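			/* a bare ':' or an explicit ":0" falls back to the
			 * default monitor port */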
1356 if (port == 0)
1357 port = CEPH_MON_PORT;
1358 else if (port > 65535)
1359 goto bad;
1360 } else {
1361 port = CEPH_MON_PORT;
1362 }
1363
1364 ceph_addr_set_port(&addr[i], port);
1365 /*
1366 * We want the type to be set according to ms_mode
1367 * option, but options are normally parsed after mon
1368 * addresses. Rather than complicating parsing, set
1369 * to LEGACY and override in build_initial_monmap()
1370 * for mon addresses and ceph_messenger_init() for
1371 * ip option.
1372 */
1373 addr[i].type = CEPH_ENTITY_ADDR_TYPE_LEGACY;
1374 addr[i].nonce = 0;
1375
1376 dout("%s got %s\n", __func__, ceph_pr_addr(&addr[i]));
1377
1378 if (p == end)
1379 break;
1380 if (*p != delim)
1381 goto bad;
1382 p++;
1383 }
1384
1385 if (p != end)
1386 goto bad;
1387
1388 if (count)
1389 *count = i + 1;
1390 return 0;
1391
1392bad:
1393 return ret;
1394}
1395
1396/*
1397 * Process message. This happens in the worker thread. The callback should
1398 * be careful not to do anything that waits on other incoming messages or it
1399 * may deadlock.
1400 */
1401void ceph_con_process_message(struct ceph_connection *con)
1402{
1403 struct ceph_msg *msg = con->in_msg;
1404
1405 BUG_ON(con->in_msg->con != con);
1406 con->in_msg = NULL;
1407
1408 /* if first message, set peer_name */
1409 if (con->peer_name.type == 0)
1410 con->peer_name = msg->hdr.src;
1411
1412 con->in_seq++;
1413 mutex_unlock(&con->mutex);
1414
1415 dout("===== %p %llu from %s%lld %d=%s len %d+%d+%d (%u %u %u) =====\n",
1416 msg, le64_to_cpu(msg->hdr.seq),
1417 ENTITY_NAME(msg->hdr.src),
1418 le16_to_cpu(msg->hdr.type),
1419 ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
1420 le32_to_cpu(msg->hdr.front_len),
1421 le32_to_cpu(msg->hdr.middle_len),
1422 le32_to_cpu(msg->hdr.data_len),
1423 con->in_front_crc, con->in_middle_crc, con->in_data_crc);
1424 con->ops->dispatch(con, msg);
1425
1426 mutex_lock(&con->mutex);
1427}
1428
1429/*
1430 * Atomically queue work on a connection after the specified delay.
1431 * Bump @con reference to avoid races with connection teardown.
1432 * Returns 0 if work was queued, or an error code otherwise.
1433 */
1434static int queue_con_delay(struct ceph_connection *con, unsigned long delay)
1435{
1436 if (!con->ops->get(con)) {
1437 dout("%s %p ref count 0\n", __func__, con);
1438 return -ENOENT;
1439 }
1440
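	/* round delays of a second or more so that timer wakeups batch up */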
1441 if (delay >= HZ)
1442 delay = round_jiffies_relative(delay);
1443
1444 dout("%s %p %lu\n", __func__, con, delay);
1445 if (!queue_delayed_work(ceph_msgr_wq, &con->work, delay)) {
1446 dout("%s %p - already queued\n", __func__, con);
1447 con->ops->put(con);
1448 return -EBUSY;
1449 }
1450
1451 return 0;
1452}
1453
1454static void queue_con(struct ceph_connection *con)
1455{
1456 (void) queue_con_delay(con, 0);
1457}
1458
1459static void cancel_con(struct ceph_connection *con)
1460{
1461 if (cancel_delayed_work(&con->work)) {
1462 dout("%s %p\n", __func__, con);
1463 con->ops->put(con);
1464 }
1465}
1466
1467static bool con_sock_closed(struct ceph_connection *con)
1468{
1469 if (!ceph_con_flag_test_and_clear(con, CEPH_CON_F_SOCK_CLOSED))
1470 return false;
1471
1472#define CASE(x) \
1473 case CEPH_CON_S_ ## x: \
1474 con->error_msg = "socket closed (con state " #x ")"; \
1475 break;
1476
1477 switch (con->state) {
1478 CASE(CLOSED);
1479 CASE(PREOPEN);
1480 CASE(V1_BANNER);
1481 CASE(V1_CONNECT_MSG);
1482 CASE(V2_BANNER_PREFIX);
1483 CASE(V2_BANNER_PAYLOAD);
1484 CASE(V2_HELLO);
1485 CASE(V2_AUTH);
1486 CASE(V2_AUTH_SIGNATURE);
1487 CASE(V2_SESSION_CONNECT);
1488 CASE(V2_SESSION_RECONNECT);
1489 CASE(OPEN);
1490 CASE(STANDBY);
1491 default:
1492 BUG();
1493 }
1494#undef CASE
1495
1496 return true;
1497}
1498
1499static bool con_backoff(struct ceph_connection *con)
1500{
1501 int ret;
1502
1503 if (!ceph_con_flag_test_and_clear(con, CEPH_CON_F_BACKOFF))
1504 return false;
1505
1506 ret = queue_con_delay(con, con->delay);
1507 if (ret) {
1508 dout("%s: con %p FAILED to back off %lu\n", __func__,
1509 con, con->delay);
1510 BUG_ON(ret == -ENOENT);
1511 ceph_con_flag_set(con, CEPH_CON_F_BACKOFF);
1512 }
1513
1514 return true;
1515}
1516
1517/* Finish fault handling; con->mutex must *not* be held here */
1518
1519static void con_fault_finish(struct ceph_connection *con)
1520{
1521 dout("%s %p\n", __func__, con);
1522
1523 /*
1524 * in case we faulted due to authentication, invalidate our
1525 * current tickets so that we can get new ones.
1526 */
1527 if (con->v1.auth_retry) {
1528 dout("auth_retry %d, invalidating\n", con->v1.auth_retry);
1529 if (con->ops->invalidate_authorizer)
1530 con->ops->invalidate_authorizer(con);
1531 con->v1.auth_retry = 0;
1532 }
1533
1534 if (con->ops->fault)
1535 con->ops->fault(con);
1536}
1537
1538/*
1539 * Do some work on a connection. Drop a connection ref when we're done.
1540 */
1541static void ceph_con_workfn(struct work_struct *work)
1542{
1543 struct ceph_connection *con = container_of(work, struct ceph_connection,
1544 work.work);
1545 bool fault;
1546
1547 mutex_lock(&con->mutex);
1548 while (true) {
1549 int ret;
1550
1551 if ((fault = con_sock_closed(con))) {
1552 dout("%s: con %p SOCK_CLOSED\n", __func__, con);
1553 break;
1554 }
1555 if (con_backoff(con)) {
1556 dout("%s: con %p BACKOFF\n", __func__, con);
1557 break;
1558 }
1559 if (con->state == CEPH_CON_S_STANDBY) {
1560 dout("%s: con %p STANDBY\n", __func__, con);
1561 break;
1562 }
1563 if (con->state == CEPH_CON_S_CLOSED) {
1564 dout("%s: con %p CLOSED\n", __func__, con);
1565 BUG_ON(con->sock);
1566 break;
1567 }
1568 if (con->state == CEPH_CON_S_PREOPEN) {
1569 dout("%s: con %p PREOPEN\n", __func__, con);
1570 BUG_ON(con->sock);
1571 }
1572
1573 if (ceph_msgr2(from_msgr(con->msgr)))
1574 ret = ceph_con_v2_try_read(con);
1575 else
1576 ret = ceph_con_v1_try_read(con);
1577 if (ret < 0) {
1578 if (ret == -EAGAIN)
1579 continue;
1580 if (!con->error_msg)
1581 con->error_msg = "socket error on read";
1582 fault = true;
1583 break;
1584 }
1585
1586 if (ceph_msgr2(from_msgr(con->msgr)))
1587 ret = ceph_con_v2_try_write(con);
1588 else
1589 ret = ceph_con_v1_try_write(con);
1590 if (ret < 0) {
1591 if (ret == -EAGAIN)
1592 continue;
1593 if (!con->error_msg)
1594 con->error_msg = "socket error on write";
1595 fault = true;
1596 }
1597
1598 break; /* If we make it to here, we're done */
1599 }
1600 if (fault)
1601 con_fault(con);
1602 mutex_unlock(&con->mutex);
1603
1604 if (fault)
1605 con_fault_finish(con);
1606
1607 con->ops->put(con);
1608}
1609
1610/*
1611 * Generic error/fault handler. A retry mechanism is used with
1612 * exponential backoff
1613 */
1614static void con_fault(struct ceph_connection *con)
1615{
1616 dout("fault %p state %d to peer %s\n",
1617 con, con->state, ceph_pr_addr(&con->peer_addr));
1618
1619 pr_warn("%s%lld %s %s\n", ENTITY_NAME(con->peer_name),
1620 ceph_pr_addr(&con->peer_addr), con->error_msg);
1621 con->error_msg = NULL;
1622
1623 WARN_ON(con->state == CEPH_CON_S_STANDBY ||
1624 con->state == CEPH_CON_S_CLOSED);
1625
1626 ceph_con_reset_protocol(con);
1627
1628 if (ceph_con_flag_test(con, CEPH_CON_F_LOSSYTX)) {
1629 dout("fault on LOSSYTX channel, marking CLOSED\n");
1630 con->state = CEPH_CON_S_CLOSED;
1631 return;
1632 }
1633
1634 /* Requeue anything that hasn't been acked */
1635 list_splice_init(&con->out_sent, &con->out_queue);
1636
1637 /* If there are no messages queued or keepalive pending, place
1638 * the connection in a STANDBY state */
1639 if (list_empty(&con->out_queue) &&
1640 !ceph_con_flag_test(con, CEPH_CON_F_KEEPALIVE_PENDING)) {
1641 dout("fault %p setting STANDBY clearing WRITE_PENDING\n", con);
1642 ceph_con_flag_clear(con, CEPH_CON_F_WRITE_PENDING);
1643 con->state = CEPH_CON_S_STANDBY;
1644 } else {
1645 /* retry after a delay. */
1646 con->state = CEPH_CON_S_PREOPEN;
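		/* exponential backoff: double the delay on each successive
		 * fault, capped at MAX_DELAY_INTERVAL */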
1647 if (!con->delay) {
1648 con->delay = BASE_DELAY_INTERVAL;
1649 } else if (con->delay < MAX_DELAY_INTERVAL) {
1650 con->delay *= 2;
1651 if (con->delay > MAX_DELAY_INTERVAL)
1652 con->delay = MAX_DELAY_INTERVAL;
1653 }
1654 ceph_con_flag_set(con, CEPH_CON_F_BACKOFF);
1655 queue_con(con);
1656 }
1657}
1658
1659void ceph_messenger_reset_nonce(struct ceph_messenger *msgr)
1660{
1661 u32 nonce = le32_to_cpu(msgr->inst.addr.nonce) + 1000000;
1662 msgr->inst.addr.nonce = cpu_to_le32(nonce);
1663 ceph_encode_my_addr(msgr);
1664}
1665
1666/*
1667 * initialize a new messenger instance
1668 */
1669void ceph_messenger_init(struct ceph_messenger *msgr,
1670 struct ceph_entity_addr *myaddr)
1671{
1672 spin_lock_init(&msgr->global_seq_lock);
1673
1674 if (myaddr) {
1675 memcpy(&msgr->inst.addr.in_addr, &myaddr->in_addr,
1676 sizeof(msgr->inst.addr.in_addr));
1677 ceph_addr_set_port(&msgr->inst.addr, 0);
1678 }
1679
1680 /*
1681 * Since nautilus, clients are identified using type ANY.
1682 * For msgr1, ceph_encode_banner_addr() munges it to NONE.
1683 */
1684 msgr->inst.addr.type = CEPH_ENTITY_ADDR_TYPE_ANY;
1685
1686 /* generate a random non-zero nonce */
1687 do {
1688 get_random_bytes(&msgr->inst.addr.nonce,
1689 sizeof(msgr->inst.addr.nonce));
1690 } while (!msgr->inst.addr.nonce);
1691 ceph_encode_my_addr(msgr);
1692
1693 atomic_set(&msgr->stopping, 0);
1694 write_pnet(&msgr->net, get_net(current->nsproxy->net_ns));
1695
1696 dout("%s %p\n", __func__, msgr);
1697}
1698
1699void ceph_messenger_fini(struct ceph_messenger *msgr)
1700{
1701 put_net(read_pnet(&msgr->net));
1702}
1703
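/*
 * Associate @msg with @con: drop the ref held on any previous connection
 * and take a new ref on @con (or just clear the association if @con is
 * NULL).
 */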
1704static void msg_con_set(struct ceph_msg *msg, struct ceph_connection *con)
1705{
1706 if (msg->con)
1707 msg->con->ops->put(msg->con);
1708
1709 msg->con = con ? con->ops->get(con) : NULL;
1710 BUG_ON(msg->con != con);
1711}
1712
1713static void clear_standby(struct ceph_connection *con)
1714{
1715 /* come back from STANDBY? */
1716 if (con->state == CEPH_CON_S_STANDBY) {
1717 dout("clear_standby %p and ++connect_seq\n", con);
1718 con->state = CEPH_CON_S_PREOPEN;
1719 con->v1.connect_seq++;
1720 WARN_ON(ceph_con_flag_test(con, CEPH_CON_F_WRITE_PENDING));
1721 WARN_ON(ceph_con_flag_test(con, CEPH_CON_F_KEEPALIVE_PENDING));
1722 }
1723}
1724
1725/*
1726 * Queue up an outgoing message on the given connection.
1727 *
1728 * Consumes a ref on @msg.
1729 */
1730void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg)
1731{
1732 /* set src+dst */
1733 msg->hdr.src = con->msgr->inst.name;
1734 BUG_ON(msg->front.iov_len != le32_to_cpu(msg->hdr.front_len));
1735 msg->needs_out_seq = true;
1736
1737 mutex_lock(&con->mutex);
1738
1739 if (con->state == CEPH_CON_S_CLOSED) {
1740 dout("con_send %p closed, dropping %p\n", con, msg);
1741 ceph_msg_put(msg);
1742 mutex_unlock(&con->mutex);
1743 return;
1744 }
1745
1746 msg_con_set(msg, con);
1747
1748 BUG_ON(!list_empty(&msg->list_head));
1749 list_add_tail(&msg->list_head, &con->out_queue);
1750 dout("----- %p to %s%lld %d=%s len %d+%d+%d -----\n", msg,
1751 ENTITY_NAME(con->peer_name), le16_to_cpu(msg->hdr.type),
1752 ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
1753 le32_to_cpu(msg->hdr.front_len),
1754 le32_to_cpu(msg->hdr.middle_len),
1755 le32_to_cpu(msg->hdr.data_len));
1756
1757 clear_standby(con);
1758 mutex_unlock(&con->mutex);
1759
1760 /* if there wasn't anything waiting to send before, queue
1761 * new work */
1762 if (!ceph_con_flag_test_and_set(con, CEPH_CON_F_WRITE_PENDING))
1763 queue_con(con);
1764}
1765EXPORT_SYMBOL(ceph_con_send);
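
/*
 * Illustrative sketch of a typical sender: allocate a message and hand
 * it off; ceph_con_send() consumes the caller's reference:
 *
 *	msg = ceph_msg_new(CEPH_MSG_PING, 0, GFP_NOFS, true);
 *	if (msg)
 *		ceph_con_send(con, msg);
 */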
1766
1767/*
1768 * Revoke a message that was previously queued for send
1769 */
1770void ceph_msg_revoke(struct ceph_msg *msg)
1771{
1772 struct ceph_connection *con = msg->con;
1773
1774 if (!con) {
1775 dout("%s msg %p null con\n", __func__, msg);
1776 return; /* Message not in our possession */
1777 }
1778
1779 mutex_lock(&con->mutex);
1780 if (list_empty(&msg->list_head)) {
1781 WARN_ON(con->out_msg == msg);
1782 dout("%s con %p msg %p not linked\n", __func__, con, msg);
1783 mutex_unlock(&con->mutex);
1784 return;
1785 }
1786
1787 dout("%s con %p msg %p was linked\n", __func__, con, msg);
1788 msg->hdr.seq = 0;
1789 ceph_msg_remove(msg);
1790
1791 if (con->out_msg == msg) {
1792 WARN_ON(con->state != CEPH_CON_S_OPEN);
1793 dout("%s con %p msg %p was sending\n", __func__, con, msg);
1794 if (ceph_msgr2(from_msgr(con->msgr)))
1795 ceph_con_v2_revoke(con);
1796 else
1797 ceph_con_v1_revoke(con);
1798 ceph_msg_put(con->out_msg);
1799 con->out_msg = NULL;
1800 } else {
1801 dout("%s con %p msg %p not current, out_msg %p\n", __func__,
1802 con, msg, con->out_msg);
1803 }
1804 mutex_unlock(&con->mutex);
1805}
1806
1807/*
1808 * Revoke a message that we may be reading data into
1809 */
1810void ceph_msg_revoke_incoming(struct ceph_msg *msg)
1811{
1812 struct ceph_connection *con = msg->con;
1813
1814 if (!con) {
1815 dout("%s msg %p null con\n", __func__, msg);
1816 return; /* Message not in our possession */
1817 }
1818
1819 mutex_lock(&con->mutex);
1820 if (con->in_msg == msg) {
1821 WARN_ON(con->state != CEPH_CON_S_OPEN);
1822 dout("%s con %p msg %p was recving\n", __func__, con, msg);
1823 if (ceph_msgr2(from_msgr(con->msgr)))
1824 ceph_con_v2_revoke_incoming(con);
1825 else
1826 ceph_con_v1_revoke_incoming(con);
1827 ceph_msg_put(con->in_msg);
1828 con->in_msg = NULL;
1829 } else {
1830 dout("%s con %p msg %p not current, in_msg %p\n", __func__,
1831 con, msg, con->in_msg);
1832 }
1833 mutex_unlock(&con->mutex);
1834}
1835
1836/*
1837 * Queue a keepalive byte to ensure the tcp connection is alive.
1838 */
1839void ceph_con_keepalive(struct ceph_connection *con)
1840{
1841 dout("con_keepalive %p\n", con);
1842 mutex_lock(&con->mutex);
1843 clear_standby(con);
1844 ceph_con_flag_set(con, CEPH_CON_F_KEEPALIVE_PENDING);
1845 mutex_unlock(&con->mutex);
1846
1847 if (!ceph_con_flag_test_and_set(con, CEPH_CON_F_WRITE_PENDING))
1848 queue_con(con);
1849}
1850EXPORT_SYMBOL(ceph_con_keepalive);
1851
1852bool ceph_con_keepalive_expired(struct ceph_connection *con,
1853 unsigned long interval)
1854{
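	/* expired if last_keepalive_ack + interval is already in the past */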
1855 if (interval > 0 &&
1856 (con->peer_features & CEPH_FEATURE_MSGR_KEEPALIVE2)) {
1857 struct timespec64 now;
1858 struct timespec64 ts;
1859 ktime_get_real_ts64(&now);
1860 jiffies_to_timespec64(interval, &ts);
1861 ts = timespec64_add(con->last_keepalive_ack, ts);
1862 return timespec64_compare(&now, &ts) >= 0;
1863 }
1864 return false;
1865}
1866
1867static struct ceph_msg_data *ceph_msg_data_add(struct ceph_msg *msg)
1868{
1869 BUG_ON(msg->num_data_items >= msg->max_data_items);
1870 return &msg->data[msg->num_data_items++];
1871}
1872
1873static void ceph_msg_data_destroy(struct ceph_msg_data *data)
1874{
1875 if (data->type == CEPH_MSG_DATA_PAGES && data->own_pages) {
1876 int num_pages = calc_pages_for(data->alignment, data->length);
1877 ceph_release_page_vector(data->pages, num_pages);
1878 } else if (data->type == CEPH_MSG_DATA_PAGELIST) {
1879 ceph_pagelist_release(data->pagelist);
1880 }
1881}
1882
1883void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages,
1884 size_t length, size_t alignment, bool own_pages)
1885{
1886 struct ceph_msg_data *data;
1887
1888 BUG_ON(!pages);
1889 BUG_ON(!length);
1890
1891 data = ceph_msg_data_add(msg);
1892 data->type = CEPH_MSG_DATA_PAGES;
1893 data->pages = pages;
1894 data->length = length;
1895 data->alignment = alignment & ~PAGE_MASK;
1896 data->own_pages = own_pages;
1897
1898 msg->data_length += length;
1899}
1900EXPORT_SYMBOL(ceph_msg_data_add_pages);
1901
1902void ceph_msg_data_add_pagelist(struct ceph_msg *msg,
1903 struct ceph_pagelist *pagelist)
1904{
1905 struct ceph_msg_data *data;
1906
1907 BUG_ON(!pagelist);
1908 BUG_ON(!pagelist->length);
1909
1910 data = ceph_msg_data_add(msg);
1911 data->type = CEPH_MSG_DATA_PAGELIST;
1912 refcount_inc(&pagelist->refcnt);
1913 data->pagelist = pagelist;
1914
1915 msg->data_length += pagelist->length;
1916}
1917EXPORT_SYMBOL(ceph_msg_data_add_pagelist);
1918
1919#ifdef CONFIG_BLOCK
1920void ceph_msg_data_add_bio(struct ceph_msg *msg, struct ceph_bio_iter *bio_pos,
1921 u32 length)
1922{
1923 struct ceph_msg_data *data;
1924
1925 data = ceph_msg_data_add(msg);
1926 data->type = CEPH_MSG_DATA_BIO;
1927 data->bio_pos = *bio_pos;
1928 data->bio_length = length;
1929
1930 msg->data_length += length;
1931}
1932EXPORT_SYMBOL(ceph_msg_data_add_bio);
1933#endif /* CONFIG_BLOCK */
1934
1935void ceph_msg_data_add_bvecs(struct ceph_msg *msg,
1936 struct ceph_bvec_iter *bvec_pos)
1937{
1938 struct ceph_msg_data *data;
1939
1940 data = ceph_msg_data_add(msg);
1941 data->type = CEPH_MSG_DATA_BVECS;
1942 data->bvec_pos = *bvec_pos;
1943
1944 msg->data_length += bvec_pos->iter.bi_size;
1945}
1946EXPORT_SYMBOL(ceph_msg_data_add_bvecs);
1947
1948void ceph_msg_data_add_iter(struct ceph_msg *msg,
1949 struct iov_iter *iter)
1950{
1951 struct ceph_msg_data *data;
1952
1953 data = ceph_msg_data_add(msg);
1954 data->type = CEPH_MSG_DATA_ITER;
1955 data->iter = *iter;
1956
1957 msg->data_length += iov_iter_count(&data->iter);
1958}
1959
1960/*
1961 * construct a new message with given type, size
1962 * the new msg has a ref count of 1.
1963 */
1964struct ceph_msg *ceph_msg_new2(int type, int front_len, int max_data_items,
1965 gfp_t flags, bool can_fail)
1966{
1967 struct ceph_msg *m;
1968
1969 m = kmem_cache_zalloc(ceph_msg_cache, flags);
1970 if (m == NULL)
1971 goto out;
1972
1973 m->hdr.type = cpu_to_le16(type);
1974 m->hdr.priority = cpu_to_le16(CEPH_MSG_PRIO_DEFAULT);
1975 m->hdr.front_len = cpu_to_le32(front_len);
1976
1977 INIT_LIST_HEAD(&m->list_head);
1978 kref_init(&m->kref);
1979
1980 /* front */
1981 if (front_len) {
1982 m->front.iov_base = kvmalloc(front_len, flags);
1983 if (m->front.iov_base == NULL) {
1984 dout("ceph_msg_new can't allocate %d bytes\n",
1985 front_len);
1986 goto out2;
1987 }
1988 } else {
1989 m->front.iov_base = NULL;
1990 }
1991 m->front_alloc_len = m->front.iov_len = front_len;
1992
1993 if (max_data_items) {
1994 m->data = kmalloc_array(max_data_items, sizeof(*m->data),
1995 flags);
1996 if (!m->data)
1997 goto out2;
1998
1999 m->max_data_items = max_data_items;
2000 }
2001
2002 dout("ceph_msg_new %p front %d\n", m, front_len);
2003 return m;
2004
2005out2:
2006 ceph_msg_put(m);
2007out:
2008 if (!can_fail) {
2009 pr_err("msg_new can't create type %d front %d\n", type,
2010 front_len);
2011 WARN_ON(1);
2012 } else {
2013 dout("msg_new can't create type %d front %d\n", type,
2014 front_len);
2015 }
2016 return NULL;
2017}
2018EXPORT_SYMBOL(ceph_msg_new2);
2019
2020struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags,
2021 bool can_fail)
2022{
2023 return ceph_msg_new2(type, front_len, 0, flags, can_fail);
2024}
2025EXPORT_SYMBOL(ceph_msg_new);
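/*
 * Illustrative caller sketch (not taken from this file): allocate a
 * message with room for one data item and attach a page array to it.
 * "front_len", "pages" and "len" are hypothetical caller-side values.
 *
 *	struct ceph_msg *msg;
 *
 *	msg = ceph_msg_new2(CEPH_MSG_OSD_OP, front_len, 1, GFP_NOFS, true);
 *	if (!msg)
 *		return -ENOMEM;
 *	ceph_msg_data_add_pages(msg, pages, len, 0, false);
 */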
2026
2027/*
2028 * Allocate "middle" portion of a message, if it is needed and wasn't
2029 * allocated by alloc_msg. This allows us to read a small fixed-size
2030 * per-type header in the front and then gracefully fail (i.e.,
2031 * propagate the error to the caller based on info in the front) when
2032 * the middle is too large.
2033 */
2034static int ceph_alloc_middle(struct ceph_connection *con, struct ceph_msg *msg)
2035{
2036 int type = le16_to_cpu(msg->hdr.type);
2037 int middle_len = le32_to_cpu(msg->hdr.middle_len);
2038
2039 dout("alloc_middle %p type %d %s middle_len %d\n", msg, type,
2040 ceph_msg_type_name(type), middle_len);
2041 BUG_ON(!middle_len);
2042 BUG_ON(msg->middle);
2043
2044 msg->middle = ceph_buffer_new(middle_len, GFP_NOFS);
2045 if (!msg->middle)
2046 return -ENOMEM;
2047 return 0;
2048}
2049
2050/*
2051 * Allocate a message for receiving an incoming message on a
2052 * connection, and save the result in con->in_msg. Uses the
2053 * connection's private alloc_msg op if available.
2054 *
2055 * Returns 0 on success, or a negative error code.
2056 *
2057 * On success, if we set *skip = 1:
2058 * - the next message should be skipped and ignored.
2059 * - con->in_msg == NULL
2060 * or if we set *skip = 0:
2061 * - con->in_msg is non-null.
2062 * On error (ENOMEM, EAGAIN, ...),
2063 * - con->in_msg == NULL
2064 */
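/*
 * A hypothetical caller (the real ones are the messenger_v1/v2 read
 * paths) would use it roughly like this:
 *
 *	ret = ceph_con_in_msg_alloc(con, hdr, &skip);
 *	if (ret < 0)
 *		return ret;     (fault: ENOMEM, EAGAIN, ...)
 *	if (skip)
 *		... consume and discard the message body ...
 *	else
 *		... receive the body into con->in_msg ...
 */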
2065int ceph_con_in_msg_alloc(struct ceph_connection *con,
2066 struct ceph_msg_header *hdr, int *skip)
2067{
2068 int middle_len = le32_to_cpu(hdr->middle_len);
2069 struct ceph_msg *msg;
2070 int ret = 0;
2071
2072 BUG_ON(con->in_msg != NULL);
2073 BUG_ON(!con->ops->alloc_msg);
2074
2075 mutex_unlock(&con->mutex);
2076 msg = con->ops->alloc_msg(con, hdr, skip);
2077 mutex_lock(&con->mutex);
2078 if (con->state != CEPH_CON_S_OPEN) {
2079 if (msg)
2080 ceph_msg_put(msg);
2081 return -EAGAIN;
2082 }
2083 if (msg) {
2084 BUG_ON(*skip);
2085 msg_con_set(msg, con);
2086 con->in_msg = msg;
2087 } else {
2088 /*
2089 * Null message pointer means either we should skip
2090 * this message or we couldn't allocate memory. The
2091 * former is not an error.
2092 */
2093 if (*skip)
2094 return 0;
2095
2096 con->error_msg = "error allocating memory for incoming message";
2097 return -ENOMEM;
2098 }
2099 memcpy(&con->in_msg->hdr, hdr, sizeof(*hdr));
2100
2101 if (middle_len && !con->in_msg->middle) {
2102 ret = ceph_alloc_middle(con, con->in_msg);
2103 if (ret < 0) {
2104 ceph_msg_put(con->in_msg);
2105 con->in_msg = NULL;
2106 }
2107 }
2108
2109 return ret;
2110}
2111
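/*
 * Pick the next message off con->out_queue and make it the connection's
 * outgoing message: move it to the "sent" list, assign its outgoing seq
 * number if it hasn't been sent before, and take a ref for the send path
 * in con->out_msg.
 */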
2112void ceph_con_get_out_msg(struct ceph_connection *con)
2113{
2114 struct ceph_msg *msg;
2115
2116 BUG_ON(list_empty(&con->out_queue));
2117 msg = list_first_entry(&con->out_queue, struct ceph_msg, list_head);
2118 WARN_ON(msg->con != con);
2119
2120 /*
2121	 * Put the message on the "sent" list using the ref taken in ceph_con_send().
2122	 * That ref is put when the message is acked or revoked.
2123 */
2124 list_move_tail(&msg->list_head, &con->out_sent);
2125
2126 /*
2127 * Only assign outgoing seq # if we haven't sent this message
2128	 * yet. If it is requeued, resend with its original seq.
2129 */
2130 if (msg->needs_out_seq) {
2131 msg->hdr.seq = cpu_to_le64(++con->out_seq);
2132 msg->needs_out_seq = false;
2133
2134 if (con->ops->reencode_message)
2135 con->ops->reencode_message(msg);
2136 }
2137
2138 /*
2139 * Get a ref for out_msg. It is put when we are done sending the
2140 * message or in case of a fault.
2141 */
2142 WARN_ON(con->out_msg);
2143 con->out_msg = ceph_msg_get(msg);
2144}
2145
2146/*
2147 * Free a generically allocated message, i.e. one not backed by a msgpool.
2148 */
2149static void ceph_msg_free(struct ceph_msg *m)
2150{
2151 dout("%s %p\n", __func__, m);
2152 kvfree(m->front.iov_base);
2153 kfree(m->data);
2154 kmem_cache_free(ceph_msg_cache, m);
2155}
2156
2157static void ceph_msg_release(struct kref *kref)
2158{
2159 struct ceph_msg *m = container_of(kref, struct ceph_msg, kref);
2160 int i;
2161
2162 dout("%s %p\n", __func__, m);
2163 WARN_ON(!list_empty(&m->list_head));
2164
2165 msg_con_set(m, NULL);
2166
2167 /* drop middle, data, if any */
2168 if (m->middle) {
2169 ceph_buffer_put(m->middle);
2170 m->middle = NULL;
2171 }
2172
2173 for (i = 0; i < m->num_data_items; i++)
2174 ceph_msg_data_destroy(&m->data[i]);
2175
2176 if (m->pool)
2177 ceph_msgpool_put(m->pool, m);
2178 else
2179 ceph_msg_free(m);
2180}
2181
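/*
 * Take and drop message references.  The final ceph_msg_put() frees the
 * message via ceph_msg_release().
 */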
2182struct ceph_msg *ceph_msg_get(struct ceph_msg *msg)
2183{
2184 dout("%s %p (was %d)\n", __func__, msg,
2185 kref_read(&msg->kref));
2186 kref_get(&msg->kref);
2187 return msg;
2188}
2189EXPORT_SYMBOL(ceph_msg_get);
2190
2191void ceph_msg_put(struct ceph_msg *msg)
2192{
2193 dout("%s %p (was %d)\n", __func__, msg,
2194 kref_read(&msg->kref));
2195 kref_put(&msg->kref, ceph_msg_release);
2196}
2197EXPORT_SYMBOL(ceph_msg_put);
2198
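/*
 * Hex-dump the header, front, middle (if any) and footer of a message
 * to the kernel log for debugging.
 */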
2199void ceph_msg_dump(struct ceph_msg *msg)
2200{
2201 pr_debug("msg_dump %p (front_alloc_len %d length %zd)\n", msg,
2202 msg->front_alloc_len, msg->data_length);
2203 print_hex_dump(KERN_DEBUG, "header: ",
2204 DUMP_PREFIX_OFFSET, 16, 1,
2205 &msg->hdr, sizeof(msg->hdr), true);
2206 print_hex_dump(KERN_DEBUG, " front: ",
2207 DUMP_PREFIX_OFFSET, 16, 1,
2208 msg->front.iov_base, msg->front.iov_len, true);
2209 if (msg->middle)
2210 print_hex_dump(KERN_DEBUG, "middle: ",
2211 DUMP_PREFIX_OFFSET, 16, 1,
2212 msg->middle->vec.iov_base,
2213 msg->middle->vec.iov_len, true);
2214 print_hex_dump(KERN_DEBUG, "footer: ",
2215 DUMP_PREFIX_OFFSET, 16, 1,
2216 &msg->footer, sizeof(msg->footer), true);
2217}
2218EXPORT_SYMBOL(ceph_msg_dump);
85/*
86 * connection states
87 */
88#define CON_STATE_CLOSED 1 /* -> PREOPEN */
89#define CON_STATE_PREOPEN 2 /* -> CONNECTING, CLOSED */
90#define CON_STATE_CONNECTING 3 /* -> NEGOTIATING, CLOSED */
91#define CON_STATE_NEGOTIATING 4 /* -> OPEN, CLOSED */
92#define CON_STATE_OPEN 5 /* -> STANDBY, CLOSED */
93#define CON_STATE_STANDBY 6 /* -> PREOPEN, CLOSED */
94
95/*
96 * ceph_connection flag bits
97 */
98#define CON_FLAG_LOSSYTX 0 /* we can close channel or drop
99 * messages on errors */
100#define CON_FLAG_KEEPALIVE_PENDING 1 /* we need to send a keepalive */
101#define CON_FLAG_WRITE_PENDING 2 /* we have data ready to send */
102#define CON_FLAG_SOCK_CLOSED 3 /* socket state changed to closed */
103#define CON_FLAG_BACKOFF 4 /* need to retry queuing delayed work */
104
105static bool con_flag_valid(unsigned long con_flag)
106{
107 switch (con_flag) {
108 case CON_FLAG_LOSSYTX:
109 case CON_FLAG_KEEPALIVE_PENDING:
110 case CON_FLAG_WRITE_PENDING:
111 case CON_FLAG_SOCK_CLOSED:
112 case CON_FLAG_BACKOFF:
113 return true;
114 default:
115 return false;
116 }
117}
118
119static void con_flag_clear(struct ceph_connection *con, unsigned long con_flag)
120{
121 BUG_ON(!con_flag_valid(con_flag));
122
123 clear_bit(con_flag, &con->flags);
124}
125
126static void con_flag_set(struct ceph_connection *con, unsigned long con_flag)
127{
128 BUG_ON(!con_flag_valid(con_flag));
129
130 set_bit(con_flag, &con->flags);
131}
132
133static bool con_flag_test(struct ceph_connection *con, unsigned long con_flag)
134{
135 BUG_ON(!con_flag_valid(con_flag));
136
137 return test_bit(con_flag, &con->flags);
138}
139
140static bool con_flag_test_and_clear(struct ceph_connection *con,
141 unsigned long con_flag)
142{
143 BUG_ON(!con_flag_valid(con_flag));
144
145 return test_and_clear_bit(con_flag, &con->flags);
146}
147
148static bool con_flag_test_and_set(struct ceph_connection *con,
149 unsigned long con_flag)
150{
151 BUG_ON(!con_flag_valid(con_flag));
152
153 return test_and_set_bit(con_flag, &con->flags);
154}
155
156/* Slab caches for frequently-allocated structures */
157
158static struct kmem_cache *ceph_msg_cache;
159static struct kmem_cache *ceph_msg_data_cache;
160
161/* static tag bytes (protocol control messages) */
162static char tag_msg = CEPH_MSGR_TAG_MSG;
163static char tag_ack = CEPH_MSGR_TAG_ACK;
164static char tag_keepalive = CEPH_MSGR_TAG_KEEPALIVE;
165static char tag_keepalive2 = CEPH_MSGR_TAG_KEEPALIVE2;
166
167#ifdef CONFIG_LOCKDEP
168static struct lock_class_key socket_class;
169#endif
170
171/*
172 * When skipping (ignoring) a block of input we read it into a "skip
173 * buffer," which is this many bytes in size.
174 */
175#define SKIP_BUF_SIZE 1024
176
177static void queue_con(struct ceph_connection *con);
178static void cancel_con(struct ceph_connection *con);
179static void ceph_con_workfn(struct work_struct *);
180static void con_fault(struct ceph_connection *con);
181
182/*
183 * Nicely render a sockaddr as a string. An array of formatted
184 * strings is used, to approximate reentrancy.
185 */
186#define ADDR_STR_COUNT_LOG 5 /* log2(# address strings in array) */
187#define ADDR_STR_COUNT (1 << ADDR_STR_COUNT_LOG)
188#define ADDR_STR_COUNT_MASK (ADDR_STR_COUNT - 1)
189#define MAX_ADDR_STR_LEN 64 /* 54 is enough */
190
191static char addr_str[ADDR_STR_COUNT][MAX_ADDR_STR_LEN];
192static atomic_t addr_str_seq = ATOMIC_INIT(0);
193
194static struct page *zero_page; /* used in certain error cases */
195
196const char *ceph_pr_addr(const struct sockaddr_storage *ss)
197{
198 int i;
199 char *s;
200 struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
201 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;
202
203 i = atomic_inc_return(&addr_str_seq) & ADDR_STR_COUNT_MASK;
204 s = addr_str[i];
205
206 switch (ss->ss_family) {
207 case AF_INET:
208 snprintf(s, MAX_ADDR_STR_LEN, "%pI4:%hu", &in4->sin_addr,
209 ntohs(in4->sin_port));
210 break;
211
212 case AF_INET6:
213 snprintf(s, MAX_ADDR_STR_LEN, "[%pI6c]:%hu", &in6->sin6_addr,
214 ntohs(in6->sin6_port));
215 break;
216
217 default:
218 snprintf(s, MAX_ADDR_STR_LEN, "(unknown sockaddr family %hu)",
219 ss->ss_family);
220 }
221
222 return s;
223}
224EXPORT_SYMBOL(ceph_pr_addr);
225
226static void encode_my_addr(struct ceph_messenger *msgr)
227{
228 memcpy(&msgr->my_enc_addr, &msgr->inst.addr, sizeof(msgr->my_enc_addr));
229 ceph_encode_addr(&msgr->my_enc_addr);
230}
231
232/*
233 * work queue for all reading and writing to/from the socket.
234 */
235static struct workqueue_struct *ceph_msgr_wq;
236
237static int ceph_msgr_slab_init(void)
238{
239 BUG_ON(ceph_msg_cache);
240 ceph_msg_cache = KMEM_CACHE(ceph_msg, 0);
241 if (!ceph_msg_cache)
242 return -ENOMEM;
243
244 BUG_ON(ceph_msg_data_cache);
245 ceph_msg_data_cache = KMEM_CACHE(ceph_msg_data, 0);
246 if (ceph_msg_data_cache)
247 return 0;
248
249 kmem_cache_destroy(ceph_msg_cache);
250 ceph_msg_cache = NULL;
251
252 return -ENOMEM;
253}
254
255static void ceph_msgr_slab_exit(void)
256{
257 BUG_ON(!ceph_msg_data_cache);
258 kmem_cache_destroy(ceph_msg_data_cache);
259 ceph_msg_data_cache = NULL;
260
261 BUG_ON(!ceph_msg_cache);
262 kmem_cache_destroy(ceph_msg_cache);
263 ceph_msg_cache = NULL;
264}
265
266static void _ceph_msgr_exit(void)
267{
268 if (ceph_msgr_wq) {
269 destroy_workqueue(ceph_msgr_wq);
270 ceph_msgr_wq = NULL;
271 }
272
273 BUG_ON(zero_page == NULL);
274 put_page(zero_page);
275 zero_page = NULL;
276
277 ceph_msgr_slab_exit();
278}
279
280int __init ceph_msgr_init(void)
281{
282 if (ceph_msgr_slab_init())
283 return -ENOMEM;
284
285 BUG_ON(zero_page != NULL);
286 zero_page = ZERO_PAGE(0);
287 get_page(zero_page);
288
289 /*
290 * The number of active work items is limited by the number of
291 * connections, so leave @max_active at default.
292 */
293 ceph_msgr_wq = alloc_workqueue("ceph-msgr", WQ_MEM_RECLAIM, 0);
294 if (ceph_msgr_wq)
295 return 0;
296
297 pr_err("msgr_init failed to create workqueue\n");
298 _ceph_msgr_exit();
299
300 return -ENOMEM;
301}
302
303void ceph_msgr_exit(void)
304{
305 BUG_ON(ceph_msgr_wq == NULL);
306
307 _ceph_msgr_exit();
308}
309
310void ceph_msgr_flush(void)
311{
312 flush_workqueue(ceph_msgr_wq);
313}
314EXPORT_SYMBOL(ceph_msgr_flush);
315
316/* Connection socket state transition functions */
317
318static void con_sock_state_init(struct ceph_connection *con)
319{
320 int old_state;
321
322 old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSED);
323 if (WARN_ON(old_state != CON_SOCK_STATE_NEW))
324 printk("%s: unexpected old state %d\n", __func__, old_state);
325 dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
326 CON_SOCK_STATE_CLOSED);
327}
328
329static void con_sock_state_connecting(struct ceph_connection *con)
330{
331 int old_state;
332
333 old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CONNECTING);
334 if (WARN_ON(old_state != CON_SOCK_STATE_CLOSED))
335 printk("%s: unexpected old state %d\n", __func__, old_state);
336 dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
337 CON_SOCK_STATE_CONNECTING);
338}
339
340static void con_sock_state_connected(struct ceph_connection *con)
341{
342 int old_state;
343
344 old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CONNECTED);
345 if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTING))
346 printk("%s: unexpected old state %d\n", __func__, old_state);
347 dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
348 CON_SOCK_STATE_CONNECTED);
349}
350
351static void con_sock_state_closing(struct ceph_connection *con)
352{
353 int old_state;
354
355 old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSING);
356 if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTING &&
357 old_state != CON_SOCK_STATE_CONNECTED &&
358 old_state != CON_SOCK_STATE_CLOSING))
359 printk("%s: unexpected old state %d\n", __func__, old_state);
360 dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
361 CON_SOCK_STATE_CLOSING);
362}
363
364static void con_sock_state_closed(struct ceph_connection *con)
365{
366 int old_state;
367
368 old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSED);
369 if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTED &&
370 old_state != CON_SOCK_STATE_CLOSING &&
371 old_state != CON_SOCK_STATE_CONNECTING &&
372 old_state != CON_SOCK_STATE_CLOSED))
373 printk("%s: unexpected old state %d\n", __func__, old_state);
374 dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
375 CON_SOCK_STATE_CLOSED);
376}
377
378/*
379 * socket callback functions
380 */
381
382/* data available on socket, or listen socket received a connect */
383static void ceph_sock_data_ready(struct sock *sk)
384{
385 struct ceph_connection *con = sk->sk_user_data;
386 if (atomic_read(&con->msgr->stopping)) {
387 return;
388 }
389
390 if (sk->sk_state != TCP_CLOSE_WAIT) {
391 dout("%s on %p state = %lu, queueing work\n", __func__,
392 con, con->state);
393 queue_con(con);
394 }
395}
396
397/* socket has buffer space for writing */
398static void ceph_sock_write_space(struct sock *sk)
399{
400 struct ceph_connection *con = sk->sk_user_data;
401
402 /* only queue to workqueue if there is data we want to write,
403 * and there is sufficient space in the socket buffer to accept
404 * more data. clear SOCK_NOSPACE so that ceph_sock_write_space()
405 * doesn't get called again until try_write() fills the socket
406 * buffer. See net/ipv4/tcp_input.c:tcp_check_space()
407 * and net/core/stream.c:sk_stream_write_space().
408 */
409 if (con_flag_test(con, CON_FLAG_WRITE_PENDING)) {
410 if (sk_stream_is_writeable(sk)) {
411 dout("%s %p queueing write work\n", __func__, con);
412 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
413 queue_con(con);
414 }
415 } else {
416 dout("%s %p nothing to write\n", __func__, con);
417 }
418}
419
420/* socket's state has changed */
421static void ceph_sock_state_change(struct sock *sk)
422{
423 struct ceph_connection *con = sk->sk_user_data;
424
425 dout("%s %p state = %lu sk_state = %u\n", __func__,
426 con, con->state, sk->sk_state);
427
428 switch (sk->sk_state) {
429 case TCP_CLOSE:
430 dout("%s TCP_CLOSE\n", __func__);
431 /* fall through */
432 case TCP_CLOSE_WAIT:
433 dout("%s TCP_CLOSE_WAIT\n", __func__);
434 con_sock_state_closing(con);
435 con_flag_set(con, CON_FLAG_SOCK_CLOSED);
436 queue_con(con);
437 break;
438 case TCP_ESTABLISHED:
439 dout("%s TCP_ESTABLISHED\n", __func__);
440 con_sock_state_connected(con);
441 queue_con(con);
442 break;
443 default: /* Everything else is uninteresting */
444 break;
445 }
446}
447
448/*
449 * set up socket callbacks
450 */
451static void set_sock_callbacks(struct socket *sock,
452 struct ceph_connection *con)
453{
454 struct sock *sk = sock->sk;
455 sk->sk_user_data = con;
456 sk->sk_data_ready = ceph_sock_data_ready;
457 sk->sk_write_space = ceph_sock_write_space;
458 sk->sk_state_change = ceph_sock_state_change;
459}
460
461
462/*
463 * socket helpers
464 */
465
466/*
467 * initiate connection to a remote socket.
468 */
469static int ceph_tcp_connect(struct ceph_connection *con)
470{
471 struct sockaddr_storage *paddr = &con->peer_addr.in_addr;
472 struct socket *sock;
473 unsigned int noio_flag;
474 int ret;
475
476 BUG_ON(con->sock);
477
478 /* sock_create_kern() allocates with GFP_KERNEL */
479 noio_flag = memalloc_noio_save();
480 ret = sock_create_kern(read_pnet(&con->msgr->net), paddr->ss_family,
481 SOCK_STREAM, IPPROTO_TCP, &sock);
482 memalloc_noio_restore(noio_flag);
483 if (ret)
484 return ret;
485 sock->sk->sk_allocation = GFP_NOFS;
486
487#ifdef CONFIG_LOCKDEP
488 lockdep_set_class(&sock->sk->sk_lock, &socket_class);
489#endif
490
491 set_sock_callbacks(sock, con);
492
493 dout("connect %s\n", ceph_pr_addr(&con->peer_addr.in_addr));
494
495 con_sock_state_connecting(con);
496 ret = sock->ops->connect(sock, (struct sockaddr *)paddr, sizeof(*paddr),
497 O_NONBLOCK);
498 if (ret == -EINPROGRESS) {
499 dout("connect %s EINPROGRESS sk_state = %u\n",
500 ceph_pr_addr(&con->peer_addr.in_addr),
501 sock->sk->sk_state);
502 } else if (ret < 0) {
503 pr_err("connect %s error %d\n",
504 ceph_pr_addr(&con->peer_addr.in_addr), ret);
505 sock_release(sock);
506 return ret;
507 }
508
509 if (ceph_test_opt(from_msgr(con->msgr), TCP_NODELAY)) {
510 int optval = 1;
511
512 ret = kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY,
513 (char *)&optval, sizeof(optval));
514 if (ret)
515 pr_err("kernel_setsockopt(TCP_NODELAY) failed: %d",
516 ret);
517 }
518
519 con->sock = sock;
520 return 0;
521}
522
523static int ceph_tcp_recvmsg(struct socket *sock, void *buf, size_t len)
524{
525 struct kvec iov = {buf, len};
526 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
527 int r;
528
529 iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1, len);
530 r = sock_recvmsg(sock, &msg, msg.msg_flags);
531 if (r == -EAGAIN)
532 r = 0;
533 return r;
534}
535
536static int ceph_tcp_recvpage(struct socket *sock, struct page *page,
537 int page_offset, size_t length)
538{
539 struct bio_vec bvec = {
540 .bv_page = page,
541 .bv_offset = page_offset,
542 .bv_len = length
543 };
544 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
545 int r;
546
547 BUG_ON(page_offset + length > PAGE_SIZE);
548 iov_iter_bvec(&msg.msg_iter, READ | ITER_BVEC, &bvec, 1, length);
549 r = sock_recvmsg(sock, &msg, msg.msg_flags);
550 if (r == -EAGAIN)
551 r = 0;
552 return r;
553}
554
555/*
556 * write something. @more is true if caller will be sending more data
557 * shortly.
558 */
559static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov,
560 size_t kvlen, size_t len, int more)
561{
562 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
563 int r;
564
565 if (more)
566 msg.msg_flags |= MSG_MORE;
567 else
568 msg.msg_flags |= MSG_EOR; /* superfluous, but what the hell */
569
570 r = kernel_sendmsg(sock, &msg, iov, kvlen, len);
571 if (r == -EAGAIN)
572 r = 0;
573 return r;
574}
575
576static int __ceph_tcp_sendpage(struct socket *sock, struct page *page,
577 int offset, size_t size, bool more)
578{
579 int flags = MSG_DONTWAIT | MSG_NOSIGNAL | (more ? MSG_MORE : MSG_EOR);
580 int ret;
581
582 ret = kernel_sendpage(sock, page, offset, size, flags);
583 if (ret == -EAGAIN)
584 ret = 0;
585
586 return ret;
587}
588
589static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
590 int offset, size_t size, bool more)
591{
592 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
593 struct bio_vec bvec;
594 int ret;
595
596 /* sendpage cannot properly handle pages with page_count == 0,
597 * we need to fallback to sendmsg if that's the case */
598 if (page_count(page) >= 1)
599 return __ceph_tcp_sendpage(sock, page, offset, size, more);
600
601 bvec.bv_page = page;
602 bvec.bv_offset = offset;
603 bvec.bv_len = size;
604
605 if (more)
606 msg.msg_flags |= MSG_MORE;
607 else
608 msg.msg_flags |= MSG_EOR; /* superfluous, but what the hell */
609
610 iov_iter_bvec(&msg.msg_iter, WRITE | ITER_BVEC, &bvec, 1, size);
611 ret = sock_sendmsg(sock, &msg);
612 if (ret == -EAGAIN)
613 ret = 0;
614
615 return ret;
616}
617
618/*
619 * Shutdown/close the socket for the given connection.
620 */
621static int con_close_socket(struct ceph_connection *con)
622{
623 int rc = 0;
624
625 dout("con_close_socket on %p sock %p\n", con, con->sock);
626 if (con->sock) {
627 rc = con->sock->ops->shutdown(con->sock, SHUT_RDWR);
628 sock_release(con->sock);
629 con->sock = NULL;
630 }
631
632 /*
633 * Forcibly clear the SOCK_CLOSED flag. It gets set
634 * independent of the connection mutex, and we could have
635 * received a socket close event before we had the chance to
636 * shut the socket down.
637 */
638 con_flag_clear(con, CON_FLAG_SOCK_CLOSED);
639
640 con_sock_state_closed(con);
641 return rc;
642}
643
644/*
645 * Reset a connection. Discard all incoming and outgoing messages
646 * and clear *_seq state.
647 */
648static void ceph_msg_remove(struct ceph_msg *msg)
649{
650 list_del_init(&msg->list_head);
651
652 ceph_msg_put(msg);
653}
654static void ceph_msg_remove_list(struct list_head *head)
655{
656 while (!list_empty(head)) {
657 struct ceph_msg *msg = list_first_entry(head, struct ceph_msg,
658 list_head);
659 ceph_msg_remove(msg);
660 }
661}
662
663static void reset_connection(struct ceph_connection *con)
664{
665 /* reset connection, out_queue, msg_ and connect_seq */
666 /* discard existing out_queue and msg_seq */
667 dout("reset_connection %p\n", con);
668 ceph_msg_remove_list(&con->out_queue);
669 ceph_msg_remove_list(&con->out_sent);
670
671 if (con->in_msg) {
672 BUG_ON(con->in_msg->con != con);
673 ceph_msg_put(con->in_msg);
674 con->in_msg = NULL;
675 }
676
677 con->connect_seq = 0;
678 con->out_seq = 0;
679 if (con->out_msg) {
680 BUG_ON(con->out_msg->con != con);
681 ceph_msg_put(con->out_msg);
682 con->out_msg = NULL;
683 }
684 con->in_seq = 0;
685 con->in_seq_acked = 0;
686
687 con->out_skip = 0;
688}
689
690/*
691 * mark a peer down. drop any open connections.
692 */
693void ceph_con_close(struct ceph_connection *con)
694{
695 mutex_lock(&con->mutex);
696 dout("con_close %p peer %s\n", con,
697 ceph_pr_addr(&con->peer_addr.in_addr));
698 con->state = CON_STATE_CLOSED;
699
700 con_flag_clear(con, CON_FLAG_LOSSYTX); /* so we retry next connect */
701 con_flag_clear(con, CON_FLAG_KEEPALIVE_PENDING);
702 con_flag_clear(con, CON_FLAG_WRITE_PENDING);
703 con_flag_clear(con, CON_FLAG_BACKOFF);
704
705 reset_connection(con);
706 con->peer_global_seq = 0;
707 cancel_con(con);
708 con_close_socket(con);
709 mutex_unlock(&con->mutex);
710}
711EXPORT_SYMBOL(ceph_con_close);
712
713/*
714 * Reopen a closed connection, with a new peer address.
715 */
716void ceph_con_open(struct ceph_connection *con,
717 __u8 entity_type, __u64 entity_num,
718 struct ceph_entity_addr *addr)
719{
720 mutex_lock(&con->mutex);
721 dout("con_open %p %s\n", con, ceph_pr_addr(&addr->in_addr));
722
723 WARN_ON(con->state != CON_STATE_CLOSED);
724 con->state = CON_STATE_PREOPEN;
725
726 con->peer_name.type = (__u8) entity_type;
727 con->peer_name.num = cpu_to_le64(entity_num);
728
729 memcpy(&con->peer_addr, addr, sizeof(*addr));
730 con->delay = 0; /* reset backoff memory */
731 mutex_unlock(&con->mutex);
732 queue_con(con);
733}
734EXPORT_SYMBOL(ceph_con_open);
735
736/*
737 * return true if this connection ever successfully opened
738 */
739bool ceph_con_opened(struct ceph_connection *con)
740{
741 return con->connect_seq > 0;
742}
743
744/*
745 * initialize a new connection.
746 */
747void ceph_con_init(struct ceph_connection *con, void *private,
748 const struct ceph_connection_operations *ops,
749 struct ceph_messenger *msgr)
750{
751 dout("con_init %p\n", con);
752 memset(con, 0, sizeof(*con));
753 con->private = private;
754 con->ops = ops;
755 con->msgr = msgr;
756
757 con_sock_state_init(con);
758
759 mutex_init(&con->mutex);
760 INIT_LIST_HEAD(&con->out_queue);
761 INIT_LIST_HEAD(&con->out_sent);
762 INIT_DELAYED_WORK(&con->work, ceph_con_workfn);
763
764 con->state = CON_STATE_CLOSED;
765}
766EXPORT_SYMBOL(ceph_con_init);
767
768
769/*
770 * We maintain a global counter to order connection attempts. Get
771 * a unique seq greater than @gt.
772 */
773static u32 get_global_seq(struct ceph_messenger *msgr, u32 gt)
774{
775 u32 ret;
776
777 spin_lock(&msgr->global_seq_lock);
778 if (msgr->global_seq < gt)
779 msgr->global_seq = gt;
780 ret = ++msgr->global_seq;
781 spin_unlock(&msgr->global_seq_lock);
782 return ret;
783}
784
785static void con_out_kvec_reset(struct ceph_connection *con)
786{
787 BUG_ON(con->out_skip);
788
789 con->out_kvec_left = 0;
790 con->out_kvec_bytes = 0;
791 con->out_kvec_cur = &con->out_kvec[0];
792}
793
794static void con_out_kvec_add(struct ceph_connection *con,
795 size_t size, void *data)
796{
797 int index = con->out_kvec_left;
798
799 BUG_ON(con->out_skip);
800 BUG_ON(index >= ARRAY_SIZE(con->out_kvec));
801
802 con->out_kvec[index].iov_len = size;
803 con->out_kvec[index].iov_base = data;
804 con->out_kvec_left++;
805 con->out_kvec_bytes += size;
806}
807
808/*
809 * Chop off a kvec from the end. Return residual number of bytes for
810 * that kvec, i.e. how many bytes would have been written if the kvec
811 * hadn't been nuked.
812 */
813static int con_out_kvec_skip(struct ceph_connection *con)
814{
815 int off = con->out_kvec_cur - con->out_kvec;
816 int skip = 0;
817
818 if (con->out_kvec_bytes > 0) {
819 skip = con->out_kvec[off + con->out_kvec_left - 1].iov_len;
820 BUG_ON(con->out_kvec_bytes < skip);
821 BUG_ON(!con->out_kvec_left);
822 con->out_kvec_bytes -= skip;
823 con->out_kvec_left--;
824 }
825
826 return skip;
827}
828
829#ifdef CONFIG_BLOCK
830
831/*
832 * For a bio data item, a piece is whatever remains of the next
833 * entry in the current bio iovec, or the first entry in the next
834 * bio in the list.
835 */
836static void ceph_msg_data_bio_cursor_init(struct ceph_msg_data_cursor *cursor,
837 size_t length)
838{
839 struct ceph_msg_data *data = cursor->data;
840 struct ceph_bio_iter *it = &cursor->bio_iter;
841
842 cursor->resid = min_t(size_t, length, data->bio_length);
843 *it = data->bio_pos;
844 if (cursor->resid < it->iter.bi_size)
845 it->iter.bi_size = cursor->resid;
846
847 BUG_ON(cursor->resid < bio_iter_len(it->bio, it->iter));
848 cursor->last_piece = cursor->resid == bio_iter_len(it->bio, it->iter);
849}
850
851static struct page *ceph_msg_data_bio_next(struct ceph_msg_data_cursor *cursor,
852 size_t *page_offset,
853 size_t *length)
854{
855 struct bio_vec bv = bio_iter_iovec(cursor->bio_iter.bio,
856 cursor->bio_iter.iter);
857
858 *page_offset = bv.bv_offset;
859 *length = bv.bv_len;
860 return bv.bv_page;
861}
862
863static bool ceph_msg_data_bio_advance(struct ceph_msg_data_cursor *cursor,
864 size_t bytes)
865{
866 struct ceph_bio_iter *it = &cursor->bio_iter;
867
868 BUG_ON(bytes > cursor->resid);
869 BUG_ON(bytes > bio_iter_len(it->bio, it->iter));
870 cursor->resid -= bytes;
871 bio_advance_iter(it->bio, &it->iter, bytes);
872
873 if (!cursor->resid) {
874 BUG_ON(!cursor->last_piece);
875 return false; /* no more data */
876 }
877
878 if (!bytes || (it->iter.bi_size && it->iter.bi_bvec_done))
879 return false; /* more bytes to process in this segment */
880
881 if (!it->iter.bi_size) {
882 it->bio = it->bio->bi_next;
883 it->iter = it->bio->bi_iter;
884 if (cursor->resid < it->iter.bi_size)
885 it->iter.bi_size = cursor->resid;
886 }
887
888 BUG_ON(cursor->last_piece);
889 BUG_ON(cursor->resid < bio_iter_len(it->bio, it->iter));
890 cursor->last_piece = cursor->resid == bio_iter_len(it->bio, it->iter);
891 return true;
892}
893#endif /* CONFIG_BLOCK */
894
895static void ceph_msg_data_bvecs_cursor_init(struct ceph_msg_data_cursor *cursor,
896 size_t length)
897{
898 struct ceph_msg_data *data = cursor->data;
899 struct bio_vec *bvecs = data->bvec_pos.bvecs;
900
901 cursor->resid = min_t(size_t, length, data->bvec_pos.iter.bi_size);
902 cursor->bvec_iter = data->bvec_pos.iter;
903 cursor->bvec_iter.bi_size = cursor->resid;
904
905 BUG_ON(cursor->resid < bvec_iter_len(bvecs, cursor->bvec_iter));
906 cursor->last_piece =
907 cursor->resid == bvec_iter_len(bvecs, cursor->bvec_iter);
908}
909
910static struct page *ceph_msg_data_bvecs_next(struct ceph_msg_data_cursor *cursor,
911 size_t *page_offset,
912 size_t *length)
913{
914 struct bio_vec bv = bvec_iter_bvec(cursor->data->bvec_pos.bvecs,
915 cursor->bvec_iter);
916
917 *page_offset = bv.bv_offset;
918 *length = bv.bv_len;
919 return bv.bv_page;
920}
921
922static bool ceph_msg_data_bvecs_advance(struct ceph_msg_data_cursor *cursor,
923 size_t bytes)
924{
925 struct bio_vec *bvecs = cursor->data->bvec_pos.bvecs;
926
927 BUG_ON(bytes > cursor->resid);
928 BUG_ON(bytes > bvec_iter_len(bvecs, cursor->bvec_iter));
929 cursor->resid -= bytes;
930 bvec_iter_advance(bvecs, &cursor->bvec_iter, bytes);
931
932 if (!cursor->resid) {
933 BUG_ON(!cursor->last_piece);
934 return false; /* no more data */
935 }
936
937 if (!bytes || cursor->bvec_iter.bi_bvec_done)
938 return false; /* more bytes to process in this segment */
939
940 BUG_ON(cursor->last_piece);
941 BUG_ON(cursor->resid < bvec_iter_len(bvecs, cursor->bvec_iter));
942 cursor->last_piece =
943 cursor->resid == bvec_iter_len(bvecs, cursor->bvec_iter);
944 return true;
945}
946
947/*
948 * For a page array, a piece comes from the first page in the array
949 * that has not already been fully consumed.
950 */
951static void ceph_msg_data_pages_cursor_init(struct ceph_msg_data_cursor *cursor,
952 size_t length)
953{
954 struct ceph_msg_data *data = cursor->data;
955 int page_count;
956
957 BUG_ON(data->type != CEPH_MSG_DATA_PAGES);
958
959 BUG_ON(!data->pages);
960 BUG_ON(!data->length);
961
962 cursor->resid = min(length, data->length);
963 page_count = calc_pages_for(data->alignment, (u64)data->length);
964 cursor->page_offset = data->alignment & ~PAGE_MASK;
965 cursor->page_index = 0;
966 BUG_ON(page_count > (int)USHRT_MAX);
967 cursor->page_count = (unsigned short)page_count;
968 BUG_ON(length > SIZE_MAX - cursor->page_offset);
969 cursor->last_piece = cursor->page_offset + cursor->resid <= PAGE_SIZE;
970}
971
972static struct page *
973ceph_msg_data_pages_next(struct ceph_msg_data_cursor *cursor,
974 size_t *page_offset, size_t *length)
975{
976 struct ceph_msg_data *data = cursor->data;
977
978 BUG_ON(data->type != CEPH_MSG_DATA_PAGES);
979
980 BUG_ON(cursor->page_index >= cursor->page_count);
981 BUG_ON(cursor->page_offset >= PAGE_SIZE);
982
983 *page_offset = cursor->page_offset;
984 if (cursor->last_piece)
985 *length = cursor->resid;
986 else
987 *length = PAGE_SIZE - *page_offset;
988
989 return data->pages[cursor->page_index];
990}
991
992static bool ceph_msg_data_pages_advance(struct ceph_msg_data_cursor *cursor,
993 size_t bytes)
994{
995 BUG_ON(cursor->data->type != CEPH_MSG_DATA_PAGES);
996
997 BUG_ON(cursor->page_offset + bytes > PAGE_SIZE);
998
999 /* Advance the cursor page offset */
1000
1001 cursor->resid -= bytes;
1002 cursor->page_offset = (cursor->page_offset + bytes) & ~PAGE_MASK;
1003 if (!bytes || cursor->page_offset)
1004 return false; /* more bytes to process in the current page */
1005
1006 if (!cursor->resid)
1007 return false; /* no more data */
1008
1009 /* Move on to the next page; offset is already at 0 */
1010
1011 BUG_ON(cursor->page_index >= cursor->page_count);
1012 cursor->page_index++;
1013 cursor->last_piece = cursor->resid <= PAGE_SIZE;
1014
1015 return true;
1016}
1017
1018/*
1019 * For a pagelist, a piece is whatever remains to be consumed in the
1020 * first page in the list, or the front of the next page.
1021 */
1022static void
1023ceph_msg_data_pagelist_cursor_init(struct ceph_msg_data_cursor *cursor,
1024 size_t length)
1025{
1026 struct ceph_msg_data *data = cursor->data;
1027 struct ceph_pagelist *pagelist;
1028 struct page *page;
1029
1030 BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST);
1031
1032 pagelist = data->pagelist;
1033 BUG_ON(!pagelist);
1034
1035 if (!length)
1036 return; /* pagelist can be assigned but empty */
1037
1038 BUG_ON(list_empty(&pagelist->head));
1039 page = list_first_entry(&pagelist->head, struct page, lru);
1040
1041 cursor->resid = min(length, pagelist->length);
1042 cursor->page = page;
1043 cursor->offset = 0;
1044 cursor->last_piece = cursor->resid <= PAGE_SIZE;
1045}
1046
1047static struct page *
1048ceph_msg_data_pagelist_next(struct ceph_msg_data_cursor *cursor,
1049 size_t *page_offset, size_t *length)
1050{
1051 struct ceph_msg_data *data = cursor->data;
1052 struct ceph_pagelist *pagelist;
1053
1054 BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST);
1055
1056 pagelist = data->pagelist;
1057 BUG_ON(!pagelist);
1058
1059 BUG_ON(!cursor->page);
1060 BUG_ON(cursor->offset + cursor->resid != pagelist->length);
1061
1062 /* offset of first page in pagelist is always 0 */
1063 *page_offset = cursor->offset & ~PAGE_MASK;
1064 if (cursor->last_piece)
1065 *length = cursor->resid;
1066 else
1067 *length = PAGE_SIZE - *page_offset;
1068
1069 return cursor->page;
1070}
1071
1072static bool ceph_msg_data_pagelist_advance(struct ceph_msg_data_cursor *cursor,
1073 size_t bytes)
1074{
1075 struct ceph_msg_data *data = cursor->data;
1076 struct ceph_pagelist *pagelist;
1077
1078 BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST);
1079
1080 pagelist = data->pagelist;
1081 BUG_ON(!pagelist);
1082
1083 BUG_ON(cursor->offset + cursor->resid != pagelist->length);
1084 BUG_ON((cursor->offset & ~PAGE_MASK) + bytes > PAGE_SIZE);
1085
1086 /* Advance the cursor offset */
1087
1088 cursor->resid -= bytes;
1089 cursor->offset += bytes;
1090 /* offset of first page in pagelist is always 0 */
1091 if (!bytes || cursor->offset & ~PAGE_MASK)
1092 return false; /* more bytes to process in the current page */
1093
1094 if (!cursor->resid)
1095 return false; /* no more data */
1096
1097 /* Move on to the next page */
1098
1099 BUG_ON(list_is_last(&cursor->page->lru, &pagelist->head));
1100 cursor->page = list_next_entry(cursor->page, lru);
1101 cursor->last_piece = cursor->resid <= PAGE_SIZE;
1102
1103 return true;
1104}
1105
1106/*
1107 * Message data is handled (sent or received) in pieces, where each
1108 * piece resides on a single page. The network layer might not
1109 * consume an entire piece at once. A data item's cursor keeps
1110 * track of which piece is next to process and how much remains to
1111 * be processed in that piece. It also tracks whether the current
1112 * piece is the last one in the data item.
1113 */
1114static void __ceph_msg_data_cursor_init(struct ceph_msg_data_cursor *cursor)
1115{
1116 size_t length = cursor->total_resid;
1117
1118 switch (cursor->data->type) {
1119 case CEPH_MSG_DATA_PAGELIST:
1120 ceph_msg_data_pagelist_cursor_init(cursor, length);
1121 break;
1122 case CEPH_MSG_DATA_PAGES:
1123 ceph_msg_data_pages_cursor_init(cursor, length);
1124 break;
1125#ifdef CONFIG_BLOCK
1126 case CEPH_MSG_DATA_BIO:
1127 ceph_msg_data_bio_cursor_init(cursor, length);
1128 break;
1129#endif /* CONFIG_BLOCK */
1130 case CEPH_MSG_DATA_BVECS:
1131 ceph_msg_data_bvecs_cursor_init(cursor, length);
1132 break;
1133 case CEPH_MSG_DATA_NONE:
1134 default:
1135 /* BUG(); */
1136 break;
1137 }
1138 cursor->need_crc = true;
1139}
1140
1141static void ceph_msg_data_cursor_init(struct ceph_msg *msg, size_t length)
1142{
1143 struct ceph_msg_data_cursor *cursor = &msg->cursor;
1144 struct ceph_msg_data *data;
1145
1146 BUG_ON(!length);
1147 BUG_ON(length > msg->data_length);
1148 BUG_ON(list_empty(&msg->data));
1149
1150 cursor->data_head = &msg->data;
1151 cursor->total_resid = length;
1152 data = list_first_entry(&msg->data, struct ceph_msg_data, links);
1153 cursor->data = data;
1154
1155 __ceph_msg_data_cursor_init(cursor);
1156}
1157
1158/*
1159 * Return the page containing the next piece to process for a given
1160 * data item, and supply the page offset and length of that piece.
1161 * Indicate whether this is the last piece in this data item.
1162 */
1163static struct page *ceph_msg_data_next(struct ceph_msg_data_cursor *cursor,
1164 size_t *page_offset, size_t *length,
1165 bool *last_piece)
1166{
1167 struct page *page;
1168
1169 switch (cursor->data->type) {
1170 case CEPH_MSG_DATA_PAGELIST:
1171 page = ceph_msg_data_pagelist_next(cursor, page_offset, length);
1172 break;
1173 case CEPH_MSG_DATA_PAGES:
1174 page = ceph_msg_data_pages_next(cursor, page_offset, length);
1175 break;
1176#ifdef CONFIG_BLOCK
1177 case CEPH_MSG_DATA_BIO:
1178 page = ceph_msg_data_bio_next(cursor, page_offset, length);
1179 break;
1180#endif /* CONFIG_BLOCK */
1181 case CEPH_MSG_DATA_BVECS:
1182 page = ceph_msg_data_bvecs_next(cursor, page_offset, length);
1183 break;
1184 case CEPH_MSG_DATA_NONE:
1185 default:
1186 page = NULL;
1187 break;
1188 }
1189
1190 BUG_ON(!page);
1191 BUG_ON(*page_offset + *length > PAGE_SIZE);
1192 BUG_ON(!*length);
1193 BUG_ON(*length > cursor->resid);
1194 if (last_piece)
1195 *last_piece = cursor->last_piece;
1196
1197 return page;
1198}
1199
1200/*
1201 * Returns true if the result moves the cursor on to the next piece
1202 * of the data item.
1203 */
1204static void ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor,
1205 size_t bytes)
1206{
1207 bool new_piece;
1208
1209 BUG_ON(bytes > cursor->resid);
1210 switch (cursor->data->type) {
1211 case CEPH_MSG_DATA_PAGELIST:
1212 new_piece = ceph_msg_data_pagelist_advance(cursor, bytes);
1213 break;
1214 case CEPH_MSG_DATA_PAGES:
1215 new_piece = ceph_msg_data_pages_advance(cursor, bytes);
1216 break;
1217#ifdef CONFIG_BLOCK
1218 case CEPH_MSG_DATA_BIO:
1219 new_piece = ceph_msg_data_bio_advance(cursor, bytes);
1220 break;
1221#endif /* CONFIG_BLOCK */
1222 case CEPH_MSG_DATA_BVECS:
1223 new_piece = ceph_msg_data_bvecs_advance(cursor, bytes);
1224 break;
1225 case CEPH_MSG_DATA_NONE:
1226 default:
1227 BUG();
1228 break;
1229 }
1230 cursor->total_resid -= bytes;
1231
1232 if (!cursor->resid && cursor->total_resid) {
1233 WARN_ON(!cursor->last_piece);
1234 BUG_ON(list_is_last(&cursor->data->links, cursor->data_head));
1235 cursor->data = list_next_entry(cursor->data, links);
1236 __ceph_msg_data_cursor_init(cursor);
1237 new_piece = true;
1238 }
1239 cursor->need_crc = new_piece;
1240}
1241
1242static size_t sizeof_footer(struct ceph_connection *con)
1243{
1244 return (con->peer_features & CEPH_FEATURE_MSG_AUTH) ?
1245 sizeof(struct ceph_msg_footer) :
1246 sizeof(struct ceph_msg_footer_old);
1247}
1248
1249static void prepare_message_data(struct ceph_msg *msg, u32 data_len)
1250{
1251 BUG_ON(!msg);
1252 BUG_ON(!data_len);
1253
1254 /* Initialize data cursor */
1255
1256 ceph_msg_data_cursor_init(msg, (size_t)data_len);
1257}
1258
1259/*
1260 * Prepare footer for currently outgoing message, and finish things
1261 * off. Assumes out_kvec* are already valid.. we just add on to the end.
1262 */
1263static void prepare_write_message_footer(struct ceph_connection *con)
1264{
1265 struct ceph_msg *m = con->out_msg;
1266
1267 m->footer.flags |= CEPH_MSG_FOOTER_COMPLETE;
1268
1269 dout("prepare_write_message_footer %p\n", con);
1270 con_out_kvec_add(con, sizeof_footer(con), &m->footer);
1271 if (con->peer_features & CEPH_FEATURE_MSG_AUTH) {
1272 if (con->ops->sign_message)
1273 con->ops->sign_message(m);
1274 else
1275 m->footer.sig = 0;
1276 } else {
1277 m->old_footer.flags = m->footer.flags;
1278 }
1279 con->out_more = m->more_to_follow;
1280 con->out_msg_done = true;
1281}
1282
1283/*
1284 * Prepare headers for the next outgoing message.
1285 */
1286static void prepare_write_message(struct ceph_connection *con)
1287{
1288 struct ceph_msg *m;
1289 u32 crc;
1290
1291 con_out_kvec_reset(con);
1292 con->out_msg_done = false;
1293
1294 /* Sneak an ack in there first? If we can get it into the same
1295 * TCP packet that's a good thing. */
1296 if (con->in_seq > con->in_seq_acked) {
1297 con->in_seq_acked = con->in_seq;
1298 con_out_kvec_add(con, sizeof (tag_ack), &tag_ack);
1299 con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
1300 con_out_kvec_add(con, sizeof (con->out_temp_ack),
1301 &con->out_temp_ack);
1302 }
1303
1304 BUG_ON(list_empty(&con->out_queue));
1305 m = list_first_entry(&con->out_queue, struct ceph_msg, list_head);
1306 con->out_msg = m;
1307 BUG_ON(m->con != con);
1308
1309 /* put message on sent list */
1310 ceph_msg_get(m);
1311 list_move_tail(&m->list_head, &con->out_sent);
1312
1313 /*
1314 * only assign outgoing seq # if we haven't sent this message
1315 * yet. if it is requeued, resend with it's original seq.
1316 */
1317 if (m->needs_out_seq) {
1318 m->hdr.seq = cpu_to_le64(++con->out_seq);
1319 m->needs_out_seq = false;
1320
1321 if (con->ops->reencode_message)
1322 con->ops->reencode_message(m);
1323 }
1324
1325 dout("prepare_write_message %p seq %lld type %d len %d+%d+%zd\n",
1326 m, con->out_seq, le16_to_cpu(m->hdr.type),
1327 le32_to_cpu(m->hdr.front_len), le32_to_cpu(m->hdr.middle_len),
1328 m->data_length);
1329 WARN_ON(m->front.iov_len != le32_to_cpu(m->hdr.front_len));
1330 WARN_ON(m->data_length != le32_to_cpu(m->hdr.data_len));
1331
1332 /* tag + hdr + front + middle */
1333 con_out_kvec_add(con, sizeof (tag_msg), &tag_msg);
1334 con_out_kvec_add(con, sizeof(con->out_hdr), &con->out_hdr);
1335 con_out_kvec_add(con, m->front.iov_len, m->front.iov_base);
1336
1337 if (m->middle)
1338 con_out_kvec_add(con, m->middle->vec.iov_len,
1339 m->middle->vec.iov_base);
1340
1341 /* fill in hdr crc and finalize hdr */
1342 crc = crc32c(0, &m->hdr, offsetof(struct ceph_msg_header, crc));
1343 con->out_msg->hdr.crc = cpu_to_le32(crc);
1344 memcpy(&con->out_hdr, &con->out_msg->hdr, sizeof(con->out_hdr));
1345
1346 /* fill in front and middle crc, footer */
1347 crc = crc32c(0, m->front.iov_base, m->front.iov_len);
1348 con->out_msg->footer.front_crc = cpu_to_le32(crc);
1349 if (m->middle) {
1350 crc = crc32c(0, m->middle->vec.iov_base,
1351 m->middle->vec.iov_len);
1352 con->out_msg->footer.middle_crc = cpu_to_le32(crc);
1353 } else
1354 con->out_msg->footer.middle_crc = 0;
1355 dout("%s front_crc %u middle_crc %u\n", __func__,
1356 le32_to_cpu(con->out_msg->footer.front_crc),
1357 le32_to_cpu(con->out_msg->footer.middle_crc));
1358 con->out_msg->footer.flags = 0;
1359
1360 /* is there a data payload? */
1361 con->out_msg->footer.data_crc = 0;
1362 if (m->data_length) {
1363 prepare_message_data(con->out_msg, m->data_length);
1364 con->out_more = 1; /* data + footer will follow */
1365 } else {
1366 /* no, queue up footer too and be done */
1367 prepare_write_message_footer(con);
1368 }
1369
1370 con_flag_set(con, CON_FLAG_WRITE_PENDING);
1371}
1372
1373/*
1374 * Prepare an ack.
1375 */
1376static void prepare_write_ack(struct ceph_connection *con)
1377{
1378 dout("prepare_write_ack %p %llu -> %llu\n", con,
1379 con->in_seq_acked, con->in_seq);
1380 con->in_seq_acked = con->in_seq;
1381
1382 con_out_kvec_reset(con);
1383
1384 con_out_kvec_add(con, sizeof (tag_ack), &tag_ack);
1385
1386 con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
1387 con_out_kvec_add(con, sizeof (con->out_temp_ack),
1388 &con->out_temp_ack);
1389
1390 con->out_more = 1; /* more will follow.. eventually.. */
1391 con_flag_set(con, CON_FLAG_WRITE_PENDING);
1392}
1393
1394/*
1395 * Prepare to share the seq during handshake
1396 */
1397static void prepare_write_seq(struct ceph_connection *con)
1398{
1399 dout("prepare_write_seq %p %llu -> %llu\n", con,
1400 con->in_seq_acked, con->in_seq);
1401 con->in_seq_acked = con->in_seq;
1402
1403 con_out_kvec_reset(con);
1404
1405 con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
1406 con_out_kvec_add(con, sizeof (con->out_temp_ack),
1407 &con->out_temp_ack);
1408
1409 con_flag_set(con, CON_FLAG_WRITE_PENDING);
1410}
1411
1412/*
1413 * Prepare to write keepalive byte.
1414 */
1415static void prepare_write_keepalive(struct ceph_connection *con)
1416{
1417 dout("prepare_write_keepalive %p\n", con);
1418 con_out_kvec_reset(con);
1419 if (con->peer_features & CEPH_FEATURE_MSGR_KEEPALIVE2) {
1420 struct timespec now;
1421
1422 ktime_get_real_ts(&now);
1423 con_out_kvec_add(con, sizeof(tag_keepalive2), &tag_keepalive2);
1424 ceph_encode_timespec(&con->out_temp_keepalive2, &now);
1425 con_out_kvec_add(con, sizeof(con->out_temp_keepalive2),
1426 &con->out_temp_keepalive2);
1427 } else {
1428 con_out_kvec_add(con, sizeof(tag_keepalive), &tag_keepalive);
1429 }
1430 con_flag_set(con, CON_FLAG_WRITE_PENDING);
1431}
1432
1433/*
1434 * Connection negotiation.
1435 */
1436
1437static struct ceph_auth_handshake *get_connect_authorizer(struct ceph_connection *con,
1438 int *auth_proto)
1439{
1440 struct ceph_auth_handshake *auth;
1441
1442 if (!con->ops->get_authorizer) {
1443 con->out_connect.authorizer_protocol = CEPH_AUTH_UNKNOWN;
1444 con->out_connect.authorizer_len = 0;
1445 return NULL;
1446 }
1447
1448 auth = con->ops->get_authorizer(con, auth_proto, con->auth_retry);
1449 if (IS_ERR(auth))
1450 return auth;
1451
1452 con->auth_reply_buf = auth->authorizer_reply_buf;
1453 con->auth_reply_buf_len = auth->authorizer_reply_buf_len;
1454 return auth;
1455}
1456
1457/*
1458 * We connected to a peer and are saying hello.
1459 */
1460static void prepare_write_banner(struct ceph_connection *con)
1461{
1462 con_out_kvec_add(con, strlen(CEPH_BANNER), CEPH_BANNER);
1463 con_out_kvec_add(con, sizeof (con->msgr->my_enc_addr),
1464 &con->msgr->my_enc_addr);
1465
1466 con->out_more = 0;
1467 con_flag_set(con, CON_FLAG_WRITE_PENDING);
1468}
1469
1470static int prepare_write_connect(struct ceph_connection *con)
1471{
1472 unsigned int global_seq = get_global_seq(con->msgr, 0);
1473 int proto;
1474 int auth_proto;
1475 struct ceph_auth_handshake *auth;
1476
1477 switch (con->peer_name.type) {
1478 case CEPH_ENTITY_TYPE_MON:
1479 proto = CEPH_MONC_PROTOCOL;
1480 break;
1481 case CEPH_ENTITY_TYPE_OSD:
1482 proto = CEPH_OSDC_PROTOCOL;
1483 break;
1484 case CEPH_ENTITY_TYPE_MDS:
1485 proto = CEPH_MDSC_PROTOCOL;
1486 break;
1487 default:
1488 BUG();
1489 }
1490
1491 dout("prepare_write_connect %p cseq=%d gseq=%d proto=%d\n", con,
1492 con->connect_seq, global_seq, proto);
1493
1494 con->out_connect.features =
1495 cpu_to_le64(from_msgr(con->msgr)->supported_features);
1496 con->out_connect.host_type = cpu_to_le32(CEPH_ENTITY_TYPE_CLIENT);
1497 con->out_connect.connect_seq = cpu_to_le32(con->connect_seq);
1498 con->out_connect.global_seq = cpu_to_le32(global_seq);
1499 con->out_connect.protocol_version = cpu_to_le32(proto);
1500 con->out_connect.flags = 0;
1501
1502 auth_proto = CEPH_AUTH_UNKNOWN;
1503 auth = get_connect_authorizer(con, &auth_proto);
1504 if (IS_ERR(auth))
1505 return PTR_ERR(auth);
1506
1507 con->out_connect.authorizer_protocol = cpu_to_le32(auth_proto);
1508 con->out_connect.authorizer_len = auth ?
1509 cpu_to_le32(auth->authorizer_buf_len) : 0;
1510
1511 con_out_kvec_add(con, sizeof (con->out_connect),
1512 &con->out_connect);
1513 if (auth && auth->authorizer_buf_len)
1514 con_out_kvec_add(con, auth->authorizer_buf_len,
1515 auth->authorizer_buf);
1516
1517 con->out_more = 0;
1518 con_flag_set(con, CON_FLAG_WRITE_PENDING);
1519
1520 return 0;
1521}
1522
1523/*
1524 * write as much of pending kvecs to the socket as we can.
1525 * 1 -> done
1526 * 0 -> socket full, but more to do
1527 * <0 -> error
1528 */
1529static int write_partial_kvec(struct ceph_connection *con)
1530{
1531 int ret;
1532
1533 dout("write_partial_kvec %p %d left\n", con, con->out_kvec_bytes);
1534 while (con->out_kvec_bytes > 0) {
1535 ret = ceph_tcp_sendmsg(con->sock, con->out_kvec_cur,
1536 con->out_kvec_left, con->out_kvec_bytes,
1537 con->out_more);
1538 if (ret <= 0)
1539 goto out;
1540 con->out_kvec_bytes -= ret;
1541 if (con->out_kvec_bytes == 0)
1542 break; /* done */
1543
1544 /* account for full iov entries consumed */
1545 while (ret >= con->out_kvec_cur->iov_len) {
1546 BUG_ON(!con->out_kvec_left);
1547 ret -= con->out_kvec_cur->iov_len;
1548 con->out_kvec_cur++;
1549 con->out_kvec_left--;
1550 }
1551 /* and for a partially-consumed entry */
1552 if (ret) {
1553 con->out_kvec_cur->iov_len -= ret;
1554 con->out_kvec_cur->iov_base += ret;
1555 }
1556 }
1557 con->out_kvec_left = 0;
1558 ret = 1;
1559out:
1560 dout("write_partial_kvec %p %d left in %d kvecs ret = %d\n", con,
1561 con->out_kvec_bytes, con->out_kvec_left, ret);
1562 return ret; /* done! */
1563}
1564
1565static u32 ceph_crc32c_page(u32 crc, struct page *page,
1566 unsigned int page_offset,
1567 unsigned int length)
1568{
1569 char *kaddr;
1570
1571 kaddr = kmap(page);
1572 BUG_ON(kaddr == NULL);
1573 crc = crc32c(crc, kaddr + page_offset, length);
1574 kunmap(page);
1575
1576 return crc;
1577}
1578/*
1579 * Write as much message data payload as we can. If we finish, queue
1580 * up the footer.
1581 * 1 -> done, footer is now queued in out_kvec[].
1582 * 0 -> socket full, but more to do
1583 * <0 -> error
1584 */
1585static int write_partial_message_data(struct ceph_connection *con)
1586{
1587 struct ceph_msg *msg = con->out_msg;
1588 struct ceph_msg_data_cursor *cursor = &msg->cursor;
1589 bool do_datacrc = !ceph_test_opt(from_msgr(con->msgr), NOCRC);
1590 u32 crc;
1591
1592 dout("%s %p msg %p\n", __func__, con, msg);
1593
1594 if (list_empty(&msg->data))
1595 return -EINVAL;
1596
1597 /*
1598 * Iterate through each page that contains data to be
1599 * written, and send as much as possible for each.
1600 *
1601 * If we are calculating the data crc (the default), we will
1602 * need to map the page. If we have no pages, they have
1603 * been revoked, so use the zero page.
1604 */
1605 crc = do_datacrc ? le32_to_cpu(msg->footer.data_crc) : 0;
1606 while (cursor->total_resid) {
1607 struct page *page;
1608 size_t page_offset;
1609 size_t length;
1610 bool last_piece;
1611 int ret;
1612
1613 if (!cursor->resid) {
1614 ceph_msg_data_advance(cursor, 0);
1615 continue;
1616 }
1617
1618 page = ceph_msg_data_next(cursor, &page_offset, &length,
1619 &last_piece);
1620 ret = ceph_tcp_sendpage(con->sock, page, page_offset,
1621 length, !last_piece);
1622 if (ret <= 0) {
1623 if (do_datacrc)
1624 msg->footer.data_crc = cpu_to_le32(crc);
1625
1626 return ret;
1627 }
1628 if (do_datacrc && cursor->need_crc)
1629 crc = ceph_crc32c_page(crc, page, page_offset, length);
1630 ceph_msg_data_advance(cursor, (size_t)ret);
1631 }
1632
1633 dout("%s %p msg %p done\n", __func__, con, msg);
1634
1635 /* prepare and queue up footer, too */
1636 if (do_datacrc)
1637 msg->footer.data_crc = cpu_to_le32(crc);
1638 else
1639 msg->footer.flags |= CEPH_MSG_FOOTER_NOCRC;
1640 con_out_kvec_reset(con);
1641 prepare_write_message_footer(con);
1642
1643 return 1; /* must return > 0 to indicate success */
1644}
1645
1646/*
1647 * write some zeros
1648 */
1649static int write_partial_skip(struct ceph_connection *con)
1650{
1651 int ret;
1652
1653 dout("%s %p %d left\n", __func__, con, con->out_skip);
1654 while (con->out_skip > 0) {
1655 size_t size = min(con->out_skip, (int) PAGE_SIZE);
1656
1657 ret = ceph_tcp_sendpage(con->sock, zero_page, 0, size, true);
1658 if (ret <= 0)
1659 goto out;
1660 con->out_skip -= ret;
1661 }
1662 ret = 1;
1663out:
1664 return ret;
1665}
1666
1667/*
1668 * Prepare to read connection handshake, or an ack.
1669 */
1670static void prepare_read_banner(struct ceph_connection *con)
1671{
1672 dout("prepare_read_banner %p\n", con);
1673 con->in_base_pos = 0;
1674}
1675
1676static void prepare_read_connect(struct ceph_connection *con)
1677{
1678 dout("prepare_read_connect %p\n", con);
1679 con->in_base_pos = 0;
1680}
1681
1682static void prepare_read_ack(struct ceph_connection *con)
1683{
1684 dout("prepare_read_ack %p\n", con);
1685 con->in_base_pos = 0;
1686}
1687
1688static void prepare_read_seq(struct ceph_connection *con)
1689{
1690 dout("prepare_read_seq %p\n", con);
1691 con->in_base_pos = 0;
1692 con->in_tag = CEPH_MSGR_TAG_SEQ;
1693}
1694
1695static void prepare_read_tag(struct ceph_connection *con)
1696{
1697 dout("prepare_read_tag %p\n", con);
1698 con->in_base_pos = 0;
1699 con->in_tag = CEPH_MSGR_TAG_READY;
1700}
1701
1702static void prepare_read_keepalive_ack(struct ceph_connection *con)
1703{
1704 dout("prepare_read_keepalive_ack %p\n", con);
1705 con->in_base_pos = 0;
1706}
1707
1708/*
1709 * Prepare to read a message.
1710 */
1711static int prepare_read_message(struct ceph_connection *con)
1712{
1713 dout("prepare_read_message %p\n", con);
1714 BUG_ON(con->in_msg != NULL);
1715 con->in_base_pos = 0;
1716 con->in_front_crc = con->in_middle_crc = con->in_data_crc = 0;
1717 return 0;
1718}
1719
1720
1721static int read_partial(struct ceph_connection *con,
1722 int end, int size, void *object)
1723{
1724 while (con->in_base_pos < end) {
1725 int left = end - con->in_base_pos;
1726 int have = size - left;
1727 int ret = ceph_tcp_recvmsg(con->sock, object + have, left);
1728 if (ret <= 0)
1729 return ret;
1730 con->in_base_pos += ret;
1731 }
1732 return 1;
1733}
1734
1735
1736/*
1737 * Read all or part of the connect-side handshake on a new connection
1738 */
1739static int read_partial_banner(struct ceph_connection *con)
1740{
1741 int size;
1742 int end;
1743 int ret;
1744
1745 dout("read_partial_banner %p at %d\n", con, con->in_base_pos);
1746
1747 /* peer's banner */
1748 size = strlen(CEPH_BANNER);
1749 end = size;
1750 ret = read_partial(con, end, size, con->in_banner);
1751 if (ret <= 0)
1752 goto out;
1753
1754 size = sizeof (con->actual_peer_addr);
1755 end += size;
1756 ret = read_partial(con, end, size, &con->actual_peer_addr);
1757 if (ret <= 0)
1758 goto out;
1759
1760 size = sizeof (con->peer_addr_for_me);
1761 end += size;
1762 ret = read_partial(con, end, size, &con->peer_addr_for_me);
1763 if (ret <= 0)
1764 goto out;
1765
1766out:
1767 return ret;
1768}
1769
1770static int read_partial_connect(struct ceph_connection *con)
1771{
1772 int size;
1773 int end;
1774 int ret;
1775
1776 dout("read_partial_connect %p at %d\n", con, con->in_base_pos);
1777
1778 size = sizeof (con->in_reply);
1779 end = size;
1780 ret = read_partial(con, end, size, &con->in_reply);
1781 if (ret <= 0)
1782 goto out;
1783
1784 size = le32_to_cpu(con->in_reply.authorizer_len);
1785 end += size;
1786 ret = read_partial(con, end, size, con->auth_reply_buf);
1787 if (ret <= 0)
1788 goto out;
1789
1790 dout("read_partial_connect %p tag %d, con_seq = %u, g_seq = %u\n",
1791 con, (int)con->in_reply.tag,
1792 le32_to_cpu(con->in_reply.connect_seq),
1793 le32_to_cpu(con->in_reply.global_seq));
1794out:
1795 return ret;
1796
1797}
1798
1799/*
1800 * Verify the hello banner looks okay.
1801 */
1802static int verify_hello(struct ceph_connection *con)
1803{
1804 if (memcmp(con->in_banner, CEPH_BANNER, strlen(CEPH_BANNER))) {
1805 pr_err("connect to %s got bad banner\n",
1806 ceph_pr_addr(&con->peer_addr.in_addr));
1807 con->error_msg = "protocol error, bad banner";
1808 return -1;
1809 }
1810 return 0;
1811}
1812
1813static bool addr_is_blank(struct sockaddr_storage *ss)
1814{
1815 struct in_addr *addr = &((struct sockaddr_in *)ss)->sin_addr;
1816 struct in6_addr *addr6 = &((struct sockaddr_in6 *)ss)->sin6_addr;
1817
1818 switch (ss->ss_family) {
1819 case AF_INET:
1820 return addr->s_addr == htonl(INADDR_ANY);
1821 case AF_INET6:
1822 return ipv6_addr_any(addr6);
1823 default:
1824 return true;
1825 }
1826}
1827
1828static int addr_port(struct sockaddr_storage *ss)
1829{
1830 switch (ss->ss_family) {
1831 case AF_INET:
1832 return ntohs(((struct sockaddr_in *)ss)->sin_port);
1833 case AF_INET6:
1834 return ntohs(((struct sockaddr_in6 *)ss)->sin6_port);
1835 }
1836 return 0;
1837}
1838
1839static void addr_set_port(struct sockaddr_storage *ss, int p)
1840{
1841 switch (ss->ss_family) {
1842 case AF_INET:
1843 ((struct sockaddr_in *)ss)->sin_port = htons(p);
1844 break;
1845 case AF_INET6:
1846 ((struct sockaddr_in6 *)ss)->sin6_port = htons(p);
1847 break;
1848 }
1849}
1850
1851/*
1852 * Unlike other *_pton function semantics, zero indicates success.
1853 */
1854static int ceph_pton(const char *str, size_t len, struct sockaddr_storage *ss,
1855 char delim, const char **ipend)
1856{
1857 struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
1858 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;
1859
1860 memset(ss, 0, sizeof(*ss));
1861
1862 if (in4_pton(str, len, (u8 *)&in4->sin_addr.s_addr, delim, ipend)) {
1863 ss->ss_family = AF_INET;
1864 return 0;
1865 }
1866
1867 if (in6_pton(str, len, (u8 *)&in6->sin6_addr.s6_addr, delim, ipend)) {
1868 ss->ss_family = AF_INET6;
1869 return 0;
1870 }
1871
1872 return -EINVAL;
1873}
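/*
 * Example (minimal sketch): parse a literal address into a
 * sockaddr_storage. Note the inverted return convention relative to
 * in4_pton()/in6_pton(): zero means success.
 *
 *	struct sockaddr_storage ss;
 *	const char *p;
 *
 *	if (ceph_pton("10.1.2.3", strlen("10.1.2.3"), &ss, ',', &p) == 0) {
 *		// ss.ss_family == AF_INET, p points just past the address
 *	}
 */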
1874
1875/*
1876 * Extract hostname string and resolve using kernel DNS facility.
1877 */
1878#ifdef CONFIG_CEPH_LIB_USE_DNS_RESOLVER
1879static int ceph_dns_resolve_name(const char *name, size_t namelen,
1880 struct sockaddr_storage *ss, char delim, const char **ipend)
1881{
1882 const char *end, *delim_p;
1883 char *colon_p, *ip_addr = NULL;
1884 int ip_len, ret;
1885
1886 /*
1887 * The hostname ends immediately before the delimiter or the port
1888 * marker (':'), whichever comes first.
1889 */
1890 delim_p = memchr(name, delim, namelen);
1891 colon_p = memchr(name, ':', namelen);
1892
1893 if (delim_p && colon_p)
1894 end = delim_p < colon_p ? delim_p : colon_p;
1895 else if (!delim_p && colon_p)
1896 end = colon_p;
1897 else {
1898 end = delim_p;
1899 if (!end) /* case: bare hostname, no delimiter or port */
1900 end = name + namelen;
1901 }
1902
1903 if (end <= name)
1904 return -EINVAL;
1905
1906 /* do dns_resolve upcall */
1907 ip_len = dns_query(NULL, name, end - name, NULL, &ip_addr, NULL);
1908 if (ip_len > 0)
1909 ret = ceph_pton(ip_addr, ip_len, ss, -1, NULL);
1910 else
1911 ret = -ESRCH;
1912
1913 kfree(ip_addr);
1914
1915 *ipend = end;
1916
1917 pr_info("resolve '%.*s' (ret=%d): %s\n", (int)(end - name), name,
1918 ret, ret ? "failed" : ceph_pr_addr(ss));
1919
1920 return ret;
1921}
1922#else
1923static inline int ceph_dns_resolve_name(const char *name, size_t namelen,
1924 struct sockaddr_storage *ss, char delim, const char **ipend)
1925{
1926 return -EINVAL;
1927}
1928#endif
1929
1930/*
1931 * Parse a server name (IP or hostname). If a valid IP address is not found
1932 * then try to extract a hostname to resolve using userspace DNS upcall.
1933 */
1934static int ceph_parse_server_name(const char *name, size_t namelen,
1935 struct sockaddr_storage *ss, char delim, const char **ipend)
1936{
1937 int ret;
1938
1939 ret = ceph_pton(name, namelen, ss, delim, ipend);
1940 if (ret)
1941 ret = ceph_dns_resolve_name(name, namelen, ss, delim, ipend);
1942
1943 return ret;
1944}
1945
1946/*
1947 * Parse an ip[:port] list into an addr array. Use the default
1948 * monitor port if a port isn't specified.
1949 */
1950int ceph_parse_ips(const char *c, const char *end,
1951 struct ceph_entity_addr *addr,
1952 int max_count, int *count)
1953{
1954 int i, ret = -EINVAL;
1955 const char *p = c;
1956
1957 dout("parse_ips on '%.*s'\n", (int)(end-c), c);
1958 for (i = 0; i < max_count; i++) {
1959 const char *ipend;
1960 struct sockaddr_storage *ss = &addr[i].in_addr;
1961 int port;
1962 char delim = ',';
1963
1964 if (*p == '[') {
1965 delim = ']';
1966 p++;
1967 }
1968
1969 ret = ceph_parse_server_name(p, end - p, ss, delim, &ipend);
1970 if (ret)
1971 goto bad;
1972 ret = -EINVAL;
1973
1974 p = ipend;
1975
1976 if (delim == ']') {
1977 if (*p != ']') {
1978 dout("missing matching ']'\n");
1979 goto bad;
1980 }
1981 p++;
1982 }
1983
1984 /* port? */
1985 if (p < end && *p == ':') {
1986 port = 0;
1987 p++;
1988 while (p < end && *p >= '0' && *p <= '9') {
1989 port = (port * 10) + (*p - '0');
1990 p++;
1991 }
1992 if (port == 0)
1993 port = CEPH_MON_PORT;
1994 else if (port > 65535)
1995 goto bad;
1996 } else {
1997 port = CEPH_MON_PORT;
1998 }
1999
2000 addr_set_port(ss, port);
2001
2002 dout("parse_ips got %s\n", ceph_pr_addr(ss));
2003
2004 if (p == end)
2005 break;
2006 if (*p != ',')
2007 goto bad;
2008 p++;
2009 }
2010
2011 if (p != end)
2012 goto bad;
2013
2014 if (count)
2015 *count = i + 1;
2016 return 0;
2017
2018bad:
2019 pr_err("parse_ips bad ip '%.*s'\n", (int)(end - c), c);
2020 return ret;
2021}
2022EXPORT_SYMBOL(ceph_parse_ips);
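/*
 * Example (illustrative sketch, not a real caller): parsing a monitor
 * address list such as one taken from a mount option string. A port
 * defaults to CEPH_MON_PORT when omitted, and IPv6 literals use the
 * [addr]:port form.
 *
 *	const char *s = "192.168.0.1:6789,[fe80::1]:6789,192.168.0.3";
 *	struct ceph_entity_addr addrs[3];
 *	int naddrs;
 *
 *	if (ceph_parse_ips(s, s + strlen(s), addrs, 3, &naddrs) == 0) {
 *		// naddrs == 3; addrs[2] got the default monitor port
 *	}
 */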
2023
2024static int process_banner(struct ceph_connection *con)
2025{
2026 dout("process_banner on %p\n", con);
2027
2028 if (verify_hello(con) < 0)
2029 return -1;
2030
2031 ceph_decode_addr(&con->actual_peer_addr);
2032 ceph_decode_addr(&con->peer_addr_for_me);
2033
2034 /*
2035 * Make sure the other end is who we wanted. Note that the other
2036 * end may not yet know its IP address, so if it's 0.0.0.0, give
2037 * it the benefit of the doubt.
2038 */
2039 if (memcmp(&con->peer_addr, &con->actual_peer_addr,
2040 sizeof(con->peer_addr)) != 0 &&
2041 !(addr_is_blank(&con->actual_peer_addr.in_addr) &&
2042 con->actual_peer_addr.nonce == con->peer_addr.nonce)) {
2043 pr_warn("wrong peer, want %s/%d, got %s/%d\n",
2044 ceph_pr_addr(&con->peer_addr.in_addr),
2045 (int)le32_to_cpu(con->peer_addr.nonce),
2046 ceph_pr_addr(&con->actual_peer_addr.in_addr),
2047 (int)le32_to_cpu(con->actual_peer_addr.nonce));
2048 con->error_msg = "wrong peer at address";
2049 return -1;
2050 }
2051
2052 /*
2053 * did we learn our address?
2054 */
2055 if (addr_is_blank(&con->msgr->inst.addr.in_addr)) {
2056 int port = addr_port(&con->msgr->inst.addr.in_addr);
2057
2058 memcpy(&con->msgr->inst.addr.in_addr,
2059 &con->peer_addr_for_me.in_addr,
2060 sizeof(con->peer_addr_for_me.in_addr));
2061 addr_set_port(&con->msgr->inst.addr.in_addr, port);
2062 encode_my_addr(con->msgr);
2063 dout("process_banner learned my addr is %s\n",
2064 ceph_pr_addr(&con->msgr->inst.addr.in_addr));
2065 }
2066
2067 return 0;
2068}
2069
2070static int process_connect(struct ceph_connection *con)
2071{
2072 u64 sup_feat = from_msgr(con->msgr)->supported_features;
2073 u64 req_feat = from_msgr(con->msgr)->required_features;
2074 u64 server_feat = le64_to_cpu(con->in_reply.features);
2075 int ret;
2076
2077 dout("process_connect on %p tag %d\n", con, (int)con->in_tag);
2078
2079 if (con->auth_reply_buf) {
2080 /*
2081 * Any connection that defines ->get_authorizer()
2082 * should also define ->verify_authorizer_reply().
2083 * See get_connect_authorizer().
2084 */
2085 ret = con->ops->verify_authorizer_reply(con);
2086 if (ret < 0) {
2087 con->error_msg = "bad authorize reply";
2088 return ret;
2089 }
2090 }
2091
2092 switch (con->in_reply.tag) {
2093 case CEPH_MSGR_TAG_FEATURES:
2094 pr_err("%s%lld %s feature set mismatch,"
2095 " my %llx < server's %llx, missing %llx\n",
2096 ENTITY_NAME(con->peer_name),
2097 ceph_pr_addr(&con->peer_addr.in_addr),
2098 sup_feat, server_feat, server_feat & ~sup_feat);
2099 con->error_msg = "missing required protocol features";
2100 reset_connection(con);
2101 return -1;
2102
2103 case CEPH_MSGR_TAG_BADPROTOVER:
2104 pr_err("%s%lld %s protocol version mismatch,"
2105 " my %d != server's %d\n",
2106 ENTITY_NAME(con->peer_name),
2107 ceph_pr_addr(&con->peer_addr.in_addr),
2108 le32_to_cpu(con->out_connect.protocol_version),
2109 le32_to_cpu(con->in_reply.protocol_version));
2110 con->error_msg = "protocol version mismatch";
2111 reset_connection(con);
2112 return -1;
2113
2114 case CEPH_MSGR_TAG_BADAUTHORIZER:
2115 con->auth_retry++;
2116 dout("process_connect %p got BADAUTHORIZER attempt %d\n", con,
2117 con->auth_retry);
2118 if (con->auth_retry == 2) {
2119 con->error_msg = "connect authorization failure";
2120 return -1;
2121 }
2122 con_out_kvec_reset(con);
2123 ret = prepare_write_connect(con);
2124 if (ret < 0)
2125 return ret;
2126 prepare_read_connect(con);
2127 break;
2128
2129 case CEPH_MSGR_TAG_RESETSESSION:
2130 /*
2131 * If we connected with a large connect_seq but the peer
2132 * has no record of a session with us (no connection, or
2133 * connect_seq == 0), they will send RESETSESSION to indicate
2134 * that they must have reset their session, and may have
2135 * dropped messages.
2136 */
2137 dout("process_connect got RESET peer seq %u\n",
2138 le32_to_cpu(con->in_reply.connect_seq));
2139 pr_err("%s%lld %s connection reset\n",
2140 ENTITY_NAME(con->peer_name),
2141 ceph_pr_addr(&con->peer_addr.in_addr));
2142 reset_connection(con);
2143 con_out_kvec_reset(con);
2144 ret = prepare_write_connect(con);
2145 if (ret < 0)
2146 return ret;
2147 prepare_read_connect(con);
2148
2149 /* Tell ceph about it. */
2150 mutex_unlock(&con->mutex);
2151 pr_info("reset on %s%lld\n", ENTITY_NAME(con->peer_name));
2152 if (con->ops->peer_reset)
2153 con->ops->peer_reset(con);
2154 mutex_lock(&con->mutex);
2155 if (con->state != CON_STATE_NEGOTIATING)
2156 return -EAGAIN;
2157 break;
2158
2159 case CEPH_MSGR_TAG_RETRY_SESSION:
2160 /*
2161 * If we sent a smaller connect_seq than the peer has, try
2162 * again with a larger value.
2163 */
2164 dout("process_connect got RETRY_SESSION my seq %u, peer %u\n",
2165 le32_to_cpu(con->out_connect.connect_seq),
2166 le32_to_cpu(con->in_reply.connect_seq));
2167 con->connect_seq = le32_to_cpu(con->in_reply.connect_seq);
2168 con_out_kvec_reset(con);
2169 ret = prepare_write_connect(con);
2170 if (ret < 0)
2171 return ret;
2172 prepare_read_connect(con);
2173 break;
2174
2175 case CEPH_MSGR_TAG_RETRY_GLOBAL:
2176 /*
2177 * If we sent a smaller global_seq than the peer has, try
2178 * again with a larger value.
2179 */
2180 dout("process_connect got RETRY_GLOBAL my %u peer_gseq %u\n",
2181 con->peer_global_seq,
2182 le32_to_cpu(con->in_reply.global_seq));
2183 get_global_seq(con->msgr,
2184 le32_to_cpu(con->in_reply.global_seq));
2185 con_out_kvec_reset(con);
2186 ret = prepare_write_connect(con);
2187 if (ret < 0)
2188 return ret;
2189 prepare_read_connect(con);
2190 break;
2191
2192 case CEPH_MSGR_TAG_SEQ:
2193 case CEPH_MSGR_TAG_READY:
2194 if (req_feat & ~server_feat) {
2195 pr_err("%s%lld %s protocol feature mismatch,"
2196 " my required %llx > server's %llx, need %llx\n",
2197 ENTITY_NAME(con->peer_name),
2198 ceph_pr_addr(&con->peer_addr.in_addr),
2199 req_feat, server_feat, req_feat & ~server_feat);
2200 con->error_msg = "missing required protocol features";
2201 reset_connection(con);
2202 return -1;
2203 }
2204
2205 WARN_ON(con->state != CON_STATE_NEGOTIATING);
2206 con->state = CON_STATE_OPEN;
2207 con->auth_retry = 0; /* we authenticated; clear flag */
2208 con->peer_global_seq = le32_to_cpu(con->in_reply.global_seq);
2209 con->connect_seq++;
2210 con->peer_features = server_feat;
2211 dout("process_connect got READY gseq %d cseq %d (%d)\n",
2212 con->peer_global_seq,
2213 le32_to_cpu(con->in_reply.connect_seq),
2214 con->connect_seq);
2215 WARN_ON(con->connect_seq !=
2216 le32_to_cpu(con->in_reply.connect_seq));
2217
2218 if (con->in_reply.flags & CEPH_MSG_CONNECT_LOSSY)
2219 ceph_con_flag_set(con, CEPH_CON_F_LOSSYTX);
2220
2221 con->delay = 0; /* reset backoff memory */
2222
2223 if (con->in_reply.tag == CEPH_MSGR_TAG_SEQ) {
2224 prepare_write_seq(con);
2225 prepare_read_seq(con);
2226 } else {
2227 prepare_read_tag(con);
2228 }
2229 break;
2230
2231 case CEPH_MSGR_TAG_WAIT:
2232 /*
2233 * If there is a connection race (we are opening
2234 * connections to each other), one of us may just have
2235 * to WAIT. This shouldn't happen if we are the
2236 * client.
2237 */
2238 con->error_msg = "protocol error, got WAIT as client";
2239 return -1;
2240
2241 default:
2242 con->error_msg = "protocol error, garbage tag during connect";
2243 return -1;
2244 }
2245 return 0;
2246}
2247
2248
2249/*
2250 * read (part of) an ack
2251 */
2252static int read_partial_ack(struct ceph_connection *con)
2253{
2254 int size = sizeof (con->in_temp_ack);
2255 int end = size;
2256
2257 return read_partial(con, end, size, &con->in_temp_ack);
2258}
2259
2260/*
2261 * We can finally discard anything that's been acked.
2262 */
2263static void process_ack(struct ceph_connection *con)
2264{
2265 struct ceph_msg *m;
2266 u64 ack = le64_to_cpu(con->in_temp_ack);
2267 u64 seq;
2268 bool reconnect = (con->in_tag == CEPH_MSGR_TAG_SEQ);
2269 struct list_head *list = reconnect ? &con->out_queue : &con->out_sent;
2270
2271 /*
2272 * In the reconnect case, con_fault() has requeued the messages
2273 * from out_sent onto out_queue. Clean up old messages according
2274 * to the reconnect seq.
2275 */
2276 while (!list_empty(list)) {
2277 m = list_first_entry(list, struct ceph_msg, list_head);
2278 if (reconnect && m->needs_out_seq)
2279 break;
2280 seq = le64_to_cpu(m->hdr.seq);
2281 if (seq > ack)
2282 break;
2283 dout("got ack for seq %llu type %d at %p\n", seq,
2284 le16_to_cpu(m->hdr.type), m);
2285 m->ack_stamp = jiffies;
2286 ceph_msg_remove(m);
2287 }
2288
2289 prepare_read_tag(con);
2290}
2291
2292
2293static int read_partial_message_section(struct ceph_connection *con,
2294 struct kvec *section,
2295 unsigned int sec_len, u32 *crc)
2296{
2297 int ret, left;
2298
2299 BUG_ON(!section);
2300
2301 while (section->iov_len < sec_len) {
2302 BUG_ON(section->iov_base == NULL);
2303 left = sec_len - section->iov_len;
2304 ret = ceph_tcp_recvmsg(con->sock, (char *)section->iov_base +
2305 section->iov_len, left);
2306 if (ret <= 0)
2307 return ret;
2308 section->iov_len += ret;
2309 }
2310 if (section->iov_len == sec_len)
2311 *crc = crc32c(0, section->iov_base, section->iov_len);
2312
2313 return 1;
2314}
2315
2316static int read_partial_msg_data(struct ceph_connection *con)
2317{
2318 struct ceph_msg *msg = con->in_msg;
2319 struct ceph_msg_data_cursor *cursor = &msg->cursor;
2320 bool do_datacrc = !ceph_test_opt(from_msgr(con->msgr), NOCRC);
2321 struct page *page;
2322 size_t page_offset;
2323 size_t length;
2324 u32 crc = 0;
2325 int ret;
2326
2327 BUG_ON(!msg);
2328 if (list_empty(&msg->data))
2329 return -EIO;
2330
2331 if (do_datacrc)
2332 crc = con->in_data_crc;
2333 while (cursor->total_resid) {
2334 if (!cursor->resid) {
2335 ceph_msg_data_advance(cursor, 0);
2336 continue;
2337 }
2338
2339 page = ceph_msg_data_next(cursor, &page_offset, &length, NULL);
2340 ret = ceph_tcp_recvpage(con->sock, page, page_offset, length);
2341 if (ret <= 0) {
2342 if (do_datacrc)
2343 con->in_data_crc = crc;
2344
2345 return ret;
2346 }
2347
2348 if (do_datacrc)
2349 crc = ceph_crc32c_page(crc, page, page_offset, ret);
2350 ceph_msg_data_advance(cursor, (size_t)ret);
2351 }
2352 if (do_datacrc)
2353 con->in_data_crc = crc;
2354
2355 return 1; /* must return > 0 to indicate success */
2356}
2357
2358/*
2359 * read (part of) a message.
2360 */
2361static int ceph_con_in_msg_alloc(struct ceph_connection *con, int *skip);
2362
2363static int read_partial_message(struct ceph_connection *con)
2364{
2365 struct ceph_msg *m = con->in_msg;
2366 int size;
2367 int end;
2368 int ret;
2369 unsigned int front_len, middle_len, data_len;
2370 bool do_datacrc = !ceph_test_opt(from_msgr(con->msgr), NOCRC);
2371 bool need_sign = (con->peer_features & CEPH_FEATURE_MSG_AUTH);
2372 u64 seq;
2373 u32 crc;
2374
2375 dout("read_partial_message con %p msg %p\n", con, m);
2376
2377 /* header */
2378 size = sizeof (con->in_hdr);
2379 end = size;
2380 ret = read_partial(con, end, size, &con->in_hdr);
2381 if (ret <= 0)
2382 return ret;
2383
2384 crc = crc32c(0, &con->in_hdr, offsetof(struct ceph_msg_header, crc));
2385 if (cpu_to_le32(crc) != con->in_hdr.crc) {
2386 pr_err("read_partial_message bad hdr crc %u != expected %u\n",
2387 crc, con->in_hdr.crc);
2388 return -EBADMSG;
2389 }
2390
2391 front_len = le32_to_cpu(con->in_hdr.front_len);
2392 if (front_len > CEPH_MSG_MAX_FRONT_LEN)
2393 return -EIO;
2394 middle_len = le32_to_cpu(con->in_hdr.middle_len);
2395 if (middle_len > CEPH_MSG_MAX_MIDDLE_LEN)
2396 return -EIO;
2397 data_len = le32_to_cpu(con->in_hdr.data_len);
2398 if (data_len > CEPH_MSG_MAX_DATA_LEN)
2399 return -EIO;
2400
2401 /* verify seq# */
2402 seq = le64_to_cpu(con->in_hdr.seq);
2403 if ((s64)seq - (s64)con->in_seq < 1) {
2404 pr_info("skipping %s%lld %s seq %lld expected %lld\n",
2405 ENTITY_NAME(con->peer_name),
2406 ceph_pr_addr(&con->peer_addr.in_addr),
2407 seq, con->in_seq + 1);
2408 con->in_base_pos = -front_len - middle_len - data_len -
2409 sizeof_footer(con);
2410 con->in_tag = CEPH_MSGR_TAG_READY;
2411 return 1;
2412 } else if ((s64)seq - (s64)con->in_seq > 1) {
2413 pr_err("read_partial_message bad seq %lld expected %lld\n",
2414 seq, con->in_seq + 1);
2415 con->error_msg = "bad message sequence # for incoming message";
2416 return -EBADE;
2417 }
2418
2419 /* allocate message? */
2420 if (!con->in_msg) {
2421 int skip = 0;
2422
2423 dout("got hdr type %d front %d data %d\n", con->in_hdr.type,
2424 front_len, data_len);
2425 ret = ceph_con_in_msg_alloc(con, &skip);
2426 if (ret < 0)
2427 return ret;
2428
2429 BUG_ON(!con->in_msg ^ skip);
2430 if (skip) {
2431 /* skip this message */
2432 dout("alloc_msg said skip message\n");
2433 con->in_base_pos = -front_len - middle_len - data_len -
2434 sizeof_footer(con);
2435 con->in_tag = CEPH_MSGR_TAG_READY;
2436 con->in_seq++;
2437 return 1;
2438 }
2439
2440 BUG_ON(!con->in_msg);
2441 BUG_ON(con->in_msg->con != con);
2442 m = con->in_msg;
2443 m->front.iov_len = 0; /* haven't read it yet */
2444 if (m->middle)
2445 m->middle->vec.iov_len = 0;
2446
2447 /* prepare for data payload, if any */
2448
2449 if (data_len)
2450 prepare_message_data(con->in_msg, data_len);
2451 }
2452
2453 /* front */
2454 ret = read_partial_message_section(con, &m->front, front_len,
2455 &con->in_front_crc);
2456 if (ret <= 0)
2457 return ret;
2458
2459 /* middle */
2460 if (m->middle) {
2461 ret = read_partial_message_section(con, &m->middle->vec,
2462 middle_len,
2463 &con->in_middle_crc);
2464 if (ret <= 0)
2465 return ret;
2466 }
2467
2468 /* (page) data */
2469 if (data_len) {
2470 ret = read_partial_msg_data(con);
2471 if (ret <= 0)
2472 return ret;
2473 }
2474
2475 /* footer */
2476 size = sizeof_footer(con);
2477 end += size;
2478 ret = read_partial(con, end, size, &m->footer);
2479 if (ret <= 0)
2480 return ret;
2481
2482 if (!need_sign) {
2483 m->footer.flags = m->old_footer.flags;
2484 m->footer.sig = 0;
2485 }
2486
2487 dout("read_partial_message got msg %p %d (%u) + %d (%u) + %d (%u)\n",
2488 m, front_len, m->footer.front_crc, middle_len,
2489 m->footer.middle_crc, data_len, m->footer.data_crc);
2490
2491 /* crc ok? */
2492 if (con->in_front_crc != le32_to_cpu(m->footer.front_crc)) {
2493 pr_err("read_partial_message %p front crc %u != exp. %u\n",
2494 m, con->in_front_crc, m->footer.front_crc);
2495 return -EBADMSG;
2496 }
2497 if (con->in_middle_crc != le32_to_cpu(m->footer.middle_crc)) {
2498 pr_err("read_partial_message %p middle crc %u != exp %u\n",
2499 m, con->in_middle_crc, m->footer.middle_crc);
2500 return -EBADMSG;
2501 }
2502 if (do_datacrc &&
2503 (m->footer.flags & CEPH_MSG_FOOTER_NOCRC) == 0 &&
2504 con->in_data_crc != le32_to_cpu(m->footer.data_crc)) {
2505 pr_err("read_partial_message %p data crc %u != exp. %u\n", m,
2506 con->in_data_crc, le32_to_cpu(m->footer.data_crc));
2507 return -EBADMSG;
2508 }
2509
2510 if (need_sign && con->ops->check_message_signature &&
2511 con->ops->check_message_signature(m)) {
2512 pr_err("read_partial_message %p signature check failed\n", m);
2513 return -EBADMSG;
2514 }
2515
2516 return 1; /* done! */
2517}
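/*
 * Note on skipped messages (worked example): when a message must be
 * dropped (stale seq above, or alloc_msg asked to skip it), in_base_pos
 * is set to minus the number of body bytes still on the wire. With,
 * say, front_len 128, middle_len 0, data_len 4096 and a 21-byte signed
 * footer (sizeof_footer(con)):
 *
 *	con->in_base_pos = -(128 + 0 + 4096 + 21);	// -4245
 *
 * try_read() later sees in_base_pos < 0, reads and discards bytes until
 * it reaches zero, and only then looks for the next tag.
 */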
2518
2519/*
2520 * Process message. This happens in the worker thread. The callback should
2521 * be careful not to do anything that waits on other incoming messages or it
2522 * may deadlock.
2523 */
2524static void process_message(struct ceph_connection *con)
2525{
2526 struct ceph_msg *msg = con->in_msg;
2527
2528 BUG_ON(con->in_msg->con != con);
2529 con->in_msg = NULL;
2530
2531 /* if first message, set peer_name */
2532 if (con->peer_name.type == 0)
2533 con->peer_name = msg->hdr.src;
2534
2535 con->in_seq++;
2536 mutex_unlock(&con->mutex);
2537
2538 dout("===== %p %llu from %s%lld %d=%s len %d+%d (%u %u %u) =====\n",
2539 msg, le64_to_cpu(msg->hdr.seq),
2540 ENTITY_NAME(msg->hdr.src),
2541 le16_to_cpu(msg->hdr.type),
2542 ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
2543 le32_to_cpu(msg->hdr.front_len),
2544 le32_to_cpu(msg->hdr.data_len),
2545 con->in_front_crc, con->in_middle_crc, con->in_data_crc);
2546 con->ops->dispatch(con, msg);
2547
2548 mutex_lock(&con->mutex);
2549}
2550
2551static int read_keepalive_ack(struct ceph_connection *con)
2552{
2553 struct ceph_timespec ceph_ts;
2554 size_t size = sizeof(ceph_ts);
2555 int ret = read_partial(con, size, size, &ceph_ts);
2556 if (ret <= 0)
2557 return ret;
2558 ceph_decode_timespec(&con->last_keepalive_ack, &ceph_ts);
2559 prepare_read_tag(con);
2560 return 1;
2561}
2562
2563/*
2564 * Write something to the socket. Called in a worker thread when the
2565 * socket appears to be writeable and we have something ready to send.
2566 */
2567static int try_write(struct ceph_connection *con)
2568{
2569 int ret = 1;
2570
2571 dout("try_write start %p state %lu\n", con, con->state);
2572 if (con->state != CON_STATE_PREOPEN &&
2573 con->state != CON_STATE_CONNECTING &&
2574 con->state != CON_STATE_NEGOTIATING &&
2575 con->state != CON_STATE_OPEN)
2576 return 0;
2577
2578more:
2579 dout("try_write out_kvec_bytes %d\n", con->out_kvec_bytes);
2580
2581 /* open the socket first? */
2582 if (con->state == CON_STATE_PREOPEN) {
2583 BUG_ON(con->sock);
2584 con->state = CON_STATE_CONNECTING;
2585
2586 con_out_kvec_reset(con);
2587 prepare_write_banner(con);
2588 prepare_read_banner(con);
2589
2590 BUG_ON(con->in_msg);
2591 con->in_tag = CEPH_MSGR_TAG_READY;
2592 dout("try_write initiating connect on %p new state %lu\n",
2593 con, con->state);
2594 ret = ceph_tcp_connect(con);
2595 if (ret < 0) {
2596 con->error_msg = "connect error";
2597 goto out;
2598 }
2599 }
2600
2601more_kvec:
2602 BUG_ON(!con->sock);
2603
2604 /* kvec data queued? */
2605 if (con->out_kvec_left) {
2606 ret = write_partial_kvec(con);
2607 if (ret <= 0)
2608 goto out;
2609 }
2610 if (con->out_skip) {
2611 ret = write_partial_skip(con);
2612 if (ret <= 0)
2613 goto out;
2614 }
2615
2616 /* msg pages? */
2617 if (con->out_msg) {
2618 if (con->out_msg_done) {
2619 ceph_msg_put(con->out_msg);
2620 con->out_msg = NULL; /* we're done with this one */
2621 goto do_next;
2622 }
2623
2624 ret = write_partial_message_data(con);
2625 if (ret == 1)
2626 goto more_kvec; /* we need to send the footer, too! */
2627 if (ret == 0)
2628 goto out;
2629 if (ret < 0) {
2630 dout("try_write write_partial_message_data err %d\n",
2631 ret);
2632 goto out;
2633 }
2634 }
2635
2636do_next:
2637 if (con->state == CON_STATE_OPEN) {
2638 if (ceph_con_flag_test_and_clear(con, CEPH_CON_F_KEEPALIVE_PENDING)) {
2639 prepare_write_keepalive(con);
2640 goto more;
2641 }
2642 /* is anything else pending? */
2643 if (!list_empty(&con->out_queue)) {
2644 prepare_write_message(con);
2645 goto more;
2646 }
2647 if (con->in_seq > con->in_seq_acked) {
2648 prepare_write_ack(con);
2649 goto more;
2650 }
2651 }
2652
2653 /* Nothing to do! */
2654 ceph_con_flag_clear(con, CEPH_CON_F_WRITE_PENDING);
2655 dout("try_write nothing else to write.\n");
2656 ret = 0;
2657out:
2658 dout("try_write done on %p ret %d\n", con, ret);
2659 return ret;
2660}
2661
2662
2663
2664/*
2665 * Read what we can from the socket.
2666 */
2667static int try_read(struct ceph_connection *con)
2668{
2669 int ret = -1;
2670
2671more:
2672 dout("try_read start on %p state %lu\n", con, con->state);
2673 if (con->state != CON_STATE_CONNECTING &&
2674 con->state != CON_STATE_NEGOTIATING &&
2675 con->state != CON_STATE_OPEN)
2676 return 0;
2677
2678 BUG_ON(!con->sock);
2679
2680 dout("try_read tag %d in_base_pos %d\n", (int)con->in_tag,
2681 con->in_base_pos);
2682
2683 if (con->state == CON_STATE_CONNECTING) {
2684 dout("try_read connecting\n");
2685 ret = read_partial_banner(con);
2686 if (ret <= 0)
2687 goto out;
2688 ret = process_banner(con);
2689 if (ret < 0)
2690 goto out;
2691
2692 con->state = CON_STATE_NEGOTIATING;
2693
2694 /*
2695 * Received banner is good, exchange connection info.
2696 * Do not reset out_kvec, as sending our banner raced
2697 * with receiving peer banner after connect completed.
2698 */
2699 ret = prepare_write_connect(con);
2700 if (ret < 0)
2701 goto out;
2702 prepare_read_connect(con);
2703
2704 /* Send connection info before awaiting response */
2705 goto out;
2706 }
2707
2708 if (con->state == CON_STATE_NEGOTIATING) {
2709 dout("try_read negotiating\n");
2710 ret = read_partial_connect(con);
2711 if (ret <= 0)
2712 goto out;
2713 ret = process_connect(con);
2714 if (ret < 0)
2715 goto out;
2716 goto more;
2717 }
2718
2719 WARN_ON(con->state != CON_STATE_OPEN);
2720
2721 if (con->in_base_pos < 0) {
2722 /*
2723 * skipping + discarding content.
2724 *
2725 * FIXME: there must be a better way to do this!
2726 */
2727 static char buf[SKIP_BUF_SIZE];
2728 int skip = min((int) sizeof (buf), -con->in_base_pos);
2729
2730 dout("skipping %d / %d bytes\n", skip, -con->in_base_pos);
2731 ret = ceph_tcp_recvmsg(con->sock, buf, skip);
2732 if (ret <= 0)
2733 goto out;
2734 con->in_base_pos += ret;
2735 if (con->in_base_pos)
2736 goto more;
2737 }
2738 if (con->in_tag == CEPH_MSGR_TAG_READY) {
2739 /*
2740 * what's next?
2741 */
2742 ret = ceph_tcp_recvmsg(con->sock, &con->in_tag, 1);
2743 if (ret <= 0)
2744 goto out;
2745 dout("try_read got tag %d\n", (int)con->in_tag);
2746 switch (con->in_tag) {
2747 case CEPH_MSGR_TAG_MSG:
2748 prepare_read_message(con);
2749 break;
2750 case CEPH_MSGR_TAG_ACK:
2751 prepare_read_ack(con);
2752 break;
2753 case CEPH_MSGR_TAG_KEEPALIVE2_ACK:
2754 prepare_read_keepalive_ack(con);
2755 break;
2756 case CEPH_MSGR_TAG_CLOSE:
2757 con_close_socket(con);
2758 con->state = CON_STATE_CLOSED;
2759 goto out;
2760 default:
2761 goto bad_tag;
2762 }
2763 }
2764 if (con->in_tag == CEPH_MSGR_TAG_MSG) {
2765 ret = read_partial_message(con);
2766 if (ret <= 0) {
2767 switch (ret) {
2768 case -EBADMSG:
2769 con->error_msg = "bad crc/signature";
2770 /* fall through */
2771 case -EBADE:
2772 ret = -EIO;
2773 break;
2774 case -EIO:
2775 con->error_msg = "io error";
2776 break;
2777 }
2778 goto out;
2779 }
2780 if (con->in_tag == CEPH_MSGR_TAG_READY)
2781 goto more;
2782 process_message(con);
2783 if (con->state == CON_STATE_OPEN)
2784 prepare_read_tag(con);
2785 goto more;
2786 }
2787 if (con->in_tag == CEPH_MSGR_TAG_ACK ||
2788 con->in_tag == CEPH_MSGR_TAG_SEQ) {
2789 /*
2790 * the final handshake seq exchange is semantically
2791 * equivalent to an ACK
2792 */
2793 ret = read_partial_ack(con);
2794 if (ret <= 0)
2795 goto out;
2796 process_ack(con);
2797 goto more;
2798 }
2799 if (con->in_tag == CEPH_MSGR_TAG_KEEPALIVE2_ACK) {
2800 ret = read_keepalive_ack(con);
2801 if (ret <= 0)
2802 goto out;
2803 goto more;
2804 }
2805
2806out:
2807 dout("try_read done on %p ret %d\n", con, ret);
2808 return ret;
2809
2810bad_tag:
2811 pr_err("try_read bad con->in_tag = %d\n", (int)con->in_tag);
2812 con->error_msg = "protocol error, garbage tag";
2813 ret = -1;
2814 goto out;
2815}
2816
2817
2818/*
2819 * Atomically queue work on a connection after the specified delay.
2820 * Bump @con reference to avoid races with connection teardown.
2821 * Returns 0 if work was queued, or an error code otherwise.
2822 */
2823static int queue_con_delay(struct ceph_connection *con, unsigned long delay)
2824{
2825 if (!con->ops->get(con)) {
2826 dout("%s %p ref count 0\n", __func__, con);
2827 return -ENOENT;
2828 }
2829
2830 if (!queue_delayed_work(ceph_msgr_wq, &con->work, delay)) {
2831 dout("%s %p - already queued\n", __func__, con);
2832 con->ops->put(con);
2833 return -EBUSY;
2834 }
2835
2836 dout("%s %p %lu\n", __func__, con, delay);
2837 return 0;
2838}
2839
2840static void queue_con(struct ceph_connection *con)
2841{
2842 (void) queue_con_delay(con, 0);
2843}
2844
2845static void cancel_con(struct ceph_connection *con)
2846{
2847 if (cancel_delayed_work(&con->work)) {
2848 dout("%s %p\n", __func__, con);
2849 con->ops->put(con);
2850 }
2851}
2852
2853static bool con_sock_closed(struct ceph_connection *con)
2854{
2855 if (!ceph_con_flag_test_and_clear(con, CEPH_CON_F_SOCK_CLOSED))
2856 return false;
2857
2858#define CASE(x) \
2859 case CON_STATE_ ## x: \
2860 con->error_msg = "socket closed (con state " #x ")"; \
2861 break;
2862
2863 switch (con->state) {
2864 CASE(CLOSED);
2865 CASE(PREOPEN);
2866 CASE(CONNECTING);
2867 CASE(NEGOTIATING);
2868 CASE(OPEN);
2869 CASE(STANDBY);
2870 default:
2871 pr_warn("%s con %p unrecognized state %lu\n",
2872 __func__, con, con->state);
2873 con->error_msg = "unrecognized con state";
2874 BUG();
2875 break;
2876 }
2877#undef CASE
2878
2879 return true;
2880}
2881
2882static bool con_backoff(struct ceph_connection *con)
2883{
2884 int ret;
2885
2886 if (!ceph_con_flag_test_and_clear(con, CEPH_CON_F_BACKOFF))
2887 return false;
2888
2889 ret = queue_con_delay(con, round_jiffies_relative(con->delay));
2890 if (ret) {
2891 dout("%s: con %p FAILED to back off %lu\n", __func__,
2892 con, con->delay);
2893 BUG_ON(ret == -ENOENT);
2894 ceph_con_flag_set(con, CEPH_CON_F_BACKOFF);
2895 }
2896
2897 return true;
2898}
2899
2900/* Finish fault handling; con->mutex must *not* be held here */
2901
2902static void con_fault_finish(struct ceph_connection *con)
2903{
2904 dout("%s %p\n", __func__, con);
2905
2906 /*
2907 * in case we faulted due to authentication, invalidate our
2908 * current tickets so that we can get new ones.
2909 */
2910 if (con->auth_retry) {
2911 dout("auth_retry %d, invalidating\n", con->auth_retry);
2912 if (con->ops->invalidate_authorizer)
2913 con->ops->invalidate_authorizer(con);
2914 con->auth_retry = 0;
2915 }
2916
2917 if (con->ops->fault)
2918 con->ops->fault(con);
2919}
2920
2921/*
2922 * Do some work on a connection. Drop a connection ref when we're done.
2923 */
2924static void ceph_con_workfn(struct work_struct *work)
2925{
2926 struct ceph_connection *con = container_of(work, struct ceph_connection,
2927 work.work);
2928 bool fault;
2929
2930 mutex_lock(&con->mutex);
2931 while (true) {
2932 int ret;
2933
2934 if ((fault = con_sock_closed(con))) {
2935 dout("%s: con %p SOCK_CLOSED\n", __func__, con);
2936 break;
2937 }
2938 if (con_backoff(con)) {
2939 dout("%s: con %p BACKOFF\n", __func__, con);
2940 break;
2941 }
2942 if (con->state == CON_STATE_STANDBY) {
2943 dout("%s: con %p STANDBY\n", __func__, con);
2944 break;
2945 }
2946 if (con->state == CON_STATE_CLOSED) {
2947 dout("%s: con %p CLOSED\n", __func__, con);
2948 BUG_ON(con->sock);
2949 break;
2950 }
2951 if (con->state == CON_STATE_PREOPEN) {
2952 dout("%s: con %p PREOPEN\n", __func__, con);
2953 BUG_ON(con->sock);
2954 }
2955
2956 ret = try_read(con);
2957 if (ret < 0) {
2958 if (ret == -EAGAIN)
2959 continue;
2960 if (!con->error_msg)
2961 con->error_msg = "socket error on read";
2962 fault = true;
2963 break;
2964 }
2965
2966 ret = try_write(con);
2967 if (ret < 0) {
2968 if (ret == -EAGAIN)
2969 continue;
2970 if (!con->error_msg)
2971 con->error_msg = "socket error on write";
2972 fault = true;
2973 }
2974
2975 break; /* If we make it to here, we're done */
2976 }
2977 if (fault)
2978 con_fault(con);
2979 mutex_unlock(&con->mutex);
2980
2981 if (fault)
2982 con_fault_finish(con);
2983
2984 con->ops->put(con);
2985}
2986
2987/*
2988 * Generic error/fault handler. Reconnects are retried with
2989 * exponential backoff.
2990 */
2991static void con_fault(struct ceph_connection *con)
2992{
2993 dout("fault %p state %lu to peer %s\n",
2994 con, con->state, ceph_pr_addr(&con->peer_addr.in_addr));
2995
2996 pr_warn("%s%lld %s %s\n", ENTITY_NAME(con->peer_name),
2997 ceph_pr_addr(&con->peer_addr.in_addr), con->error_msg);
2998 con->error_msg = NULL;
2999
3000 WARN_ON(con->state != CON_STATE_CONNECTING &&
3001 con->state != CON_STATE_NEGOTIATING &&
3002 con->state != CON_STATE_OPEN);
3003
3004 con_close_socket(con);
3005
3006 if (ceph_con_flag_test(con, CEPH_CON_F_LOSSYTX)) {
3007 dout("fault on LOSSYTX channel, marking CLOSED\n");
3008 con->state = CON_STATE_CLOSED;
3009 return;
3010 }
3011
3012 if (con->in_msg) {
3013 BUG_ON(con->in_msg->con != con);
3014 ceph_msg_put(con->in_msg);
3015 con->in_msg = NULL;
3016 }
3017
3018 /* Requeue anything that hasn't been acked */
3019 list_splice_init(&con->out_sent, &con->out_queue);
3020
3021 /* If there are no messages queued or keepalive pending, place
3022 * the connection in a STANDBY state */
3023 if (list_empty(&con->out_queue) &&
3024 !ceph_con_flag_test(con, CEPH_CON_F_KEEPALIVE_PENDING)) {
3025 dout("fault %p setting STANDBY clearing WRITE_PENDING\n", con);
3026 ceph_con_flag_clear(con, CEPH_CON_F_WRITE_PENDING);
3027 con->state = CON_STATE_STANDBY;
3028 } else {
3029 /* retry after a delay. */
3030 con->state = CON_STATE_PREOPEN;
3031 if (con->delay == 0)
3032 con->delay = BASE_DELAY_INTERVAL;
3033 else if (con->delay < MAX_DELAY_INTERVAL)
3034 con->delay *= 2;
3035 ceph_con_flag_set(con, CEPH_CON_F_BACKOFF);
3036 queue_con(con);
3037 }
3038}
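/*
 * Backoff progression (illustrative figures only): con->delay starts at
 * BASE_DELAY_INTERVAL and doubles on each successive fault until it is
 * capped at MAX_DELAY_INTERVAL. If the base interval were HZ/4, the
 * retry delays would run HZ/4, HZ/2, HZ, 2*HZ, ... until the cap.
 * con_backoff() then requeues the work with
 *
 *	queue_con_delay(con, round_jiffies_relative(con->delay));
 *
 * so a flapping peer is retried progressively less aggressively.
 */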
3039
3040
3041
3042/*
3043 * initialize a new messenger instance
3044 */
3045void ceph_messenger_init(struct ceph_messenger *msgr,
3046 struct ceph_entity_addr *myaddr)
3047{
3048 spin_lock_init(&msgr->global_seq_lock);
3049
3050 if (myaddr)
3051 msgr->inst.addr = *myaddr;
3052
3053 /* select a random nonce */
3054 msgr->inst.addr.type = 0;
3055 get_random_bytes(&msgr->inst.addr.nonce, sizeof(msgr->inst.addr.nonce));
3056 encode_my_addr(msgr);
3057
3058 atomic_set(&msgr->stopping, 0);
3059 write_pnet(&msgr->net, get_net(current->nsproxy->net_ns));
3060
3061 dout("%s %p\n", __func__, msgr);
3062}
3063EXPORT_SYMBOL(ceph_messenger_init);
3064
3065void ceph_messenger_fini(struct ceph_messenger *msgr)
3066{
3067 put_net(read_pnet(&msgr->net));
3068}
3069EXPORT_SYMBOL(ceph_messenger_fini);
3070
3071static void msg_con_set(struct ceph_msg *msg, struct ceph_connection *con)
3072{
3073 if (msg->con)
3074 msg->con->ops->put(msg->con);
3075
3076 msg->con = con ? con->ops->get(con) : NULL;
3077 BUG_ON(msg->con != con);
3078}
3079
3080static void clear_standby(struct ceph_connection *con)
3081{
3082 /* come back from STANDBY? */
3083 if (con->state == CON_STATE_STANDBY) {
3084 dout("clear_standby %p and ++connect_seq\n", con);
3085 con->state = CON_STATE_PREOPEN;
3086 con->connect_seq++;
3087 WARN_ON(ceph_con_flag_test(con, CEPH_CON_F_WRITE_PENDING));
3088 WARN_ON(ceph_con_flag_test(con, CEPH_CON_F_KEEPALIVE_PENDING));
3089 }
3090}
3091
3092/*
3093 * Queue up an outgoing message on the given connection.
3094 */
3095void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg)
3096{
3097 /* set src+dst */
3098 msg->hdr.src = con->msgr->inst.name;
3099 BUG_ON(msg->front.iov_len != le32_to_cpu(msg->hdr.front_len));
3100 msg->needs_out_seq = true;
3101
3102 mutex_lock(&con->mutex);
3103
3104 if (con->state == CON_STATE_CLOSED) {
3105 dout("con_send %p closed, dropping %p\n", con, msg);
3106 ceph_msg_put(msg);
3107 mutex_unlock(&con->mutex);
3108 return;
3109 }
3110
3111 msg_con_set(msg, con);
3112
3113 BUG_ON(!list_empty(&msg->list_head));
3114 list_add_tail(&msg->list_head, &con->out_queue);
3115 dout("----- %p to %s%lld %d=%s len %d+%d+%d -----\n", msg,
3116 ENTITY_NAME(con->peer_name), le16_to_cpu(msg->hdr.type),
3117 ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
3118 le32_to_cpu(msg->hdr.front_len),
3119 le32_to_cpu(msg->hdr.middle_len),
3120 le32_to_cpu(msg->hdr.data_len));
3121
3122 clear_standby(con);
3123 mutex_unlock(&con->mutex);
3124
3125 /* if there wasn't anything waiting to send before, queue
3126 * new work */
3127 if (ceph_con_flag_test_and_set(con, CEPH_CON_F_WRITE_PENDING) == 0)
3128 queue_con(con);
3129}
3130EXPORT_SYMBOL(ceph_con_send);
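/*
 * Example (illustrative sketch, not a real caller): a typical sender
 * builds a message sized for its payload and hands it to the
 * connection; the messenger consumes the reference. 'type', 'front_len'
 * and 'con' stand in for whatever the caller has.
 *
 *	struct ceph_msg *msg;
 *
 *	msg = ceph_msg_new(type, front_len, GFP_NOFS, false);
 *	if (msg) {
 *		// ... fill msg->front.iov_base with front_len bytes ...
 *		ceph_con_send(con, msg);
 *	}
 */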
3131
3132/*
3133 * Revoke a message that was previously queued for send
3134 */
3135void ceph_msg_revoke(struct ceph_msg *msg)
3136{
3137 struct ceph_connection *con = msg->con;
3138
3139 if (!con) {
3140 dout("%s msg %p null con\n", __func__, msg);
3141 return; /* Message not in our possession */
3142 }
3143
3144 mutex_lock(&con->mutex);
3145 if (!list_empty(&msg->list_head)) {
3146 dout("%s %p msg %p - was on queue\n", __func__, con, msg);
3147 list_del_init(&msg->list_head);
3148 msg->hdr.seq = 0;
3149
3150 ceph_msg_put(msg);
3151 }
3152 if (con->out_msg == msg) {
3153 BUG_ON(con->out_skip);
3154 /* footer */
3155 if (con->out_msg_done) {
3156 con->out_skip += con_out_kvec_skip(con);
3157 } else {
3158 BUG_ON(!msg->data_length);
3159 con->out_skip += sizeof_footer(con);
3160 }
3161 /* data, middle, front */
3162 if (msg->data_length)
3163 con->out_skip += msg->cursor.total_resid;
3164 if (msg->middle)
3165 con->out_skip += con_out_kvec_skip(con);
3166 con->out_skip += con_out_kvec_skip(con);
3167
3168 dout("%s %p msg %p - was sending, will write %d skip %d\n",
3169 __func__, con, msg, con->out_kvec_bytes, con->out_skip);
3170 msg->hdr.seq = 0;
3171 con->out_msg = NULL;
3172 ceph_msg_put(msg);
3173 }
3174
3175 mutex_unlock(&con->mutex);
3176}
3177
3178/*
3179 * Revoke a message that we may be reading data into
3180 */
3181void ceph_msg_revoke_incoming(struct ceph_msg *msg)
3182{
3183 struct ceph_connection *con = msg->con;
3184
3185 if (!con) {
3186 dout("%s msg %p null con\n", __func__, msg);
3187 return; /* Message not in our possession */
3188 }
3189
3190 mutex_lock(&con->mutex);
3191 if (con->in_msg == msg) {
3192 unsigned int front_len = le32_to_cpu(con->in_hdr.front_len);
3193 unsigned int middle_len = le32_to_cpu(con->in_hdr.middle_len);
3194 unsigned int data_len = le32_to_cpu(con->in_hdr.data_len);
3195
3196 /* skip rest of message */
3197 dout("%s %p msg %p revoked\n", __func__, con, msg);
3198 con->in_base_pos = con->in_base_pos -
3199 sizeof(struct ceph_msg_header) -
3200 front_len -
3201 middle_len -
3202 data_len -
3203 sizeof(struct ceph_msg_footer);
3204 ceph_msg_put(con->in_msg);
3205 con->in_msg = NULL;
3206 con->in_tag = CEPH_MSGR_TAG_READY;
3207 con->in_seq++;
3208 } else {
3209 dout("%s %p in_msg %p msg %p no-op\n",
3210 __func__, con, con->in_msg, msg);
3211 }
3212 mutex_unlock(&con->mutex);
3213}
3214
3215/*
3216 * Queue a keepalive byte to ensure the tcp connection is alive.
3217 */
3218void ceph_con_keepalive(struct ceph_connection *con)
3219{
3220 dout("con_keepalive %p\n", con);
3221 mutex_lock(&con->mutex);
3222 clear_standby(con);
3223 mutex_unlock(&con->mutex);
3224 if (ceph_con_flag_test_and_set(con, CEPH_CON_F_KEEPALIVE_PENDING) == 0 &&
3225 ceph_con_flag_test_and_set(con, CEPH_CON_F_WRITE_PENDING) == 0)
3226 queue_con(con);
3227}
3228EXPORT_SYMBOL(ceph_con_keepalive);
3229
3230bool ceph_con_keepalive_expired(struct ceph_connection *con,
3231 unsigned long interval)
3232{
3233 if (interval > 0 &&
3234 (con->peer_features & CEPH_FEATURE_MSGR_KEEPALIVE2)) {
3235 struct timespec now;
3236 struct timespec ts;
3237 ktime_get_real_ts(&now);
3238 jiffies_to_timespec(interval, &ts);
3239 ts = timespec_add(con->last_keepalive_ack, ts);
3240 return timespec_compare(&now, &ts) >= 0;
3241 }
3242 return false;
3243}
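/*
 * Example (illustrative only): a client that keeps a session alive can
 * pair the two helpers above, roughly as the mon client does from its
 * delayed work; 'ping_timeout' stands in for the caller's interval in
 * jiffies.
 *
 *	ceph_con_keepalive(con);
 *	if (ceph_con_keepalive_expired(con, ping_timeout))
 *		; // no KEEPALIVE2 ack within the window, treat as a fault
 *
 * The expiry check only ever fires when the peer advertises
 * CEPH_FEATURE_MSGR_KEEPALIVE2; otherwise it returns false.
 */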
3244
3245static struct ceph_msg_data *ceph_msg_data_create(enum ceph_msg_data_type type)
3246{
3247 struct ceph_msg_data *data;
3248
3249 if (WARN_ON(!ceph_msg_data_type_valid(type)))
3250 return NULL;
3251
3252 data = kmem_cache_zalloc(ceph_msg_data_cache, GFP_NOFS);
3253 if (!data)
3254 return NULL;
3255
3256 data->type = type;
3257 INIT_LIST_HEAD(&data->links);
3258
3259 return data;
3260}
3261
3262static void ceph_msg_data_destroy(struct ceph_msg_data *data)
3263{
3264 if (!data)
3265 return;
3266
3267 WARN_ON(!list_empty(&data->links));
3268 if (data->type == CEPH_MSG_DATA_PAGELIST)
3269 ceph_pagelist_release(data->pagelist);
3270 kmem_cache_free(ceph_msg_data_cache, data);
3271}
3272
3273void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages,
3274 size_t length, size_t alignment)
3275{
3276 struct ceph_msg_data *data;
3277
3278 BUG_ON(!pages);
3279 BUG_ON(!length);
3280
3281 data = ceph_msg_data_create(CEPH_MSG_DATA_PAGES);
3282 BUG_ON(!data);
3283 data->pages = pages;
3284 data->length = length;
3285 data->alignment = alignment & ~PAGE_MASK;
3286
3287 list_add_tail(&data->links, &msg->data);
3288 msg->data_length += length;
3289}
3290EXPORT_SYMBOL(ceph_msg_data_add_pages);
3291
3292void ceph_msg_data_add_pagelist(struct ceph_msg *msg,
3293 struct ceph_pagelist *pagelist)
3294{
3295 struct ceph_msg_data *data;
3296
3297 BUG_ON(!pagelist);
3298 BUG_ON(!pagelist->length);
3299
3300 data = ceph_msg_data_create(CEPH_MSG_DATA_PAGELIST);
3301 BUG_ON(!data);
3302 data->pagelist = pagelist;
3303
3304 list_add_tail(&data->links, &msg->data);
3305 msg->data_length += pagelist->length;
3306}
3307EXPORT_SYMBOL(ceph_msg_data_add_pagelist);
3308
3309#ifdef CONFIG_BLOCK
3310void ceph_msg_data_add_bio(struct ceph_msg *msg, struct ceph_bio_iter *bio_pos,
3311 u32 length)
3312{
3313 struct ceph_msg_data *data;
3314
3315 data = ceph_msg_data_create(CEPH_MSG_DATA_BIO);
3316 BUG_ON(!data);
3317 data->bio_pos = *bio_pos;
3318 data->bio_length = length;
3319
3320 list_add_tail(&data->links, &msg->data);
3321 msg->data_length += length;
3322}
3323EXPORT_SYMBOL(ceph_msg_data_add_bio);
3324#endif /* CONFIG_BLOCK */
3325
3326void ceph_msg_data_add_bvecs(struct ceph_msg *msg,
3327 struct ceph_bvec_iter *bvec_pos)
3328{
3329 struct ceph_msg_data *data;
3330
3331 data = ceph_msg_data_create(CEPH_MSG_DATA_BVECS);
3332 BUG_ON(!data);
3333 data->bvec_pos = *bvec_pos;
3334
3335 list_add_tail(&data->links, &msg->data);
3336 msg->data_length += bvec_pos->iter.bi_size;
3337}
3338EXPORT_SYMBOL(ceph_msg_data_add_bvecs);
3339
3340/*
3341 * construct a new message with the given type and front length;
3342 * the new msg has a ref count of 1.
3343 */
3344struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags,
3345 bool can_fail)
3346{
3347 struct ceph_msg *m;
3348
3349 m = kmem_cache_zalloc(ceph_msg_cache, flags);
3350 if (m == NULL)
3351 goto out;
3352
3353 m->hdr.type = cpu_to_le16(type);
3354 m->hdr.priority = cpu_to_le16(CEPH_MSG_PRIO_DEFAULT);
3355 m->hdr.front_len = cpu_to_le32(front_len);
3356
3357 INIT_LIST_HEAD(&m->list_head);
3358 kref_init(&m->kref);
3359 INIT_LIST_HEAD(&m->data);
3360
3361 /* front */
3362 if (front_len) {
3363 m->front.iov_base = ceph_kvmalloc(front_len, flags);
3364 if (m->front.iov_base == NULL) {
3365 dout("ceph_msg_new can't allocate %d bytes\n",
3366 front_len);
3367 goto out2;
3368 }
3369 } else {
3370 m->front.iov_base = NULL;
3371 }
3372 m->front_alloc_len = m->front.iov_len = front_len;
3373
3374 dout("ceph_msg_new %p front %d\n", m, front_len);
3375 return m;
3376
3377out2:
3378 ceph_msg_put(m);
3379out:
3380 if (!can_fail) {
3381 pr_err("msg_new can't create type %d front %d\n", type,
3382 front_len);
3383 WARN_ON(1);
3384 } else {
3385 dout("msg_new can't create type %d front %d\n", type,
3386 front_len);
3387 }
3388 return NULL;
3389}
3390EXPORT_SYMBOL(ceph_msg_new);
3391
3392/*
3393 * Allocate "middle" portion of a message, if it is needed and wasn't
3394 * allocated by alloc_msg. This allows us to read a small fixed-size
3395 * per-type header in the front and then gracefully fail (i.e.,
3396 * propagate the error to the caller based on info in the front) when
3397 * the middle is too large.
3398 */
3399static int ceph_alloc_middle(struct ceph_connection *con, struct ceph_msg *msg)
3400{
3401 int type = le16_to_cpu(msg->hdr.type);
3402 int middle_len = le32_to_cpu(msg->hdr.middle_len);
3403
3404 dout("alloc_middle %p type %d %s middle_len %d\n", msg, type,
3405 ceph_msg_type_name(type), middle_len);
3406 BUG_ON(!middle_len);
3407 BUG_ON(msg->middle);
3408
3409 msg->middle = ceph_buffer_new(middle_len, GFP_NOFS);
3410 if (!msg->middle)
3411 return -ENOMEM;
3412 return 0;
3413}
3414
3415/*
3416 * Allocate a message for receiving an incoming message on a
3417 * connection, and save the result in con->in_msg. Uses the
3418 * connection's private alloc_msg op if available.
3419 *
3420 * Returns 0 on success, or a negative error code.
3421 *
3422 * On success, if we set *skip = 1:
3423 * - the next message should be skipped and ignored.
3424 * - con->in_msg == NULL
3425 * or if we set *skip = 0:
3426 * - con->in_msg is non-null.
3427 * On error (ENOMEM, EAGAIN, ...),
3428 * - con->in_msg == NULL
3429 */
3430static int ceph_con_in_msg_alloc(struct ceph_connection *con, int *skip)
3431{
3432 struct ceph_msg_header *hdr = &con->in_hdr;
3433 int middle_len = le32_to_cpu(hdr->middle_len);
3434 struct ceph_msg *msg;
3435 int ret = 0;
3436
3437 BUG_ON(con->in_msg != NULL);
3438 BUG_ON(!con->ops->alloc_msg);
3439
3440 mutex_unlock(&con->mutex);
3441 msg = con->ops->alloc_msg(con, hdr, skip);
3442 mutex_lock(&con->mutex);
3443 if (con->state != CON_STATE_OPEN) {
3444 if (msg)
3445 ceph_msg_put(msg);
3446 return -EAGAIN;
3447 }
3448 if (msg) {
3449 BUG_ON(*skip);
3450 msg_con_set(msg, con);
3451 con->in_msg = msg;
3452 } else {
3453 /*
3454 * Null message pointer means either we should skip
3455 * this message or we couldn't allocate memory. The
3456 * former is not an error.
3457 */
3458 if (*skip)
3459 return 0;
3460
3461 con->error_msg = "error allocating memory for incoming message";
3462 return -ENOMEM;
3463 }
3464 memcpy(&con->in_msg->hdr, &con->in_hdr, sizeof(con->in_hdr));
3465
3466 if (middle_len && !con->in_msg->middle) {
3467 ret = ceph_alloc_middle(con, con->in_msg);
3468 if (ret < 0) {
3469 ceph_msg_put(con->in_msg);
3470 con->in_msg = NULL;
3471 }
3472 }
3473
3474 return ret;
3475}
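/*
 * Example (illustrative sketch): an ->alloc_msg() implementation that
 * honours the contract documented above either returns a message sized
 * from the header, or returns NULL with *skip = 1 to have the incoming
 * message read off the socket and discarded. interested_in_type() is a
 * hypothetical predicate.
 *
 *	static struct ceph_msg *example_alloc_msg(struct ceph_connection *con,
 *						  struct ceph_msg_header *hdr,
 *						  int *skip)
 *	{
 *		int type = le16_to_cpu(hdr->type);
 *		int front_len = le32_to_cpu(hdr->front_len);
 *
 *		if (!interested_in_type(type)) {
 *			*skip = 1;
 *			return NULL;
 *		}
 *
 *		return ceph_msg_new(type, front_len, GFP_NOFS, false);
 *	}
 *
 * A NULL return without *skip set is treated as an allocation failure.
 */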
3476
3477
3478/*
3479 * Free a generically kmalloc'd message.
3480 */
3481static void ceph_msg_free(struct ceph_msg *m)
3482{
3483 dout("%s %p\n", __func__, m);
3484 kvfree(m->front.iov_base);
3485 kmem_cache_free(ceph_msg_cache, m);
3486}
3487
3488static void ceph_msg_release(struct kref *kref)
3489{
3490 struct ceph_msg *m = container_of(kref, struct ceph_msg, kref);
3491 struct ceph_msg_data *data, *next;
3492
3493 dout("%s %p\n", __func__, m);
3494 WARN_ON(!list_empty(&m->list_head));
3495
3496 msg_con_set(m, NULL);
3497
3498 /* drop middle, data, if any */
3499 if (m->middle) {
3500 ceph_buffer_put(m->middle);
3501 m->middle = NULL;
3502 }
3503
3504 list_for_each_entry_safe(data, next, &m->data, links) {
3505 list_del_init(&data->links);
3506 ceph_msg_data_destroy(data);
3507 }
3508 m->data_length = 0;
3509
3510 if (m->pool)
3511 ceph_msgpool_put(m->pool, m);
3512 else
3513 ceph_msg_free(m);
3514}
3515
3516struct ceph_msg *ceph_msg_get(struct ceph_msg *msg)
3517{
3518 dout("%s %p (was %d)\n", __func__, msg,
3519 kref_read(&msg->kref));
3520 kref_get(&msg->kref);
3521 return msg;
3522}
3523EXPORT_SYMBOL(ceph_msg_get);
3524
3525void ceph_msg_put(struct ceph_msg *msg)
3526{
3527 dout("%s %p (was %d)\n", __func__, msg,
3528 kref_read(&msg->kref));
3529 kref_put(&msg->kref, ceph_msg_release);
3530}
3531EXPORT_SYMBOL(ceph_msg_put);
3532
3533void ceph_msg_dump(struct ceph_msg *msg)
3534{
3535 pr_debug("msg_dump %p (front_alloc_len %d length %zd)\n", msg,
3536 msg->front_alloc_len, msg->data_length);
3537 print_hex_dump(KERN_DEBUG, "header: ",
3538 DUMP_PREFIX_OFFSET, 16, 1,
3539 &msg->hdr, sizeof(msg->hdr), true);
3540 print_hex_dump(KERN_DEBUG, " front: ",
3541 DUMP_PREFIX_OFFSET, 16, 1,
3542 msg->front.iov_base, msg->front.iov_len, true);
3543 if (msg->middle)
3544 print_hex_dump(KERN_DEBUG, "middle: ",
3545 DUMP_PREFIX_OFFSET, 16, 1,
3546 msg->middle->vec.iov_base,
3547 msg->middle->vec.iov_len, true);
3548 print_hex_dump(KERN_DEBUG, "footer: ",
3549 DUMP_PREFIX_OFFSET, 16, 1,
3550 &msg->footer, sizeof(msg->footer), true);
3551}
3552EXPORT_SYMBOL(ceph_msg_dump);