   1// SPDX-License-Identifier: GPL-2.0
   2#include <linux/ceph/ceph_debug.h>
   3
   4#include <linux/crc32c.h>
   5#include <linux/ctype.h>
   6#include <linux/highmem.h>
   7#include <linux/inet.h>
   8#include <linux/kthread.h>
   9#include <linux/net.h>
  10#include <linux/nsproxy.h>
  11#include <linux/sched/mm.h>
  12#include <linux/slab.h>
  13#include <linux/socket.h>
  14#include <linux/string.h>
  15#ifdef	CONFIG_BLOCK
  16#include <linux/bio.h>
  17#endif	/* CONFIG_BLOCK */
  18#include <linux/dns_resolver.h>
  19#include <net/tcp.h>
  20
  21#include <linux/ceph/ceph_features.h>
  22#include <linux/ceph/libceph.h>
  23#include <linux/ceph/messenger.h>
  24#include <linux/ceph/decode.h>
  25#include <linux/ceph/pagelist.h>
  26#include <linux/export.h>
  27
  28/*
  29 * Ceph uses the messenger to exchange ceph_msg messages with other
  30 * hosts in the system.  The messenger provides ordered and reliable
  31 * delivery.  We tolerate TCP disconnects by reconnecting (with
  32 * exponential backoff) in the case of a fault (disconnection, bad
  33 * crc, protocol error).  Acks allow sent messages to be discarded by
  34 * the sender.
  35 */
  36
  37/*
  38 * We track the state of the socket on a given connection using
  39 * values defined below.  The transition to a new socket state is
  40 * handled by a function which verifies we aren't coming from an
  41 * unexpected state.
  42 *
  43 *      --------
  44 *      | NEW* |  transient initial state
  45 *      --------
  46 *          | con_sock_state_init()
  47 *          v
  48 *      ----------
  49 *      | CLOSED |  initialized, but no socket (and no
  50 *      ----------  TCP connection)
  51 *       ^      \
  52 *       |       \ con_sock_state_connecting()
  53 *       |        ----------------------
  54 *       |                              \
  55 *       + con_sock_state_closed()       \
  56 *       |+---------------------------    \
  57 *       | \                          \    \
  58 *       |  -----------                \    \
  59 *       |  | CLOSING |  socket event;  \    \
  60 *       |  -----------  await close     \    \
  61 *       |       ^                        \   |
  62 *       |       |                         \  |
  63 *       |       + con_sock_state_closing() \ |
  64 *       |      / \                         | |
  65 *       |     /   ---------------          | |
  66 *       |    /                   \         v v
  67 *       |   /                    --------------
  68 *       |  /    -----------------| CONNECTING |  socket created, TCP
  69 *       |  |   /                 --------------  connect initiated
  70 *       |  |   | con_sock_state_connected()
  71 *       |  |   v
  72 *      -------------
  73 *      | CONNECTED |  TCP connection established
  74 *      -------------
  75 *
  76 * State values for ceph_connection->sock_state; NEW is assumed to be 0.
  77 */
  78
  79#define CON_SOCK_STATE_NEW		0	/* -> CLOSED */
  80#define CON_SOCK_STATE_CLOSED		1	/* -> CONNECTING */
  81#define CON_SOCK_STATE_CONNECTING	2	/* -> CONNECTED or -> CLOSING */
  82#define CON_SOCK_STATE_CONNECTED	3	/* -> CLOSING or -> CLOSED */
  83#define CON_SOCK_STATE_CLOSING		4	/* -> CLOSED */
  84
  85static bool con_flag_valid(unsigned long con_flag)
  86{
  87	switch (con_flag) {
  88	case CEPH_CON_F_LOSSYTX:
  89	case CEPH_CON_F_KEEPALIVE_PENDING:
  90	case CEPH_CON_F_WRITE_PENDING:
  91	case CEPH_CON_F_SOCK_CLOSED:
  92	case CEPH_CON_F_BACKOFF:
  93		return true;
  94	default:
  95		return false;
  96	}
  97}
  98
  99void ceph_con_flag_clear(struct ceph_connection *con, unsigned long con_flag)
 100{
 101	BUG_ON(!con_flag_valid(con_flag));
 102
 103	clear_bit(con_flag, &con->flags);
 104}
 105
 106void ceph_con_flag_set(struct ceph_connection *con, unsigned long con_flag)
 107{
 108	BUG_ON(!con_flag_valid(con_flag));
 109
 110	set_bit(con_flag, &con->flags);
 111}
 112
 113bool ceph_con_flag_test(struct ceph_connection *con, unsigned long con_flag)
 114{
 115	BUG_ON(!con_flag_valid(con_flag));
 116
 117	return test_bit(con_flag, &con->flags);
 118}
 119
 120bool ceph_con_flag_test_and_clear(struct ceph_connection *con,
 121				  unsigned long con_flag)
 122{
 123	BUG_ON(!con_flag_valid(con_flag));
 124
 125	return test_and_clear_bit(con_flag, &con->flags);
 126}
 127
 128bool ceph_con_flag_test_and_set(struct ceph_connection *con,
 129				unsigned long con_flag)
 130{
 131	BUG_ON(!con_flag_valid(con_flag));
 132
 133	return test_and_set_bit(con_flag, &con->flags);
 134}
 135
 136/* Slab caches for frequently-allocated structures */
 137
 138static struct kmem_cache	*ceph_msg_cache;
 139
 140#ifdef CONFIG_LOCKDEP
 141static struct lock_class_key socket_class;
 142#endif
 143
 144static void queue_con(struct ceph_connection *con);
 145static void cancel_con(struct ceph_connection *con);
 146static void ceph_con_workfn(struct work_struct *);
 147static void con_fault(struct ceph_connection *con);
 148
 149/*
 150 * Nicely render a sockaddr as a string.  An array of formatted
 151 * strings is used, to approximate reentrancy.
 152 */
 153#define ADDR_STR_COUNT_LOG	5	/* log2(# address strings in array) */
 154#define ADDR_STR_COUNT		(1 << ADDR_STR_COUNT_LOG)
 155#define ADDR_STR_COUNT_MASK	(ADDR_STR_COUNT - 1)
 156#define MAX_ADDR_STR_LEN	64	/* 54 is enough */
 157
 158static char addr_str[ADDR_STR_COUNT][MAX_ADDR_STR_LEN];
 159static atomic_t addr_str_seq = ATOMIC_INIT(0);
 160
 161struct page *ceph_zero_page;		/* used in certain error cases */
 162
 163const char *ceph_pr_addr(const struct ceph_entity_addr *addr)
 164{
 165	int i;
 166	char *s;
 167	struct sockaddr_storage ss = addr->in_addr; /* align */
 168	struct sockaddr_in *in4 = (struct sockaddr_in *)&ss;
 169	struct sockaddr_in6 *in6 = (struct sockaddr_in6 *)&ss;
 170
 171	i = atomic_inc_return(&addr_str_seq) & ADDR_STR_COUNT_MASK;
 172	s = addr_str[i];
 173
 174	switch (ss.ss_family) {
 175	case AF_INET:
 176		snprintf(s, MAX_ADDR_STR_LEN, "(%d)%pI4:%hu",
 177			 le32_to_cpu(addr->type), &in4->sin_addr,
 178			 ntohs(in4->sin_port));
 179		break;
 180
 181	case AF_INET6:
 182		snprintf(s, MAX_ADDR_STR_LEN, "(%d)[%pI6c]:%hu",
 183			 le32_to_cpu(addr->type), &in6->sin6_addr,
 184			 ntohs(in6->sin6_port));
 185		break;
 186
 187	default:
 188		snprintf(s, MAX_ADDR_STR_LEN, "(unknown sockaddr family %hu)",
 189			 ss.ss_family);
 190	}
 191
 192	return s;
 193}
 194EXPORT_SYMBOL(ceph_pr_addr);
 195
 196void ceph_encode_my_addr(struct ceph_messenger *msgr)
 197{
 198	if (!ceph_msgr2(from_msgr(msgr))) {
 199		memcpy(&msgr->my_enc_addr, &msgr->inst.addr,
 200		       sizeof(msgr->my_enc_addr));
 201		ceph_encode_banner_addr(&msgr->my_enc_addr);
 202	}
 203}
 204
 205/*
 206 * work queue for all reading and writing to/from the socket.
 207 */
 208static struct workqueue_struct *ceph_msgr_wq;
 209
 210static int ceph_msgr_slab_init(void)
 211{
 212	BUG_ON(ceph_msg_cache);
 213	ceph_msg_cache = KMEM_CACHE(ceph_msg, 0);
 214	if (!ceph_msg_cache)
 215		return -ENOMEM;
 216
 217	return 0;
 218}
 219
 220static void ceph_msgr_slab_exit(void)
 221{
 222	BUG_ON(!ceph_msg_cache);
 223	kmem_cache_destroy(ceph_msg_cache);
 224	ceph_msg_cache = NULL;
 225}
 226
 227static void _ceph_msgr_exit(void)
 228{
 229	if (ceph_msgr_wq) {
 230		destroy_workqueue(ceph_msgr_wq);
 231		ceph_msgr_wq = NULL;
 232	}
 233
 234	BUG_ON(!ceph_zero_page);
 235	put_page(ceph_zero_page);
 236	ceph_zero_page = NULL;
 237
 238	ceph_msgr_slab_exit();
 239}
 240
 241int __init ceph_msgr_init(void)
 242{
 243	if (ceph_msgr_slab_init())
 244		return -ENOMEM;
 245
 246	BUG_ON(ceph_zero_page);
 247	ceph_zero_page = ZERO_PAGE(0);
 248	get_page(ceph_zero_page);
 249
 250	/*
 251	 * The number of active work items is limited by the number of
 252	 * connections, so leave @max_active at default.
 253	 */
 254	ceph_msgr_wq = alloc_workqueue("ceph-msgr", WQ_MEM_RECLAIM, 0);
 255	if (ceph_msgr_wq)
 256		return 0;
 257
 258	pr_err("msgr_init failed to create workqueue\n");
 259	_ceph_msgr_exit();
 260
 261	return -ENOMEM;
 262}
 263
 264void ceph_msgr_exit(void)
 265{
 266	BUG_ON(ceph_msgr_wq == NULL);
 267
 268	_ceph_msgr_exit();
 269}
 270
 271void ceph_msgr_flush(void)
 272{
 273	flush_workqueue(ceph_msgr_wq);
 274}
 275EXPORT_SYMBOL(ceph_msgr_flush);
 276
 277/* Connection socket state transition functions */
 278
 279static void con_sock_state_init(struct ceph_connection *con)
 280{
 281	int old_state;
 282
 283	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSED);
 284	if (WARN_ON(old_state != CON_SOCK_STATE_NEW))
 285		printk("%s: unexpected old state %d\n", __func__, old_state);
 286	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
 287	     CON_SOCK_STATE_CLOSED);
 288}
 289
 290static void con_sock_state_connecting(struct ceph_connection *con)
 291{
 292	int old_state;
 293
 294	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CONNECTING);
 295	if (WARN_ON(old_state != CON_SOCK_STATE_CLOSED))
 296		printk("%s: unexpected old state %d\n", __func__, old_state);
 297	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
 298	     CON_SOCK_STATE_CONNECTING);
 299}
 300
 301static void con_sock_state_connected(struct ceph_connection *con)
 302{
 303	int old_state;
 304
 305	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CONNECTED);
 306	if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTING))
 307		printk("%s: unexpected old state %d\n", __func__, old_state);
 308	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
 309	     CON_SOCK_STATE_CONNECTED);
 310}
 311
 312static void con_sock_state_closing(struct ceph_connection *con)
 313{
 314	int old_state;
 315
 316	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSING);
 317	if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTING &&
 318			old_state != CON_SOCK_STATE_CONNECTED &&
 319			old_state != CON_SOCK_STATE_CLOSING))
 320		printk("%s: unexpected old state %d\n", __func__, old_state);
 321	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
 322	     CON_SOCK_STATE_CLOSING);
 323}
 324
 325static void con_sock_state_closed(struct ceph_connection *con)
 326{
 327	int old_state;
 328
 329	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSED);
 330	if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTED &&
 331		    old_state != CON_SOCK_STATE_CLOSING &&
 332		    old_state != CON_SOCK_STATE_CONNECTING &&
 333		    old_state != CON_SOCK_STATE_CLOSED))
 334		printk("%s: unexpected old state %d\n", __func__, old_state);
 335	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
 336	     CON_SOCK_STATE_CLOSED);
 337}
 338
 339/*
 340 * socket callback functions
 341 */
 342
 343/* data available on socket, or listen socket received a connect */
 344static void ceph_sock_data_ready(struct sock *sk)
 345{
 346	struct ceph_connection *con = sk->sk_user_data;
 347	if (atomic_read(&con->msgr->stopping)) {
 348		return;
 349	}
 350
 351	if (sk->sk_state != TCP_CLOSE_WAIT) {
 352		dout("%s %p state = %d, queueing work\n", __func__,
 353		     con, con->state);
 354		queue_con(con);
 355	}
 356}
 357
 358/* socket has buffer space for writing */
 359static void ceph_sock_write_space(struct sock *sk)
 360{
 361	struct ceph_connection *con = sk->sk_user_data;
 362
 363	/* only queue to workqueue if there is data we want to write,
 364	 * and there is sufficient space in the socket buffer to accept
 365	 * more data.  clear SOCK_NOSPACE so that ceph_sock_write_space()
 366	 * doesn't get called again until try_write() fills the socket
 367	 * buffer. See net/ipv4/tcp_input.c:tcp_check_space()
 368	 * and net/core/stream.c:sk_stream_write_space().
 369	 */
 370	if (ceph_con_flag_test(con, CEPH_CON_F_WRITE_PENDING)) {
 371		if (sk_stream_is_writeable(sk)) {
 372			dout("%s %p queueing write work\n", __func__, con);
 373			clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
 374			queue_con(con);
 375		}
 376	} else {
 377		dout("%s %p nothing to write\n", __func__, con);
 378	}
 379}
 380
 381/* socket's state has changed */
 382static void ceph_sock_state_change(struct sock *sk)
 383{
 384	struct ceph_connection *con = sk->sk_user_data;
 385
 386	dout("%s %p state = %d sk_state = %u\n", __func__,
 387	     con, con->state, sk->sk_state);
 388
 389	switch (sk->sk_state) {
 390	case TCP_CLOSE:
 391		dout("%s TCP_CLOSE\n", __func__);
 392		fallthrough;
 393	case TCP_CLOSE_WAIT:
 394		dout("%s TCP_CLOSE_WAIT\n", __func__);
 395		con_sock_state_closing(con);
 396		ceph_con_flag_set(con, CEPH_CON_F_SOCK_CLOSED);
 397		queue_con(con);
 398		break;
 399	case TCP_ESTABLISHED:
 400		dout("%s TCP_ESTABLISHED\n", __func__);
 401		con_sock_state_connected(con);
 402		queue_con(con);
 403		break;
 404	default:	/* Everything else is uninteresting */
 405		break;
 406	}
 407}
 408
 409/*
 410 * set up socket callbacks
 411 */
 412static void set_sock_callbacks(struct socket *sock,
 413			       struct ceph_connection *con)
 414{
 415	struct sock *sk = sock->sk;
 416	sk->sk_user_data = con;
 417	sk->sk_data_ready = ceph_sock_data_ready;
 418	sk->sk_write_space = ceph_sock_write_space;
 419	sk->sk_state_change = ceph_sock_state_change;
 420}
 421
 422
 423/*
 424 * socket helpers
 425 */
 426
 427/*
 428 * initiate connection to a remote socket.
 429 */
 430int ceph_tcp_connect(struct ceph_connection *con)
 431{
 432	struct sockaddr_storage ss = con->peer_addr.in_addr; /* align */
 433	struct socket *sock;
 434	unsigned int noio_flag;
 435	int ret;
 436
 437	dout("%s con %p peer_addr %s\n", __func__, con,
 438	     ceph_pr_addr(&con->peer_addr));
 439	BUG_ON(con->sock);
 440
 441	/* sock_create_kern() allocates with GFP_KERNEL */
 442	noio_flag = memalloc_noio_save();
 443	ret = sock_create_kern(read_pnet(&con->msgr->net), ss.ss_family,
 444			       SOCK_STREAM, IPPROTO_TCP, &sock);
 445	memalloc_noio_restore(noio_flag);
 446	if (ret)
 447		return ret;
 448	sock->sk->sk_allocation = GFP_NOFS;
 449
 450#ifdef CONFIG_LOCKDEP
 451	lockdep_set_class(&sock->sk->sk_lock, &socket_class);
 452#endif
 453
 454	set_sock_callbacks(sock, con);
 455
 456	con_sock_state_connecting(con);
 457	ret = sock->ops->connect(sock, (struct sockaddr *)&ss, sizeof(ss),
 458				 O_NONBLOCK);
 459	if (ret == -EINPROGRESS) {
 460		dout("connect %s EINPROGRESS sk_state = %u\n",
 461		     ceph_pr_addr(&con->peer_addr),
 462		     sock->sk->sk_state);
 463	} else if (ret < 0) {
 464		pr_err("connect %s error %d\n",
 465		       ceph_pr_addr(&con->peer_addr), ret);
 466		sock_release(sock);
 467		return ret;
 468	}
 469
 470	if (ceph_test_opt(from_msgr(con->msgr), TCP_NODELAY))
 471		tcp_sock_set_nodelay(sock->sk);
 472
 473	con->sock = sock;
 474	return 0;
 475}
 476
 477/*
 478 * Shutdown/close the socket for the given connection.
 479 */
 480int ceph_con_close_socket(struct ceph_connection *con)
 481{
 482	int rc = 0;
 483
 484	dout("%s con %p sock %p\n", __func__, con, con->sock);
 485	if (con->sock) {
 486		rc = con->sock->ops->shutdown(con->sock, SHUT_RDWR);
 487		sock_release(con->sock);
 488		con->sock = NULL;
 489	}
 490
 491	/*
 492	 * Forcibly clear the SOCK_CLOSED flag.  It gets set
 493	 * independent of the connection mutex, and we could have
 494	 * received a socket close event before we had the chance to
 495	 * shut the socket down.
 496	 */
 497	ceph_con_flag_clear(con, CEPH_CON_F_SOCK_CLOSED);
 498
 499	con_sock_state_closed(con);
 500	return rc;
 501}
 502
 503static void ceph_con_reset_protocol(struct ceph_connection *con)
 504{
 505	dout("%s con %p\n", __func__, con);
 506
 507	ceph_con_close_socket(con);
 508	if (con->in_msg) {
 509		WARN_ON(con->in_msg->con != con);
 510		ceph_msg_put(con->in_msg);
 511		con->in_msg = NULL;
 512	}
 513	if (con->out_msg) {
 514		WARN_ON(con->out_msg->con != con);
 515		ceph_msg_put(con->out_msg);
 516		con->out_msg = NULL;
 517	}
 518
 519	if (ceph_msgr2(from_msgr(con->msgr)))
 520		ceph_con_v2_reset_protocol(con);
 521	else
 522		ceph_con_v1_reset_protocol(con);
 523}
 524
 525/*
 526 * Reset a connection.  Discard all incoming and outgoing messages
 527 * and clear *_seq state.
 528 */
 529static void ceph_msg_remove(struct ceph_msg *msg)
 530{
 531	list_del_init(&msg->list_head);
 532
 533	ceph_msg_put(msg);
 534}
 535
 536static void ceph_msg_remove_list(struct list_head *head)
 537{
 538	while (!list_empty(head)) {
 539		struct ceph_msg *msg = list_first_entry(head, struct ceph_msg,
 540							list_head);
 541		ceph_msg_remove(msg);
 542	}
 543}
 544
 545void ceph_con_reset_session(struct ceph_connection *con)
 546{
 547	dout("%s con %p\n", __func__, con);
 548
 549	WARN_ON(con->in_msg);
 550	WARN_ON(con->out_msg);
 551	ceph_msg_remove_list(&con->out_queue);
 552	ceph_msg_remove_list(&con->out_sent);
 553	con->out_seq = 0;
 554	con->in_seq = 0;
 555	con->in_seq_acked = 0;
 556
 557	if (ceph_msgr2(from_msgr(con->msgr)))
 558		ceph_con_v2_reset_session(con);
 559	else
 560		ceph_con_v1_reset_session(con);
 561}
 562
 563/*
 564 * mark a peer down.  drop any open connections.
 565 */
 566void ceph_con_close(struct ceph_connection *con)
 567{
 568	mutex_lock(&con->mutex);
 569	dout("con_close %p peer %s\n", con, ceph_pr_addr(&con->peer_addr));
 570	con->state = CEPH_CON_S_CLOSED;
 571
 572	ceph_con_flag_clear(con, CEPH_CON_F_LOSSYTX);  /* so we retry next
 573							  connect */
 574	ceph_con_flag_clear(con, CEPH_CON_F_KEEPALIVE_PENDING);
 575	ceph_con_flag_clear(con, CEPH_CON_F_WRITE_PENDING);
 576	ceph_con_flag_clear(con, CEPH_CON_F_BACKOFF);
 577
 578	ceph_con_reset_protocol(con);
 579	ceph_con_reset_session(con);
 580	cancel_con(con);
 581	mutex_unlock(&con->mutex);
 582}
 583EXPORT_SYMBOL(ceph_con_close);
 584
 585/*
 586 * Reopen a closed connection, with a new peer address.
 587 */
 588void ceph_con_open(struct ceph_connection *con,
 589		   __u8 entity_type, __u64 entity_num,
 590		   struct ceph_entity_addr *addr)
 591{
 592	mutex_lock(&con->mutex);
 593	dout("con_open %p %s\n", con, ceph_pr_addr(addr));
 594
 595	WARN_ON(con->state != CEPH_CON_S_CLOSED);
 596	con->state = CEPH_CON_S_PREOPEN;
 597
 598	con->peer_name.type = (__u8) entity_type;
 599	con->peer_name.num = cpu_to_le64(entity_num);
 600
 601	memcpy(&con->peer_addr, addr, sizeof(*addr));
 602	con->delay = 0;      /* reset backoff memory */
 603	mutex_unlock(&con->mutex);
 604	queue_con(con);
 605}
 606EXPORT_SYMBOL(ceph_con_open);
 607
 608/*
  609 * return true if this connection was ever successfully opened
 610 */
 611bool ceph_con_opened(struct ceph_connection *con)
 612{
 613	if (ceph_msgr2(from_msgr(con->msgr)))
 614		return ceph_con_v2_opened(con);
 615
 616	return ceph_con_v1_opened(con);
 617}
 618
 619/*
 620 * initialize a new connection.
 621 */
 622void ceph_con_init(struct ceph_connection *con, void *private,
 623	const struct ceph_connection_operations *ops,
 624	struct ceph_messenger *msgr)
 625{
 626	dout("con_init %p\n", con);
 627	memset(con, 0, sizeof(*con));
 628	con->private = private;
 629	con->ops = ops;
 630	con->msgr = msgr;
 631
 632	con_sock_state_init(con);
 633
 634	mutex_init(&con->mutex);
 635	INIT_LIST_HEAD(&con->out_queue);
 636	INIT_LIST_HEAD(&con->out_sent);
 637	INIT_DELAYED_WORK(&con->work, ceph_con_workfn);
 638
 639	con->state = CEPH_CON_S_CLOSED;
 640}
 641EXPORT_SYMBOL(ceph_con_init);
 642
 643/*
 644 * We maintain a global counter to order connection attempts.  Get
 645 * a unique seq greater than @gt.
 646 */
 647u32 ceph_get_global_seq(struct ceph_messenger *msgr, u32 gt)
 648{
 649	u32 ret;
 650
 651	spin_lock(&msgr->global_seq_lock);
 652	if (msgr->global_seq < gt)
 653		msgr->global_seq = gt;
 654	ret = ++msgr->global_seq;
 655	spin_unlock(&msgr->global_seq_lock);
 656	return ret;
 657}
 658
 659/*
 660 * Discard messages that have been acked by the server.
 661 */
 662void ceph_con_discard_sent(struct ceph_connection *con, u64 ack_seq)
 663{
 664	struct ceph_msg *msg;
 665	u64 seq;
 666
 667	dout("%s con %p ack_seq %llu\n", __func__, con, ack_seq);
 668	while (!list_empty(&con->out_sent)) {
 669		msg = list_first_entry(&con->out_sent, struct ceph_msg,
 670				       list_head);
 671		WARN_ON(msg->needs_out_seq);
 672		seq = le64_to_cpu(msg->hdr.seq);
 673		if (seq > ack_seq)
 674			break;
 675
 676		dout("%s con %p discarding msg %p seq %llu\n", __func__, con,
 677		     msg, seq);
 678		ceph_msg_remove(msg);
 679	}
 680}
 681
 682/*
 683 * Discard messages that have been requeued in con_fault(), up to
 684 * reconnect_seq.  This avoids gratuitously resending messages that
 685 * the server had received and handled prior to reconnect.
 686 */
 687void ceph_con_discard_requeued(struct ceph_connection *con, u64 reconnect_seq)
 688{
 689	struct ceph_msg *msg;
 690	u64 seq;
 691
 692	dout("%s con %p reconnect_seq %llu\n", __func__, con, reconnect_seq);
 693	while (!list_empty(&con->out_queue)) {
 694		msg = list_first_entry(&con->out_queue, struct ceph_msg,
 695				       list_head);
 696		if (msg->needs_out_seq)
 697			break;
 698		seq = le64_to_cpu(msg->hdr.seq);
 699		if (seq > reconnect_seq)
 700			break;
 701
 702		dout("%s con %p discarding msg %p seq %llu\n", __func__, con,
 703		     msg, seq);
 704		ceph_msg_remove(msg);
 705	}
 706}
 707
 708#ifdef CONFIG_BLOCK
 709
 710/*
 711 * For a bio data item, a piece is whatever remains of the next
 712 * entry in the current bio iovec, or the first entry in the next
 713 * bio in the list.
 714 */
 715static void ceph_msg_data_bio_cursor_init(struct ceph_msg_data_cursor *cursor,
 716					size_t length)
 717{
 718	struct ceph_msg_data *data = cursor->data;
 719	struct ceph_bio_iter *it = &cursor->bio_iter;
 720
 721	cursor->resid = min_t(size_t, length, data->bio_length);
 722	*it = data->bio_pos;
 723	if (cursor->resid < it->iter.bi_size)
 724		it->iter.bi_size = cursor->resid;
 725
 726	BUG_ON(cursor->resid < bio_iter_len(it->bio, it->iter));
 727	cursor->last_piece = cursor->resid == bio_iter_len(it->bio, it->iter);
 728}
 729
 730static struct page *ceph_msg_data_bio_next(struct ceph_msg_data_cursor *cursor,
 731						size_t *page_offset,
 732						size_t *length)
 733{
 734	struct bio_vec bv = bio_iter_iovec(cursor->bio_iter.bio,
 735					   cursor->bio_iter.iter);
 736
 737	*page_offset = bv.bv_offset;
 738	*length = bv.bv_len;
 739	return bv.bv_page;
 740}
 741
 742static bool ceph_msg_data_bio_advance(struct ceph_msg_data_cursor *cursor,
 743					size_t bytes)
 744{
 745	struct ceph_bio_iter *it = &cursor->bio_iter;
 746	struct page *page = bio_iter_page(it->bio, it->iter);
 747
 748	BUG_ON(bytes > cursor->resid);
 749	BUG_ON(bytes > bio_iter_len(it->bio, it->iter));
 750	cursor->resid -= bytes;
 751	bio_advance_iter(it->bio, &it->iter, bytes);
 752
 753	if (!cursor->resid) {
 754		BUG_ON(!cursor->last_piece);
 755		return false;   /* no more data */
 756	}
 757
 758	if (!bytes || (it->iter.bi_size && it->iter.bi_bvec_done &&
 759		       page == bio_iter_page(it->bio, it->iter)))
 760		return false;	/* more bytes to process in this segment */
 761
 762	if (!it->iter.bi_size) {
 763		it->bio = it->bio->bi_next;
 764		it->iter = it->bio->bi_iter;
 765		if (cursor->resid < it->iter.bi_size)
 766			it->iter.bi_size = cursor->resid;
 767	}
 768
 769	BUG_ON(cursor->last_piece);
 770	BUG_ON(cursor->resid < bio_iter_len(it->bio, it->iter));
 771	cursor->last_piece = cursor->resid == bio_iter_len(it->bio, it->iter);
 772	return true;
 773}
 774#endif /* CONFIG_BLOCK */
 775
 776static void ceph_msg_data_bvecs_cursor_init(struct ceph_msg_data_cursor *cursor,
 777					size_t length)
 778{
 779	struct ceph_msg_data *data = cursor->data;
 780	struct bio_vec *bvecs = data->bvec_pos.bvecs;
 781
 782	cursor->resid = min_t(size_t, length, data->bvec_pos.iter.bi_size);
 783	cursor->bvec_iter = data->bvec_pos.iter;
 784	cursor->bvec_iter.bi_size = cursor->resid;
 785
 786	BUG_ON(cursor->resid < bvec_iter_len(bvecs, cursor->bvec_iter));
 787	cursor->last_piece =
 788	    cursor->resid == bvec_iter_len(bvecs, cursor->bvec_iter);
 789}
 790
 791static struct page *ceph_msg_data_bvecs_next(struct ceph_msg_data_cursor *cursor,
 792						size_t *page_offset,
 793						size_t *length)
 794{
 795	struct bio_vec bv = bvec_iter_bvec(cursor->data->bvec_pos.bvecs,
 796					   cursor->bvec_iter);
 797
 798	*page_offset = bv.bv_offset;
 799	*length = bv.bv_len;
 800	return bv.bv_page;
 801}
 802
 803static bool ceph_msg_data_bvecs_advance(struct ceph_msg_data_cursor *cursor,
 804					size_t bytes)
 805{
 806	struct bio_vec *bvecs = cursor->data->bvec_pos.bvecs;
 807	struct page *page = bvec_iter_page(bvecs, cursor->bvec_iter);
 808
 809	BUG_ON(bytes > cursor->resid);
 810	BUG_ON(bytes > bvec_iter_len(bvecs, cursor->bvec_iter));
 811	cursor->resid -= bytes;
 812	bvec_iter_advance(bvecs, &cursor->bvec_iter, bytes);
 813
 814	if (!cursor->resid) {
 815		BUG_ON(!cursor->last_piece);
 816		return false;   /* no more data */
 817	}
 818
 819	if (!bytes || (cursor->bvec_iter.bi_bvec_done &&
 820		       page == bvec_iter_page(bvecs, cursor->bvec_iter)))
 821		return false;	/* more bytes to process in this segment */
 822
 823	BUG_ON(cursor->last_piece);
 824	BUG_ON(cursor->resid < bvec_iter_len(bvecs, cursor->bvec_iter));
 825	cursor->last_piece =
 826	    cursor->resid == bvec_iter_len(bvecs, cursor->bvec_iter);
 827	return true;
 828}
 829
 830/*
 831 * For a page array, a piece comes from the first page in the array
 832 * that has not already been fully consumed.
 833 */
 834static void ceph_msg_data_pages_cursor_init(struct ceph_msg_data_cursor *cursor,
 835					size_t length)
 836{
 837	struct ceph_msg_data *data = cursor->data;
 838	int page_count;
 839
 840	BUG_ON(data->type != CEPH_MSG_DATA_PAGES);
 841
 842	BUG_ON(!data->pages);
 843	BUG_ON(!data->length);
 844
 845	cursor->resid = min(length, data->length);
 846	page_count = calc_pages_for(data->alignment, (u64)data->length);
 847	cursor->page_offset = data->alignment & ~PAGE_MASK;
 848	cursor->page_index = 0;
 849	BUG_ON(page_count > (int)USHRT_MAX);
 850	cursor->page_count = (unsigned short)page_count;
 851	BUG_ON(length > SIZE_MAX - cursor->page_offset);
 852	cursor->last_piece = cursor->page_offset + cursor->resid <= PAGE_SIZE;
 853}
 854
 855static struct page *
 856ceph_msg_data_pages_next(struct ceph_msg_data_cursor *cursor,
 857					size_t *page_offset, size_t *length)
 858{
 859	struct ceph_msg_data *data = cursor->data;
 860
 861	BUG_ON(data->type != CEPH_MSG_DATA_PAGES);
 862
 863	BUG_ON(cursor->page_index >= cursor->page_count);
 864	BUG_ON(cursor->page_offset >= PAGE_SIZE);
 865
 866	*page_offset = cursor->page_offset;
 867	if (cursor->last_piece)
 868		*length = cursor->resid;
 869	else
 870		*length = PAGE_SIZE - *page_offset;
 871
 872	return data->pages[cursor->page_index];
 873}
 874
 875static bool ceph_msg_data_pages_advance(struct ceph_msg_data_cursor *cursor,
 876						size_t bytes)
 877{
 878	BUG_ON(cursor->data->type != CEPH_MSG_DATA_PAGES);
 879
 880	BUG_ON(cursor->page_offset + bytes > PAGE_SIZE);
 881
 882	/* Advance the cursor page offset */
 883
 884	cursor->resid -= bytes;
 885	cursor->page_offset = (cursor->page_offset + bytes) & ~PAGE_MASK;
 886	if (!bytes || cursor->page_offset)
 887		return false;	/* more bytes to process in the current page */
 888
 889	if (!cursor->resid)
 890		return false;   /* no more data */
 891
 892	/* Move on to the next page; offset is already at 0 */
 893
 894	BUG_ON(cursor->page_index >= cursor->page_count);
 895	cursor->page_index++;
 896	cursor->last_piece = cursor->resid <= PAGE_SIZE;
 897
 898	return true;
 899}
 900
 901/*
 902 * For a pagelist, a piece is whatever remains to be consumed in the
 903 * first page in the list, or the front of the next page.
 904 */
 905static void
 906ceph_msg_data_pagelist_cursor_init(struct ceph_msg_data_cursor *cursor,
 907					size_t length)
 908{
 909	struct ceph_msg_data *data = cursor->data;
 910	struct ceph_pagelist *pagelist;
 911	struct page *page;
 912
 913	BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST);
 914
 915	pagelist = data->pagelist;
 916	BUG_ON(!pagelist);
 917
 918	if (!length)
 919		return;		/* pagelist can be assigned but empty */
 920
 921	BUG_ON(list_empty(&pagelist->head));
 922	page = list_first_entry(&pagelist->head, struct page, lru);
 923
 924	cursor->resid = min(length, pagelist->length);
 925	cursor->page = page;
 926	cursor->offset = 0;
 927	cursor->last_piece = cursor->resid <= PAGE_SIZE;
 928}
 929
 930static struct page *
 931ceph_msg_data_pagelist_next(struct ceph_msg_data_cursor *cursor,
 932				size_t *page_offset, size_t *length)
 933{
 934	struct ceph_msg_data *data = cursor->data;
 935	struct ceph_pagelist *pagelist;
 936
 937	BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST);
 938
 939	pagelist = data->pagelist;
 940	BUG_ON(!pagelist);
 941
 942	BUG_ON(!cursor->page);
 943	BUG_ON(cursor->offset + cursor->resid != pagelist->length);
 944
 945	/* offset of first page in pagelist is always 0 */
 946	*page_offset = cursor->offset & ~PAGE_MASK;
 947	if (cursor->last_piece)
 948		*length = cursor->resid;
 949	else
 950		*length = PAGE_SIZE - *page_offset;
 951
 952	return cursor->page;
 953}
 954
 955static bool ceph_msg_data_pagelist_advance(struct ceph_msg_data_cursor *cursor,
 956						size_t bytes)
 957{
 958	struct ceph_msg_data *data = cursor->data;
 959	struct ceph_pagelist *pagelist;
 960
 961	BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST);
 962
 963	pagelist = data->pagelist;
 964	BUG_ON(!pagelist);
 965
 966	BUG_ON(cursor->offset + cursor->resid != pagelist->length);
 967	BUG_ON((cursor->offset & ~PAGE_MASK) + bytes > PAGE_SIZE);
 968
 969	/* Advance the cursor offset */
 970
 971	cursor->resid -= bytes;
 972	cursor->offset += bytes;
 973	/* offset of first page in pagelist is always 0 */
 974	if (!bytes || cursor->offset & ~PAGE_MASK)
 975		return false;	/* more bytes to process in the current page */
 976
 977	if (!cursor->resid)
 978		return false;   /* no more data */
 979
 980	/* Move on to the next page */
 981
 982	BUG_ON(list_is_last(&cursor->page->lru, &pagelist->head));
 983	cursor->page = list_next_entry(cursor->page, lru);
 984	cursor->last_piece = cursor->resid <= PAGE_SIZE;
 985
 986	return true;
 987}
 988
 989/*
 990 * Message data is handled (sent or received) in pieces, where each
 991 * piece resides on a single page.  The network layer might not
 992 * consume an entire piece at once.  A data item's cursor keeps
 993 * track of which piece is next to process and how much remains to
 994 * be processed in that piece.  It also tracks whether the current
 995 * piece is the last one in the data item.
 996 */
 997static void __ceph_msg_data_cursor_init(struct ceph_msg_data_cursor *cursor)
 998{
 999	size_t length = cursor->total_resid;
1000
1001	switch (cursor->data->type) {
1002	case CEPH_MSG_DATA_PAGELIST:
1003		ceph_msg_data_pagelist_cursor_init(cursor, length);
1004		break;
1005	case CEPH_MSG_DATA_PAGES:
1006		ceph_msg_data_pages_cursor_init(cursor, length);
1007		break;
1008#ifdef CONFIG_BLOCK
1009	case CEPH_MSG_DATA_BIO:
1010		ceph_msg_data_bio_cursor_init(cursor, length);
1011		break;
1012#endif /* CONFIG_BLOCK */
1013	case CEPH_MSG_DATA_BVECS:
1014		ceph_msg_data_bvecs_cursor_init(cursor, length);
1015		break;
1016	case CEPH_MSG_DATA_NONE:
1017	default:
1018		/* BUG(); */
1019		break;
1020	}
1021	cursor->need_crc = true;
1022}
1023
1024void ceph_msg_data_cursor_init(struct ceph_msg_data_cursor *cursor,
1025			       struct ceph_msg *msg, size_t length)
1026{
1027	BUG_ON(!length);
1028	BUG_ON(length > msg->data_length);
1029	BUG_ON(!msg->num_data_items);
1030
1031	cursor->total_resid = length;
1032	cursor->data = msg->data;
1033
1034	__ceph_msg_data_cursor_init(cursor);
1035}
1036
1037/*
1038 * Return the page containing the next piece to process for a given
1039 * data item, and supply the page offset and length of that piece.
1040 * Indicate whether this is the last piece in this data item.
1041 */
1042struct page *ceph_msg_data_next(struct ceph_msg_data_cursor *cursor,
1043				size_t *page_offset, size_t *length,
1044				bool *last_piece)
1045{
1046	struct page *page;
1047
1048	switch (cursor->data->type) {
1049	case CEPH_MSG_DATA_PAGELIST:
1050		page = ceph_msg_data_pagelist_next(cursor, page_offset, length);
1051		break;
1052	case CEPH_MSG_DATA_PAGES:
1053		page = ceph_msg_data_pages_next(cursor, page_offset, length);
1054		break;
1055#ifdef CONFIG_BLOCK
1056	case CEPH_MSG_DATA_BIO:
1057		page = ceph_msg_data_bio_next(cursor, page_offset, length);
1058		break;
1059#endif /* CONFIG_BLOCK */
1060	case CEPH_MSG_DATA_BVECS:
1061		page = ceph_msg_data_bvecs_next(cursor, page_offset, length);
1062		break;
1063	case CEPH_MSG_DATA_NONE:
1064	default:
1065		page = NULL;
1066		break;
1067	}
1068
1069	BUG_ON(!page);
1070	BUG_ON(*page_offset + *length > PAGE_SIZE);
1071	BUG_ON(!*length);
1072	BUG_ON(*length > cursor->resid);
1073	if (last_piece)
1074		*last_piece = cursor->last_piece;
1075
1076	return page;
1077}
1078
 1079/*
 1080 * Advance the cursor by @bytes, moving on to the next piece (and
 1081 * flagging that a new CRC is needed) once the current one is consumed.
 1082 */
1083void ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor, size_t bytes)
1084{
1085	bool new_piece;
1086
1087	BUG_ON(bytes > cursor->resid);
1088	switch (cursor->data->type) {
1089	case CEPH_MSG_DATA_PAGELIST:
1090		new_piece = ceph_msg_data_pagelist_advance(cursor, bytes);
1091		break;
1092	case CEPH_MSG_DATA_PAGES:
1093		new_piece = ceph_msg_data_pages_advance(cursor, bytes);
1094		break;
1095#ifdef CONFIG_BLOCK
1096	case CEPH_MSG_DATA_BIO:
1097		new_piece = ceph_msg_data_bio_advance(cursor, bytes);
1098		break;
1099#endif /* CONFIG_BLOCK */
1100	case CEPH_MSG_DATA_BVECS:
1101		new_piece = ceph_msg_data_bvecs_advance(cursor, bytes);
1102		break;
1103	case CEPH_MSG_DATA_NONE:
1104	default:
1105		BUG();
1106		break;
1107	}
1108	cursor->total_resid -= bytes;
1109
1110	if (!cursor->resid && cursor->total_resid) {
1111		WARN_ON(!cursor->last_piece);
1112		cursor->data++;
1113		__ceph_msg_data_cursor_init(cursor);
1114		new_piece = true;
1115	}
1116	cursor->need_crc = new_piece;
1117}
1118
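/*
 * Editor's note: an illustrative sketch, not part of the upstream file.
 * This is roughly how the v1/v2 protocol code drives the cursor API above
 * when writing message data to the socket; send_piece() is a hypothetical
 * stand-in for the actual socket I/O helper.
 *
 *	struct ceph_msg_data_cursor cursor;
 *	size_t page_offset, length;
 *	u32 crc = 0;
 *	int sent;
 *
 *	ceph_msg_data_cursor_init(&cursor, msg, msg->data_length);
 *	while (cursor.total_resid) {
 *		struct page *page;
 *
 *		page = ceph_msg_data_next(&cursor, &page_offset, &length, NULL);
 *		if (cursor.need_crc)
 *			crc = ceph_crc32c_page(crc, page, page_offset, length);
 *		sent = send_piece(sock, page, page_offset, length);
 *		if (sent <= 0)
 *			break;
 *		ceph_msg_data_advance(&cursor, sent);
 *	}
 */
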
1119u32 ceph_crc32c_page(u32 crc, struct page *page, unsigned int page_offset,
1120		     unsigned int length)
1121{
1122	char *kaddr;
1123
1124	kaddr = kmap(page);
1125	BUG_ON(kaddr == NULL);
1126	crc = crc32c(crc, kaddr + page_offset, length);
1127	kunmap(page);
1128
1129	return crc;
1130}
1131
1132bool ceph_addr_is_blank(const struct ceph_entity_addr *addr)
1133{
1134	struct sockaddr_storage ss = addr->in_addr; /* align */
1135	struct in_addr *addr4 = &((struct sockaddr_in *)&ss)->sin_addr;
1136	struct in6_addr *addr6 = &((struct sockaddr_in6 *)&ss)->sin6_addr;
1137
1138	switch (ss.ss_family) {
1139	case AF_INET:
1140		return addr4->s_addr == htonl(INADDR_ANY);
1141	case AF_INET6:
1142		return ipv6_addr_any(addr6);
1143	default:
1144		return true;
1145	}
1146}
1147
1148int ceph_addr_port(const struct ceph_entity_addr *addr)
1149{
1150	switch (get_unaligned(&addr->in_addr.ss_family)) {
1151	case AF_INET:
1152		return ntohs(get_unaligned(&((struct sockaddr_in *)&addr->in_addr)->sin_port));
1153	case AF_INET6:
1154		return ntohs(get_unaligned(&((struct sockaddr_in6 *)&addr->in_addr)->sin6_port));
1155	}
1156	return 0;
1157}
1158
1159void ceph_addr_set_port(struct ceph_entity_addr *addr, int p)
1160{
1161	switch (get_unaligned(&addr->in_addr.ss_family)) {
1162	case AF_INET:
1163		put_unaligned(htons(p), &((struct sockaddr_in *)&addr->in_addr)->sin_port);
1164		break;
1165	case AF_INET6:
1166		put_unaligned(htons(p), &((struct sockaddr_in6 *)&addr->in_addr)->sin6_port);
1167		break;
1168	}
1169}
1170
1171/*
1172 * Unlike other *_pton function semantics, zero indicates success.
1173 */
1174static int ceph_pton(const char *str, size_t len, struct ceph_entity_addr *addr,
1175		char delim, const char **ipend)
1176{
1177	memset(&addr->in_addr, 0, sizeof(addr->in_addr));
1178
1179	if (in4_pton(str, len, (u8 *)&((struct sockaddr_in *)&addr->in_addr)->sin_addr.s_addr, delim, ipend)) {
1180		put_unaligned(AF_INET, &addr->in_addr.ss_family);
1181		return 0;
1182	}
1183
1184	if (in6_pton(str, len, (u8 *)&((struct sockaddr_in6 *)&addr->in_addr)->sin6_addr.s6_addr, delim, ipend)) {
1185		put_unaligned(AF_INET6, &addr->in_addr.ss_family);
1186		return 0;
1187	}
1188
1189	return -EINVAL;
1190}
1191
1192/*
1193 * Extract hostname string and resolve using kernel DNS facility.
1194 */
1195#ifdef CONFIG_CEPH_LIB_USE_DNS_RESOLVER
1196static int ceph_dns_resolve_name(const char *name, size_t namelen,
1197		struct ceph_entity_addr *addr, char delim, const char **ipend)
1198{
1199	const char *end, *delim_p;
1200	char *colon_p, *ip_addr = NULL;
1201	int ip_len, ret;
1202
1203	/*
1204	 * The end of the hostname occurs immediately preceding the delimiter or
1205	 * the port marker (':') where the delimiter takes precedence.
1206	 */
1207	delim_p = memchr(name, delim, namelen);
1208	colon_p = memchr(name, ':', namelen);
1209
1210	if (delim_p && colon_p)
1211		end = delim_p < colon_p ? delim_p : colon_p;
1212	else if (!delim_p && colon_p)
1213		end = colon_p;
1214	else {
1215		end = delim_p;
1216		if (!end) /* case: hostname:/ */
1217			end = name + namelen;
1218	}
1219
1220	if (end <= name)
1221		return -EINVAL;
1222
1223	/* do dns_resolve upcall */
1224	ip_len = dns_query(current->nsproxy->net_ns,
1225			   NULL, name, end - name, NULL, &ip_addr, NULL, false);
1226	if (ip_len > 0)
1227		ret = ceph_pton(ip_addr, ip_len, addr, -1, NULL);
1228	else
1229		ret = -ESRCH;
1230
1231	kfree(ip_addr);
1232
1233	*ipend = end;
1234
1235	pr_info("resolve '%.*s' (ret=%d): %s\n", (int)(end - name), name,
1236			ret, ret ? "failed" : ceph_pr_addr(addr));
1237
1238	return ret;
1239}
1240#else
1241static inline int ceph_dns_resolve_name(const char *name, size_t namelen,
1242		struct ceph_entity_addr *addr, char delim, const char **ipend)
1243{
1244	return -EINVAL;
1245}
1246#endif
1247
1248/*
1249 * Parse a server name (IP or hostname). If a valid IP address is not found
1250 * then try to extract a hostname to resolve using userspace DNS upcall.
1251 */
1252static int ceph_parse_server_name(const char *name, size_t namelen,
1253		struct ceph_entity_addr *addr, char delim, const char **ipend)
1254{
1255	int ret;
1256
1257	ret = ceph_pton(name, namelen, addr, delim, ipend);
1258	if (ret)
1259		ret = ceph_dns_resolve_name(name, namelen, addr, delim, ipend);
1260
1261	return ret;
1262}
1263
1264/*
1265 * Parse an ip[:port] list into an addr array.  Use the default
1266 * monitor port if a port isn't specified.
1267 */
1268int ceph_parse_ips(const char *c, const char *end,
1269		   struct ceph_entity_addr *addr,
1270		   int max_count, int *count)
1271{
1272	int i, ret = -EINVAL;
1273	const char *p = c;
1274
1275	dout("parse_ips on '%.*s'\n", (int)(end-c), c);
1276	for (i = 0; i < max_count; i++) {
1277		const char *ipend;
1278		int port;
1279		char delim = ',';
1280
1281		if (*p == '[') {
1282			delim = ']';
1283			p++;
1284		}
1285
1286		ret = ceph_parse_server_name(p, end - p, &addr[i], delim, &ipend);
1287		if (ret)
1288			goto bad;
1289		ret = -EINVAL;
1290
1291		p = ipend;
1292
1293		if (delim == ']') {
1294			if (*p != ']') {
1295				dout("missing matching ']'\n");
1296				goto bad;
1297			}
1298			p++;
1299		}
1300
1301		/* port? */
1302		if (p < end && *p == ':') {
1303			port = 0;
1304			p++;
1305			while (p < end && *p >= '0' && *p <= '9') {
1306				port = (port * 10) + (*p - '0');
1307				p++;
1308			}
1309			if (port == 0)
1310				port = CEPH_MON_PORT;
1311			else if (port > 65535)
1312				goto bad;
1313		} else {
1314			port = CEPH_MON_PORT;
1315		}
1316
1317		ceph_addr_set_port(&addr[i], port);
1318		/*
1319		 * We want the type to be set according to ms_mode
1320		 * option, but options are normally parsed after mon
1321		 * addresses.  Rather than complicating parsing, set
1322		 * to LEGACY and override in build_initial_monmap()
1323		 * for mon addresses and ceph_messenger_init() for
1324		 * ip option.
1325		 */
1326		addr[i].type = CEPH_ENTITY_ADDR_TYPE_LEGACY;
1327		addr[i].nonce = 0;
1328
1329		dout("parse_ips got %s\n", ceph_pr_addr(&addr[i]));
1330
1331		if (p == end)
1332			break;
1333		if (*p != ',')
1334			goto bad;
1335		p++;
1336	}
1337
1338	if (p != end)
1339		goto bad;
1340
1341	if (count)
1342		*count = i + 1;
1343	return 0;
1344
1345bad:
1346	return ret;
1347}
1348
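/*
 * Editor's note: an illustrative sketch, not part of the upstream file.
 * A hedged example of feeding a monitor address list to ceph_parse_ips();
 * the address string and the on-stack array are made up for illustration.
 * Ports default to CEPH_MON_PORT when omitted.
 *
 *	struct ceph_entity_addr addrs[2];
 *	const char *s = "1.2.3.4:6789,[::1]";
 *	int count, err;
 *
 *	err = ceph_parse_ips(s, s + strlen(s), addrs, ARRAY_SIZE(addrs), &count);
 *	if (!err)
 *		dout("got %d addrs, first %s\n", count, ceph_pr_addr(&addrs[0]));
 */
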
1349/*
1350 * Process message.  This happens in the worker thread.  The callback should
1351 * be careful not to do anything that waits on other incoming messages or it
1352 * may deadlock.
1353 */
1354void ceph_con_process_message(struct ceph_connection *con)
1355{
1356	struct ceph_msg *msg = con->in_msg;
1357
1358	BUG_ON(con->in_msg->con != con);
1359	con->in_msg = NULL;
1360
1361	/* if first message, set peer_name */
1362	if (con->peer_name.type == 0)
1363		con->peer_name = msg->hdr.src;
1364
1365	con->in_seq++;
1366	mutex_unlock(&con->mutex);
1367
1368	dout("===== %p %llu from %s%lld %d=%s len %d+%d+%d (%u %u %u) =====\n",
1369	     msg, le64_to_cpu(msg->hdr.seq),
1370	     ENTITY_NAME(msg->hdr.src),
1371	     le16_to_cpu(msg->hdr.type),
1372	     ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
1373	     le32_to_cpu(msg->hdr.front_len),
1374	     le32_to_cpu(msg->hdr.middle_len),
1375	     le32_to_cpu(msg->hdr.data_len),
1376	     con->in_front_crc, con->in_middle_crc, con->in_data_crc);
1377	con->ops->dispatch(con, msg);
1378
1379	mutex_lock(&con->mutex);
1380}
1381
1382/*
1383 * Atomically queue work on a connection after the specified delay.
1384 * Bump @con reference to avoid races with connection teardown.
1385 * Returns 0 if work was queued, or an error code otherwise.
1386 */
1387static int queue_con_delay(struct ceph_connection *con, unsigned long delay)
1388{
1389	if (!con->ops->get(con)) {
1390		dout("%s %p ref count 0\n", __func__, con);
1391		return -ENOENT;
1392	}
1393
1394	if (delay >= HZ)
1395		delay = round_jiffies_relative(delay);
1396
1397	dout("%s %p %lu\n", __func__, con, delay);
1398	if (!queue_delayed_work(ceph_msgr_wq, &con->work, delay)) {
1399		dout("%s %p - already queued\n", __func__, con);
1400		con->ops->put(con);
1401		return -EBUSY;
1402	}
1403
1404	return 0;
1405}
1406
1407static void queue_con(struct ceph_connection *con)
1408{
1409	(void) queue_con_delay(con, 0);
1410}
1411
1412static void cancel_con(struct ceph_connection *con)
1413{
1414	if (cancel_delayed_work(&con->work)) {
1415		dout("%s %p\n", __func__, con);
1416		con->ops->put(con);
1417	}
1418}
1419
1420static bool con_sock_closed(struct ceph_connection *con)
1421{
1422	if (!ceph_con_flag_test_and_clear(con, CEPH_CON_F_SOCK_CLOSED))
1423		return false;
1424
1425#define CASE(x)								\
1426	case CEPH_CON_S_ ## x:						\
1427		con->error_msg = "socket closed (con state " #x ")";	\
1428		break;
1429
1430	switch (con->state) {
1431	CASE(CLOSED);
1432	CASE(PREOPEN);
1433	CASE(V1_BANNER);
1434	CASE(V1_CONNECT_MSG);
1435	CASE(V2_BANNER_PREFIX);
1436	CASE(V2_BANNER_PAYLOAD);
1437	CASE(V2_HELLO);
1438	CASE(V2_AUTH);
1439	CASE(V2_AUTH_SIGNATURE);
1440	CASE(V2_SESSION_CONNECT);
1441	CASE(V2_SESSION_RECONNECT);
1442	CASE(OPEN);
1443	CASE(STANDBY);
1444	default:
1445		BUG();
1446	}
1447#undef CASE
1448
1449	return true;
1450}
1451
1452static bool con_backoff(struct ceph_connection *con)
1453{
1454	int ret;
1455
1456	if (!ceph_con_flag_test_and_clear(con, CEPH_CON_F_BACKOFF))
1457		return false;
1458
1459	ret = queue_con_delay(con, con->delay);
1460	if (ret) {
1461		dout("%s: con %p FAILED to back off %lu\n", __func__,
1462			con, con->delay);
1463		BUG_ON(ret == -ENOENT);
1464		ceph_con_flag_set(con, CEPH_CON_F_BACKOFF);
1465	}
1466
1467	return true;
1468}
1469
1470/* Finish fault handling; con->mutex must *not* be held here */
1471
1472static void con_fault_finish(struct ceph_connection *con)
1473{
1474	dout("%s %p\n", __func__, con);
1475
1476	/*
1477	 * in case we faulted due to authentication, invalidate our
1478	 * current tickets so that we can get new ones.
1479	 */
1480	if (con->v1.auth_retry) {
1481		dout("auth_retry %d, invalidating\n", con->v1.auth_retry);
1482		if (con->ops->invalidate_authorizer)
1483			con->ops->invalidate_authorizer(con);
1484		con->v1.auth_retry = 0;
1485	}
1486
1487	if (con->ops->fault)
1488		con->ops->fault(con);
1489}
1490
1491/*
1492 * Do some work on a connection.  Drop a connection ref when we're done.
1493 */
1494static void ceph_con_workfn(struct work_struct *work)
1495{
1496	struct ceph_connection *con = container_of(work, struct ceph_connection,
1497						   work.work);
1498	bool fault;
1499
1500	mutex_lock(&con->mutex);
1501	while (true) {
1502		int ret;
1503
1504		if ((fault = con_sock_closed(con))) {
1505			dout("%s: con %p SOCK_CLOSED\n", __func__, con);
1506			break;
1507		}
1508		if (con_backoff(con)) {
1509			dout("%s: con %p BACKOFF\n", __func__, con);
1510			break;
1511		}
1512		if (con->state == CEPH_CON_S_STANDBY) {
1513			dout("%s: con %p STANDBY\n", __func__, con);
1514			break;
1515		}
1516		if (con->state == CEPH_CON_S_CLOSED) {
1517			dout("%s: con %p CLOSED\n", __func__, con);
1518			BUG_ON(con->sock);
1519			break;
1520		}
1521		if (con->state == CEPH_CON_S_PREOPEN) {
1522			dout("%s: con %p PREOPEN\n", __func__, con);
1523			BUG_ON(con->sock);
1524		}
1525
1526		if (ceph_msgr2(from_msgr(con->msgr)))
1527			ret = ceph_con_v2_try_read(con);
1528		else
1529			ret = ceph_con_v1_try_read(con);
1530		if (ret < 0) {
1531			if (ret == -EAGAIN)
1532				continue;
1533			if (!con->error_msg)
1534				con->error_msg = "socket error on read";
1535			fault = true;
1536			break;
1537		}
1538
1539		if (ceph_msgr2(from_msgr(con->msgr)))
1540			ret = ceph_con_v2_try_write(con);
1541		else
1542			ret = ceph_con_v1_try_write(con);
1543		if (ret < 0) {
1544			if (ret == -EAGAIN)
1545				continue;
1546			if (!con->error_msg)
1547				con->error_msg = "socket error on write";
1548			fault = true;
1549		}
1550
1551		break;	/* If we make it to here, we're done */
1552	}
1553	if (fault)
1554		con_fault(con);
1555	mutex_unlock(&con->mutex);
1556
1557	if (fault)
1558		con_fault_finish(con);
1559
1560	con->ops->put(con);
1561}
1562
1563/*
1564 * Generic error/fault handler.  A retry mechanism is used with
 1565 * exponential backoff.
1566 */
1567static void con_fault(struct ceph_connection *con)
1568{
1569	dout("fault %p state %d to peer %s\n",
1570	     con, con->state, ceph_pr_addr(&con->peer_addr));
1571
1572	pr_warn("%s%lld %s %s\n", ENTITY_NAME(con->peer_name),
1573		ceph_pr_addr(&con->peer_addr), con->error_msg);
1574	con->error_msg = NULL;
1575
1576	WARN_ON(con->state == CEPH_CON_S_STANDBY ||
1577		con->state == CEPH_CON_S_CLOSED);
1578
1579	ceph_con_reset_protocol(con);
1580
1581	if (ceph_con_flag_test(con, CEPH_CON_F_LOSSYTX)) {
1582		dout("fault on LOSSYTX channel, marking CLOSED\n");
1583		con->state = CEPH_CON_S_CLOSED;
1584		return;
1585	}
1586
1587	/* Requeue anything that hasn't been acked */
1588	list_splice_init(&con->out_sent, &con->out_queue);
1589
1590	/* If there are no messages queued or keepalive pending, place
1591	 * the connection in a STANDBY state */
1592	if (list_empty(&con->out_queue) &&
1593	    !ceph_con_flag_test(con, CEPH_CON_F_KEEPALIVE_PENDING)) {
1594		dout("fault %p setting STANDBY clearing WRITE_PENDING\n", con);
1595		ceph_con_flag_clear(con, CEPH_CON_F_WRITE_PENDING);
1596		con->state = CEPH_CON_S_STANDBY;
1597	} else {
1598		/* retry after a delay. */
1599		con->state = CEPH_CON_S_PREOPEN;
1600		if (!con->delay) {
1601			con->delay = BASE_DELAY_INTERVAL;
1602		} else if (con->delay < MAX_DELAY_INTERVAL) {
1603			con->delay *= 2;
1604			if (con->delay > MAX_DELAY_INTERVAL)
1605				con->delay = MAX_DELAY_INTERVAL;
1606		}
1607		ceph_con_flag_set(con, CEPH_CON_F_BACKOFF);
1608		queue_con(con);
1609	}
1610}
1611
1612void ceph_messenger_reset_nonce(struct ceph_messenger *msgr)
1613{
1614	u32 nonce = le32_to_cpu(msgr->inst.addr.nonce) + 1000000;
1615	msgr->inst.addr.nonce = cpu_to_le32(nonce);
1616	ceph_encode_my_addr(msgr);
1617}
1618
1619/*
1620 * initialize a new messenger instance
1621 */
1622void ceph_messenger_init(struct ceph_messenger *msgr,
1623			 struct ceph_entity_addr *myaddr)
1624{
1625	spin_lock_init(&msgr->global_seq_lock);
1626
1627	if (myaddr) {
1628		memcpy(&msgr->inst.addr.in_addr, &myaddr->in_addr,
1629		       sizeof(msgr->inst.addr.in_addr));
1630		ceph_addr_set_port(&msgr->inst.addr, 0);
1631	}
1632
1633	/*
1634	 * Since nautilus, clients are identified using type ANY.
1635	 * For msgr1, ceph_encode_banner_addr() munges it to NONE.
1636	 */
1637	msgr->inst.addr.type = CEPH_ENTITY_ADDR_TYPE_ANY;
1638
1639	/* generate a random non-zero nonce */
1640	do {
1641		get_random_bytes(&msgr->inst.addr.nonce,
1642				 sizeof(msgr->inst.addr.nonce));
1643	} while (!msgr->inst.addr.nonce);
1644	ceph_encode_my_addr(msgr);
1645
1646	atomic_set(&msgr->stopping, 0);
1647	write_pnet(&msgr->net, get_net(current->nsproxy->net_ns));
1648
1649	dout("%s %p\n", __func__, msgr);
1650}
1651
1652void ceph_messenger_fini(struct ceph_messenger *msgr)
1653{
1654	put_net(read_pnet(&msgr->net));
1655}
1656
1657static void msg_con_set(struct ceph_msg *msg, struct ceph_connection *con)
1658{
1659	if (msg->con)
1660		msg->con->ops->put(msg->con);
1661
1662	msg->con = con ? con->ops->get(con) : NULL;
1663	BUG_ON(msg->con != con);
1664}
1665
1666static void clear_standby(struct ceph_connection *con)
1667{
1668	/* come back from STANDBY? */
1669	if (con->state == CEPH_CON_S_STANDBY) {
1670		dout("clear_standby %p and ++connect_seq\n", con);
1671		con->state = CEPH_CON_S_PREOPEN;
1672		con->v1.connect_seq++;
1673		WARN_ON(ceph_con_flag_test(con, CEPH_CON_F_WRITE_PENDING));
1674		WARN_ON(ceph_con_flag_test(con, CEPH_CON_F_KEEPALIVE_PENDING));
1675	}
1676}
1677
1678/*
1679 * Queue up an outgoing message on the given connection.
1680 *
1681 * Consumes a ref on @msg.
1682 */
1683void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg)
1684{
1685	/* set src+dst */
1686	msg->hdr.src = con->msgr->inst.name;
1687	BUG_ON(msg->front.iov_len != le32_to_cpu(msg->hdr.front_len));
1688	msg->needs_out_seq = true;
1689
1690	mutex_lock(&con->mutex);
1691
1692	if (con->state == CEPH_CON_S_CLOSED) {
1693		dout("con_send %p closed, dropping %p\n", con, msg);
1694		ceph_msg_put(msg);
1695		mutex_unlock(&con->mutex);
1696		return;
1697	}
1698
1699	msg_con_set(msg, con);
1700
1701	BUG_ON(!list_empty(&msg->list_head));
1702	list_add_tail(&msg->list_head, &con->out_queue);
1703	dout("----- %p to %s%lld %d=%s len %d+%d+%d -----\n", msg,
1704	     ENTITY_NAME(con->peer_name), le16_to_cpu(msg->hdr.type),
1705	     ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
1706	     le32_to_cpu(msg->hdr.front_len),
1707	     le32_to_cpu(msg->hdr.middle_len),
1708	     le32_to_cpu(msg->hdr.data_len));
1709
1710	clear_standby(con);
1711	mutex_unlock(&con->mutex);
1712
1713	/* if there wasn't anything waiting to send before, queue
1714	 * new work */
1715	if (!ceph_con_flag_test_and_set(con, CEPH_CON_F_WRITE_PENDING))
1716		queue_con(con);
1717}
1718EXPORT_SYMBOL(ceph_con_send);
1719
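/*
 * Editor's note: an illustrative sketch, not part of the upstream file.
 * Rough outline of how a messenger client (e.g. the mon or osd client)
 * uses the exported connection API; the embedding session structure "s",
 * con_ops, osd_id, peer_addr and front_len are hypothetical, error
 * handling is elided, and ceph_con_send() consumes a reference on msg.
 *
 *	ceph_con_init(&s->con, s, &con_ops, &client->msgr);
 *	ceph_con_open(&s->con, CEPH_ENTITY_TYPE_OSD, osd_id, &peer_addr);
 *
 *	msg = ceph_msg_new(CEPH_MSG_OSD_OP, front_len, GFP_NOFS, false);
 *	if (msg)
 *		ceph_con_send(&s->con, msg);
 *	...
 *	ceph_con_close(&s->con);
 */
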
1720/*
1721 * Revoke a message that was previously queued for send
1722 */
1723void ceph_msg_revoke(struct ceph_msg *msg)
1724{
1725	struct ceph_connection *con = msg->con;
1726
1727	if (!con) {
1728		dout("%s msg %p null con\n", __func__, msg);
1729		return;		/* Message not in our possession */
1730	}
1731
1732	mutex_lock(&con->mutex);
1733	if (list_empty(&msg->list_head)) {
1734		WARN_ON(con->out_msg == msg);
1735		dout("%s con %p msg %p not linked\n", __func__, con, msg);
1736		mutex_unlock(&con->mutex);
1737		return;
1738	}
1739
1740	dout("%s con %p msg %p was linked\n", __func__, con, msg);
1741	msg->hdr.seq = 0;
1742	ceph_msg_remove(msg);
1743
1744	if (con->out_msg == msg) {
1745		WARN_ON(con->state != CEPH_CON_S_OPEN);
1746		dout("%s con %p msg %p was sending\n", __func__, con, msg);
1747		if (ceph_msgr2(from_msgr(con->msgr)))
1748			ceph_con_v2_revoke(con);
1749		else
1750			ceph_con_v1_revoke(con);
1751		ceph_msg_put(con->out_msg);
1752		con->out_msg = NULL;
1753	} else {
1754		dout("%s con %p msg %p not current, out_msg %p\n", __func__,
1755		     con, msg, con->out_msg);
1756	}
1757	mutex_unlock(&con->mutex);
1758}
1759
1760/*
1761 * Revoke a message that we may be reading data into
1762 */
1763void ceph_msg_revoke_incoming(struct ceph_msg *msg)
1764{
1765	struct ceph_connection *con = msg->con;
1766
1767	if (!con) {
1768		dout("%s msg %p null con\n", __func__, msg);
1769		return;		/* Message not in our possession */
1770	}
1771
1772	mutex_lock(&con->mutex);
1773	if (con->in_msg == msg) {
1774		WARN_ON(con->state != CEPH_CON_S_OPEN);
1775		dout("%s con %p msg %p was recving\n", __func__, con, msg);
1776		if (ceph_msgr2(from_msgr(con->msgr)))
1777			ceph_con_v2_revoke_incoming(con);
1778		else
1779			ceph_con_v1_revoke_incoming(con);
1780		ceph_msg_put(con->in_msg);
1781		con->in_msg = NULL;
1782	} else {
1783		dout("%s con %p msg %p not current, in_msg %p\n", __func__,
1784		     con, msg, con->in_msg);
1785	}
1786	mutex_unlock(&con->mutex);
1787}
1788
1789/*
1790 * Queue a keepalive byte to ensure the tcp connection is alive.
1791 */
1792void ceph_con_keepalive(struct ceph_connection *con)
1793{
1794	dout("con_keepalive %p\n", con);
1795	mutex_lock(&con->mutex);
1796	clear_standby(con);
1797	ceph_con_flag_set(con, CEPH_CON_F_KEEPALIVE_PENDING);
1798	mutex_unlock(&con->mutex);
1799
1800	if (!ceph_con_flag_test_and_set(con, CEPH_CON_F_WRITE_PENDING))
1801		queue_con(con);
1802}
1803EXPORT_SYMBOL(ceph_con_keepalive);
1804
1805bool ceph_con_keepalive_expired(struct ceph_connection *con,
1806			       unsigned long interval)
1807{
1808	if (interval > 0 &&
1809	    (con->peer_features & CEPH_FEATURE_MSGR_KEEPALIVE2)) {
1810		struct timespec64 now;
1811		struct timespec64 ts;
1812		ktime_get_real_ts64(&now);
1813		jiffies_to_timespec64(interval, &ts);
1814		ts = timespec64_add(con->last_keepalive_ack, ts);
1815		return timespec64_compare(&now, &ts) >= 0;
1816	}
1817	return false;
1818}
1819
1820static struct ceph_msg_data *ceph_msg_data_add(struct ceph_msg *msg)
1821{
1822	BUG_ON(msg->num_data_items >= msg->max_data_items);
1823	return &msg->data[msg->num_data_items++];
1824}
1825
1826static void ceph_msg_data_destroy(struct ceph_msg_data *data)
1827{
1828	if (data->type == CEPH_MSG_DATA_PAGES && data->own_pages) {
1829		int num_pages = calc_pages_for(data->alignment, data->length);
1830		ceph_release_page_vector(data->pages, num_pages);
1831	} else if (data->type == CEPH_MSG_DATA_PAGELIST) {
1832		ceph_pagelist_release(data->pagelist);
1833	}
1834}
1835
1836void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages,
1837			     size_t length, size_t alignment, bool own_pages)
1838{
1839	struct ceph_msg_data *data;
1840
1841	BUG_ON(!pages);
1842	BUG_ON(!length);
1843
1844	data = ceph_msg_data_add(msg);
1845	data->type = CEPH_MSG_DATA_PAGES;
1846	data->pages = pages;
1847	data->length = length;
1848	data->alignment = alignment & ~PAGE_MASK;
1849	data->own_pages = own_pages;
1850
1851	msg->data_length += length;
1852}
1853EXPORT_SYMBOL(ceph_msg_data_add_pages);
1854
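/*
 * Editor's note: an illustrative sketch, not part of the upstream file.
 * A hedged example of attaching a freshly allocated page vector as message
 * payload; "len" is a caller-supplied length and the message is assumed to
 * have been created with ceph_msg_new2() and a spare data item slot.  With
 * own_pages == true the pages are released along with the message (see
 * ceph_msg_data_destroy() above).
 *
 *	struct page **pages;
 *
 *	pages = ceph_alloc_page_vector(calc_pages_for(0, len), GFP_NOFS);
 *	if (!IS_ERR(pages))
 *		ceph_msg_data_add_pages(msg, pages, len, 0, true);
 */
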
1855void ceph_msg_data_add_pagelist(struct ceph_msg *msg,
1856				struct ceph_pagelist *pagelist)
1857{
1858	struct ceph_msg_data *data;
1859
1860	BUG_ON(!pagelist);
1861	BUG_ON(!pagelist->length);
1862
1863	data = ceph_msg_data_add(msg);
1864	data->type = CEPH_MSG_DATA_PAGELIST;
1865	refcount_inc(&pagelist->refcnt);
1866	data->pagelist = pagelist;
1867
1868	msg->data_length += pagelist->length;
1869}
1870EXPORT_SYMBOL(ceph_msg_data_add_pagelist);
1871
1872#ifdef	CONFIG_BLOCK
1873void ceph_msg_data_add_bio(struct ceph_msg *msg, struct ceph_bio_iter *bio_pos,
1874			   u32 length)
1875{
1876	struct ceph_msg_data *data;
1877
1878	data = ceph_msg_data_add(msg);
1879	data->type = CEPH_MSG_DATA_BIO;
1880	data->bio_pos = *bio_pos;
1881	data->bio_length = length;
1882
1883	msg->data_length += length;
1884}
1885EXPORT_SYMBOL(ceph_msg_data_add_bio);
1886#endif	/* CONFIG_BLOCK */
1887
1888void ceph_msg_data_add_bvecs(struct ceph_msg *msg,
1889			     struct ceph_bvec_iter *bvec_pos)
1890{
1891	struct ceph_msg_data *data;
1892
1893	data = ceph_msg_data_add(msg);
1894	data->type = CEPH_MSG_DATA_BVECS;
1895	data->bvec_pos = *bvec_pos;
1896
1897	msg->data_length += bvec_pos->iter.bi_size;
1898}
1899EXPORT_SYMBOL(ceph_msg_data_add_bvecs);
1900
1901/*
 1902 * construct a new message with the given type and front size;
 1903 * the new msg has a ref count of 1.
1904 */
1905struct ceph_msg *ceph_msg_new2(int type, int front_len, int max_data_items,
1906			       gfp_t flags, bool can_fail)
1907{
1908	struct ceph_msg *m;
1909
1910	m = kmem_cache_zalloc(ceph_msg_cache, flags);
1911	if (m == NULL)
1912		goto out;
1913
1914	m->hdr.type = cpu_to_le16(type);
1915	m->hdr.priority = cpu_to_le16(CEPH_MSG_PRIO_DEFAULT);
1916	m->hdr.front_len = cpu_to_le32(front_len);
1917
1918	INIT_LIST_HEAD(&m->list_head);
1919	kref_init(&m->kref);
1920
1921	/* front */
1922	if (front_len) {
1923		m->front.iov_base = ceph_kvmalloc(front_len, flags);
1924		if (m->front.iov_base == NULL) {
1925			dout("ceph_msg_new can't allocate %d bytes\n",
1926			     front_len);
1927			goto out2;
1928		}
1929	} else {
1930		m->front.iov_base = NULL;
1931	}
1932	m->front_alloc_len = m->front.iov_len = front_len;
1933
1934	if (max_data_items) {
1935		m->data = kmalloc_array(max_data_items, sizeof(*m->data),
1936					flags);
1937		if (!m->data)
1938			goto out2;
1939
1940		m->max_data_items = max_data_items;
1941	}
1942
1943	dout("ceph_msg_new %p front %d\n", m, front_len);
1944	return m;
1945
1946out2:
1947	ceph_msg_put(m);
1948out:
1949	if (!can_fail) {
1950		pr_err("msg_new can't create type %d front %d\n", type,
1951		       front_len);
1952		WARN_ON(1);
1953	} else {
1954		dout("msg_new can't create type %d front %d\n", type,
1955		     front_len);
1956	}
1957	return NULL;
1958}
1959EXPORT_SYMBOL(ceph_msg_new2);
1960
1961struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags,
1962			      bool can_fail)
1963{
1964	return ceph_msg_new2(type, front_len, 0, flags, can_fail);
1965}
1966EXPORT_SYMBOL(ceph_msg_new);
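/*
 * Sketch (hypothetical caller): allocate a message that carries one
 * page-vector data item.  CEPH_MSG_OSD_OP, calc_pages_for() and
 * ceph_alloc_page_vector() come from other libceph headers and are
 * assumed here for illustration.
 *
 *	static struct ceph_msg *example_build_data_msg(void)
 *	{
 *		size_t len = 2 * PAGE_SIZE;
 *		struct page **pages;
 *		struct ceph_msg *msg;
 *
 *		msg = ceph_msg_new2(CEPH_MSG_OSD_OP, 512, 1, GFP_NOFS, false);
 *		if (!msg)
 *			return NULL;
 *		pages = ceph_alloc_page_vector(calc_pages_for(0, len), GFP_NOFS);
 *		if (IS_ERR(pages)) {
 *			ceph_msg_put(msg);
 *			return NULL;
 *		}
 *		// own_pages == true: the pages are freed with the message
 *		ceph_msg_data_add_pages(msg, pages, len, 0, true);
 *		return msg;
 *	}
 */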
1967
1968/*
1969 * Allocate "middle" portion of a message, if it is needed and wasn't
1970 * allocated by alloc_msg.  This allows us to read a small fixed-size
1971 * per-type header in the front and then gracefully fail (i.e.,
1972 * propagate the error to the caller based on info in the front) when
1973 * the middle is too large.
1974 */
1975static int ceph_alloc_middle(struct ceph_connection *con, struct ceph_msg *msg)
1976{
1977	int type = le16_to_cpu(msg->hdr.type);
1978	int middle_len = le32_to_cpu(msg->hdr.middle_len);
1979
1980	dout("alloc_middle %p type %d %s middle_len %d\n", msg, type,
1981	     ceph_msg_type_name(type), middle_len);
1982	BUG_ON(!middle_len);
1983	BUG_ON(msg->middle);
1984
1985	msg->middle = ceph_buffer_new(middle_len, GFP_NOFS);
1986	if (!msg->middle)
1987		return -ENOMEM;
1988	return 0;
1989}
1990
1991/*
1992 * Allocate a message for receiving an incoming message on a
1993 * connection, and save the result in con->in_msg.  Uses the
1994 * connection's private alloc_msg op if available.
1995 *
1996 * Returns 0 on success, or a negative error code.
1997 *
1998 * On success, if we set *skip = 1:
1999 *  - the next message should be skipped and ignored.
2000 *  - con->in_msg == NULL
2001 * or if we set *skip = 0:
2002 *  - con->in_msg is non-null.
2003 * On error (ENOMEM, EAGAIN, ...),
2004 *  - con->in_msg == NULL
2005 */
2006int ceph_con_in_msg_alloc(struct ceph_connection *con,
2007			  struct ceph_msg_header *hdr, int *skip)
2008{
2009	int middle_len = le32_to_cpu(hdr->middle_len);
2010	struct ceph_msg *msg;
2011	int ret = 0;
2012
2013	BUG_ON(con->in_msg != NULL);
2014	BUG_ON(!con->ops->alloc_msg);
2015
2016	mutex_unlock(&con->mutex);
2017	msg = con->ops->alloc_msg(con, hdr, skip);
2018	mutex_lock(&con->mutex);
2019	if (con->state != CEPH_CON_S_OPEN) {
2020		if (msg)
2021			ceph_msg_put(msg);
2022		return -EAGAIN;
2023	}
2024	if (msg) {
2025		BUG_ON(*skip);
2026		msg_con_set(msg, con);
2027		con->in_msg = msg;
2028	} else {
2029		/*
2030		 * Null message pointer means either we should skip
2031		 * this message or we couldn't allocate memory.  The
2032		 * former is not an error.
2033		 */
2034		if (*skip)
2035			return 0;
2036
2037		con->error_msg = "error allocating memory for incoming message";
2038		return -ENOMEM;
2039	}
2040	memcpy(&con->in_msg->hdr, hdr, sizeof(*hdr));
2041
2042	if (middle_len && !con->in_msg->middle) {
2043		ret = ceph_alloc_middle(con, con->in_msg);
2044		if (ret < 0) {
2045			ceph_msg_put(con->in_msg);
2046			con->in_msg = NULL;
2047		}
2048	}
2049
2050	return ret;
2051}
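/*
 * Sketch of a minimal ->alloc_msg() hook (hypothetical, not part of this
 * file): return a message sized from the header, set *skip to drop the
 * incoming message without it being treated as an error, or return NULL
 * with *skip == 0 to signal -ENOMEM.  example_wants_type() is an assumed
 * caller-side predicate.
 *
 *	static struct ceph_msg *example_alloc_msg(struct ceph_connection *con,
 *						  struct ceph_msg_header *hdr,
 *						  int *skip)
 *	{
 *		int type = le16_to_cpu(hdr->type);
 *		int front_len = le32_to_cpu(hdr->front_len);
 *
 *		*skip = 0;
 *		if (!example_wants_type(type)) {
 *			*skip = 1;
 *			return NULL;
 *		}
 *		return ceph_msg_new(type, front_len, GFP_NOFS, false);
 *	}
 */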
2052
2053void ceph_con_get_out_msg(struct ceph_connection *con)
2054{
2055	struct ceph_msg *msg;
2056
2057	BUG_ON(list_empty(&con->out_queue));
2058	msg = list_first_entry(&con->out_queue, struct ceph_msg, list_head);
2059	WARN_ON(msg->con != con);
2060
2061	/*
2062	 * Put the message on "sent" list using a ref from ceph_con_send().
2063	 * It is put when the message is acked or revoked.
2064	 */
2065	list_move_tail(&msg->list_head, &con->out_sent);
2066
2067	/*
2068	 * Only assign outgoing seq # if we haven't sent this message
2069	 * yet.  If it is requeued, resend with its original seq.
2070	 */
2071	if (msg->needs_out_seq) {
2072		msg->hdr.seq = cpu_to_le64(++con->out_seq);
2073		msg->needs_out_seq = false;
2074
2075		if (con->ops->reencode_message)
2076			con->ops->reencode_message(msg);
2077	}
2078
2079	/*
2080	 * Get a ref for out_msg.  It is put when we are done sending the
2081	 * message or in case of a fault.
2082	 */
2083	WARN_ON(con->out_msg);
2084	con->out_msg = ceph_msg_get(msg);
2085}
2086
2087/*
2088 * Free a generically kmalloc'd message.
2089 */
2090static void ceph_msg_free(struct ceph_msg *m)
2091{
2092	dout("%s %p\n", __func__, m);
2093	kvfree(m->front.iov_base);
2094	kfree(m->data);
2095	kmem_cache_free(ceph_msg_cache, m);
2096}
2097
2098static void ceph_msg_release(struct kref *kref)
2099{
2100	struct ceph_msg *m = container_of(kref, struct ceph_msg, kref);
2101	int i;
2102
2103	dout("%s %p\n", __func__, m);
2104	WARN_ON(!list_empty(&m->list_head));
2105
2106	msg_con_set(m, NULL);
2107
2108	/* drop middle, data, if any */
2109	if (m->middle) {
2110		ceph_buffer_put(m->middle);
2111		m->middle = NULL;
2112	}
2113
2114	for (i = 0; i < m->num_data_items; i++)
2115		ceph_msg_data_destroy(&m->data[i]);
2116
2117	if (m->pool)
2118		ceph_msgpool_put(m->pool, m);
2119	else
2120		ceph_msg_free(m);
2121}
2122
2123struct ceph_msg *ceph_msg_get(struct ceph_msg *msg)
2124{
2125	dout("%s %p (was %d)\n", __func__, msg,
2126	     kref_read(&msg->kref));
2127	kref_get(&msg->kref);
2128	return msg;
2129}
2130EXPORT_SYMBOL(ceph_msg_get);
2131
2132void ceph_msg_put(struct ceph_msg *msg)
2133{
2134	dout("%s %p (was %d)\n", __func__, msg,
2135	     kref_read(&msg->kref));
2136	kref_put(&msg->kref, ceph_msg_release);
2137}
2138EXPORT_SYMBOL(ceph_msg_put);
2139
2140void ceph_msg_dump(struct ceph_msg *msg)
2141{
2142	pr_debug("msg_dump %p (front_alloc_len %d length %zd)\n", msg,
2143		 msg->front_alloc_len, msg->data_length);
2144	print_hex_dump(KERN_DEBUG, "header: ",
2145		       DUMP_PREFIX_OFFSET, 16, 1,
2146		       &msg->hdr, sizeof(msg->hdr), true);
2147	print_hex_dump(KERN_DEBUG, " front: ",
2148		       DUMP_PREFIX_OFFSET, 16, 1,
2149		       msg->front.iov_base, msg->front.iov_len, true);
2150	if (msg->middle)
2151		print_hex_dump(KERN_DEBUG, "middle: ",
2152			       DUMP_PREFIX_OFFSET, 16, 1,
2153			       msg->middle->vec.iov_base,
2154			       msg->middle->vec.iov_len, true);
2155	print_hex_dump(KERN_DEBUG, "footer: ",
2156		       DUMP_PREFIX_OFFSET, 16, 1,
2157		       &msg->footer, sizeof(msg->footer), true);
2158}
2159EXPORT_SYMBOL(ceph_msg_dump);
v6.2
   1// SPDX-License-Identifier: GPL-2.0
   2#include <linux/ceph/ceph_debug.h>
   3
   4#include <linux/crc32c.h>
   5#include <linux/ctype.h>
   6#include <linux/highmem.h>
   7#include <linux/inet.h>
   8#include <linux/kthread.h>
   9#include <linux/net.h>
  10#include <linux/nsproxy.h>
  11#include <linux/sched/mm.h>
  12#include <linux/slab.h>
  13#include <linux/socket.h>
  14#include <linux/string.h>
  15#ifdef	CONFIG_BLOCK
  16#include <linux/bio.h>
  17#endif	/* CONFIG_BLOCK */
  18#include <linux/dns_resolver.h>
  19#include <net/tcp.h>
  20
  21#include <linux/ceph/ceph_features.h>
  22#include <linux/ceph/libceph.h>
  23#include <linux/ceph/messenger.h>
  24#include <linux/ceph/decode.h>
  25#include <linux/ceph/pagelist.h>
  26#include <linux/export.h>
  27
  28/*
  29 * Ceph uses the messenger to exchange ceph_msg messages with other
  30 * hosts in the system.  The messenger provides ordered and reliable
  31 * delivery.  We tolerate TCP disconnects by reconnecting (with
  32 * exponential backoff) in the case of a fault (disconnection, bad
  33 * crc, protocol error).  Acks allow sent messages to be discarded by
  34 * the sender.
  35 */
  36
  37/*
  38 * We track the state of the socket on a given connection using
  39 * values defined below.  The transition to a new socket state is
  40 * handled by a function which verifies we aren't coming from an
  41 * unexpected state.
  42 *
  43 *      --------
  44 *      | NEW* |  transient initial state
  45 *      --------
  46 *          | con_sock_state_init()
  47 *          v
  48 *      ----------
  49 *      | CLOSED |  initialized, but no socket (and no
  50 *      ----------  TCP connection)
  51 *       ^      \
  52 *       |       \ con_sock_state_connecting()
  53 *       |        ----------------------
  54 *       |                              \
  55 *       + con_sock_state_closed()       \
  56 *       |+---------------------------    \
  57 *       | \                          \    \
  58 *       |  -----------                \    \
  59 *       |  | CLOSING |  socket event;  \    \
  60 *       |  -----------  await close     \    \
  61 *       |       ^                        \   |
  62 *       |       |                         \  |
  63 *       |       + con_sock_state_closing() \ |
  64 *       |      / \                         | |
  65 *       |     /   ---------------          | |
  66 *       |    /                   \         v v
  67 *       |   /                    --------------
  68 *       |  /    -----------------| CONNECTING |  socket created, TCP
  69 *       |  |   /                 --------------  connect initiated
  70 *       |  |   | con_sock_state_connected()
  71 *       |  |   v
  72 *      -------------
  73 *      | CONNECTED |  TCP connection established
  74 *      -------------
  75 *
  76 * State values for ceph_connection->sock_state; NEW is assumed to be 0.
  77 */
  78
  79#define CON_SOCK_STATE_NEW		0	/* -> CLOSED */
  80#define CON_SOCK_STATE_CLOSED		1	/* -> CONNECTING */
  81#define CON_SOCK_STATE_CONNECTING	2	/* -> CONNECTED or -> CLOSING */
  82#define CON_SOCK_STATE_CONNECTED	3	/* -> CLOSING or -> CLOSED */
  83#define CON_SOCK_STATE_CLOSING		4	/* -> CLOSED */
  84
  85static bool con_flag_valid(unsigned long con_flag)
  86{
  87	switch (con_flag) {
  88	case CEPH_CON_F_LOSSYTX:
  89	case CEPH_CON_F_KEEPALIVE_PENDING:
  90	case CEPH_CON_F_WRITE_PENDING:
  91	case CEPH_CON_F_SOCK_CLOSED:
  92	case CEPH_CON_F_BACKOFF:
  93		return true;
  94	default:
  95		return false;
  96	}
  97}
  98
  99void ceph_con_flag_clear(struct ceph_connection *con, unsigned long con_flag)
 100{
 101	BUG_ON(!con_flag_valid(con_flag));
 102
 103	clear_bit(con_flag, &con->flags);
 104}
 105
 106void ceph_con_flag_set(struct ceph_connection *con, unsigned long con_flag)
 107{
 108	BUG_ON(!con_flag_valid(con_flag));
 109
 110	set_bit(con_flag, &con->flags);
 111}
 112
 113bool ceph_con_flag_test(struct ceph_connection *con, unsigned long con_flag)
 114{
 115	BUG_ON(!con_flag_valid(con_flag));
 116
 117	return test_bit(con_flag, &con->flags);
 118}
 119
 120bool ceph_con_flag_test_and_clear(struct ceph_connection *con,
 121				  unsigned long con_flag)
 122{
 123	BUG_ON(!con_flag_valid(con_flag));
 124
 125	return test_and_clear_bit(con_flag, &con->flags);
 126}
 127
 128bool ceph_con_flag_test_and_set(struct ceph_connection *con,
 129				unsigned long con_flag)
 130{
 131	BUG_ON(!con_flag_valid(con_flag));
 132
 133	return test_and_set_bit(con_flag, &con->flags);
 134}
 135
 136/* Slab caches for frequently-allocated structures */
 137
 138static struct kmem_cache	*ceph_msg_cache;
 139
 140#ifdef CONFIG_LOCKDEP
 141static struct lock_class_key socket_class;
 142#endif
 143
 144static void queue_con(struct ceph_connection *con);
 145static void cancel_con(struct ceph_connection *con);
 146static void ceph_con_workfn(struct work_struct *);
 147static void con_fault(struct ceph_connection *con);
 148
 149/*
 150 * Nicely render a sockaddr as a string.  An array of formatted
 151 * strings is used, to approximate reentrancy.
 152 */
 153#define ADDR_STR_COUNT_LOG	5	/* log2(# address strings in array) */
 154#define ADDR_STR_COUNT		(1 << ADDR_STR_COUNT_LOG)
 155#define ADDR_STR_COUNT_MASK	(ADDR_STR_COUNT - 1)
 156#define MAX_ADDR_STR_LEN	64	/* 54 is enough */
 157
 158static char addr_str[ADDR_STR_COUNT][MAX_ADDR_STR_LEN];
 159static atomic_t addr_str_seq = ATOMIC_INIT(0);
 160
 161struct page *ceph_zero_page;		/* used in certain error cases */
 162
 163const char *ceph_pr_addr(const struct ceph_entity_addr *addr)
 164{
 165	int i;
 166	char *s;
 167	struct sockaddr_storage ss = addr->in_addr; /* align */
 168	struct sockaddr_in *in4 = (struct sockaddr_in *)&ss;
 169	struct sockaddr_in6 *in6 = (struct sockaddr_in6 *)&ss;
 170
 171	i = atomic_inc_return(&addr_str_seq) & ADDR_STR_COUNT_MASK;
 172	s = addr_str[i];
 173
 174	switch (ss.ss_family) {
 175	case AF_INET:
 176		snprintf(s, MAX_ADDR_STR_LEN, "(%d)%pI4:%hu",
 177			 le32_to_cpu(addr->type), &in4->sin_addr,
 178			 ntohs(in4->sin_port));
 179		break;
 180
 181	case AF_INET6:
 182		snprintf(s, MAX_ADDR_STR_LEN, "(%d)[%pI6c]:%hu",
 183			 le32_to_cpu(addr->type), &in6->sin6_addr,
 184			 ntohs(in6->sin6_port));
 185		break;
 186
 187	default:
 188		snprintf(s, MAX_ADDR_STR_LEN, "(unknown sockaddr family %hu)",
 189			 ss.ss_family);
 190	}
 191
 192	return s;
 193}
 194EXPORT_SYMBOL(ceph_pr_addr);
 195
 196void ceph_encode_my_addr(struct ceph_messenger *msgr)
 197{
 198	if (!ceph_msgr2(from_msgr(msgr))) {
 199		memcpy(&msgr->my_enc_addr, &msgr->inst.addr,
 200		       sizeof(msgr->my_enc_addr));
 201		ceph_encode_banner_addr(&msgr->my_enc_addr);
 202	}
 203}
 204
 205/*
 206 * work queue for all reading and writing to/from the socket.
 207 */
 208static struct workqueue_struct *ceph_msgr_wq;
 209
 210static int ceph_msgr_slab_init(void)
 211{
 212	BUG_ON(ceph_msg_cache);
 213	ceph_msg_cache = KMEM_CACHE(ceph_msg, 0);
 214	if (!ceph_msg_cache)
 215		return -ENOMEM;
 216
 217	return 0;
 218}
 219
 220static void ceph_msgr_slab_exit(void)
 221{
 222	BUG_ON(!ceph_msg_cache);
 223	kmem_cache_destroy(ceph_msg_cache);
 224	ceph_msg_cache = NULL;
 225}
 226
 227static void _ceph_msgr_exit(void)
 228{
 229	if (ceph_msgr_wq) {
 230		destroy_workqueue(ceph_msgr_wq);
 231		ceph_msgr_wq = NULL;
 232	}
 233
 234	BUG_ON(!ceph_zero_page);
 235	put_page(ceph_zero_page);
 236	ceph_zero_page = NULL;
 237
 238	ceph_msgr_slab_exit();
 239}
 240
 241int __init ceph_msgr_init(void)
 242{
 243	if (ceph_msgr_slab_init())
 244		return -ENOMEM;
 245
 246	BUG_ON(ceph_zero_page);
 247	ceph_zero_page = ZERO_PAGE(0);
 248	get_page(ceph_zero_page);
 249
 250	/*
 251	 * The number of active work items is limited by the number of
 252	 * connections, so leave @max_active at default.
 253	 */
 254	ceph_msgr_wq = alloc_workqueue("ceph-msgr", WQ_MEM_RECLAIM, 0);
 255	if (ceph_msgr_wq)
 256		return 0;
 257
 258	pr_err("msgr_init failed to create workqueue\n");
 259	_ceph_msgr_exit();
 260
 261	return -ENOMEM;
 262}
 263
 264void ceph_msgr_exit(void)
 265{
 266	BUG_ON(ceph_msgr_wq == NULL);
 267
 268	_ceph_msgr_exit();
 269}
 270
 271void ceph_msgr_flush(void)
 272{
 273	flush_workqueue(ceph_msgr_wq);
 274}
 275EXPORT_SYMBOL(ceph_msgr_flush);
 276
 277/* Connection socket state transition functions */
 278
 279static void con_sock_state_init(struct ceph_connection *con)
 280{
 281	int old_state;
 282
 283	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSED);
 284	if (WARN_ON(old_state != CON_SOCK_STATE_NEW))
 285		printk("%s: unexpected old state %d\n", __func__, old_state);
 286	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
 287	     CON_SOCK_STATE_CLOSED);
 288}
 289
 290static void con_sock_state_connecting(struct ceph_connection *con)
 291{
 292	int old_state;
 293
 294	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CONNECTING);
 295	if (WARN_ON(old_state != CON_SOCK_STATE_CLOSED))
 296		printk("%s: unexpected old state %d\n", __func__, old_state);
 297	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
 298	     CON_SOCK_STATE_CONNECTING);
 299}
 300
 301static void con_sock_state_connected(struct ceph_connection *con)
 302{
 303	int old_state;
 304
 305	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CONNECTED);
 306	if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTING))
 307		printk("%s: unexpected old state %d\n", __func__, old_state);
 308	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
 309	     CON_SOCK_STATE_CONNECTED);
 310}
 311
 312static void con_sock_state_closing(struct ceph_connection *con)
 313{
 314	int old_state;
 315
 316	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSING);
 317	if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTING &&
 318			old_state != CON_SOCK_STATE_CONNECTED &&
 319			old_state != CON_SOCK_STATE_CLOSING))
 320		printk("%s: unexpected old state %d\n", __func__, old_state);
 321	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
 322	     CON_SOCK_STATE_CLOSING);
 323}
 324
 325static void con_sock_state_closed(struct ceph_connection *con)
 326{
 327	int old_state;
 328
 329	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSED);
 330	if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTED &&
 331		    old_state != CON_SOCK_STATE_CLOSING &&
 332		    old_state != CON_SOCK_STATE_CONNECTING &&
 333		    old_state != CON_SOCK_STATE_CLOSED))
 334		printk("%s: unexpected old state %d\n", __func__, old_state);
 335	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
 336	     CON_SOCK_STATE_CLOSED);
 337}
 338
 339/*
 340 * socket callback functions
 341 */
 342
 343/* data available on socket, or listen socket received a connect */
 344static void ceph_sock_data_ready(struct sock *sk)
 345{
 346	struct ceph_connection *con = sk->sk_user_data;
 347	if (atomic_read(&con->msgr->stopping)) {
 348		return;
 349	}
 350
 351	if (sk->sk_state != TCP_CLOSE_WAIT) {
 352		dout("%s %p state = %d, queueing work\n", __func__,
 353		     con, con->state);
 354		queue_con(con);
 355	}
 356}
 357
 358/* socket has buffer space for writing */
 359static void ceph_sock_write_space(struct sock *sk)
 360{
 361	struct ceph_connection *con = sk->sk_user_data;
 362
 363	/* only queue to workqueue if there is data we want to write,
 364	 * and there is sufficient space in the socket buffer to accept
 365	 * more data.  clear SOCK_NOSPACE so that ceph_sock_write_space()
 366	 * doesn't get called again until try_write() fills the socket
 367	 * buffer. See net/ipv4/tcp_input.c:tcp_check_space()
 368	 * and net/core/stream.c:sk_stream_write_space().
 369	 */
 370	if (ceph_con_flag_test(con, CEPH_CON_F_WRITE_PENDING)) {
 371		if (sk_stream_is_writeable(sk)) {
 372			dout("%s %p queueing write work\n", __func__, con);
 373			clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
 374			queue_con(con);
 375		}
 376	} else {
 377		dout("%s %p nothing to write\n", __func__, con);
 378	}
 379}
 380
 381/* socket's state has changed */
 382static void ceph_sock_state_change(struct sock *sk)
 383{
 384	struct ceph_connection *con = sk->sk_user_data;
 385
 386	dout("%s %p state = %d sk_state = %u\n", __func__,
 387	     con, con->state, sk->sk_state);
 388
 389	switch (sk->sk_state) {
 390	case TCP_CLOSE:
 391		dout("%s TCP_CLOSE\n", __func__);
 392		fallthrough;
 393	case TCP_CLOSE_WAIT:
 394		dout("%s TCP_CLOSE_WAIT\n", __func__);
 395		con_sock_state_closing(con);
 396		ceph_con_flag_set(con, CEPH_CON_F_SOCK_CLOSED);
 397		queue_con(con);
 398		break;
 399	case TCP_ESTABLISHED:
 400		dout("%s TCP_ESTABLISHED\n", __func__);
 401		con_sock_state_connected(con);
 402		queue_con(con);
 403		break;
 404	default:	/* Everything else is uninteresting */
 405		break;
 406	}
 407}
 408
 409/*
 410 * set up socket callbacks
 411 */
 412static void set_sock_callbacks(struct socket *sock,
 413			       struct ceph_connection *con)
 414{
 415	struct sock *sk = sock->sk;
 416	sk->sk_user_data = con;
 417	sk->sk_data_ready = ceph_sock_data_ready;
 418	sk->sk_write_space = ceph_sock_write_space;
 419	sk->sk_state_change = ceph_sock_state_change;
 420}
 421
 422
 423/*
 424 * socket helpers
 425 */
 426
 427/*
 428 * initiate connection to a remote socket.
 429 */
 430int ceph_tcp_connect(struct ceph_connection *con)
 431{
 432	struct sockaddr_storage ss = con->peer_addr.in_addr; /* align */
 433	struct socket *sock;
 434	unsigned int noio_flag;
 435	int ret;
 436
 437	dout("%s con %p peer_addr %s\n", __func__, con,
 438	     ceph_pr_addr(&con->peer_addr));
 439	BUG_ON(con->sock);
 440
 441	/* sock_create_kern() allocates with GFP_KERNEL */
 442	noio_flag = memalloc_noio_save();
 443	ret = sock_create_kern(read_pnet(&con->msgr->net), ss.ss_family,
 444			       SOCK_STREAM, IPPROTO_TCP, &sock);
 445	memalloc_noio_restore(noio_flag);
 446	if (ret)
 447		return ret;
 448	sock->sk->sk_allocation = GFP_NOFS;
 449	sock->sk->sk_use_task_frag = false;
 450
 451#ifdef CONFIG_LOCKDEP
 452	lockdep_set_class(&sock->sk->sk_lock, &socket_class);
 453#endif
 454
 455	set_sock_callbacks(sock, con);
 456
 457	con_sock_state_connecting(con);
 458	ret = sock->ops->connect(sock, (struct sockaddr *)&ss, sizeof(ss),
 459				 O_NONBLOCK);
 460	if (ret == -EINPROGRESS) {
 461		dout("connect %s EINPROGRESS sk_state = %u\n",
 462		     ceph_pr_addr(&con->peer_addr),
 463		     sock->sk->sk_state);
 464	} else if (ret < 0) {
 465		pr_err("connect %s error %d\n",
 466		       ceph_pr_addr(&con->peer_addr), ret);
 467		sock_release(sock);
 468		return ret;
 469	}
 470
 471	if (ceph_test_opt(from_msgr(con->msgr), TCP_NODELAY))
 472		tcp_sock_set_nodelay(sock->sk);
 473
 474	con->sock = sock;
 475	return 0;
 476}
 477
 478/*
 479 * Shutdown/close the socket for the given connection.
 480 */
 481int ceph_con_close_socket(struct ceph_connection *con)
 482{
 483	int rc = 0;
 484
 485	dout("%s con %p sock %p\n", __func__, con, con->sock);
 486	if (con->sock) {
 487		rc = con->sock->ops->shutdown(con->sock, SHUT_RDWR);
 488		sock_release(con->sock);
 489		con->sock = NULL;
 490	}
 491
 492	/*
 493	 * Forcibly clear the SOCK_CLOSED flag.  It gets set
 494	 * independent of the connection mutex, and we could have
 495	 * received a socket close event before we had the chance to
 496	 * shut the socket down.
 497	 */
 498	ceph_con_flag_clear(con, CEPH_CON_F_SOCK_CLOSED);
 499
 500	con_sock_state_closed(con);
 501	return rc;
 502}
 503
 504static void ceph_con_reset_protocol(struct ceph_connection *con)
 505{
 506	dout("%s con %p\n", __func__, con);
 507
 508	ceph_con_close_socket(con);
 509	if (con->in_msg) {
 510		WARN_ON(con->in_msg->con != con);
 511		ceph_msg_put(con->in_msg);
 512		con->in_msg = NULL;
 513	}
 514	if (con->out_msg) {
 515		WARN_ON(con->out_msg->con != con);
 516		ceph_msg_put(con->out_msg);
 517		con->out_msg = NULL;
 518	}
 519	if (con->bounce_page) {
 520		__free_page(con->bounce_page);
 521		con->bounce_page = NULL;
 522	}
 523
 524	if (ceph_msgr2(from_msgr(con->msgr)))
 525		ceph_con_v2_reset_protocol(con);
 526	else
 527		ceph_con_v1_reset_protocol(con);
 528}
 529
 530/*
 531 * Reset a connection.  Discard all incoming and outgoing messages
 532 * and clear *_seq state.
 533 */
 534static void ceph_msg_remove(struct ceph_msg *msg)
 535{
 536	list_del_init(&msg->list_head);
 537
 538	ceph_msg_put(msg);
 539}
 540
 541static void ceph_msg_remove_list(struct list_head *head)
 542{
 543	while (!list_empty(head)) {
 544		struct ceph_msg *msg = list_first_entry(head, struct ceph_msg,
 545							list_head);
 546		ceph_msg_remove(msg);
 547	}
 548}
 549
 550void ceph_con_reset_session(struct ceph_connection *con)
 551{
 552	dout("%s con %p\n", __func__, con);
 553
 554	WARN_ON(con->in_msg);
 555	WARN_ON(con->out_msg);
 556	ceph_msg_remove_list(&con->out_queue);
 557	ceph_msg_remove_list(&con->out_sent);
 558	con->out_seq = 0;
 559	con->in_seq = 0;
 560	con->in_seq_acked = 0;
 561
 562	if (ceph_msgr2(from_msgr(con->msgr)))
 563		ceph_con_v2_reset_session(con);
 564	else
 565		ceph_con_v1_reset_session(con);
 566}
 567
 568/*
 569 * mark a peer down.  drop any open connections.
 570 */
 571void ceph_con_close(struct ceph_connection *con)
 572{
 573	mutex_lock(&con->mutex);
 574	dout("con_close %p peer %s\n", con, ceph_pr_addr(&con->peer_addr));
 575	con->state = CEPH_CON_S_CLOSED;
 576
 577	ceph_con_flag_clear(con, CEPH_CON_F_LOSSYTX);  /* so we retry next
 578							  connect */
 579	ceph_con_flag_clear(con, CEPH_CON_F_KEEPALIVE_PENDING);
 580	ceph_con_flag_clear(con, CEPH_CON_F_WRITE_PENDING);
 581	ceph_con_flag_clear(con, CEPH_CON_F_BACKOFF);
 582
 583	ceph_con_reset_protocol(con);
 584	ceph_con_reset_session(con);
 585	cancel_con(con);
 586	mutex_unlock(&con->mutex);
 587}
 588EXPORT_SYMBOL(ceph_con_close);
 589
 590/*
 591 * Reopen a closed connection, with a new peer address.
 592 */
 593void ceph_con_open(struct ceph_connection *con,
 594		   __u8 entity_type, __u64 entity_num,
 595		   struct ceph_entity_addr *addr)
 596{
 597	mutex_lock(&con->mutex);
 598	dout("con_open %p %s\n", con, ceph_pr_addr(addr));
 599
 600	WARN_ON(con->state != CEPH_CON_S_CLOSED);
 601	con->state = CEPH_CON_S_PREOPEN;
 602
 603	con->peer_name.type = (__u8) entity_type;
 604	con->peer_name.num = cpu_to_le64(entity_num);
 605
 606	memcpy(&con->peer_addr, addr, sizeof(*addr));
 607	con->delay = 0;      /* reset backoff memory */
 608	mutex_unlock(&con->mutex);
 609	queue_con(con);
 610}
 611EXPORT_SYMBOL(ceph_con_open);
 612
 613/*
 614 * return true if this connection ever successfully opened
 615 */
 616bool ceph_con_opened(struct ceph_connection *con)
 617{
 618	if (ceph_msgr2(from_msgr(con->msgr)))
 619		return ceph_con_v2_opened(con);
 620
 621	return ceph_con_v1_opened(con);
 622}
 623
 624/*
 625 * initialize a new connection.
 626 */
 627void ceph_con_init(struct ceph_connection *con, void *private,
 628	const struct ceph_connection_operations *ops,
 629	struct ceph_messenger *msgr)
 630{
 631	dout("con_init %p\n", con);
 632	memset(con, 0, sizeof(*con));
 633	con->private = private;
 634	con->ops = ops;
 635	con->msgr = msgr;
 636
 637	con_sock_state_init(con);
 638
 639	mutex_init(&con->mutex);
 640	INIT_LIST_HEAD(&con->out_queue);
 641	INIT_LIST_HEAD(&con->out_sent);
 642	INIT_DELAYED_WORK(&con->work, ceph_con_workfn);
 643
 644	con->state = CEPH_CON_S_CLOSED;
 645}
 646EXPORT_SYMBOL(ceph_con_init);
 647
 648/*
 649 * We maintain a global counter to order connection attempts.  Get
 650 * a unique seq greater than @gt.
 651 */
 652u32 ceph_get_global_seq(struct ceph_messenger *msgr, u32 gt)
 653{
 654	u32 ret;
 655
 656	spin_lock(&msgr->global_seq_lock);
 657	if (msgr->global_seq < gt)
 658		msgr->global_seq = gt;
 659	ret = ++msgr->global_seq;
 660	spin_unlock(&msgr->global_seq_lock);
 661	return ret;
 662}
 663
 664/*
 665 * Discard messages that have been acked by the server.
 666 */
 667void ceph_con_discard_sent(struct ceph_connection *con, u64 ack_seq)
 668{
 669	struct ceph_msg *msg;
 670	u64 seq;
 671
 672	dout("%s con %p ack_seq %llu\n", __func__, con, ack_seq);
 673	while (!list_empty(&con->out_sent)) {
 674		msg = list_first_entry(&con->out_sent, struct ceph_msg,
 675				       list_head);
 676		WARN_ON(msg->needs_out_seq);
 677		seq = le64_to_cpu(msg->hdr.seq);
 678		if (seq > ack_seq)
 679			break;
 680
 681		dout("%s con %p discarding msg %p seq %llu\n", __func__, con,
 682		     msg, seq);
 683		ceph_msg_remove(msg);
 684	}
 685}
 686
 687/*
 688 * Discard messages that have been requeued in con_fault(), up to
 689 * reconnect_seq.  This avoids gratuitously resending messages that
 690 * the server had received and handled prior to reconnect.
 691 */
 692void ceph_con_discard_requeued(struct ceph_connection *con, u64 reconnect_seq)
 693{
 694	struct ceph_msg *msg;
 695	u64 seq;
 696
 697	dout("%s con %p reconnect_seq %llu\n", __func__, con, reconnect_seq);
 698	while (!list_empty(&con->out_queue)) {
 699		msg = list_first_entry(&con->out_queue, struct ceph_msg,
 700				       list_head);
 701		if (msg->needs_out_seq)
 702			break;
 703		seq = le64_to_cpu(msg->hdr.seq);
 704		if (seq > reconnect_seq)
 705			break;
 706
 707		dout("%s con %p discarding msg %p seq %llu\n", __func__, con,
 708		     msg, seq);
 709		ceph_msg_remove(msg);
 710	}
 711}
 712
 713#ifdef CONFIG_BLOCK
 714
 715/*
 716 * For a bio data item, a piece is whatever remains of the next
 717 * entry in the current bio iovec, or the first entry in the next
 718 * bio in the list.
 719 */
 720static void ceph_msg_data_bio_cursor_init(struct ceph_msg_data_cursor *cursor,
 721					size_t length)
 722{
 723	struct ceph_msg_data *data = cursor->data;
 724	struct ceph_bio_iter *it = &cursor->bio_iter;
 725
 726	cursor->resid = min_t(size_t, length, data->bio_length);
 727	*it = data->bio_pos;
 728	if (cursor->resid < it->iter.bi_size)
 729		it->iter.bi_size = cursor->resid;
 730
 731	BUG_ON(cursor->resid < bio_iter_len(it->bio, it->iter));
 732}
 733
 734static struct page *ceph_msg_data_bio_next(struct ceph_msg_data_cursor *cursor,
 735						size_t *page_offset,
 736						size_t *length)
 737{
 738	struct bio_vec bv = bio_iter_iovec(cursor->bio_iter.bio,
 739					   cursor->bio_iter.iter);
 740
 741	*page_offset = bv.bv_offset;
 742	*length = bv.bv_len;
 743	return bv.bv_page;
 744}
 745
 746static bool ceph_msg_data_bio_advance(struct ceph_msg_data_cursor *cursor,
 747					size_t bytes)
 748{
 749	struct ceph_bio_iter *it = &cursor->bio_iter;
 750	struct page *page = bio_iter_page(it->bio, it->iter);
 751
 752	BUG_ON(bytes > cursor->resid);
 753	BUG_ON(bytes > bio_iter_len(it->bio, it->iter));
 754	cursor->resid -= bytes;
 755	bio_advance_iter(it->bio, &it->iter, bytes);
 756
 757	if (!cursor->resid)
 758		return false;   /* no more data */
 759
 760	if (!bytes || (it->iter.bi_size && it->iter.bi_bvec_done &&
 761		       page == bio_iter_page(it->bio, it->iter)))
 762		return false;	/* more bytes to process in this segment */
 763
 764	if (!it->iter.bi_size) {
 765		it->bio = it->bio->bi_next;
 766		it->iter = it->bio->bi_iter;
 767		if (cursor->resid < it->iter.bi_size)
 768			it->iter.bi_size = cursor->resid;
 769	}
 770
 771	BUG_ON(cursor->resid < bio_iter_len(it->bio, it->iter));
 772	return true;
 773}
 774#endif /* CONFIG_BLOCK */
 775
 776static void ceph_msg_data_bvecs_cursor_init(struct ceph_msg_data_cursor *cursor,
 777					size_t length)
 778{
 779	struct ceph_msg_data *data = cursor->data;
 780	struct bio_vec *bvecs = data->bvec_pos.bvecs;
 781
 782	cursor->resid = min_t(size_t, length, data->bvec_pos.iter.bi_size);
 783	cursor->bvec_iter = data->bvec_pos.iter;
 784	cursor->bvec_iter.bi_size = cursor->resid;
 785
 786	BUG_ON(cursor->resid < bvec_iter_len(bvecs, cursor->bvec_iter));
 787}
 788
 789static struct page *ceph_msg_data_bvecs_next(struct ceph_msg_data_cursor *cursor,
 790						size_t *page_offset,
 791						size_t *length)
 792{
 793	struct bio_vec bv = bvec_iter_bvec(cursor->data->bvec_pos.bvecs,
 794					   cursor->bvec_iter);
 795
 796	*page_offset = bv.bv_offset;
 797	*length = bv.bv_len;
 798	return bv.bv_page;
 799}
 800
 801static bool ceph_msg_data_bvecs_advance(struct ceph_msg_data_cursor *cursor,
 802					size_t bytes)
 803{
 804	struct bio_vec *bvecs = cursor->data->bvec_pos.bvecs;
 805	struct page *page = bvec_iter_page(bvecs, cursor->bvec_iter);
 806
 807	BUG_ON(bytes > cursor->resid);
 808	BUG_ON(bytes > bvec_iter_len(bvecs, cursor->bvec_iter));
 809	cursor->resid -= bytes;
 810	bvec_iter_advance(bvecs, &cursor->bvec_iter, bytes);
 811
 812	if (!cursor->resid)
 813		return false;   /* no more data */
 814
 815	if (!bytes || (cursor->bvec_iter.bi_bvec_done &&
 816		       page == bvec_iter_page(bvecs, cursor->bvec_iter)))
 817		return false;	/* more bytes to process in this segment */
 818
 819	BUG_ON(cursor->resid < bvec_iter_len(bvecs, cursor->bvec_iter));
 820	return true;
 821}
 822
 823/*
 824 * For a page array, a piece comes from the first page in the array
 825 * that has not already been fully consumed.
 826 */
 827static void ceph_msg_data_pages_cursor_init(struct ceph_msg_data_cursor *cursor,
 828					size_t length)
 829{
 830	struct ceph_msg_data *data = cursor->data;
 831	int page_count;
 832
 833	BUG_ON(data->type != CEPH_MSG_DATA_PAGES);
 834
 835	BUG_ON(!data->pages);
 836	BUG_ON(!data->length);
 837
 838	cursor->resid = min(length, data->length);
 839	page_count = calc_pages_for(data->alignment, (u64)data->length);
 840	cursor->page_offset = data->alignment & ~PAGE_MASK;
 841	cursor->page_index = 0;
 842	BUG_ON(page_count > (int)USHRT_MAX);
 843	cursor->page_count = (unsigned short)page_count;
 844	BUG_ON(length > SIZE_MAX - cursor->page_offset);
 845}
 846
 847static struct page *
 848ceph_msg_data_pages_next(struct ceph_msg_data_cursor *cursor,
 849					size_t *page_offset, size_t *length)
 850{
 851	struct ceph_msg_data *data = cursor->data;
 852
 853	BUG_ON(data->type != CEPH_MSG_DATA_PAGES);
 854
 855	BUG_ON(cursor->page_index >= cursor->page_count);
 856	BUG_ON(cursor->page_offset >= PAGE_SIZE);
 857
 858	*page_offset = cursor->page_offset;
 859	*length = min_t(size_t, cursor->resid, PAGE_SIZE - *page_offset);
 860	return data->pages[cursor->page_index];
 861}
 862
 863static bool ceph_msg_data_pages_advance(struct ceph_msg_data_cursor *cursor,
 864						size_t bytes)
 865{
 866	BUG_ON(cursor->data->type != CEPH_MSG_DATA_PAGES);
 867
 868	BUG_ON(cursor->page_offset + bytes > PAGE_SIZE);
 869
 870	/* Advance the cursor page offset */
 871
 872	cursor->resid -= bytes;
 873	cursor->page_offset = (cursor->page_offset + bytes) & ~PAGE_MASK;
 874	if (!bytes || cursor->page_offset)
 875		return false;	/* more bytes to process in the current page */
 876
 877	if (!cursor->resid)
 878		return false;   /* no more data */
 879
 880	/* Move on to the next page; offset is already at 0 */
 881
 882	BUG_ON(cursor->page_index >= cursor->page_count);
 883	cursor->page_index++;
 884	return true;
 885}
 886
 887/*
 888 * For a pagelist, a piece is whatever remains to be consumed in the
 889 * first page in the list, or the front of the next page.
 890 */
 891static void
 892ceph_msg_data_pagelist_cursor_init(struct ceph_msg_data_cursor *cursor,
 893					size_t length)
 894{
 895	struct ceph_msg_data *data = cursor->data;
 896	struct ceph_pagelist *pagelist;
 897	struct page *page;
 898
 899	BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST);
 900
 901	pagelist = data->pagelist;
 902	BUG_ON(!pagelist);
 903
 904	if (!length)
 905		return;		/* pagelist can be assigned but empty */
 906
 907	BUG_ON(list_empty(&pagelist->head));
 908	page = list_first_entry(&pagelist->head, struct page, lru);
 909
 910	cursor->resid = min(length, pagelist->length);
 911	cursor->page = page;
 912	cursor->offset = 0;
 913}
 914
 915static struct page *
 916ceph_msg_data_pagelist_next(struct ceph_msg_data_cursor *cursor,
 917				size_t *page_offset, size_t *length)
 918{
 919	struct ceph_msg_data *data = cursor->data;
 920	struct ceph_pagelist *pagelist;
 921
 922	BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST);
 923
 924	pagelist = data->pagelist;
 925	BUG_ON(!pagelist);
 926
 927	BUG_ON(!cursor->page);
 928	BUG_ON(cursor->offset + cursor->resid != pagelist->length);
 929
 930	/* offset of first page in pagelist is always 0 */
 931	*page_offset = cursor->offset & ~PAGE_MASK;
 932	*length = min_t(size_t, cursor->resid, PAGE_SIZE - *page_offset);
 933	return cursor->page;
 934}
 935
 936static bool ceph_msg_data_pagelist_advance(struct ceph_msg_data_cursor *cursor,
 937						size_t bytes)
 938{
 939	struct ceph_msg_data *data = cursor->data;
 940	struct ceph_pagelist *pagelist;
 941
 942	BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST);
 943
 944	pagelist = data->pagelist;
 945	BUG_ON(!pagelist);
 946
 947	BUG_ON(cursor->offset + cursor->resid != pagelist->length);
 948	BUG_ON((cursor->offset & ~PAGE_MASK) + bytes > PAGE_SIZE);
 949
 950	/* Advance the cursor offset */
 951
 952	cursor->resid -= bytes;
 953	cursor->offset += bytes;
 954	/* offset of first page in pagelist is always 0 */
 955	if (!bytes || cursor->offset & ~PAGE_MASK)
 956		return false;	/* more bytes to process in the current page */
 957
 958	if (!cursor->resid)
 959		return false;   /* no more data */
 960
 961	/* Move on to the next page */
 962
 963	BUG_ON(list_is_last(&cursor->page->lru, &pagelist->head));
 964	cursor->page = list_next_entry(cursor->page, lru);
 965	return true;
 966}
 967
 968/*
 969 * Message data is handled (sent or received) in pieces, where each
 970 * piece resides on a single page.  The network layer might not
 971 * consume an entire piece at once.  A data item's cursor keeps
 972 * track of which piece is next to process and how much remains to
 973 * be processed in that piece.  It also tracks whether the current
 974 * piece is the last one in the data item.
 975 */
 976static void __ceph_msg_data_cursor_init(struct ceph_msg_data_cursor *cursor)
 977{
 978	size_t length = cursor->total_resid;
 979
 980	switch (cursor->data->type) {
 981	case CEPH_MSG_DATA_PAGELIST:
 982		ceph_msg_data_pagelist_cursor_init(cursor, length);
 983		break;
 984	case CEPH_MSG_DATA_PAGES:
 985		ceph_msg_data_pages_cursor_init(cursor, length);
 986		break;
 987#ifdef CONFIG_BLOCK
 988	case CEPH_MSG_DATA_BIO:
 989		ceph_msg_data_bio_cursor_init(cursor, length);
 990		break;
 991#endif /* CONFIG_BLOCK */
 992	case CEPH_MSG_DATA_BVECS:
 993		ceph_msg_data_bvecs_cursor_init(cursor, length);
 994		break;
 995	case CEPH_MSG_DATA_NONE:
 996	default:
 997		/* BUG(); */
 998		break;
 999	}
1000	cursor->need_crc = true;
1001}
1002
1003void ceph_msg_data_cursor_init(struct ceph_msg_data_cursor *cursor,
1004			       struct ceph_msg *msg, size_t length)
1005{
1006	BUG_ON(!length);
1007	BUG_ON(length > msg->data_length);
1008	BUG_ON(!msg->num_data_items);
1009
1010	cursor->total_resid = length;
1011	cursor->data = msg->data;
1012
1013	__ceph_msg_data_cursor_init(cursor);
1014}
1015
1016/*
1017 * Return the page containing the next piece to process for a given
1018 * data item, and supply the page offset and length of that piece.
1019 * Indicate whether this is the last piece in this data item.
1020 */
1021struct page *ceph_msg_data_next(struct ceph_msg_data_cursor *cursor,
1022				size_t *page_offset, size_t *length)
1023{
1024	struct page *page;
1025
1026	switch (cursor->data->type) {
1027	case CEPH_MSG_DATA_PAGELIST:
1028		page = ceph_msg_data_pagelist_next(cursor, page_offset, length);
1029		break;
1030	case CEPH_MSG_DATA_PAGES:
1031		page = ceph_msg_data_pages_next(cursor, page_offset, length);
1032		break;
1033#ifdef CONFIG_BLOCK
1034	case CEPH_MSG_DATA_BIO:
1035		page = ceph_msg_data_bio_next(cursor, page_offset, length);
1036		break;
1037#endif /* CONFIG_BLOCK */
1038	case CEPH_MSG_DATA_BVECS:
1039		page = ceph_msg_data_bvecs_next(cursor, page_offset, length);
1040		break;
1041	case CEPH_MSG_DATA_NONE:
1042	default:
1043		page = NULL;
1044		break;
1045	}
1046
1047	BUG_ON(!page);
1048	BUG_ON(*page_offset + *length > PAGE_SIZE);
1049	BUG_ON(!*length);
1050	BUG_ON(*length > cursor->resid);
1051
1052	return page;
1053}
1054
1055/*
1056	 * Advance the cursor by @bytes.  cursor->need_crc is set when the
1057	 * cursor moves on to the next piece of the data item.
1058 */
1059void ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor, size_t bytes)
1060{
1061	bool new_piece;
1062
1063	BUG_ON(bytes > cursor->resid);
1064	switch (cursor->data->type) {
1065	case CEPH_MSG_DATA_PAGELIST:
1066		new_piece = ceph_msg_data_pagelist_advance(cursor, bytes);
1067		break;
1068	case CEPH_MSG_DATA_PAGES:
1069		new_piece = ceph_msg_data_pages_advance(cursor, bytes);
1070		break;
1071#ifdef CONFIG_BLOCK
1072	case CEPH_MSG_DATA_BIO:
1073		new_piece = ceph_msg_data_bio_advance(cursor, bytes);
1074		break;
1075#endif /* CONFIG_BLOCK */
1076	case CEPH_MSG_DATA_BVECS:
1077		new_piece = ceph_msg_data_bvecs_advance(cursor, bytes);
1078		break;
1079	case CEPH_MSG_DATA_NONE:
1080	default:
1081		BUG();
1082		break;
1083	}
1084	cursor->total_resid -= bytes;
1085
1086	if (!cursor->resid && cursor->total_resid) {
1087		cursor->data++;
1088		__ceph_msg_data_cursor_init(cursor);
1089		new_piece = true;
1090	}
1091	cursor->need_crc = new_piece;
1092}
1093
1094u32 ceph_crc32c_page(u32 crc, struct page *page, unsigned int page_offset,
1095		     unsigned int length)
1096{
1097	char *kaddr;
1098
1099	kaddr = kmap(page);
1100	BUG_ON(kaddr == NULL);
1101	crc = crc32c(crc, kaddr + page_offset, length);
1102	kunmap(page);
1103
1104	return crc;
1105}
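/*
 * Sketch (hypothetical helper): walk a message's data items piece by
 * piece with the cursor API and accumulate a crc32c over the payload,
 * roughly the pattern the write paths follow.  The crc seed here is
 * arbitrary and purely illustrative.
 *
 *	static u32 example_crc_msg_data(struct ceph_msg *msg)
 *	{
 *		struct ceph_msg_data_cursor cursor;
 *		u32 crc = -1;
 *
 *		if (!msg->data_length)
 *			return crc;
 *
 *		ceph_msg_data_cursor_init(&cursor, msg, msg->data_length);
 *		while (cursor.total_resid) {
 *			size_t off, len;
 *			struct page *page;
 *
 *			page = ceph_msg_data_next(&cursor, &off, &len);
 *			crc = ceph_crc32c_page(crc, page, off, len);
 *			ceph_msg_data_advance(&cursor, len);
 *		}
 *		return crc;
 *	}
 */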
1106
1107bool ceph_addr_is_blank(const struct ceph_entity_addr *addr)
1108{
1109	struct sockaddr_storage ss = addr->in_addr; /* align */
1110	struct in_addr *addr4 = &((struct sockaddr_in *)&ss)->sin_addr;
1111	struct in6_addr *addr6 = &((struct sockaddr_in6 *)&ss)->sin6_addr;
1112
1113	switch (ss.ss_family) {
1114	case AF_INET:
1115		return addr4->s_addr == htonl(INADDR_ANY);
1116	case AF_INET6:
1117		return ipv6_addr_any(addr6);
1118	default:
1119		return true;
1120	}
1121}
1122
1123int ceph_addr_port(const struct ceph_entity_addr *addr)
1124{
1125	switch (get_unaligned(&addr->in_addr.ss_family)) {
1126	case AF_INET:
1127		return ntohs(get_unaligned(&((struct sockaddr_in *)&addr->in_addr)->sin_port));
1128	case AF_INET6:
1129		return ntohs(get_unaligned(&((struct sockaddr_in6 *)&addr->in_addr)->sin6_port));
1130	}
1131	return 0;
1132}
1133
1134void ceph_addr_set_port(struct ceph_entity_addr *addr, int p)
1135{
1136	switch (get_unaligned(&addr->in_addr.ss_family)) {
1137	case AF_INET:
1138		put_unaligned(htons(p), &((struct sockaddr_in *)&addr->in_addr)->sin_port);
1139		break;
1140	case AF_INET6:
1141		put_unaligned(htons(p), &((struct sockaddr_in6 *)&addr->in_addr)->sin6_port);
1142		break;
1143	}
1144}
1145
1146/*
1147 * Unlike other *_pton function semantics, zero indicates success.
1148 */
1149static int ceph_pton(const char *str, size_t len, struct ceph_entity_addr *addr,
1150		char delim, const char **ipend)
1151{
1152	memset(&addr->in_addr, 0, sizeof(addr->in_addr));
1153
1154	if (in4_pton(str, len, (u8 *)&((struct sockaddr_in *)&addr->in_addr)->sin_addr.s_addr, delim, ipend)) {
1155		put_unaligned(AF_INET, &addr->in_addr.ss_family);
1156		return 0;
1157	}
1158
1159	if (in6_pton(str, len, (u8 *)&((struct sockaddr_in6 *)&addr->in_addr)->sin6_addr.s6_addr, delim, ipend)) {
1160		put_unaligned(AF_INET6, &addr->in_addr.ss_family);
1161		return 0;
1162	}
1163
1164	return -EINVAL;
1165}
1166
1167/*
1168 * Extract hostname string and resolve using kernel DNS facility.
1169 */
1170#ifdef CONFIG_CEPH_LIB_USE_DNS_RESOLVER
1171static int ceph_dns_resolve_name(const char *name, size_t namelen,
1172		struct ceph_entity_addr *addr, char delim, const char **ipend)
1173{
1174	const char *end, *delim_p;
1175	char *colon_p, *ip_addr = NULL;
1176	int ip_len, ret;
1177
1178	/*
1179	 * The end of the hostname occurs immediately preceding the delimiter or
1180	 * the port marker (':'), whichever comes first.
1181	 */
1182	delim_p = memchr(name, delim, namelen);
1183	colon_p = memchr(name, ':', namelen);
1184
1185	if (delim_p && colon_p)
1186		end = delim_p < colon_p ? delim_p : colon_p;
1187	else if (!delim_p && colon_p)
1188		end = colon_p;
1189	else {
1190		end = delim_p;
1191		if (!end) /* case: hostname:/ */
1192			end = name + namelen;
1193	}
1194
1195	if (end <= name)
1196		return -EINVAL;
1197
1198	/* do dns_resolve upcall */
1199	ip_len = dns_query(current->nsproxy->net_ns,
1200			   NULL, name, end - name, NULL, &ip_addr, NULL, false);
1201	if (ip_len > 0)
1202		ret = ceph_pton(ip_addr, ip_len, addr, -1, NULL);
1203	else
1204		ret = -ESRCH;
1205
1206	kfree(ip_addr);
1207
1208	*ipend = end;
1209
1210	pr_info("resolve '%.*s' (ret=%d): %s\n", (int)(end - name), name,
1211			ret, ret ? "failed" : ceph_pr_addr(addr));
1212
1213	return ret;
1214}
1215#else
1216static inline int ceph_dns_resolve_name(const char *name, size_t namelen,
1217		struct ceph_entity_addr *addr, char delim, const char **ipend)
1218{
1219	return -EINVAL;
1220}
1221#endif
1222
1223/*
1224 * Parse a server name (IP or hostname). If a valid IP address is not found
1225 * then try to extract a hostname to resolve using userspace DNS upcall.
1226 */
1227static int ceph_parse_server_name(const char *name, size_t namelen,
1228		struct ceph_entity_addr *addr, char delim, const char **ipend)
1229{
1230	int ret;
1231
1232	ret = ceph_pton(name, namelen, addr, delim, ipend);
1233	if (ret)
1234		ret = ceph_dns_resolve_name(name, namelen, addr, delim, ipend);
1235
1236	return ret;
1237}
1238
1239/*
1240 * Parse an ip[:port] list into an addr array.  Use the default
1241 * monitor port if a port isn't specified.
1242 */
1243int ceph_parse_ips(const char *c, const char *end,
1244		   struct ceph_entity_addr *addr,
1245		   int max_count, int *count, char delim)
1246{
1247	int i, ret = -EINVAL;
1248	const char *p = c;
1249
1250	dout("parse_ips on '%.*s'\n", (int)(end-c), c);
1251	for (i = 0; i < max_count; i++) {
1252		char cur_delim = delim;
1253		const char *ipend;
1254		int port;
1255
1256		if (*p == '[') {
1257			cur_delim = ']';
1258			p++;
1259		}
1260
1261		ret = ceph_parse_server_name(p, end - p, &addr[i], cur_delim,
1262					     &ipend);
1263		if (ret)
1264			goto bad;
1265		ret = -EINVAL;
1266
1267		p = ipend;
1268
1269		if (cur_delim == ']') {
1270			if (*p != ']') {
1271				dout("missing matching ']'\n");
1272				goto bad;
1273			}
1274			p++;
1275		}
1276
1277		/* port? */
1278		if (p < end && *p == ':') {
1279			port = 0;
1280			p++;
1281			while (p < end && *p >= '0' && *p <= '9') {
1282				port = (port * 10) + (*p - '0');
1283				p++;
1284			}
1285			if (port == 0)
1286				port = CEPH_MON_PORT;
1287			else if (port > 65535)
1288				goto bad;
1289		} else {
1290			port = CEPH_MON_PORT;
1291		}
1292
1293		ceph_addr_set_port(&addr[i], port);
1294		/*
1295		 * We want the type to be set according to ms_mode
1296		 * option, but options are normally parsed after mon
1297		 * addresses.  Rather than complicating parsing, set
1298		 * to LEGACY and override in build_initial_monmap()
1299		 * for mon addresses and ceph_messenger_init() for
1300		 * ip option.
1301		 */
1302		addr[i].type = CEPH_ENTITY_ADDR_TYPE_LEGACY;
1303		addr[i].nonce = 0;
1304
1305		dout("%s got %s\n", __func__, ceph_pr_addr(&addr[i]));
1306
1307		if (p == end)
1308			break;
1309		if (*p != delim)
1310			goto bad;
1311		p++;
1312	}
1313
1314	if (p != end)
1315		goto bad;
1316
1317	if (count)
1318		*count = i + 1;
1319	return 0;
1320
1321bad:
1322	return ret;
1323}
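/*
 * Usage sketch (hypothetical caller): parse a comma-separated monitor
 * address list into an array, falling back to CEPH_MON_PORT when no
 * port is given.
 *
 *	struct ceph_entity_addr addrs[3];
 *	const char *s = "192.168.0.1:6789,192.168.0.2";
 *	int naddrs, err;
 *
 *	err = ceph_parse_ips(s, s + strlen(s), addrs,
 *			     ARRAY_SIZE(addrs), &naddrs, ',');
 *	if (!err)
 *		pr_info("parsed %d addrs, first %s\n", naddrs,
 *			ceph_pr_addr(&addrs[0]));
 */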
1324
1325/*
1326 * Process message.  This happens in the worker thread.  The callback should
1327 * be careful not to do anything that waits on other incoming messages or it
1328 * may deadlock.
1329 */
1330void ceph_con_process_message(struct ceph_connection *con)
1331{
1332	struct ceph_msg *msg = con->in_msg;
1333
1334	BUG_ON(con->in_msg->con != con);
1335	con->in_msg = NULL;
1336
1337	/* if first message, set peer_name */
1338	if (con->peer_name.type == 0)
1339		con->peer_name = msg->hdr.src;
1340
1341	con->in_seq++;
1342	mutex_unlock(&con->mutex);
1343
1344	dout("===== %p %llu from %s%lld %d=%s len %d+%d+%d (%u %u %u) =====\n",
1345	     msg, le64_to_cpu(msg->hdr.seq),
1346	     ENTITY_NAME(msg->hdr.src),
1347	     le16_to_cpu(msg->hdr.type),
1348	     ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
1349	     le32_to_cpu(msg->hdr.front_len),
1350	     le32_to_cpu(msg->hdr.middle_len),
1351	     le32_to_cpu(msg->hdr.data_len),
1352	     con->in_front_crc, con->in_middle_crc, con->in_data_crc);
1353	con->ops->dispatch(con, msg);
1354
1355	mutex_lock(&con->mutex);
1356}
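/*
 * Sketch of a minimal ->dispatch() hook (hypothetical, not part of this
 * file): the handler receives the reference that was held in con->in_msg
 * and is responsible for dropping it when done.
 *
 *	static void example_dispatch(struct ceph_connection *con,
 *				     struct ceph_msg *msg)
 *	{
 *		int type = le16_to_cpu(msg->hdr.type);
 *
 *		dout("got message type %d (%s)\n", type,
 *		     ceph_msg_type_name(type));
 *		ceph_msg_put(msg);
 *	}
 */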
1357
1358/*
1359 * Atomically queue work on a connection after the specified delay.
1360 * Bump @con reference to avoid races with connection teardown.
1361 * Returns 0 if work was queued, or an error code otherwise.
1362 */
1363static int queue_con_delay(struct ceph_connection *con, unsigned long delay)
1364{
1365	if (!con->ops->get(con)) {
1366		dout("%s %p ref count 0\n", __func__, con);
1367		return -ENOENT;
1368	}
1369
1370	if (delay >= HZ)
1371		delay = round_jiffies_relative(delay);
1372
1373	dout("%s %p %lu\n", __func__, con, delay);
1374	if (!queue_delayed_work(ceph_msgr_wq, &con->work, delay)) {
1375		dout("%s %p - already queued\n", __func__, con);
1376		con->ops->put(con);
1377		return -EBUSY;
1378	}
1379
1380	return 0;
1381}
1382
1383static void queue_con(struct ceph_connection *con)
1384{
1385	(void) queue_con_delay(con, 0);
1386}
1387
1388static void cancel_con(struct ceph_connection *con)
1389{
1390	if (cancel_delayed_work(&con->work)) {
1391		dout("%s %p\n", __func__, con);
1392		con->ops->put(con);
1393	}
1394}
1395
1396static bool con_sock_closed(struct ceph_connection *con)
1397{
1398	if (!ceph_con_flag_test_and_clear(con, CEPH_CON_F_SOCK_CLOSED))
1399		return false;
1400
1401#define CASE(x)								\
1402	case CEPH_CON_S_ ## x:						\
1403		con->error_msg = "socket closed (con state " #x ")";	\
1404		break;
1405
1406	switch (con->state) {
1407	CASE(CLOSED);
1408	CASE(PREOPEN);
1409	CASE(V1_BANNER);
1410	CASE(V1_CONNECT_MSG);
1411	CASE(V2_BANNER_PREFIX);
1412	CASE(V2_BANNER_PAYLOAD);
1413	CASE(V2_HELLO);
1414	CASE(V2_AUTH);
1415	CASE(V2_AUTH_SIGNATURE);
1416	CASE(V2_SESSION_CONNECT);
1417	CASE(V2_SESSION_RECONNECT);
1418	CASE(OPEN);
1419	CASE(STANDBY);
1420	default:
1421		BUG();
1422	}
1423#undef CASE
1424
1425	return true;
1426}
1427
1428static bool con_backoff(struct ceph_connection *con)
1429{
1430	int ret;
1431
1432	if (!ceph_con_flag_test_and_clear(con, CEPH_CON_F_BACKOFF))
1433		return false;
1434
1435	ret = queue_con_delay(con, con->delay);
1436	if (ret) {
1437		dout("%s: con %p FAILED to back off %lu\n", __func__,
1438			con, con->delay);
1439		BUG_ON(ret == -ENOENT);
1440		ceph_con_flag_set(con, CEPH_CON_F_BACKOFF);
1441	}
1442
1443	return true;
1444}
1445
1446/* Finish fault handling; con->mutex must *not* be held here */
1447
1448static void con_fault_finish(struct ceph_connection *con)
1449{
1450	dout("%s %p\n", __func__, con);
1451
1452	/*
1453	 * in case we faulted due to authentication, invalidate our
1454	 * current tickets so that we can get new ones.
1455	 */
1456	if (con->v1.auth_retry) {
1457		dout("auth_retry %d, invalidating\n", con->v1.auth_retry);
1458		if (con->ops->invalidate_authorizer)
1459			con->ops->invalidate_authorizer(con);
1460		con->v1.auth_retry = 0;
1461	}
1462
1463	if (con->ops->fault)
1464		con->ops->fault(con);
1465}
1466
1467/*
1468 * Do some work on a connection.  Drop a connection ref when we're done.
1469 */
1470static void ceph_con_workfn(struct work_struct *work)
1471{
1472	struct ceph_connection *con = container_of(work, struct ceph_connection,
1473						   work.work);
1474	bool fault;
1475
1476	mutex_lock(&con->mutex);
1477	while (true) {
1478		int ret;
1479
1480		if ((fault = con_sock_closed(con))) {
1481			dout("%s: con %p SOCK_CLOSED\n", __func__, con);
1482			break;
1483		}
1484		if (con_backoff(con)) {
1485			dout("%s: con %p BACKOFF\n", __func__, con);
1486			break;
1487		}
1488		if (con->state == CEPH_CON_S_STANDBY) {
1489			dout("%s: con %p STANDBY\n", __func__, con);
1490			break;
1491		}
1492		if (con->state == CEPH_CON_S_CLOSED) {
1493			dout("%s: con %p CLOSED\n", __func__, con);
1494			BUG_ON(con->sock);
1495			break;
1496		}
1497		if (con->state == CEPH_CON_S_PREOPEN) {
1498			dout("%s: con %p PREOPEN\n", __func__, con);
1499			BUG_ON(con->sock);
1500		}
1501
1502		if (ceph_msgr2(from_msgr(con->msgr)))
1503			ret = ceph_con_v2_try_read(con);
1504		else
1505			ret = ceph_con_v1_try_read(con);
1506		if (ret < 0) {
1507			if (ret == -EAGAIN)
1508				continue;
1509			if (!con->error_msg)
1510				con->error_msg = "socket error on read";
1511			fault = true;
1512			break;
1513		}
1514
1515		if (ceph_msgr2(from_msgr(con->msgr)))
1516			ret = ceph_con_v2_try_write(con);
1517		else
1518			ret = ceph_con_v1_try_write(con);
1519		if (ret < 0) {
1520			if (ret == -EAGAIN)
1521				continue;
1522			if (!con->error_msg)
1523				con->error_msg = "socket error on write";
1524			fault = true;
1525		}
1526
1527		break;	/* If we make it to here, we're done */
1528	}
1529	if (fault)
1530		con_fault(con);
1531	mutex_unlock(&con->mutex);
1532
1533	if (fault)
1534		con_fault_finish(con);
1535
1536	con->ops->put(con);
1537}
1538
1539/*
1540 * Generic error/fault handler.  A retry mechanism is used with
1541 * exponential backoff
1542 */
1543static void con_fault(struct ceph_connection *con)
1544{
1545	dout("fault %p state %d to peer %s\n",
1546	     con, con->state, ceph_pr_addr(&con->peer_addr));
1547
1548	pr_warn("%s%lld %s %s\n", ENTITY_NAME(con->peer_name),
1549		ceph_pr_addr(&con->peer_addr), con->error_msg);
1550	con->error_msg = NULL;
1551
1552	WARN_ON(con->state == CEPH_CON_S_STANDBY ||
1553		con->state == CEPH_CON_S_CLOSED);
1554
1555	ceph_con_reset_protocol(con);
1556
1557	if (ceph_con_flag_test(con, CEPH_CON_F_LOSSYTX)) {
1558		dout("fault on LOSSYTX channel, marking CLOSED\n");
1559		con->state = CEPH_CON_S_CLOSED;
1560		return;
1561	}
1562
1563	/* Requeue anything that hasn't been acked */
1564	list_splice_init(&con->out_sent, &con->out_queue);
1565
1566	/* If there are no messages queued or keepalive pending, place
1567	 * the connection in a STANDBY state */
1568	if (list_empty(&con->out_queue) &&
1569	    !ceph_con_flag_test(con, CEPH_CON_F_KEEPALIVE_PENDING)) {
1570		dout("fault %p setting STANDBY clearing WRITE_PENDING\n", con);
1571		ceph_con_flag_clear(con, CEPH_CON_F_WRITE_PENDING);
1572		con->state = CEPH_CON_S_STANDBY;
1573	} else {
1574		/* retry after a delay. */
1575		con->state = CEPH_CON_S_PREOPEN;
1576		if (!con->delay) {
1577			con->delay = BASE_DELAY_INTERVAL;
1578		} else if (con->delay < MAX_DELAY_INTERVAL) {
1579			con->delay *= 2;
1580			if (con->delay > MAX_DELAY_INTERVAL)
1581				con->delay = MAX_DELAY_INTERVAL;
1582		}
1583		ceph_con_flag_set(con, CEPH_CON_F_BACKOFF);
1584		queue_con(con);
1585	}
1586}
1587
1588void ceph_messenger_reset_nonce(struct ceph_messenger *msgr)
1589{
1590	u32 nonce = le32_to_cpu(msgr->inst.addr.nonce) + 1000000;
1591	msgr->inst.addr.nonce = cpu_to_le32(nonce);
1592	ceph_encode_my_addr(msgr);
1593}
1594
1595/*
1596 * initialize a new messenger instance
1597 */
1598void ceph_messenger_init(struct ceph_messenger *msgr,
1599			 struct ceph_entity_addr *myaddr)
1600{
1601	spin_lock_init(&msgr->global_seq_lock);
1602
1603	if (myaddr) {
1604		memcpy(&msgr->inst.addr.in_addr, &myaddr->in_addr,
1605		       sizeof(msgr->inst.addr.in_addr));
1606		ceph_addr_set_port(&msgr->inst.addr, 0);
1607	}
1608
1609	/*
1610	 * Since nautilus, clients are identified using type ANY.
1611	 * For msgr1, ceph_encode_banner_addr() munges it to NONE.
1612	 */
1613	msgr->inst.addr.type = CEPH_ENTITY_ADDR_TYPE_ANY;
1614
1615	/* generate a random non-zero nonce */
1616	do {
1617		get_random_bytes(&msgr->inst.addr.nonce,
1618				 sizeof(msgr->inst.addr.nonce));
1619	} while (!msgr->inst.addr.nonce);
1620	ceph_encode_my_addr(msgr);
1621
1622	atomic_set(&msgr->stopping, 0);
1623	write_pnet(&msgr->net, get_net(current->nsproxy->net_ns));
1624
1625	dout("%s %p\n", __func__, msgr);
1626}
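/*
 * Sketch (hypothetical client setup): a messenger is typically embedded
 * in a client structure and initialized once; each connection is then
 * set up with ceph_con_init() and pointed at a peer with ceph_con_open().
 * client, session, monmap, mon_id and example_con_ops are assumed
 * caller-side objects, not defined in this file.
 *
 *	ceph_messenger_init(&client->msgr, myaddr);	// myaddr may be NULL
 *	ceph_con_init(&session->con, session, &example_con_ops,
 *		      &client->msgr);
 *	ceph_con_open(&session->con, CEPH_ENTITY_TYPE_MON, mon_id,
 *		      &monmap->mon_inst[mon_id].addr);
 */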
1627
1628void ceph_messenger_fini(struct ceph_messenger *msgr)
1629{
1630	put_net(read_pnet(&msgr->net));
1631}
1632
1633static void msg_con_set(struct ceph_msg *msg, struct ceph_connection *con)
1634{
1635	if (msg->con)
1636		msg->con->ops->put(msg->con);
1637
1638	msg->con = con ? con->ops->get(con) : NULL;
1639	BUG_ON(msg->con != con);
1640}
1641
1642static void clear_standby(struct ceph_connection *con)
1643{
1644	/* come back from STANDBY? */
1645	if (con->state == CEPH_CON_S_STANDBY) {
1646		dout("clear_standby %p and ++connect_seq\n", con);
1647		con->state = CEPH_CON_S_PREOPEN;
1648		con->v1.connect_seq++;
1649		WARN_ON(ceph_con_flag_test(con, CEPH_CON_F_WRITE_PENDING));
1650		WARN_ON(ceph_con_flag_test(con, CEPH_CON_F_KEEPALIVE_PENDING));
1651	}
1652}
1653
1654/*
1655 * Queue up an outgoing message on the given connection.
1656 *
1657 * Consumes a ref on @msg.
1658 */
1659void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg)
1660{
1661	/* set src+dst */
1662	msg->hdr.src = con->msgr->inst.name;
1663	BUG_ON(msg->front.iov_len != le32_to_cpu(msg->hdr.front_len));
1664	msg->needs_out_seq = true;
1665
1666	mutex_lock(&con->mutex);
1667
1668	if (con->state == CEPH_CON_S_CLOSED) {
1669		dout("con_send %p closed, dropping %p\n", con, msg);
1670		ceph_msg_put(msg);
1671		mutex_unlock(&con->mutex);
1672		return;
1673	}
1674
1675	msg_con_set(msg, con);
1676
1677	BUG_ON(!list_empty(&msg->list_head));
1678	list_add_tail(&msg->list_head, &con->out_queue);
1679	dout("----- %p to %s%lld %d=%s len %d+%d+%d -----\n", msg,
1680	     ENTITY_NAME(con->peer_name), le16_to_cpu(msg->hdr.type),
1681	     ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
1682	     le32_to_cpu(msg->hdr.front_len),
1683	     le32_to_cpu(msg->hdr.middle_len),
1684	     le32_to_cpu(msg->hdr.data_len));
1685
1686	clear_standby(con);
1687	mutex_unlock(&con->mutex);
1688
1689	/* if there wasn't anything waiting to send before, queue
1690	 * new work */
1691	if (!ceph_con_flag_test_and_set(con, CEPH_CON_F_WRITE_PENDING))
1692		queue_con(con);
1693}
1694EXPORT_SYMBOL(ceph_con_send);
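
/*
 * Editor's note: an illustrative sketch, not upstream code.  A caller
 * allocates a message and hands it off; ceph_con_send() consumes the
 * reference, so the caller only keeps a ref of its own (see the revoke
 * sketch below) if it may need the message back.  CEPH_MSG_PING carries
 * no payload, hence front_len 0:
 *
 *	struct ceph_msg *m;
 *
 *	m = ceph_msg_new(CEPH_MSG_PING, 0, GFP_NOFS, true);
 *	if (m)
 *		ceph_con_send(con, m);	(ref now owned by the messenger)
 */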
1695
1696/*
1697 * Revoke a message that was previously queued for send
1698 */
1699void ceph_msg_revoke(struct ceph_msg *msg)
1700{
1701	struct ceph_connection *con = msg->con;
1702
1703	if (!con) {
1704		dout("%s msg %p null con\n", __func__, msg);
1705		return;		/* Message not in our possession */
1706	}
1707
1708	mutex_lock(&con->mutex);
1709	if (list_empty(&msg->list_head)) {
1710		WARN_ON(con->out_msg == msg);
1711		dout("%s con %p msg %p not linked\n", __func__, con, msg);
1712		mutex_unlock(&con->mutex);
1713		return;
1714	}
1715
1716	dout("%s con %p msg %p was linked\n", __func__, con, msg);
1717	msg->hdr.seq = 0;
1718	ceph_msg_remove(msg);
1719
1720	if (con->out_msg == msg) {
1721		WARN_ON(con->state != CEPH_CON_S_OPEN);
1722		dout("%s con %p msg %p was sending\n", __func__, con, msg);
1723		if (ceph_msgr2(from_msgr(con->msgr)))
1724			ceph_con_v2_revoke(con);
1725		else
1726			ceph_con_v1_revoke(con);
1727		ceph_msg_put(con->out_msg);
1728		con->out_msg = NULL;
1729	} else {
1730		dout("%s con %p msg %p not current, out_msg %p\n", __func__,
1731		     con, msg, con->out_msg);
1732	}
1733	mutex_unlock(&con->mutex);
1734}
1735
1736/*
1737 * Revoke a message that we may be reading data into
1738 */
1739void ceph_msg_revoke_incoming(struct ceph_msg *msg)
1740{
1741	struct ceph_connection *con = msg->con;
1742
1743	if (!con) {
1744		dout("%s msg %p null con\n", __func__, msg);
1745		return;		/* Message not in our possession */
1746	}
1747
1748	mutex_lock(&con->mutex);
1749	if (con->in_msg == msg) {
1750		WARN_ON(con->state != CEPH_CON_S_OPEN);
1751		dout("%s con %p msg %p was recving\n", __func__, con, msg);
1752		if (ceph_msgr2(from_msgr(con->msgr)))
1753			ceph_con_v2_revoke_incoming(con);
1754		else
1755			ceph_con_v1_revoke_incoming(con);
1756		ceph_msg_put(con->in_msg);
1757		con->in_msg = NULL;
1758	} else {
1759		dout("%s con %p msg %p not current, in_msg %p\n", __func__,
1760		     con, msg, con->in_msg);
1761	}
1762	mutex_unlock(&con->mutex);
1763}
1764
1765/*
1766	 * Queue a keepalive byte to ensure the TCP connection is alive.
1767 */
1768void ceph_con_keepalive(struct ceph_connection *con)
1769{
1770	dout("con_keepalive %p\n", con);
1771	mutex_lock(&con->mutex);
1772	clear_standby(con);
1773	ceph_con_flag_set(con, CEPH_CON_F_KEEPALIVE_PENDING);
1774	mutex_unlock(&con->mutex);
1775
1776	if (!ceph_con_flag_test_and_set(con, CEPH_CON_F_WRITE_PENDING))
1777		queue_con(con);
1778}
1779EXPORT_SYMBOL(ceph_con_keepalive);
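
/*
 * Editor's note: illustrative only.  A client keeping an idle session
 * alive typically calls this from a periodic tick; "keepalive_work" and
 * "delay" are hypothetical names for this sketch:
 *
 *	ceph_con_keepalive(con);
 *	schedule_delayed_work(&keepalive_work, round_jiffies_relative(delay));
 */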
1780
1781bool ceph_con_keepalive_expired(struct ceph_connection *con,
1782			       unsigned long interval)
1783{
1784	if (interval > 0 &&
1785	    (con->peer_features & CEPH_FEATURE_MSGR_KEEPALIVE2)) {
1786		struct timespec64 now;
1787		struct timespec64 ts;
1788		ktime_get_real_ts64(&now);
1789		jiffies_to_timespec64(interval, &ts);
1790		ts = timespec64_add(con->last_keepalive_ack, ts);
1791		return timespec64_compare(&now, &ts) >= 0;
1792	}
1793	return false;
1794}
1795
1796static struct ceph_msg_data *ceph_msg_data_add(struct ceph_msg *msg)
1797{
1798	BUG_ON(msg->num_data_items >= msg->max_data_items);
1799	return &msg->data[msg->num_data_items++];
1800}
1801
1802static void ceph_msg_data_destroy(struct ceph_msg_data *data)
1803{
1804	if (data->type == CEPH_MSG_DATA_PAGES && data->own_pages) {
1805		int num_pages = calc_pages_for(data->alignment, data->length);
1806		ceph_release_page_vector(data->pages, num_pages);
1807	} else if (data->type == CEPH_MSG_DATA_PAGELIST) {
1808		ceph_pagelist_release(data->pagelist);
1809	}
1810}
1811
1812void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages,
1813			     size_t length, size_t alignment, bool own_pages)
1814{
1815	struct ceph_msg_data *data;
1816
1817	BUG_ON(!pages);
1818	BUG_ON(!length);
1819
1820	data = ceph_msg_data_add(msg);
1821	data->type = CEPH_MSG_DATA_PAGES;
1822	data->pages = pages;
1823	data->length = length;
1824	data->alignment = alignment & ~PAGE_MASK;
1825	data->own_pages = own_pages;
1826
1827	msg->data_length += length;
1828}
1829EXPORT_SYMBOL(ceph_msg_data_add_pages);
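
/*
 * Editor's note: illustrative sketch (msg and len are assumed to come
 * from the caller).  Passing own_pages == true hands page ownership to
 * the message, so ceph_msg_data_destroy() frees the vector with the msg:
 *
 *	int num_pages = calc_pages_for(0, len);
 *	struct page **pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
 *
 *	if (!IS_ERR(pages))
 *		ceph_msg_data_add_pages(msg, pages, len, 0, true);
 */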
1830
1831void ceph_msg_data_add_pagelist(struct ceph_msg *msg,
1832				struct ceph_pagelist *pagelist)
1833{
1834	struct ceph_msg_data *data;
1835
1836	BUG_ON(!pagelist);
1837	BUG_ON(!pagelist->length);
1838
1839	data = ceph_msg_data_add(msg);
1840	data->type = CEPH_MSG_DATA_PAGELIST;
1841	refcount_inc(&pagelist->refcnt);
1842	data->pagelist = pagelist;
1843
1844	msg->data_length += pagelist->length;
1845}
1846EXPORT_SYMBOL(ceph_msg_data_add_pagelist);
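
/*
 * Editor's note: hedged sketch (msg, buf and buf_len are assumed).  The
 * message takes its own pagelist ref above, so the caller may drop its
 * reference once the data has been attached:
 *
 *	struct ceph_pagelist *pl = ceph_pagelist_alloc(GFP_NOFS);
 *
 *	if (pl && !ceph_pagelist_append(pl, buf, buf_len))
 *		ceph_msg_data_add_pagelist(msg, pl);
 *	if (pl)
 *		ceph_pagelist_release(pl);	(msg still holds its ref)
 */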
1847
1848#ifdef	CONFIG_BLOCK
1849void ceph_msg_data_add_bio(struct ceph_msg *msg, struct ceph_bio_iter *bio_pos,
1850			   u32 length)
1851{
1852	struct ceph_msg_data *data;
1853
1854	data = ceph_msg_data_add(msg);
1855	data->type = CEPH_MSG_DATA_BIO;
1856	data->bio_pos = *bio_pos;
1857	data->bio_length = length;
1858
1859	msg->data_length += length;
1860}
1861EXPORT_SYMBOL(ceph_msg_data_add_bio);
1862#endif	/* CONFIG_BLOCK */
1863
1864void ceph_msg_data_add_bvecs(struct ceph_msg *msg,
1865			     struct ceph_bvec_iter *bvec_pos)
1866{
1867	struct ceph_msg_data *data;
1868
1869	data = ceph_msg_data_add(msg);
1870	data->type = CEPH_MSG_DATA_BVECS;
1871	data->bvec_pos = *bvec_pos;
1872
1873	msg->data_length += bvec_pos->iter.bi_size;
1874}
1875EXPORT_SYMBOL(ceph_msg_data_add_bvecs);
1876
1877/*
1878	 * Construct a new message with the given type and front length;
1879	 * the new msg has a ref count of 1.
1880 */
1881struct ceph_msg *ceph_msg_new2(int type, int front_len, int max_data_items,
1882			       gfp_t flags, bool can_fail)
1883{
1884	struct ceph_msg *m;
1885
1886	m = kmem_cache_zalloc(ceph_msg_cache, flags);
1887	if (m == NULL)
1888		goto out;
1889
1890	m->hdr.type = cpu_to_le16(type);
1891	m->hdr.priority = cpu_to_le16(CEPH_MSG_PRIO_DEFAULT);
1892	m->hdr.front_len = cpu_to_le32(front_len);
1893
1894	INIT_LIST_HEAD(&m->list_head);
1895	kref_init(&m->kref);
1896
1897	/* front */
1898	if (front_len) {
1899		m->front.iov_base = kvmalloc(front_len, flags);
1900		if (m->front.iov_base == NULL) {
1901			dout("ceph_msg_new can't allocate %d bytes\n",
1902			     front_len);
1903			goto out2;
1904		}
1905	} else {
1906		m->front.iov_base = NULL;
1907	}
1908	m->front_alloc_len = m->front.iov_len = front_len;
1909
1910	if (max_data_items) {
1911		m->data = kmalloc_array(max_data_items, sizeof(*m->data),
1912					flags);
1913		if (!m->data)
1914			goto out2;
1915
1916		m->max_data_items = max_data_items;
1917	}
1918
1919	dout("ceph_msg_new %p front %d\n", m, front_len);
1920	return m;
1921
1922out2:
1923	ceph_msg_put(m);
1924out:
1925	if (!can_fail) {
1926		pr_err("msg_new can't create type %d front %d\n", type,
1927		       front_len);
1928		WARN_ON(1);
1929	} else {
1930		dout("msg_new can't create type %d front %d\n", type,
1931		     front_len);
1932	}
1933	return NULL;
1934}
1935EXPORT_SYMBOL(ceph_msg_new2);
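
/*
 * Editor's note: illustrative sketch; type, front_len, pages and len are
 * caller-provided values assumed for the example.  Reserving a data item
 * slot up front lets the caller attach a payload after allocation:
 *
 *	struct ceph_msg *m;
 *
 *	m = ceph_msg_new2(type, front_len, 1, GFP_NOFS, false);
 *	if (!m)
 *		return ERR_PTR(-ENOMEM);
 *	ceph_msg_data_add_pages(m, pages, len, 0, false);
 */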
1936
1937struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags,
1938			      bool can_fail)
1939{
1940	return ceph_msg_new2(type, front_len, 0, flags, can_fail);
1941}
1942EXPORT_SYMBOL(ceph_msg_new);
1943
1944/*
1945 * Allocate "middle" portion of a message, if it is needed and wasn't
1946 * allocated by alloc_msg.  This allows us to read a small fixed-size
1947 * per-type header in the front and then gracefully fail (i.e.,
1948 * propagate the error to the caller based on info in the front) when
1949 * the middle is too large.
1950 */
1951static int ceph_alloc_middle(struct ceph_connection *con, struct ceph_msg *msg)
1952{
1953	int type = le16_to_cpu(msg->hdr.type);
1954	int middle_len = le32_to_cpu(msg->hdr.middle_len);
1955
1956	dout("alloc_middle %p type %d %s middle_len %d\n", msg, type,
1957	     ceph_msg_type_name(type), middle_len);
1958	BUG_ON(!middle_len);
1959	BUG_ON(msg->middle);
1960
1961	msg->middle = ceph_buffer_new(middle_len, GFP_NOFS);
1962	if (!msg->middle)
1963		return -ENOMEM;
1964	return 0;
1965}
1966
1967/*
1968 * Allocate a message for receiving an incoming message on a
1969 * connection, and save the result in con->in_msg.  Uses the
1970 * connection's private alloc_msg op if available.
1971 *
1972 * Returns 0 on success, or a negative error code.
1973 *
1974 * On success, if we set *skip = 1:
1975 *  - the next message should be skipped and ignored.
1976 *  - con->in_msg == NULL
1977 * or if we set *skip = 0:
1978 *  - con->in_msg is non-null.
1979 * On error (ENOMEM, EAGAIN, ...),
1980 *  - con->in_msg == NULL
1981 */
1982int ceph_con_in_msg_alloc(struct ceph_connection *con,
1983			  struct ceph_msg_header *hdr, int *skip)
1984{
1985	int middle_len = le32_to_cpu(hdr->middle_len);
1986	struct ceph_msg *msg;
1987	int ret = 0;
1988
1989	BUG_ON(con->in_msg != NULL);
1990	BUG_ON(!con->ops->alloc_msg);
1991
1992	mutex_unlock(&con->mutex);
1993	msg = con->ops->alloc_msg(con, hdr, skip);
1994	mutex_lock(&con->mutex);
1995	if (con->state != CEPH_CON_S_OPEN) {
1996		if (msg)
1997			ceph_msg_put(msg);
1998		return -EAGAIN;
1999	}
2000	if (msg) {
2001		BUG_ON(*skip);
2002		msg_con_set(msg, con);
2003		con->in_msg = msg;
2004	} else {
2005		/*
2006		 * Null message pointer means either we should skip
2007		 * this message or we couldn't allocate memory.  The
2008		 * former is not an error.
2009		 */
2010		if (*skip)
2011			return 0;
2012
2013		con->error_msg = "error allocating memory for incoming message";
2014		return -ENOMEM;
2015	}
2016	memcpy(&con->in_msg->hdr, hdr, sizeof(*hdr));
2017
2018	if (middle_len && !con->in_msg->middle) {
2019		ret = ceph_alloc_middle(con, con->in_msg);
2020		if (ret < 0) {
2021			ceph_msg_put(con->in_msg);
2022			con->in_msg = NULL;
2023		}
2024	}
2025
2026	return ret;
2027}
2028
2029void ceph_con_get_out_msg(struct ceph_connection *con)
2030{
2031	struct ceph_msg *msg;
2032
2033	BUG_ON(list_empty(&con->out_queue));
2034	msg = list_first_entry(&con->out_queue, struct ceph_msg, list_head);
2035	WARN_ON(msg->con != con);
2036
2037	/*
2038	 * Put the message on the "sent" list using a ref from ceph_con_send().
2039	 * It is put when the message is acked or revoked.
2040	 */
2041	list_move_tail(&msg->list_head, &con->out_sent);
2042
2043	/*
2044	 * Only assign outgoing seq # if we haven't sent this message
2045	 * yet.  If it is requeued, resend with its original seq.
2046	 */
2047	if (msg->needs_out_seq) {
2048		msg->hdr.seq = cpu_to_le64(++con->out_seq);
2049		msg->needs_out_seq = false;
2050
2051		if (con->ops->reencode_message)
2052			con->ops->reencode_message(msg);
2053	}
2054
2055	/*
2056	 * Get a ref for out_msg.  It is put when we are done sending the
2057	 * message or in case of a fault.
2058	 */
2059	WARN_ON(con->out_msg);
2060	con->out_msg = ceph_msg_get(msg);
2061}
2062
2063/*
2064 * Free a generically kmalloc'd message.
2065 */
2066static void ceph_msg_free(struct ceph_msg *m)
2067{
2068	dout("%s %p\n", __func__, m);
2069	kvfree(m->front.iov_base);
2070	kfree(m->data);
2071	kmem_cache_free(ceph_msg_cache, m);
2072}
2073
2074static void ceph_msg_release(struct kref *kref)
2075{
2076	struct ceph_msg *m = container_of(kref, struct ceph_msg, kref);
2077	int i;
2078
2079	dout("%s %p\n", __func__, m);
2080	WARN_ON(!list_empty(&m->list_head));
2081
2082	msg_con_set(m, NULL);
2083
2084	/* drop middle, data, if any */
2085	if (m->middle) {
2086		ceph_buffer_put(m->middle);
2087		m->middle = NULL;
2088	}
2089
2090	for (i = 0; i < m->num_data_items; i++)
2091		ceph_msg_data_destroy(&m->data[i]);
2092
2093	if (m->pool)
2094		ceph_msgpool_put(m->pool, m);
2095	else
2096		ceph_msg_free(m);
2097}
2098
2099struct ceph_msg *ceph_msg_get(struct ceph_msg *msg)
2100{
2101	dout("%s %p (was %d)\n", __func__, msg,
2102	     kref_read(&msg->kref));
2103	kref_get(&msg->kref);
2104	return msg;
2105}
2106EXPORT_SYMBOL(ceph_msg_get);
2107
2108void ceph_msg_put(struct ceph_msg *msg)
2109{
2110	dout("%s %p (was %d)\n", __func__, msg,
2111	     kref_read(&msg->kref));
2112	kref_put(&msg->kref, ceph_msg_release);
2113}
2114EXPORT_SYMBOL(ceph_msg_put);
2115
2116void ceph_msg_dump(struct ceph_msg *msg)
2117{
2118	pr_debug("msg_dump %p (front_alloc_len %d length %zd)\n", msg,
2119		 msg->front_alloc_len, msg->data_length);
2120	print_hex_dump(KERN_DEBUG, "header: ",
2121		       DUMP_PREFIX_OFFSET, 16, 1,
2122		       &msg->hdr, sizeof(msg->hdr), true);
2123	print_hex_dump(KERN_DEBUG, " front: ",
2124		       DUMP_PREFIX_OFFSET, 16, 1,
2125		       msg->front.iov_base, msg->front.iov_len, true);
2126	if (msg->middle)
2127		print_hex_dump(KERN_DEBUG, "middle: ",
2128			       DUMP_PREFIX_OFFSET, 16, 1,
2129			       msg->middle->vec.iov_base,
2130			       msg->middle->vec.iov_len, true);
2131	print_hex_dump(KERN_DEBUG, "footer: ",
2132		       DUMP_PREFIX_OFFSET, 16, 1,
2133		       &msg->footer, sizeof(msg->footer), true);
2134}
2135EXPORT_SYMBOL(ceph_msg_dump);