   1// SPDX-License-Identifier: GPL-2.0
   2/* Multipath TCP
   3 *
   4 * Copyright (c) 2017 - 2019, Intel Corporation.
   5 */
   6
   7#define pr_fmt(fmt) "MPTCP: " fmt
   8
   9#include <linux/kernel.h>
  10#include <linux/module.h>
  11#include <linux/netdevice.h>
  12#include <linux/sched/signal.h>
  13#include <linux/atomic.h>
  14#include <net/sock.h>
  15#include <net/inet_common.h>
  16#include <net/inet_hashtables.h>
  17#include <net/protocol.h>
  18#include <net/tcp.h>
  19#include <net/tcp_states.h>
  20#if IS_ENABLED(CONFIG_MPTCP_IPV6)
  21#include <net/transp_v6.h>
  22#endif
  23#include <net/mptcp.h>
  24#include "protocol.h"
  25#include "mib.h"
  26
  27#define MPTCP_SAME_STATE TCP_MAX_STATES
  28
  29#if IS_ENABLED(CONFIG_MPTCP_IPV6)
  30struct mptcp6_sock {
  31	struct mptcp_sock msk;
  32	struct ipv6_pinfo np;
  33};
  34#endif
  35
  36struct mptcp_skb_cb {
  37	u32 offset;
  38};
  39
  40#define MPTCP_SKB_CB(__skb)	((struct mptcp_skb_cb *)&((__skb)->cb[0]))
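/* A minimal compile-time sanity sketch (illustrative, not wired in here):
 * MPTCP_SKB_CB() overlays struct mptcp_skb_cb on the 48-byte skb->cb[]
 * scratch area, so the struct must never outgrow it.  A guard such as
 *
 *	BUILD_BUG_ON(sizeof(struct mptcp_skb_cb) > sizeof_field(struct sk_buff, cb));
 *
 * placed in an init path would catch any future growth at build time.
 */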
  41
  42static struct percpu_counter mptcp_sockets_allocated;
  43
  44/* If msk has an initial subflow socket, and the MP_CAPABLE handshake has not
  45 * completed yet or has failed, return the subflow socket.
  46 * Otherwise return NULL.
  47 */
  48static struct socket *__mptcp_nmpc_socket(const struct mptcp_sock *msk)
  49{
  50	if (!msk->subflow || READ_ONCE(msk->can_ack))
  51		return NULL;
  52
  53	return msk->subflow;
  54}
  55
  56static bool mptcp_is_tcpsk(struct sock *sk)
  57{
  58	struct socket *sock = sk->sk_socket;
  59
  60	if (unlikely(sk->sk_prot == &tcp_prot)) {
  61		/* we are being invoked after mptcp_accept() has
  62		 * accepted a non-mp-capable flow: sk is a tcp_sk,
  63		 * not an mptcp one.
  64		 *
  65		 * Hand the socket over to tcp so all further socket ops
  66		 * bypass mptcp.
  67		 */
  68		sock->ops = &inet_stream_ops;
  69		return true;
  70#if IS_ENABLED(CONFIG_MPTCP_IPV6)
  71	} else if (unlikely(sk->sk_prot == &tcpv6_prot)) {
  72		sock->ops = &inet6_stream_ops;
  73		return true;
  74#endif
  75	}
  76
  77	return false;
  78}
  79
  80static struct sock *__mptcp_tcp_fallback(struct mptcp_sock *msk)
  81{
  82	sock_owned_by_me((const struct sock *)msk);
  83
  84	if (likely(!__mptcp_check_fallback(msk)))
  85		return NULL;
  86
  87	return msk->first;
  88}
  89
  90static int __mptcp_socket_create(struct mptcp_sock *msk)
  91{
  92	struct mptcp_subflow_context *subflow;
  93	struct sock *sk = (struct sock *)msk;
  94	struct socket *ssock;
  95	int err;
  96
  97	err = mptcp_subflow_create_socket(sk, &ssock);
  98	if (err)
  99		return err;
 100
 101	msk->first = ssock->sk;
 102	msk->subflow = ssock;
 103	subflow = mptcp_subflow_ctx(ssock->sk);
 104	list_add(&subflow->node, &msk->conn_list);
 105	subflow->request_mptcp = 1;
 106
  107	/* accept() will wait on first subflow sk_wq, and we always wake up
 108	 * via msk->sk_socket
 109	 */
 110	RCU_INIT_POINTER(msk->first->sk_wq, &sk->sk_socket->wq);
 111
 112	return 0;
 113}
 114
 115static void __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk,
 116			     struct sk_buff *skb,
 117			     unsigned int offset, size_t copy_len)
 118{
 119	struct sock *sk = (struct sock *)msk;
 120	struct sk_buff *tail;
 121
 122	__skb_unlink(skb, &ssk->sk_receive_queue);
 123
 124	skb_ext_reset(skb);
 125	skb_orphan(skb);
 126	WRITE_ONCE(msk->ack_seq, msk->ack_seq + copy_len);
 127
 128	tail = skb_peek_tail(&sk->sk_receive_queue);
 129	if (offset == 0 && tail) {
 130		bool fragstolen;
 131		int delta;
 132
 133		if (skb_try_coalesce(tail, skb, &fragstolen, &delta)) {
 134			kfree_skb_partial(skb, fragstolen);
 135			atomic_add(delta, &sk->sk_rmem_alloc);
 136			sk_mem_charge(sk, delta);
 137			return;
 138		}
 139	}
 140
 141	skb_set_owner_r(skb, sk);
 142	__skb_queue_tail(&sk->sk_receive_queue, skb);
 143	MPTCP_SKB_CB(skb)->offset = offset;
 144}
 145
 146static void mptcp_stop_timer(struct sock *sk)
 147{
 148	struct inet_connection_sock *icsk = inet_csk(sk);
 149
 150	sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
 151	mptcp_sk(sk)->timer_ival = 0;
 152}
 153
 154/* both sockets must be locked */
 155static bool mptcp_subflow_dsn_valid(const struct mptcp_sock *msk,
 156				    struct sock *ssk)
 157{
 158	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
 159	u64 dsn = mptcp_subflow_get_mapped_dsn(subflow);
 160
 161	/* revalidate data sequence number.
 162	 *
 163	 * mptcp_subflow_data_available() is usually called
  164	 * without msk lock.  It's unlikely (but possible)
 165	 * that msk->ack_seq has been advanced since the last
 166	 * call found in-sequence data.
 167	 */
 168	if (likely(dsn == msk->ack_seq))
 169		return true;
 170
 171	subflow->data_avail = 0;
 172	return mptcp_subflow_data_available(ssk);
 173}
 174
 175static void mptcp_check_data_fin_ack(struct sock *sk)
 176{
 177	struct mptcp_sock *msk = mptcp_sk(sk);
 178
 179	if (__mptcp_check_fallback(msk))
 180		return;
 181
 182	/* Look for an acknowledged DATA_FIN */
 183	if (((1 << sk->sk_state) &
 184	     (TCPF_FIN_WAIT1 | TCPF_CLOSING | TCPF_LAST_ACK)) &&
 185	    msk->write_seq == atomic64_read(&msk->snd_una)) {
 186		mptcp_stop_timer(sk);
 187
 188		WRITE_ONCE(msk->snd_data_fin_enable, 0);
 189
 190		switch (sk->sk_state) {
 191		case TCP_FIN_WAIT1:
 192			inet_sk_state_store(sk, TCP_FIN_WAIT2);
 193			sk->sk_state_change(sk);
 194			break;
 195		case TCP_CLOSING:
 196		case TCP_LAST_ACK:
 197			inet_sk_state_store(sk, TCP_CLOSE);
 198			sk->sk_state_change(sk);
 199			break;
 200		}
 201
 202		if (sk->sk_shutdown == SHUTDOWN_MASK ||
 203		    sk->sk_state == TCP_CLOSE)
 204			sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
 205		else
 206			sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
 207	}
 208}
 209
 210static bool mptcp_pending_data_fin(struct sock *sk, u64 *seq)
 211{
 212	struct mptcp_sock *msk = mptcp_sk(sk);
 213
 214	if (READ_ONCE(msk->rcv_data_fin) &&
 215	    ((1 << sk->sk_state) &
 216	     (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2))) {
 217		u64 rcv_data_fin_seq = READ_ONCE(msk->rcv_data_fin_seq);
 218
 219		if (msk->ack_seq == rcv_data_fin_seq) {
 220			if (seq)
 221				*seq = rcv_data_fin_seq;
 222
 223			return true;
 224		}
 225	}
 226
 227	return false;
 228}
 229
 230static void mptcp_set_timeout(const struct sock *sk, const struct sock *ssk)
 231{
 232	long tout = ssk && inet_csk(ssk)->icsk_pending ?
 233				      inet_csk(ssk)->icsk_timeout - jiffies : 0;
 234
 235	if (tout <= 0)
 236		tout = mptcp_sk(sk)->timer_ival;
 237	mptcp_sk(sk)->timer_ival = tout > 0 ? tout : TCP_RTO_MIN;
 238}
 239
 240static void mptcp_check_data_fin(struct sock *sk)
 241{
 242	struct mptcp_sock *msk = mptcp_sk(sk);
 243	u64 rcv_data_fin_seq;
 244
 245	if (__mptcp_check_fallback(msk) || !msk->first)
 246		return;
 247
 248	/* Need to ack a DATA_FIN received from a peer while this side
 249	 * of the connection is in ESTABLISHED, FIN_WAIT1, or FIN_WAIT2.
 250	 * msk->rcv_data_fin was set when parsing the incoming options
 251	 * at the subflow level and the msk lock was not held, so this
 252	 * is the first opportunity to act on the DATA_FIN and change
 253	 * the msk state.
 254	 *
 255	 * If we are caught up to the sequence number of the incoming
  256	 * DATA_FIN, send the DATA_ACK now and do the state transition.  If
 257	 * not caught up, do nothing and let the recv code send DATA_ACK
 258	 * when catching up.
 259	 */
 260
 261	if (mptcp_pending_data_fin(sk, &rcv_data_fin_seq)) {
 262		struct mptcp_subflow_context *subflow;
 263
 264		WRITE_ONCE(msk->ack_seq, msk->ack_seq + 1);
 265		WRITE_ONCE(msk->rcv_data_fin, 0);
 266
 267		sk->sk_shutdown |= RCV_SHUTDOWN;
 268		smp_mb__before_atomic(); /* SHUTDOWN must be visible first */
 269		set_bit(MPTCP_DATA_READY, &msk->flags);
 270
 271		switch (sk->sk_state) {
 272		case TCP_ESTABLISHED:
 273			inet_sk_state_store(sk, TCP_CLOSE_WAIT);
 274			break;
 275		case TCP_FIN_WAIT1:
 276			inet_sk_state_store(sk, TCP_CLOSING);
 277			break;
 278		case TCP_FIN_WAIT2:
 279			inet_sk_state_store(sk, TCP_CLOSE);
 280			// @@ Close subflows now?
 281			break;
 282		default:
 283			/* Other states not expected */
 284			WARN_ON_ONCE(1);
 285			break;
 286		}
 287
 288		mptcp_set_timeout(sk, NULL);
 289		mptcp_for_each_subflow(msk, subflow) {
 290			struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
 291
 292			lock_sock(ssk);
 293			tcp_send_ack(ssk);
 294			release_sock(ssk);
 295		}
 296
 297		sk->sk_state_change(sk);
 298
 299		if (sk->sk_shutdown == SHUTDOWN_MASK ||
 300		    sk->sk_state == TCP_CLOSE)
 301			sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
 302		else
 303			sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
 304	}
 305}
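/* Worked example of the DATA_FIN handling above, with illustrative numbers:
 * the peer's DATA_FIN maps to rcv_data_fin_seq == 1000 and this end is
 * ESTABLISHED with msk->ack_seq == 1000 (fully caught up).  Then
 * mptcp_pending_data_fin() matches, ack_seq advances to 1001, the msk moves
 * to TCP_CLOSE_WAIT and a DATA_ACK is pushed on every subflow via
 * tcp_send_ack().  If ack_seq were still, say, 900, nothing happens here and
 * the receive path re-triggers this check once it catches up.
 */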
 306
 307static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
 308					   struct sock *ssk,
 309					   unsigned int *bytes)
 310{
 311	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
 312	struct sock *sk = (struct sock *)msk;
 313	unsigned int moved = 0;
 314	bool more_data_avail;
 315	struct tcp_sock *tp;
 316	bool done = false;
 317
 318	if (!mptcp_subflow_dsn_valid(msk, ssk)) {
 319		*bytes = 0;
 320		return false;
 321	}
 322
 323	tp = tcp_sk(ssk);
 324	do {
 325		u32 map_remaining, offset;
 326		u32 seq = tp->copied_seq;
 327		struct sk_buff *skb;
 328		bool fin;
 329
 330		/* try to move as much data as available */
 331		map_remaining = subflow->map_data_len -
 332				mptcp_subflow_get_map_offset(subflow);
 333
 334		skb = skb_peek(&ssk->sk_receive_queue);
 335		if (!skb)
 336			break;
 337
 338		if (__mptcp_check_fallback(msk)) {
 339			/* if we are running under the workqueue, TCP could have
  340		 * collapsed skbs between dummy map creation and now,
  341		 * so be sure to adjust the size
 342			 */
 343			map_remaining = skb->len;
 344			subflow->map_data_len = skb->len;
 345		}
 346
 347		offset = seq - TCP_SKB_CB(skb)->seq;
 348		fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
 349		if (fin) {
 350			done = true;
 351			seq++;
 352		}
 353
 354		if (offset < skb->len) {
 355			size_t len = skb->len - offset;
 356
 357			if (tp->urg_data)
 358				done = true;
 359
 360			__mptcp_move_skb(msk, ssk, skb, offset, len);
 361			seq += len;
 362			moved += len;
 363
 364			if (WARN_ON_ONCE(map_remaining < len))
 365				break;
 366		} else {
 367			WARN_ON_ONCE(!fin);
 368			sk_eat_skb(ssk, skb);
 369			done = true;
 370		}
 371
 372		WRITE_ONCE(tp->copied_seq, seq);
 373		more_data_avail = mptcp_subflow_data_available(ssk);
 374
 375		if (atomic_read(&sk->sk_rmem_alloc) > READ_ONCE(sk->sk_rcvbuf)) {
 376			done = true;
 377			break;
 378		}
 379	} while (more_data_avail);
 380
 381	*bytes = moved;
 382
 383	/* If the moves have caught up with the DATA_FIN sequence number
 384	 * it's time to ack the DATA_FIN and change socket state, but
 385	 * this is not a good place to change state. Let the workqueue
 386	 * do it.
 387	 */
 388	if (mptcp_pending_data_fin(sk, NULL) &&
 389	    schedule_work(&msk->work))
 390		sock_hold(sk);
 391
 392	return done;
 393}
 394
  395/* In most cases we will be able to lock the mptcp socket.  If it's already
 396 * owned, we need to defer to the work queue to avoid ABBA deadlock.
 397 */
 398static bool move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk)
 399{
 400	struct sock *sk = (struct sock *)msk;
 401	unsigned int moved = 0;
 402
 403	if (READ_ONCE(sk->sk_lock.owned))
 404		return false;
 405
 406	if (unlikely(!spin_trylock_bh(&sk->sk_lock.slock)))
 407		return false;
 408
 409	/* must re-check after taking the lock */
 410	if (!READ_ONCE(sk->sk_lock.owned))
 411		__mptcp_move_skbs_from_subflow(msk, ssk, &moved);
 412
 413	spin_unlock_bh(&sk->sk_lock.slock);
 414
 415	return moved > 0;
 416}
 417
 418void mptcp_data_ready(struct sock *sk, struct sock *ssk)
 419{
 420	struct mptcp_sock *msk = mptcp_sk(sk);
 421
 422	set_bit(MPTCP_DATA_READY, &msk->flags);
 423
 424	if (atomic_read(&sk->sk_rmem_alloc) < READ_ONCE(sk->sk_rcvbuf) &&
 425	    move_skbs_to_msk(msk, ssk))
 426		goto wake;
 427
 428	/* don't schedule if mptcp sk is (still) over limit */
 429	if (atomic_read(&sk->sk_rmem_alloc) > READ_ONCE(sk->sk_rcvbuf))
 430		goto wake;
 431
 432	/* mptcp socket is owned, release_cb should retry */
 433	if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED,
 434			      &sk->sk_tsq_flags)) {
 435		sock_hold(sk);
 436
  437		/* need to try again, it's possible release_cb() has already
 438		 * been called after the test_and_set_bit() above.
 439		 */
 440		move_skbs_to_msk(msk, ssk);
 441	}
 442wake:
 443	sk->sk_data_ready(sk);
 444}
 445
 446static void __mptcp_flush_join_list(struct mptcp_sock *msk)
 447{
 448	if (likely(list_empty(&msk->join_list)))
 449		return;
 450
 451	spin_lock_bh(&msk->join_list_lock);
 452	list_splice_tail_init(&msk->join_list, &msk->conn_list);
 453	spin_unlock_bh(&msk->join_list_lock);
 454}
 455
 456static bool mptcp_timer_pending(struct sock *sk)
 457{
 458	return timer_pending(&inet_csk(sk)->icsk_retransmit_timer);
 459}
 460
 461static void mptcp_reset_timer(struct sock *sk)
 462{
 463	struct inet_connection_sock *icsk = inet_csk(sk);
 464	unsigned long tout;
 465
 466	/* should never be called with mptcp level timer cleared */
 467	tout = READ_ONCE(mptcp_sk(sk)->timer_ival);
 468	if (WARN_ON_ONCE(!tout))
 469		tout = TCP_RTO_MIN;
 470	sk_reset_timer(sk, &icsk->icsk_retransmit_timer, jiffies + tout);
 471}
 472
 473void mptcp_data_acked(struct sock *sk)
 474{
 475	mptcp_reset_timer(sk);
 476
 477	if ((!sk_stream_is_writeable(sk) ||
 478	     (inet_sk_state_load(sk) != TCP_ESTABLISHED)) &&
 479	    schedule_work(&mptcp_sk(sk)->work))
 480		sock_hold(sk);
 481}
 482
 483void mptcp_subflow_eof(struct sock *sk)
 484{
 485	struct mptcp_sock *msk = mptcp_sk(sk);
 486
 487	if (!test_and_set_bit(MPTCP_WORK_EOF, &msk->flags) &&
 488	    schedule_work(&msk->work))
 489		sock_hold(sk);
 490}
 491
 492static void mptcp_check_for_eof(struct mptcp_sock *msk)
 493{
 494	struct mptcp_subflow_context *subflow;
 495	struct sock *sk = (struct sock *)msk;
 496	int receivers = 0;
 497
 498	mptcp_for_each_subflow(msk, subflow)
 499		receivers += !subflow->rx_eof;
 500
 501	if (!receivers && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
 502		/* hopefully temporary hack: propagate shutdown status
  503		 * to msk when all subflows agree on it
 504		 */
 505		sk->sk_shutdown |= RCV_SHUTDOWN;
 506
 507		smp_mb__before_atomic(); /* SHUTDOWN must be visible first */
 508		set_bit(MPTCP_DATA_READY, &msk->flags);
 509		sk->sk_data_ready(sk);
 510	}
 511}
 512
 513static bool mptcp_ext_cache_refill(struct mptcp_sock *msk)
 514{
 515	const struct sock *sk = (const struct sock *)msk;
 516
 517	if (!msk->cached_ext)
 518		msk->cached_ext = __skb_ext_alloc(sk->sk_allocation);
 519
 520	return !!msk->cached_ext;
 521}
 522
 523static struct sock *mptcp_subflow_recv_lookup(const struct mptcp_sock *msk)
 524{
 525	struct mptcp_subflow_context *subflow;
 526	struct sock *sk = (struct sock *)msk;
 527
 528	sock_owned_by_me(sk);
 529
 530	mptcp_for_each_subflow(msk, subflow) {
 531		if (subflow->data_avail)
 532			return mptcp_subflow_tcp_sock(subflow);
 533	}
 534
 535	return NULL;
 536}
 537
 538static bool mptcp_skb_can_collapse_to(u64 write_seq,
 539				      const struct sk_buff *skb,
 540				      const struct mptcp_ext *mpext)
 541{
 542	if (!tcp_skb_can_collapse_to(skb))
 543		return false;
 544
 545	/* can collapse only if MPTCP level sequence is in order */
 546	return mpext && mpext->data_seq + mpext->data_len == write_seq;
 547}
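/* Collapse example with illustrative numbers: if the tail skb already
 * carries a mapping for data sequence [100, 160) (data_seq 100, data_len 60)
 * and the next write starts at write_seq 160, the new bytes can simply
 * extend that mapping; had write_seq instead been 200, appending would break
 * the DSS <-> subflow sequence association, so the caller marks the skb EOR
 * and starts a new one.
 */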
 548
 549static bool mptcp_frag_can_collapse_to(const struct mptcp_sock *msk,
 550				       const struct page_frag *pfrag,
 551				       const struct mptcp_data_frag *df)
 552{
 553	return df && pfrag->page == df->page &&
 554		df->data_seq + df->data_len == msk->write_seq;
 555}
 556
 557static void dfrag_uncharge(struct sock *sk, int len)
 558{
 559	sk_mem_uncharge(sk, len);
 560	sk_wmem_queued_add(sk, -len);
 561}
 562
 563static void dfrag_clear(struct sock *sk, struct mptcp_data_frag *dfrag)
 564{
 565	int len = dfrag->data_len + dfrag->overhead;
 566
 567	list_del(&dfrag->list);
 568	dfrag_uncharge(sk, len);
 569	put_page(dfrag->page);
 570}
 571
 572static void mptcp_clean_una(struct sock *sk)
 573{
 574	struct mptcp_sock *msk = mptcp_sk(sk);
 575	struct mptcp_data_frag *dtmp, *dfrag;
 576	bool cleaned = false;
 577	u64 snd_una;
 578
 579	/* on fallback we just need to ignore snd_una, as this is really
 580	 * plain TCP
 581	 */
 582	if (__mptcp_check_fallback(msk))
 583		atomic64_set(&msk->snd_una, msk->write_seq);
 584	snd_una = atomic64_read(&msk->snd_una);
 585
 586	list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list) {
 587		if (after64(dfrag->data_seq + dfrag->data_len, snd_una))
 588			break;
 589
 590		dfrag_clear(sk, dfrag);
 591		cleaned = true;
 592	}
 593
 594	dfrag = mptcp_rtx_head(sk);
 595	if (dfrag && after64(snd_una, dfrag->data_seq)) {
 596		u64 delta = snd_una - dfrag->data_seq;
 597
 598		if (WARN_ON_ONCE(delta > dfrag->data_len))
 599			goto out;
 600
 601		dfrag->data_seq += delta;
 602		dfrag->offset += delta;
 603		dfrag->data_len -= delta;
 604
 605		dfrag_uncharge(sk, delta);
 606		cleaned = true;
 607	}
 608
 609out:
 610	if (cleaned) {
 611		sk_mem_reclaim_partial(sk);
 612
 613		/* Only wake up writers if a subflow is ready */
 614		if (test_bit(MPTCP_SEND_SPACE, &msk->flags))
 615			sk_stream_write_space(sk);
 616	}
 617}
 618
 619/* ensure we get enough memory for the frag hdr, beyond some minimal amount of
 620 * data
 621 */
 622static bool mptcp_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
 623{
 624	if (likely(skb_page_frag_refill(32U + sizeof(struct mptcp_data_frag),
 625					pfrag, sk->sk_allocation)))
 626		return true;
 627
 628	sk->sk_prot->enter_memory_pressure(sk);
 629	sk_stream_moderate_sndbuf(sk);
 630	return false;
 631}
 632
 633static struct mptcp_data_frag *
 634mptcp_carve_data_frag(const struct mptcp_sock *msk, struct page_frag *pfrag,
 635		      int orig_offset)
 636{
 637	int offset = ALIGN(orig_offset, sizeof(long));
 638	struct mptcp_data_frag *dfrag;
 639
 640	dfrag = (struct mptcp_data_frag *)(page_to_virt(pfrag->page) + offset);
 641	dfrag->data_len = 0;
 642	dfrag->data_seq = msk->write_seq;
 643	dfrag->overhead = offset - orig_offset + sizeof(struct mptcp_data_frag);
 644	dfrag->offset = offset + sizeof(struct mptcp_data_frag);
 645	dfrag->page = pfrag->page;
 646
 647	return dfrag;
 648}
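/* Rough layout carved out above inside the current page frag (offsets are
 * relative to page_to_virt(pfrag->page); illustrative only):
 *
 *	orig_offset .. offset - 1	padding up to the next long boundary
 *	offset				struct mptcp_data_frag header
 *	offset + sizeof(*dfrag)		first byte of the queued payload
 *
 * dfrag->overhead covers the padding plus the header itself.
 */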
 649
 650static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
 651			      struct msghdr *msg, struct mptcp_data_frag *dfrag,
 652			      long *timeo, int *pmss_now,
 653			      int *ps_goal)
 654{
 655	int mss_now, avail_size, size_goal, offset, ret, frag_truesize = 0;
 656	bool dfrag_collapsed, can_collapse = false;
 657	struct mptcp_sock *msk = mptcp_sk(sk);
 658	struct mptcp_ext *mpext = NULL;
 659	bool retransmission = !!dfrag;
 660	struct sk_buff *skb, *tail;
 661	struct page_frag *pfrag;
 662	struct page *page;
 663	u64 *write_seq;
 664	size_t psize;
 665
 666	/* use the mptcp page cache so that we can easily move the data
 667	 * from one substream to another, but do per subflow memory accounting
  668	 * Note: pfrag is used only when !retransmission, but the compiler is
  669	 * fooled into a warning if we don't init it here
 670	 */
 671	pfrag = sk_page_frag(sk);
 672	if (!retransmission) {
 673		write_seq = &msk->write_seq;
 674		page = pfrag->page;
 675	} else {
 676		write_seq = &dfrag->data_seq;
 677		page = dfrag->page;
 678	}
 679
 680	/* compute copy limit */
 681	mss_now = tcp_send_mss(ssk, &size_goal, msg->msg_flags);
 682	*pmss_now = mss_now;
 683	*ps_goal = size_goal;
 684	avail_size = size_goal;
 685	skb = tcp_write_queue_tail(ssk);
 686	if (skb) {
 687		mpext = skb_ext_find(skb, SKB_EXT_MPTCP);
 688
 689		/* Limit the write to the size available in the
  690		 * current skb, if any, so that we create at most one new skb.
  691		 * Explicitly tell TCP internals to avoid collapsing on later
  692		 * queue management operations, so as not to break the ext <->
  693		 * SSN association set here
 694		 */
 695		can_collapse = (size_goal - skb->len > 0) &&
 696			      mptcp_skb_can_collapse_to(*write_seq, skb, mpext);
 697		if (!can_collapse)
 698			TCP_SKB_CB(skb)->eor = 1;
 699		else
 700			avail_size = size_goal - skb->len;
 701	}
 702
 703	if (!retransmission) {
 704		/* reuse tail pfrag, if possible, or carve a new one from the
 705		 * page allocator
 706		 */
 707		dfrag = mptcp_rtx_tail(sk);
 708		offset = pfrag->offset;
 709		dfrag_collapsed = mptcp_frag_can_collapse_to(msk, pfrag, dfrag);
 710		if (!dfrag_collapsed) {
 711			dfrag = mptcp_carve_data_frag(msk, pfrag, offset);
 712			offset = dfrag->offset;
 713			frag_truesize = dfrag->overhead;
 714		}
 715		psize = min_t(size_t, pfrag->size - offset, avail_size);
 716
 717		/* Copy to page */
 718		pr_debug("left=%zu", msg_data_left(msg));
 719		psize = copy_page_from_iter(pfrag->page, offset,
 720					    min_t(size_t, msg_data_left(msg),
 721						  psize),
 722					    &msg->msg_iter);
 723		pr_debug("left=%zu", msg_data_left(msg));
 724		if (!psize)
 725			return -EINVAL;
 726
 727		if (!sk_wmem_schedule(sk, psize + dfrag->overhead)) {
 728			iov_iter_revert(&msg->msg_iter, psize);
 729			return -ENOMEM;
 730		}
 731	} else {
 732		offset = dfrag->offset;
 733		psize = min_t(size_t, dfrag->data_len, avail_size);
 734	}
 735
 736	/* tell the TCP stack to delay the push so that we can safely
 737	 * access the skb after the sendpages call
 738	 */
 739	ret = do_tcp_sendpages(ssk, page, offset, psize,
 740			       msg->msg_flags | MSG_SENDPAGE_NOTLAST | MSG_DONTWAIT);
 741	if (ret <= 0) {
 742		if (!retransmission)
 743			iov_iter_revert(&msg->msg_iter, psize);
 744		return ret;
 745	}
 746
 747	frag_truesize += ret;
 748	if (!retransmission) {
 749		if (unlikely(ret < psize))
 750			iov_iter_revert(&msg->msg_iter, psize - ret);
 751
 752		/* send successful, keep track of sent data for mptcp-level
 753		 * retransmission
 754		 */
 755		dfrag->data_len += ret;
 756		if (!dfrag_collapsed) {
 757			get_page(dfrag->page);
 758			list_add_tail(&dfrag->list, &msk->rtx_queue);
 759			sk_wmem_queued_add(sk, frag_truesize);
 760		} else {
 761			sk_wmem_queued_add(sk, ret);
 762		}
 763
 764		/* charge data on mptcp rtx queue to the master socket
 765		 * Note: we charge such data both to sk and ssk
 766		 */
 767		sk->sk_forward_alloc -= frag_truesize;
 768	}
 769
 770	/* if the tail skb extension is still the cached one, collapsing
 771	 * really happened. Note: we can't check for 'same skb' as the sk_buff
 772	 * hdr on tail can be transmitted, freed and re-allocated by the
 773	 * do_tcp_sendpages() call
 774	 */
 775	tail = tcp_write_queue_tail(ssk);
 776	if (mpext && tail && mpext == skb_ext_find(tail, SKB_EXT_MPTCP)) {
 777		WARN_ON_ONCE(!can_collapse);
 778		mpext->data_len += ret;
 779		goto out;
 780	}
 781
 782	skb = tcp_write_queue_tail(ssk);
 783	mpext = __skb_ext_set(skb, SKB_EXT_MPTCP, msk->cached_ext);
 784	msk->cached_ext = NULL;
 785
 786	memset(mpext, 0, sizeof(*mpext));
 787	mpext->data_seq = *write_seq;
 788	mpext->subflow_seq = mptcp_subflow_ctx(ssk)->rel_write_seq;
 789	mpext->data_len = ret;
 790	mpext->use_map = 1;
 791	mpext->dsn64 = 1;
 792
 793	pr_debug("data_seq=%llu subflow_seq=%u data_len=%u dsn64=%d",
 794		 mpext->data_seq, mpext->subflow_seq, mpext->data_len,
 795		 mpext->dsn64);
 796
 797out:
 798	if (!retransmission)
 799		pfrag->offset += frag_truesize;
 800	WRITE_ONCE(*write_seq, *write_seq + ret);
 801	mptcp_subflow_ctx(ssk)->rel_write_seq += ret;
 802
 803	return ret;
 804}
 805
 806static void mptcp_nospace(struct mptcp_sock *msk, struct socket *sock)
 807{
 808	clear_bit(MPTCP_SEND_SPACE, &msk->flags);
 809	smp_mb__after_atomic(); /* msk->flags is changed by write_space cb */
 810
 811	/* enables sk->write_space() callbacks */
 812	set_bit(SOCK_NOSPACE, &sock->flags);
 813}
 814
 815static struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk)
 816{
 817	struct mptcp_subflow_context *subflow;
 818	struct sock *backup = NULL;
 819
 820	sock_owned_by_me((const struct sock *)msk);
 821
 822	if (!mptcp_ext_cache_refill(msk))
 823		return NULL;
 824
 825	mptcp_for_each_subflow(msk, subflow) {
 826		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
 827
 828		if (!sk_stream_memory_free(ssk)) {
 829			struct socket *sock = ssk->sk_socket;
 830
 831			if (sock)
 832				mptcp_nospace(msk, sock);
 833
 834			return NULL;
 835		}
 836
 837		if (subflow->backup) {
 838			if (!backup)
 839				backup = ssk;
 840
 841			continue;
 842		}
 843
 844		return ssk;
 845	}
 846
 847	return backup;
 848}
 849
 850static void ssk_check_wmem(struct mptcp_sock *msk, struct sock *ssk)
 851{
 852	struct socket *sock;
 853
 854	if (likely(sk_stream_is_writeable(ssk)))
 855		return;
 856
 857	sock = READ_ONCE(ssk->sk_socket);
 858	if (sock)
 859		mptcp_nospace(msk, sock);
 860}
 861
 862static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 863{
 864	int mss_now = 0, size_goal = 0, ret = 0;
 865	struct mptcp_sock *msk = mptcp_sk(sk);
 866	struct page_frag *pfrag;
 867	size_t copied = 0;
 868	struct sock *ssk;
 869	bool tx_ok;
 870	long timeo;
 871
 872	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
 873		return -EOPNOTSUPP;
 874
 875	lock_sock(sk);
 876
 877	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
 878
 879	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
 880		ret = sk_stream_wait_connect(sk, &timeo);
 881		if (ret)
 882			goto out;
 883	}
 884
 885	pfrag = sk_page_frag(sk);
 886restart:
 887	mptcp_clean_una(sk);
 888
 889	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) {
 890		ret = -EPIPE;
 891		goto out;
 892	}
 893
 894	__mptcp_flush_join_list(msk);
 895	ssk = mptcp_subflow_get_send(msk);
 896	while (!sk_stream_memory_free(sk) ||
 897	       !ssk ||
 898	       !mptcp_page_frag_refill(ssk, pfrag)) {
 899		if (ssk) {
 900			/* make sure retransmit timer is
 901			 * running before we wait for memory.
 902			 *
 903			 * The retransmit timer might be needed
 904			 * to make the peer send an up-to-date
 905			 * MPTCP Ack.
 906			 */
 907			mptcp_set_timeout(sk, ssk);
 908			if (!mptcp_timer_pending(sk))
 909				mptcp_reset_timer(sk);
 910		}
 911
 912		ret = sk_stream_wait_memory(sk, &timeo);
 913		if (ret)
 914			goto out;
 915
 916		mptcp_clean_una(sk);
 917
 918		ssk = mptcp_subflow_get_send(msk);
 919		if (list_empty(&msk->conn_list)) {
 920			ret = -ENOTCONN;
 921			goto out;
 922		}
 923	}
 924
 925	pr_debug("conn_list->subflow=%p", ssk);
 926
 927	lock_sock(ssk);
 928	tx_ok = msg_data_left(msg);
 929	while (tx_ok) {
 930		ret = mptcp_sendmsg_frag(sk, ssk, msg, NULL, &timeo, &mss_now,
 931					 &size_goal);
 932		if (ret < 0) {
 933			if (ret == -EAGAIN && timeo > 0) {
 934				mptcp_set_timeout(sk, ssk);
 935				release_sock(ssk);
 936				goto restart;
 937			}
 938			break;
 939		}
 940
 941		copied += ret;
 942
 943		tx_ok = msg_data_left(msg);
 944		if (!tx_ok)
 945			break;
 946
 947		if (!sk_stream_memory_free(ssk) ||
 948		    !mptcp_page_frag_refill(ssk, pfrag) ||
 949		    !mptcp_ext_cache_refill(msk)) {
 950			set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
 951			tcp_push(ssk, msg->msg_flags, mss_now,
 952				 tcp_sk(ssk)->nonagle, size_goal);
 953			mptcp_set_timeout(sk, ssk);
 954			release_sock(ssk);
 955			goto restart;
 956		}
 957
 958		/* memory is charged to mptcp level socket as well, i.e.
 959		 * if msg is very large, mptcp socket may run out of buffer
 960		 * space.  mptcp_clean_una() will release data that has
 961		 * been acked at mptcp level in the mean time, so there is
 962		 * a good chance we can continue sending data right away.
 963		 *
 964		 * Normally, when the tcp subflow can accept more data, then
 965		 * so can the MPTCP socket.  However, we need to cope with
 966		 * peers that might lag behind in their MPTCP-level
 967		 * acknowledgements, i.e.  data might have been acked at
 968		 * tcp level only.  So, we must also check the MPTCP socket
 969		 * limits before we send more data.
 970		 */
 971		if (unlikely(!sk_stream_memory_free(sk))) {
 972			tcp_push(ssk, msg->msg_flags, mss_now,
 973				 tcp_sk(ssk)->nonagle, size_goal);
 974			mptcp_clean_una(sk);
 975			if (!sk_stream_memory_free(sk)) {
 976				/* can't send more for now, need to wait for
 977				 * MPTCP-level ACKs from peer.
 978				 *
 979				 * Wakeup will happen via mptcp_clean_una().
 980				 */
 981				mptcp_set_timeout(sk, ssk);
 982				release_sock(ssk);
 983				goto restart;
 984			}
 985		}
 986	}
 987
 988	mptcp_set_timeout(sk, ssk);
 989	if (copied) {
 990		tcp_push(ssk, msg->msg_flags, mss_now, tcp_sk(ssk)->nonagle,
 991			 size_goal);
 992
 993		/* start the timer, if it's not pending */
 994		if (!mptcp_timer_pending(sk))
 995			mptcp_reset_timer(sk);
 996	}
 997
 998	ssk_check_wmem(msk, ssk);
 999	release_sock(ssk);
1000out:
1001	release_sock(sk);
1002	return copied ? : ret;
1003}
1004
1005static void mptcp_wait_data(struct sock *sk, long *timeo)
1006{
1007	DEFINE_WAIT_FUNC(wait, woken_wake_function);
1008	struct mptcp_sock *msk = mptcp_sk(sk);
1009
1010	add_wait_queue(sk_sleep(sk), &wait);
1011	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
1012
1013	sk_wait_event(sk, timeo,
1014		      test_and_clear_bit(MPTCP_DATA_READY, &msk->flags), &wait);
1015
1016	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
1017	remove_wait_queue(sk_sleep(sk), &wait);
1018}
1019
1020static int __mptcp_recvmsg_mskq(struct mptcp_sock *msk,
1021				struct msghdr *msg,
1022				size_t len)
1023{
1024	struct sock *sk = (struct sock *)msk;
1025	struct sk_buff *skb;
1026	int copied = 0;
1027
1028	while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) {
1029		u32 offset = MPTCP_SKB_CB(skb)->offset;
1030		u32 data_len = skb->len - offset;
1031		u32 count = min_t(size_t, len - copied, data_len);
1032		int err;
1033
1034		err = skb_copy_datagram_msg(skb, offset, msg, count);
1035		if (unlikely(err < 0)) {
1036			if (!copied)
1037				return err;
1038			break;
1039		}
1040
1041		copied += count;
1042
1043		if (count < data_len) {
1044			MPTCP_SKB_CB(skb)->offset += count;
1045			break;
1046		}
1047
1048		__skb_unlink(skb, &sk->sk_receive_queue);
1049		__kfree_skb(skb);
1050
1051		if (copied >= len)
1052			break;
1053	}
1054
1055	return copied;
1056}
1057
1058/* receive buffer autotuning.  See tcp_rcv_space_adjust for more information.
1059 *
1060 * Only difference: Use highest rtt estimate of the subflows in use.
1061 */
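/* Worked example of the growth step below, with illustrative numbers:
 * advmss = 1460, previous space = 64KB and copied = 128KB within one RTT.
 *
 *	rcvwin = 2 * 131072 + 16 * 1460			= 285504
 *	grow   = rcvwin * (131072 - 65536) / 65536	= 285504
 *	rcvwin += 2 * grow				= 856512
 *
 * That is roughly 586 advmss-sized segments, each charged at rcvmem (the
 * truesize of one such skb), so sk_rcvbuf is raised to about 586 * rcvmem,
 * capped by sysctl_tcp_rmem[2].
 */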
1062static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied)
1063{
1064	struct mptcp_subflow_context *subflow;
1065	struct sock *sk = (struct sock *)msk;
1066	u32 time, advmss = 1;
1067	u64 rtt_us, mstamp;
1068
1069	sock_owned_by_me(sk);
1070
1071	if (copied <= 0)
1072		return;
1073
1074	msk->rcvq_space.copied += copied;
1075
1076	mstamp = div_u64(tcp_clock_ns(), NSEC_PER_USEC);
1077	time = tcp_stamp_us_delta(mstamp, msk->rcvq_space.time);
1078
1079	rtt_us = msk->rcvq_space.rtt_us;
1080	if (rtt_us && time < (rtt_us >> 3))
1081		return;
1082
1083	rtt_us = 0;
1084	mptcp_for_each_subflow(msk, subflow) {
1085		const struct tcp_sock *tp;
1086		u64 sf_rtt_us;
1087		u32 sf_advmss;
1088
1089		tp = tcp_sk(mptcp_subflow_tcp_sock(subflow));
1090
1091		sf_rtt_us = READ_ONCE(tp->rcv_rtt_est.rtt_us);
1092		sf_advmss = READ_ONCE(tp->advmss);
1093
1094		rtt_us = max(sf_rtt_us, rtt_us);
1095		advmss = max(sf_advmss, advmss);
1096	}
1097
1098	msk->rcvq_space.rtt_us = rtt_us;
1099	if (time < (rtt_us >> 3) || rtt_us == 0)
1100		return;
1101
1102	if (msk->rcvq_space.copied <= msk->rcvq_space.space)
1103		goto new_measure;
1104
1105	if (sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf &&
1106	    !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
1107		int rcvmem, rcvbuf;
1108		u64 rcvwin, grow;
1109
1110		rcvwin = ((u64)msk->rcvq_space.copied << 1) + 16 * advmss;
1111
1112		grow = rcvwin * (msk->rcvq_space.copied - msk->rcvq_space.space);
1113
1114		do_div(grow, msk->rcvq_space.space);
1115		rcvwin += (grow << 1);
1116
1117		rcvmem = SKB_TRUESIZE(advmss + MAX_TCP_HEADER);
1118		while (tcp_win_from_space(sk, rcvmem) < advmss)
1119			rcvmem += 128;
1120
1121		do_div(rcvwin, advmss);
1122		rcvbuf = min_t(u64, rcvwin * rcvmem,
1123			       sock_net(sk)->ipv4.sysctl_tcp_rmem[2]);
1124
1125		if (rcvbuf > sk->sk_rcvbuf) {
1126			u32 window_clamp;
1127
1128			window_clamp = tcp_win_from_space(sk, rcvbuf);
1129			WRITE_ONCE(sk->sk_rcvbuf, rcvbuf);
1130
1131			/* Make subflows follow along.  If we do not do this, we
1132			 * get drops at subflow level if skbs can't be moved to
1133			 * the mptcp rx queue fast enough (announced rcv_win can
1134			 * exceed ssk->sk_rcvbuf).
1135			 */
1136			mptcp_for_each_subflow(msk, subflow) {
1137				struct sock *ssk;
1138
1139				ssk = mptcp_subflow_tcp_sock(subflow);
1140				WRITE_ONCE(ssk->sk_rcvbuf, rcvbuf);
1141				tcp_sk(ssk)->window_clamp = window_clamp;
1142			}
1143		}
1144	}
1145
1146	msk->rcvq_space.space = msk->rcvq_space.copied;
1147new_measure:
1148	msk->rcvq_space.copied = 0;
1149	msk->rcvq_space.time = mstamp;
1150}
1151
1152static bool __mptcp_move_skbs(struct mptcp_sock *msk)
1153{
1154	unsigned int moved = 0;
1155	bool done;
1156
1157	do {
1158		struct sock *ssk = mptcp_subflow_recv_lookup(msk);
1159
1160		if (!ssk)
1161			break;
1162
1163		lock_sock(ssk);
1164		done = __mptcp_move_skbs_from_subflow(msk, ssk, &moved);
1165		release_sock(ssk);
1166	} while (!done);
1167
1168	return moved > 0;
1169}
1170
1171static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
1172			 int nonblock, int flags, int *addr_len)
1173{
1174	struct mptcp_sock *msk = mptcp_sk(sk);
1175	int copied = 0;
1176	int target;
1177	long timeo;
1178
1179	if (msg->msg_flags & ~(MSG_WAITALL | MSG_DONTWAIT))
1180		return -EOPNOTSUPP;
1181
1182	lock_sock(sk);
1183	timeo = sock_rcvtimeo(sk, nonblock);
1184
1185	len = min_t(size_t, len, INT_MAX);
1186	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
1187	__mptcp_flush_join_list(msk);
1188
1189	while (len > (size_t)copied) {
1190		int bytes_read;
1191
1192		bytes_read = __mptcp_recvmsg_mskq(msk, msg, len - copied);
1193		if (unlikely(bytes_read < 0)) {
1194			if (!copied)
1195				copied = bytes_read;
1196			goto out_err;
1197		}
1198
1199		copied += bytes_read;
1200
1201		if (skb_queue_empty(&sk->sk_receive_queue) &&
1202		    __mptcp_move_skbs(msk))
1203			continue;
1204
1205		/* only the master socket status is relevant here. The exit
 1206		 * conditions closely mirror tcp_recvmsg()
1207		 */
1208		if (copied >= target)
1209			break;
1210
1211		if (copied) {
1212			if (sk->sk_err ||
1213			    sk->sk_state == TCP_CLOSE ||
1214			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
1215			    !timeo ||
1216			    signal_pending(current))
1217				break;
1218		} else {
1219			if (sk->sk_err) {
1220				copied = sock_error(sk);
1221				break;
1222			}
1223
1224			if (test_and_clear_bit(MPTCP_WORK_EOF, &msk->flags))
1225				mptcp_check_for_eof(msk);
1226
1227			if (sk->sk_shutdown & RCV_SHUTDOWN)
1228				break;
1229
1230			if (sk->sk_state == TCP_CLOSE) {
1231				copied = -ENOTCONN;
1232				break;
1233			}
1234
1235			if (!timeo) {
1236				copied = -EAGAIN;
1237				break;
1238			}
1239
1240			if (signal_pending(current)) {
1241				copied = sock_intr_errno(timeo);
1242				break;
1243			}
1244		}
1245
1246		pr_debug("block timeout %ld", timeo);
1247		mptcp_wait_data(sk, &timeo);
1248	}
1249
1250	if (skb_queue_empty(&sk->sk_receive_queue)) {
1251		/* entire backlog drained, clear DATA_READY. */
1252		clear_bit(MPTCP_DATA_READY, &msk->flags);
1253
1254		/* .. race-breaker: ssk might have gotten new data
1255		 * after last __mptcp_move_skbs() returned false.
1256		 */
1257		if (unlikely(__mptcp_move_skbs(msk)))
1258			set_bit(MPTCP_DATA_READY, &msk->flags);
1259	} else if (unlikely(!test_bit(MPTCP_DATA_READY, &msk->flags))) {
1260		/* data to read but mptcp_wait_data() cleared DATA_READY */
1261		set_bit(MPTCP_DATA_READY, &msk->flags);
1262	}
1263out_err:
1264	mptcp_rcv_space_adjust(msk, copied);
1265
1266	release_sock(sk);
1267	return copied;
1268}
1269
1270static void mptcp_retransmit_handler(struct sock *sk)
1271{
1272	struct mptcp_sock *msk = mptcp_sk(sk);
1273
1274	if (atomic64_read(&msk->snd_una) == READ_ONCE(msk->write_seq)) {
1275		mptcp_stop_timer(sk);
1276	} else {
1277		set_bit(MPTCP_WORK_RTX, &msk->flags);
1278		if (schedule_work(&msk->work))
1279			sock_hold(sk);
1280	}
1281}
1282
1283static void mptcp_retransmit_timer(struct timer_list *t)
1284{
1285	struct inet_connection_sock *icsk = from_timer(icsk, t,
1286						       icsk_retransmit_timer);
1287	struct sock *sk = &icsk->icsk_inet.sk;
1288
1289	bh_lock_sock(sk);
1290	if (!sock_owned_by_user(sk)) {
1291		mptcp_retransmit_handler(sk);
1292	} else {
1293		/* delegate our work to tcp_release_cb() */
1294		if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED,
1295				      &sk->sk_tsq_flags))
1296			sock_hold(sk);
1297	}
1298	bh_unlock_sock(sk);
1299	sock_put(sk);
1300}
1301
1302/* Find an idle subflow.  Return NULL if there is unacked data at tcp
1303 * level.
1304 *
1305 * A backup subflow is returned only if that is the only kind available.
1306 */
1307static struct sock *mptcp_subflow_get_retrans(const struct mptcp_sock *msk)
1308{
1309	struct mptcp_subflow_context *subflow;
1310	struct sock *backup = NULL;
1311
1312	sock_owned_by_me((const struct sock *)msk);
1313
1314	mptcp_for_each_subflow(msk, subflow) {
1315		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
1316
1317		/* still data outstanding at TCP level?  Don't retransmit. */
1318		if (!tcp_write_queue_empty(ssk))
1319			return NULL;
1320
1321		if (subflow->backup) {
1322			if (!backup)
1323				backup = ssk;
1324			continue;
1325		}
1326
1327		return ssk;
1328	}
1329
1330	return backup;
1331}
1332
1333/* subflow sockets can be either outgoing (connect) or incoming
1334 * (accept).
1335 *
1336 * Outgoing subflows use in-kernel sockets.
1337 * Incoming subflows do not have their own 'struct socket' allocated,
1338 * so we need to use tcp_close() after detaching them from the mptcp
1339 * parent socket.
1340 */
1341static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
1342			      struct mptcp_subflow_context *subflow,
1343			      long timeout)
1344{
1345	struct socket *sock = READ_ONCE(ssk->sk_socket);
1346
1347	list_del(&subflow->node);
1348
1349	if (sock && sock != sk->sk_socket) {
1350		/* outgoing subflow */
1351		sock_release(sock);
1352	} else {
1353		/* incoming subflow */
1354		tcp_close(ssk, timeout);
1355	}
1356}
1357
1358static unsigned int mptcp_sync_mss(struct sock *sk, u32 pmtu)
1359{
1360	return 0;
1361}
1362
1363static void pm_work(struct mptcp_sock *msk)
1364{
1365	struct mptcp_pm_data *pm = &msk->pm;
1366
1367	spin_lock_bh(&msk->pm.lock);
1368
1369	pr_debug("msk=%p status=%x", msk, pm->status);
1370	if (pm->status & BIT(MPTCP_PM_ADD_ADDR_RECEIVED)) {
1371		pm->status &= ~BIT(MPTCP_PM_ADD_ADDR_RECEIVED);
1372		mptcp_pm_nl_add_addr_received(msk);
1373	}
1374	if (pm->status & BIT(MPTCP_PM_ESTABLISHED)) {
1375		pm->status &= ~BIT(MPTCP_PM_ESTABLISHED);
1376		mptcp_pm_nl_fully_established(msk);
1377	}
1378	if (pm->status & BIT(MPTCP_PM_SUBFLOW_ESTABLISHED)) {
1379		pm->status &= ~BIT(MPTCP_PM_SUBFLOW_ESTABLISHED);
1380		mptcp_pm_nl_subflow_established(msk);
1381	}
1382
1383	spin_unlock_bh(&msk->pm.lock);
1384}
1385
1386static void mptcp_worker(struct work_struct *work)
1387{
1388	struct mptcp_sock *msk = container_of(work, struct mptcp_sock, work);
1389	struct sock *ssk, *sk = &msk->sk.icsk_inet.sk;
1390	int orig_len, orig_offset, mss_now = 0, size_goal = 0;
1391	struct mptcp_data_frag *dfrag;
1392	u64 orig_write_seq;
1393	size_t copied = 0;
1394	struct msghdr msg = {
1395		.msg_flags = MSG_DONTWAIT,
1396	};
1397	long timeo = 0;
1398
1399	lock_sock(sk);
1400	mptcp_clean_una(sk);
1401	mptcp_check_data_fin_ack(sk);
1402	__mptcp_flush_join_list(msk);
1403	__mptcp_move_skbs(msk);
1404
1405	if (msk->pm.status)
1406		pm_work(msk);
1407
1408	if (test_and_clear_bit(MPTCP_WORK_EOF, &msk->flags))
1409		mptcp_check_for_eof(msk);
1410
1411	mptcp_check_data_fin(sk);
1412
1413	if (!test_and_clear_bit(MPTCP_WORK_RTX, &msk->flags))
1414		goto unlock;
1415
1416	dfrag = mptcp_rtx_head(sk);
1417	if (!dfrag)
1418		goto unlock;
1419
1420	if (!mptcp_ext_cache_refill(msk))
1421		goto reset_unlock;
1422
1423	ssk = mptcp_subflow_get_retrans(msk);
1424	if (!ssk)
1425		goto reset_unlock;
1426
1427	lock_sock(ssk);
1428
1429	orig_len = dfrag->data_len;
1430	orig_offset = dfrag->offset;
1431	orig_write_seq = dfrag->data_seq;
1432	while (dfrag->data_len > 0) {
1433		int ret = mptcp_sendmsg_frag(sk, ssk, &msg, dfrag, &timeo,
1434					     &mss_now, &size_goal);
1435		if (ret < 0)
1436			break;
1437
1438		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RETRANSSEGS);
1439		copied += ret;
1440		dfrag->data_len -= ret;
1441		dfrag->offset += ret;
1442
1443		if (!mptcp_ext_cache_refill(msk))
1444			break;
1445	}
1446	if (copied)
1447		tcp_push(ssk, msg.msg_flags, mss_now, tcp_sk(ssk)->nonagle,
1448			 size_goal);
1449
1450	dfrag->data_seq = orig_write_seq;
1451	dfrag->offset = orig_offset;
1452	dfrag->data_len = orig_len;
1453
1454	mptcp_set_timeout(sk, ssk);
1455	release_sock(ssk);
1456
1457reset_unlock:
1458	if (!mptcp_timer_pending(sk))
1459		mptcp_reset_timer(sk);
1460
1461unlock:
1462	release_sock(sk);
1463	sock_put(sk);
1464}
1465
1466static int __mptcp_init_sock(struct sock *sk)
1467{
1468	struct mptcp_sock *msk = mptcp_sk(sk);
1469
1470	spin_lock_init(&msk->join_list_lock);
1471
1472	INIT_LIST_HEAD(&msk->conn_list);
1473	INIT_LIST_HEAD(&msk->join_list);
1474	INIT_LIST_HEAD(&msk->rtx_queue);
1475	__set_bit(MPTCP_SEND_SPACE, &msk->flags);
1476	INIT_WORK(&msk->work, mptcp_worker);
1477
1478	msk->first = NULL;
1479	inet_csk(sk)->icsk_sync_mss = mptcp_sync_mss;
1480
1481	mptcp_pm_data_init(msk);
1482
1483	/* re-use the csk retrans timer for MPTCP-level retrans */
1484	timer_setup(&msk->sk.icsk_retransmit_timer, mptcp_retransmit_timer, 0);
1485
1486	return 0;
1487}
1488
1489static int mptcp_init_sock(struct sock *sk)
1490{
1491	struct net *net = sock_net(sk);
1492	int ret;
1493
1494	if (!mptcp_is_enabled(net))
1495		return -ENOPROTOOPT;
1496
1497	if (unlikely(!net->mib.mptcp_statistics) && !mptcp_mib_alloc(net))
1498		return -ENOMEM;
1499
1500	ret = __mptcp_init_sock(sk);
1501	if (ret)
1502		return ret;
1503
1504	ret = __mptcp_socket_create(mptcp_sk(sk));
1505	if (ret)
1506		return ret;
1507
1508	sk_sockets_allocated_inc(sk);
1509	sk->sk_rcvbuf = sock_net(sk)->ipv4.sysctl_tcp_rmem[1];
1510	sk->sk_sndbuf = sock_net(sk)->ipv4.sysctl_tcp_wmem[2];
1511
1512	return 0;
1513}
1514
1515static void __mptcp_clear_xmit(struct sock *sk)
1516{
1517	struct mptcp_sock *msk = mptcp_sk(sk);
1518	struct mptcp_data_frag *dtmp, *dfrag;
1519
1520	sk_stop_timer(sk, &msk->sk.icsk_retransmit_timer);
1521
1522	list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list)
1523		dfrag_clear(sk, dfrag);
1524}
1525
1526static void mptcp_cancel_work(struct sock *sk)
1527{
1528	struct mptcp_sock *msk = mptcp_sk(sk);
1529
1530	if (cancel_work_sync(&msk->work))
1531		sock_put(sk);
1532}
1533
1534static void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how)
1535{
1536	lock_sock(ssk);
1537
1538	switch (ssk->sk_state) {
1539	case TCP_LISTEN:
1540		if (!(how & RCV_SHUTDOWN))
1541			break;
1542		fallthrough;
1543	case TCP_SYN_SENT:
1544		tcp_disconnect(ssk, O_NONBLOCK);
1545		break;
1546	default:
1547		if (__mptcp_check_fallback(mptcp_sk(sk))) {
1548			pr_debug("Fallback");
1549			ssk->sk_shutdown |= how;
1550			tcp_shutdown(ssk, how);
1551		} else {
1552			pr_debug("Sending DATA_FIN on subflow %p", ssk);
1553			mptcp_set_timeout(sk, ssk);
1554			tcp_send_ack(ssk);
1555		}
1556		break;
1557	}
1558
1559	release_sock(ssk);
1560}
1561
1562static const unsigned char new_state[16] = {
1563	/* current state:     new state:      action:	*/
1564	[0 /* (Invalid) */] = TCP_CLOSE,
1565	[TCP_ESTABLISHED]   = TCP_FIN_WAIT1 | TCP_ACTION_FIN,
1566	[TCP_SYN_SENT]      = TCP_CLOSE,
1567	[TCP_SYN_RECV]      = TCP_FIN_WAIT1 | TCP_ACTION_FIN,
1568	[TCP_FIN_WAIT1]     = TCP_FIN_WAIT1,
1569	[TCP_FIN_WAIT2]     = TCP_FIN_WAIT2,
1570	[TCP_TIME_WAIT]     = TCP_CLOSE,	/* should not happen ! */
1571	[TCP_CLOSE]         = TCP_CLOSE,
1572	[TCP_CLOSE_WAIT]    = TCP_LAST_ACK  | TCP_ACTION_FIN,
1573	[TCP_LAST_ACK]      = TCP_LAST_ACK,
1574	[TCP_LISTEN]        = TCP_CLOSE,
1575	[TCP_CLOSING]       = TCP_CLOSING,
1576	[TCP_NEW_SYN_RECV]  = TCP_CLOSE,	/* should not happen ! */
1577};
1578
1579static int mptcp_close_state(struct sock *sk)
1580{
1581	int next = (int)new_state[sk->sk_state];
1582	int ns = next & TCP_STATE_MASK;
1583
1584	inet_sk_state_store(sk, ns);
1585
1586	return next & TCP_ACTION_FIN;
1587}
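/* Example walk through new_state[]: an ESTABLISHED msk maps to
 * TCP_FIN_WAIT1 | TCP_ACTION_FIN, so mptcp_close_state() stores FIN_WAIT1
 * and returns non-zero, telling mptcp_close() to send a DATA_FIN; a
 * SYN_SENT socket maps straight to TCP_CLOSE and returns 0, so no DATA_FIN
 * is emitted.
 */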
1588
1589static void mptcp_close(struct sock *sk, long timeout)
1590{
1591	struct mptcp_subflow_context *subflow, *tmp;
1592	struct mptcp_sock *msk = mptcp_sk(sk);
1593	LIST_HEAD(conn_list);
1594
1595	lock_sock(sk);
1596	sk->sk_shutdown = SHUTDOWN_MASK;
1597
1598	if (sk->sk_state == TCP_LISTEN) {
1599		inet_sk_state_store(sk, TCP_CLOSE);
1600		goto cleanup;
1601	} else if (sk->sk_state == TCP_CLOSE) {
1602		goto cleanup;
1603	}
1604
1605	if (__mptcp_check_fallback(msk)) {
1606		goto update_state;
1607	} else if (mptcp_close_state(sk)) {
1608		pr_debug("Sending DATA_FIN sk=%p", sk);
1609		WRITE_ONCE(msk->write_seq, msk->write_seq + 1);
1610		WRITE_ONCE(msk->snd_data_fin_enable, 1);
1611
1612		mptcp_for_each_subflow(msk, subflow) {
1613			struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow);
1614
1615			mptcp_subflow_shutdown(sk, tcp_sk, SHUTDOWN_MASK);
1616		}
1617	}
1618
1619	sk_stream_wait_close(sk, timeout);
1620
1621update_state:
1622	inet_sk_state_store(sk, TCP_CLOSE);
1623
1624cleanup:
1625	/* be sure to always acquire the join list lock, to sync vs
1626	 * mptcp_finish_join().
1627	 */
1628	spin_lock_bh(&msk->join_list_lock);
1629	list_splice_tail_init(&msk->join_list, &msk->conn_list);
1630	spin_unlock_bh(&msk->join_list_lock);
1631	list_splice_init(&msk->conn_list, &conn_list);
1632
1633	__mptcp_clear_xmit(sk);
1634
1635	release_sock(sk);
1636
1637	list_for_each_entry_safe(subflow, tmp, &conn_list, node) {
1638		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
1639		__mptcp_close_ssk(sk, ssk, subflow, timeout);
1640	}
1641
1642	mptcp_cancel_work(sk);
1643
1644	__skb_queue_purge(&sk->sk_receive_queue);
1645
1646	sk_common_release(sk);
1647}
1648
1649static void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk)
1650{
1651#if IS_ENABLED(CONFIG_MPTCP_IPV6)
1652	const struct ipv6_pinfo *ssk6 = inet6_sk(ssk);
1653	struct ipv6_pinfo *msk6 = inet6_sk(msk);
1654
1655	msk->sk_v6_daddr = ssk->sk_v6_daddr;
1656	msk->sk_v6_rcv_saddr = ssk->sk_v6_rcv_saddr;
1657
1658	if (msk6 && ssk6) {
1659		msk6->saddr = ssk6->saddr;
1660		msk6->flow_label = ssk6->flow_label;
1661	}
1662#endif
1663
1664	inet_sk(msk)->inet_num = inet_sk(ssk)->inet_num;
1665	inet_sk(msk)->inet_dport = inet_sk(ssk)->inet_dport;
1666	inet_sk(msk)->inet_sport = inet_sk(ssk)->inet_sport;
1667	inet_sk(msk)->inet_daddr = inet_sk(ssk)->inet_daddr;
1668	inet_sk(msk)->inet_saddr = inet_sk(ssk)->inet_saddr;
1669	inet_sk(msk)->inet_rcv_saddr = inet_sk(ssk)->inet_rcv_saddr;
1670}
1671
1672static int mptcp_disconnect(struct sock *sk, int flags)
1673{
1674	/* Should never be called.
1675	 * inet_stream_connect() calls ->disconnect, but that
1676	 * refers to the subflow socket, not the mptcp one.
1677	 */
1678	WARN_ON_ONCE(1);
1679	return 0;
1680}
1681
1682#if IS_ENABLED(CONFIG_MPTCP_IPV6)
1683static struct ipv6_pinfo *mptcp_inet6_sk(const struct sock *sk)
1684{
1685	unsigned int offset = sizeof(struct mptcp6_sock) - sizeof(struct ipv6_pinfo);
1686
1687	return (struct ipv6_pinfo *)(((u8 *)sk) + offset);
1688}
1689#endif
1690
1691struct sock *mptcp_sk_clone(const struct sock *sk,
1692			    const struct mptcp_options_received *mp_opt,
1693			    struct request_sock *req)
1694{
1695	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
1696	struct sock *nsk = sk_clone_lock(sk, GFP_ATOMIC);
1697	struct mptcp_sock *msk;
1698	u64 ack_seq;
1699
1700	if (!nsk)
1701		return NULL;
1702
1703#if IS_ENABLED(CONFIG_MPTCP_IPV6)
1704	if (nsk->sk_family == AF_INET6)
1705		inet_sk(nsk)->pinet6 = mptcp_inet6_sk(nsk);
1706#endif
1707
1708	__mptcp_init_sock(nsk);
1709
1710	msk = mptcp_sk(nsk);
1711	msk->local_key = subflow_req->local_key;
1712	msk->token = subflow_req->token;
1713	msk->subflow = NULL;
1714	WRITE_ONCE(msk->fully_established, false);
1715
1716	msk->write_seq = subflow_req->idsn + 1;
1717	atomic64_set(&msk->snd_una, msk->write_seq);
1718	if (mp_opt->mp_capable) {
1719		msk->can_ack = true;
1720		msk->remote_key = mp_opt->sndr_key;
1721		mptcp_crypto_key_sha(msk->remote_key, NULL, &ack_seq);
1722		ack_seq++;
1723		WRITE_ONCE(msk->ack_seq, ack_seq);
1724	}
1725
1726	sock_reset_flag(nsk, SOCK_RCU_FREE);
1727	/* will be fully established after successful MPC subflow creation */
1728	inet_sk_state_store(nsk, TCP_SYN_RECV);
1729	bh_unlock_sock(nsk);
1730
1731	/* keep a single reference */
1732	__sock_put(nsk);
1733	return nsk;
1734}
1735
1736void mptcp_rcv_space_init(struct mptcp_sock *msk, const struct sock *ssk)
1737{
1738	const struct tcp_sock *tp = tcp_sk(ssk);
1739
1740	msk->rcvq_space.copied = 0;
1741	msk->rcvq_space.rtt_us = 0;
1742
1743	msk->rcvq_space.time = tp->tcp_mstamp;
1744
1745	/* initial rcv_space offering made to peer */
1746	msk->rcvq_space.space = min_t(u32, tp->rcv_wnd,
1747				      TCP_INIT_CWND * tp->advmss);
1748	if (msk->rcvq_space.space == 0)
1749		msk->rcvq_space.space = TCP_INIT_CWND * TCP_MSS_DEFAULT;
1750}
1751
1752static struct sock *mptcp_accept(struct sock *sk, int flags, int *err,
1753				 bool kern)
1754{
1755	struct mptcp_sock *msk = mptcp_sk(sk);
1756	struct socket *listener;
1757	struct sock *newsk;
1758
1759	listener = __mptcp_nmpc_socket(msk);
1760	if (WARN_ON_ONCE(!listener)) {
1761		*err = -EINVAL;
1762		return NULL;
1763	}
1764
1765	pr_debug("msk=%p, listener=%p", msk, mptcp_subflow_ctx(listener->sk));
1766	newsk = inet_csk_accept(listener->sk, flags, err, kern);
1767	if (!newsk)
1768		return NULL;
1769
1770	pr_debug("msk=%p, subflow is mptcp=%d", msk, sk_is_mptcp(newsk));
1771	if (sk_is_mptcp(newsk)) {
1772		struct mptcp_subflow_context *subflow;
1773		struct sock *new_mptcp_sock;
1774		struct sock *ssk = newsk;
1775
1776		subflow = mptcp_subflow_ctx(newsk);
1777		new_mptcp_sock = subflow->conn;
1778
1779		/* is_mptcp should be false if subflow->conn is missing, see
1780		 * subflow_syn_recv_sock()
1781		 */
1782		if (WARN_ON_ONCE(!new_mptcp_sock)) {
1783			tcp_sk(newsk)->is_mptcp = 0;
1784			return newsk;
1785		}
1786
1787		/* acquire the 2nd reference for the owning socket */
1788		sock_hold(new_mptcp_sock);
1789
1790		local_bh_disable();
1791		bh_lock_sock(new_mptcp_sock);
1792		msk = mptcp_sk(new_mptcp_sock);
1793		msk->first = newsk;
1794
1795		newsk = new_mptcp_sock;
1796		mptcp_copy_inaddrs(newsk, ssk);
1797		list_add(&subflow->node, &msk->conn_list);
1798
1799		mptcp_rcv_space_init(msk, ssk);
1800		bh_unlock_sock(new_mptcp_sock);
1801
1802		__MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEPASSIVEACK);
1803		local_bh_enable();
1804	} else {
1805		MPTCP_INC_STATS(sock_net(sk),
1806				MPTCP_MIB_MPCAPABLEPASSIVEFALLBACK);
1807	}
1808
1809	return newsk;
1810}
1811
1812static void mptcp_destroy(struct sock *sk)
1813{
1814	struct mptcp_sock *msk = mptcp_sk(sk);
1815
1816	mptcp_token_destroy(msk);
1817	if (msk->cached_ext)
1818		__skb_ext_put(msk->cached_ext);
1819
1820	sk_sockets_allocated_dec(sk);
1821}
1822
1823static int mptcp_setsockopt_sol_socket(struct mptcp_sock *msk, int optname,
1824				       sockptr_t optval, unsigned int optlen)
1825{
1826	struct sock *sk = (struct sock *)msk;
1827	struct socket *ssock;
1828	int ret;
1829
1830	switch (optname) {
1831	case SO_REUSEPORT:
1832	case SO_REUSEADDR:
1833		lock_sock(sk);
1834		ssock = __mptcp_nmpc_socket(msk);
1835		if (!ssock) {
1836			release_sock(sk);
1837			return -EINVAL;
1838		}
1839
1840		ret = sock_setsockopt(ssock, SOL_SOCKET, optname, optval, optlen);
1841		if (ret == 0) {
1842			if (optname == SO_REUSEPORT)
1843				sk->sk_reuseport = ssock->sk->sk_reuseport;
1844			else if (optname == SO_REUSEADDR)
1845				sk->sk_reuse = ssock->sk->sk_reuse;
1846		}
1847		release_sock(sk);
1848		return ret;
1849	}
1850
1851	return sock_setsockopt(sk->sk_socket, SOL_SOCKET, optname, optval, optlen);
1852}
1853
1854static int mptcp_setsockopt_v6(struct mptcp_sock *msk, int optname,
1855			       sockptr_t optval, unsigned int optlen)
1856{
1857	struct sock *sk = (struct sock *)msk;
1858	int ret = -EOPNOTSUPP;
1859	struct socket *ssock;
1860
1861	switch (optname) {
1862	case IPV6_V6ONLY:
1863		lock_sock(sk);
1864		ssock = __mptcp_nmpc_socket(msk);
1865		if (!ssock) {
1866			release_sock(sk);
1867			return -EINVAL;
1868		}
1869
1870		ret = tcp_setsockopt(ssock->sk, SOL_IPV6, optname, optval, optlen);
1871		if (ret == 0)
1872			sk->sk_ipv6only = ssock->sk->sk_ipv6only;
1873
1874		release_sock(sk);
1875		break;
1876	}
1877
1878	return ret;
1879}
1880
1881static int mptcp_setsockopt(struct sock *sk, int level, int optname,
1882			    sockptr_t optval, unsigned int optlen)
1883{
1884	struct mptcp_sock *msk = mptcp_sk(sk);
1885	struct sock *ssk;
1886
1887	pr_debug("msk=%p", msk);
1888
1889	if (level == SOL_SOCKET)
1890		return mptcp_setsockopt_sol_socket(msk, optname, optval, optlen);
1891
1892	/* @@ the meaning of setsockopt() when the socket is connected and
1893	 * there are multiple subflows is not yet defined. It is up to the
1894	 * MPTCP-level socket to configure the subflows until the subflow
1895	 * is in TCP fallback, when TCP socket options are passed through
1896	 * to the one remaining subflow.
1897	 */
1898	lock_sock(sk);
1899	ssk = __mptcp_tcp_fallback(msk);
1900	release_sock(sk);
1901	if (ssk)
1902		return tcp_setsockopt(ssk, level, optname, optval, optlen);
1903
1904	if (level == SOL_IPV6)
1905		return mptcp_setsockopt_v6(msk, optname, optval, optlen);
1906
1907	return -EOPNOTSUPP;
1908}
1909
1910static int mptcp_getsockopt(struct sock *sk, int level, int optname,
1911			    char __user *optval, int __user *option)
1912{
1913	struct mptcp_sock *msk = mptcp_sk(sk);
1914	struct sock *ssk;
1915
1916	pr_debug("msk=%p", msk);
1917
 1918	/* @@ the meaning of getsockopt() when the socket is connected and
1919	 * there are multiple subflows is not yet defined. It is up to the
1920	 * MPTCP-level socket to configure the subflows until the subflow
1921	 * is in TCP fallback, when socket options are passed through
1922	 * to the one remaining subflow.
1923	 */
1924	lock_sock(sk);
1925	ssk = __mptcp_tcp_fallback(msk);
1926	release_sock(sk);
1927	if (ssk)
1928		return tcp_getsockopt(ssk, level, optname, optval, option);
1929
1930	return -EOPNOTSUPP;
1931}
1932
1933#define MPTCP_DEFERRED_ALL (TCPF_DELACK_TIMER_DEFERRED | \
1934			    TCPF_WRITE_TIMER_DEFERRED)
1935
 1936/* this is very similar to tcp_release_cb(), but we must handle a
 1937 * different set of events
1938 */
1939static void mptcp_release_cb(struct sock *sk)
1940{
1941	unsigned long flags, nflags;
1942
1943	do {
1944		flags = sk->sk_tsq_flags;
1945		if (!(flags & MPTCP_DEFERRED_ALL))
1946			return;
1947		nflags = flags & ~MPTCP_DEFERRED_ALL;
1948	} while (cmpxchg(&sk->sk_tsq_flags, flags, nflags) != flags);
1949
1950	sock_release_ownership(sk);
1951
1952	if (flags & TCPF_DELACK_TIMER_DEFERRED) {
1953		struct mptcp_sock *msk = mptcp_sk(sk);
1954		struct sock *ssk;
1955
1956		ssk = mptcp_subflow_recv_lookup(msk);
1957		if (!ssk || !schedule_work(&msk->work))
1958			__sock_put(sk);
1959	}
1960
1961	if (flags & TCPF_WRITE_TIMER_DEFERRED) {
1962		mptcp_retransmit_handler(sk);
1963		__sock_put(sk);
1964	}
1965}
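/* Example of the deferral flow: when mptcp_retransmit_timer() fires while
 * the msk is owned by user context, it sets the TCP_WRITE_TIMER_DEFERRED
 * bit and takes an extra reference; once the owner calls release_sock(),
 * this callback runs mptcp_retransmit_handler() and drops that reference
 * through __sock_put().
 */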
1966
1967static int mptcp_hash(struct sock *sk)
1968{
1969	/* should never be called,
1970	 * we hash the TCP subflows not the master socket
1971	 */
1972	WARN_ON_ONCE(1);
1973	return 0;
1974}
1975
1976static void mptcp_unhash(struct sock *sk)
1977{
1978	/* called from sk_common_release(), but nothing to do here */
1979}
1980
1981static int mptcp_get_port(struct sock *sk, unsigned short snum)
1982{
1983	struct mptcp_sock *msk = mptcp_sk(sk);
1984	struct socket *ssock;
1985
1986	ssock = __mptcp_nmpc_socket(msk);
1987	pr_debug("msk=%p, subflow=%p", msk, ssock);
1988	if (WARN_ON_ONCE(!ssock))
1989		return -EINVAL;
1990
1991	return inet_csk_get_port(ssock->sk, snum);
1992}
1993
1994void mptcp_finish_connect(struct sock *ssk)
1995{
1996	struct mptcp_subflow_context *subflow;
1997	struct mptcp_sock *msk;
1998	struct sock *sk;
1999	u64 ack_seq;
2000
2001	subflow = mptcp_subflow_ctx(ssk);
2002	sk = subflow->conn;
2003	msk = mptcp_sk(sk);
2004
2005	pr_debug("msk=%p, token=%u", sk, subflow->token);
2006
2007	mptcp_crypto_key_sha(subflow->remote_key, NULL, &ack_seq);
2008	ack_seq++;
2009	subflow->map_seq = ack_seq;
2010	subflow->map_subflow_seq = 1;
2011
2012	/* the socket is not connected yet, so no msk/subflow ops can access
2013	 * or race on the fields below
2014	 */
2015	WRITE_ONCE(msk->remote_key, subflow->remote_key);
2016	WRITE_ONCE(msk->local_key, subflow->local_key);
2017	WRITE_ONCE(msk->write_seq, subflow->idsn + 1);
2018	WRITE_ONCE(msk->ack_seq, ack_seq);
2019	WRITE_ONCE(msk->can_ack, 1);
2020	atomic64_set(&msk->snd_una, msk->write_seq);
2021
2022	mptcp_pm_new_connection(msk, 0);
2023
2024	mptcp_rcv_space_init(msk, ssk);
2025}
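
/* Illustrative userspace sketch, not kernel code: the key hashing behind
 * mptcp_crypto_key_sha() as used by mptcp_finish_connect() above, assuming
 * the RFC 8684 derivation (token = most significant 32 bits, IDSN = least
 * significant 64 bits of SHA-256 over the 64-bit key in network byte
 * order).  Requires OpenSSL for SHA256(); the msk then starts acking at
 * IDSN + 1, matching the "ack_seq++" above.
 */
#include <endian.h>
#include <stdint.h>
#include <string.h>
#include <openssl/sha.h>

static void key_sha_sketch(uint64_t key, uint32_t *token, uint64_t *idsn)
{
	unsigned char digest[SHA256_DIGEST_LENGTH];
	uint64_t be_key = htobe64(key);	/* key is hashed in network byte order */
	uint32_t msb32;
	uint64_t lsb64;

	SHA256((const unsigned char *)&be_key, sizeof(be_key), digest);

	memcpy(&msb32, digest, sizeof(msb32));		/* first 4 bytes */
	memcpy(&lsb64, digest + sizeof(digest) - sizeof(lsb64), sizeof(lsb64));

	if (token)
		*token = be32toh(msb32);
	if (idsn)
		*idsn = be64toh(lsb64);
}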
2026
2027static void mptcp_sock_graft(struct sock *sk, struct socket *parent)
2028{
2029	write_lock_bh(&sk->sk_callback_lock);
2030	rcu_assign_pointer(sk->sk_wq, &parent->wq);
2031	sk_set_socket(sk, parent);
2032	sk->sk_uid = SOCK_INODE(parent)->i_uid;
2033	write_unlock_bh(&sk->sk_callback_lock);
2034}
2035
2036bool mptcp_finish_join(struct sock *sk)
2037{
2038	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
2039	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
2040	struct sock *parent = (void *)msk;
2041	struct socket *parent_sock;
2042	bool ret;
2043
2044	pr_debug("msk=%p, subflow=%p", msk, subflow);
2045
2046	/* mptcp socket already closing? */
2047	if (!mptcp_is_fully_established(parent))
2048		return false;
2049
2050	if (!msk->pm.server_side)
2051		return true;
2052
2053	if (!mptcp_pm_allow_new_subflow(msk))
2054		return false;
2055
2056	/* active connections are already on conn_list, and we can't acquire
2057	 * the msk lock here.
2058	 * Use the join list lock as a synchronization point and double-check
2059	 * the msk status to avoid racing with mptcp_close()
2060	 */
2061	spin_lock_bh(&msk->join_list_lock);
2062	ret = inet_sk_state_load(parent) == TCP_ESTABLISHED;
2063	if (ret && !WARN_ON_ONCE(!list_empty(&subflow->node)))
2064		list_add_tail(&subflow->node, &msk->join_list);
2065	spin_unlock_bh(&msk->join_list_lock);
2066	if (!ret)
2067		return false;
2068
2069	/* attach to the msk socket only after we are sure it will deal with
2070	 * us at close time
2071	 */
2072	parent_sock = READ_ONCE(parent->sk_socket);
2073	if (parent_sock && !sk->sk_socket)
2074		mptcp_sock_graft(sk, parent_sock);
2075	subflow->map_seq = READ_ONCE(msk->ack_seq);
2076	return true;
2077}
2078
2079static bool mptcp_memory_free(const struct sock *sk, int wake)
2080{
2081	struct mptcp_sock *msk = mptcp_sk(sk);
2082
2083	return wake ? test_bit(MPTCP_SEND_SPACE, &msk->flags) : true;
2084}
2085
2086static struct proto mptcp_prot = {
2087	.name		= "MPTCP",
2088	.owner		= THIS_MODULE,
2089	.init		= mptcp_init_sock,
2090	.disconnect	= mptcp_disconnect,
2091	.close		= mptcp_close,
2092	.accept		= mptcp_accept,
2093	.setsockopt	= mptcp_setsockopt,
2094	.getsockopt	= mptcp_getsockopt,
2095	.shutdown	= tcp_shutdown,
2096	.destroy	= mptcp_destroy,
2097	.sendmsg	= mptcp_sendmsg,
2098	.recvmsg	= mptcp_recvmsg,
2099	.release_cb	= mptcp_release_cb,
2100	.hash		= mptcp_hash,
2101	.unhash		= mptcp_unhash,
2102	.get_port	= mptcp_get_port,
2103	.sockets_allocated	= &mptcp_sockets_allocated,
2104	.memory_allocated	= &tcp_memory_allocated,
2105	.memory_pressure	= &tcp_memory_pressure,
2106	.stream_memory_free	= mptcp_memory_free,
2107	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_wmem),
2108	.sysctl_mem	= sysctl_tcp_mem,
2109	.obj_size	= sizeof(struct mptcp_sock),
2110	.slab_flags	= SLAB_TYPESAFE_BY_RCU,
2111	.no_autobind	= true,
2112};
2113
2114static int mptcp_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
2115{
2116	struct mptcp_sock *msk = mptcp_sk(sock->sk);
2117	struct socket *ssock;
2118	int err;
2119
2120	lock_sock(sock->sk);
2121	ssock = __mptcp_nmpc_socket(msk);
2122	if (!ssock) {
2123		err = -EINVAL;
2124		goto unlock;
2125	}
2126
2127	err = ssock->ops->bind(ssock, uaddr, addr_len);
2128	if (!err)
2129		mptcp_copy_inaddrs(sock->sk, ssock->sk);
2130
2131unlock:
2132	release_sock(sock->sk);
2133	return err;
2134}
2135
2136static void mptcp_subflow_early_fallback(struct mptcp_sock *msk,
2137					 struct mptcp_subflow_context *subflow)
2138{
2139	subflow->request_mptcp = 0;
2140	__mptcp_do_fallback(msk);
2141}
2142
2143static int mptcp_stream_connect(struct socket *sock, struct sockaddr *uaddr,
2144				int addr_len, int flags)
2145{
2146	struct mptcp_sock *msk = mptcp_sk(sock->sk);
2147	struct mptcp_subflow_context *subflow;
2148	struct socket *ssock;
2149	int err;
2150
2151	lock_sock(sock->sk);
2152	if (sock->state != SS_UNCONNECTED && msk->subflow) {
2153		/* pending connection or invalid state: let the existing
2154		 * subflow cope with that
2155		 */
2156		ssock = msk->subflow;
2157		goto do_connect;
2158	}
2159
2160	ssock = __mptcp_nmpc_socket(msk);
2161	if (!ssock) {
2162		err = -EINVAL;
2163		goto unlock;
2164	}
2165
2166	mptcp_token_destroy(msk);
2167	inet_sk_state_store(sock->sk, TCP_SYN_SENT);
2168	subflow = mptcp_subflow_ctx(ssock->sk);
2169#ifdef CONFIG_TCP_MD5SIG
2170	/* don't attempt MPTCP if MD5SIG is enabled on this socket, as we may
2171	 * run out of TCP option space.
2172	 */
2173	if (rcu_access_pointer(tcp_sk(ssock->sk)->md5sig_info))
2174		mptcp_subflow_early_fallback(msk, subflow);
2175#endif
2176	if (subflow->request_mptcp && mptcp_token_new_connect(ssock->sk))
2177		mptcp_subflow_early_fallback(msk, subflow);
2178
2179do_connect:
2180	err = ssock->ops->connect(ssock, uaddr, addr_len, flags);
2181	sock->state = ssock->state;
2182
2183	/* on successful connect, the msk state will be moved to established by
2184	 * subflow_finish_connect()
2185	 */
2186	if (!err || err == -EINPROGRESS)
2187		mptcp_copy_inaddrs(sock->sk, ssock->sk);
2188	else
2189		inet_sk_state_store(sock->sk, inet_sk_state_load(ssock->sk));
2190
2191unlock:
2192	release_sock(sock->sk);
2193	return err;
2194}
2195
2196static int mptcp_listen(struct socket *sock, int backlog)
2197{
2198	struct mptcp_sock *msk = mptcp_sk(sock->sk);
2199	struct socket *ssock;
2200	int err;
2201
2202	pr_debug("msk=%p", msk);
2203
2204	lock_sock(sock->sk);
2205	ssock = __mptcp_nmpc_socket(msk);
2206	if (!ssock) {
2207		err = -EINVAL;
2208		goto unlock;
2209	}
2210
2211	mptcp_token_destroy(msk);
2212	inet_sk_state_store(sock->sk, TCP_LISTEN);
2213	sock_set_flag(sock->sk, SOCK_RCU_FREE);
2214
2215	err = ssock->ops->listen(ssock, backlog);
2216	inet_sk_state_store(sock->sk, inet_sk_state_load(ssock->sk));
2217	if (!err)
2218		mptcp_copy_inaddrs(sock->sk, ssock->sk);
2219
2220unlock:
2221	release_sock(sock->sk);
2222	return err;
2223}
2224
2225static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
2226			       int flags, bool kern)
2227{
2228	struct mptcp_sock *msk = mptcp_sk(sock->sk);
2229	struct socket *ssock;
2230	int err;
2231
2232	pr_debug("msk=%p", msk);
2233
2234	lock_sock(sock->sk);
2235	if (sock->sk->sk_state != TCP_LISTEN)
2236		goto unlock_fail;
2237
2238	ssock = __mptcp_nmpc_socket(msk);
2239	if (!ssock)
2240		goto unlock_fail;
2241
2242	clear_bit(MPTCP_DATA_READY, &msk->flags);
2243	sock_hold(ssock->sk);
2244	release_sock(sock->sk);
2245
2246	err = ssock->ops->accept(sock, newsock, flags, kern);
2247	if (err == 0 && !mptcp_is_tcpsk(newsock->sk)) {
2248		struct mptcp_sock *msk = mptcp_sk(newsock->sk);
2249		struct mptcp_subflow_context *subflow;
2250
2251		/* set ssk->sk_socket of the accept()ed flows to the mptcp socket.
2252		 * This is needed so the NOSPACE flag can be set from the TCP stack.
2253		 */
2254		__mptcp_flush_join_list(msk);
2255		mptcp_for_each_subflow(msk, subflow) {
2256			struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
2257
2258			if (!ssk->sk_socket)
2259				mptcp_sock_graft(ssk, newsock);
2260		}
2261	}
2262
2263	if (inet_csk_listen_poll(ssock->sk))
2264		set_bit(MPTCP_DATA_READY, &msk->flags);
2265	sock_put(ssock->sk);
2266	return err;
2267
2268unlock_fail:
2269	release_sock(sock->sk);
2270	return -EINVAL;
2271}
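
/* Illustrative userspace sketch, not kernel code: the passive-open path
 * that ends up in mptcp_listen() and mptcp_stream_accept() above.
 * IPPROTO_MPTCP is the protocol value registered via mptcp_protosw below;
 * addresses and error handling are simplified.
 */
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef IPPROTO_MPTCP
#define IPPROTO_MPTCP 262	/* may be missing from older userspace headers */
#endif

static int mptcp_listener_sketch(unsigned short port)
{
	struct sockaddr_in addr;
	int fd, conn;

	fd = socket(AF_INET, SOCK_STREAM, IPPROTO_MPTCP);
	if (fd < 0)
		return -1;

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_addr.s_addr = htonl(INADDR_ANY);
	addr.sin_port = htons(port);

	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
	    listen(fd, 16) < 0) {
		close(fd);
		return -1;
	}

	/* lands in mptcp_stream_accept(); a non-MP-capable peer yields a
	 * plain TCP socket via the mptcp_is_tcpsk() check above
	 */
	conn = accept(fd, NULL, NULL);
	if (conn >= 0)
		close(conn);
	close(fd);
	return 0;
}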
2272
2273static __poll_t mptcp_check_readable(struct mptcp_sock *msk)
2274{
2275	return test_bit(MPTCP_DATA_READY, &msk->flags) ? EPOLLIN | EPOLLRDNORM :
2276	       0;
2277}
2278
2279static __poll_t mptcp_poll(struct file *file, struct socket *sock,
2280			   struct poll_table_struct *wait)
2281{
2282	struct sock *sk = sock->sk;
2283	struct mptcp_sock *msk;
2284	__poll_t mask = 0;
2285	int state;
2286
2287	msk = mptcp_sk(sk);
2288	sock_poll_wait(file, sock, wait);
2289
2290	state = inet_sk_state_load(sk);
2291	if (state == TCP_LISTEN)
2292		return mptcp_check_readable(msk);
2293
2294	if (state != TCP_SYN_SENT && state != TCP_SYN_RECV) {
2295		mask |= mptcp_check_readable(msk);
2296		if (sk_stream_is_writeable(sk) &&
2297		    test_bit(MPTCP_SEND_SPACE, &msk->flags))
2298			mask |= EPOLLOUT | EPOLLWRNORM;
2299	}
2300	if (sk->sk_shutdown & RCV_SHUTDOWN)
2301		mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
2302
2303	return mask;
2304}
2305
2306static int mptcp_shutdown(struct socket *sock, int how)
2307{
2308	struct mptcp_sock *msk = mptcp_sk(sock->sk);
2309	struct mptcp_subflow_context *subflow;
2310	int ret = 0;
2311
2312	pr_debug("msk=%p, how=%d", msk, how);
2313
2314	lock_sock(sock->sk);
2315
2316	how++;
2317	if ((how & ~SHUTDOWN_MASK) || !how) {
2318		ret = -EINVAL;
2319		goto out_unlock;
2320	}
2321
2322	if (sock->state == SS_CONNECTING) {
2323		if ((1 << sock->sk->sk_state) &
2324		    (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_CLOSE))
2325			sock->state = SS_DISCONNECTING;
2326		else
2327			sock->state = SS_CONNECTED;
2328	}
2329
2330	/* If we've already sent a FIN, or it's a closed state, skip this. */
2331	if (__mptcp_check_fallback(msk)) {
2332		if (how == SHUT_WR || how == SHUT_RDWR)
2333			inet_sk_state_store(sock->sk, TCP_FIN_WAIT1);
2334
2335		mptcp_for_each_subflow(msk, subflow) {
2336			struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow);
2337
2338			mptcp_subflow_shutdown(sock->sk, tcp_sk, how);
2339		}
2340	} else if ((how & SEND_SHUTDOWN) &&
2341		   ((1 << sock->sk->sk_state) &
2342		    (TCPF_ESTABLISHED | TCPF_SYN_SENT |
2343		     TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) &&
2344		   mptcp_close_state(sock->sk)) {
2345		__mptcp_flush_join_list(msk);
2346
2347		WRITE_ONCE(msk->write_seq, msk->write_seq + 1);
2348		WRITE_ONCE(msk->snd_data_fin_enable, 1);
2349
2350		mptcp_for_each_subflow(msk, subflow) {
2351			struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow);
2352
2353			mptcp_subflow_shutdown(sock->sk, tcp_sk, how);
2354		}
2355	}
2356
2357	/* Wake up anyone sleeping in poll. */
2358	sock->sk->sk_state_change(sock->sk);
2359
2360out_unlock:
2361	release_sock(sock->sk);
2362
2363	return ret;
2364}
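
/* A note on the "how++" above: it turns the userspace SHUT_* constants
 * into the kernel shutdown bit masks checked against SHUTDOWN_MASK:
 *
 *	SHUT_RD   (0) + 1 == 1 == RCV_SHUTDOWN
 *	SHUT_WR   (1) + 1 == 2 == SEND_SHUTDOWN
 *	SHUT_RDWR (2) + 1 == 3 == SHUTDOWN_MASK
 *
 * Illustrative userspace sketch, not kernel code: a write-side half-close
 * that exercises the DATA_FIN branch above (write_seq is bumped and
 * snd_data_fin_enable is set); "fd" is assumed to be a connected MPTCP
 * socket and error handling is omitted.
 */
#include <sys/socket.h>
#include <unistd.h>

static void half_close_tx(int fd)
{
	/* SHUT_WR maps to SEND_SHUTDOWN and triggers the DATA_FIN path */
	shutdown(fd, SHUT_WR);

	/* ... keep reading until the peer closes its side, then ... */
	close(fd);
}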
2365
2366static const struct proto_ops mptcp_stream_ops = {
2367	.family		   = PF_INET,
2368	.owner		   = THIS_MODULE,
2369	.release	   = inet_release,
2370	.bind		   = mptcp_bind,
2371	.connect	   = mptcp_stream_connect,
2372	.socketpair	   = sock_no_socketpair,
2373	.accept		   = mptcp_stream_accept,
2374	.getname	   = inet_getname,
2375	.poll		   = mptcp_poll,
2376	.ioctl		   = inet_ioctl,
2377	.gettstamp	   = sock_gettstamp,
2378	.listen		   = mptcp_listen,
2379	.shutdown	   = mptcp_shutdown,
2380	.setsockopt	   = sock_common_setsockopt,
2381	.getsockopt	   = sock_common_getsockopt,
2382	.sendmsg	   = inet_sendmsg,
2383	.recvmsg	   = inet_recvmsg,
2384	.mmap		   = sock_no_mmap,
2385	.sendpage	   = inet_sendpage,
2386};
2387
2388static struct inet_protosw mptcp_protosw = {
2389	.type		= SOCK_STREAM,
2390	.protocol	= IPPROTO_MPTCP,
2391	.prot		= &mptcp_prot,
2392	.ops		= &mptcp_stream_ops,
2393	.flags		= INET_PROTOSW_ICSK,
2394};
2395
2396void __init mptcp_proto_init(void)
2397{
2398	mptcp_prot.h.hashinfo = tcp_prot.h.hashinfo;
2399
2400	if (percpu_counter_init(&mptcp_sockets_allocated, 0, GFP_KERNEL))
2401		panic("Failed to allocate MPTCP pcpu counter\n");
2402
2403	mptcp_subflow_init();
2404	mptcp_pm_init();
2405	mptcp_token_init();
2406
2407	if (proto_register(&mptcp_prot, 1) != 0)
2408		panic("Failed to register MPTCP proto.\n");
2409
2410	inet_register_protosw(&mptcp_protosw);
2411
2412	BUILD_BUG_ON(sizeof(struct mptcp_skb_cb) > sizeof_field(struct sk_buff, cb));
2413}
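
/* Illustrative userspace sketch, not kernel code: once mptcp_proto_init()
 * has registered mptcp_protosw, applications opt in by passing
 * IPPROTO_MPTCP to socket().  A client-side helper with a fallback to
 * plain TCP; the exact errno on kernels without MPTCP is an assumption
 * (typically EPROTONOSUPPORT when no matching protosw is registered).
 */
#include <errno.h>
#include <netinet/in.h>
#include <sys/socket.h>

#ifndef IPPROTO_MPTCP
#define IPPROTO_MPTCP 262	/* may be missing from older userspace headers */
#endif

static int open_stream_socket(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_MPTCP);

	/* fall back to plain TCP when the kernel lacks MPTCP support */
	if (fd < 0 && errno == EPROTONOSUPPORT)
		fd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);

	return fd;
}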
2414
2415#if IS_ENABLED(CONFIG_MPTCP_IPV6)
2416static const struct proto_ops mptcp_v6_stream_ops = {
2417	.family		   = PF_INET6,
2418	.owner		   = THIS_MODULE,
2419	.release	   = inet6_release,
2420	.bind		   = mptcp_bind,
2421	.connect	   = mptcp_stream_connect,
2422	.socketpair	   = sock_no_socketpair,
2423	.accept		   = mptcp_stream_accept,
2424	.getname	   = inet6_getname,
2425	.poll		   = mptcp_poll,
2426	.ioctl		   = inet6_ioctl,
2427	.gettstamp	   = sock_gettstamp,
2428	.listen		   = mptcp_listen,
2429	.shutdown	   = mptcp_shutdown,
2430	.setsockopt	   = sock_common_setsockopt,
2431	.getsockopt	   = sock_common_getsockopt,
2432	.sendmsg	   = inet6_sendmsg,
2433	.recvmsg	   = inet6_recvmsg,
2434	.mmap		   = sock_no_mmap,
2435	.sendpage	   = inet_sendpage,
2436#ifdef CONFIG_COMPAT
2437	.compat_ioctl	   = inet6_compat_ioctl,
2438#endif
2439};
2440
2441static struct proto mptcp_v6_prot;
2442
2443static void mptcp_v6_destroy(struct sock *sk)
2444{
2445	mptcp_destroy(sk);
2446	inet6_destroy_sock(sk);
2447}
2448
2449static struct inet_protosw mptcp_v6_protosw = {
2450	.type		= SOCK_STREAM,
2451	.protocol	= IPPROTO_MPTCP,
2452	.prot		= &mptcp_v6_prot,
2453	.ops		= &mptcp_v6_stream_ops,
2454	.flags		= INET_PROTOSW_ICSK,
2455};
2456
2457int __init mptcp_proto_v6_init(void)
2458{
2459	int err;
2460
2461	mptcp_v6_prot = mptcp_prot;
2462	strcpy(mptcp_v6_prot.name, "MPTCPv6");
2463	mptcp_v6_prot.slab = NULL;
2464	mptcp_v6_prot.destroy = mptcp_v6_destroy;
2465	mptcp_v6_prot.obj_size = sizeof(struct mptcp6_sock);
2466
2467	err = proto_register(&mptcp_v6_prot, 1);
2468	if (err)
2469		return err;
2470
2471	err = inet6_register_protosw(&mptcp_v6_protosw);
2472	if (err)
2473		proto_unregister(&mptcp_v6_prot);
2474
2475	return err;
2476}
2477#endif