   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * INET		An implementation of the TCP/IP protocol suite for the LINUX
   4 *		operating system.  INET is implemented using the  BSD Socket
   5 *		interface as the means of communication with the user level.
   6 *
   7 *		Implementation of the Transmission Control Protocol(TCP).
   8 *
   9 *		IPv4 specific functions
  10 *
  11 *		code split from:
  12 *		linux/ipv4/tcp.c
  13 *		linux/ipv4/tcp_input.c
  14 *		linux/ipv4/tcp_output.c
  15 *
  16 *		See tcp.c for author information
  17 */
  18
  19/*
  20 * Changes:
  21 *		David S. Miller	:	New socket lookup architecture.
  22 *					This code is dedicated to John Dyson.
  23 *		David S. Miller :	Change semantics of established hash,
  24 *					half is devoted to TIME_WAIT sockets
  25 *					and the rest go in the other half.
  26 *		Andi Kleen :		Add support for syncookies and fixed
  27 *					some bugs: ip options weren't passed to
  28 *					the TCP layer, missed a check for an
  29 *					ACK bit.
  30 *		Andi Kleen :		Implemented fast path mtu discovery.
  31 *	     				Fixed many serious bugs in the
  32 *					request_sock handling and moved
  33 *					most of it into the af independent code.
  34 *					Added tail drop and some other bugfixes.
  35 *					Added new listen semantics.
  36 *		Mike McLagan	:	Routing by source
  37 *	Juan Jose Ciarlante:		ip_dynaddr bits
  38 *		Andi Kleen:		various fixes.
   39 *	Vitaly E. Lavrov	:	Transparent proxy revived after a
   40 *					year-long coma.
  41 *	Andi Kleen		:	Fix new listen.
  42 *	Andi Kleen		:	Fix accept error reporting.
  43 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
   44 *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
  45 *					a single port at the same time.
  46 */
  47
  48#define pr_fmt(fmt) "TCP: " fmt
  49
  50#include <linux/bottom_half.h>
  51#include <linux/types.h>
  52#include <linux/fcntl.h>
  53#include <linux/module.h>
  54#include <linux/random.h>
  55#include <linux/cache.h>
  56#include <linux/jhash.h>
  57#include <linux/init.h>
  58#include <linux/times.h>
  59#include <linux/slab.h>
  60
  61#include <net/net_namespace.h>
  62#include <net/icmp.h>
  63#include <net/inet_hashtables.h>
  64#include <net/tcp.h>
  65#include <net/transp_v6.h>
  66#include <net/ipv6.h>
  67#include <net/inet_common.h>
  68#include <net/timewait_sock.h>
  69#include <net/xfrm.h>
  70#include <net/secure_seq.h>
  71#include <net/busy_poll.h>
  72
  73#include <linux/inet.h>
  74#include <linux/ipv6.h>
  75#include <linux/stddef.h>
  76#include <linux/proc_fs.h>
  77#include <linux/seq_file.h>
  78#include <linux/inetdevice.h>
  79#include <linux/btf_ids.h>
  80
  81#include <crypto/hash.h>
  82#include <linux/scatterlist.h>
  83
  84#include <trace/events/tcp.h>
  85
  86#ifdef CONFIG_TCP_MD5SIG
  87static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
  88			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
  89#endif
  90
  91struct inet_hashinfo tcp_hashinfo;
  92EXPORT_SYMBOL(tcp_hashinfo);
  93
  94static u32 tcp_v4_init_seq(const struct sk_buff *skb)
  95{
  96	return secure_tcp_seq(ip_hdr(skb)->daddr,
  97			      ip_hdr(skb)->saddr,
  98			      tcp_hdr(skb)->dest,
  99			      tcp_hdr(skb)->source);
 100}
 101
 102static u32 tcp_v4_init_ts_off(const struct net *net, const struct sk_buff *skb)
 103{
 104	return secure_tcp_ts_off(net, ip_hdr(skb)->daddr, ip_hdr(skb)->saddr);
 105}
 106
 107int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
 108{
 109	const struct inet_timewait_sock *tw = inet_twsk(sktw);
 110	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
 111	struct tcp_sock *tp = tcp_sk(sk);
 112	int reuse = sock_net(sk)->ipv4.sysctl_tcp_tw_reuse;
 113
 114	if (reuse == 2) {
 115		/* Still does not detect *everything* that goes through
 116		 * lo, since we require a loopback src or dst address
 117		 * or direct binding to 'lo' interface.
 118		 */
 119		bool loopback = false;
 120		if (tw->tw_bound_dev_if == LOOPBACK_IFINDEX)
 121			loopback = true;
 122#if IS_ENABLED(CONFIG_IPV6)
 123		if (tw->tw_family == AF_INET6) {
 124			if (ipv6_addr_loopback(&tw->tw_v6_daddr) ||
 125			    ipv6_addr_v4mapped_loopback(&tw->tw_v6_daddr) ||
 126			    ipv6_addr_loopback(&tw->tw_v6_rcv_saddr) ||
 127			    ipv6_addr_v4mapped_loopback(&tw->tw_v6_rcv_saddr))
 128				loopback = true;
 129		} else
 130#endif
 131		{
 132			if (ipv4_is_loopback(tw->tw_daddr) ||
 133			    ipv4_is_loopback(tw->tw_rcv_saddr))
 134				loopback = true;
 135		}
 136		if (!loopback)
 137			reuse = 0;
 138	}
 139
 140	/* With PAWS, it is safe from the viewpoint
 141	   of data integrity. Even without PAWS it is safe provided sequence
 142	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.
 143
  144	   Actually, the idea is close to VJ's, only the timestamp cache is
  145	   held not per host but per port pair, and the TW bucket is used
  146	   as the state holder.
  147
  148	   If the TW bucket has already been destroyed we fall back to VJ's
  149	   scheme and use the initial timestamp retrieved from the peer table.
 150	 */
 151	if (tcptw->tw_ts_recent_stamp &&
 152	    (!twp || (reuse && time_after32(ktime_get_seconds(),
 153					    tcptw->tw_ts_recent_stamp)))) {
 154		/* In case of repair and re-using TIME-WAIT sockets we still
 155		 * want to be sure that it is safe as above but honor the
 156		 * sequence numbers and time stamps set as part of the repair
 157		 * process.
 158		 *
 159		 * Without this check re-using a TIME-WAIT socket with TCP
 160		 * repair would accumulate a -1 on the repair assigned
 161		 * sequence number. The first time it is reused the sequence
 162		 * is -1, the second time -2, etc. This fixes that issue
 163		 * without appearing to create any others.
 164		 */
 165		if (likely(!tp->repair)) {
 166			u32 seq = tcptw->tw_snd_nxt + 65535 + 2;
 167
 168			if (!seq)
 169				seq = 1;
 170			WRITE_ONCE(tp->write_seq, seq);
 171			tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
 172			tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
 173		}
 174		sock_hold(sktw);
 175		return 1;
 176	}
 177
 178	return 0;
 179}
 180EXPORT_SYMBOL_GPL(tcp_twsk_unique);
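
/* A minimal userspace sketch (editorial addition, not kernel code): the
 * policy applied above is selected by the net.ipv4.tcp_tw_reuse sysctl
 * (0 = never reuse, 1 = reuse when PAWS makes it safe, 2 = the
 * loopback-only mode handled by the 'reuse == 2' branch). It can be
 * inspected the same way sysctl(8) does:
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/net/ipv4/tcp_tw_reuse", "r");
	int mode;

	if (f && fscanf(f, "%d", &mode) == 1)
		printf("tcp_tw_reuse = %d\n", mode);
	if (f)
		fclose(f);
	return 0;
}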
 181
 182static int tcp_v4_pre_connect(struct sock *sk, struct sockaddr *uaddr,
 183			      int addr_len)
 184{
 185	/* This check is replicated from tcp_v4_connect() and intended to
  186	 * prevent the BPF program called below from accessing bytes that are
  187	 * out of the bounds specified by the user in addr_len.
 188	 */
 189	if (addr_len < sizeof(struct sockaddr_in))
 190		return -EINVAL;
 191
 192	sock_owned_by_me(sk);
 193
 194	return BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr);
 195}
 196
 197/* This will initiate an outgoing connection. */
 198int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 199{
 200	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
 201	struct inet_sock *inet = inet_sk(sk);
 202	struct tcp_sock *tp = tcp_sk(sk);
 203	__be16 orig_sport, orig_dport;
 204	__be32 daddr, nexthop;
 205	struct flowi4 *fl4;
 206	struct rtable *rt;
 207	int err;
 208	struct ip_options_rcu *inet_opt;
 209	struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;
 210
 211	if (addr_len < sizeof(struct sockaddr_in))
 212		return -EINVAL;
 213
 214	if (usin->sin_family != AF_INET)
 215		return -EAFNOSUPPORT;
 216
 217	nexthop = daddr = usin->sin_addr.s_addr;
 218	inet_opt = rcu_dereference_protected(inet->inet_opt,
 219					     lockdep_sock_is_held(sk));
 220	if (inet_opt && inet_opt->opt.srr) {
 221		if (!daddr)
 222			return -EINVAL;
 223		nexthop = inet_opt->opt.faddr;
 224	}
 225
 226	orig_sport = inet->inet_sport;
 227	orig_dport = usin->sin_port;
 228	fl4 = &inet->cork.fl.u.ip4;
 229	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
 230			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
 231			      IPPROTO_TCP,
 232			      orig_sport, orig_dport, sk);
 233	if (IS_ERR(rt)) {
 234		err = PTR_ERR(rt);
 235		if (err == -ENETUNREACH)
 236			IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
 237		return err;
 238	}
 239
 240	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
 241		ip_rt_put(rt);
 242		return -ENETUNREACH;
 243	}
 244
 245	if (!inet_opt || !inet_opt->opt.srr)
 246		daddr = fl4->daddr;
 247
 248	if (!inet->inet_saddr)
 249		inet->inet_saddr = fl4->saddr;
 250	sk_rcv_saddr_set(sk, inet->inet_saddr);
 251
 252	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
 253		/* Reset inherited state */
 254		tp->rx_opt.ts_recent	   = 0;
 255		tp->rx_opt.ts_recent_stamp = 0;
 256		if (likely(!tp->repair))
 257			WRITE_ONCE(tp->write_seq, 0);
 258	}
 259
 260	inet->inet_dport = usin->sin_port;
 261	sk_daddr_set(sk, daddr);
 262
 263	inet_csk(sk)->icsk_ext_hdr_len = 0;
 264	if (inet_opt)
 265		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
 266
 267	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;
 268
 269	/* Socket identity is still unknown (sport may be zero).
  270	 * However we set the state to SYN-SENT and, without releasing the
  271	 * socket lock, select a source port, enter ourselves into the hash
  272	 * tables and complete initialization afterwards.
 273	 */
 274	tcp_set_state(sk, TCP_SYN_SENT);
 275	err = inet_hash_connect(tcp_death_row, sk);
 276	if (err)
 277		goto failure;
 278
 279	sk_set_txhash(sk);
 280
 281	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
 282			       inet->inet_sport, inet->inet_dport, sk);
 283	if (IS_ERR(rt)) {
 284		err = PTR_ERR(rt);
 285		rt = NULL;
 286		goto failure;
 287	}
 288	/* OK, now commit destination to socket.  */
 289	sk->sk_gso_type = SKB_GSO_TCPV4;
 290	sk_setup_caps(sk, &rt->dst);
 291	rt = NULL;
 292
 293	if (likely(!tp->repair)) {
 294		if (!tp->write_seq)
 295			WRITE_ONCE(tp->write_seq,
 296				   secure_tcp_seq(inet->inet_saddr,
 297						  inet->inet_daddr,
 298						  inet->inet_sport,
 299						  usin->sin_port));
 300		tp->tsoffset = secure_tcp_ts_off(sock_net(sk),
 301						 inet->inet_saddr,
 302						 inet->inet_daddr);
 303	}
 304
 305	inet->inet_id = prandom_u32();
 306
 307	if (tcp_fastopen_defer_connect(sk, &err))
 308		return err;
 309	if (err)
 310		goto failure;
 311
 312	err = tcp_connect(sk);
 313
 314	if (err)
 315		goto failure;
 316
 317	return 0;
 318
 319failure:
 320	/*
 321	 * This unhashes the socket and releases the local port,
 322	 * if necessary.
 323	 */
 324	tcp_set_state(sk, TCP_CLOSE);
 325	ip_rt_put(rt);
 326	sk->sk_route_caps = 0;
 327	inet->inet_dport = 0;
 328	return err;
 329}
 330EXPORT_SYMBOL(tcp_v4_connect);
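
/* A minimal userspace sketch (editorial addition, not kernel code): for an
 * AF_INET stream socket, the connect() call below is what ultimately
 * reaches tcp_v4_connect() above. The destination address and port are
 * illustrative (TEST-NET-1).
 */
#include <arpa/inet.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_in dst;
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0)
		return 1;
	memset(&dst, 0, sizeof(dst));
	dst.sin_family = AF_INET;
	dst.sin_port = htons(80);			/* example port */
	inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);
	if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) < 0)
		perror("connect");	/* e.g. ENETUNREACH from the path above */
	close(fd);
	return 0;
}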
 331
 332/*
 333 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
 334 * It can be called through tcp_release_cb() if socket was owned by user
 335 * at the time tcp_v4_err() was called to handle ICMP message.
 336 */
 337void tcp_v4_mtu_reduced(struct sock *sk)
 338{
 339	struct inet_sock *inet = inet_sk(sk);
 340	struct dst_entry *dst;
 341	u32 mtu;
 342
 343	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
 344		return;
 345	mtu = tcp_sk(sk)->mtu_info;
 346	dst = inet_csk_update_pmtu(sk, mtu);
 347	if (!dst)
 348		return;
 349
  350	/* Something is about to go wrong... Remember the soft error
  351	 * in case this connection is not able to recover.
 352	 */
 353	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
 354		sk->sk_err_soft = EMSGSIZE;
 355
 356	mtu = dst_mtu(dst);
 357
 358	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
 359	    ip_sk_accept_pmtu(sk) &&
 360	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
 361		tcp_sync_mss(sk, mtu);
 362
 363		/* Resend the TCP packet because it's
 364		 * clear that the old packet has been
 365		 * dropped. This is the new "fast" path mtu
 366		 * discovery.
 367		 */
 368		tcp_simple_retransmit(sk);
 369	} /* else let the usual retransmit timer handle it */
 370}
 371EXPORT_SYMBOL(tcp_v4_mtu_reduced);
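
/* A short sketch (editorial addition, not kernel code): inet->pmtudisc,
 * tested above, is configured from userspace with the IP_MTU_DISCOVER
 * socket option; setting IP_PMTUDISC_DONT opts a socket out of this
 * fast-path MTU reaction.
 */
#include <netinet/in.h>
#include <sys/socket.h>

static int disable_pmtu_discovery(int fd)
{
	int val = IP_PMTUDISC_DONT;

	return setsockopt(fd, IPPROTO_IP, IP_MTU_DISCOVER, &val, sizeof(val));
}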
 372
 373static void do_redirect(struct sk_buff *skb, struct sock *sk)
 374{
 375	struct dst_entry *dst = __sk_dst_check(sk, 0);
 376
 377	if (dst)
 378		dst->ops->redirect(dst, sk, skb);
 379}
 380
 381
 382/* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
 383void tcp_req_err(struct sock *sk, u32 seq, bool abort)
 384{
 385	struct request_sock *req = inet_reqsk(sk);
 386	struct net *net = sock_net(sk);
 387
 388	/* ICMPs are not backlogged, hence we cannot get
 389	 * an established socket here.
 390	 */
 391	if (seq != tcp_rsk(req)->snt_isn) {
 392		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
 393	} else if (abort) {
 394		/*
 395		 * Still in SYN_RECV, just remove it silently.
 396		 * There is no good way to pass the error to the newly
 397		 * created socket, and POSIX does not want network
 398		 * errors returned from accept().
 399		 */
 400		inet_csk_reqsk_queue_drop(req->rsk_listener, req);
 401		tcp_listendrop(req->rsk_listener);
 402	}
 403	reqsk_put(req);
 404}
 405EXPORT_SYMBOL(tcp_req_err);
 406
 407/* TCP-LD (RFC 6069) logic */
 408void tcp_ld_RTO_revert(struct sock *sk, u32 seq)
 409{
 410	struct inet_connection_sock *icsk = inet_csk(sk);
 411	struct tcp_sock *tp = tcp_sk(sk);
 412	struct sk_buff *skb;
 413	s32 remaining;
 414	u32 delta_us;
 415
 416	if (sock_owned_by_user(sk))
 417		return;
 418
  419	if (seq != tp->snd_una || !icsk->icsk_retransmits ||
 420	    !icsk->icsk_backoff)
 421		return;
 422
 423	skb = tcp_rtx_queue_head(sk);
 424	if (WARN_ON_ONCE(!skb))
 425		return;
 426
 427	icsk->icsk_backoff--;
 428	icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) : TCP_TIMEOUT_INIT;
 429	icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
 430
 431	tcp_mstamp_refresh(tp);
 432	delta_us = (u32)(tp->tcp_mstamp - tcp_skb_timestamp_us(skb));
 433	remaining = icsk->icsk_rto - usecs_to_jiffies(delta_us);
 434
 435	if (remaining > 0) {
 436		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
 437					  remaining, TCP_RTO_MAX);
 438	} else {
 439		/* RTO revert clocked out retransmission.
 440		 * Will retransmit now.
 441		 */
 442		tcp_retransmit_timer(sk);
 443	}
 444}
 445EXPORT_SYMBOL(tcp_ld_RTO_revert);
 446
 447/*
 448 * This routine is called by the ICMP module when it gets some
 449 * sort of error condition.  If err < 0 then the socket should
 450 * be closed and the error returned to the user.  If err > 0
 451 * it's just the icmp type << 8 | icmp code.  After adjustment
 452 * header points to the first 8 bytes of the tcp header.  We need
 453 * to find the appropriate port.
 454 *
 455 * The locking strategy used here is very "optimistic". When
 456 * someone else accesses the socket the ICMP is just dropped
 457 * and for some paths there is no check at all.
 458 * A more general error queue to queue errors for later handling
 459 * is probably better.
 460 *
 461 */
 462
 463int tcp_v4_err(struct sk_buff *skb, u32 info)
 464{
 465	const struct iphdr *iph = (const struct iphdr *)skb->data;
 466	struct tcphdr *th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
 467	struct tcp_sock *tp;
 468	struct inet_sock *inet;
 469	const int type = icmp_hdr(skb)->type;
 470	const int code = icmp_hdr(skb)->code;
 471	struct sock *sk;
 472	struct request_sock *fastopen;
 473	u32 seq, snd_una;
 474	int err;
 475	struct net *net = dev_net(skb->dev);
 476
 477	sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
 478				       th->dest, iph->saddr, ntohs(th->source),
 479				       inet_iif(skb), 0);
 480	if (!sk) {
 481		__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
 482		return -ENOENT;
 483	}
 484	if (sk->sk_state == TCP_TIME_WAIT) {
 485		inet_twsk_put(inet_twsk(sk));
 486		return 0;
 487	}
 488	seq = ntohl(th->seq);
 489	if (sk->sk_state == TCP_NEW_SYN_RECV) {
 490		tcp_req_err(sk, seq, type == ICMP_PARAMETERPROB ||
 491				     type == ICMP_TIME_EXCEEDED ||
 492				     (type == ICMP_DEST_UNREACH &&
 493				      (code == ICMP_NET_UNREACH ||
 494				       code == ICMP_HOST_UNREACH)));
 495		return 0;
 496	}
 497
 498	bh_lock_sock(sk);
 499	/* If too many ICMPs get dropped on busy
 500	 * servers this needs to be solved differently.
  501	 * We do take care of the PMTU discovery (RFC1191) special case:
  502	 * we can receive locally generated ICMP messages while the socket is held.
 503	 */
 504	if (sock_owned_by_user(sk)) {
 505		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
 506			__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
 507	}
 508	if (sk->sk_state == TCP_CLOSE)
 509		goto out;
 510
 511	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
 512		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
 513		goto out;
 514	}
 515
 516	tp = tcp_sk(sk);
 517	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
 518	fastopen = rcu_dereference(tp->fastopen_rsk);
 519	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
 520	if (sk->sk_state != TCP_LISTEN &&
 521	    !between(seq, snd_una, tp->snd_nxt)) {
 522		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
 523		goto out;
 524	}
 525
 526	switch (type) {
 527	case ICMP_REDIRECT:
 528		if (!sock_owned_by_user(sk))
 529			do_redirect(skb, sk);
 530		goto out;
 531	case ICMP_SOURCE_QUENCH:
 532		/* Just silently ignore these. */
 533		goto out;
 534	case ICMP_PARAMETERPROB:
 535		err = EPROTO;
 536		break;
 537	case ICMP_DEST_UNREACH:
 538		if (code > NR_ICMP_UNREACH)
 539			goto out;
 540
 541		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
 542			/* We are not interested in TCP_LISTEN and open_requests
  543			 * (SYN-ACKs sent out by Linux are always <576 bytes, so
 544			 * they should go through unfragmented).
 545			 */
 546			if (sk->sk_state == TCP_LISTEN)
 547				goto out;
 548
 549			tp->mtu_info = info;
 550			if (!sock_owned_by_user(sk)) {
 551				tcp_v4_mtu_reduced(sk);
 552			} else {
 553				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &sk->sk_tsq_flags))
 554					sock_hold(sk);
 555			}
 556			goto out;
 557		}
 558
 559		err = icmp_err_convert[code].errno;
 560		/* check if this ICMP message allows revert of backoff.
 561		 * (see RFC 6069)
 562		 */
 563		if (!fastopen &&
 564		    (code == ICMP_NET_UNREACH || code == ICMP_HOST_UNREACH))
 565			tcp_ld_RTO_revert(sk, seq);
 566		break;
 567	case ICMP_TIME_EXCEEDED:
 568		err = EHOSTUNREACH;
 569		break;
 570	default:
 571		goto out;
 572	}
 573
 574	switch (sk->sk_state) {
 575	case TCP_SYN_SENT:
 576	case TCP_SYN_RECV:
  577		/* Only in fast or simultaneous open. If a fast open socket
  578		 * is already accepted it is treated as a connected one below.
 579		 */
 580		if (fastopen && !fastopen->sk)
 581			break;
 582
 583		ip_icmp_error(sk, skb, err, th->dest, info, (u8 *)th);
 584
 585		if (!sock_owned_by_user(sk)) {
 586			sk->sk_err = err;
 587
 588			sk->sk_error_report(sk);
 589
 590			tcp_done(sk);
 591		} else {
 592			sk->sk_err_soft = err;
 593		}
 594		goto out;
 595	}
 596
 597	/* If we've already connected we will keep trying
 598	 * until we time out, or the user gives up.
 599	 *
  600	 * rfc1122 4.2.3.9 allows only PROTO_UNREACH and PORT_UNREACH
  601	 * to be considered hard errors (well, FRAG_FAILED too,
  602	 * but it is obsoleted by pmtu discovery).
  603	 *
  604	 * Note that in the modern internet, where routing is unreliable
  605	 * and broken firewalls sit in every dark corner sending random
  606	 * errors ordered by their masters, even these two messages finally
  607	 * lose their original sense (even Linux sends invalid PORT_UNREACHs).
 608	 *
 609	 * Now we are in compliance with RFCs.
 610	 *							--ANK (980905)
 611	 */
 612
 613	inet = inet_sk(sk);
 614	if (!sock_owned_by_user(sk) && inet->recverr) {
 615		sk->sk_err = err;
 616		sk->sk_error_report(sk);
 617	} else	{ /* Only an error on timeout */
 618		sk->sk_err_soft = err;
 619	}
 620
 621out:
 622	bh_unlock_sock(sk);
 623	sock_put(sk);
 624	return 0;
 625}
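
/* A short sketch (editorial addition, not kernel code): the inet->recverr
 * test above corresponds to the IP_RECVERR socket option. With it enabled,
 * ICMP errors are reported to the application immediately instead of only
 * producing a soft error on timeout.
 */
#include <netinet/in.h>
#include <sys/socket.h>

static int enable_recverr(int fd)
{
	int one = 1;

	return setsockopt(fd, IPPROTO_IP, IP_RECVERR, &one, sizeof(one));
}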
 626
 627void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
 628{
 629	struct tcphdr *th = tcp_hdr(skb);
 630
 631	th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
 632	skb->csum_start = skb_transport_header(skb) - skb->head;
 633	skb->csum_offset = offsetof(struct tcphdr, check);
 634}
 635
 636/* This routine computes an IPv4 TCP checksum. */
 637void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
 638{
 639	const struct inet_sock *inet = inet_sk(sk);
 640
 641	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
 642}
 643EXPORT_SYMBOL(tcp_v4_send_check);
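
/* A minimal sketch (editorial addition, plain C, host byte order for
 * clarity): the value __tcp_v4_send_check() seeds into th->check is the
 * folded pseudo-header sum; the NIC (or the software fallback) later
 * extends it over the TCP header and payload and complements the result.
 */
#include <stdint.h>

static uint16_t csum_fold32(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

static uint16_t tcp_pseudo_csum(uint32_t saddr, uint32_t daddr, uint32_t len)
{
	uint32_t sum = 0;

	sum += (saddr >> 16) + (saddr & 0xffff);
	sum += (daddr >> 16) + (daddr & 0xffff);
	sum += 6;		/* IPPROTO_TCP */
	sum += len;		/* TCP header + payload length */
	return csum_fold32(sum);
}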
 644
 645/*
 646 *	This routine will send an RST to the other tcp.
 647 *
  648 *	Someone asks: why do I NEVER use socket parameters (TOS, TTL etc.)
  649 *		      for the reset?
  650 *	Answer: a packet that caused an RST is not for a socket
  651 *		existing in our system; if it is matched to a socket,
  652 *		it is just a duplicate segment or a bug in the other side's TCP.
  653 *		So we build the reply based only on the parameters
  654 *		that arrived with the segment.
 655 *	Exception: precedence violation. We do not implement it in any case.
 656 */
 657
 658static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
 659{
 660	const struct tcphdr *th = tcp_hdr(skb);
 661	struct {
 662		struct tcphdr th;
 663#ifdef CONFIG_TCP_MD5SIG
 664		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
 665#endif
 666	} rep;
 667	struct ip_reply_arg arg;
 668#ifdef CONFIG_TCP_MD5SIG
 669	struct tcp_md5sig_key *key = NULL;
 670	const __u8 *hash_location = NULL;
 671	unsigned char newhash[16];
 672	int genhash;
 673	struct sock *sk1 = NULL;
 674#endif
 675	u64 transmit_time = 0;
 676	struct sock *ctl_sk;
 677	struct net *net;
 678
 679	/* Never send a reset in response to a reset. */
 680	if (th->rst)
 681		return;
 682
  683	/* If sk is not NULL, it means we did a successful lookup and the
  684	 * incoming route had to be correct. prequeue might have dropped our dst.
 685	 */
 686	if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
 687		return;
 688
 689	/* Swap the send and the receive. */
 690	memset(&rep, 0, sizeof(rep));
 691	rep.th.dest   = th->source;
 692	rep.th.source = th->dest;
 693	rep.th.doff   = sizeof(struct tcphdr) / 4;
 694	rep.th.rst    = 1;
 695
 696	if (th->ack) {
 697		rep.th.seq = th->ack_seq;
 698	} else {
 699		rep.th.ack = 1;
 700		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
 701				       skb->len - (th->doff << 2));
 702	}
 703
 704	memset(&arg, 0, sizeof(arg));
 705	arg.iov[0].iov_base = (unsigned char *)&rep;
 706	arg.iov[0].iov_len  = sizeof(rep.th);
 707
 708	net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
 709#ifdef CONFIG_TCP_MD5SIG
 710	rcu_read_lock();
 711	hash_location = tcp_parse_md5sig_option(th);
 712	if (sk && sk_fullsock(sk)) {
 713		const union tcp_md5_addr *addr;
 714		int l3index;
 715
 716		/* sdif set, means packet ingressed via a device
 717		 * in an L3 domain and inet_iif is set to it.
 718		 */
 719		l3index = tcp_v4_sdif(skb) ? inet_iif(skb) : 0;
 720		addr = (union tcp_md5_addr *)&ip_hdr(skb)->saddr;
 721		key = tcp_md5_do_lookup(sk, l3index, addr, AF_INET);
 722	} else if (hash_location) {
 723		const union tcp_md5_addr *addr;
 724		int sdif = tcp_v4_sdif(skb);
 725		int dif = inet_iif(skb);
 726		int l3index;
 727
 728		/*
  729		 * The active side is lost. Try to find the listening socket
  730		 * through the source port, and then find the md5 key through
  731		 * the listening socket. We do not lose security here:
  732		 * the incoming packet is checked against the md5 hash of the
  733		 * key we find, and no RST is generated if the hash doesn't match.
 734		 */
 735		sk1 = __inet_lookup_listener(net, &tcp_hashinfo, NULL, 0,
 736					     ip_hdr(skb)->saddr,
 737					     th->source, ip_hdr(skb)->daddr,
 738					     ntohs(th->source), dif, sdif);
 739		/* don't send rst if it can't find key */
 740		if (!sk1)
 741			goto out;
 742
 743		/* sdif set, means packet ingressed via a device
 744		 * in an L3 domain and dif is set to it.
 745		 */
 746		l3index = sdif ? dif : 0;
 747		addr = (union tcp_md5_addr *)&ip_hdr(skb)->saddr;
 748		key = tcp_md5_do_lookup(sk1, l3index, addr, AF_INET);
 749		if (!key)
 750			goto out;
 751
 752
 753		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
 754		if (genhash || memcmp(hash_location, newhash, 16) != 0)
 755			goto out;
 756
 757	}
 758
 759	if (key) {
 760		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
 761				   (TCPOPT_NOP << 16) |
 762				   (TCPOPT_MD5SIG << 8) |
 763				   TCPOLEN_MD5SIG);
 764		/* Update length and the length the header thinks exists */
 765		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
 766		rep.th.doff = arg.iov[0].iov_len / 4;
 767
 768		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
 769				     key, ip_hdr(skb)->saddr,
 770				     ip_hdr(skb)->daddr, &rep.th);
 771	}
 772#endif
 773	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
 774				      ip_hdr(skb)->saddr, /* XXX */
 775				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
 776	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
 777	arg.flags = (sk && inet_sk_transparent(sk)) ? IP_REPLY_ARG_NOSRCCHECK : 0;
 778
  779	/* When the socket is gone, all binding information is lost and
  780	 * routing might fail. No choice here: if we choose to force the
  781	 * input interface, we will misroute in case of an asymmetric route.
 782	 */
 783	if (sk) {
 784		arg.bound_dev_if = sk->sk_bound_dev_if;
 785		if (sk_fullsock(sk))
 786			trace_tcp_send_reset(sk, skb);
 787	}
 788
 789	BUILD_BUG_ON(offsetof(struct sock, sk_bound_dev_if) !=
 790		     offsetof(struct inet_timewait_sock, tw_bound_dev_if));
 791
 792	arg.tos = ip_hdr(skb)->tos;
 793	arg.uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
 794	local_bh_disable();
 795	ctl_sk = this_cpu_read(*net->ipv4.tcp_sk);
 796	if (sk) {
 797		ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
 798				   inet_twsk(sk)->tw_mark : sk->sk_mark;
 799		ctl_sk->sk_priority = (sk->sk_state == TCP_TIME_WAIT) ?
 800				   inet_twsk(sk)->tw_priority : sk->sk_priority;
 801		transmit_time = tcp_transmit_time(sk);
 802	}
 803	ip_send_unicast_reply(ctl_sk,
 804			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
 805			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
 806			      &arg, arg.iov[0].iov_len,
 807			      transmit_time);
 808
 809	ctl_sk->sk_mark = 0;
 810	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
 811	__TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
 812	local_bh_enable();
 813
 814#ifdef CONFIG_TCP_MD5SIG
 815out:
 816	rcu_read_unlock();
 817#endif
 818}
 819
  820/* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
  821   outside socket context, is ugly, certainly. What can I do?
 822 */
 823
 824static void tcp_v4_send_ack(const struct sock *sk,
 825			    struct sk_buff *skb, u32 seq, u32 ack,
 826			    u32 win, u32 tsval, u32 tsecr, int oif,
 827			    struct tcp_md5sig_key *key,
 828			    int reply_flags, u8 tos)
 829{
 830	const struct tcphdr *th = tcp_hdr(skb);
 831	struct {
 832		struct tcphdr th;
 833		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
 834#ifdef CONFIG_TCP_MD5SIG
 835			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
 836#endif
 837			];
 838	} rep;
 839	struct net *net = sock_net(sk);
 840	struct ip_reply_arg arg;
 841	struct sock *ctl_sk;
 842	u64 transmit_time;
 843
 844	memset(&rep.th, 0, sizeof(struct tcphdr));
 845	memset(&arg, 0, sizeof(arg));
 846
 847	arg.iov[0].iov_base = (unsigned char *)&rep;
 848	arg.iov[0].iov_len  = sizeof(rep.th);
 849	if (tsecr) {
 850		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
 851				   (TCPOPT_TIMESTAMP << 8) |
 852				   TCPOLEN_TIMESTAMP);
 853		rep.opt[1] = htonl(tsval);
 854		rep.opt[2] = htonl(tsecr);
 855		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
 856	}
 857
 858	/* Swap the send and the receive. */
 859	rep.th.dest    = th->source;
 860	rep.th.source  = th->dest;
 861	rep.th.doff    = arg.iov[0].iov_len / 4;
 862	rep.th.seq     = htonl(seq);
 863	rep.th.ack_seq = htonl(ack);
 864	rep.th.ack     = 1;
 865	rep.th.window  = htons(win);
 866
 867#ifdef CONFIG_TCP_MD5SIG
 868	if (key) {
 869		int offset = (tsecr) ? 3 : 0;
 870
 871		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
 872					  (TCPOPT_NOP << 16) |
 873					  (TCPOPT_MD5SIG << 8) |
 874					  TCPOLEN_MD5SIG);
 875		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
 876		rep.th.doff = arg.iov[0].iov_len/4;
 877
 878		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
 879				    key, ip_hdr(skb)->saddr,
 880				    ip_hdr(skb)->daddr, &rep.th);
 881	}
 882#endif
 883	arg.flags = reply_flags;
 884	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
 885				      ip_hdr(skb)->saddr, /* XXX */
 886				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
 887	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
 888	if (oif)
 889		arg.bound_dev_if = oif;
 890	arg.tos = tos;
 891	arg.uid = sock_net_uid(net, sk_fullsock(sk) ? sk : NULL);
 892	local_bh_disable();
 893	ctl_sk = this_cpu_read(*net->ipv4.tcp_sk);
 894	ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
 895			   inet_twsk(sk)->tw_mark : sk->sk_mark;
 896	ctl_sk->sk_priority = (sk->sk_state == TCP_TIME_WAIT) ?
 897			   inet_twsk(sk)->tw_priority : sk->sk_priority;
 898	transmit_time = tcp_transmit_time(sk);
 899	ip_send_unicast_reply(ctl_sk,
 900			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
 901			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
 902			      &arg, arg.iov[0].iov_len,
 903			      transmit_time);
 904
 905	ctl_sk->sk_mark = 0;
 906	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
 907	local_bh_enable();
 908}
 909
 910static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
 911{
 912	struct inet_timewait_sock *tw = inet_twsk(sk);
 913	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
 914
 915	tcp_v4_send_ack(sk, skb,
 916			tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
 917			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
 918			tcp_time_stamp_raw() + tcptw->tw_ts_offset,
 919			tcptw->tw_ts_recent,
 920			tw->tw_bound_dev_if,
 921			tcp_twsk_md5_key(tcptw),
 922			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
 923			tw->tw_tos
 924			);
 925
 926	inet_twsk_put(tw);
 927}
 928
 929static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
 930				  struct request_sock *req)
 931{
 932	const union tcp_md5_addr *addr;
 933	int l3index;
 934
 935	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
 936	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
 937	 */
 938	u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
 939					     tcp_sk(sk)->snd_nxt;
 940
 941	/* RFC 7323 2.3
 942	 * The window field (SEG.WND) of every outgoing segment, with the
 943	 * exception of <SYN> segments, MUST be right-shifted by
 944	 * Rcv.Wind.Shift bits:
 945	 */
 946	addr = (union tcp_md5_addr *)&ip_hdr(skb)->saddr;
 947	l3index = tcp_v4_sdif(skb) ? inet_iif(skb) : 0;
 948	tcp_v4_send_ack(sk, skb, seq,
 949			tcp_rsk(req)->rcv_nxt,
 950			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
 951			tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
 952			req->ts_recent,
 953			0,
 954			tcp_md5_do_lookup(sk, l3index, addr, AF_INET),
 955			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
 956			ip_hdr(skb)->tos);
 957}
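
/* A minimal sketch (editorial addition, not kernel code): the RFC 7323 2.3
 * right-shift applied to req->rsk_rcv_wnd above. The explicit clamp is this
 * sketch's own guard; the caller above relies on the window already fitting
 * the 16-bit header field.
 */
#include <stdint.h>

static uint16_t scaled_window(uint32_t rcv_wnd, uint8_t rcv_wscale)
{
	uint32_t w = rcv_wnd >> rcv_wscale;

	return w > 0xffff ? 0xffff : (uint16_t)w;
}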
 958
 959/*
 960 *	Send a SYN-ACK after having received a SYN.
 961 *	This still operates on a request_sock only, not on a big
 962 *	socket.
 963 */
 964static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
 965			      struct flowi *fl,
 966			      struct request_sock *req,
 967			      struct tcp_fastopen_cookie *foc,
 968			      enum tcp_synack_type synack_type)
 969{
 970	const struct inet_request_sock *ireq = inet_rsk(req);
 971	struct flowi4 fl4;
 972	int err = -1;
 973	struct sk_buff *skb;
 974
 975	/* First, grab a route. */
 976	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
 977		return -1;
 978
 979	skb = tcp_make_synack(sk, dst, req, foc, synack_type);
 980
 981	if (skb) {
 982		__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
 983
 984		rcu_read_lock();
 985		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
 986					    ireq->ir_rmt_addr,
 987					    rcu_dereference(ireq->ireq_opt));
 988		rcu_read_unlock();
 989		err = net_xmit_eval(err);
 990	}
 991
 992	return err;
 993}
 994
 995/*
 996 *	IPv4 request_sock destructor.
 997 */
 998static void tcp_v4_reqsk_destructor(struct request_sock *req)
 999{
1000	kfree(rcu_dereference_protected(inet_rsk(req)->ireq_opt, 1));
1001}
1002
1003#ifdef CONFIG_TCP_MD5SIG
1004/*
1005 * RFC2385 MD5 checksumming requires a mapping of
1006 * IP address->MD5 Key.
1007 * We need to maintain these in the sk structure.
1008 */
1009
1010DEFINE_STATIC_KEY_FALSE(tcp_md5_needed);
1011EXPORT_SYMBOL(tcp_md5_needed);
1012
1013/* Find the Key structure for an address.  */
1014struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
1015					   const union tcp_md5_addr *addr,
1016					   int family)
1017{
1018	const struct tcp_sock *tp = tcp_sk(sk);
1019	struct tcp_md5sig_key *key;
1020	const struct tcp_md5sig_info *md5sig;
1021	__be32 mask;
1022	struct tcp_md5sig_key *best_match = NULL;
1023	bool match;
1024
1025	/* caller either holds rcu_read_lock() or socket lock */
1026	md5sig = rcu_dereference_check(tp->md5sig_info,
1027				       lockdep_sock_is_held(sk));
1028	if (!md5sig)
1029		return NULL;
1030
1031	hlist_for_each_entry_rcu(key, &md5sig->head, node,
1032				 lockdep_sock_is_held(sk)) {
1033		if (key->family != family)
1034			continue;
1035		if (key->l3index && key->l3index != l3index)
1036			continue;
1037		if (family == AF_INET) {
1038			mask = inet_make_mask(key->prefixlen);
1039			match = (key->addr.a4.s_addr & mask) ==
1040				(addr->a4.s_addr & mask);
1041#if IS_ENABLED(CONFIG_IPV6)
1042		} else if (family == AF_INET6) {
1043			match = ipv6_prefix_equal(&key->addr.a6, &addr->a6,
1044						  key->prefixlen);
1045#endif
1046		} else {
1047			match = false;
1048		}
1049
1050		if (match && (!best_match ||
1051			      key->prefixlen > best_match->prefixlen))
1052			best_match = key;
1053	}
1054	return best_match;
1055}
1056EXPORT_SYMBOL(__tcp_md5_do_lookup);
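
/* A minimal sketch (editorial addition, host byte order): the IPv4 prefix
 * test performed by the loop above, i.e. inet_make_mask() plus the masked
 * comparison, with best_match keeping the longest matching prefix.
 */
#include <stdint.h>

static uint32_t prefix_mask(uint8_t prefixlen)
{
	return prefixlen ? ~((1u << (32 - prefixlen)) - 1) : 0;
}

/* returns nonzero if addr falls inside key_addr/prefixlen */
static int key_covers(uint32_t addr, uint32_t key_addr, uint8_t prefixlen)
{
	uint32_t m = prefix_mask(prefixlen);

	return (addr & m) == (key_addr & m);
}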
1057
1058static struct tcp_md5sig_key *tcp_md5_do_lookup_exact(const struct sock *sk,
1059						      const union tcp_md5_addr *addr,
1060						      int family, u8 prefixlen,
1061						      int l3index)
1062{
1063	const struct tcp_sock *tp = tcp_sk(sk);
1064	struct tcp_md5sig_key *key;
1065	unsigned int size = sizeof(struct in_addr);
1066	const struct tcp_md5sig_info *md5sig;
1067
1068	/* caller either holds rcu_read_lock() or socket lock */
1069	md5sig = rcu_dereference_check(tp->md5sig_info,
1070				       lockdep_sock_is_held(sk));
1071	if (!md5sig)
1072		return NULL;
1073#if IS_ENABLED(CONFIG_IPV6)
1074	if (family == AF_INET6)
1075		size = sizeof(struct in6_addr);
1076#endif
1077	hlist_for_each_entry_rcu(key, &md5sig->head, node,
1078				 lockdep_sock_is_held(sk)) {
1079		if (key->family != family)
1080			continue;
1081		if (key->l3index && key->l3index != l3index)
1082			continue;
1083		if (!memcmp(&key->addr, addr, size) &&
1084		    key->prefixlen == prefixlen)
1085			return key;
1086	}
1087	return NULL;
1088}
1089
1090struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
1091					 const struct sock *addr_sk)
1092{
1093	const union tcp_md5_addr *addr;
1094	int l3index;
1095
1096	l3index = l3mdev_master_ifindex_by_index(sock_net(sk),
1097						 addr_sk->sk_bound_dev_if);
1098	addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
1099	return tcp_md5_do_lookup(sk, l3index, addr, AF_INET);
1100}
1101EXPORT_SYMBOL(tcp_v4_md5_lookup);
1102
1103/* This can be called on a newly created socket, from other files */
1104int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
1105		   int family, u8 prefixlen, int l3index,
1106		   const u8 *newkey, u8 newkeylen, gfp_t gfp)
1107{
1108	/* Add Key to the list */
1109	struct tcp_md5sig_key *key;
1110	struct tcp_sock *tp = tcp_sk(sk);
1111	struct tcp_md5sig_info *md5sig;
1112
1113	key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen, l3index);
1114	if (key) {
1115		/* Pre-existing entry - just update that one.
1116		 * Note that the key might be used concurrently.
 1117		 * data_race() tells KCSAN that we do not care about
 1118		 * key mismatches, since changing the MD5 key on live flows
1119		 * can lead to packet drops.
1120		 */
1121		data_race(memcpy(key->key, newkey, newkeylen));
1122
1123		/* Pairs with READ_ONCE() in tcp_md5_hash_key().
1124		 * Also note that a reader could catch new key->keylen value
1125		 * but old key->key[], this is the reason we use __GFP_ZERO
1126		 * at sock_kmalloc() time below these lines.
1127		 */
1128		WRITE_ONCE(key->keylen, newkeylen);
1129
1130		return 0;
1131	}
1132
1133	md5sig = rcu_dereference_protected(tp->md5sig_info,
1134					   lockdep_sock_is_held(sk));
1135	if (!md5sig) {
1136		md5sig = kmalloc(sizeof(*md5sig), gfp);
1137		if (!md5sig)
1138			return -ENOMEM;
1139
1140		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
1141		INIT_HLIST_HEAD(&md5sig->head);
1142		rcu_assign_pointer(tp->md5sig_info, md5sig);
1143	}
1144
1145	key = sock_kmalloc(sk, sizeof(*key), gfp | __GFP_ZERO);
1146	if (!key)
1147		return -ENOMEM;
1148	if (!tcp_alloc_md5sig_pool()) {
1149		sock_kfree_s(sk, key, sizeof(*key));
1150		return -ENOMEM;
1151	}
1152
1153	memcpy(key->key, newkey, newkeylen);
1154	key->keylen = newkeylen;
1155	key->family = family;
1156	key->prefixlen = prefixlen;
1157	key->l3index = l3index;
1158	memcpy(&key->addr, addr,
1159	       (family == AF_INET6) ? sizeof(struct in6_addr) :
1160				      sizeof(struct in_addr));
1161	hlist_add_head_rcu(&key->node, &md5sig->head);
1162	return 0;
1163}
1164EXPORT_SYMBOL(tcp_md5_do_add);
1165
1166int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family,
1167		   u8 prefixlen, int l3index)
1168{
1169	struct tcp_md5sig_key *key;
1170
1171	key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen, l3index);
1172	if (!key)
1173		return -ENOENT;
1174	hlist_del_rcu(&key->node);
1175	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1176	kfree_rcu(key, rcu);
1177	return 0;
1178}
1179EXPORT_SYMBOL(tcp_md5_do_del);
1180
1181static void tcp_clear_md5_list(struct sock *sk)
1182{
1183	struct tcp_sock *tp = tcp_sk(sk);
1184	struct tcp_md5sig_key *key;
1185	struct hlist_node *n;
1186	struct tcp_md5sig_info *md5sig;
1187
1188	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
1189
1190	hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
1191		hlist_del_rcu(&key->node);
1192		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1193		kfree_rcu(key, rcu);
1194	}
1195}
1196
1197static int tcp_v4_parse_md5_keys(struct sock *sk, int optname,
1198				 sockptr_t optval, int optlen)
1199{
1200	struct tcp_md5sig cmd;
1201	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
1202	const union tcp_md5_addr *addr;
1203	u8 prefixlen = 32;
1204	int l3index = 0;
1205
1206	if (optlen < sizeof(cmd))
1207		return -EINVAL;
1208
1209	if (copy_from_sockptr(&cmd, optval, sizeof(cmd)))
1210		return -EFAULT;
1211
1212	if (sin->sin_family != AF_INET)
1213		return -EINVAL;
1214
1215	if (optname == TCP_MD5SIG_EXT &&
1216	    cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
1217		prefixlen = cmd.tcpm_prefixlen;
1218		if (prefixlen > 32)
1219			return -EINVAL;
1220	}
1221
1222	if (optname == TCP_MD5SIG_EXT &&
1223	    cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX) {
1224		struct net_device *dev;
1225
1226		rcu_read_lock();
1227		dev = dev_get_by_index_rcu(sock_net(sk), cmd.tcpm_ifindex);
1228		if (dev && netif_is_l3_master(dev))
1229			l3index = dev->ifindex;
1230
1231		rcu_read_unlock();
1232
1233		/* ok to reference set/not set outside of rcu;
1234		 * right now device MUST be an L3 master
1235		 */
1236		if (!dev || !l3index)
1237			return -EINVAL;
1238	}
1239
1240	addr = (union tcp_md5_addr *)&sin->sin_addr.s_addr;
1241
1242	if (!cmd.tcpm_keylen)
1243		return tcp_md5_do_del(sk, addr, AF_INET, prefixlen, l3index);
1244
1245	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
1246		return -EINVAL;
1247
1248	return tcp_md5_do_add(sk, addr, AF_INET, prefixlen, l3index,
1249			      cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
1250}
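
/* A minimal userspace sketch (editorial addition, not kernel code): the
 * setsockopt() call that lands in tcp_v4_parse_md5_keys() above. The peer
 * address and key are illustrative; a zero tcpm_keylen would delete the
 * key instead of adding it.
 */
#include <arpa/inet.h>
#include <linux/tcp.h>
#include <string.h>
#include <sys/socket.h>

static int set_md5_key(int fd, const char *secret)
{
	struct tcp_md5sig md5;
	struct sockaddr_in *sin = (struct sockaddr_in *)&md5.tcpm_addr;

	if (strlen(secret) > TCP_MD5SIG_MAXKEYLEN)
		return -1;
	memset(&md5, 0, sizeof(md5));
	sin->sin_family = AF_INET;
	inet_pton(AF_INET, "192.0.2.1", &sin->sin_addr);	/* example peer */
	md5.tcpm_keylen = strlen(secret);
	memcpy(md5.tcpm_key, secret, md5.tcpm_keylen);
	return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
}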
1251
1252static int tcp_v4_md5_hash_headers(struct tcp_md5sig_pool *hp,
1253				   __be32 daddr, __be32 saddr,
1254				   const struct tcphdr *th, int nbytes)
1255{
1256	struct tcp4_pseudohdr *bp;
1257	struct scatterlist sg;
1258	struct tcphdr *_th;
1259
1260	bp = hp->scratch;
1261	bp->saddr = saddr;
1262	bp->daddr = daddr;
1263	bp->pad = 0;
1264	bp->protocol = IPPROTO_TCP;
1265	bp->len = cpu_to_be16(nbytes);
1266
1267	_th = (struct tcphdr *)(bp + 1);
1268	memcpy(_th, th, sizeof(*th));
1269	_th->check = 0;
1270
1271	sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
1272	ahash_request_set_crypt(hp->md5_req, &sg, NULL,
1273				sizeof(*bp) + sizeof(*th));
1274	return crypto_ahash_update(hp->md5_req);
1275}
1276
1277static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
1278			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
1279{
1280	struct tcp_md5sig_pool *hp;
1281	struct ahash_request *req;
1282
1283	hp = tcp_get_md5sig_pool();
1284	if (!hp)
1285		goto clear_hash_noput;
1286	req = hp->md5_req;
1287
1288	if (crypto_ahash_init(req))
1289		goto clear_hash;
1290	if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
1291		goto clear_hash;
1292	if (tcp_md5_hash_key(hp, key))
1293		goto clear_hash;
1294	ahash_request_set_crypt(req, NULL, md5_hash, 0);
1295	if (crypto_ahash_final(req))
1296		goto clear_hash;
1297
1298	tcp_put_md5sig_pool();
1299	return 0;
1300
1301clear_hash:
1302	tcp_put_md5sig_pool();
1303clear_hash_noput:
1304	memset(md5_hash, 0, 16);
1305	return 1;
1306}
1307
1308int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
1309			const struct sock *sk,
1310			const struct sk_buff *skb)
1311{
1312	struct tcp_md5sig_pool *hp;
1313	struct ahash_request *req;
1314	const struct tcphdr *th = tcp_hdr(skb);
1315	__be32 saddr, daddr;
1316
1317	if (sk) { /* valid for establish/request sockets */
1318		saddr = sk->sk_rcv_saddr;
1319		daddr = sk->sk_daddr;
1320	} else {
1321		const struct iphdr *iph = ip_hdr(skb);
1322		saddr = iph->saddr;
1323		daddr = iph->daddr;
1324	}
1325
1326	hp = tcp_get_md5sig_pool();
1327	if (!hp)
1328		goto clear_hash_noput;
1329	req = hp->md5_req;
1330
1331	if (crypto_ahash_init(req))
1332		goto clear_hash;
1333
1334	if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, skb->len))
1335		goto clear_hash;
1336	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
1337		goto clear_hash;
1338	if (tcp_md5_hash_key(hp, key))
1339		goto clear_hash;
1340	ahash_request_set_crypt(req, NULL, md5_hash, 0);
1341	if (crypto_ahash_final(req))
1342		goto clear_hash;
1343
1344	tcp_put_md5sig_pool();
1345	return 0;
1346
1347clear_hash:
1348	tcp_put_md5sig_pool();
1349clear_hash_noput:
1350	memset(md5_hash, 0, 16);
1351	return 1;
1352}
1353EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
1354
1355#endif
1356
1357/* Called with rcu_read_lock() */
1358static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
1359				    const struct sk_buff *skb,
1360				    int dif, int sdif)
1361{
1362#ifdef CONFIG_TCP_MD5SIG
1363	/*
1364	 * This gets called for each TCP segment that arrives
1365	 * so we want to be efficient.
1366	 * We have 3 drop cases:
1367	 * o No MD5 hash and one expected.
1368	 * o MD5 hash and we're not expecting one.
 1369	 * o MD5 hash and it's wrong.
1370	 */
1371	const __u8 *hash_location = NULL;
1372	struct tcp_md5sig_key *hash_expected;
1373	const struct iphdr *iph = ip_hdr(skb);
1374	const struct tcphdr *th = tcp_hdr(skb);
1375	const union tcp_md5_addr *addr;
1376	unsigned char newhash[16];
1377	int genhash, l3index;
1378
1379	/* sdif set, means packet ingressed via a device
1380	 * in an L3 domain and dif is set to the l3mdev
1381	 */
1382	l3index = sdif ? dif : 0;
1383
1384	addr = (union tcp_md5_addr *)&iph->saddr;
1385	hash_expected = tcp_md5_do_lookup(sk, l3index, addr, AF_INET);
1386	hash_location = tcp_parse_md5sig_option(th);
1387
1388	/* We've parsed the options - do we have a hash? */
1389	if (!hash_expected && !hash_location)
1390		return false;
1391
1392	if (hash_expected && !hash_location) {
1393		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
1394		return true;
1395	}
1396
1397	if (!hash_expected && hash_location) {
1398		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
1399		return true;
1400	}
1401
1402	/* Okay, so this is hash_expected and hash_location -
1403	 * so we need to calculate the checksum.
1404	 */
1405	genhash = tcp_v4_md5_hash_skb(newhash,
1406				      hash_expected,
1407				      NULL, skb);
1408
1409	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
1410		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
1411		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s L3 index %d\n",
1412				     &iph->saddr, ntohs(th->source),
1413				     &iph->daddr, ntohs(th->dest),
1414				     genhash ? " tcp_v4_calc_md5_hash failed"
1415				     : "", l3index);
1416		return true;
1417	}
1418	return false;
1419#endif
1420	return false;
1421}
1422
1423static void tcp_v4_init_req(struct request_sock *req,
1424			    const struct sock *sk_listener,
1425			    struct sk_buff *skb)
1426{
1427	struct inet_request_sock *ireq = inet_rsk(req);
1428	struct net *net = sock_net(sk_listener);
1429
1430	sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
1431	sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
1432	RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(net, skb));
1433}
1434
1435static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
1436					  struct flowi *fl,
1437					  const struct request_sock *req)
1438{
1439	return inet_csk_route_req(sk, &fl->u.ip4, req);
1440}
1441
1442struct request_sock_ops tcp_request_sock_ops __read_mostly = {
1443	.family		=	PF_INET,
1444	.obj_size	=	sizeof(struct tcp_request_sock),
1445	.rtx_syn_ack	=	tcp_rtx_synack,
1446	.send_ack	=	tcp_v4_reqsk_send_ack,
1447	.destructor	=	tcp_v4_reqsk_destructor,
1448	.send_reset	=	tcp_v4_send_reset,
1449	.syn_ack_timeout =	tcp_syn_ack_timeout,
1450};
1451
1452const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1453	.mss_clamp	=	TCP_MSS_DEFAULT,
1454#ifdef CONFIG_TCP_MD5SIG
1455	.req_md5_lookup	=	tcp_v4_md5_lookup,
1456	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
1457#endif
1458	.init_req	=	tcp_v4_init_req,
1459#ifdef CONFIG_SYN_COOKIES
1460	.cookie_init_seq =	cookie_v4_init_sequence,
1461#endif
1462	.route_req	=	tcp_v4_route_req,
1463	.init_seq	=	tcp_v4_init_seq,
1464	.init_ts_off	=	tcp_v4_init_ts_off,
1465	.send_synack	=	tcp_v4_send_synack,
1466};
1467
1468int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1469{
 1470	/* Never answer SYNs sent to broadcast or multicast */
1471	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
1472		goto drop;
1473
1474	return tcp_conn_request(&tcp_request_sock_ops,
1475				&tcp_request_sock_ipv4_ops, sk, skb);
1476
1477drop:
1478	tcp_listendrop(sk);
1479	return 0;
1480}
1481EXPORT_SYMBOL(tcp_v4_conn_request);
1482
1483
1484/*
1485 * The three way handshake has completed - we got a valid synack -
1486 * now create the new socket.
1487 */
1488struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
1489				  struct request_sock *req,
1490				  struct dst_entry *dst,
1491				  struct request_sock *req_unhash,
1492				  bool *own_req)
1493{
1494	struct inet_request_sock *ireq;
1495	struct inet_sock *newinet;
1496	struct tcp_sock *newtp;
1497	struct sock *newsk;
1498#ifdef CONFIG_TCP_MD5SIG
1499	const union tcp_md5_addr *addr;
1500	struct tcp_md5sig_key *key;
1501	int l3index;
1502#endif
1503	struct ip_options_rcu *inet_opt;
1504
1505	if (sk_acceptq_is_full(sk))
1506		goto exit_overflow;
1507
1508	newsk = tcp_create_openreq_child(sk, req, skb);
1509	if (!newsk)
1510		goto exit_nonewsk;
1511
1512	newsk->sk_gso_type = SKB_GSO_TCPV4;
1513	inet_sk_rx_dst_set(newsk, skb);
1514
1515	newtp		      = tcp_sk(newsk);
1516	newinet		      = inet_sk(newsk);
1517	ireq		      = inet_rsk(req);
1518	sk_daddr_set(newsk, ireq->ir_rmt_addr);
1519	sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
1520	newsk->sk_bound_dev_if = ireq->ir_iif;
1521	newinet->inet_saddr   = ireq->ir_loc_addr;
1522	inet_opt	      = rcu_dereference(ireq->ireq_opt);
1523	RCU_INIT_POINTER(newinet->inet_opt, inet_opt);
1524	newinet->mc_index     = inet_iif(skb);
1525	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
1526	newinet->rcv_tos      = ip_hdr(skb)->tos;
1527	inet_csk(newsk)->icsk_ext_hdr_len = 0;
1528	if (inet_opt)
1529		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
1530	newinet->inet_id = prandom_u32();
1531
1532	if (!dst) {
1533		dst = inet_csk_route_child_sock(sk, newsk, req);
1534		if (!dst)
1535			goto put_and_exit;
1536	} else {
1537		/* syncookie case : see end of cookie_v4_check() */
1538	}
1539	sk_setup_caps(newsk, dst);
1540
1541	tcp_ca_openreq_child(newsk, dst);
1542
1543	tcp_sync_mss(newsk, dst_mtu(dst));
1544	newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));
1545
1546	tcp_initialize_rcv_mss(newsk);
1547
1548#ifdef CONFIG_TCP_MD5SIG
1549	l3index = l3mdev_master_ifindex_by_index(sock_net(sk), ireq->ir_iif);
1550	/* Copy over the MD5 key from the original socket */
1551	addr = (union tcp_md5_addr *)&newinet->inet_daddr;
1552	key = tcp_md5_do_lookup(sk, l3index, addr, AF_INET);
1553	if (key) {
1554		/*
1555		 * We're using one, so create a matching key
1556		 * on the newsk structure. If we fail to get
1557		 * memory, then we end up not copying the key
1558		 * across. Shucks.
1559		 */
1560		tcp_md5_do_add(newsk, addr, AF_INET, 32, l3index,
1561			       key->key, key->keylen, GFP_ATOMIC);
1562		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
1563	}
1564#endif
1565
1566	if (__inet_inherit_port(sk, newsk) < 0)
1567		goto put_and_exit;
1568	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
1569	if (likely(*own_req)) {
1570		tcp_move_syn(newtp, req);
1571		ireq->ireq_opt = NULL;
1572	} else {
1573		newinet->inet_opt = NULL;
1574	}
1575	return newsk;
1576
1577exit_overflow:
1578	NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1579exit_nonewsk:
1580	dst_release(dst);
1581exit:
1582	tcp_listendrop(sk);
1583	return NULL;
1584put_and_exit:
1585	newinet->inet_opt = NULL;
1586	inet_csk_prepare_forced_close(newsk);
1587	tcp_done(newsk);
1588	goto exit;
1589}
1590EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
1591
1592static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
1593{
1594#ifdef CONFIG_SYN_COOKIES
1595	const struct tcphdr *th = tcp_hdr(skb);
1596
1597	if (!th->syn)
1598		sk = cookie_v4_check(sk, skb);
1599#endif
1600	return sk;
1601}
1602
1603u16 tcp_v4_get_syncookie(struct sock *sk, struct iphdr *iph,
1604			 struct tcphdr *th, u32 *cookie)
1605{
1606	u16 mss = 0;
1607#ifdef CONFIG_SYN_COOKIES
1608	mss = tcp_get_syncookie_mss(&tcp_request_sock_ops,
1609				    &tcp_request_sock_ipv4_ops, sk, th);
1610	if (mss) {
1611		*cookie = __cookie_v4_init_sequence(iph, th, &mss);
1612		tcp_synq_overflow(sk);
1613	}
1614#endif
1615	return mss;
1616}
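
/* Editorial note (not part of this file): cookie generation above is
 * compiled in with CONFIG_SYN_COOKIES and enabled at runtime through the
 * net.ipv4.tcp_syncookies sysctl (0 = off, 1 = only under SYN-queue
 * overflow, 2 = unconditionally). tcp_synq_overflow() records the
 * overflow episode so that ACKs carrying cookies are accepted later.
 */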
1617
 1618/* The socket must have its spinlock held when we get
1619 * here, unless it is a TCP_LISTEN socket.
1620 *
1621 * We have a potential double-lock case here, so even when
1622 * doing backlog processing we use the BH locking scheme.
1623 * This is because we cannot sleep with the original spinlock
1624 * held.
1625 */
1626int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1627{
1628	struct sock *rsk;
1629
1630	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1631		struct dst_entry *dst = sk->sk_rx_dst;
1632
1633		sock_rps_save_rxhash(sk, skb);
1634		sk_mark_napi_id(sk, skb);
1635		if (dst) {
1636			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1637			    !dst->ops->check(dst, 0)) {
1638				dst_release(dst);
1639				sk->sk_rx_dst = NULL;
1640			}
1641		}
1642		tcp_rcv_established(sk, skb);
1643		return 0;
1644	}
1645
1646	if (tcp_checksum_complete(skb))
1647		goto csum_err;
1648
1649	if (sk->sk_state == TCP_LISTEN) {
1650		struct sock *nsk = tcp_v4_cookie_check(sk, skb);
1651
1652		if (!nsk)
1653			goto discard;
1654		if (nsk != sk) {
1655			if (tcp_child_process(sk, nsk, skb)) {
1656				rsk = nsk;
1657				goto reset;
1658			}
1659			return 0;
1660		}
1661	} else
1662		sock_rps_save_rxhash(sk, skb);
1663
1664	if (tcp_rcv_state_process(sk, skb)) {
1665		rsk = sk;
1666		goto reset;
1667	}
1668	return 0;
1669
1670reset:
1671	tcp_v4_send_reset(rsk, skb);
1672discard:
1673	kfree_skb(skb);
1674	/* Be careful here. If this function gets more complicated and
1675	 * gcc suffers from register pressure on the x86, sk (in %ebx)
1676	 * might be destroyed here. This current version compiles correctly,
1677	 * but you have been warned.
1678	 */
1679	return 0;
1680
1681csum_err:
1682	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1683	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
1684	goto discard;
1685}
1686EXPORT_SYMBOL(tcp_v4_do_rcv);
1687
1688int tcp_v4_early_demux(struct sk_buff *skb)
1689{
1690	const struct iphdr *iph;
1691	const struct tcphdr *th;
1692	struct sock *sk;
1693
1694	if (skb->pkt_type != PACKET_HOST)
1695		return 0;
1696
1697	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1698		return 0;
1699
1700	iph = ip_hdr(skb);
1701	th = tcp_hdr(skb);
1702
1703	if (th->doff < sizeof(struct tcphdr) / 4)
1704		return 0;
1705
1706	sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1707				       iph->saddr, th->source,
1708				       iph->daddr, ntohs(th->dest),
1709				       skb->skb_iif, inet_sdif(skb));
1710	if (sk) {
1711		skb->sk = sk;
1712		skb->destructor = sock_edemux;
1713		if (sk_fullsock(sk)) {
1714			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
1715
1716			if (dst)
1717				dst = dst_check(dst, 0);
1718			if (dst &&
1719			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1720				skb_dst_set_noref(skb, dst);
1721		}
1722	}
1723	return 0;
1724}
1725
1726bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
1727{
1728	u32 limit = READ_ONCE(sk->sk_rcvbuf) + READ_ONCE(sk->sk_sndbuf);
1729	struct skb_shared_info *shinfo;
1730	const struct tcphdr *th;
1731	struct tcphdr *thtail;
1732	struct sk_buff *tail;
1733	unsigned int hdrlen;
1734	bool fragstolen;
1735	u32 gso_segs;
1736	int delta;
1737
1738	/* In case all data was pulled from skb frags (in __pskb_pull_tail()),
1739	 * we can fix skb->truesize to its real value to avoid future drops.
1740	 * This is valid because skb is not yet charged to the socket.
 1741	 * It has been noticed that pure SACK packets were sometimes dropped
1742	 * (if cooked by drivers without copybreak feature).
1743	 */
1744	skb_condense(skb);
1745
1746	skb_dst_drop(skb);
1747
1748	if (unlikely(tcp_checksum_complete(skb))) {
1749		bh_unlock_sock(sk);
1750		__TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1751		__TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
1752		return true;
1753	}
1754
1755	/* Attempt coalescing to last skb in backlog, even if we are
1756	 * above the limits.
1757	 * This is okay because skb capacity is limited to MAX_SKB_FRAGS.
1758	 */
1759	th = (const struct tcphdr *)skb->data;
1760	hdrlen = th->doff * 4;
1761	shinfo = skb_shinfo(skb);
1762
1763	if (!shinfo->gso_size)
1764		shinfo->gso_size = skb->len - hdrlen;
1765
1766	if (!shinfo->gso_segs)
1767		shinfo->gso_segs = 1;
1768
1769	tail = sk->sk_backlog.tail;
1770	if (!tail)
1771		goto no_coalesce;
1772	thtail = (struct tcphdr *)tail->data;
1773
1774	if (TCP_SKB_CB(tail)->end_seq != TCP_SKB_CB(skb)->seq ||
1775	    TCP_SKB_CB(tail)->ip_dsfield != TCP_SKB_CB(skb)->ip_dsfield ||
1776	    ((TCP_SKB_CB(tail)->tcp_flags |
1777	      TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_SYN | TCPHDR_RST | TCPHDR_URG)) ||
1778	    !((TCP_SKB_CB(tail)->tcp_flags &
1779	      TCP_SKB_CB(skb)->tcp_flags) & TCPHDR_ACK) ||
1780	    ((TCP_SKB_CB(tail)->tcp_flags ^
1781	      TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_ECE | TCPHDR_CWR)) ||
1782#ifdef CONFIG_TLS_DEVICE
1783	    tail->decrypted != skb->decrypted ||
1784#endif
1785	    thtail->doff != th->doff ||
1786	    memcmp(thtail + 1, th + 1, hdrlen - sizeof(*th)))
1787		goto no_coalesce;
1788
1789	__skb_pull(skb, hdrlen);
1790	if (skb_try_coalesce(tail, skb, &fragstolen, &delta)) {
1791		TCP_SKB_CB(tail)->end_seq = TCP_SKB_CB(skb)->end_seq;
1792
1793		if (likely(!before(TCP_SKB_CB(skb)->ack_seq, TCP_SKB_CB(tail)->ack_seq))) {
1794			TCP_SKB_CB(tail)->ack_seq = TCP_SKB_CB(skb)->ack_seq;
1795			thtail->window = th->window;
1796		}
1797
1798		/* We have to update both TCP_SKB_CB(tail)->tcp_flags and
1799		 * thtail->fin, so that the fast path in tcp_rcv_established()
1800		 * is not entered if we append a packet with a FIN.
1801		 * SYN, RST, URG are not present.
1802		 * ACK is set on both packets.
1803		 * PSH : we do not really care in TCP stack,
1804		 *       at least for 'GRO' packets.
1805		 */
1806		thtail->fin |= th->fin;
1807		TCP_SKB_CB(tail)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
1808
1809		if (TCP_SKB_CB(skb)->has_rxtstamp) {
1810			TCP_SKB_CB(tail)->has_rxtstamp = true;
1811			tail->tstamp = skb->tstamp;
1812			skb_hwtstamps(tail)->hwtstamp = skb_hwtstamps(skb)->hwtstamp;
1813		}
1814
1815		/* Not as strict as GRO. We only need to carry mss max value */
1816		skb_shinfo(tail)->gso_size = max(shinfo->gso_size,
1817						 skb_shinfo(tail)->gso_size);
1818
1819		gso_segs = skb_shinfo(tail)->gso_segs + shinfo->gso_segs;
1820		skb_shinfo(tail)->gso_segs = min_t(u32, gso_segs, 0xFFFF);
1821
1822		sk->sk_backlog.len += delta;
1823		__NET_INC_STATS(sock_net(sk),
1824				LINUX_MIB_TCPBACKLOGCOALESCE);
1825		kfree_skb_partial(skb, fragstolen);
1826		return false;
1827	}
1828	__skb_push(skb, hdrlen);
1829
1830no_coalesce:
1831	/* Only socket owner can try to collapse/prune rx queues
1832	 * to reduce memory overhead, so add a little headroom here.
1833	 * Only a few socket backlogs are likely to be non-empty concurrently.
1834	 */
1835	limit += 64*1024;
1836
1837	if (unlikely(sk_add_backlog(sk, skb, limit))) {
1838		bh_unlock_sock(sk);
1839		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPBACKLOGDROP);
1840		return true;
1841	}
1842	return false;
1843}
1844EXPORT_SYMBOL(tcp_add_backlog);
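/* The ack_seq/end_seq comparisons in tcp_add_backlog() rely on TCP's modular
 * sequence arithmetic.  For reference, before() is defined in
 * include/net/tcp.h essentially as below: the signed subtraction makes the
 * comparison wrap-safe as long as the two values are within 2^31 of each
 * other.
 */
#if 0	/* reference sketch of the helpers used above */
static inline bool before(__u32 seq1, __u32 seq2)
{
	return (__s32)(seq1 - seq2) < 0;
}
#define after(seq2, seq1)	before(seq1, seq2)
#endif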
1845
1846int tcp_filter(struct sock *sk, struct sk_buff *skb)
1847{
1848	struct tcphdr *th = (struct tcphdr *)skb->data;
1849
1850	return sk_filter_trim_cap(sk, skb, th->doff * 4);
1851}
1852EXPORT_SYMBOL(tcp_filter);
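/* tcp_filter() runs whatever socket filter is attached (classic BPF via
 * SO_ATTACH_FILTER, or eBPF via SO_ATTACH_BPF) but caps any trimming at
 * th->doff * 4, so the stack never loses the TCP header itself.  A minimal
 * userspace sketch of attaching a classic filter that accepts every packet
 * (illustrative only; error handling omitted):
 */
#if 0	/* userspace example, not kernel code */
#include <linux/filter.h>
#include <sys/socket.h>

static int attach_accept_all(int fd)
{
	struct sock_filter code[] = {
		/* Returning a large length keeps the whole packet. */
		BPF_STMT(BPF_RET | BPF_K, 0xFFFFFFFF),
	};
	struct sock_fprog prog = {
		.len	= sizeof(code) / sizeof(code[0]),
		.filter	= code,
	};

	return setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
			  &prog, sizeof(prog));
}
#endif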
1853
1854static void tcp_v4_restore_cb(struct sk_buff *skb)
1855{
1856	memmove(IPCB(skb), &TCP_SKB_CB(skb)->header.h4,
1857		sizeof(struct inet_skb_parm));
1858}
1859
1860static void tcp_v4_fill_cb(struct sk_buff *skb, const struct iphdr *iph,
1861			   const struct tcphdr *th)
1862{
1863	/* This is tricky: we move IPCB to its correct location inside TCP_SKB_CB().
1864	 * barrier() makes sure the compiler won't play fool^Waliasing games.
1865	 */
1866	memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
1867		sizeof(struct inet_skb_parm));
1868	barrier();
1869
1870	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1871	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1872				    skb->len - th->doff * 4);
1873	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1874	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1875	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1876	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
1877	TCP_SKB_CB(skb)->sacked	 = 0;
1878	TCP_SKB_CB(skb)->has_rxtstamp =
1879			skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
1880}
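/* A quick worked example of the end_seq computation above: SYN and FIN each
 * occupy one unit of sequence space on top of the payload, so a 100-byte
 * segment with FIN set and seq == 5000 covers [5000, 5101).
 */
#if 0	/* illustrative arithmetic only */
u32 seq = 5000;
u32 end_seq = seq + /* syn */ 0 + /* fin */ 1 + /* payload */ 100; /* 5101 */
#endif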
1881
1882/*
1883 *	From tcp_input.c
1884 */
1885
1886int tcp_v4_rcv(struct sk_buff *skb)
1887{
1888	struct net *net = dev_net(skb->dev);
1889	struct sk_buff *skb_to_free;
1890	int sdif = inet_sdif(skb);
1891	int dif = inet_iif(skb);
1892	const struct iphdr *iph;
1893	const struct tcphdr *th;
1894	bool refcounted;
1895	struct sock *sk;
1896	int ret;
1897
1898	if (skb->pkt_type != PACKET_HOST)
1899		goto discard_it;
1900
1901	/* Count it even if it's bad */
1902	__TCP_INC_STATS(net, TCP_MIB_INSEGS);
1903
1904	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1905		goto discard_it;
1906
1907	th = (const struct tcphdr *)skb->data;
1908
1909	if (unlikely(th->doff < sizeof(struct tcphdr) / 4))
1910		goto bad_packet;
1911	if (!pskb_may_pull(skb, th->doff * 4))
1912		goto discard_it;
1913
1914	/* An explanation is required here, I think.
1915	 * Packet length and doff are validated by header prediction,
1916	 * provided the case of th->doff == 0 is eliminated.
1917	 * So, we defer the checks. */
1918
1919	if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
1920		goto csum_error;
1921
1922	th = (const struct tcphdr *)skb->data;
1923	iph = ip_hdr(skb);
1924lookup:
1925	sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source,
1926			       th->dest, sdif, &refcounted);
1927	if (!sk)
1928		goto no_tcp_socket;
1929
1930process:
1931	if (sk->sk_state == TCP_TIME_WAIT)
1932		goto do_time_wait;
1933
1934	if (sk->sk_state == TCP_NEW_SYN_RECV) {
1935		struct request_sock *req = inet_reqsk(sk);
1936		bool req_stolen = false;
1937		struct sock *nsk;
1938
1939		sk = req->rsk_listener;
1940		if (unlikely(tcp_v4_inbound_md5_hash(sk, skb, dif, sdif))) {
1941			sk_drops_add(sk, skb);
1942			reqsk_put(req);
1943			goto discard_it;
1944		}
1945		if (tcp_checksum_complete(skb)) {
1946			reqsk_put(req);
1947			goto csum_error;
1948		}
1949		if (unlikely(sk->sk_state != TCP_LISTEN)) {
1950			inet_csk_reqsk_queue_drop_and_put(sk, req);
1951			goto lookup;
1952		}
1953		/* We own a reference on the listener, increase it again
1954		 * as we might lose it too soon.
1955		 */
1956		sock_hold(sk);
1957		refcounted = true;
1958		nsk = NULL;
1959		if (!tcp_filter(sk, skb)) {
1960			th = (const struct tcphdr *)skb->data;
1961			iph = ip_hdr(skb);
1962			tcp_v4_fill_cb(skb, iph, th);
1963			nsk = tcp_check_req(sk, skb, req, false, &req_stolen);
1964		}
1965		if (!nsk) {
1966			reqsk_put(req);
1967			if (req_stolen) {
1968				/* Another cpu got exclusive access to req
1969			 * and created a full-blown socket.
1970				 * Try to feed this packet to this socket
1971				 * instead of discarding it.
1972				 */
1973				tcp_v4_restore_cb(skb);
1974				sock_put(sk);
1975				goto lookup;
1976			}
1977			goto discard_and_relse;
1978		}
1979		if (nsk == sk) {
1980			reqsk_put(req);
1981			tcp_v4_restore_cb(skb);
1982		} else if (tcp_child_process(sk, nsk, skb)) {
1983			tcp_v4_send_reset(nsk, skb);
1984			goto discard_and_relse;
1985		} else {
1986			sock_put(sk);
1987			return 0;
1988		}
1989	}
1990	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
1991		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
1992		goto discard_and_relse;
1993	}
1994
1995	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1996		goto discard_and_relse;
1997
1998	if (tcp_v4_inbound_md5_hash(sk, skb, dif, sdif))
1999		goto discard_and_relse;
2000
2001	nf_reset_ct(skb);
2002
2003	if (tcp_filter(sk, skb))
2004		goto discard_and_relse;
2005	th = (const struct tcphdr *)skb->data;
2006	iph = ip_hdr(skb);
2007	tcp_v4_fill_cb(skb, iph, th);
2008
2009	skb->dev = NULL;
2010
2011	if (sk->sk_state == TCP_LISTEN) {
2012		ret = tcp_v4_do_rcv(sk, skb);
2013		goto put_and_return;
2014	}
2015
2016	sk_incoming_cpu_update(sk);
2017
2018	bh_lock_sock_nested(sk);
2019	tcp_segs_in(tcp_sk(sk), skb);
2020	ret = 0;
2021	if (!sock_owned_by_user(sk)) {
2022		skb_to_free = sk->sk_rx_skb_cache;
2023		sk->sk_rx_skb_cache = NULL;
2024		ret = tcp_v4_do_rcv(sk, skb);
2025	} else {
2026		if (tcp_add_backlog(sk, skb))
2027			goto discard_and_relse;
2028		skb_to_free = NULL;
2029	}
2030	bh_unlock_sock(sk);
2031	if (skb_to_free)
2032		__kfree_skb(skb_to_free);
2033
2034put_and_return:
2035	if (refcounted)
2036		sock_put(sk);
2037
2038	return ret;
2039
2040no_tcp_socket:
2041	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
2042		goto discard_it;
2043
2044	tcp_v4_fill_cb(skb, iph, th);
2045
2046	if (tcp_checksum_complete(skb)) {
2047csum_error:
2048		__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
2049bad_packet:
2050		__TCP_INC_STATS(net, TCP_MIB_INERRS);
2051	} else {
2052		tcp_v4_send_reset(NULL, skb);
2053	}
2054
2055discard_it:
2056	/* Discard frame. */
2057	kfree_skb(skb);
2058	return 0;
2059
2060discard_and_relse:
2061	sk_drops_add(sk, skb);
2062	if (refcounted)
2063		sock_put(sk);
2064	goto discard_it;
2065
2066do_time_wait:
2067	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
2068		inet_twsk_put(inet_twsk(sk));
2069		goto discard_it;
2070	}
2071
2072	tcp_v4_fill_cb(skb, iph, th);
2073
2074	if (tcp_checksum_complete(skb)) {
2075		inet_twsk_put(inet_twsk(sk));
2076		goto csum_error;
2077	}
2078	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
2079	case TCP_TW_SYN: {
2080		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
2081							&tcp_hashinfo, skb,
2082							__tcp_hdrlen(th),
2083							iph->saddr, th->source,
2084							iph->daddr, th->dest,
2085							inet_iif(skb),
2086							sdif);
2087		if (sk2) {
2088			inet_twsk_deschedule_put(inet_twsk(sk));
2089			sk = sk2;
2090			tcp_v4_restore_cb(skb);
2091			refcounted = false;
2092			goto process;
2093		}
2094	}
2095		/* to ACK */
2096		fallthrough;
2097	case TCP_TW_ACK:
2098		tcp_v4_timewait_ack(sk, skb);
2099		break;
2100	case TCP_TW_RST:
2101		tcp_v4_send_reset(sk, skb);
2102		inet_twsk_deschedule_put(inet_twsk(sk));
2103		goto discard_it;
2104	case TCP_TW_SUCCESS:;
2105	}
2106	goto discard_it;
2107}
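/* Summary of the dispatch in tcp_v4_rcv() above: after basic header and
 * checksum validation, the socket lookup result decides the path --
 * TCP_TIME_WAIT goes to tcp_timewait_state_process(), TCP_NEW_SYN_RECV is
 * matched against its listener via tcp_check_req(), TCP_LISTEN is handled
 * directly, and for everything else the segment is either processed
 * immediately (socket not owned by a user context) or queued with
 * tcp_add_backlog() for the owner to drain in release_sock().
 */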
2108
2109static struct timewait_sock_ops tcp_timewait_sock_ops = {
2110	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
2111	.twsk_unique	= tcp_twsk_unique,
2112	.twsk_destructor= tcp_twsk_destructor,
2113};
2114
2115void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
2116{
2117	struct dst_entry *dst = skb_dst(skb);
2118
2119	if (dst && dst_hold_safe(dst)) {
2120		sk->sk_rx_dst = dst;
2121		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
2122	}
2123}
2124EXPORT_SYMBOL(inet_sk_rx_dst_set);
2125
2126const struct inet_connection_sock_af_ops ipv4_specific = {
2127	.queue_xmit	   = ip_queue_xmit,
2128	.send_check	   = tcp_v4_send_check,
2129	.rebuild_header	   = inet_sk_rebuild_header,
2130	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
2131	.conn_request	   = tcp_v4_conn_request,
2132	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
2133	.net_header_len	   = sizeof(struct iphdr),
2134	.setsockopt	   = ip_setsockopt,
2135	.getsockopt	   = ip_getsockopt,
2136	.addr2sockaddr	   = inet_csk_addr2sockaddr,
2137	.sockaddr_len	   = sizeof(struct sockaddr_in),
2138	.mtu_reduced	   = tcp_v4_mtu_reduced,
2139};
2140EXPORT_SYMBOL(ipv4_specific);
2141
2142#ifdef CONFIG_TCP_MD5SIG
2143static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
2144	.md5_lookup		= tcp_v4_md5_lookup,
2145	.calc_md5_hash		= tcp_v4_md5_hash_skb,
2146	.md5_parse		= tcp_v4_parse_md5_keys,
2147};
2148#endif
2149
2150/* NOTE: A lot of things are set to zero explicitly by the call to
2151 *       sk_alloc(), so they need not be done here.
2152 */
2153static int tcp_v4_init_sock(struct sock *sk)
2154{
2155	struct inet_connection_sock *icsk = inet_csk(sk);
2156
2157	tcp_init_sock(sk);
2158
2159	icsk->icsk_af_ops = &ipv4_specific;
2160
2161#ifdef CONFIG_TCP_MD5SIG
2162	tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
2163#endif
2164
2165	return 0;
2166}
2167
2168void tcp_v4_destroy_sock(struct sock *sk)
2169{
2170	struct tcp_sock *tp = tcp_sk(sk);
2171
2172	trace_tcp_destroy_sock(sk);
2173
2174	tcp_clear_xmit_timers(sk);
2175
2176	tcp_cleanup_congestion_control(sk);
2177
2178	tcp_cleanup_ulp(sk);
2179
2180	/* Clean up the write buffer. */
2181	tcp_write_queue_purge(sk);
2182
2183	/* Check if we want to disable active TFO */
2184	tcp_fastopen_active_disable_ofo_check(sk);
2185
2186	/* Cleans up our, hopefully empty, out_of_order_queue. */
2187	skb_rbtree_purge(&tp->out_of_order_queue);
2188
2189#ifdef CONFIG_TCP_MD5SIG
2190	/* Clean up the MD5 key list, if any */
2191	if (tp->md5sig_info) {
2192		tcp_clear_md5_list(sk);
2193		kfree_rcu(rcu_dereference_protected(tp->md5sig_info, 1), rcu);
2194		tp->md5sig_info = NULL;
2195	}
2196#endif
2197
2198	/* Clean up a referenced TCP bind bucket. */
2199	if (inet_csk(sk)->icsk_bind_hash)
2200		inet_put_port(sk);
2201
2202	BUG_ON(rcu_access_pointer(tp->fastopen_rsk));
2203
2204	/* If socket is aborted during connect operation */
2205	tcp_free_fastopen_req(tp);
2206	tcp_fastopen_destroy_cipher(sk);
2207	tcp_saved_syn_free(tp);
2208
2209	sk_sockets_allocated_dec(sk);
2210}
2211EXPORT_SYMBOL(tcp_v4_destroy_sock);
2212
2213#ifdef CONFIG_PROC_FS
2214/* Proc filesystem TCP sock list dumping. */
2215
2216/*
2217 * Get the next listener socket following cur.  If cur is NULL, get the
2218 * first socket starting from the bucket given in st->bucket; when
2219 * st->bucket is zero, the very first socket in the hash table is returned.
2220 */
2221static void *listening_get_next(struct seq_file *seq, void *cur)
2222{
2223	struct tcp_seq_afinfo *afinfo;
2224	struct tcp_iter_state *st = seq->private;
2225	struct net *net = seq_file_net(seq);
2226	struct inet_listen_hashbucket *ilb;
2227	struct hlist_nulls_node *node;
2228	struct sock *sk = cur;
2229
2230	if (st->bpf_seq_afinfo)
2231		afinfo = st->bpf_seq_afinfo;
2232	else
2233		afinfo = PDE_DATA(file_inode(seq->file));
2234
2235	if (!sk) {
2236get_head:
2237		ilb = &tcp_hashinfo.listening_hash[st->bucket];
2238		spin_lock(&ilb->lock);
2239		sk = sk_nulls_head(&ilb->nulls_head);
2240		st->offset = 0;
2241		goto get_sk;
2242	}
2243	ilb = &tcp_hashinfo.listening_hash[st->bucket];
2244	++st->num;
2245	++st->offset;
2246
2247	sk = sk_nulls_next(sk);
2248get_sk:
2249	sk_nulls_for_each_from(sk, node) {
2250		if (!net_eq(sock_net(sk), net))
2251			continue;
2252		if (afinfo->family == AF_UNSPEC ||
2253		    sk->sk_family == afinfo->family)
2254			return sk;
2255	}
2256	spin_unlock(&ilb->lock);
2257	st->offset = 0;
2258	if (++st->bucket < INET_LHTABLE_SIZE)
2259		goto get_head;
2260	return NULL;
2261}
2262
2263static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
2264{
2265	struct tcp_iter_state *st = seq->private;
2266	void *rc;
2267
2268	st->bucket = 0;
2269	st->offset = 0;
2270	rc = listening_get_next(seq, NULL);
2271
2272	while (rc && *pos) {
2273		rc = listening_get_next(seq, rc);
2274		--*pos;
2275	}
2276	return rc;
2277}
2278
2279static inline bool empty_bucket(const struct tcp_iter_state *st)
2280{
2281	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
2282}
2283
2284/*
2285 * Get first established socket starting from bucket given in st->bucket.
2286 * If st->bucket is zero, the very first socket in the hash is returned.
2287 */
2288static void *established_get_first(struct seq_file *seq)
2289{
2290	struct tcp_seq_afinfo *afinfo;
2291	struct tcp_iter_state *st = seq->private;
2292	struct net *net = seq_file_net(seq);
2293	void *rc = NULL;
2294
2295	if (st->bpf_seq_afinfo)
2296		afinfo = st->bpf_seq_afinfo;
2297	else
2298		afinfo = PDE_DATA(file_inode(seq->file));
2299
2300	st->offset = 0;
2301	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
2302		struct sock *sk;
2303		struct hlist_nulls_node *node;
2304		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
2305
2306		/* Lockless fast path for the common case of empty buckets */
2307		if (empty_bucket(st))
2308			continue;
2309
2310		spin_lock_bh(lock);
2311		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
2312			if ((afinfo->family != AF_UNSPEC &&
2313			     sk->sk_family != afinfo->family) ||
2314			    !net_eq(sock_net(sk), net)) {
2315				continue;
2316			}
2317			rc = sk;
2318			goto out;
2319		}
2320		spin_unlock_bh(lock);
2321	}
2322out:
2323	return rc;
2324}
2325
2326static void *established_get_next(struct seq_file *seq, void *cur)
2327{
2328	struct tcp_seq_afinfo *afinfo;
2329	struct sock *sk = cur;
2330	struct hlist_nulls_node *node;
2331	struct tcp_iter_state *st = seq->private;
2332	struct net *net = seq_file_net(seq);
2333
2334	if (st->bpf_seq_afinfo)
2335		afinfo = st->bpf_seq_afinfo;
2336	else
2337		afinfo = PDE_DATA(file_inode(seq->file));
2338
2339	++st->num;
2340	++st->offset;
2341
2342	sk = sk_nulls_next(sk);
2343
2344	sk_nulls_for_each_from(sk, node) {
2345		if ((afinfo->family == AF_UNSPEC ||
2346		     sk->sk_family == afinfo->family) &&
2347		    net_eq(sock_net(sk), net))
2348			return sk;
2349	}
2350
2351	spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2352	++st->bucket;
2353	return established_get_first(seq);
2354}
2355
2356static void *established_get_idx(struct seq_file *seq, loff_t pos)
2357{
2358	struct tcp_iter_state *st = seq->private;
2359	void *rc;
2360
2361	st->bucket = 0;
2362	rc = established_get_first(seq);
2363
2364	while (rc && pos) {
2365		rc = established_get_next(seq, rc);
2366		--pos;
2367	}
2368	return rc;
2369}
2370
2371static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2372{
2373	void *rc;
2374	struct tcp_iter_state *st = seq->private;
2375
2376	st->state = TCP_SEQ_STATE_LISTENING;
2377	rc	  = listening_get_idx(seq, &pos);
2378
2379	if (!rc) {
2380		st->state = TCP_SEQ_STATE_ESTABLISHED;
2381		rc	  = established_get_idx(seq, pos);
2382	}
2383
2384	return rc;
2385}
2386
2387static void *tcp_seek_last_pos(struct seq_file *seq)
2388{
2389	struct tcp_iter_state *st = seq->private;
2390	int offset = st->offset;
2391	int orig_num = st->num;
2392	void *rc = NULL;
2393
2394	switch (st->state) {
2395	case TCP_SEQ_STATE_LISTENING:
2396		if (st->bucket >= INET_LHTABLE_SIZE)
2397			break;
2398		st->state = TCP_SEQ_STATE_LISTENING;
2399		rc = listening_get_next(seq, NULL);
2400		while (offset-- && rc)
2401			rc = listening_get_next(seq, rc);
2402		if (rc)
2403			break;
2404		st->bucket = 0;
2405		st->state = TCP_SEQ_STATE_ESTABLISHED;
2406		fallthrough;
2407	case TCP_SEQ_STATE_ESTABLISHED:
2408		if (st->bucket > tcp_hashinfo.ehash_mask)
2409			break;
2410		rc = established_get_first(seq);
2411		while (offset-- && rc)
2412			rc = established_get_next(seq, rc);
2413	}
2414
2415	st->num = orig_num;
2416
2417	return rc;
2418}
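/* tcp_seek_last_pos() resumes a /proc read at the remembered bucket/offset
 * instead of rewinding from the start, which keeps sequential reads of a
 * large hash table cheap.  st->num is restored at the end because the walk
 * above re-counts entries while seeking to the saved offset.
 */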
2419
2420void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2421{
2422	struct tcp_iter_state *st = seq->private;
2423	void *rc;
2424
2425	if (*pos && *pos == st->last_pos) {
2426		rc = tcp_seek_last_pos(seq);
2427		if (rc)
2428			goto out;
2429	}
2430
2431	st->state = TCP_SEQ_STATE_LISTENING;
2432	st->num = 0;
2433	st->bucket = 0;
2434	st->offset = 0;
2435	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2436
2437out:
2438	st->last_pos = *pos;
2439	return rc;
2440}
2441EXPORT_SYMBOL(tcp_seq_start);
2442
2443void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2444{
2445	struct tcp_iter_state *st = seq->private;
2446	void *rc = NULL;
2447
2448	if (v == SEQ_START_TOKEN) {
2449		rc = tcp_get_idx(seq, 0);
2450		goto out;
2451	}
2452
2453	switch (st->state) {
2454	case TCP_SEQ_STATE_LISTENING:
2455		rc = listening_get_next(seq, v);
2456		if (!rc) {
2457			st->state = TCP_SEQ_STATE_ESTABLISHED;
2458			st->bucket = 0;
2459			st->offset = 0;
2460			rc	  = established_get_first(seq);
2461		}
2462		break;
2463	case TCP_SEQ_STATE_ESTABLISHED:
2464		rc = established_get_next(seq, v);
2465		break;
2466	}
2467out:
2468	++*pos;
2469	st->last_pos = *pos;
2470	return rc;
2471}
2472EXPORT_SYMBOL(tcp_seq_next);
2473
2474void tcp_seq_stop(struct seq_file *seq, void *v)
2475{
2476	struct tcp_iter_state *st = seq->private;
2477
2478	switch (st->state) {
2479	case TCP_SEQ_STATE_LISTENING:
2480		if (v != SEQ_START_TOKEN)
2481			spin_unlock(&tcp_hashinfo.listening_hash[st->bucket].lock);
2482		break;
2483	case TCP_SEQ_STATE_ESTABLISHED:
2484		if (v)
2485			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2486		break;
2487	}
2488}
2489EXPORT_SYMBOL(tcp_seq_stop);
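/* Locking contract for the seq_ops above: tcp_seq_start()/tcp_seq_next()
 * return with the current listening-hash or ehash bucket lock held, and
 * tcp_seq_stop() releases whichever lock the iterator state says is held.
 * This is why st->state and st->bucket must always describe the lock
 * actually taken.
 */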
2490
2491static void get_openreq4(const struct request_sock *req,
2492			 struct seq_file *f, int i)
2493{
2494	const struct inet_request_sock *ireq = inet_rsk(req);
2495	long delta = req->rsk_timer.expires - jiffies;
2496
2497	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2498		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
2499		i,
2500		ireq->ir_loc_addr,
2501		ireq->ir_num,
2502		ireq->ir_rmt_addr,
2503		ntohs(ireq->ir_rmt_port),
2504		TCP_SYN_RECV,
2505		0, 0, /* could print option size, but that is af dependent. */
2506		1,    /* timers active (only the expire timer) */
2507		jiffies_delta_to_clock_t(delta),
2508		req->num_timeout,
2509		from_kuid_munged(seq_user_ns(f),
2510				 sock_i_uid(req->rsk_listener)),
2511		0,  /* non standard timer */
2512		0, /* open_requests have no inode */
2513		0,
2514		req);
2515}
2516
2517static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
2518{
2519	int timer_active;
2520	unsigned long timer_expires;
2521	const struct tcp_sock *tp = tcp_sk(sk);
2522	const struct inet_connection_sock *icsk = inet_csk(sk);
2523	const struct inet_sock *inet = inet_sk(sk);
2524	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
2525	__be32 dest = inet->inet_daddr;
2526	__be32 src = inet->inet_rcv_saddr;
2527	__u16 destp = ntohs(inet->inet_dport);
2528	__u16 srcp = ntohs(inet->inet_sport);
2529	int rx_queue;
2530	int state;
2531
2532	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
2533	    icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
2534	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
2535		timer_active	= 1;
2536		timer_expires	= icsk->icsk_timeout;
2537	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2538		timer_active	= 4;
2539		timer_expires	= icsk->icsk_timeout;
2540	} else if (timer_pending(&sk->sk_timer)) {
2541		timer_active	= 2;
2542		timer_expires	= sk->sk_timer.expires;
2543	} else {
2544		timer_active	= 0;
2545		timer_expires = jiffies;
2546	}
2547
2548	state = inet_sk_state_load(sk);
2549	if (state == TCP_LISTEN)
2550		rx_queue = READ_ONCE(sk->sk_ack_backlog);
2551	else
2552		/* Because we don't lock the socket,
2553		 * we might find a transient negative value.
2554		 */
2555		rx_queue = max_t(int, READ_ONCE(tp->rcv_nxt) -
2556				      READ_ONCE(tp->copied_seq), 0);
2557
2558	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2559			"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
2560		i, src, srcp, dest, destp, state,
2561		READ_ONCE(tp->write_seq) - tp->snd_una,
2562		rx_queue,
2563		timer_active,
2564		jiffies_delta_to_clock_t(timer_expires - jiffies),
2565		icsk->icsk_retransmits,
2566		from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
2567		icsk->icsk_probes_out,
2568		sock_i_ino(sk),
2569		refcount_read(&sk->sk_refcnt), sk,
2570		jiffies_to_clock_t(icsk->icsk_rto),
2571		jiffies_to_clock_t(icsk->icsk_ack.ato),
2572		(icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(sk),
2573		tp->snd_cwnd,
2574		state == TCP_LISTEN ?
2575		    fastopenq->max_qlen :
2576		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
2577}
2578
2579static void get_timewait4_sock(const struct inet_timewait_sock *tw,
2580			       struct seq_file *f, int i)
2581{
2582	long delta = tw->tw_timer.expires - jiffies;
2583	__be32 dest, src;
2584	__u16 destp, srcp;
2585
2586	dest  = tw->tw_daddr;
2587	src   = tw->tw_rcv_saddr;
2588	destp = ntohs(tw->tw_dport);
2589	srcp  = ntohs(tw->tw_sport);
2590
2591	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2592		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
2593		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2594		3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
2595		refcount_read(&tw->tw_refcnt), tw);
2596}
2597
2598#define TMPSZ 150
2599
2600static int tcp4_seq_show(struct seq_file *seq, void *v)
2601{
2602	struct tcp_iter_state *st;
2603	struct sock *sk = v;
2604
2605	seq_setwidth(seq, TMPSZ - 1);
2606	if (v == SEQ_START_TOKEN) {
2607		seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
2608			   "rx_queue tr tm->when retrnsmt   uid  timeout "
2609			   "inode");
2610		goto out;
2611	}
2612	st = seq->private;
2613
2614	if (sk->sk_state == TCP_TIME_WAIT)
2615		get_timewait4_sock(v, seq, st->num);
2616	else if (sk->sk_state == TCP_NEW_SYN_RECV)
2617		get_openreq4(v, seq, st->num);
2618	else
2619		get_tcp4_sock(v, seq, st->num);
2620out:
2621	seq_pad(seq, '\n');
2622	return 0;
2623}
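/* The format emitted above is what userspace tools such as net-tools'
 * netstat parse out of /proc/net/tcp.  A hedged userspace sketch of reading
 * the local address column (assumes a little-endian host, where the %08X
 * rendering of the big-endian address shows the octets reversed):
 */
#if 0	/* userspace example, not kernel code */
#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/net/tcp", "r");

	if (!f)
		return 1;
	fgets(line, sizeof(line), f);		/* skip the header line */
	while (fgets(line, sizeof(line), f)) {
		unsigned int addr, port, state;

		if (sscanf(line, "%*d: %x:%x %*x:%*x %x",
			   &addr, &port, &state) == 3)
			printf("%u.%u.%u.%u:%u state %02x\n",
			       addr & 0xff, (addr >> 8) & 0xff,
			       (addr >> 16) & 0xff, (addr >> 24) & 0xff,
			       port, state);
	}
	fclose(f);
	return 0;
}
#endif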
2624
2625#ifdef CONFIG_BPF_SYSCALL
2626struct bpf_iter__tcp {
2627	__bpf_md_ptr(struct bpf_iter_meta *, meta);
2628	__bpf_md_ptr(struct sock_common *, sk_common);
2629	uid_t uid __aligned(8);
2630};
2631
2632static int tcp_prog_seq_show(struct bpf_prog *prog, struct bpf_iter_meta *meta,
2633			     struct sock_common *sk_common, uid_t uid)
2634{
2635	struct bpf_iter__tcp ctx;
2636
2637	meta->seq_num--;  /* skip SEQ_START_TOKEN */
2638	ctx.meta = meta;
2639	ctx.sk_common = sk_common;
2640	ctx.uid = uid;
2641	return bpf_iter_run_prog(prog, &ctx);
2642}
2643
2644static int bpf_iter_tcp_seq_show(struct seq_file *seq, void *v)
2645{
2646	struct bpf_iter_meta meta;
2647	struct bpf_prog *prog;
2648	struct sock *sk = v;
2649	uid_t uid;
2650
2651	if (v == SEQ_START_TOKEN)
2652		return 0;
2653
2654	if (sk->sk_state == TCP_TIME_WAIT) {
2655		uid = 0;
2656	} else if (sk->sk_state == TCP_NEW_SYN_RECV) {
2657		const struct request_sock *req = v;
2658
2659		uid = from_kuid_munged(seq_user_ns(seq),
2660				       sock_i_uid(req->rsk_listener));
2661	} else {
2662		uid = from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk));
2663	}
2664
2665	meta.seq = seq;
2666	prog = bpf_iter_get_info(&meta, false);
2667	return tcp_prog_seq_show(prog, &meta, v, uid);
2668}
2669
2670static void bpf_iter_tcp_seq_stop(struct seq_file *seq, void *v)
2671{
2672	struct bpf_iter_meta meta;
2673	struct bpf_prog *prog;
2674
2675	if (!v) {
2676		meta.seq = seq;
2677		prog = bpf_iter_get_info(&meta, true);
2678		if (prog)
2679			(void)tcp_prog_seq_show(prog, &meta, v, 0);
2680	}
2681
2682	tcp_seq_stop(seq, v);
2683}
2684
2685static const struct seq_operations bpf_iter_tcp_seq_ops = {
2686	.show		= bpf_iter_tcp_seq_show,
2687	.start		= tcp_seq_start,
2688	.next		= tcp_seq_next,
2689	.stop		= bpf_iter_tcp_seq_stop,
2690};
2691#endif
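/* The seq_ops above back the "tcp" BPF iterator target registered further
 * down.  A hedged sketch of the BPF program side (the kernel's BPF selftests
 * carry a complete version); BPF_SEQ_PRINTF() is libbpf's convenience
 * wrapper around the bpf_seq_printf() helper:
 */
#if 0	/* BPF program, illustrative only */
SEC("iter/tcp")
int dump_tcp(struct bpf_iter__tcp *ctx)
{
	struct sock_common *sk_common = ctx->sk_common;

	if (!sk_common)	/* NULL on the end-of-iteration call */
		return 0;
	BPF_SEQ_PRINTF(ctx->meta->seq, "family %d uid %u\n",
		       sk_common->skc_family, ctx->uid);
	return 0;
}
#endif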
2692
2693static const struct seq_operations tcp4_seq_ops = {
2694	.show		= tcp4_seq_show,
2695	.start		= tcp_seq_start,
2696	.next		= tcp_seq_next,
2697	.stop		= tcp_seq_stop,
2698};
2699
2700static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2701	.family		= AF_INET,
2702};
2703
2704static int __net_init tcp4_proc_init_net(struct net *net)
2705{
2706	if (!proc_create_net_data("tcp", 0444, net->proc_net, &tcp4_seq_ops,
2707			sizeof(struct tcp_iter_state), &tcp4_seq_afinfo))
2708		return -ENOMEM;
2709	return 0;
2710}
2711
2712static void __net_exit tcp4_proc_exit_net(struct net *net)
2713{
2714	remove_proc_entry("tcp", net->proc_net);
2715}
2716
2717static struct pernet_operations tcp4_net_ops = {
2718	.init = tcp4_proc_init_net,
2719	.exit = tcp4_proc_exit_net,
2720};
2721
2722int __init tcp4_proc_init(void)
2723{
2724	return register_pernet_subsys(&tcp4_net_ops);
2725}
2726
2727void tcp4_proc_exit(void)
2728{
2729	unregister_pernet_subsys(&tcp4_net_ops);
2730}
2731#endif /* CONFIG_PROC_FS */
2732
2733struct proto tcp_prot = {
2734	.name			= "TCP",
2735	.owner			= THIS_MODULE,
2736	.close			= tcp_close,
2737	.pre_connect		= tcp_v4_pre_connect,
2738	.connect		= tcp_v4_connect,
2739	.disconnect		= tcp_disconnect,
2740	.accept			= inet_csk_accept,
2741	.ioctl			= tcp_ioctl,
2742	.init			= tcp_v4_init_sock,
2743	.destroy		= tcp_v4_destroy_sock,
2744	.shutdown		= tcp_shutdown,
2745	.setsockopt		= tcp_setsockopt,
2746	.getsockopt		= tcp_getsockopt,
2747	.keepalive		= tcp_set_keepalive,
2748	.recvmsg		= tcp_recvmsg,
2749	.sendmsg		= tcp_sendmsg,
2750	.sendpage		= tcp_sendpage,
2751	.backlog_rcv		= tcp_v4_do_rcv,
2752	.release_cb		= tcp_release_cb,
2753	.hash			= inet_hash,
2754	.unhash			= inet_unhash,
2755	.get_port		= inet_csk_get_port,
2756	.enter_memory_pressure	= tcp_enter_memory_pressure,
2757	.leave_memory_pressure	= tcp_leave_memory_pressure,
2758	.stream_memory_free	= tcp_stream_memory_free,
2759	.sockets_allocated	= &tcp_sockets_allocated,
2760	.orphan_count		= &tcp_orphan_count,
2761	.memory_allocated	= &tcp_memory_allocated,
2762	.memory_pressure	= &tcp_memory_pressure,
2763	.sysctl_mem		= sysctl_tcp_mem,
2764	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_wmem),
2765	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_rmem),
2766	.max_header		= MAX_TCP_HEADER,
2767	.obj_size		= sizeof(struct tcp_sock),
2768	.slab_flags		= SLAB_TYPESAFE_BY_RCU,
2769	.twsk_prot		= &tcp_timewait_sock_ops,
2770	.rsk_prot		= &tcp_request_sock_ops,
2771	.h.hashinfo		= &tcp_hashinfo,
2772	.no_autobind		= true,
2773	.diag_destroy		= tcp_abort,
2774};
2775EXPORT_SYMBOL(tcp_prot);
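/* tcp_prot is not registered here: af_inet.c lists it in inetsw_array as
 * the protocol behind SOCK_STREAM/IPPROTO_TCP sockets, which is how a
 * userspace socket(AF_INET, SOCK_STREAM, 0) call ends up routed to the
 * callbacks in the table above.
 */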
2776
2777static void __net_exit tcp_sk_exit(struct net *net)
2778{
2779	int cpu;
2780
2781	if (net->ipv4.tcp_congestion_control)
2782		bpf_module_put(net->ipv4.tcp_congestion_control,
2783			       net->ipv4.tcp_congestion_control->owner);
2784
2785	for_each_possible_cpu(cpu)
2786		inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
2787	free_percpu(net->ipv4.tcp_sk);
2788}
2789
2790static int __net_init tcp_sk_init(struct net *net)
2791{
2792	int res, cpu, cnt;
2793
2794	net->ipv4.tcp_sk = alloc_percpu(struct sock *);
2795	if (!net->ipv4.tcp_sk)
2796		return -ENOMEM;
2797
2798	for_each_possible_cpu(cpu) {
2799		struct sock *sk;
2800
2801		res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
2802					   IPPROTO_TCP, net);
2803		if (res)
2804			goto fail;
2805		sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
2806
2807		/* Please enforce IP_DF and IPID==0 for RST and
2808		 * ACK sent in SYN-RECV and TIME-WAIT state.
2809		 */
2810		inet_sk(sk)->pmtudisc = IP_PMTUDISC_DO;
2811
2812		*per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
2813	}
2814
2815	net->ipv4.sysctl_tcp_ecn = 2;
2816	net->ipv4.sysctl_tcp_ecn_fallback = 1;
2817
2818	net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
2819	net->ipv4.sysctl_tcp_min_snd_mss = TCP_MIN_SND_MSS;
2820	net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
2821	net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;
2822	net->ipv4.sysctl_tcp_mtu_probe_floor = TCP_MIN_SND_MSS;
2823
2824	net->ipv4.sysctl_tcp_keepalive_time = TCP_KEEPALIVE_TIME;
2825	net->ipv4.sysctl_tcp_keepalive_probes = TCP_KEEPALIVE_PROBES;
2826	net->ipv4.sysctl_tcp_keepalive_intvl = TCP_KEEPALIVE_INTVL;
2827
2828	net->ipv4.sysctl_tcp_syn_retries = TCP_SYN_RETRIES;
2829	net->ipv4.sysctl_tcp_synack_retries = TCP_SYNACK_RETRIES;
2830	net->ipv4.sysctl_tcp_syncookies = 1;
2831	net->ipv4.sysctl_tcp_reordering = TCP_FASTRETRANS_THRESH;
2832	net->ipv4.sysctl_tcp_retries1 = TCP_RETR1;
2833	net->ipv4.sysctl_tcp_retries2 = TCP_RETR2;
2834	net->ipv4.sysctl_tcp_orphan_retries = 0;
2835	net->ipv4.sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
2836	net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX;
2837	net->ipv4.sysctl_tcp_tw_reuse = 2;
2838	net->ipv4.sysctl_tcp_no_ssthresh_metrics_save = 1;
2839
2840	cnt = tcp_hashinfo.ehash_mask + 1;
2841	net->ipv4.tcp_death_row.sysctl_max_tw_buckets = cnt / 2;
2842	net->ipv4.tcp_death_row.hashinfo = &tcp_hashinfo;
2843
2844	net->ipv4.sysctl_max_syn_backlog = max(128, cnt / 128);
2845	net->ipv4.sysctl_tcp_sack = 1;
2846	net->ipv4.sysctl_tcp_window_scaling = 1;
2847	net->ipv4.sysctl_tcp_timestamps = 1;
2848	net->ipv4.sysctl_tcp_early_retrans = 3;
2849	net->ipv4.sysctl_tcp_recovery = TCP_RACK_LOSS_DETECTION;
2850	net->ipv4.sysctl_tcp_slow_start_after_idle = 1; /* By default, RFC2861 behavior.  */
2851	net->ipv4.sysctl_tcp_retrans_collapse = 1;
2852	net->ipv4.sysctl_tcp_max_reordering = 300;
2853	net->ipv4.sysctl_tcp_dsack = 1;
2854	net->ipv4.sysctl_tcp_app_win = 31;
2855	net->ipv4.sysctl_tcp_adv_win_scale = 1;
2856	net->ipv4.sysctl_tcp_frto = 2;
2857	net->ipv4.sysctl_tcp_moderate_rcvbuf = 1;
2858	/* This limits the percentage of the congestion window which we
2859	 * will allow a single TSO frame to consume.  Building TSO frames
2860	 * which are too large can cause TCP streams to be bursty.
2861	 */
2862	net->ipv4.sysctl_tcp_tso_win_divisor = 3;
2863	/* Default TSQ limit of 16 TSO segments */
2864	net->ipv4.sysctl_tcp_limit_output_bytes = 16 * 65536;
2865	/* rfc5961 challenge ack rate limiting */
2866	net->ipv4.sysctl_tcp_challenge_ack_limit = 1000;
2867	net->ipv4.sysctl_tcp_min_tso_segs = 2;
2868	net->ipv4.sysctl_tcp_min_rtt_wlen = 300;
2869	net->ipv4.sysctl_tcp_autocorking = 1;
2870	net->ipv4.sysctl_tcp_invalid_ratelimit = HZ/2;
2871	net->ipv4.sysctl_tcp_pacing_ss_ratio = 200;
2872	net->ipv4.sysctl_tcp_pacing_ca_ratio = 120;
2873	if (net != &init_net) {
2874		memcpy(net->ipv4.sysctl_tcp_rmem,
2875		       init_net.ipv4.sysctl_tcp_rmem,
2876		       sizeof(init_net.ipv4.sysctl_tcp_rmem));
2877		memcpy(net->ipv4.sysctl_tcp_wmem,
2878		       init_net.ipv4.sysctl_tcp_wmem,
2879		       sizeof(init_net.ipv4.sysctl_tcp_wmem));
2880	}
2881	net->ipv4.sysctl_tcp_comp_sack_delay_ns = NSEC_PER_MSEC;
2882	net->ipv4.sysctl_tcp_comp_sack_slack_ns = 100 * NSEC_PER_USEC;
2883	net->ipv4.sysctl_tcp_comp_sack_nr = 44;
2884	net->ipv4.sysctl_tcp_fastopen = TFO_CLIENT_ENABLE;
2885	spin_lock_init(&net->ipv4.tcp_fastopen_ctx_lock);
2886	net->ipv4.sysctl_tcp_fastopen_blackhole_timeout = 60 * 60;
2887	atomic_set(&net->ipv4.tfo_active_disable_times, 0);
2888
2889	/* Reno is always built in */
2890	if (!net_eq(net, &init_net) &&
2891	    bpf_try_module_get(init_net.ipv4.tcp_congestion_control,
2892			       init_net.ipv4.tcp_congestion_control->owner))
2893		net->ipv4.tcp_congestion_control = init_net.ipv4.tcp_congestion_control;
2894	else
2895		net->ipv4.tcp_congestion_control = &tcp_reno;
2896
2897	return 0;
2898fail:
2899	tcp_sk_exit(net);
2900
2901	return res;
2902}
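/* Every sysctl default initialised above is exposed per network namespace
 * under /proc/sys/net/ipv4/ (tcp_syncookies, tcp_sack, tcp_rmem, ...), so a
 * freshly created netns starts from these values.  tcp_rmem/tcp_wmem are
 * the exception: child namespaces copy whatever init_net currently uses.
 */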
2903
2904static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
2905{
2906	struct net *net;
2907
2908	inet_twsk_purge(&tcp_hashinfo, AF_INET);
2909
2910	list_for_each_entry(net, net_exit_list, exit_list)
2911		tcp_fastopen_ctx_destroy(net);
2912}
2913
2914static struct pernet_operations __net_initdata tcp_sk_ops = {
2915       .init	   = tcp_sk_init,
2916       .exit	   = tcp_sk_exit,
2917       .exit_batch = tcp_sk_exit_batch,
2918};
2919
2920#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
2921DEFINE_BPF_ITER_FUNC(tcp, struct bpf_iter_meta *meta,
2922		     struct sock_common *sk_common, uid_t uid)
2923
2924static int bpf_iter_init_tcp(void *priv_data, struct bpf_iter_aux_info *aux)
2925{
2926	struct tcp_iter_state *st = priv_data;
2927	struct tcp_seq_afinfo *afinfo;
2928	int ret;
2929
2930	afinfo = kmalloc(sizeof(*afinfo), GFP_USER | __GFP_NOWARN);
2931	if (!afinfo)
2932		return -ENOMEM;
2933
2934	afinfo->family = AF_UNSPEC;
2935	st->bpf_seq_afinfo = afinfo;
2936	ret = bpf_iter_init_seq_net(priv_data, aux);
2937	if (ret)
2938		kfree(afinfo);
2939	return ret;
2940}
2941
2942static void bpf_iter_fini_tcp(void *priv_data)
2943{
2944	struct tcp_iter_state *st = priv_data;
2945
2946	kfree(st->bpf_seq_afinfo);
2947	bpf_iter_fini_seq_net(priv_data);
2948}
2949
2950static const struct bpf_iter_seq_info tcp_seq_info = {
2951	.seq_ops		= &bpf_iter_tcp_seq_ops,
2952	.init_seq_private	= bpf_iter_init_tcp,
2953	.fini_seq_private	= bpf_iter_fini_tcp,
2954	.seq_priv_size		= sizeof(struct tcp_iter_state),
2955};
2956
2957static struct bpf_iter_reg tcp_reg_info = {
2958	.target			= "tcp",
2959	.ctx_arg_info_size	= 1,
2960	.ctx_arg_info		= {
2961		{ offsetof(struct bpf_iter__tcp, sk_common),
2962		  PTR_TO_BTF_ID_OR_NULL },
2963	},
2964	.seq_info		= &tcp_seq_info,
2965};
2966
2967static void __init bpf_iter_register(void)
2968{
2969	tcp_reg_info.ctx_arg_info[0].btf_id = btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON];
2970	if (bpf_iter_reg_target(&tcp_reg_info))
2971		pr_warn("Warning: could not register bpf iterator tcp\n");
2972}
2973
2974#endif
2975
2976void __init tcp_v4_init(void)
2977{
2978	if (register_pernet_subsys(&tcp_sk_ops))
2979		panic("Failed to create the TCP control socket.\n");
2980
2981#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
2982	bpf_iter_register();
2983#endif
2984}
v4.6
 
   1/*
   2 * INET		An implementation of the TCP/IP protocol suite for the LINUX
   3 *		operating system.  INET is implemented using the  BSD Socket
   4 *		interface as the means of communication with the user level.
   5 *
   6 *		Implementation of the Transmission Control Protocol(TCP).
   7 *
   8 *		IPv4 specific functions
   9 *
  10 *
  11 *		code split from:
  12 *		linux/ipv4/tcp.c
  13 *		linux/ipv4/tcp_input.c
  14 *		linux/ipv4/tcp_output.c
  15 *
  16 *		See tcp.c for author information
  17 *
  18 *	This program is free software; you can redistribute it and/or
  19 *      modify it under the terms of the GNU General Public License
  20 *      as published by the Free Software Foundation; either version
  21 *      2 of the License, or (at your option) any later version.
  22 */
  23
  24/*
  25 * Changes:
  26 *		David S. Miller	:	New socket lookup architecture.
  27 *					This code is dedicated to John Dyson.
  28 *		David S. Miller :	Change semantics of established hash,
  29 *					half is devoted to TIME_WAIT sockets
  30 *					and the rest go in the other half.
  31 *		Andi Kleen :		Add support for syncookies and fixed
  32 *					some bugs: ip options weren't passed to
  33 *					the TCP layer, missed a check for an
  34 *					ACK bit.
  35 *		Andi Kleen :		Implemented fast path mtu discovery.
  36 *	     				Fixed many serious bugs in the
  37 *					request_sock handling and moved
  38 *					most of it into the af independent code.
  39 *					Added tail drop and some other bugfixes.
  40 *					Added new listen semantics.
  41 *		Mike McLagan	:	Routing by source
  42 *	Juan Jose Ciarlante:		ip_dynaddr bits
  43 *		Andi Kleen:		various fixes.
  44 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
  45 *					coma.
  46 *	Andi Kleen		:	Fix new listen.
  47 *	Andi Kleen		:	Fix accept error reporting.
  48 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
  49 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
  50 *					a single port at the same time.
  51 */
  52
  53#define pr_fmt(fmt) "TCP: " fmt
  54
  55#include <linux/bottom_half.h>
  56#include <linux/types.h>
  57#include <linux/fcntl.h>
  58#include <linux/module.h>
  59#include <linux/random.h>
  60#include <linux/cache.h>
  61#include <linux/jhash.h>
  62#include <linux/init.h>
  63#include <linux/times.h>
  64#include <linux/slab.h>
  65
  66#include <net/net_namespace.h>
  67#include <net/icmp.h>
  68#include <net/inet_hashtables.h>
  69#include <net/tcp.h>
  70#include <net/transp_v6.h>
  71#include <net/ipv6.h>
  72#include <net/inet_common.h>
  73#include <net/timewait_sock.h>
  74#include <net/xfrm.h>
  75#include <net/secure_seq.h>
  76#include <net/busy_poll.h>
  77
  78#include <linux/inet.h>
  79#include <linux/ipv6.h>
  80#include <linux/stddef.h>
  81#include <linux/proc_fs.h>
  82#include <linux/seq_file.h>
 
 
  83
  84#include <crypto/hash.h>
  85#include <linux/scatterlist.h>
  86
  87int sysctl_tcp_tw_reuse __read_mostly;
  88int sysctl_tcp_low_latency __read_mostly;
  89EXPORT_SYMBOL(sysctl_tcp_low_latency);
  90
  91#ifdef CONFIG_TCP_MD5SIG
  92static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
  93			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
  94#endif
  95
  96struct inet_hashinfo tcp_hashinfo;
  97EXPORT_SYMBOL(tcp_hashinfo);
  98
  99static  __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
 
 
 
 
 
 
 
 
 100{
 101	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
 102					  ip_hdr(skb)->saddr,
 103					  tcp_hdr(skb)->dest,
 104					  tcp_hdr(skb)->source);
 105}
 106
 107int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
 108{
 
 109	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
 110	struct tcp_sock *tp = tcp_sk(sk);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 111
 112	/* With PAWS, it is safe from the viewpoint
 113	   of data integrity. Even without PAWS it is safe provided sequence
 114	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.
 115
 116	   Actually, the idea is close to VJ's one, only timestamp cache is
 117	   held not per host, but per port pair and TW bucket is used as state
 118	   holder.
 119
 120	   If TW bucket has been already destroyed we fall back to VJ's scheme
 121	   and use initial timestamp retrieved from peer table.
 122	 */
 123	if (tcptw->tw_ts_recent_stamp &&
 124	    (!twp || (sysctl_tcp_tw_reuse &&
 125			     get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
 126		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
 127		if (tp->write_seq == 0)
 128			tp->write_seq = 1;
 129		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
 130		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 131		sock_hold(sktw);
 132		return 1;
 133	}
 134
 135	return 0;
 136}
 137EXPORT_SYMBOL_GPL(tcp_twsk_unique);
 138
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 139/* This will initiate an outgoing connection. */
 140int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 141{
 142	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
 143	struct inet_sock *inet = inet_sk(sk);
 144	struct tcp_sock *tp = tcp_sk(sk);
 145	__be16 orig_sport, orig_dport;
 146	__be32 daddr, nexthop;
 147	struct flowi4 *fl4;
 148	struct rtable *rt;
 149	int err;
 150	struct ip_options_rcu *inet_opt;
 
 151
 152	if (addr_len < sizeof(struct sockaddr_in))
 153		return -EINVAL;
 154
 155	if (usin->sin_family != AF_INET)
 156		return -EAFNOSUPPORT;
 157
 158	nexthop = daddr = usin->sin_addr.s_addr;
 159	inet_opt = rcu_dereference_protected(inet->inet_opt,
 160					     sock_owned_by_user(sk));
 161	if (inet_opt && inet_opt->opt.srr) {
 162		if (!daddr)
 163			return -EINVAL;
 164		nexthop = inet_opt->opt.faddr;
 165	}
 166
 167	orig_sport = inet->inet_sport;
 168	orig_dport = usin->sin_port;
 169	fl4 = &inet->cork.fl.u.ip4;
 170	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
 171			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
 172			      IPPROTO_TCP,
 173			      orig_sport, orig_dport, sk);
 174	if (IS_ERR(rt)) {
 175		err = PTR_ERR(rt);
 176		if (err == -ENETUNREACH)
 177			IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
 178		return err;
 179	}
 180
 181	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
 182		ip_rt_put(rt);
 183		return -ENETUNREACH;
 184	}
 185
 186	if (!inet_opt || !inet_opt->opt.srr)
 187		daddr = fl4->daddr;
 188
 189	if (!inet->inet_saddr)
 190		inet->inet_saddr = fl4->saddr;
 191	sk_rcv_saddr_set(sk, inet->inet_saddr);
 192
 193	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
 194		/* Reset inherited state */
 195		tp->rx_opt.ts_recent	   = 0;
 196		tp->rx_opt.ts_recent_stamp = 0;
 197		if (likely(!tp->repair))
 198			tp->write_seq	   = 0;
 199	}
 200
 201	if (tcp_death_row.sysctl_tw_recycle &&
 202	    !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
 203		tcp_fetch_timewait_stamp(sk, &rt->dst);
 204
 205	inet->inet_dport = usin->sin_port;
 206	sk_daddr_set(sk, daddr);
 207
 208	inet_csk(sk)->icsk_ext_hdr_len = 0;
 209	if (inet_opt)
 210		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
 211
 212	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;
 213
 214	/* Socket identity is still unknown (sport may be zero).
 215	 * However we set state to SYN-SENT and not releasing socket
 216	 * lock select source port, enter ourselves into the hash tables and
 217	 * complete initialization after this.
 218	 */
 219	tcp_set_state(sk, TCP_SYN_SENT);
 220	err = inet_hash_connect(&tcp_death_row, sk);
 221	if (err)
 222		goto failure;
 223
 224	sk_set_txhash(sk);
 225
 226	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
 227			       inet->inet_sport, inet->inet_dport, sk);
 228	if (IS_ERR(rt)) {
 229		err = PTR_ERR(rt);
 230		rt = NULL;
 231		goto failure;
 232	}
 233	/* OK, now commit destination to socket.  */
 234	sk->sk_gso_type = SKB_GSO_TCPV4;
 235	sk_setup_caps(sk, &rt->dst);
 
 
 
 
 
 
 
 
 
 
 
 
 
 236
 237	if (!tp->write_seq && likely(!tp->repair))
 238		tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
 239							   inet->inet_daddr,
 240							   inet->inet_sport,
 241							   usin->sin_port);
 242
 243	inet->inet_id = tp->write_seq ^ jiffies;
 
 
 
 244
 245	err = tcp_connect(sk);
 246
 247	rt = NULL;
 248	if (err)
 249		goto failure;
 250
 251	return 0;
 252
 253failure:
 254	/*
 255	 * This unhashes the socket and releases the local port,
 256	 * if necessary.
 257	 */
 258	tcp_set_state(sk, TCP_CLOSE);
 259	ip_rt_put(rt);
 260	sk->sk_route_caps = 0;
 261	inet->inet_dport = 0;
 262	return err;
 263}
 264EXPORT_SYMBOL(tcp_v4_connect);
 265
 266/*
 267 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
 268 * It can be called through tcp_release_cb() if socket was owned by user
 269 * at the time tcp_v4_err() was called to handle ICMP message.
 270 */
 271void tcp_v4_mtu_reduced(struct sock *sk)
 272{
 
 273	struct dst_entry *dst;
 274	struct inet_sock *inet = inet_sk(sk);
 275	u32 mtu = tcp_sk(sk)->mtu_info;
 276
 
 
 
 277	dst = inet_csk_update_pmtu(sk, mtu);
 278	if (!dst)
 279		return;
 280
 281	/* Something is about to be wrong... Remember soft error
 282	 * for the case, if this connection will not able to recover.
 283	 */
 284	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
 285		sk->sk_err_soft = EMSGSIZE;
 286
 287	mtu = dst_mtu(dst);
 288
 289	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
 290	    ip_sk_accept_pmtu(sk) &&
 291	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
 292		tcp_sync_mss(sk, mtu);
 293
 294		/* Resend the TCP packet because it's
 295		 * clear that the old packet has been
 296		 * dropped. This is the new "fast" path mtu
 297		 * discovery.
 298		 */
 299		tcp_simple_retransmit(sk);
 300	} /* else let the usual retransmit timer handle it */
 301}
 302EXPORT_SYMBOL(tcp_v4_mtu_reduced);
 303
 304static void do_redirect(struct sk_buff *skb, struct sock *sk)
 305{
 306	struct dst_entry *dst = __sk_dst_check(sk, 0);
 307
 308	if (dst)
 309		dst->ops->redirect(dst, sk, skb);
 310}
 311
 312
 313/* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
 314void tcp_req_err(struct sock *sk, u32 seq, bool abort)
 315{
 316	struct request_sock *req = inet_reqsk(sk);
 317	struct net *net = sock_net(sk);
 318
 319	/* ICMPs are not backlogged, hence we cannot get
 320	 * an established socket here.
 321	 */
 322	if (seq != tcp_rsk(req)->snt_isn) {
 323		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
 324	} else if (abort) {
 325		/*
 326		 * Still in SYN_RECV, just remove it silently.
 327		 * There is no good way to pass the error to the newly
 328		 * created socket, and POSIX does not want network
 329		 * errors returned from accept().
 330		 */
 331		inet_csk_reqsk_queue_drop(req->rsk_listener, req);
 332		NET_INC_STATS_BH(net, LINUX_MIB_LISTENDROPS);
 333	}
 334	reqsk_put(req);
 335}
 336EXPORT_SYMBOL(tcp_req_err);
 337
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 338/*
 339 * This routine is called by the ICMP module when it gets some
 340 * sort of error condition.  If err < 0 then the socket should
 341 * be closed and the error returned to the user.  If err > 0
 342 * it's just the icmp type << 8 | icmp code.  After adjustment
 343 * header points to the first 8 bytes of the tcp header.  We need
 344 * to find the appropriate port.
 345 *
 346 * The locking strategy used here is very "optimistic". When
 347 * someone else accesses the socket the ICMP is just dropped
 348 * and for some paths there is no check at all.
 349 * A more general error queue to queue errors for later handling
 350 * is probably better.
 351 *
 352 */
 353
 354void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
 355{
 356	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
 357	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
 358	struct inet_connection_sock *icsk;
 359	struct tcp_sock *tp;
 360	struct inet_sock *inet;
 361	const int type = icmp_hdr(icmp_skb)->type;
 362	const int code = icmp_hdr(icmp_skb)->code;
 363	struct sock *sk;
 364	struct sk_buff *skb;
 365	struct request_sock *fastopen;
 366	__u32 seq, snd_una;
 367	__u32 remaining;
 368	int err;
 369	struct net *net = dev_net(icmp_skb->dev);
 370
 371	sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
 372				       th->dest, iph->saddr, ntohs(th->source),
 373				       inet_iif(icmp_skb));
 374	if (!sk) {
 375		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
 376		return;
 377	}
 378	if (sk->sk_state == TCP_TIME_WAIT) {
 379		inet_twsk_put(inet_twsk(sk));
 380		return;
 381	}
 382	seq = ntohl(th->seq);
 383	if (sk->sk_state == TCP_NEW_SYN_RECV)
 384		return tcp_req_err(sk, seq,
 385				  type == ICMP_PARAMETERPROB ||
 386				  type == ICMP_TIME_EXCEEDED ||
 387				  (type == ICMP_DEST_UNREACH &&
 388				   (code == ICMP_NET_UNREACH ||
 389				    code == ICMP_HOST_UNREACH)));
 
 390
 391	bh_lock_sock(sk);
 392	/* If too many ICMPs get dropped on busy
 393	 * servers this needs to be solved differently.
 394	 * We do take care of PMTU discovery (RFC1191) special case :
 395	 * we can receive locally generated ICMP messages while socket is held.
 396	 */
 397	if (sock_owned_by_user(sk)) {
 398		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
 399			NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
 400	}
 401	if (sk->sk_state == TCP_CLOSE)
 402		goto out;
 403
 404	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
 405		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
 406		goto out;
 407	}
 408
 409	icsk = inet_csk(sk);
 410	tp = tcp_sk(sk);
 411	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
 412	fastopen = tp->fastopen_rsk;
 413	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
 414	if (sk->sk_state != TCP_LISTEN &&
 415	    !between(seq, snd_una, tp->snd_nxt)) {
 416		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
 417		goto out;
 418	}
 419
 420	switch (type) {
 421	case ICMP_REDIRECT:
 422		do_redirect(icmp_skb, sk);
 
 423		goto out;
 424	case ICMP_SOURCE_QUENCH:
 425		/* Just silently ignore these. */
 426		goto out;
 427	case ICMP_PARAMETERPROB:
 428		err = EPROTO;
 429		break;
 430	case ICMP_DEST_UNREACH:
 431		if (code > NR_ICMP_UNREACH)
 432			goto out;
 433
 434		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
 435			/* We are not interested in TCP_LISTEN and open_requests
 436			 * (SYN-ACKs send out by Linux are always <576bytes so
 437			 * they should go through unfragmented).
 438			 */
 439			if (sk->sk_state == TCP_LISTEN)
 440				goto out;
 441
 442			tp->mtu_info = info;
 443			if (!sock_owned_by_user(sk)) {
 444				tcp_v4_mtu_reduced(sk);
 445			} else {
 446				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
 447					sock_hold(sk);
 448			}
 449			goto out;
 450		}
 451
 452		err = icmp_err_convert[code].errno;
 453		/* check if icmp_skb allows revert of backoff
 454		 * (see draft-zimmermann-tcp-lcd) */
 455		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
 456			break;
 457		if (seq != tp->snd_una  || !icsk->icsk_retransmits ||
 458		    !icsk->icsk_backoff || fastopen)
 459			break;
 460
 461		if (sock_owned_by_user(sk))
 462			break;
 463
 464		icsk->icsk_backoff--;
 465		icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
 466					       TCP_TIMEOUT_INIT;
 467		icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
 468
 469		skb = tcp_write_queue_head(sk);
 470		BUG_ON(!skb);
 471
 472		remaining = icsk->icsk_rto -
 473			    min(icsk->icsk_rto,
 474				tcp_time_stamp - tcp_skb_timestamp(skb));
 475
 476		if (remaining) {
 477			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
 478						  remaining, TCP_RTO_MAX);
 479		} else {
 480			/* RTO revert clocked out retransmission.
 481			 * Will retransmit now */
 482			tcp_retransmit_timer(sk);
 483		}
 484
 485		break;
 486	case ICMP_TIME_EXCEEDED:
 487		err = EHOSTUNREACH;
 488		break;
 489	default:
 490		goto out;
 491	}
 492
 493	switch (sk->sk_state) {
 494	case TCP_SYN_SENT:
 495	case TCP_SYN_RECV:
 496		/* Only in fast or simultaneous open. If a fast open socket is
 497		 * is already accepted it is treated as a connected one below.
 498		 */
 499		if (fastopen && !fastopen->sk)
 500			break;
 501
 
 
 502		if (!sock_owned_by_user(sk)) {
 503			sk->sk_err = err;
 504
 505			sk->sk_error_report(sk);
 506
 507			tcp_done(sk);
 508		} else {
 509			sk->sk_err_soft = err;
 510		}
 511		goto out;
 512	}
 513
 514	/* If we've already connected we will keep trying
 515	 * until we time out, or the user gives up.
 516	 *
 517	 * rfc1122 4.2.3.9 allows to consider as hard errors
 518	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
 519	 * but it is obsoleted by pmtu discovery).
 520	 *
 521	 * Note, that in modern internet, where routing is unreliable
 522	 * and in each dark corner broken firewalls sit, sending random
 523	 * errors ordered by their masters even this two messages finally lose
 524	 * their original sense (even Linux sends invalid PORT_UNREACHs)
 525	 *
 526	 * Now we are in compliance with RFCs.
 527	 *							--ANK (980905)
 528	 */
 529
 530	inet = inet_sk(sk);
 531	if (!sock_owned_by_user(sk) && inet->recverr) {
 532		sk->sk_err = err;
 533		sk->sk_error_report(sk);
 534	} else	{ /* Only an error on timeout */
 535		sk->sk_err_soft = err;
 536	}
 537
 538out:
 539	bh_unlock_sock(sk);
 540	sock_put(sk);
 
 541}
 542
 543void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
 544{
 545	struct tcphdr *th = tcp_hdr(skb);
 546
 547	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 548		th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
 549		skb->csum_start = skb_transport_header(skb) - skb->head;
 550		skb->csum_offset = offsetof(struct tcphdr, check);
 551	} else {
 552		th->check = tcp_v4_check(skb->len, saddr, daddr,
 553					 csum_partial(th,
 554						      th->doff << 2,
 555						      skb->csum));
 556	}
 557}
 558
 559/* This routine computes an IPv4 TCP checksum. */
 560void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
 561{
 562	const struct inet_sock *inet = inet_sk(sk);
 563
 564	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
 565}
 566EXPORT_SYMBOL(tcp_v4_send_check);
 567
 568/*
 569 *	This routine will send an RST to the other tcp.
 570 *
 571 *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 572 *		      for reset.
 573 *	Answer: if a packet caused RST, it is not for a socket
 574 *		existing in our system, if it is matched to a socket,
 575 *		it is just duplicate segment or bug in other side's TCP.
 576 *		So that we build reply only basing on parameters
 577 *		arrived with segment.
 578 *	Exception: precedence violation. We do not implement it in any case.
 579 */
 580
 581static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
 582{
 583	const struct tcphdr *th = tcp_hdr(skb);
 584	struct {
 585		struct tcphdr th;
 586#ifdef CONFIG_TCP_MD5SIG
 587		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
 588#endif
 589	} rep;
 590	struct ip_reply_arg arg;
 591#ifdef CONFIG_TCP_MD5SIG
 592	struct tcp_md5sig_key *key = NULL;
 593	const __u8 *hash_location = NULL;
 594	unsigned char newhash[16];
 595	int genhash;
 596	struct sock *sk1 = NULL;
 597#endif
 
 
 598	struct net *net;
 599
 600	/* Never send a reset in response to a reset. */
 601	if (th->rst)
 602		return;
 603
 604	/* If sk not NULL, it means we did a successful lookup and incoming
 605	 * route had to be correct. prequeue might have dropped our dst.
 606	 */
 607	if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
 608		return;
 609
 610	/* Swap the send and the receive. */
 611	memset(&rep, 0, sizeof(rep));
 612	rep.th.dest   = th->source;
 613	rep.th.source = th->dest;
 614	rep.th.doff   = sizeof(struct tcphdr) / 4;
 615	rep.th.rst    = 1;
 616
 617	if (th->ack) {
 618		rep.th.seq = th->ack_seq;
 619	} else {
 620		rep.th.ack = 1;
 621		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
 622				       skb->len - (th->doff << 2));
 623	}
 624
 625	memset(&arg, 0, sizeof(arg));
 626	arg.iov[0].iov_base = (unsigned char *)&rep;
 627	arg.iov[0].iov_len  = sizeof(rep.th);
 628
 629	net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
 630#ifdef CONFIG_TCP_MD5SIG
 
 631	hash_location = tcp_parse_md5sig_option(th);
 632	if (sk && sk_fullsock(sk)) {
 633		key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
 634					&ip_hdr(skb)->saddr, AF_INET);
 
 
 
 
 
 
 
 635	} else if (hash_location) {
 
 
 
 
 
 636		/*
 637		 * active side is lost. Try to find listening socket through
 638		 * source port, and then find md5 key through listening socket.
 639		 * we are not loose security here:
 640		 * Incoming packet is checked with md5 hash with finding key,
 641		 * no RST generated if md5 hash doesn't match.
 642		 */
 643		sk1 = __inet_lookup_listener(net, &tcp_hashinfo, NULL, 0,
 644					     ip_hdr(skb)->saddr,
 645					     th->source, ip_hdr(skb)->daddr,
 646					     ntohs(th->source), inet_iif(skb));
 647		/* don't send rst if it can't find key */
 648		if (!sk1)
 649			return;
 650		rcu_read_lock();
 651		key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
 652					&ip_hdr(skb)->saddr, AF_INET);
 
 
 
 
 653		if (!key)
 654			goto release_sk1;
 
 655
 656		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
 657		if (genhash || memcmp(hash_location, newhash, 16) != 0)
 658			goto release_sk1;
 
 659	}
 660
 661	if (key) {
 662		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
 663				   (TCPOPT_NOP << 16) |
 664				   (TCPOPT_MD5SIG << 8) |
 665				   TCPOLEN_MD5SIG);
 666		/* Update length and the length the header thinks exists */
 667		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
 668		rep.th.doff = arg.iov[0].iov_len / 4;
 669
 670		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
 671				     key, ip_hdr(skb)->saddr,
 672				     ip_hdr(skb)->daddr, &rep.th);
 673	}
 674#endif
 675	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
 676				      ip_hdr(skb)->saddr, /* XXX */
 677				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
 678	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
 679	arg.flags = (sk && inet_sk_transparent(sk)) ? IP_REPLY_ARG_NOSRCCHECK : 0;
 680
 681	/* When the socket is gone, all binding information is lost and
 682	 * routing might fail. No choice here: if we force the input
 683	 * interface, we will misroute in the case of an asymmetric route.
 684	 */
 685	if (sk)
 686		arg.bound_dev_if = sk->sk_bound_dev_if;
 687
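	/* sk may actually be an inet_timewait_sock here; reading
	 * sk_bound_dev_if through struct sock is only safe because the field
	 * sits at the same offset in both layouts, which the BUILD_BUG_ON
	 * below asserts at compile time.
	 */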
 688	BUILD_BUG_ON(offsetof(struct sock, sk_bound_dev_if) !=
 689		     offsetof(struct inet_timewait_sock, tw_bound_dev_if));
 690
 691	arg.tos = ip_hdr(skb)->tos;
 692	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
 693			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
 694			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
 695			      &arg, arg.iov[0].iov_len);
 696
 697	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
 698	TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
 699
 700#ifdef CONFIG_TCP_MD5SIG
 701release_sk1:
 702	if (sk1) {
 703		rcu_read_unlock();
 704		sock_put(sk1);
 705	}
 706#endif
 707}
 708
 709/* The code below, which sends ACKs in the SYN-RECV and TIME-WAIT states
 710   outside of socket context, is certainly ugly. What can I do?
 711 */
 712
 713static void tcp_v4_send_ack(struct net *net,
 714			    struct sk_buff *skb, u32 seq, u32 ack,
 715			    u32 win, u32 tsval, u32 tsecr, int oif,
 716			    struct tcp_md5sig_key *key,
 717			    int reply_flags, u8 tos)
 718{
 719	const struct tcphdr *th = tcp_hdr(skb);
 720	struct {
 721		struct tcphdr th;
 722		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
 723#ifdef CONFIG_TCP_MD5SIG
 724			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
 725#endif
 726			];
 727	} rep;
 728	struct ip_reply_arg arg;
 729
 730	memset(&rep.th, 0, sizeof(struct tcphdr));
 731	memset(&arg, 0, sizeof(arg));
 732
 733	arg.iov[0].iov_base = (unsigned char *)&rep;
 734	arg.iov[0].iov_len  = sizeof(rep.th);
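	/* If the peer supplied a timestamp to echo, lay the option out as
	 * two NOPs, kind 8 (TCPOPT_TIMESTAMP) and length 10, then the 4-byte
	 * tsval and tsecr - TCPOLEN_TSTAMP_ALIGNED (12) bytes in all.
	 */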
 735	if (tsecr) {
 736		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
 737				   (TCPOPT_TIMESTAMP << 8) |
 738				   TCPOLEN_TIMESTAMP);
 739		rep.opt[1] = htonl(tsval);
 740		rep.opt[2] = htonl(tsecr);
 741		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
 742	}
 743
 744	/* Swap the send and the receive. */
 745	rep.th.dest    = th->source;
 746	rep.th.source  = th->dest;
 747	rep.th.doff    = arg.iov[0].iov_len / 4;
 748	rep.th.seq     = htonl(seq);
 749	rep.th.ack_seq = htonl(ack);
 750	rep.th.ack     = 1;
 751	rep.th.window  = htons(win);
 752
 753#ifdef CONFIG_TCP_MD5SIG
 754	if (key) {
 755		int offset = (tsecr) ? 3 : 0;
 756
 757		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
 758					  (TCPOPT_NOP << 16) |
 759					  (TCPOPT_MD5SIG << 8) |
 760					  TCPOLEN_MD5SIG);
 761		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
 762		rep.th.doff = arg.iov[0].iov_len/4;
 763
 764		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
 765				    key, ip_hdr(skb)->saddr,
 766				    ip_hdr(skb)->daddr, &rep.th);
 767	}
 768#endif
 769	arg.flags = reply_flags;
 770	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
 771				      ip_hdr(skb)->saddr, /* XXX */
 772				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
 773	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
 774	if (oif)
 775		arg.bound_dev_if = oif;
 776	arg.tos = tos;
 777	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
 778			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
 779			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
 780			      &arg, arg.iov[0].iov_len);
 781
 782	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
 783}
 784
 785static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
 786{
 787	struct inet_timewait_sock *tw = inet_twsk(sk);
 788	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
 789
 790	tcp_v4_send_ack(sock_net(sk), skb,
 791			tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
 792			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
 793			tcp_time_stamp + tcptw->tw_ts_offset,
 794			tcptw->tw_ts_recent,
 795			tw->tw_bound_dev_if,
 796			tcp_twsk_md5_key(tcptw),
 797			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
 798			tw->tw_tos
 799			);
 800
 801	inet_twsk_put(tw);
 802}
 803
 804static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
 805				  struct request_sock *req)
 806{
 807	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
 808	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
 809	 */
 810	u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
 811					     tcp_sk(sk)->snd_nxt;
 812
 813	tcp_v4_send_ack(sock_net(sk), skb, seq,
 814			tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd,
 815			tcp_time_stamp,
 816			req->ts_recent,
 817			0,
 818			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
 819					  AF_INET),
 820			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
 821			ip_hdr(skb)->tos);
 822}
 823
 824/*
 825 *	Send a SYN-ACK after having received a SYN.
 826 *	This still operates on a request_sock only, not on a full
 827 *	socket.
 828 */
 829static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
 830			      struct flowi *fl,
 831			      struct request_sock *req,
 832			      struct tcp_fastopen_cookie *foc,
 833				  bool attach_req)
 834{
 835	const struct inet_request_sock *ireq = inet_rsk(req);
 836	struct flowi4 fl4;
 837	int err = -1;
 838	struct sk_buff *skb;
 839
 840	/* First, grab a route. */
 841	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
 842		return -1;
 843
 844	skb = tcp_make_synack(sk, dst, req, foc, attach_req);
 845
 846	if (skb) {
 847		__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
 848
 849		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
 850					    ireq->ir_rmt_addr,
 851					    ireq->opt);
 852		err = net_xmit_eval(err);
 853	}
 854
 855	return err;
 856}
 857
 858/*
 859 *	IPv4 request_sock destructor.
 860 */
 861static void tcp_v4_reqsk_destructor(struct request_sock *req)
 862{
 863	kfree(inet_rsk(req)->opt);
 864}
 865
 866#ifdef CONFIG_TCP_MD5SIG
 867/*
 868 * RFC2385 MD5 checksumming requires a mapping of
 869 * IP address->MD5 Key.
 870 * We need to maintain these in the sk structure.
 871 */
 872
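/* A minimal userspace sketch of how a key ends up on this list, assuming a
 * connected IPv4 socket fd and a filled-in struct sockaddr_in peer_addr
 * (struct tcp_md5sig and TCP_MD5SIG come from <linux/tcp.h>; error handling
 * elided):
 *
 *	struct tcp_md5sig md5sig;
 *
 *	memset(&md5sig, 0, sizeof(md5sig));
 *	memcpy(&md5sig.tcpm_addr, &peer_addr, sizeof(peer_addr));
 *	md5sig.tcpm_keylen = strlen("secret");
 *	memcpy(md5sig.tcpm_key, "secret", md5sig.tcpm_keylen);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5sig, sizeof(md5sig));
 *
 * tcp_v4_parse_md5_keys() below decodes this and calls tcp_md5_do_add()
 * (or tcp_md5_do_del() for a zero-length key).
 */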
 873/* Find the key structure for an address. */
 874struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
 875					 const union tcp_md5_addr *addr,
 876					 int family)
 877{
 878	const struct tcp_sock *tp = tcp_sk(sk);
 879	struct tcp_md5sig_key *key;
 880	unsigned int size = sizeof(struct in_addr);
 881	const struct tcp_md5sig_info *md5sig;
 882
 883	/* caller either holds rcu_read_lock() or socket lock */
 884	md5sig = rcu_dereference_check(tp->md5sig_info,
 885				       sock_owned_by_user(sk) ||
 886				       lockdep_is_held((spinlock_t *)&sk->sk_lock.slock));
 887	if (!md5sig)
 888		return NULL;
 889#if IS_ENABLED(CONFIG_IPV6)
 890	if (family == AF_INET6)
 891		size = sizeof(struct in6_addr);
 892#endif
 893	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
 894		if (key->family != family)
 895			continue;
 896		if (!memcmp(&key->addr, addr, size))
 897			return key;
 898	}
 899	return NULL;
 900}
 901EXPORT_SYMBOL(tcp_md5_do_lookup);
 902
 903struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
 904					 const struct sock *addr_sk)
 905{
 906	const union tcp_md5_addr *addr;
 907
 908	addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
 909	return tcp_md5_do_lookup(sk, addr, AF_INET);
 910}
 911EXPORT_SYMBOL(tcp_v4_md5_lookup);
 912
 913/* This can be called on a newly created socket, from other files */
 914int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
 915		   int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
 916{
 917	/* Add the key to the list */
 918	struct tcp_md5sig_key *key;
 919	struct tcp_sock *tp = tcp_sk(sk);
 920	struct tcp_md5sig_info *md5sig;
 921
 922	key = tcp_md5_do_lookup(sk, addr, family);
 923	if (key) {
 924		/* Pre-existing entry - just update that one. */
 925		memcpy(key->key, newkey, newkeylen);
 926		key->keylen = newkeylen;
 927		return 0;
 928	}
 929
 930	md5sig = rcu_dereference_protected(tp->md5sig_info,
 931					   sock_owned_by_user(sk) ||
 932					   lockdep_is_held(&sk->sk_lock.slock));
 933	if (!md5sig) {
 934		md5sig = kmalloc(sizeof(*md5sig), gfp);
 935		if (!md5sig)
 936			return -ENOMEM;
 937
 938		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
 939		INIT_HLIST_HEAD(&md5sig->head);
 940		rcu_assign_pointer(tp->md5sig_info, md5sig);
 941	}
 942
 943	key = sock_kmalloc(sk, sizeof(*key), gfp);
 944	if (!key)
 945		return -ENOMEM;
 946	if (!tcp_alloc_md5sig_pool()) {
 947		sock_kfree_s(sk, key, sizeof(*key));
 948		return -ENOMEM;
 949	}
 950
 951	memcpy(key->key, newkey, newkeylen);
 952	key->keylen = newkeylen;
 953	key->family = family;
 954	memcpy(&key->addr, addr,
 955	       (family == AF_INET6) ? sizeof(struct in6_addr) :
 956				      sizeof(struct in_addr));
 957	hlist_add_head_rcu(&key->node, &md5sig->head);
 958	return 0;
 959}
 960EXPORT_SYMBOL(tcp_md5_do_add);
 961
 962int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
 963{
 964	struct tcp_md5sig_key *key;
 965
 966	key = tcp_md5_do_lookup(sk, addr, family);
 967	if (!key)
 968		return -ENOENT;
 969	hlist_del_rcu(&key->node);
 970	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
 971	kfree_rcu(key, rcu);
 972	return 0;
 973}
 974EXPORT_SYMBOL(tcp_md5_do_del);
 975
 976static void tcp_clear_md5_list(struct sock *sk)
 977{
 978	struct tcp_sock *tp = tcp_sk(sk);
 979	struct tcp_md5sig_key *key;
 980	struct hlist_node *n;
 981	struct tcp_md5sig_info *md5sig;
 982
 983	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
 984
 985	hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
 986		hlist_del_rcu(&key->node);
 987		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
 988		kfree_rcu(key, rcu);
 989	}
 990}
 991
 992static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
 993				 int optlen)
 994{
 995	struct tcp_md5sig cmd;
 996	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
 997
 998	if (optlen < sizeof(cmd))
 999		return -EINVAL;
1000
1001	if (copy_from_user(&cmd, optval, sizeof(cmd)))
1002		return -EFAULT;
1003
1004	if (sin->sin_family != AF_INET)
1005		return -EINVAL;
1006
1007	if (!cmd.tcpm_keylen)
1008		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1009				      AF_INET);
1010
1011	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
1012		return -EINVAL;
1013
1014	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1015			      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
1016			      GFP_KERNEL);
1017}
1018
1019static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
1020					__be32 daddr, __be32 saddr, int nbytes)
1021{
1022	struct tcp4_pseudohdr *bp;
1023	struct scatterlist sg;
1024
1025	bp = &hp->md5_blk.ip4;
1026
1027	/*
1028	 * 1. the TCP pseudo-header (in the order: source IP address,
1029	 * destination IP address, zero-padded protocol number, and
1030	 * segment length)
1031	 */
1032	bp->saddr = saddr;
1033	bp->daddr = daddr;
1034	bp->pad = 0;
1035	bp->protocol = IPPROTO_TCP;
1036	bp->len = cpu_to_be16(nbytes);
1037
1038	sg_init_one(&sg, bp, sizeof(*bp));
1039	ahash_request_set_crypt(hp->md5_req, &sg, NULL, sizeof(*bp));
1040	return crypto_ahash_update(hp->md5_req);
1041}
1042
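/* The RFC 2385 digest is computed over, in order: the pseudo-header built
 * above, the fixed TCP header with its checksum field zeroed, the segment
 * payload (skb variant only), and finally the key itself. The two helpers
 * below differ only in whether payload data is included.
 */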
1043static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
1044			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
1045{
1046	struct tcp_md5sig_pool *hp;
1047	struct ahash_request *req;
1048
1049	hp = tcp_get_md5sig_pool();
1050	if (!hp)
1051		goto clear_hash_noput;
1052	req = hp->md5_req;
1053
1054	if (crypto_ahash_init(req))
1055		goto clear_hash;
1056	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
1057		goto clear_hash;
1058	if (tcp_md5_hash_header(hp, th))
1059		goto clear_hash;
1060	if (tcp_md5_hash_key(hp, key))
1061		goto clear_hash;
1062	ahash_request_set_crypt(req, NULL, md5_hash, 0);
1063	if (crypto_ahash_final(req))
1064		goto clear_hash;
1065
1066	tcp_put_md5sig_pool();
1067	return 0;
1068
1069clear_hash:
1070	tcp_put_md5sig_pool();
1071clear_hash_noput:
1072	memset(md5_hash, 0, 16);
1073	return 1;
1074}
1075
1076int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
1077			const struct sock *sk,
1078			const struct sk_buff *skb)
1079{
1080	struct tcp_md5sig_pool *hp;
1081	struct ahash_request *req;
1082	const struct tcphdr *th = tcp_hdr(skb);
1083	__be32 saddr, daddr;
1084
1085	if (sk) { /* valid for establish/request sockets */
1086		saddr = sk->sk_rcv_saddr;
1087		daddr = sk->sk_daddr;
1088	} else {
1089		const struct iphdr *iph = ip_hdr(skb);
1090		saddr = iph->saddr;
1091		daddr = iph->daddr;
1092	}
1093
1094	hp = tcp_get_md5sig_pool();
1095	if (!hp)
1096		goto clear_hash_noput;
1097	req = hp->md5_req;
1098
1099	if (crypto_ahash_init(req))
1100		goto clear_hash;
1101
1102	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
1103		goto clear_hash;
1104	if (tcp_md5_hash_header(hp, th))
1105		goto clear_hash;
1106	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
1107		goto clear_hash;
1108	if (tcp_md5_hash_key(hp, key))
1109		goto clear_hash;
1110	ahash_request_set_crypt(req, NULL, md5_hash, 0);
1111	if (crypto_ahash_final(req))
1112		goto clear_hash;
1113
1114	tcp_put_md5sig_pool();
1115	return 0;
1116
1117clear_hash:
1118	tcp_put_md5sig_pool();
1119clear_hash_noput:
1120	memset(md5_hash, 0, 16);
1121	return 1;
1122}
1123EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
1124
1125#endif
1126
1127/* Called with rcu_read_lock() */
1128static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
1129				    const struct sk_buff *skb)
1130{
1131#ifdef CONFIG_TCP_MD5SIG
1132	/*
1133	 * This gets called for each TCP segment that arrives,
1134	 * so we want to be efficient.
1135	 * We have 3 drop cases:
1136	 * o No MD5 hash when one is expected.
1137	 * o An MD5 hash when we're not expecting one.
1138	 * o An MD5 hash that is wrong.
1139	 */
1140	const __u8 *hash_location = NULL;
1141	struct tcp_md5sig_key *hash_expected;
1142	const struct iphdr *iph = ip_hdr(skb);
1143	const struct tcphdr *th = tcp_hdr(skb);
1144	int genhash;
1145	unsigned char newhash[16];
1146
1147	hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
1148					  AF_INET);
1149	hash_location = tcp_parse_md5sig_option(th);
1150
1151	/* We've parsed the options - do we have a hash? */
1152	if (!hash_expected && !hash_location)
1153		return false;
1154
1155	if (hash_expected && !hash_location) {
1156		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
1157		return true;
1158	}
1159
1160	if (!hash_expected && hash_location) {
1161		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
1162		return true;
1163	}
1164
1165	/* Okay, so this is hash_expected and hash_location -
1166	 * so we need to calculate the checksum.
1167	 */
1168	genhash = tcp_v4_md5_hash_skb(newhash,
1169				      hash_expected,
1170				      NULL, skb);
1171
1172	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
1173		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
1174				     &iph->saddr, ntohs(th->source),
1175				     &iph->daddr, ntohs(th->dest),
1176				     genhash ? " tcp_v4_calc_md5_hash failed"
1177				     : "");
1178		return true;
1179	}
1180	return false;
1181#endif
1182	return false;
1183}
1184
1185static void tcp_v4_init_req(struct request_sock *req,
1186			    const struct sock *sk_listener,
1187			    struct sk_buff *skb)
1188{
1189	struct inet_request_sock *ireq = inet_rsk(req);
1190
1191	sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
1192	sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
1193	ireq->no_srccheck = inet_sk(sk_listener)->transparent;
1194	ireq->opt = tcp_v4_save_options(skb);
1195}
1196
1197static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
1198					  struct flowi *fl,
1199					  const struct request_sock *req,
1200					  bool *strict)
1201{
1202	struct dst_entry *dst = inet_csk_route_req(sk, &fl->u.ip4, req);
1203
1204	if (strict) {
1205		if (fl->u.ip4.daddr == inet_rsk(req)->ir_rmt_addr)
1206			*strict = true;
1207		else
1208			*strict = false;
1209	}
1210
1211	return dst;
1212}
1213
1214struct request_sock_ops tcp_request_sock_ops __read_mostly = {
1215	.family		=	PF_INET,
1216	.obj_size	=	sizeof(struct tcp_request_sock),
1217	.rtx_syn_ack	=	tcp_rtx_synack,
1218	.send_ack	=	tcp_v4_reqsk_send_ack,
1219	.destructor	=	tcp_v4_reqsk_destructor,
1220	.send_reset	=	tcp_v4_send_reset,
1221	.syn_ack_timeout =	tcp_syn_ack_timeout,
1222};
1223
1224static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1225	.mss_clamp	=	TCP_MSS_DEFAULT,
1226#ifdef CONFIG_TCP_MD5SIG
1227	.req_md5_lookup	=	tcp_v4_md5_lookup,
1228	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
1229#endif
1230	.init_req	=	tcp_v4_init_req,
1231#ifdef CONFIG_SYN_COOKIES
1232	.cookie_init_seq =	cookie_v4_init_sequence,
1233#endif
1234	.route_req	=	tcp_v4_route_req,
1235	.init_seq	=	tcp_v4_init_sequence,
1236	.send_synack	=	tcp_v4_send_synack,
1237};
1238
1239int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1240{
1241	/* Never answer SYNs sent to broadcast or multicast addresses */
1242	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
1243		goto drop;
1244
1245	return tcp_conn_request(&tcp_request_sock_ops,
1246				&tcp_request_sock_ipv4_ops, sk, skb);
1247
1248drop:
1249	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1250	return 0;
1251}
1252EXPORT_SYMBOL(tcp_v4_conn_request);
1253
1254
1255/*
1256 * The three-way handshake has completed - we got a valid ACK for our
1257 * SYN-ACK - now create the new socket.
1258 */
1259struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
1260				  struct request_sock *req,
1261				  struct dst_entry *dst,
1262				  struct request_sock *req_unhash,
1263				  bool *own_req)
1264{
1265	struct inet_request_sock *ireq;
1266	struct inet_sock *newinet;
1267	struct tcp_sock *newtp;
1268	struct sock *newsk;
1269#ifdef CONFIG_TCP_MD5SIG
1270	struct tcp_md5sig_key *key;
1271#endif
1272	struct ip_options_rcu *inet_opt;
1273
1274	if (sk_acceptq_is_full(sk))
1275		goto exit_overflow;
1276
1277	newsk = tcp_create_openreq_child(sk, req, skb);
1278	if (!newsk)
1279		goto exit_nonewsk;
1280
1281	newsk->sk_gso_type = SKB_GSO_TCPV4;
1282	inet_sk_rx_dst_set(newsk, skb);
1283
1284	newtp		      = tcp_sk(newsk);
1285	newinet		      = inet_sk(newsk);
1286	ireq		      = inet_rsk(req);
1287	sk_daddr_set(newsk, ireq->ir_rmt_addr);
1288	sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
1289	newsk->sk_bound_dev_if = ireq->ir_iif;
1290	newinet->inet_saddr	      = ireq->ir_loc_addr;
1291	inet_opt	      = ireq->opt;
1292	rcu_assign_pointer(newinet->inet_opt, inet_opt);
1293	ireq->opt	      = NULL;
1294	newinet->mc_index     = inet_iif(skb);
1295	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
1296	newinet->rcv_tos      = ip_hdr(skb)->tos;
1297	inet_csk(newsk)->icsk_ext_hdr_len = 0;
1298	if (inet_opt)
1299		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
1300	newinet->inet_id = newtp->write_seq ^ jiffies;
1301
1302	if (!dst) {
1303		dst = inet_csk_route_child_sock(sk, newsk, req);
1304		if (!dst)
1305			goto put_and_exit;
1306	} else {
1307		/* syncookie case: see the end of cookie_v4_check() */
1308	}
1309	sk_setup_caps(newsk, dst);
1310
1311	tcp_ca_openreq_child(newsk, dst);
1312
1313	tcp_sync_mss(newsk, dst_mtu(dst));
1314	newtp->advmss = dst_metric_advmss(dst);
1315	if (tcp_sk(sk)->rx_opt.user_mss &&
1316	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1317		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1318
1319	tcp_initialize_rcv_mss(newsk);
1320
1321#ifdef CONFIG_TCP_MD5SIG
1322	/* Copy over the MD5 key from the original socket */
1323	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
1324				AF_INET);
1325	if (key) {
1326		/*
1327		 * We're using one, so create a matching key
1328		 * on the newsk structure. If we fail to get
1329		 * memory, then we end up not copying the key
1330		 * across. Shucks.
1331		 */
1332		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
1333			       AF_INET, key->key, key->keylen, GFP_ATOMIC);
1334		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
1335	}
1336#endif
1337
1338	if (__inet_inherit_port(sk, newsk) < 0)
1339		goto put_and_exit;
1340	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
1341	if (*own_req)
1342		tcp_move_syn(newtp, req);
1343
1344	return newsk;
1345
1346exit_overflow:
1347	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1348exit_nonewsk:
1349	dst_release(dst);
1350exit:
1351	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1352	return NULL;
1353put_and_exit:
1354	inet_csk_prepare_forced_close(newsk);
1355	tcp_done(newsk);
1356	goto exit;
1357}
1358EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
1359
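/* Under SYN-flood we keep no request sock around; the connection state is
 * encoded in the sequence number sent with the SYN-ACK. When the final ACK
 * of the handshake arrives on a listener (!th->syn below), cookie_v4_check()
 * validates it and, on success, returns a freshly built child socket.
 */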
1360static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
1361{
1362#ifdef CONFIG_SYN_COOKIES
1363	const struct tcphdr *th = tcp_hdr(skb);
1364
1365	if (!th->syn)
1366		sk = cookie_v4_check(sk, skb);
1367#endif
1368	return sk;
1369}
1370
1371/* The socket must have its spinlock held when we get
1372 * here, unless it is a TCP_LISTEN socket.
1373 *
1374 * We have a potential double-lock case here, so even when
1375 * doing backlog processing we use the BH locking scheme.
1376 * This is because we cannot sleep with the original spinlock
1377 * held.
1378 */
1379int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1380{
1381	struct sock *rsk;
1382
1383	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1384		struct dst_entry *dst = sk->sk_rx_dst;
1385
1386		sock_rps_save_rxhash(sk, skb);
1387		sk_mark_napi_id(sk, skb);
1388		if (dst) {
1389			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1390			    !dst->ops->check(dst, 0)) {
1391				dst_release(dst);
1392				sk->sk_rx_dst = NULL;
1393			}
1394		}
1395		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
1396		return 0;
1397	}
1398
1399	if (tcp_checksum_complete(skb))
1400		goto csum_err;
1401
1402	if (sk->sk_state == TCP_LISTEN) {
1403		struct sock *nsk = tcp_v4_cookie_check(sk, skb);
1404
1405		if (!nsk)
1406			goto discard;
1407		if (nsk != sk) {
1408			sock_rps_save_rxhash(nsk, skb);
1409			sk_mark_napi_id(nsk, skb);
1410			if (tcp_child_process(sk, nsk, skb)) {
1411				rsk = nsk;
1412				goto reset;
1413			}
1414			return 0;
1415		}
1416	} else
1417		sock_rps_save_rxhash(sk, skb);
1418
1419	if (tcp_rcv_state_process(sk, skb)) {
1420		rsk = sk;
1421		goto reset;
1422	}
1423	return 0;
1424
1425reset:
1426	tcp_v4_send_reset(rsk, skb);
1427discard:
1428	kfree_skb(skb);
1429	/* Be careful here. If this function gets more complicated and
1430	 * gcc suffers from register pressure on the x86, sk (in %ebx)
1431	 * might be destroyed here. This current version compiles correctly,
1432	 * but you have been warned.
1433	 */
1434	return 0;
1435
1436csum_err:
1437	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
1438	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1439	goto discard;
1440}
1441EXPORT_SYMBOL(tcp_v4_do_rcv);
1442
1443void tcp_v4_early_demux(struct sk_buff *skb)
1444{
1445	const struct iphdr *iph;
1446	const struct tcphdr *th;
1447	struct sock *sk;
1448
1449	if (skb->pkt_type != PACKET_HOST)
1450		return;
1451
1452	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1453		return;
1454
1455	iph = ip_hdr(skb);
1456	th = tcp_hdr(skb);
1457
1458	if (th->doff < sizeof(struct tcphdr) / 4)
1459		return;
1460
1461	sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1462				       iph->saddr, th->source,
1463				       iph->daddr, ntohs(th->dest),
1464				       skb->skb_iif);
1465	if (sk) {
1466		skb->sk = sk;
1467		skb->destructor = sock_edemux;
1468		if (sk_fullsock(sk)) {
1469			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
1470
1471			if (dst)
1472				dst = dst_check(dst, 0);
1473			if (dst &&
1474			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1475				skb_dst_set_noref(skb, dst);
1476		}
1477	}
1478}
1479
1480/* The packet is added to a VJ-style prequeue for processing in process
1481 * context, if a reader task is waiting. Apparently, this exciting
1482 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
1483 * failed somewhere. Latency? Burstiness? Well, at least now we will
1484 * see why it failed. 8)8)				  --ANK
1485 *
1486 */
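/* The idea: rather than doing full TCP processing in softirq context, park
 * the segment on tp->ucopy.prequeue and wake the blocked reader, so that
 * the expensive work (checksumming, copying to userspace) runs in process
 * context on the reader's own CPU.
 */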
1487bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
1488{
1489	struct tcp_sock *tp = tcp_sk(sk);
1490
1491	if (sysctl_tcp_low_latency || !tp->ucopy.task)
1492		return false;
1493
1494	if (skb->len <= tcp_hdrlen(skb) &&
1495	    skb_queue_len(&tp->ucopy.prequeue) == 0)
1496		return false;
1497
1498	/* Before escaping RCU protected region, we need to take care of skb
1499	 * dst. Prequeue is only enabled for established sockets.
1500	 * For such sockets, we might need the skb dst only to set sk->sk_rx_dst.
1501	 * Instead of doing a full sk_rx_dst validity check here, let's perform
1502	 * an optimistic check.
1503	 */
1504	if (likely(sk->sk_rx_dst))
1505		skb_dst_drop(skb);
1506	else
1507		skb_dst_force_safe(skb);
1508
1509	__skb_queue_tail(&tp->ucopy.prequeue, skb);
1510	tp->ucopy.memory += skb->truesize;
1511	if (tp->ucopy.memory > sk->sk_rcvbuf) {
1512		struct sk_buff *skb1;
1513
1514		BUG_ON(sock_owned_by_user(sk));
1515
1516		while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
1517			sk_backlog_rcv(sk, skb1);
1518			NET_INC_STATS_BH(sock_net(sk),
1519					 LINUX_MIB_TCPPREQUEUEDROPPED);
1520		}
1521
1522		tp->ucopy.memory = 0;
1523	} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
1524		wake_up_interruptible_sync_poll(sk_sleep(sk),
1525					   POLLIN | POLLRDNORM | POLLRDBAND);
1526		if (!inet_csk_ack_scheduled(sk))
1527			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
1528						  (3 * tcp_rto_min(sk)) / 4,
1529						  TCP_RTO_MAX);
1530	}
1531	return true;
1532}
1533EXPORT_SYMBOL(tcp_prequeue);
1534
1535/*
1536 *	From tcp_input.c
1537 */
1538
1539int tcp_v4_rcv(struct sk_buff *skb)
1540{
1541	const struct iphdr *iph;
1542	const struct tcphdr *th;
1543	struct sock *sk;
1544	int ret;
1545	struct net *net = dev_net(skb->dev);
1546
1547	if (skb->pkt_type != PACKET_HOST)
1548		goto discard_it;
1549
1550	/* Count it even if it's bad */
1551	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1552
1553	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1554		goto discard_it;
1555
1556	th = tcp_hdr(skb);
1557
1558	if (th->doff < sizeof(struct tcphdr) / 4)
1559		goto bad_packet;
1560	if (!pskb_may_pull(skb, th->doff * 4))
1561		goto discard_it;
1562
1563	/* An explanation is required here, I think.
1564	 * Packet length and doff are validated by header prediction,
1565	 * provided the case of th->doff == 0 is eliminated.
1566	 * So, we defer the checks. */
1567
1568	if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
1569		goto csum_error;
1570
1571	th = tcp_hdr(skb);
1572	iph = ip_hdr(skb);
1573	/* This is tricky: we move the IPCB to its correct location inside TCP_SKB_CB().
1574	 * barrier() makes sure the compiler won't play aliasing games.
1575	 */
1576	memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
1577		sizeof(struct inet_skb_parm));
1578	barrier();
1579
1580	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1581	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1582				    skb->len - th->doff * 4);
1583	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1584	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1585	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1586	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
1587	TCP_SKB_CB(skb)->sacked	 = 0;
1588
1589lookup:
1590	sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source,
1591			       th->dest);
1592	if (!sk)
1593		goto no_tcp_socket;
1594
1595process:
1596	if (sk->sk_state == TCP_TIME_WAIT)
1597		goto do_time_wait;
1598
1599	if (sk->sk_state == TCP_NEW_SYN_RECV) {
1600		struct request_sock *req = inet_reqsk(sk);
1601		struct sock *nsk;
1602
1603		sk = req->rsk_listener;
1604		if (unlikely(tcp_v4_inbound_md5_hash(sk, skb))) {
1605			reqsk_put(req);
1606			goto discard_it;
1607		}
1608		if (unlikely(sk->sk_state != TCP_LISTEN)) {
1609			inet_csk_reqsk_queue_drop_and_put(sk, req);
1610			goto lookup;
1611		}
1612		sock_hold(sk);
1613		nsk = tcp_check_req(sk, skb, req, false);
1614		if (!nsk) {
1615			reqsk_put(req);
1616			goto discard_and_relse;
1617		}
1618		if (nsk == sk) {
1619			reqsk_put(req);
1620		} else if (tcp_child_process(sk, nsk, skb)) {
1621			tcp_v4_send_reset(nsk, skb);
1622			goto discard_and_relse;
1623		} else {
1624			sock_put(sk);
1625			return 0;
1626		}
1627	}
1628	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
1629		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1630		goto discard_and_relse;
1631	}
1632
1633	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1634		goto discard_and_relse;
1635
1636	if (tcp_v4_inbound_md5_hash(sk, skb))
1637		goto discard_and_relse;
1638
1639	nf_reset(skb);
1640
1641	if (sk_filter(sk, skb))
1642		goto discard_and_relse;
1643
1644	skb->dev = NULL;
1645
1646	if (sk->sk_state == TCP_LISTEN) {
1647		ret = tcp_v4_do_rcv(sk, skb);
1648		goto put_and_return;
1649	}
1650
1651	sk_incoming_cpu_update(sk);
1652
1653	bh_lock_sock_nested(sk);
1654	tcp_segs_in(tcp_sk(sk), skb);
1655	ret = 0;
1656	if (!sock_owned_by_user(sk)) {
1657		if (!tcp_prequeue(sk, skb))
1658			ret = tcp_v4_do_rcv(sk, skb);
1659	} else if (unlikely(sk_add_backlog(sk, skb,
1660					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
1661		bh_unlock_sock(sk);
1662		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1663		goto discard_and_relse;
1664	}
1665	bh_unlock_sock(sk);
1666
1667put_and_return:
1668	sock_put(sk);
1669
1670	return ret;
1671
1672no_tcp_socket:
1673	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1674		goto discard_it;
1675
1676	if (tcp_checksum_complete(skb)) {
1677csum_error:
1678		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
1679bad_packet:
1680		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1681	} else {
1682		tcp_v4_send_reset(NULL, skb);
1683	}
1684
1685discard_it:
1686	/* Discard frame. */
1687	kfree_skb(skb);
1688	return 0;
1689
1690discard_and_relse:
1691	sock_put(sk);
1692	goto discard_it;
1693
1694do_time_wait:
1695	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1696		inet_twsk_put(inet_twsk(sk));
1697		goto discard_it;
1698	}
1699
1700	if (tcp_checksum_complete(skb)) {
1701		inet_twsk_put(inet_twsk(sk));
1702		goto csum_error;
1703	}
1704	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1705	case TCP_TW_SYN: {
1706		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
1707							&tcp_hashinfo, skb,
1708							__tcp_hdrlen(th),
1709							iph->saddr, th->source,
1710							iph->daddr, th->dest,
1711							inet_iif(skb));
1712		if (sk2) {
1713			inet_twsk_deschedule_put(inet_twsk(sk));
1714			sk = sk2;
1715			goto process;
1716		}
1717		/* Fall through to ACK */
1718	}
1719	case TCP_TW_ACK:
1720		tcp_v4_timewait_ack(sk, skb);
1721		break;
1722	case TCP_TW_RST:
1723		tcp_v4_send_reset(sk, skb);
1724		inet_twsk_deschedule_put(inet_twsk(sk));
1725		goto discard_it;
1726	case TCP_TW_SUCCESS:;
1727	}
1728	goto discard_it;
1729}
1730
1731static struct timewait_sock_ops tcp_timewait_sock_ops = {
1732	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
1733	.twsk_unique	= tcp_twsk_unique,
1734	.twsk_destructor= tcp_twsk_destructor,
1735};
1736
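/* Cache the input route on the socket, together with the ingress ifindex,
 * so that tcp_v4_early_demux() and the tcp_v4_do_rcv() fast path can reuse
 * it instead of performing a route lookup for every packet.
 */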
1737void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
1738{
1739	struct dst_entry *dst = skb_dst(skb);
1740
1741	if (dst && dst_hold_safe(dst)) {
1742		sk->sk_rx_dst = dst;
1743		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
1744	}
1745}
1746EXPORT_SYMBOL(inet_sk_rx_dst_set);
1747
1748const struct inet_connection_sock_af_ops ipv4_specific = {
1749	.queue_xmit	   = ip_queue_xmit,
1750	.send_check	   = tcp_v4_send_check,
1751	.rebuild_header	   = inet_sk_rebuild_header,
1752	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
1753	.conn_request	   = tcp_v4_conn_request,
1754	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
1755	.net_header_len	   = sizeof(struct iphdr),
1756	.setsockopt	   = ip_setsockopt,
1757	.getsockopt	   = ip_getsockopt,
1758	.addr2sockaddr	   = inet_csk_addr2sockaddr,
1759	.sockaddr_len	   = sizeof(struct sockaddr_in),
1760	.bind_conflict	   = inet_csk_bind_conflict,
1761#ifdef CONFIG_COMPAT
1762	.compat_setsockopt = compat_ip_setsockopt,
1763	.compat_getsockopt = compat_ip_getsockopt,
1764#endif
1765	.mtu_reduced	   = tcp_v4_mtu_reduced,
1766};
1767EXPORT_SYMBOL(ipv4_specific);
1768
1769#ifdef CONFIG_TCP_MD5SIG
1770static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
1771	.md5_lookup		= tcp_v4_md5_lookup,
1772	.calc_md5_hash		= tcp_v4_md5_hash_skb,
1773	.md5_parse		= tcp_v4_parse_md5_keys,
1774};
1775#endif
1776
1777/* NOTE: A lot of things are set to zero explicitly by the call to
1778 *       sk_alloc(), so they need not be done here.
1779 */
1780static int tcp_v4_init_sock(struct sock *sk)
1781{
1782	struct inet_connection_sock *icsk = inet_csk(sk);
1783
1784	tcp_init_sock(sk);
1785
1786	icsk->icsk_af_ops = &ipv4_specific;
1787
1788#ifdef CONFIG_TCP_MD5SIG
1789	tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
1790#endif
1791
1792	return 0;
1793}
1794
1795void tcp_v4_destroy_sock(struct sock *sk)
1796{
1797	struct tcp_sock *tp = tcp_sk(sk);
1798
1799	tcp_clear_xmit_timers(sk);
1800
1801	tcp_cleanup_congestion_control(sk);
1802
1803	/* Clean up the write buffer. */
1804	tcp_write_queue_purge(sk);
1805
1806	/* Clean up our, hopefully empty, out_of_order_queue. */
1807	__skb_queue_purge(&tp->out_of_order_queue);
1808
1809#ifdef CONFIG_TCP_MD5SIG
1810	/* Clean up the MD5 key list, if any */
1811	if (tp->md5sig_info) {
1812		tcp_clear_md5_list(sk);
1813		kfree_rcu(tp->md5sig_info, rcu);
1814		tp->md5sig_info = NULL;
1815	}
1816#endif
1817
1818	/* Clean up the prequeue; it really should be empty. */
1819	__skb_queue_purge(&tp->ucopy.prequeue);
1820
1821	/* Clean up a referenced TCP bind bucket. */
1822	if (inet_csk(sk)->icsk_bind_hash)
1823		inet_put_port(sk);
1824
1825	BUG_ON(tp->fastopen_rsk);
1826
1827	/* In case the socket was aborted during the connect operation */
1828	tcp_free_fastopen_req(tp);
1829	tcp_saved_syn_free(tp);
1830
1831	sk_sockets_allocated_dec(sk);
1832
1833	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
1834		sock_release_memcg(sk);
1835}
1836EXPORT_SYMBOL(tcp_v4_destroy_sock);
1837
1838#ifdef CONFIG_PROC_FS
1839/* Proc filesystem TCP sock list dumping. */
1840
1841/*
1842 * Get the next listener socket following cur. If cur is NULL, get the
1843 * first socket, starting from the bucket given in st->bucket; when
1844 * st->bucket is zero, the very first socket in the hash table is returned.
1845 */
1846static void *listening_get_next(struct seq_file *seq, void *cur)
1847{
1848	struct inet_connection_sock *icsk;
1849	struct hlist_nulls_node *node;
1850	struct sock *sk = cur;
1851	struct inet_listen_hashbucket *ilb;
1852	struct tcp_iter_state *st = seq->private;
1853	struct net *net = seq_file_net(seq);
1854
1855	if (!sk) {
1856		ilb = &tcp_hashinfo.listening_hash[st->bucket];
1857		spin_lock_bh(&ilb->lock);
1858		sk = sk_nulls_head(&ilb->head);
1859		st->offset = 0;
1860		goto get_sk;
1861	}
1862	ilb = &tcp_hashinfo.listening_hash[st->bucket];
1863	++st->num;
1864	++st->offset;
1865
1866	sk = sk_nulls_next(sk);
1867get_sk:
1868	sk_nulls_for_each_from(sk, node) {
1869		if (!net_eq(sock_net(sk), net))
1870			continue;
1871		if (sk->sk_family == st->family) {
1872			cur = sk;
1873			goto out;
1874		}
1875		icsk = inet_csk(sk);
1876	}
1877	spin_unlock_bh(&ilb->lock);
1878	st->offset = 0;
1879	if (++st->bucket < INET_LHTABLE_SIZE) {
1880		ilb = &tcp_hashinfo.listening_hash[st->bucket];
1881		spin_lock_bh(&ilb->lock);
1882		sk = sk_nulls_head(&ilb->head);
1883		goto get_sk;
1884	}
1885	cur = NULL;
1886out:
1887	return cur;
1888}
1889
1890static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
1891{
1892	struct tcp_iter_state *st = seq->private;
1893	void *rc;
1894
1895	st->bucket = 0;
1896	st->offset = 0;
1897	rc = listening_get_next(seq, NULL);
1898
1899	while (rc && *pos) {
1900		rc = listening_get_next(seq, rc);
1901		--*pos;
1902	}
1903	return rc;
1904}
1905
1906static inline bool empty_bucket(const struct tcp_iter_state *st)
1907{
1908	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
1909}
1910
1911/*
1912 * Get the first established socket, starting from the bucket given in
1913 * st->bucket. If st->bucket is zero, the very first socket in the hash is returned.
1914 */
1915static void *established_get_first(struct seq_file *seq)
1916{
1917	struct tcp_iter_state *st = seq->private;
1918	struct net *net = seq_file_net(seq);
1919	void *rc = NULL;
1920
1921	st->offset = 0;
1922	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
1923		struct sock *sk;
1924		struct hlist_nulls_node *node;
1925		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
1926
1927		/* Lockless fast path for the common case of empty buckets */
1928		if (empty_bucket(st))
1929			continue;
1930
1931		spin_lock_bh(lock);
1932		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
1933			if (sk->sk_family != st->family ||
1934			    !net_eq(sock_net(sk), net)) {
1935				continue;
1936			}
1937			rc = sk;
1938			goto out;
1939		}
1940		spin_unlock_bh(lock);
1941	}
1942out:
1943	return rc;
1944}
1945
1946static void *established_get_next(struct seq_file *seq, void *cur)
1947{
1948	struct sock *sk = cur;
1949	struct hlist_nulls_node *node;
1950	struct tcp_iter_state *st = seq->private;
1951	struct net *net = seq_file_net(seq);
1952
1953	++st->num;
1954	++st->offset;
1955
1956	sk = sk_nulls_next(sk);
1957
1958	sk_nulls_for_each_from(sk, node) {
1959		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
1960			return sk;
1961	}
1962
1963	spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
1964	++st->bucket;
1965	return established_get_first(seq);
1966}
1967
1968static void *established_get_idx(struct seq_file *seq, loff_t pos)
1969{
1970	struct tcp_iter_state *st = seq->private;
1971	void *rc;
1972
1973	st->bucket = 0;
1974	rc = established_get_first(seq);
1975
1976	while (rc && pos) {
1977		rc = established_get_next(seq, rc);
1978		--pos;
1979	}
1980	return rc;
1981}
1982
1983static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
1984{
1985	void *rc;
1986	struct tcp_iter_state *st = seq->private;
1987
1988	st->state = TCP_SEQ_STATE_LISTENING;
1989	rc	  = listening_get_idx(seq, &pos);
1990
1991	if (!rc) {
1992		st->state = TCP_SEQ_STATE_ESTABLISHED;
1993		rc	  = established_get_idx(seq, pos);
1994	}
1995
1996	return rc;
1997}
1998
1999static void *tcp_seek_last_pos(struct seq_file *seq)
2000{
2001	struct tcp_iter_state *st = seq->private;
2002	int offset = st->offset;
2003	int orig_num = st->num;
2004	void *rc = NULL;
2005
2006	switch (st->state) {
2007	case TCP_SEQ_STATE_LISTENING:
2008		if (st->bucket >= INET_LHTABLE_SIZE)
2009			break;
2010		st->state = TCP_SEQ_STATE_LISTENING;
2011		rc = listening_get_next(seq, NULL);
2012		while (offset-- && rc)
2013			rc = listening_get_next(seq, rc);
2014		if (rc)
2015			break;
2016		st->bucket = 0;
2017		st->state = TCP_SEQ_STATE_ESTABLISHED;
2018		/* Fallthrough */
2019	case TCP_SEQ_STATE_ESTABLISHED:
2020		if (st->bucket > tcp_hashinfo.ehash_mask)
2021			break;
2022		rc = established_get_first(seq);
2023		while (offset-- && rc)
2024			rc = established_get_next(seq, rc);
2025	}
2026
2027	st->num = orig_num;
2028
2029	return rc;
2030}
2031
2032static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2033{
2034	struct tcp_iter_state *st = seq->private;
2035	void *rc;
2036
2037	if (*pos && *pos == st->last_pos) {
2038		rc = tcp_seek_last_pos(seq);
2039		if (rc)
2040			goto out;
2041	}
2042
2043	st->state = TCP_SEQ_STATE_LISTENING;
2044	st->num = 0;
2045	st->bucket = 0;
2046	st->offset = 0;
2047	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2048
2049out:
2050	st->last_pos = *pos;
2051	return rc;
2052}
2053
2054static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2055{
2056	struct tcp_iter_state *st = seq->private;
2057	void *rc = NULL;
2058
2059	if (v == SEQ_START_TOKEN) {
2060		rc = tcp_get_idx(seq, 0);
2061		goto out;
2062	}
2063
2064	switch (st->state) {
2065	case TCP_SEQ_STATE_LISTENING:
2066		rc = listening_get_next(seq, v);
2067		if (!rc) {
2068			st->state = TCP_SEQ_STATE_ESTABLISHED;
2069			st->bucket = 0;
2070			st->offset = 0;
2071			rc	  = established_get_first(seq);
2072		}
2073		break;
2074	case TCP_SEQ_STATE_ESTABLISHED:
2075		rc = established_get_next(seq, v);
2076		break;
2077	}
2078out:
2079	++*pos;
2080	st->last_pos = *pos;
2081	return rc;
2082}
2083
2084static void tcp_seq_stop(struct seq_file *seq, void *v)
2085{
2086	struct tcp_iter_state *st = seq->private;
2087
2088	switch (st->state) {
2089	case TCP_SEQ_STATE_LISTENING:
2090		if (v != SEQ_START_TOKEN)
2091			spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
2092		break;
2093	case TCP_SEQ_STATE_ESTABLISHED:
2094		if (v)
2095			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2096		break;
2097	}
2098}
2099
2100int tcp_seq_open(struct inode *inode, struct file *file)
2101{
2102	struct tcp_seq_afinfo *afinfo = PDE_DATA(inode);
2103	struct tcp_iter_state *s;
2104	int err;
2105
2106	err = seq_open_net(inode, file, &afinfo->seq_ops,
2107			  sizeof(struct tcp_iter_state));
2108	if (err < 0)
2109		return err;
2110
2111	s = ((struct seq_file *)file->private_data)->private;
2112	s->family		= afinfo->family;
2113	s->last_pos		= 0;
2114	return 0;
2115}
2116EXPORT_SYMBOL(tcp_seq_open);
2117
2118int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
2119{
2120	int rc = 0;
2121	struct proc_dir_entry *p;
2122
2123	afinfo->seq_ops.start		= tcp_seq_start;
2124	afinfo->seq_ops.next		= tcp_seq_next;
2125	afinfo->seq_ops.stop		= tcp_seq_stop;
2126
2127	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
2128			     afinfo->seq_fops, afinfo);
2129	if (!p)
2130		rc = -ENOMEM;
2131	return rc;
2132}
2133EXPORT_SYMBOL(tcp_proc_register);
2134
2135void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
2136{
2137	remove_proc_entry(afinfo->name, net->proc_net);
2138}
2139EXPORT_SYMBOL(tcp_proc_unregister);
2140
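/* The three helpers below each format one row of /proc/net/tcp. For
 * illustration, a socket listening on 127.0.0.1:631 would produce a row
 * roughly like the following (the raw __be32 address and port are printed
 * as hex, so 127.0.0.1 shows as 0100007F on a little-endian machine; state
 * 0A is TCP_LISTEN):
 *
 *	0: 0100007F:0277 00000000:0000 0A 00000000:00000000 00:00000000 ...
 */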
2141static void get_openreq4(const struct request_sock *req,
2142			 struct seq_file *f, int i)
2143{
2144	const struct inet_request_sock *ireq = inet_rsk(req);
2145	long delta = req->rsk_timer.expires - jiffies;
2146
2147	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2148		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
2149		i,
2150		ireq->ir_loc_addr,
2151		ireq->ir_num,
2152		ireq->ir_rmt_addr,
2153		ntohs(ireq->ir_rmt_port),
2154		TCP_SYN_RECV,
2155		0, 0, /* could print option size, but that is af dependent. */
2156		1,    /* timers active (only the expire timer) */
2157		jiffies_delta_to_clock_t(delta),
2158		req->num_timeout,
2159		from_kuid_munged(seq_user_ns(f),
2160				 sock_i_uid(req->rsk_listener)),
2161		0,  /* non standard timer */
2162		0, /* open_requests have no inode */
2163		0,
2164		req);
2165}
2166
2167static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
2168{
2169	int timer_active;
2170	unsigned long timer_expires;
2171	const struct tcp_sock *tp = tcp_sk(sk);
2172	const struct inet_connection_sock *icsk = inet_csk(sk);
2173	const struct inet_sock *inet = inet_sk(sk);
2174	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
2175	__be32 dest = inet->inet_daddr;
2176	__be32 src = inet->inet_rcv_saddr;
2177	__u16 destp = ntohs(inet->inet_dport);
2178	__u16 srcp = ntohs(inet->inet_sport);
2179	int rx_queue;
2180	int state;
2181
2182	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
2183	    icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
2184	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
2185		timer_active	= 1;
2186		timer_expires	= icsk->icsk_timeout;
2187	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2188		timer_active	= 4;
2189		timer_expires	= icsk->icsk_timeout;
2190	} else if (timer_pending(&sk->sk_timer)) {
2191		timer_active	= 2;
2192		timer_expires	= sk->sk_timer.expires;
2193	} else {
2194		timer_active	= 0;
2195		timer_expires = jiffies;
2196	}
2197
2198	state = sk_state_load(sk);
2199	if (state == TCP_LISTEN)
2200		rx_queue = sk->sk_ack_backlog;
2201	else
2202		/* Because we don't lock the socket,
2203		 * we might find a transient negative value.
2204		 */
2205		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
2206
2207	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2208			"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
2209		i, src, srcp, dest, destp, state,
2210		tp->write_seq - tp->snd_una,
2211		rx_queue,
2212		timer_active,
2213		jiffies_delta_to_clock_t(timer_expires - jiffies),
2214		icsk->icsk_retransmits,
2215		from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
2216		icsk->icsk_probes_out,
2217		sock_i_ino(sk),
2218		atomic_read(&sk->sk_refcnt), sk,
2219		jiffies_to_clock_t(icsk->icsk_rto),
2220		jiffies_to_clock_t(icsk->icsk_ack.ato),
2221		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
2222		tp->snd_cwnd,
2223		state == TCP_LISTEN ?
2224		    fastopenq->max_qlen :
2225		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
2226}
2227
2228static void get_timewait4_sock(const struct inet_timewait_sock *tw,
2229			       struct seq_file *f, int i)
2230{
2231	long delta = tw->tw_timer.expires - jiffies;
2232	__be32 dest, src;
2233	__u16 destp, srcp;
2234
2235	dest  = tw->tw_daddr;
2236	src   = tw->tw_rcv_saddr;
2237	destp = ntohs(tw->tw_dport);
2238	srcp  = ntohs(tw->tw_sport);
2239
2240	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2241		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
2242		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2243		3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
2244		atomic_read(&tw->tw_refcnt), tw);
2245}
2246
2247#define TMPSZ 150
2248
2249static int tcp4_seq_show(struct seq_file *seq, void *v)
2250{
2251	struct tcp_iter_state *st;
2252	struct sock *sk = v;
2253
2254	seq_setwidth(seq, TMPSZ - 1);
2255	if (v == SEQ_START_TOKEN) {
2256		seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
2257			   "rx_queue tr tm->when retrnsmt   uid  timeout "
2258			   "inode");
2259		goto out;
2260	}
2261	st = seq->private;
2262
2263	if (sk->sk_state == TCP_TIME_WAIT)
2264		get_timewait4_sock(v, seq, st->num);
2265	else if (sk->sk_state == TCP_NEW_SYN_RECV)
2266		get_openreq4(v, seq, st->num);
2267	else
2268		get_tcp4_sock(v, seq, st->num);
2269out:
2270	seq_pad(seq, '\n');
2271	return 0;
2272}
2273
2274static const struct file_operations tcp_afinfo_seq_fops = {
2275	.owner   = THIS_MODULE,
2276	.open    = tcp_seq_open,
2277	.read    = seq_read,
2278	.llseek  = seq_lseek,
2279	.release = seq_release_net
2280};
2281
2282static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2283	.name		= "tcp",
2284	.family		= AF_INET,
2285	.seq_fops	= &tcp_afinfo_seq_fops,
2286	.seq_ops	= {
2287		.show		= tcp4_seq_show,
2288	},
2289};
2290
2291static int __net_init tcp4_proc_init_net(struct net *net)
2292{
2293	return tcp_proc_register(net, &tcp4_seq_afinfo);
2294}
2295
2296static void __net_exit tcp4_proc_exit_net(struct net *net)
2297{
2298	tcp_proc_unregister(net, &tcp4_seq_afinfo);
2299}
2300
2301static struct pernet_operations tcp4_net_ops = {
2302	.init = tcp4_proc_init_net,
2303	.exit = tcp4_proc_exit_net,
2304};
2305
2306int __init tcp4_proc_init(void)
2307{
2308	return register_pernet_subsys(&tcp4_net_ops);
2309}
2310
2311void tcp4_proc_exit(void)
2312{
2313	unregister_pernet_subsys(&tcp4_net_ops);
2314}
2315#endif /* CONFIG_PROC_FS */
2316
2317struct proto tcp_prot = {
2318	.name			= "TCP",
2319	.owner			= THIS_MODULE,
2320	.close			= tcp_close,
2321	.connect		= tcp_v4_connect,
2322	.disconnect		= tcp_disconnect,
2323	.accept			= inet_csk_accept,
2324	.ioctl			= tcp_ioctl,
2325	.init			= tcp_v4_init_sock,
2326	.destroy		= tcp_v4_destroy_sock,
2327	.shutdown		= tcp_shutdown,
2328	.setsockopt		= tcp_setsockopt,
2329	.getsockopt		= tcp_getsockopt,
2330	.recvmsg		= tcp_recvmsg,
2331	.sendmsg		= tcp_sendmsg,
2332	.sendpage		= tcp_sendpage,
2333	.backlog_rcv		= tcp_v4_do_rcv,
2334	.release_cb		= tcp_release_cb,
2335	.hash			= inet_hash,
2336	.unhash			= inet_unhash,
2337	.get_port		= inet_csk_get_port,
2338	.enter_memory_pressure	= tcp_enter_memory_pressure,
2339	.stream_memory_free	= tcp_stream_memory_free,
2340	.sockets_allocated	= &tcp_sockets_allocated,
2341	.orphan_count		= &tcp_orphan_count,
2342	.memory_allocated	= &tcp_memory_allocated,
2343	.memory_pressure	= &tcp_memory_pressure,
2344	.sysctl_mem		= sysctl_tcp_mem,
2345	.sysctl_wmem		= sysctl_tcp_wmem,
2346	.sysctl_rmem		= sysctl_tcp_rmem,
2347	.max_header		= MAX_TCP_HEADER,
2348	.obj_size		= sizeof(struct tcp_sock),
2349	.slab_flags		= SLAB_DESTROY_BY_RCU,
2350	.twsk_prot		= &tcp_timewait_sock_ops,
2351	.rsk_prot		= &tcp_request_sock_ops,
2352	.h.hashinfo		= &tcp_hashinfo,
2353	.no_autobind		= true,
2354#ifdef CONFIG_COMPAT
2355	.compat_setsockopt	= compat_tcp_setsockopt,
2356	.compat_getsockopt	= compat_tcp_getsockopt,
2357#endif
2358	.diag_destroy		= tcp_abort,
2359};
2360EXPORT_SYMBOL(tcp_prot);
2361
2362static void __net_exit tcp_sk_exit(struct net *net)
2363{
2364	int cpu;
2365
2366	for_each_possible_cpu(cpu)
2367		inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
2368	free_percpu(net->ipv4.tcp_sk);
2369}
2370
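/* Per-netns setup: create one TCP control socket per possible CPU (these
 * back the *this_cpu_ptr(net->ipv4.tcp_sk) transmissions done by
 * tcp_v4_send_reset() and tcp_v4_send_ack() above) and install the sysctl
 * defaults.
 */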
2371static int __net_init tcp_sk_init(struct net *net)
2372{
2373	int res, cpu;
2374
2375	net->ipv4.tcp_sk = alloc_percpu(struct sock *);
2376	if (!net->ipv4.tcp_sk)
2377		return -ENOMEM;
2378
2379	for_each_possible_cpu(cpu) {
2380		struct sock *sk;
2381
2382		res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
2383					   IPPROTO_TCP, net);
2384		if (res)
2385			goto fail;
2386		*per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
2387	}
2388
2389	net->ipv4.sysctl_tcp_ecn = 2;
2390	net->ipv4.sysctl_tcp_ecn_fallback = 1;
2391
2392	net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
2393	net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
2394	net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;
2395
2396	net->ipv4.sysctl_tcp_keepalive_time = TCP_KEEPALIVE_TIME;
2397	net->ipv4.sysctl_tcp_keepalive_probes = TCP_KEEPALIVE_PROBES;
2398	net->ipv4.sysctl_tcp_keepalive_intvl = TCP_KEEPALIVE_INTVL;
2399
2400	net->ipv4.sysctl_tcp_syn_retries = TCP_SYN_RETRIES;
2401	net->ipv4.sysctl_tcp_synack_retries = TCP_SYNACK_RETRIES;
2402	net->ipv4.sysctl_tcp_syncookies = 1;
2403	net->ipv4.sysctl_tcp_reordering = TCP_FASTRETRANS_THRESH;
2404	net->ipv4.sysctl_tcp_retries1 = TCP_RETR1;
2405	net->ipv4.sysctl_tcp_retries2 = TCP_RETR2;
2406	net->ipv4.sysctl_tcp_orphan_retries = 0;
2407	net->ipv4.sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
2408	net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX;
2409
2410	return 0;
2411fail:
2412	tcp_sk_exit(net);
2413
2414	return res;
2415}
2416
2417static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
2418{
2419	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
2420}
2421
2422static struct pernet_operations __net_initdata tcp_sk_ops = {
2423       .init	   = tcp_sk_init,
2424       .exit	   = tcp_sk_exit,
2425       .exit_batch = tcp_sk_exit_batch,
2426};
2427
2428void __init tcp_v4_init(void)
2429{
2430	inet_hashinfo_init(&tcp_hashinfo);
2431	if (register_pernet_subsys(&tcp_sk_ops))
2432		panic("Failed to create the TCP control socket.\n");
2433}