v3.15
   1/*
   2 * INET		An implementation of the TCP/IP protocol suite for the LINUX
   3 *		operating system.  INET is implemented using the  BSD Socket
   4 *		interface as the means of communication with the user level.
   5 *
   6 *		Implementation of the Transmission Control Protocol (TCP).
   7 *
   8 *		IPv4 specific functions
   9 *
  10 *
  11 *		code split from:
  12 *		linux/ipv4/tcp.c
  13 *		linux/ipv4/tcp_input.c
  14 *		linux/ipv4/tcp_output.c
  15 *
  16 *		See tcp.c for author information
  17 *
  18 *	This program is free software; you can redistribute it and/or
  19 *      modify it under the terms of the GNU General Public License
  20 *      as published by the Free Software Foundation; either version
  21 *      2 of the License, or (at your option) any later version.
  22 */
  23
  24/*
  25 * Changes:
  26 *		David S. Miller	:	New socket lookup architecture.
  27 *					This code is dedicated to John Dyson.
  28 *		David S. Miller :	Change semantics of established hash,
  29 *					half is devoted to TIME_WAIT sockets
  30 *					and the rest go in the other half.
  31 *		Andi Kleen :		Add support for syncookies and fixed
  32 *					some bugs: ip options weren't passed to
  33 *					the TCP layer, missed a check for an
  34 *					ACK bit.
  35 *		Andi Kleen :		Implemented fast path mtu discovery.
  36 *	     				Fixed many serious bugs in the
  37 *					request_sock handling and moved
  38 *					most of it into the af independent code.
  39 *					Added tail drop and some other bugfixes.
  40 *					Added new listen semantics.
  41 *		Mike McLagan	:	Routing by source
  42 *	Juan Jose Ciarlante:		ip_dynaddr bits
  43 *		Andi Kleen:		various fixes.
   44 *	Vitaly E. Lavrov	:	Transparent proxy revived after a
   45 *					year-long coma.
  46 *	Andi Kleen		:	Fix new listen.
  47 *	Andi Kleen		:	Fix accept error reporting.
  48 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
   49 *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
  50 *					a single port at the same time.
  51 */
  52
  53#define pr_fmt(fmt) "TCP: " fmt
  54
  55#include <linux/bottom_half.h>
  56#include <linux/types.h>
  57#include <linux/fcntl.h>
  58#include <linux/module.h>
  59#include <linux/random.h>
  60#include <linux/cache.h>
  61#include <linux/jhash.h>
  62#include <linux/init.h>
  63#include <linux/times.h>
  64#include <linux/slab.h>
  65
  66#include <net/net_namespace.h>
  67#include <net/icmp.h>
  68#include <net/inet_hashtables.h>
  69#include <net/tcp.h>
  70#include <net/transp_v6.h>
  71#include <net/ipv6.h>
  72#include <net/inet_common.h>
  73#include <net/timewait_sock.h>
  74#include <net/xfrm.h>
  75#include <net/netdma.h>
  76#include <net/secure_seq.h>
  77#include <net/tcp_memcontrol.h>
  78#include <net/busy_poll.h>
  79
  80#include <linux/inet.h>
  81#include <linux/ipv6.h>
  82#include <linux/stddef.h>
  83#include <linux/proc_fs.h>
  84#include <linux/seq_file.h>
  85
  86#include <linux/crypto.h>
  87#include <linux/scatterlist.h>
  88
  89int sysctl_tcp_tw_reuse __read_mostly;
  90int sysctl_tcp_low_latency __read_mostly;
  91EXPORT_SYMBOL(sysctl_tcp_low_latency);
  92
  93
  94#ifdef CONFIG_TCP_MD5SIG
  95static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
  96			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
  97#endif
  98
  99struct inet_hashinfo tcp_hashinfo;
 100EXPORT_SYMBOL(tcp_hashinfo);
 101
 102static inline __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
 103{
 104	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
 105					  ip_hdr(skb)->saddr,
 106					  tcp_hdr(skb)->dest,
 107					  tcp_hdr(skb)->source);
 108}
 109
 110int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
 111{
 112	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
 113	struct tcp_sock *tp = tcp_sk(sk);
 114
  115	/* With PAWS, it is safe from the viewpoint
  116	   of data integrity. Even without PAWS it is safe provided sequence
  117	   spaces do not overlap, i.e. at data rates <= 80 Mbit/sec.
  118
  119	   Actually, the idea is close to VJ's: only the timestamp cache is
  120	   held not per host but per port pair, and the TW bucket is used as
  121	   the state holder.
  122
  123	   If the TW bucket has already been destroyed we fall back to VJ's
  124	   scheme and use the initial timestamp retrieved from the peer table.
  125	 */
 126	if (tcptw->tw_ts_recent_stamp &&
 127	    (twp == NULL || (sysctl_tcp_tw_reuse &&
 128			     get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
 129		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
 130		if (tp->write_seq == 0)
 131			tp->write_seq = 1;
 132		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
 133		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
 134		sock_hold(sktw);
 135		return 1;
 136	}
 137
 138	return 0;
 139}
 140EXPORT_SYMBOL_GPL(tcp_twsk_unique);
 141
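/* [Editorial aside -- not part of tcp_ipv4.c] tcp_twsk_unique() above only
 * permits reusing a TIME-WAIT port pair when sysctl_tcp_tw_reuse is set (and
 * at least one second has passed since the last timestamp). A minimal
 * userspace sketch of flipping that knob through its standard /proc/sys
 * mapping; the path and the "1" value are the only assumptions made here.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int enable_tw_reuse(void)
{
	/* /proc/sys/net/ipv4/tcp_tw_reuse backs sysctl_tcp_tw_reuse */
	int fd = open("/proc/sys/net/ipv4/tcp_tw_reuse", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return -1;
	}
	if (write(fd, "1", 1) != 1) {
		perror("write");
		close(fd);
		return -1;
	}
	return close(fd);
}
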
 142/* This will initiate an outgoing connection. */
 143int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 144{
 145	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
 146	struct inet_sock *inet = inet_sk(sk);
 147	struct tcp_sock *tp = tcp_sk(sk);
 148	__be16 orig_sport, orig_dport;
 149	__be32 daddr, nexthop;
 150	struct flowi4 *fl4;
 151	struct rtable *rt;
 152	int err;
 153	struct ip_options_rcu *inet_opt;
 154
 155	if (addr_len < sizeof(struct sockaddr_in))
 156		return -EINVAL;
 157
 158	if (usin->sin_family != AF_INET)
 159		return -EAFNOSUPPORT;
 160
 161	nexthop = daddr = usin->sin_addr.s_addr;
 162	inet_opt = rcu_dereference_protected(inet->inet_opt,
 163					     sock_owned_by_user(sk));
 164	if (inet_opt && inet_opt->opt.srr) {
 165		if (!daddr)
 166			return -EINVAL;
 167		nexthop = inet_opt->opt.faddr;
 168	}
 169
 170	orig_sport = inet->inet_sport;
 171	orig_dport = usin->sin_port;
 172	fl4 = &inet->cork.fl.u.ip4;
 173	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
 174			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
 175			      IPPROTO_TCP,
 176			      orig_sport, orig_dport, sk);
 177	if (IS_ERR(rt)) {
 178		err = PTR_ERR(rt);
 179		if (err == -ENETUNREACH)
 180			IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
 181		return err;
 182	}
 183
 184	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
 185		ip_rt_put(rt);
 186		return -ENETUNREACH;
 187	}
 188
 189	if (!inet_opt || !inet_opt->opt.srr)
 190		daddr = fl4->daddr;
 191
 192	if (!inet->inet_saddr)
 193		inet->inet_saddr = fl4->saddr;
 194	inet->inet_rcv_saddr = inet->inet_saddr;
 195
 196	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
 197		/* Reset inherited state */
 198		tp->rx_opt.ts_recent	   = 0;
 199		tp->rx_opt.ts_recent_stamp = 0;
 200		if (likely(!tp->repair))
 201			tp->write_seq	   = 0;
 202	}
 203
 204	if (tcp_death_row.sysctl_tw_recycle &&
 205	    !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
 206		tcp_fetch_timewait_stamp(sk, &rt->dst);
 207
 208	inet->inet_dport = usin->sin_port;
 209	inet->inet_daddr = daddr;
 210
 211	inet_csk(sk)->icsk_ext_hdr_len = 0;
 212	if (inet_opt)
 213		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
 214
 215	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;
 216
  217	/* Socket identity is still unknown (sport may be zero).
  218	 * However we set the state to SYN-SENT and, without releasing the
  219	 * socket lock, select a source port, enter ourselves into the hash
  220	 * tables and complete initialization after this.
  221	 */
 222	tcp_set_state(sk, TCP_SYN_SENT);
 223	err = inet_hash_connect(&tcp_death_row, sk);
 224	if (err)
 225		goto failure;
 226
 227	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
 228			       inet->inet_sport, inet->inet_dport, sk);
 229	if (IS_ERR(rt)) {
 230		err = PTR_ERR(rt);
 231		rt = NULL;
 232		goto failure;
 233	}
 234	/* OK, now commit destination to socket.  */
 235	sk->sk_gso_type = SKB_GSO_TCPV4;
 236	sk_setup_caps(sk, &rt->dst);
 237
 238	if (!tp->write_seq && likely(!tp->repair))
 239		tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
 240							   inet->inet_daddr,
 241							   inet->inet_sport,
 242							   usin->sin_port);
 243
 244	inet->inet_id = tp->write_seq ^ jiffies;
 245
 246	err = tcp_connect(sk);
 247
 248	rt = NULL;
 249	if (err)
 250		goto failure;
 251
 252	return 0;
 253
 254failure:
 255	/*
 256	 * This unhashes the socket and releases the local port,
 257	 * if necessary.
 258	 */
 259	tcp_set_state(sk, TCP_CLOSE);
 260	ip_rt_put(rt);
 261	sk->sk_route_caps = 0;
 262	inet->inet_dport = 0;
 263	return err;
 264}
 265EXPORT_SYMBOL(tcp_v4_connect);
 266
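/* [Editorial aside -- not part of tcp_ipv4.c] A hedged userspace sketch of
 * the path that ends in tcp_v4_connect() above: an ordinary connect(2) on an
 * AF_INET stream socket reaches it via inet_stream_connect() and
 * sk->sk_prot->connect. The 127.0.0.1:8080 peer is an illustrative
 * assumption, not anything this file prescribes.
 */
#include <arpa/inet.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

static int connect_example(void)
{
	struct sockaddr_in sin;
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0)
		return -1;

	memset(&sin, 0, sizeof(sin));
	sin.sin_family = AF_INET;	/* anything else fails with -EAFNOSUPPORT */
	sin.sin_port   = htons(8080);	/* hypothetical peer port */
	inet_pton(AF_INET, "127.0.0.1", &sin.sin_addr);

	if (connect(fd, (struct sockaddr *)&sin, sizeof(sin)) < 0) {
		perror("connect");	/* e.g. ENETUNREACH from the route lookup */
		close(fd);
		return -1;
	}
	return close(fd);
}
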
 267/*
 268 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
  269 * It can be called through tcp_release_cb() if the socket was owned by
  270 * the user at the time tcp_v4_err() was called to handle the ICMP message.
 271 */
 272static void tcp_v4_mtu_reduced(struct sock *sk)
 273{
 274	struct dst_entry *dst;
 275	struct inet_sock *inet = inet_sk(sk);
 276	u32 mtu = tcp_sk(sk)->mtu_info;
 277
 278	dst = inet_csk_update_pmtu(sk, mtu);
 279	if (!dst)
 280		return;
 281
  282	/* Something is about to go wrong... Remember the soft error
  283	 * in case this connection is not able to recover.
 284	 */
 285	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
 286		sk->sk_err_soft = EMSGSIZE;
 287
 288	mtu = dst_mtu(dst);
 289
 290	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
 291	    ip_sk_accept_pmtu(sk) &&
 292	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
 293		tcp_sync_mss(sk, mtu);
 294
 295		/* Resend the TCP packet because it's
 296		 * clear that the old packet has been
 297		 * dropped. This is the new "fast" path mtu
 298		 * discovery.
 299		 */
 300		tcp_simple_retransmit(sk);
 301	} /* else let the usual retransmit timer handle it */
 302}
 303
 304static void do_redirect(struct sk_buff *skb, struct sock *sk)
 305{
 306	struct dst_entry *dst = __sk_dst_check(sk, 0);
 307
 308	if (dst)
 309		dst->ops->redirect(dst, sk, skb);
 310}
 311
 312/*
 313 * This routine is called by the ICMP module when it gets some
 314 * sort of error condition.  If err < 0 then the socket should
 315 * be closed and the error returned to the user.  If err > 0
  316 * it's just the icmp type << 8 | icmp code.  After adjustment, the
  317 * header points to the first 8 bytes of the tcp header.  We need
  318 * to find the appropriate port.
 319 *
 320 * The locking strategy used here is very "optimistic". When
 321 * someone else accesses the socket the ICMP is just dropped
 322 * and for some paths there is no check at all.
 323 * A more general error queue to queue errors for later handling
 324 * is probably better.
 325 *
 326 */
 327
 328void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
 329{
 330	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
 331	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
 332	struct inet_connection_sock *icsk;
 333	struct tcp_sock *tp;
 334	struct inet_sock *inet;
 335	const int type = icmp_hdr(icmp_skb)->type;
 336	const int code = icmp_hdr(icmp_skb)->code;
 337	struct sock *sk;
 338	struct sk_buff *skb;
 339	struct request_sock *req;
 340	__u32 seq;
 341	__u32 remaining;
 342	int err;
 343	struct net *net = dev_net(icmp_skb->dev);
 344
 345	if (icmp_skb->len < (iph->ihl << 2) + 8) {
 346		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
 347		return;
 348	}
 349
 350	sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
 351			iph->saddr, th->source, inet_iif(icmp_skb));
 352	if (!sk) {
 353		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
 354		return;
 355	}
 356	if (sk->sk_state == TCP_TIME_WAIT) {
 357		inet_twsk_put(inet_twsk(sk));
 358		return;
 359	}
 360
 361	bh_lock_sock(sk);
 362	/* If too many ICMPs get dropped on busy
 363	 * servers this needs to be solved differently.
  364	 * We do take care of the PMTU discovery (RFC1191) special case:
  365	 * we can receive locally generated ICMP messages while the socket is held.
 366	 */
 367	if (sock_owned_by_user(sk)) {
 368		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
 369			NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
 370	}
 371	if (sk->sk_state == TCP_CLOSE)
 372		goto out;
 373
 374	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
 375		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
 376		goto out;
 377	}
 378
 379	icsk = inet_csk(sk);
 380	tp = tcp_sk(sk);
 381	req = tp->fastopen_rsk;
 382	seq = ntohl(th->seq);
 383	if (sk->sk_state != TCP_LISTEN &&
 384	    !between(seq, tp->snd_una, tp->snd_nxt) &&
 385	    (req == NULL || seq != tcp_rsk(req)->snt_isn)) {
 386		/* For a Fast Open socket, allow seq to be snt_isn. */
 387		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
 388		goto out;
 389	}
 390
 391	switch (type) {
 392	case ICMP_REDIRECT:
 393		do_redirect(icmp_skb, sk);
 394		goto out;
 395	case ICMP_SOURCE_QUENCH:
 396		/* Just silently ignore these. */
 397		goto out;
 398	case ICMP_PARAMETERPROB:
 399		err = EPROTO;
 400		break;
 401	case ICMP_DEST_UNREACH:
 402		if (code > NR_ICMP_UNREACH)
 403			goto out;
 404
 405		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
 406			/* We are not interested in TCP_LISTEN and open_requests
  407			 * (SYN-ACKs sent out by Linux are always <576 bytes so
 408			 * they should go through unfragmented).
 409			 */
 410			if (sk->sk_state == TCP_LISTEN)
 411				goto out;
 412
 413			tp->mtu_info = info;
 414			if (!sock_owned_by_user(sk)) {
 415				tcp_v4_mtu_reduced(sk);
 416			} else {
 417				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
 418					sock_hold(sk);
 419			}
 420			goto out;
 421		}
 422
 423		err = icmp_err_convert[code].errno;
 424		/* check if icmp_skb allows revert of backoff
 425		 * (see draft-zimmermann-tcp-lcd) */
 426		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
 427			break;
 428		if (seq != tp->snd_una  || !icsk->icsk_retransmits ||
 429		    !icsk->icsk_backoff)
 430			break;
 431
 432		/* XXX (TFO) - revisit the following logic for TFO */
 433
 434		if (sock_owned_by_user(sk))
 435			break;
 436
 437		icsk->icsk_backoff--;
 438		inet_csk(sk)->icsk_rto = (tp->srtt_us ? __tcp_set_rto(tp) :
 439			TCP_TIMEOUT_INIT) << icsk->icsk_backoff;
 440		tcp_bound_rto(sk);
 441
 442		skb = tcp_write_queue_head(sk);
 443		BUG_ON(!skb);
 444
 445		remaining = icsk->icsk_rto - min(icsk->icsk_rto,
 446				tcp_time_stamp - TCP_SKB_CB(skb)->when);
 447
 448		if (remaining) {
 449			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
 450						  remaining, TCP_RTO_MAX);
 451		} else {
  452			/* The RTO revert clocked out the retransmission;
  453			 * we will retransmit now. */
 454			tcp_retransmit_timer(sk);
 455		}
 456
 457		break;
 458	case ICMP_TIME_EXCEEDED:
 459		err = EHOSTUNREACH;
 460		break;
 461	default:
 462		goto out;
 463	}
 464
 465	/* XXX (TFO) - if it's a TFO socket and has been accepted, rather
 466	 * than following the TCP_SYN_RECV case and closing the socket,
 467	 * we ignore the ICMP error and keep trying like a fully established
 468	 * socket. Is this the right thing to do?
 469	 */
 470	if (req && req->sk == NULL)
 471		goto out;
 472
 473	switch (sk->sk_state) {
 474		struct request_sock *req, **prev;
 475	case TCP_LISTEN:
 476		if (sock_owned_by_user(sk))
 477			goto out;
 478
 479		req = inet_csk_search_req(sk, &prev, th->dest,
 480					  iph->daddr, iph->saddr);
 481		if (!req)
 482			goto out;
 483
 484		/* ICMPs are not backlogged, hence we cannot get
 485		   an established socket here.
 486		 */
 487		WARN_ON(req->sk);
 488
 489		if (seq != tcp_rsk(req)->snt_isn) {
 490			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
 491			goto out;
 492		}
 493
 494		/*
 495		 * Still in SYN_RECV, just remove it silently.
 496		 * There is no good way to pass the error to the newly
 497		 * created socket, and POSIX does not want network
 498		 * errors returned from accept().
 499		 */
 500		inet_csk_reqsk_queue_drop(sk, req, prev);
 501		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
 502		goto out;
 503
 504	case TCP_SYN_SENT:
  505	case TCP_SYN_RECV:  /* "Cannot happen."
  506			       It can, e.g., if SYNs crossed,
  507			       or with Fast Open.
 508			     */
 509		if (!sock_owned_by_user(sk)) {
 510			sk->sk_err = err;
 511
 512			sk->sk_error_report(sk);
 513
 514			tcp_done(sk);
 515		} else {
 516			sk->sk_err_soft = err;
 517		}
 518		goto out;
 519	}
 520
 521	/* If we've already connected we will keep trying
 522	 * until we time out, or the user gives up.
 523	 *
  524	 * rfc1122 4.2.3.9 allows us to consider as hard errors
  525	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
  526	 * but it is obsoleted by pmtu discovery).
  527	 *
  528	 * Note that in the modern internet, where routing is unreliable
  529	 * and broken firewalls sit in every dark corner sending random
  530	 * errors as ordered by their masters, even these two messages finally
  531	 * lose their original sense (even Linux sends invalid PORT_UNREACHs).
 532	 *
 533	 * Now we are in compliance with RFCs.
 534	 *							--ANK (980905)
 535	 */
 536
 537	inet = inet_sk(sk);
 538	if (!sock_owned_by_user(sk) && inet->recverr) {
 539		sk->sk_err = err;
 540		sk->sk_error_report(sk);
 541	} else	{ /* Only an error on timeout */
 542		sk->sk_err_soft = err;
 543	}
 544
 545out:
 546	bh_unlock_sock(sk);
 547	sock_put(sk);
 548}
 549
 550void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
 551{
 552	struct tcphdr *th = tcp_hdr(skb);
 553
 554	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 555		th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
 556		skb->csum_start = skb_transport_header(skb) - skb->head;
 557		skb->csum_offset = offsetof(struct tcphdr, check);
 558	} else {
 559		th->check = tcp_v4_check(skb->len, saddr, daddr,
 560					 csum_partial(th,
 561						      th->doff << 2,
 562						      skb->csum));
 563	}
 564}
 565
 566/* This routine computes an IPv4 TCP checksum. */
 567void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
 568{
 569	const struct inet_sock *inet = inet_sk(sk);
 570
 571	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
 572}
 573EXPORT_SYMBOL(tcp_v4_send_check);
 574
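/* [Editorial aside -- not part of tcp_ipv4.c] __tcp_v4_send_check() above
 * seeds the TCP checksum with the RFC 793 IPv4 pseudo-header via
 * tcp_v4_check()/csum_tcpudp_nofold(). A portable userspace sketch of that
 * same one's-complement arithmetic, with inputs in host byte order for
 * clarity; this is a reference illustration, not the kernel's optimized,
 * per-arch implementation.
 */
#include <stdint.h>

static uint16_t tcp_v4_pseudohdr_sum(uint32_t saddr, uint32_t daddr,
				     uint16_t tcp_len)
{
	uint32_t sum = 0;

	sum += saddr >> 16;		/* source address, high then low 16 bits */
	sum += saddr & 0xffff;
	sum += daddr >> 16;		/* destination address */
	sum += daddr & 0xffff;
	sum += 6;			/* IPPROTO_TCP, zero-padded to 16 bits */
	sum += tcp_len;			/* TCP header + payload length */

	while (sum >> 16)		/* fold the carries back in */
		sum = (sum & 0xffff) + (sum >> 16);

	return (uint16_t)sum;		/* partial sum: the caller continues over
					 * the TCP segment and complements last */
}
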
 575/*
 576 *	This routine will send an RST to the other tcp.
 577 *
  578 *	Someone asks: why do I NEVER use socket parameters (TOS, TTL etc.)
  579 *		      for the reset?
  580 *	Answer: if a packet caused the RST, it is not for a socket
  581 *		existing in our system; if it is matched to a socket,
  582 *		it is just a duplicate segment or a bug in the other
  583 *		side's TCP. So we build the reply based only on the
  584 *		parameters that arrived with the segment.
 585 *	Exception: precedence violation. We do not implement it in any case.
 586 */
 587
 588static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
 589{
 590	const struct tcphdr *th = tcp_hdr(skb);
 591	struct {
 592		struct tcphdr th;
 593#ifdef CONFIG_TCP_MD5SIG
 594		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
 595#endif
 596	} rep;
 597	struct ip_reply_arg arg;
 598#ifdef CONFIG_TCP_MD5SIG
 599	struct tcp_md5sig_key *key;
 600	const __u8 *hash_location = NULL;
 601	unsigned char newhash[16];
 602	int genhash;
 603	struct sock *sk1 = NULL;
 604#endif
 605	struct net *net;
 606
 607	/* Never send a reset in response to a reset. */
 608	if (th->rst)
 609		return;
 610
 611	if (skb_rtable(skb)->rt_type != RTN_LOCAL)
 612		return;
 613
 614	/* Swap the send and the receive. */
 615	memset(&rep, 0, sizeof(rep));
 616	rep.th.dest   = th->source;
 617	rep.th.source = th->dest;
 618	rep.th.doff   = sizeof(struct tcphdr) / 4;
 619	rep.th.rst    = 1;
 620
 621	if (th->ack) {
 622		rep.th.seq = th->ack_seq;
 623	} else {
 624		rep.th.ack = 1;
 625		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
 626				       skb->len - (th->doff << 2));
 627	}
 628
 629	memset(&arg, 0, sizeof(arg));
 630	arg.iov[0].iov_base = (unsigned char *)&rep;
 631	arg.iov[0].iov_len  = sizeof(rep.th);
 632
 633#ifdef CONFIG_TCP_MD5SIG
 634	hash_location = tcp_parse_md5sig_option(th);
 635	if (!sk && hash_location) {
 636		/*
  637		 * The active side is lost. Try to find the listening socket
  638		 * through the source port, and then the md5 key through the
  639		 * listening socket. We do not lose security here:
  640		 * the incoming packet is checked against the md5 hash of the
  641		 * found key; no RST is generated if the md5 hash doesn't match.
 642		 */
 643		sk1 = __inet_lookup_listener(dev_net(skb_dst(skb)->dev),
 644					     &tcp_hashinfo, ip_hdr(skb)->saddr,
 645					     th->source, ip_hdr(skb)->daddr,
 646					     ntohs(th->source), inet_iif(skb));
  647		/* don't send an rst if we can't find the key */
 648		if (!sk1)
 649			return;
 650		rcu_read_lock();
 651		key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
 652					&ip_hdr(skb)->saddr, AF_INET);
 653		if (!key)
 654			goto release_sk1;
 655
 656		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, NULL, skb);
 657		if (genhash || memcmp(hash_location, newhash, 16) != 0)
 658			goto release_sk1;
 659	} else {
 660		key = sk ? tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
 661					     &ip_hdr(skb)->saddr,
 662					     AF_INET) : NULL;
 663	}
 664
 665	if (key) {
 666		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
 667				   (TCPOPT_NOP << 16) |
 668				   (TCPOPT_MD5SIG << 8) |
 669				   TCPOLEN_MD5SIG);
 670		/* Update length and the length the header thinks exists */
 671		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
 672		rep.th.doff = arg.iov[0].iov_len / 4;
 673
 674		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
 675				     key, ip_hdr(skb)->saddr,
 676				     ip_hdr(skb)->daddr, &rep.th);
 677	}
 678#endif
 679	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
 680				      ip_hdr(skb)->saddr, /* XXX */
 681				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
 682	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
 683	arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
  684	/* When the socket is gone, all binding information is lost and
  685	 * routing might fail. No choice here: if we choose to force the
  686	 * input interface, we will misroute in the case of an asymmetric route.
 687	 */
 688	if (sk)
 689		arg.bound_dev_if = sk->sk_bound_dev_if;
 690
 691	net = dev_net(skb_dst(skb)->dev);
 692	arg.tos = ip_hdr(skb)->tos;
 693	ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr,
 694			      ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);
 695
 696	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
 697	TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
 698
 699#ifdef CONFIG_TCP_MD5SIG
 700release_sk1:
 701	if (sk1) {
 702		rcu_read_unlock();
 703		sock_put(sk1);
 704	}
 705#endif
 706}
 707
  708/* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
  709   outside socket context, is certainly ugly. What can I do?
 710 */
 711
 712static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
 713			    u32 win, u32 tsval, u32 tsecr, int oif,
 714			    struct tcp_md5sig_key *key,
 715			    int reply_flags, u8 tos)
 716{
 717	const struct tcphdr *th = tcp_hdr(skb);
 718	struct {
 719		struct tcphdr th;
 720		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
 721#ifdef CONFIG_TCP_MD5SIG
 722			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
 723#endif
 724			];
 725	} rep;
 726	struct ip_reply_arg arg;
 727	struct net *net = dev_net(skb_dst(skb)->dev);
 728
 729	memset(&rep.th, 0, sizeof(struct tcphdr));
 730	memset(&arg, 0, sizeof(arg));
 731
 732	arg.iov[0].iov_base = (unsigned char *)&rep;
 733	arg.iov[0].iov_len  = sizeof(rep.th);
 734	if (tsecr) {
 735		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
 736				   (TCPOPT_TIMESTAMP << 8) |
 737				   TCPOLEN_TIMESTAMP);
 738		rep.opt[1] = htonl(tsval);
 739		rep.opt[2] = htonl(tsecr);
 740		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
 741	}
 742
 743	/* Swap the send and the receive. */
 744	rep.th.dest    = th->source;
 745	rep.th.source  = th->dest;
 746	rep.th.doff    = arg.iov[0].iov_len / 4;
 747	rep.th.seq     = htonl(seq);
 748	rep.th.ack_seq = htonl(ack);
 749	rep.th.ack     = 1;
 750	rep.th.window  = htons(win);
 751
 752#ifdef CONFIG_TCP_MD5SIG
 753	if (key) {
 754		int offset = (tsecr) ? 3 : 0;
 755
 756		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
 757					  (TCPOPT_NOP << 16) |
 758					  (TCPOPT_MD5SIG << 8) |
 759					  TCPOLEN_MD5SIG);
 760		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
 761		rep.th.doff = arg.iov[0].iov_len/4;
 762
 763		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
 764				    key, ip_hdr(skb)->saddr,
 765				    ip_hdr(skb)->daddr, &rep.th);
 766	}
 767#endif
 768	arg.flags = reply_flags;
 769	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
 770				      ip_hdr(skb)->saddr, /* XXX */
 771				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
 772	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
 773	if (oif)
 774		arg.bound_dev_if = oif;
 775	arg.tos = tos;
 776	ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr,
 777			      ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);
 778
 779	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
 780}
 781
 782static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
 783{
 784	struct inet_timewait_sock *tw = inet_twsk(sk);
 785	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
 786
 787	tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
 788			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
 789			tcp_time_stamp + tcptw->tw_ts_offset,
 790			tcptw->tw_ts_recent,
 791			tw->tw_bound_dev_if,
 792			tcp_twsk_md5_key(tcptw),
 793			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
 794			tw->tw_tos
 795			);
 796
 797	inet_twsk_put(tw);
 798}
 799
 800static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
 801				  struct request_sock *req)
 802{
 803	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
 804	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
 805	 */
 806	tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
 807			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
 808			tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
 809			tcp_time_stamp,
 810			req->ts_recent,
 811			0,
 812			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
 813					  AF_INET),
 814			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
 815			ip_hdr(skb)->tos);
 816}
 817
 818/*
 819 *	Send a SYN-ACK after having received a SYN.
 820 *	This still operates on a request_sock only, not on a big
 821 *	socket.
 822 */
 823static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
 824			      struct request_sock *req,
 825			      u16 queue_mapping)
 826{
 827	const struct inet_request_sock *ireq = inet_rsk(req);
 828	struct flowi4 fl4;
 829	int err = -1;
 830	struct sk_buff *skb;
 831
 832	/* First, grab a route. */
 833	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
 834		return -1;
 835
 836	skb = tcp_make_synack(sk, dst, req, NULL);
 837
 838	if (skb) {
 839		__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
 840
 841		skb_set_queue_mapping(skb, queue_mapping);
 842		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
 843					    ireq->ir_rmt_addr,
 844					    ireq->opt);
 845		err = net_xmit_eval(err);
 846		if (!tcp_rsk(req)->snt_synack && !err)
 847			tcp_rsk(req)->snt_synack = tcp_time_stamp;
 848	}
 849
 850	return err;
 851}
 852
 853static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req)
 854{
 855	int res = tcp_v4_send_synack(sk, NULL, req, 0);
 856
 857	if (!res) {
 858		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
 859		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
 860	}
 861	return res;
 862}
 863
 864/*
 865 *	IPv4 request_sock destructor.
 866 */
 867static void tcp_v4_reqsk_destructor(struct request_sock *req)
 868{
 869	kfree(inet_rsk(req)->opt);
 870}
 871
 872/*
 873 * Return true if a syncookie should be sent
 874 */
 875bool tcp_syn_flood_action(struct sock *sk,
 876			 const struct sk_buff *skb,
 877			 const char *proto)
 878{
 879	const char *msg = "Dropping request";
 880	bool want_cookie = false;
 881	struct listen_sock *lopt;
 882
 883#ifdef CONFIG_SYN_COOKIES
 884	if (sysctl_tcp_syncookies) {
 885		msg = "Sending cookies";
 886		want_cookie = true;
 887		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
 888	} else
 889#endif
 890		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);
 891
 892	lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
 893	if (!lopt->synflood_warned && sysctl_tcp_syncookies != 2) {
 894		lopt->synflood_warned = 1;
 895		pr_info("%s: Possible SYN flooding on port %d. %s.  Check SNMP counters.\n",
 896			proto, ntohs(tcp_hdr(skb)->dest), msg);
 897	}
 898	return want_cookie;
 899}
 900EXPORT_SYMBOL(tcp_syn_flood_action);
 901
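/* [Editorial aside -- not part of tcp_ipv4.c] The "Check SNMP counters"
 * advice printed by tcp_syn_flood_action() above refers to the
 * LINUX_MIB_TCPREQQFULLDOCOOKIES / LINUX_MIB_TCPREQQFULLDROP counters it
 * bumps, which kernels of this era export through /proc/net/netstat
 * (as TCPReqQFullDoCookies / TCPReqQFullDrop, per my reading of
 * net/ipv4/proc.c). A trivial sketch that dumps the file for inspection:
 */
#include <stdio.h>

static void dump_netstat(void)
{
	char line[4096];
	FILE *f = fopen("/proc/net/netstat", "r");

	if (!f)
		return;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* look for the TCPReqQFull* columns */
	fclose(f);
}
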
 902/*
 903 * Save and compile IPv4 options into the request_sock if needed.
 904 */
 905static struct ip_options_rcu *tcp_v4_save_options(struct sk_buff *skb)
 906{
 907	const struct ip_options *opt = &(IPCB(skb)->opt);
 908	struct ip_options_rcu *dopt = NULL;
 909
 910	if (opt && opt->optlen) {
 911		int opt_size = sizeof(*dopt) + opt->optlen;
 912
 913		dopt = kmalloc(opt_size, GFP_ATOMIC);
 914		if (dopt) {
 915			if (ip_options_echo(&dopt->opt, skb)) {
 916				kfree(dopt);
 917				dopt = NULL;
 918			}
 919		}
 920	}
 921	return dopt;
 922}
 923
 924#ifdef CONFIG_TCP_MD5SIG
 925/*
 926 * RFC2385 MD5 checksumming requires a mapping of
 927 * IP address->MD5 Key.
 928 * We need to maintain these in the sk structure.
 929 */
 930
 931/* Find the Key structure for an address.  */
 932struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
 933					 const union tcp_md5_addr *addr,
 934					 int family)
 935{
 936	struct tcp_sock *tp = tcp_sk(sk);
 937	struct tcp_md5sig_key *key;
 938	unsigned int size = sizeof(struct in_addr);
 939	struct tcp_md5sig_info *md5sig;
 940
 941	/* caller either holds rcu_read_lock() or socket lock */
 942	md5sig = rcu_dereference_check(tp->md5sig_info,
 943				       sock_owned_by_user(sk) ||
 944				       lockdep_is_held(&sk->sk_lock.slock));
 945	if (!md5sig)
 946		return NULL;
 947#if IS_ENABLED(CONFIG_IPV6)
 948	if (family == AF_INET6)
 949		size = sizeof(struct in6_addr);
 950#endif
 951	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
 952		if (key->family != family)
 953			continue;
 954		if (!memcmp(&key->addr, addr, size))
 955			return key;
 956	}
 957	return NULL;
 958}
 959EXPORT_SYMBOL(tcp_md5_do_lookup);
 960
 961struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
 962					 struct sock *addr_sk)
 963{
 964	union tcp_md5_addr *addr;
 965
 966	addr = (union tcp_md5_addr *)&inet_sk(addr_sk)->inet_daddr;
 967	return tcp_md5_do_lookup(sk, addr, AF_INET);
 968}
 969EXPORT_SYMBOL(tcp_v4_md5_lookup);
 970
 971static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
 972						      struct request_sock *req)
 973{
 974	union tcp_md5_addr *addr;
 975
 976	addr = (union tcp_md5_addr *)&inet_rsk(req)->ir_rmt_addr;
 977	return tcp_md5_do_lookup(sk, addr, AF_INET);
 978}
 979
 980/* This can be called on a newly created socket, from other files */
 981int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
 982		   int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
 983{
 984	/* Add Key to the list */
 985	struct tcp_md5sig_key *key;
 986	struct tcp_sock *tp = tcp_sk(sk);
 987	struct tcp_md5sig_info *md5sig;
 988
 989	key = tcp_md5_do_lookup(sk, addr, family);
 990	if (key) {
 991		/* Pre-existing entry - just update that one. */
 992		memcpy(key->key, newkey, newkeylen);
 993		key->keylen = newkeylen;
 994		return 0;
 995	}
 996
 997	md5sig = rcu_dereference_protected(tp->md5sig_info,
 998					   sock_owned_by_user(sk));
 999	if (!md5sig) {
1000		md5sig = kmalloc(sizeof(*md5sig), gfp);
1001		if (!md5sig)
1002			return -ENOMEM;
1003
1004		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
1005		INIT_HLIST_HEAD(&md5sig->head);
1006		rcu_assign_pointer(tp->md5sig_info, md5sig);
1007	}
1008
1009	key = sock_kmalloc(sk, sizeof(*key), gfp);
1010	if (!key)
1011		return -ENOMEM;
1012	if (!tcp_alloc_md5sig_pool()) {
1013		sock_kfree_s(sk, key, sizeof(*key));
1014		return -ENOMEM;
1015	}
1016
1017	memcpy(key->key, newkey, newkeylen);
1018	key->keylen = newkeylen;
1019	key->family = family;
1020	memcpy(&key->addr, addr,
1021	       (family == AF_INET6) ? sizeof(struct in6_addr) :
1022				      sizeof(struct in_addr));
1023	hlist_add_head_rcu(&key->node, &md5sig->head);
1024	return 0;
1025}
1026EXPORT_SYMBOL(tcp_md5_do_add);
1027
1028int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
1029{
1030	struct tcp_md5sig_key *key;
1031
1032	key = tcp_md5_do_lookup(sk, addr, family);
1033	if (!key)
1034		return -ENOENT;
1035	hlist_del_rcu(&key->node);
1036	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1037	kfree_rcu(key, rcu);
1038	return 0;
1039}
1040EXPORT_SYMBOL(tcp_md5_do_del);
1041
1042static void tcp_clear_md5_list(struct sock *sk)
1043{
1044	struct tcp_sock *tp = tcp_sk(sk);
1045	struct tcp_md5sig_key *key;
1046	struct hlist_node *n;
1047	struct tcp_md5sig_info *md5sig;
1048
1049	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
1050
1051	hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
1052		hlist_del_rcu(&key->node);
1053		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1054		kfree_rcu(key, rcu);
1055	}
1056}
1057
1058static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
1059				 int optlen)
1060{
1061	struct tcp_md5sig cmd;
1062	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
1063
1064	if (optlen < sizeof(cmd))
1065		return -EINVAL;
1066
1067	if (copy_from_user(&cmd, optval, sizeof(cmd)))
1068		return -EFAULT;
1069
1070	if (sin->sin_family != AF_INET)
1071		return -EINVAL;
1072
1073	if (!cmd.tcpm_key || !cmd.tcpm_keylen)
1074		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1075				      AF_INET);
1076
1077	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
1078		return -EINVAL;
1079
1080	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1081			      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
1082			      GFP_KERNEL);
1083}
1084
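/* [Editorial aside -- not part of tcp_ipv4.c] tcp_v4_parse_md5_keys() above
 * is reached from userspace through the TCP_MD5SIG socket option. A minimal
 * sketch of installing an RFC 2385 key for one peer; the peer address and
 * key bytes are illustrative assumptions. Passing a zero keylen takes the
 * tcp_md5_do_del() branch and removes the key instead.
 */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <string.h>
#include <sys/socket.h>

static int set_md5_key(int fd, const struct sockaddr_in *peer,
		       const void *key, int keylen)
{
	struct tcp_md5sig md5;

	memset(&md5, 0, sizeof(md5));
	memcpy(&md5.tcpm_addr, peer, sizeof(*peer));	/* must be AF_INET */
	md5.tcpm_keylen = keylen;	/* at most TCP_MD5SIG_MAXKEYLEN */
	memcpy(md5.tcpm_key, key, keylen);

	return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
}
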
1085static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
1086					__be32 daddr, __be32 saddr, int nbytes)
1087{
1088	struct tcp4_pseudohdr *bp;
1089	struct scatterlist sg;
1090
1091	bp = &hp->md5_blk.ip4;
1092
1093	/*
1094	 * 1. the TCP pseudo-header (in the order: source IP address,
1095	 * destination IP address, zero-padded protocol number, and
1096	 * segment length)
1097	 */
1098	bp->saddr = saddr;
1099	bp->daddr = daddr;
1100	bp->pad = 0;
1101	bp->protocol = IPPROTO_TCP;
1102	bp->len = cpu_to_be16(nbytes);
1103
1104	sg_init_one(&sg, bp, sizeof(*bp));
1105	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
1106}
1107
1108static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
1109			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
1110{
1111	struct tcp_md5sig_pool *hp;
1112	struct hash_desc *desc;
1113
1114	hp = tcp_get_md5sig_pool();
1115	if (!hp)
1116		goto clear_hash_noput;
1117	desc = &hp->md5_desc;
1118
1119	if (crypto_hash_init(desc))
1120		goto clear_hash;
1121	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
1122		goto clear_hash;
1123	if (tcp_md5_hash_header(hp, th))
1124		goto clear_hash;
1125	if (tcp_md5_hash_key(hp, key))
1126		goto clear_hash;
1127	if (crypto_hash_final(desc, md5_hash))
1128		goto clear_hash;
1129
1130	tcp_put_md5sig_pool();
1131	return 0;
1132
1133clear_hash:
1134	tcp_put_md5sig_pool();
1135clear_hash_noput:
1136	memset(md5_hash, 0, 16);
1137	return 1;
1138}
1139
1140int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
1141			const struct sock *sk, const struct request_sock *req,
1142			const struct sk_buff *skb)
1143{
1144	struct tcp_md5sig_pool *hp;
1145	struct hash_desc *desc;
1146	const struct tcphdr *th = tcp_hdr(skb);
1147	__be32 saddr, daddr;
1148
1149	if (sk) {
1150		saddr = inet_sk(sk)->inet_saddr;
1151		daddr = inet_sk(sk)->inet_daddr;
1152	} else if (req) {
1153		saddr = inet_rsk(req)->ir_loc_addr;
1154		daddr = inet_rsk(req)->ir_rmt_addr;
1155	} else {
1156		const struct iphdr *iph = ip_hdr(skb);
1157		saddr = iph->saddr;
1158		daddr = iph->daddr;
1159	}
1160
1161	hp = tcp_get_md5sig_pool();
1162	if (!hp)
1163		goto clear_hash_noput;
1164	desc = &hp->md5_desc;
1165
1166	if (crypto_hash_init(desc))
1167		goto clear_hash;
1168
1169	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
1170		goto clear_hash;
1171	if (tcp_md5_hash_header(hp, th))
1172		goto clear_hash;
1173	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
1174		goto clear_hash;
1175	if (tcp_md5_hash_key(hp, key))
1176		goto clear_hash;
1177	if (crypto_hash_final(desc, md5_hash))
1178		goto clear_hash;
1179
1180	tcp_put_md5sig_pool();
1181	return 0;
1182
1183clear_hash:
1184	tcp_put_md5sig_pool();
1185clear_hash_noput:
1186	memset(md5_hash, 0, 16);
1187	return 1;
1188}
1189EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
1190
1191static bool tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
1192{
1193	/*
1194	 * This gets called for each TCP segment that arrives
1195	 * so we want to be efficient.
1196	 * We have 3 drop cases:
1197	 * o No MD5 hash and one expected.
1198	 * o MD5 hash and we're not expecting one.
 1199	 * o MD5 hash and it's wrong.
1200	 */
1201	const __u8 *hash_location = NULL;
1202	struct tcp_md5sig_key *hash_expected;
1203	const struct iphdr *iph = ip_hdr(skb);
1204	const struct tcphdr *th = tcp_hdr(skb);
1205	int genhash;
1206	unsigned char newhash[16];
1207
1208	hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
1209					  AF_INET);
1210	hash_location = tcp_parse_md5sig_option(th);
1211
1212	/* We've parsed the options - do we have a hash? */
1213	if (!hash_expected && !hash_location)
1214		return false;
1215
1216	if (hash_expected && !hash_location) {
1217		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
1218		return true;
1219	}
1220
1221	if (!hash_expected && hash_location) {
1222		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
1223		return true;
1224	}
1225
1226	/* Okay, so this is hash_expected and hash_location -
1227	 * so we need to calculate the checksum.
1228	 */
1229	genhash = tcp_v4_md5_hash_skb(newhash,
1230				      hash_expected,
1231				      NULL, NULL, skb);
1232
1233	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
1234		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
1235				     &iph->saddr, ntohs(th->source),
1236				     &iph->daddr, ntohs(th->dest),
1237				     genhash ? " tcp_v4_calc_md5_hash failed"
1238				     : "");
1239		return true;
1240	}
1241	return false;
1242}
1243
1244#endif
1245
1246struct request_sock_ops tcp_request_sock_ops __read_mostly = {
1247	.family		=	PF_INET,
1248	.obj_size	=	sizeof(struct tcp_request_sock),
1249	.rtx_syn_ack	=	tcp_v4_rtx_synack,
1250	.send_ack	=	tcp_v4_reqsk_send_ack,
1251	.destructor	=	tcp_v4_reqsk_destructor,
1252	.send_reset	=	tcp_v4_send_reset,
1253	.syn_ack_timeout = 	tcp_syn_ack_timeout,
1254};
1255
1256#ifdef CONFIG_TCP_MD5SIG
1257static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1258	.md5_lookup	=	tcp_v4_reqsk_md5_lookup,
1259	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
1260};
1261#endif
1262
1263static bool tcp_fastopen_check(struct sock *sk, struct sk_buff *skb,
1264			       struct request_sock *req,
1265			       struct tcp_fastopen_cookie *foc,
1266			       struct tcp_fastopen_cookie *valid_foc)
1267{
1268	bool skip_cookie = false;
1269	struct fastopen_queue *fastopenq;
1270
1271	if (likely(!fastopen_cookie_present(foc))) {
1272		/* See include/net/tcp.h for the meaning of these knobs */
1273		if ((sysctl_tcp_fastopen & TFO_SERVER_ALWAYS) ||
1274		    ((sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_REQD) &&
1275		    (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1)))
1276			skip_cookie = true; /* no cookie to validate */
1277		else
1278			return false;
1279	}
1280	fastopenq = inet_csk(sk)->icsk_accept_queue.fastopenq;
1281	/* A FO option is present; bump the counter. */
1282	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVE);
1283
1284	/* Make sure the listener has enabled fastopen, and we don't
1285	 * exceed the max # of pending TFO requests allowed before trying
 1286	 * to validate the cookie, in order to avoid burning CPU cycles
1287	 * unnecessarily.
1288	 *
1289	 * XXX (TFO) - The implication of checking the max_qlen before
1290	 * processing a cookie request is that clients can't differentiate
1291	 * between qlen overflow causing Fast Open to be disabled
1292	 * temporarily vs a server not supporting Fast Open at all.
1293	 */
1294	if ((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) == 0 ||
1295	    fastopenq == NULL || fastopenq->max_qlen == 0)
1296		return false;
1297
1298	if (fastopenq->qlen >= fastopenq->max_qlen) {
1299		struct request_sock *req1;
1300		spin_lock(&fastopenq->lock);
1301		req1 = fastopenq->rskq_rst_head;
1302		if ((req1 == NULL) || time_after(req1->expires, jiffies)) {
1303			spin_unlock(&fastopenq->lock);
1304			NET_INC_STATS_BH(sock_net(sk),
1305			    LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
1306			/* Avoid bumping LINUX_MIB_TCPFASTOPENPASSIVEFAIL*/
1307			foc->len = -1;
1308			return false;
1309		}
1310		fastopenq->rskq_rst_head = req1->dl_next;
1311		fastopenq->qlen--;
1312		spin_unlock(&fastopenq->lock);
1313		reqsk_free(req1);
1314	}
1315	if (skip_cookie) {
1316		tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
1317		return true;
1318	}
1319
1320	if (foc->len == TCP_FASTOPEN_COOKIE_SIZE) {
1321		if ((sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_CHKED) == 0) {
1322			tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr,
1323						ip_hdr(skb)->daddr, valid_foc);
1324			if ((valid_foc->len != TCP_FASTOPEN_COOKIE_SIZE) ||
1325			    memcmp(&foc->val[0], &valid_foc->val[0],
1326			    TCP_FASTOPEN_COOKIE_SIZE) != 0)
1327				return false;
1328			valid_foc->len = -1;
1329		}
1330		/* Acknowledge the data received from the peer. */
1331		tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
1332		return true;
1333	} else if (foc->len == 0) { /* Client requesting a cookie */
1334		tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr,
1335					ip_hdr(skb)->daddr, valid_foc);
1336		NET_INC_STATS_BH(sock_net(sk),
1337		    LINUX_MIB_TCPFASTOPENCOOKIEREQD);
1338	} else {
 1339		/* Client sent a cookie with the wrong size. Treat it
1340		 * the same as invalid and return a valid one.
1341		 */
1342		tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr,
1343					ip_hdr(skb)->daddr, valid_foc);
1344	}
1345	return false;
1346}
1347
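/* [Editorial aside -- not part of tcp_ipv4.c] The server-side Fast Open
 * logic above is driven by two userspace knobs: TCP_FASTOPEN on the
 * listener (which sets fastopenq->max_qlen) and MSG_FASTOPEN on the client
 * (which lets sendto() carry data in the SYN). A hedged sketch; the
 * fallback constant values are taken from the uapi headers of this era and
 * may already be provided by the libc.
 */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

#ifndef TCP_FASTOPEN
#define TCP_FASTOPEN	23
#endif
#ifndef MSG_FASTOPEN
#define MSG_FASTOPEN	0x20000000
#endif

static int listener_enable_tfo(int listen_fd)
{
	int qlen = 16;	/* max pending TFO requests, cf. fastopenq->max_qlen */

	return setsockopt(listen_fd, IPPROTO_TCP, TCP_FASTOPEN,
			  &qlen, sizeof(qlen));
}

static ssize_t client_tfo_send(int fd, const struct sockaddr_in *peer,
			       const void *buf, size_t len)
{
	/* Data rides in the SYN; the cookie exchange itself is handled in
	 * the kernel (tcp_fastopen_check() above on the passive side).
	 */
	return sendto(fd, buf, len, MSG_FASTOPEN,
		      (const struct sockaddr *)peer, sizeof(*peer));
}
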
1348static int tcp_v4_conn_req_fastopen(struct sock *sk,
1349				    struct sk_buff *skb,
1350				    struct sk_buff *skb_synack,
1351				    struct request_sock *req)
1352{
1353	struct tcp_sock *tp = tcp_sk(sk);
1354	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
1355	const struct inet_request_sock *ireq = inet_rsk(req);
1356	struct sock *child;
1357	int err;
1358
1359	req->num_retrans = 0;
1360	req->num_timeout = 0;
1361	req->sk = NULL;
1362
1363	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
1364	if (child == NULL) {
1365		NET_INC_STATS_BH(sock_net(sk),
1366				 LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
1367		kfree_skb(skb_synack);
1368		return -1;
1369	}
1370	err = ip_build_and_send_pkt(skb_synack, sk, ireq->ir_loc_addr,
1371				    ireq->ir_rmt_addr, ireq->opt);
1372	err = net_xmit_eval(err);
1373	if (!err)
1374		tcp_rsk(req)->snt_synack = tcp_time_stamp;
1375	/* XXX (TFO) - is it ok to ignore error and continue? */
1376
1377	spin_lock(&queue->fastopenq->lock);
1378	queue->fastopenq->qlen++;
1379	spin_unlock(&queue->fastopenq->lock);
1380
1381	/* Initialize the child socket. Have to fix some values to take
 1382	 * into account that the child is a Fast Open socket and is created
1383	 * only out of the bits carried in the SYN packet.
1384	 */
1385	tp = tcp_sk(child);
1386
1387	tp->fastopen_rsk = req;
 1388	/* Do a hold on the listener sk so that if the listener is being
1389	 * closed, the child that has been accepted can live on and still
1390	 * access listen_lock.
1391	 */
1392	sock_hold(sk);
1393	tcp_rsk(req)->listener = sk;
1394
1395	/* RFC1323: The window in SYN & SYN/ACK segments is never
1396	 * scaled. So correct it appropriately.
1397	 */
1398	tp->snd_wnd = ntohs(tcp_hdr(skb)->window);
1399
1400	/* Activate the retrans timer so that SYNACK can be retransmitted.
1401	 * The request socket is not added to the SYN table of the parent
1402	 * because it's been added to the accept queue directly.
1403	 */
1404	inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
1405	    TCP_TIMEOUT_INIT, TCP_RTO_MAX);
1406
1407	/* Add the child socket directly into the accept queue */
1408	inet_csk_reqsk_queue_add(sk, req, child);
1409
1410	/* Now finish processing the fastopen child socket. */
1411	inet_csk(child)->icsk_af_ops->rebuild_header(child);
1412	tcp_init_congestion_control(child);
1413	tcp_mtup_init(child);
1414	tcp_init_metrics(child);
1415	tcp_init_buffer_space(child);
1416
1417	/* Queue the data carried in the SYN packet. We need to first
1418	 * bump skb's refcnt because the caller will attempt to free it.
1419	 *
1420	 * XXX (TFO) - we honor a zero-payload TFO request for now.
1421	 * (Any reason not to?)
1422	 */
1423	if (TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq + 1) {
1424		/* Don't queue the skb if there is no payload in SYN.
1425		 * XXX (TFO) - How about SYN+FIN?
1426		 */
1427		tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
1428	} else {
1429		skb = skb_get(skb);
1430		skb_dst_drop(skb);
1431		__skb_pull(skb, tcp_hdr(skb)->doff * 4);
1432		skb_set_owner_r(skb, child);
1433		__skb_queue_tail(&child->sk_receive_queue, skb);
1434		tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
1435		tp->syn_data_acked = 1;
1436	}
1437	sk->sk_data_ready(sk);
1438	bh_unlock_sock(child);
1439	sock_put(child);
1440	WARN_ON(req->sk == NULL);
1441	return 0;
1442}
1443
1444int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1445{
1446	struct tcp_options_received tmp_opt;
1447	struct request_sock *req;
1448	struct inet_request_sock *ireq;
1449	struct tcp_sock *tp = tcp_sk(sk);
1450	struct dst_entry *dst = NULL;
1451	__be32 saddr = ip_hdr(skb)->saddr;
1452	__be32 daddr = ip_hdr(skb)->daddr;
1453	__u32 isn = TCP_SKB_CB(skb)->when;
1454	bool want_cookie = false;
1455	struct flowi4 fl4;
1456	struct tcp_fastopen_cookie foc = { .len = -1 };
1457	struct tcp_fastopen_cookie valid_foc = { .len = -1 };
1458	struct sk_buff *skb_synack;
1459	int do_fastopen;
1460
 1461	/* Never answer SYNs sent to broadcast or multicast */
1462	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
1463		goto drop;
1464
1465	/* TW buckets are converted to open requests without
 1466	 * limitations; they conserve resources and the peer is
 1467	 * evidently a real one.
1468	 */
1469	if ((sysctl_tcp_syncookies == 2 ||
1470	     inet_csk_reqsk_queue_is_full(sk)) && !isn) {
1471		want_cookie = tcp_syn_flood_action(sk, skb, "TCP");
1472		if (!want_cookie)
1473			goto drop;
1474	}
1475
 1476	/* The accept backlog is full. If we have already queued enough
 1477	 * warm entries in the syn queue, drop the request. That is better
 1478	 * than clogging the syn queue with openreqs with exponentially
 1479	 * increasing timeouts.
1480	 */
1481	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
1482		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1483		goto drop;
1484	}
1485
1486	req = inet_reqsk_alloc(&tcp_request_sock_ops);
1487	if (!req)
1488		goto drop;
1489
1490#ifdef CONFIG_TCP_MD5SIG
1491	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
1492#endif
1493
1494	tcp_clear_options(&tmp_opt);
1495	tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
1496	tmp_opt.user_mss  = tp->rx_opt.user_mss;
1497	tcp_parse_options(skb, &tmp_opt, 0, want_cookie ? NULL : &foc);
1498
1499	if (want_cookie && !tmp_opt.saw_tstamp)
1500		tcp_clear_options(&tmp_opt);
1501
1502	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
1503	tcp_openreq_init(req, &tmp_opt, skb);
1504
1505	ireq = inet_rsk(req);
1506	ireq->ir_loc_addr = daddr;
1507	ireq->ir_rmt_addr = saddr;
1508	ireq->no_srccheck = inet_sk(sk)->transparent;
1509	ireq->opt = tcp_v4_save_options(skb);
1510
1511	if (security_inet_conn_request(sk, skb, req))
1512		goto drop_and_free;
1513
1514	if (!want_cookie || tmp_opt.tstamp_ok)
1515		TCP_ECN_create_request(req, skb, sock_net(sk));
1516
1517	if (want_cookie) {
1518		isn = cookie_v4_init_sequence(sk, skb, &req->mss);
1519		req->cookie_ts = tmp_opt.tstamp_ok;
1520	} else if (!isn) {
 1521		/* VJ's idea. We save the last timestamp seen
 1522		 * from the destination in the peer table when entering
 1523		 * TIME-WAIT state, and check against it before
 1524		 * accepting a new connection request.
 1525		 *
 1526		 * If "isn" is not zero, this request hit an alive
 1527		 * timewait bucket, so all the necessary checks
 1528		 * are made in the function processing the timewait state.
1529		 */
1530		if (tmp_opt.saw_tstamp &&
1531		    tcp_death_row.sysctl_tw_recycle &&
1532		    (dst = inet_csk_route_req(sk, &fl4, req)) != NULL &&
1533		    fl4.daddr == saddr) {
1534			if (!tcp_peer_is_proven(req, dst, true)) {
1535				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
1536				goto drop_and_release;
1537			}
1538		}
1539		/* Kill the following clause, if you dislike this way. */
1540		else if (!sysctl_tcp_syncookies &&
1541			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
1542			  (sysctl_max_syn_backlog >> 2)) &&
1543			 !tcp_peer_is_proven(req, dst, false)) {
 1544			/* Without syncookies, the last quarter of
 1545			 * the backlog is filled with destinations
 1546			 * proven to be alive.
 1547			 * It means that we continue to communicate
 1548			 * with destinations already remembered
 1549			 * at the moment of the synflood.
1550			 */
1551			LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("drop open request from %pI4/%u\n"),
1552				       &saddr, ntohs(tcp_hdr(skb)->source));
1553			goto drop_and_release;
1554		}
1555
1556		isn = tcp_v4_init_sequence(skb);
1557	}
1558	tcp_rsk(req)->snt_isn = isn;
1559
1560	if (dst == NULL) {
1561		dst = inet_csk_route_req(sk, &fl4, req);
1562		if (dst == NULL)
1563			goto drop_and_free;
1564	}
1565	do_fastopen = tcp_fastopen_check(sk, skb, req, &foc, &valid_foc);
1566
1567	/* We don't call tcp_v4_send_synack() directly because we need
1568	 * to make sure a child socket can be created successfully before
1569	 * sending back synack!
1570	 *
1571	 * XXX (TFO) - Ideally one would simply call tcp_v4_send_synack()
1572	 * (or better yet, call tcp_send_synack() in the child context
 1573	 * directly, but we will have to fix a bunch of other code first)
1574	 * after syn_recv_sock() except one will need to first fix the
1575	 * latter to remove its dependency on the current implementation
1576	 * of tcp_v4_send_synack()->tcp_select_initial_window().
1577	 */
1578	skb_synack = tcp_make_synack(sk, dst, req,
1579	    fastopen_cookie_present(&valid_foc) ? &valid_foc : NULL);
1580
1581	if (skb_synack) {
1582		__tcp_v4_send_check(skb_synack, ireq->ir_loc_addr, ireq->ir_rmt_addr);
1583		skb_set_queue_mapping(skb_synack, skb_get_queue_mapping(skb));
1584	} else
1585		goto drop_and_free;
1586
1587	if (likely(!do_fastopen)) {
1588		int err;
1589		err = ip_build_and_send_pkt(skb_synack, sk, ireq->ir_loc_addr,
1590		     ireq->ir_rmt_addr, ireq->opt);
1591		err = net_xmit_eval(err);
1592		if (err || want_cookie)
1593			goto drop_and_free;
1594
1595		tcp_rsk(req)->snt_synack = tcp_time_stamp;
1596		tcp_rsk(req)->listener = NULL;
1597		/* Add the request_sock to the SYN table */
1598		inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1599		if (fastopen_cookie_present(&foc) && foc.len != 0)
1600			NET_INC_STATS_BH(sock_net(sk),
1601			    LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
1602	} else if (tcp_v4_conn_req_fastopen(sk, skb, skb_synack, req))
1603		goto drop_and_free;
1604
1605	return 0;
1606
1607drop_and_release:
1608	dst_release(dst);
1609drop_and_free:
1610	reqsk_free(req);
1611drop:
1612	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1613	return 0;
1614}
1615EXPORT_SYMBOL(tcp_v4_conn_request);
1616
1617
1618/*
1619 * The three way handshake has completed - we got a valid synack -
1620 * now create the new socket.
1621 */
1622struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1623				  struct request_sock *req,
1624				  struct dst_entry *dst)
1625{
1626	struct inet_request_sock *ireq;
1627	struct inet_sock *newinet;
1628	struct tcp_sock *newtp;
1629	struct sock *newsk;
1630#ifdef CONFIG_TCP_MD5SIG
1631	struct tcp_md5sig_key *key;
1632#endif
1633	struct ip_options_rcu *inet_opt;
1634
1635	if (sk_acceptq_is_full(sk))
1636		goto exit_overflow;
1637
1638	newsk = tcp_create_openreq_child(sk, req, skb);
1639	if (!newsk)
1640		goto exit_nonewsk;
1641
1642	newsk->sk_gso_type = SKB_GSO_TCPV4;
1643	inet_sk_rx_dst_set(newsk, skb);
1644
1645	newtp		      = tcp_sk(newsk);
1646	newinet		      = inet_sk(newsk);
1647	ireq		      = inet_rsk(req);
1648	newinet->inet_daddr   = ireq->ir_rmt_addr;
1649	newinet->inet_rcv_saddr = ireq->ir_loc_addr;
1650	newinet->inet_saddr	      = ireq->ir_loc_addr;
1651	inet_opt	      = ireq->opt;
1652	rcu_assign_pointer(newinet->inet_opt, inet_opt);
1653	ireq->opt	      = NULL;
1654	newinet->mc_index     = inet_iif(skb);
1655	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
1656	newinet->rcv_tos      = ip_hdr(skb)->tos;
1657	inet_csk(newsk)->icsk_ext_hdr_len = 0;
1658	if (inet_opt)
1659		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
1660	newinet->inet_id = newtp->write_seq ^ jiffies;
1661
1662	if (!dst) {
1663		dst = inet_csk_route_child_sock(sk, newsk, req);
1664		if (!dst)
1665			goto put_and_exit;
1666	} else {
1667		/* syncookie case : see end of cookie_v4_check() */
1668	}
1669	sk_setup_caps(newsk, dst);
1670
1671	tcp_sync_mss(newsk, dst_mtu(dst));
1672	newtp->advmss = dst_metric_advmss(dst);
1673	if (tcp_sk(sk)->rx_opt.user_mss &&
1674	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1675		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1676
1677	tcp_initialize_rcv_mss(newsk);
1678
1679#ifdef CONFIG_TCP_MD5SIG
1680	/* Copy over the MD5 key from the original socket */
1681	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
1682				AF_INET);
1683	if (key != NULL) {
1684		/*
1685		 * We're using one, so create a matching key
1686		 * on the newsk structure. If we fail to get
1687		 * memory, then we end up not copying the key
1688		 * across. Shucks.
1689		 */
1690		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
1691			       AF_INET, key->key, key->keylen, GFP_ATOMIC);
1692		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
1693	}
1694#endif
1695
1696	if (__inet_inherit_port(sk, newsk) < 0)
1697		goto put_and_exit;
1698	__inet_hash_nolisten(newsk, NULL);
1699
1700	return newsk;
1701
1702exit_overflow:
1703	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1704exit_nonewsk:
1705	dst_release(dst);
1706exit:
1707	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1708	return NULL;
1709put_and_exit:
1710	inet_csk_prepare_forced_close(newsk);
1711	tcp_done(newsk);
1712	goto exit;
1713}
1714EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
1715
1716static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
1717{
1718	struct tcphdr *th = tcp_hdr(skb);
1719	const struct iphdr *iph = ip_hdr(skb);
1720	struct sock *nsk;
1721	struct request_sock **prev;
1722	/* Find possible connection requests. */
1723	struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
1724						       iph->saddr, iph->daddr);
1725	if (req)
1726		return tcp_check_req(sk, skb, req, prev, false);
1727
1728	nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
1729			th->source, iph->daddr, th->dest, inet_iif(skb));
1730
1731	if (nsk) {
1732		if (nsk->sk_state != TCP_TIME_WAIT) {
1733			bh_lock_sock(nsk);
1734			return nsk;
1735		}
1736		inet_twsk_put(inet_twsk(nsk));
1737		return NULL;
1738	}
1739
1740#ifdef CONFIG_SYN_COOKIES
1741	if (!th->syn)
1742		sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
1743#endif
1744	return sk;
1745}
1746
1747static __sum16 tcp_v4_checksum_init(struct sk_buff *skb)
1748{
1749	const struct iphdr *iph = ip_hdr(skb);
1750
1751	if (skb->ip_summed == CHECKSUM_COMPLETE) {
1752		if (!tcp_v4_check(skb->len, iph->saddr,
1753				  iph->daddr, skb->csum)) {
1754			skb->ip_summed = CHECKSUM_UNNECESSARY;
1755			return 0;
1756		}
1757	}
1758
1759	skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
1760				       skb->len, IPPROTO_TCP, 0);
1761
1762	if (skb->len <= 76) {
1763		return __skb_checksum_complete(skb);
1764	}
1765	return 0;
1766}
1767
1768
1769/* The socket must have its spinlock held when we get
1770 * here.
1771 *
1772 * We have a potential double-lock case here, so even when
1773 * doing backlog processing we use the BH locking scheme.
1774 * This is because we cannot sleep with the original spinlock
1775 * held.
1776 */
1777int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1778{
1779	struct sock *rsk;
1780#ifdef CONFIG_TCP_MD5SIG
1781	/*
1782	 * We really want to reject the packet as early as possible
1783	 * if:
1784	 *  o We're expecting an MD5'd packet and there is no MD5 TCP option
1785	 *  o There is an MD5 option and we're not expecting one
1786	 */
1787	if (tcp_v4_inbound_md5_hash(sk, skb))
1788		goto discard;
1789#endif
1790
1791	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1792		struct dst_entry *dst = sk->sk_rx_dst;
1793
1794		sock_rps_save_rxhash(sk, skb);
1795		if (dst) {
1796			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1797			    dst->ops->check(dst, 0) == NULL) {
1798				dst_release(dst);
1799				sk->sk_rx_dst = NULL;
1800			}
1801		}
1802		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
1803		return 0;
1804	}
1805
1806	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1807		goto csum_err;
1808
1809	if (sk->sk_state == TCP_LISTEN) {
1810		struct sock *nsk = tcp_v4_hnd_req(sk, skb);
1811		if (!nsk)
1812			goto discard;
1813
1814		if (nsk != sk) {
1815			sock_rps_save_rxhash(nsk, skb);
1816			if (tcp_child_process(sk, nsk, skb)) {
1817				rsk = nsk;
1818				goto reset;
1819			}
1820			return 0;
1821		}
1822	} else
1823		sock_rps_save_rxhash(sk, skb);
1824
1825	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
1826		rsk = sk;
1827		goto reset;
1828	}
1829	return 0;
1830
1831reset:
1832	tcp_v4_send_reset(rsk, skb);
1833discard:
1834	kfree_skb(skb);
1835	/* Be careful here. If this function gets more complicated and
1836	 * gcc suffers from register pressure on the x86, sk (in %ebx)
1837	 * might be destroyed here. This current version compiles correctly,
1838	 * but you have been warned.
1839	 */
1840	return 0;
1841
1842csum_err:
1843	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
1844	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1845	goto discard;
1846}
1847EXPORT_SYMBOL(tcp_v4_do_rcv);
1848
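/* Early demux: look up an established socket for this segment before
 * routing, so that the socket's cached dst (sk->sk_rx_dst) can be reused
 * and a full route lookup avoided on the fast path.
 */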
1849void tcp_v4_early_demux(struct sk_buff *skb)
1850{
1851	const struct iphdr *iph;
1852	const struct tcphdr *th;
1853	struct sock *sk;
1854
1855	if (skb->pkt_type != PACKET_HOST)
1856		return;
1857
1858	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1859		return;
1860
1861	iph = ip_hdr(skb);
1862	th = tcp_hdr(skb);
1863
1864	if (th->doff < sizeof(struct tcphdr) / 4)
1865		return;
1866
1867	sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1868				       iph->saddr, th->source,
1869				       iph->daddr, ntohs(th->dest),
1870				       skb->skb_iif);
1871	if (sk) {
1872		skb->sk = sk;
1873		skb->destructor = sock_edemux;
1874		if (sk->sk_state != TCP_TIME_WAIT) {
1875			struct dst_entry *dst = sk->sk_rx_dst;
1876
1877			if (dst)
1878				dst = dst_check(dst, 0);
1879			if (dst &&
1880			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1881				skb_dst_set_noref(skb, dst);
1882		}
1883	}
1884}
1885
1886/* Packet is added to VJ-style prequeue for processing in process
1887 * context, if a reader task is waiting. Apparently, this exciting
1888 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
1889 * failed somewhere. Latency? Burstiness? Well, at least now we will
1890 * see why it failed. 8)8)				  --ANK
1891 *
1892 */
1893bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
1894{
1895	struct tcp_sock *tp = tcp_sk(sk);
1896
1897	if (sysctl_tcp_low_latency || !tp->ucopy.task)
1898		return false;
1899
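	/* A segment carrying no payload (e.g. a pure ACK) is not worth
	 * deferring to process context while the prequeue is empty.
	 */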
1900	if (skb->len <= tcp_hdrlen(skb) &&
1901	    skb_queue_len(&tp->ucopy.prequeue) == 0)
1902		return false;
1903
1904	skb_dst_force(skb);
1905	__skb_queue_tail(&tp->ucopy.prequeue, skb);
1906	tp->ucopy.memory += skb->truesize;
1907	if (tp->ucopy.memory > sk->sk_rcvbuf) {
1908		struct sk_buff *skb1;
1909
1910		BUG_ON(sock_owned_by_user(sk));
1911
1912		while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
1913			sk_backlog_rcv(sk, skb1);
1914			NET_INC_STATS_BH(sock_net(sk),
1915					 LINUX_MIB_TCPPREQUEUEDROPPED);
1916		}
1917
1918		tp->ucopy.memory = 0;
1919	} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
1920		wake_up_interruptible_sync_poll(sk_sleep(sk),
1921					   POLLIN | POLLRDNORM | POLLRDBAND);
1922		if (!inet_csk_ack_scheduled(sk))
1923			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
1924						  (3 * tcp_rto_min(sk)) / 4,
1925						  TCP_RTO_MAX);
1926	}
1927	return true;
1928}
1929EXPORT_SYMBOL(tcp_prequeue);
1930
1931/*
1932 *	From tcp_input.c
1933 */
1934
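/* Main receive entry point: validate the header and checksum, fill in
 * the control block, look up the owning socket and dispatch the segment
 * to it (directly, via the prequeue, or via the socket backlog when the
 * socket is owned by a user context).
 */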
1935int tcp_v4_rcv(struct sk_buff *skb)
1936{
1937	const struct iphdr *iph;
1938	const struct tcphdr *th;
1939	struct sock *sk;
1940	int ret;
1941	struct net *net = dev_net(skb->dev);
1942
1943	if (skb->pkt_type != PACKET_HOST)
1944		goto discard_it;
1945
1946	/* Count it even if it's bad */
1947	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1948
1949	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1950		goto discard_it;
1951
1952	th = tcp_hdr(skb);
1953
1954	if (th->doff < sizeof(struct tcphdr) / 4)
1955		goto bad_packet;
1956	if (!pskb_may_pull(skb, th->doff * 4))
1957		goto discard_it;
1958
1959	/* An explanation is required here, I think.
1960	 * Packet length and doff are validated by header prediction,
1961	 * provided the case of th->doff==0 is eliminated.
1962	 * So, we defer the checks. */
1963	if (!skb_csum_unnecessary(skb) && tcp_v4_checksum_init(skb))
1964		goto csum_error;
1965
1966	th = tcp_hdr(skb);
1967	iph = ip_hdr(skb);
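	/* end_seq accounts for the SYN and FIN flags, each of which
	 * consumes one sequence number.
	 */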
1968	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1969	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1970				    skb->len - th->doff * 4);
1971	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1972	TCP_SKB_CB(skb)->when	 = 0;
1973	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
1974	TCP_SKB_CB(skb)->sacked	 = 0;
1975
1976	sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
1977	if (!sk)
1978		goto no_tcp_socket;
1979
1980process:
1981	if (sk->sk_state == TCP_TIME_WAIT)
1982		goto do_time_wait;
1983
1984	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
1985		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1986		goto discard_and_relse;
1987	}
1988
1989	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1990		goto discard_and_relse;
1991	nf_reset(skb);
1992
1993	if (sk_filter(sk, skb))
1994		goto discard_and_relse;
1995
1996	sk_mark_napi_id(sk, skb);
1997	skb->dev = NULL;
1998
1999	bh_lock_sock_nested(sk);
2000	ret = 0;
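	/* Process the segment now if no user context owns the socket;
	 * otherwise queue it to the backlog to be replayed by release_sock().
	 */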
2001	if (!sock_owned_by_user(sk)) {
2002#ifdef CONFIG_NET_DMA
2003		struct tcp_sock *tp = tcp_sk(sk);
2004		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
2005			tp->ucopy.dma_chan = net_dma_find_channel();
2006		if (tp->ucopy.dma_chan)
2007			ret = tcp_v4_do_rcv(sk, skb);
2008		else
2009#endif
2010		{
2011			if (!tcp_prequeue(sk, skb))
2012				ret = tcp_v4_do_rcv(sk, skb);
2013		}
2014	} else if (unlikely(sk_add_backlog(sk, skb,
2015					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
2016		bh_unlock_sock(sk);
2017		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
2018		goto discard_and_relse;
2019	}
2020	bh_unlock_sock(sk);
2021
2022	sock_put(sk);
2023
2024	return ret;
2025
2026no_tcp_socket:
2027	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
2028		goto discard_it;
2029
2030	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
2031csum_error:
2032		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
2033bad_packet:
2034		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
2035	} else {
2036		tcp_v4_send_reset(NULL, skb);
2037	}
2038
2039discard_it:
2040	/* Discard frame. */
2041	kfree_skb(skb);
2042	return 0;
2043
2044discard_and_relse:
2045	sock_put(sk);
2046	goto discard_it;
2047
2048do_time_wait:
2049	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
2050		inet_twsk_put(inet_twsk(sk));
2051		goto discard_it;
2052	}
2053
2054	if (skb->len < (th->doff << 2)) {
2055		inet_twsk_put(inet_twsk(sk));
2056		goto bad_packet;
2057	}
2058	if (tcp_checksum_complete(skb)) {
2059		inet_twsk_put(inet_twsk(sk));
2060		goto csum_error;
2061	}
2062	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
2063	case TCP_TW_SYN: {
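		/* An acceptable new SYN hit a TIME_WAIT socket: if a
		 * listener exists, recycle the timewait slot and let the
		 * listener handle the SYN.
		 */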
2064		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
2065							&tcp_hashinfo,
2066							iph->saddr, th->source,
2067							iph->daddr, th->dest,
2068							inet_iif(skb));
2069		if (sk2) {
2070			inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
2071			inet_twsk_put(inet_twsk(sk));
2072			sk = sk2;
2073			goto process;
2074		}
2075		/* Fall through to ACK */
2076	}
2077	case TCP_TW_ACK:
2078		tcp_v4_timewait_ack(sk, skb);
2079		break;
2080	case TCP_TW_RST:
2081		goto no_tcp_socket;
2082	case TCP_TW_SUCCESS:;
2083	}
2084	goto discard_it;
2085}
2086
2087static struct timewait_sock_ops tcp_timewait_sock_ops = {
2088	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
2089	.twsk_unique	= tcp_twsk_unique,
2090	.twsk_destructor= tcp_twsk_destructor,
2091};
2092
2093void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
2094{
2095	struct dst_entry *dst = skb_dst(skb);
2096
2097	dst_hold(dst);
2098	sk->sk_rx_dst = dst;
2099	inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
2100}
2101EXPORT_SYMBOL(inet_sk_rx_dst_set);
2102
2103const struct inet_connection_sock_af_ops ipv4_specific = {
2104	.queue_xmit	   = ip_queue_xmit,
2105	.send_check	   = tcp_v4_send_check,
2106	.rebuild_header	   = inet_sk_rebuild_header,
2107	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
2108	.conn_request	   = tcp_v4_conn_request,
2109	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
2110	.net_header_len	   = sizeof(struct iphdr),
2111	.setsockopt	   = ip_setsockopt,
2112	.getsockopt	   = ip_getsockopt,
2113	.addr2sockaddr	   = inet_csk_addr2sockaddr,
2114	.sockaddr_len	   = sizeof(struct sockaddr_in),
2115	.bind_conflict	   = inet_csk_bind_conflict,
2116#ifdef CONFIG_COMPAT
2117	.compat_setsockopt = compat_ip_setsockopt,
2118	.compat_getsockopt = compat_ip_getsockopt,
2119#endif
2120};
2121EXPORT_SYMBOL(ipv4_specific);
2122
2123#ifdef CONFIG_TCP_MD5SIG
2124static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
2125	.md5_lookup		= tcp_v4_md5_lookup,
2126	.calc_md5_hash		= tcp_v4_md5_hash_skb,
2127	.md5_parse		= tcp_v4_parse_md5_keys,
2128};
2129#endif
2130
2131/* NOTE: A lot of things are set to zero explicitly by the call to
2132 *       sk_alloc(), so they need not be done here.
2133 */
2134static int tcp_v4_init_sock(struct sock *sk)
2135{
2136	struct inet_connection_sock *icsk = inet_csk(sk);
2137
2138	tcp_init_sock(sk);
2139
2140	icsk->icsk_af_ops = &ipv4_specific;
2141
2142#ifdef CONFIG_TCP_MD5SIG
2143	tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
2144#endif
2145
2146	return 0;
2147}
2148
2149void tcp_v4_destroy_sock(struct sock *sk)
2150{
2151	struct tcp_sock *tp = tcp_sk(sk);
2152
2153	tcp_clear_xmit_timers(sk);
2154
2155	tcp_cleanup_congestion_control(sk);
2156
2157	/* Clean up the write buffer. */
2158	tcp_write_queue_purge(sk);
2159
2160	/* Cleans up our, hopefully empty, out_of_order_queue. */
2161	__skb_queue_purge(&tp->out_of_order_queue);
2162
2163#ifdef CONFIG_TCP_MD5SIG
2164	/* Clean up the MD5 key list, if any */
2165	if (tp->md5sig_info) {
2166		tcp_clear_md5_list(sk);
2167		kfree_rcu(tp->md5sig_info, rcu);
2168		tp->md5sig_info = NULL;
2169	}
2170#endif
2171
2172#ifdef CONFIG_NET_DMA
2173	/* Cleans up our sk_async_wait_queue */
2174	__skb_queue_purge(&sk->sk_async_wait_queue);
2175#endif
2176
2177	/* Clean up the prequeue; it really should be empty. */
2178	__skb_queue_purge(&tp->ucopy.prequeue);
2179
2180	/* Clean up a referenced TCP bind bucket. */
2181	if (inet_csk(sk)->icsk_bind_hash)
2182		inet_put_port(sk);
2183
2184	BUG_ON(tp->fastopen_rsk != NULL);
2185
2186	/* If socket is aborted during connect operation */
2187	tcp_free_fastopen_req(tp);
2188
2189	sk_sockets_allocated_dec(sk);
2190	sock_release_memcg(sk);
2191}
2192EXPORT_SYMBOL(tcp_v4_destroy_sock);
2193
2194#ifdef CONFIG_PROC_FS
2195/* Proc filesystem TCP sock list dumping. */
2196
2197/*
2198 * Get the next listener socket following cur.  If cur is NULL, get the
2199 * first socket, starting from the bucket given in st->bucket; when
2200 * st->bucket is zero, the very first socket in the hash table is returned.
2201 */
2202static void *listening_get_next(struct seq_file *seq, void *cur)
2203{
2204	struct inet_connection_sock *icsk;
2205	struct hlist_nulls_node *node;
2206	struct sock *sk = cur;
2207	struct inet_listen_hashbucket *ilb;
2208	struct tcp_iter_state *st = seq->private;
2209	struct net *net = seq_file_net(seq);
2210
2211	if (!sk) {
2212		ilb = &tcp_hashinfo.listening_hash[st->bucket];
2213		spin_lock_bh(&ilb->lock);
2214		sk = sk_nulls_head(&ilb->head);
2215		st->offset = 0;
2216		goto get_sk;
2217	}
2218	ilb = &tcp_hashinfo.listening_hash[st->bucket];
2219	++st->num;
2220	++st->offset;
2221
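	/* In OPENREQ state we are walking the SYN table of the current
	 * listener: advance to the next request, then to the next syn-table
	 * bucket, and finally back to the listening socket chain.
	 */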
2222	if (st->state == TCP_SEQ_STATE_OPENREQ) {
2223		struct request_sock *req = cur;
2224
2225		icsk = inet_csk(st->syn_wait_sk);
2226		req = req->dl_next;
2227		while (1) {
2228			while (req) {
2229				if (req->rsk_ops->family == st->family) {
2230					cur = req;
2231					goto out;
2232				}
2233				req = req->dl_next;
2234			}
2235			if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
2236				break;
2237get_req:
2238			req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
2239		}
2240		sk	  = sk_nulls_next(st->syn_wait_sk);
2241		st->state = TCP_SEQ_STATE_LISTENING;
2242		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2243	} else {
2244		icsk = inet_csk(sk);
2245		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2246		if (reqsk_queue_len(&icsk->icsk_accept_queue))
2247			goto start_req;
2248		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2249		sk = sk_nulls_next(sk);
2250	}
2251get_sk:
2252	sk_nulls_for_each_from(sk, node) {
2253		if (!net_eq(sock_net(sk), net))
2254			continue;
2255		if (sk->sk_family == st->family) {
2256			cur = sk;
2257			goto out;
2258		}
2259		icsk = inet_csk(sk);
2260		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2261		if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
2262start_req:
2263			st->uid		= sock_i_uid(sk);
2264			st->syn_wait_sk = sk;
2265			st->state	= TCP_SEQ_STATE_OPENREQ;
2266			st->sbucket	= 0;
2267			goto get_req;
2268		}
2269		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2270	}
2271	spin_unlock_bh(&ilb->lock);
2272	st->offset = 0;
2273	if (++st->bucket < INET_LHTABLE_SIZE) {
2274		ilb = &tcp_hashinfo.listening_hash[st->bucket];
2275		spin_lock_bh(&ilb->lock);
2276		sk = sk_nulls_head(&ilb->head);
2277		goto get_sk;
2278	}
2279	cur = NULL;
2280out:
2281	return cur;
2282}
2283
2284static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
2285{
2286	struct tcp_iter_state *st = seq->private;
2287	void *rc;
2288
2289	st->bucket = 0;
2290	st->offset = 0;
2291	rc = listening_get_next(seq, NULL);
2292
2293	while (rc && *pos) {
2294		rc = listening_get_next(seq, rc);
2295		--*pos;
2296	}
2297	return rc;
2298}
2299
2300static inline bool empty_bucket(const struct tcp_iter_state *st)
2301{
2302	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
2303}
2304
2305/*
2306 * Get first established socket starting from bucket given in st->bucket.
2307 * If st->bucket is zero, the very first socket in the hash is returned.
2308 */
2309static void *established_get_first(struct seq_file *seq)
2310{
2311	struct tcp_iter_state *st = seq->private;
2312	struct net *net = seq_file_net(seq);
2313	void *rc = NULL;
2314
2315	st->offset = 0;
2316	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
2317		struct sock *sk;
2318		struct hlist_nulls_node *node;
2319		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
2320
2321		/* Lockless fast path for the common case of empty buckets */
2322		if (empty_bucket(st))
2323			continue;
2324
2325		spin_lock_bh(lock);
2326		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
2327			if (sk->sk_family != st->family ||
2328			    !net_eq(sock_net(sk), net)) {
2329				continue;
2330			}
2331			rc = sk;
2332			goto out;
2333		}
2334		spin_unlock_bh(lock);
2335	}
2336out:
2337	return rc;
2338}
2339
2340static void *established_get_next(struct seq_file *seq, void *cur)
2341{
2342	struct sock *sk = cur;
2343	struct hlist_nulls_node *node;
2344	struct tcp_iter_state *st = seq->private;
2345	struct net *net = seq_file_net(seq);
2346
2347	++st->num;
2348	++st->offset;
2349
2350	sk = sk_nulls_next(sk);
2351
2352	sk_nulls_for_each_from(sk, node) {
2353		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
2354			return sk;
2355	}
2356
2357	spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2358	++st->bucket;
2359	return established_get_first(seq);
2360}
2361
2362static void *established_get_idx(struct seq_file *seq, loff_t pos)
2363{
2364	struct tcp_iter_state *st = seq->private;
2365	void *rc;
2366
2367	st->bucket = 0;
2368	rc = established_get_first(seq);
2369
2370	while (rc && pos) {
2371		rc = established_get_next(seq, rc);
2372		--pos;
2373	}
2374	return rc;
2375}
2376
2377static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2378{
2379	void *rc;
2380	struct tcp_iter_state *st = seq->private;
2381
2382	st->state = TCP_SEQ_STATE_LISTENING;
2383	rc	  = listening_get_idx(seq, &pos);
2384
2385	if (!rc) {
2386		st->state = TCP_SEQ_STATE_ESTABLISHED;
2387		rc	  = established_get_idx(seq, pos);
2388	}
2389
2390	return rc;
2391}
2392
2393static void *tcp_seek_last_pos(struct seq_file *seq)
2394{
2395	struct tcp_iter_state *st = seq->private;
2396	int offset = st->offset;
2397	int orig_num = st->num;
2398	void *rc = NULL;
2399
2400	switch (st->state) {
2401	case TCP_SEQ_STATE_OPENREQ:
2402	case TCP_SEQ_STATE_LISTENING:
2403		if (st->bucket >= INET_LHTABLE_SIZE)
2404			break;
2405		st->state = TCP_SEQ_STATE_LISTENING;
2406		rc = listening_get_next(seq, NULL);
2407		while (offset-- && rc)
2408			rc = listening_get_next(seq, rc);
2409		if (rc)
2410			break;
2411		st->bucket = 0;
2412		st->state = TCP_SEQ_STATE_ESTABLISHED;
2413		/* Fallthrough */
2414	case TCP_SEQ_STATE_ESTABLISHED:
2415		if (st->bucket > tcp_hashinfo.ehash_mask)
2416			break;
2417		rc = established_get_first(seq);
2418		while (offset-- && rc)
2419			rc = established_get_next(seq, rc);
2420	}
2421
2422	st->num = orig_num;
2423
2424	return rc;
2425}
2426
2427static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2428{
2429	struct tcp_iter_state *st = seq->private;
2430	void *rc;
2431
2432	if (*pos && *pos == st->last_pos) {
2433		rc = tcp_seek_last_pos(seq);
2434		if (rc)
2435			goto out;
2436	}
2437
2438	st->state = TCP_SEQ_STATE_LISTENING;
2439	st->num = 0;
2440	st->bucket = 0;
2441	st->offset = 0;
2442	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2443
2444out:
2445	st->last_pos = *pos;
2446	return rc;
2447}
2448
2449static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2450{
2451	struct tcp_iter_state *st = seq->private;
2452	void *rc = NULL;
2453
2454	if (v == SEQ_START_TOKEN) {
2455		rc = tcp_get_idx(seq, 0);
2456		goto out;
2457	}
2458
2459	switch (st->state) {
2460	case TCP_SEQ_STATE_OPENREQ:
2461	case TCP_SEQ_STATE_LISTENING:
2462		rc = listening_get_next(seq, v);
2463		if (!rc) {
2464			st->state = TCP_SEQ_STATE_ESTABLISHED;
2465			st->bucket = 0;
2466			st->offset = 0;
2467			rc	  = established_get_first(seq);
2468		}
2469		break;
2470	case TCP_SEQ_STATE_ESTABLISHED:
2471		rc = established_get_next(seq, v);
2472		break;
2473	}
2474out:
2475	++*pos;
2476	st->last_pos = *pos;
2477	return rc;
2478}
2479
2480static void tcp_seq_stop(struct seq_file *seq, void *v)
2481{
2482	struct tcp_iter_state *st = seq->private;
2483
2484	switch (st->state) {
2485	case TCP_SEQ_STATE_OPENREQ:
2486		if (v) {
2487			struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
2488			read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2489		}
2490	case TCP_SEQ_STATE_LISTENING:
2491		if (v != SEQ_START_TOKEN)
2492			spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
2493		break;
2494	case TCP_SEQ_STATE_ESTABLISHED:
2495		if (v)
2496			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2497		break;
2498	}
2499}
2500
2501int tcp_seq_open(struct inode *inode, struct file *file)
2502{
2503	struct tcp_seq_afinfo *afinfo = PDE_DATA(inode);
2504	struct tcp_iter_state *s;
2505	int err;
2506
2507	err = seq_open_net(inode, file, &afinfo->seq_ops,
2508			  sizeof(struct tcp_iter_state));
2509	if (err < 0)
2510		return err;
2511
2512	s = ((struct seq_file *)file->private_data)->private;
2513	s->family		= afinfo->family;
2514	s->last_pos 		= 0;
2515	return 0;
2516}
2517EXPORT_SYMBOL(tcp_seq_open);
2518
2519int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
2520{
2521	int rc = 0;
2522	struct proc_dir_entry *p;
2523
2524	afinfo->seq_ops.start		= tcp_seq_start;
2525	afinfo->seq_ops.next		= tcp_seq_next;
2526	afinfo->seq_ops.stop		= tcp_seq_stop;
2527
2528	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
2529			     afinfo->seq_fops, afinfo);
2530	if (!p)
2531		rc = -ENOMEM;
2532	return rc;
2533}
2534EXPORT_SYMBOL(tcp_proc_register);
2535
2536void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
2537{
2538	remove_proc_entry(afinfo->name, net->proc_net);
2539}
2540EXPORT_SYMBOL(tcp_proc_unregister);
2541
2542static void get_openreq4(const struct sock *sk, const struct request_sock *req,
2543			 struct seq_file *f, int i, kuid_t uid)
2544{
2545	const struct inet_request_sock *ireq = inet_rsk(req);
2546	long delta = req->expires - jiffies;
2547
2548	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2549		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
2550		i,
2551		ireq->ir_loc_addr,
2552		ntohs(inet_sk(sk)->inet_sport),
2553		ireq->ir_rmt_addr,
2554		ntohs(ireq->ir_rmt_port),
2555		TCP_SYN_RECV,
2556		0, 0, /* could print option size, but that is af dependent. */
2557		1,    /* timers active (only the expire timer) */
2558		jiffies_delta_to_clock_t(delta),
2559		req->num_timeout,
2560		from_kuid_munged(seq_user_ns(f), uid),
2561		0,  /* non standard timer */
2562		0, /* open_requests have no inode */
2563		atomic_read(&sk->sk_refcnt),
2564		req);
2565}
2566
2567static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
2568{
2569	int timer_active;
2570	unsigned long timer_expires;
2571	const struct tcp_sock *tp = tcp_sk(sk);
2572	const struct inet_connection_sock *icsk = inet_csk(sk);
2573	const struct inet_sock *inet = inet_sk(sk);
2574	struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;
2575	__be32 dest = inet->inet_daddr;
2576	__be32 src = inet->inet_rcv_saddr;
2577	__u16 destp = ntohs(inet->inet_dport);
2578	__u16 srcp = ntohs(inet->inet_sport);
2579	int rx_queue;
2580
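	/* Timer codes reported in /proc/net/tcp: 1 = retransmit (or early
	 * retransmit / loss probe), 2 = keepalive (sk_timer), 4 = zero
	 * window probe, 0 = no timer pending.
	 */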
2581	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
2582	    icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
2583	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
2584		timer_active	= 1;
2585		timer_expires	= icsk->icsk_timeout;
2586	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2587		timer_active	= 4;
2588		timer_expires	= icsk->icsk_timeout;
2589	} else if (timer_pending(&sk->sk_timer)) {
2590		timer_active	= 2;
2591		timer_expires	= sk->sk_timer.expires;
2592	} else {
2593		timer_active	= 0;
2594		timer_expires = jiffies;
2595	}
2596
2597	if (sk->sk_state == TCP_LISTEN)
2598		rx_queue = sk->sk_ack_backlog;
2599	else
2600		/*
2601		 * Because we don't lock the socket, we might find a transient negative value.
2602		 */
2603		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
2604
2605	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2606			"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
2607		i, src, srcp, dest, destp, sk->sk_state,
2608		tp->write_seq - tp->snd_una,
2609		rx_queue,
2610		timer_active,
2611		jiffies_delta_to_clock_t(timer_expires - jiffies),
2612		icsk->icsk_retransmits,
2613		from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
2614		icsk->icsk_probes_out,
2615		sock_i_ino(sk),
2616		atomic_read(&sk->sk_refcnt), sk,
2617		jiffies_to_clock_t(icsk->icsk_rto),
2618		jiffies_to_clock_t(icsk->icsk_ack.ato),
2619		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
2620		tp->snd_cwnd,
2621		sk->sk_state == TCP_LISTEN ?
2622		    (fastopenq ? fastopenq->max_qlen : 0) :
2623		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
2624}
2625
2626static void get_timewait4_sock(const struct inet_timewait_sock *tw,
2627			       struct seq_file *f, int i)
2628{
2629	__be32 dest, src;
2630	__u16 destp, srcp;
2631	s32 delta = tw->tw_ttd - inet_tw_time_stamp();
2632
2633	dest  = tw->tw_daddr;
2634	src   = tw->tw_rcv_saddr;
2635	destp = ntohs(tw->tw_dport);
2636	srcp  = ntohs(tw->tw_sport);
2637
2638	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2639		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
2640		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2641		3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
2642		atomic_read(&tw->tw_refcnt), tw);
2643}
2644
2645#define TMPSZ 150
2646
2647static int tcp4_seq_show(struct seq_file *seq, void *v)
2648{
2649	struct tcp_iter_state *st;
2650	struct sock *sk = v;
2651
2652	seq_setwidth(seq, TMPSZ - 1);
2653	if (v == SEQ_START_TOKEN) {
2654		seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
2655			   "rx_queue tr tm->when retrnsmt   uid  timeout "
2656			   "inode");
2657		goto out;
2658	}
2659	st = seq->private;
2660
2661	switch (st->state) {
2662	case TCP_SEQ_STATE_LISTENING:
2663	case TCP_SEQ_STATE_ESTABLISHED:
2664		if (sk->sk_state == TCP_TIME_WAIT)
2665			get_timewait4_sock(v, seq, st->num);
2666		else
2667			get_tcp4_sock(v, seq, st->num);
2668		break;
2669	case TCP_SEQ_STATE_OPENREQ:
2670		get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid);
2671		break;
2672	}
2673out:
2674	seq_pad(seq, '\n');
2675	return 0;
2676}
2677
2678static const struct file_operations tcp_afinfo_seq_fops = {
2679	.owner   = THIS_MODULE,
2680	.open    = tcp_seq_open,
2681	.read    = seq_read,
2682	.llseek  = seq_lseek,
2683	.release = seq_release_net
2684};
2685
2686static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2687	.name		= "tcp",
2688	.family		= AF_INET,
2689	.seq_fops	= &tcp_afinfo_seq_fops,
2690	.seq_ops	= {
2691		.show		= tcp4_seq_show,
2692	},
2693};
2694
2695static int __net_init tcp4_proc_init_net(struct net *net)
2696{
2697	return tcp_proc_register(net, &tcp4_seq_afinfo);
2698}
2699
2700static void __net_exit tcp4_proc_exit_net(struct net *net)
2701{
2702	tcp_proc_unregister(net, &tcp4_seq_afinfo);
2703}
2704
2705static struct pernet_operations tcp4_net_ops = {
2706	.init = tcp4_proc_init_net,
2707	.exit = tcp4_proc_exit_net,
2708};
2709
2710int __init tcp4_proc_init(void)
2711{
2712	return register_pernet_subsys(&tcp4_net_ops);
2713}
2714
2715void tcp4_proc_exit(void)
2716{
2717	unregister_pernet_subsys(&tcp4_net_ops);
2718}
2719#endif /* CONFIG_PROC_FS */
2720
2721struct proto tcp_prot = {
2722	.name			= "TCP",
2723	.owner			= THIS_MODULE,
2724	.close			= tcp_close,
2725	.connect		= tcp_v4_connect,
2726	.disconnect		= tcp_disconnect,
2727	.accept			= inet_csk_accept,
2728	.ioctl			= tcp_ioctl,
2729	.init			= tcp_v4_init_sock,
2730	.destroy		= tcp_v4_destroy_sock,
2731	.shutdown		= tcp_shutdown,
2732	.setsockopt		= tcp_setsockopt,
2733	.getsockopt		= tcp_getsockopt,
2734	.recvmsg		= tcp_recvmsg,
2735	.sendmsg		= tcp_sendmsg,
2736	.sendpage		= tcp_sendpage,
2737	.backlog_rcv		= tcp_v4_do_rcv,
2738	.release_cb		= tcp_release_cb,
2739	.mtu_reduced		= tcp_v4_mtu_reduced,
2740	.hash			= inet_hash,
2741	.unhash			= inet_unhash,
2742	.get_port		= inet_csk_get_port,
2743	.enter_memory_pressure	= tcp_enter_memory_pressure,
2744	.stream_memory_free	= tcp_stream_memory_free,
2745	.sockets_allocated	= &tcp_sockets_allocated,
2746	.orphan_count		= &tcp_orphan_count,
2747	.memory_allocated	= &tcp_memory_allocated,
2748	.memory_pressure	= &tcp_memory_pressure,
2749	.sysctl_mem		= sysctl_tcp_mem,
2750	.sysctl_wmem		= sysctl_tcp_wmem,
2751	.sysctl_rmem		= sysctl_tcp_rmem,
2752	.max_header		= MAX_TCP_HEADER,
2753	.obj_size		= sizeof(struct tcp_sock),
2754	.slab_flags		= SLAB_DESTROY_BY_RCU,
2755	.twsk_prot		= &tcp_timewait_sock_ops,
2756	.rsk_prot		= &tcp_request_sock_ops,
2757	.h.hashinfo		= &tcp_hashinfo,
2758	.no_autobind		= true,
2759#ifdef CONFIG_COMPAT
2760	.compat_setsockopt	= compat_tcp_setsockopt,
2761	.compat_getsockopt	= compat_tcp_getsockopt,
2762#endif
2763#ifdef CONFIG_MEMCG_KMEM
2764	.init_cgroup		= tcp_init_cgroup,
2765	.destroy_cgroup		= tcp_destroy_cgroup,
2766	.proto_cgroup		= tcp_proto_cgroup,
2767#endif
2768};
2769EXPORT_SYMBOL(tcp_prot);
2770
2771static int __net_init tcp_sk_init(struct net *net)
2772{
2773	net->ipv4.sysctl_tcp_ecn = 2;
2774	return 0;
2775}
2776
2777static void __net_exit tcp_sk_exit(struct net *net)
2778{
2779}
2780
2781static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
2782{
2783	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
2784}
2785
2786static struct pernet_operations __net_initdata tcp_sk_ops = {
2787       .init	   = tcp_sk_init,
2788       .exit	   = tcp_sk_exit,
2789       .exit_batch = tcp_sk_exit_batch,
2790};
2791
2792void __init tcp_v4_init(void)
2793{
2794	inet_hashinfo_init(&tcp_hashinfo);
2795	if (register_pernet_subsys(&tcp_sk_ops))
2796		panic("Failed to create the TCP control socket.\n");
2797}
v4.17
  53#define pr_fmt(fmt) "TCP: " fmt
  54
  55#include <linux/bottom_half.h>
  56#include <linux/types.h>
  57#include <linux/fcntl.h>
  58#include <linux/module.h>
  59#include <linux/random.h>
  60#include <linux/cache.h>
  61#include <linux/jhash.h>
  62#include <linux/init.h>
  63#include <linux/times.h>
  64#include <linux/slab.h>
  65
  66#include <net/net_namespace.h>
  67#include <net/icmp.h>
  68#include <net/inet_hashtables.h>
  69#include <net/tcp.h>
  70#include <net/transp_v6.h>
  71#include <net/ipv6.h>
  72#include <net/inet_common.h>
  73#include <net/timewait_sock.h>
  74#include <net/xfrm.h>
  75#include <net/secure_seq.h>
  76#include <net/busy_poll.h>
  77
  78#include <linux/inet.h>
  79#include <linux/ipv6.h>
  80#include <linux/stddef.h>
  81#include <linux/proc_fs.h>
  82#include <linux/seq_file.h>
  83#include <linux/inetdevice.h>
  84
  85#include <crypto/hash.h>
  86#include <linux/scatterlist.h>
  87
  88#include <trace/events/tcp.h>
  89
  90#ifdef CONFIG_TCP_MD5SIG
  91static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
  92			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
  93#endif
  94
  95struct inet_hashinfo tcp_hashinfo;
  96EXPORT_SYMBOL(tcp_hashinfo);
  97
  98static u32 tcp_v4_init_seq(const struct sk_buff *skb)
  99{
 100	return secure_tcp_seq(ip_hdr(skb)->daddr,
 101			      ip_hdr(skb)->saddr,
 102			      tcp_hdr(skb)->dest,
 103			      tcp_hdr(skb)->source);
 104}
 105
 106static u32 tcp_v4_init_ts_off(const struct net *net, const struct sk_buff *skb)
 107{
 108	return secure_tcp_ts_off(net, ip_hdr(skb)->daddr, ip_hdr(skb)->saddr);
 109}
 110
 111int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
 112{
 113	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
 114	struct tcp_sock *tp = tcp_sk(sk);
 115
 116	/* With PAWS, it is safe from the viewpoint
 117	   of data integrity. Even without PAWS it is safe provided sequence
  118	   spaces do not overlap, i.e. at data rates <= 80Mbit/sec.
  119
  120	   Actually, the idea is close to VJ's, only the timestamp cache is
  121	   held not per host but per port pair, and the TW bucket is used as
  122	   the state holder.
  123
  124	   If the TW bucket has already been destroyed we fall back to VJ's scheme
  125	   and use the initial timestamp retrieved from the peer table.
 126	 */
 127	if (tcptw->tw_ts_recent_stamp &&
 128	    (!twp || (sock_net(sk)->ipv4.sysctl_tcp_tw_reuse &&
 129			     get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
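		/* Start the new connection's sequence space beyond anything
		 * the old one could have sent (presumably 65535 + 2 covers the
		 * maximum window plus the SYN and FIN), so segments of the two
		 * incarnations cannot be confused.
		 */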
 130		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
 131		if (tp->write_seq == 0)
 132			tp->write_seq = 1;
 133		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
 134		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
 135		sock_hold(sktw);
 136		return 1;
 137	}
 138
 139	return 0;
 140}
 141EXPORT_SYMBOL_GPL(tcp_twsk_unique);
 142
 143static int tcp_v4_pre_connect(struct sock *sk, struct sockaddr *uaddr,
 144			      int addr_len)
 145{
 146	/* This check is replicated from tcp_v4_connect() and intended to
  147	 * prevent the BPF program called below from accessing bytes that are
  148	 * outside of the bound specified by the user in addr_len.
 149	 */
 150	if (addr_len < sizeof(struct sockaddr_in))
 151		return -EINVAL;
 152
 153	sock_owned_by_me(sk);
 154
 155	return BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr);
 156}
 157
 158/* This will initiate an outgoing connection. */
 159int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 160{
 161	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
 162	struct inet_sock *inet = inet_sk(sk);
 163	struct tcp_sock *tp = tcp_sk(sk);
 164	__be16 orig_sport, orig_dport;
 165	__be32 daddr, nexthop;
 166	struct flowi4 *fl4;
 167	struct rtable *rt;
 168	int err;
 169	struct ip_options_rcu *inet_opt;
 170	struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;
 171
 172	if (addr_len < sizeof(struct sockaddr_in))
 173		return -EINVAL;
 174
 175	if (usin->sin_family != AF_INET)
 176		return -EAFNOSUPPORT;
 177
 178	nexthop = daddr = usin->sin_addr.s_addr;
 179	inet_opt = rcu_dereference_protected(inet->inet_opt,
 180					     lockdep_sock_is_held(sk));
 181	if (inet_opt && inet_opt->opt.srr) {
 182		if (!daddr)
 183			return -EINVAL;
 184		nexthop = inet_opt->opt.faddr;
 185	}
 186
 187	orig_sport = inet->inet_sport;
 188	orig_dport = usin->sin_port;
 189	fl4 = &inet->cork.fl.u.ip4;
 190	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
 191			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
 192			      IPPROTO_TCP,
 193			      orig_sport, orig_dport, sk);
 194	if (IS_ERR(rt)) {
 195		err = PTR_ERR(rt);
 196		if (err == -ENETUNREACH)
 197			IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
 198		return err;
 199	}
 200
 201	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
 202		ip_rt_put(rt);
 203		return -ENETUNREACH;
 204	}
 205
 206	if (!inet_opt || !inet_opt->opt.srr)
 207		daddr = fl4->daddr;
 208
 209	if (!inet->inet_saddr)
 210		inet->inet_saddr = fl4->saddr;
 211	sk_rcv_saddr_set(sk, inet->inet_saddr);
 212
 213	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
 214		/* Reset inherited state */
 215		tp->rx_opt.ts_recent	   = 0;
 216		tp->rx_opt.ts_recent_stamp = 0;
 217		if (likely(!tp->repair))
 218			tp->write_seq	   = 0;
 219	}
 220
 221	inet->inet_dport = usin->sin_port;
 222	sk_daddr_set(sk, daddr);
 223
 224	inet_csk(sk)->icsk_ext_hdr_len = 0;
 225	if (inet_opt)
 226		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
 227
 228	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;
 229
 230	/* Socket identity is still unknown (sport may be zero).
  231	 * However we set the state to SYN-SENT and, without releasing the socket
  232	 * lock, select a source port, enter ourselves into the hash tables and
  233	 * complete initialization after this.
 234	 */
 235	tcp_set_state(sk, TCP_SYN_SENT);
 236	err = inet_hash_connect(tcp_death_row, sk);
 237	if (err)
 238		goto failure;
 239
 240	sk_set_txhash(sk);
 241
 242	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
 243			       inet->inet_sport, inet->inet_dport, sk);
 244	if (IS_ERR(rt)) {
 245		err = PTR_ERR(rt);
 246		rt = NULL;
 247		goto failure;
 248	}
 249	/* OK, now commit destination to socket.  */
 250	sk->sk_gso_type = SKB_GSO_TCPV4;
 251	sk_setup_caps(sk, &rt->dst);
 252	rt = NULL;
 253
 254	if (likely(!tp->repair)) {
 255		if (!tp->write_seq)
 256			tp->write_seq = secure_tcp_seq(inet->inet_saddr,
 257						       inet->inet_daddr,
 258						       inet->inet_sport,
 259						       usin->sin_port);
 260		tp->tsoffset = secure_tcp_ts_off(sock_net(sk),
 261						 inet->inet_saddr,
 262						 inet->inet_daddr);
 263	}
 264
 265	inet->inet_id = tp->write_seq ^ jiffies;
 266
 267	if (tcp_fastopen_defer_connect(sk, &err))
 268		return err;
 269	if (err)
 270		goto failure;
 271
 272	err = tcp_connect(sk);
 273
 274	if (err)
 275		goto failure;
 276
 277	return 0;
 278
 279failure:
 280	/*
 281	 * This unhashes the socket and releases the local port,
 282	 * if necessary.
 283	 */
 284	tcp_set_state(sk, TCP_CLOSE);
 285	ip_rt_put(rt);
 286	sk->sk_route_caps = 0;
 287	inet->inet_dport = 0;
 288	return err;
 289}
 290EXPORT_SYMBOL(tcp_v4_connect);
 291
 292/*
 293 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
  294 * It can be called through tcp_release_cb() if the socket was owned by the
  295 * user at the time tcp_v4_err() was called to handle the ICMP message.
 296 */
 297void tcp_v4_mtu_reduced(struct sock *sk)
 298{
 299	struct inet_sock *inet = inet_sk(sk);
 300	struct dst_entry *dst;
 301	u32 mtu;
 302
 303	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
 304		return;
 305	mtu = tcp_sk(sk)->mtu_info;
 306	dst = inet_csk_update_pmtu(sk, mtu);
 307	if (!dst)
 308		return;
 309
  310	/* Something is about to go wrong... Remember the soft error
  311	 * for the case that this connection is not able to recover.
 312	 */
 313	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
 314		sk->sk_err_soft = EMSGSIZE;
 315
 316	mtu = dst_mtu(dst);
 317
 318	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
 319	    ip_sk_accept_pmtu(sk) &&
 320	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
 321		tcp_sync_mss(sk, mtu);
 322
 323		/* Resend the TCP packet because it's
 324		 * clear that the old packet has been
 325		 * dropped. This is the new "fast" path mtu
 326		 * discovery.
 327		 */
 328		tcp_simple_retransmit(sk);
 329	} /* else let the usual retransmit timer handle it */
 330}
 331EXPORT_SYMBOL(tcp_v4_mtu_reduced);
 332
 333static void do_redirect(struct sk_buff *skb, struct sock *sk)
 334{
 335	struct dst_entry *dst = __sk_dst_check(sk, 0);
 336
 337	if (dst)
 338		dst->ops->redirect(dst, sk, skb);
 339}
 340
 341
 342/* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
 343void tcp_req_err(struct sock *sk, u32 seq, bool abort)
 344{
 345	struct request_sock *req = inet_reqsk(sk);
 346	struct net *net = sock_net(sk);
 347
 348	/* ICMPs are not backlogged, hence we cannot get
 349	 * an established socket here.
 350	 */
 351	if (seq != tcp_rsk(req)->snt_isn) {
 352		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
 353	} else if (abort) {
 354		/*
 355		 * Still in SYN_RECV, just remove it silently.
 356		 * There is no good way to pass the error to the newly
 357		 * created socket, and POSIX does not want network
 358		 * errors returned from accept().
 359		 */
 360		inet_csk_reqsk_queue_drop(req->rsk_listener, req);
 361		tcp_listendrop(req->rsk_listener);
 362	}
 363	reqsk_put(req);
 364}
 365EXPORT_SYMBOL(tcp_req_err);
 366
 367/*
 368 * This routine is called by the ICMP module when it gets some
 369 * sort of error condition.  If err < 0 then the socket should
 370 * be closed and the error returned to the user.  If err > 0
 371 * it's just the icmp type << 8 | icmp code.  After adjustment
 372 * header points to the first 8 bytes of the tcp header.  We need
 373 * to find the appropriate port.
 374 *
 375 * The locking strategy used here is very "optimistic". When
 376 * someone else accesses the socket the ICMP is just dropped
 377 * and for some paths there is no check at all.
  378 * A more general error queue for queuing errors for later handling
  379 * would probably be better.
 380 *
 381 */
 382
 383void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
 384{
 385	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
 386	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
 387	struct inet_connection_sock *icsk;
 388	struct tcp_sock *tp;
 389	struct inet_sock *inet;
 390	const int type = icmp_hdr(icmp_skb)->type;
 391	const int code = icmp_hdr(icmp_skb)->code;
 392	struct sock *sk;
 393	struct sk_buff *skb;
 394	struct request_sock *fastopen;
 395	u32 seq, snd_una;
 396	s32 remaining;
 397	u32 delta_us;
 398	int err;
 399	struct net *net = dev_net(icmp_skb->dev);
 400
 401	sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
 402				       th->dest, iph->saddr, ntohs(th->source),
 403				       inet_iif(icmp_skb), 0);
 404	if (!sk) {
 405		__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
 406		return;
 407	}
 408	if (sk->sk_state == TCP_TIME_WAIT) {
 409		inet_twsk_put(inet_twsk(sk));
 410		return;
 411	}
 412	seq = ntohl(th->seq);
 413	if (sk->sk_state == TCP_NEW_SYN_RECV)
 414		return tcp_req_err(sk, seq,
 415				  type == ICMP_PARAMETERPROB ||
 416				  type == ICMP_TIME_EXCEEDED ||
 417				  (type == ICMP_DEST_UNREACH &&
 418				   (code == ICMP_NET_UNREACH ||
 419				    code == ICMP_HOST_UNREACH)));
 420
 421	bh_lock_sock(sk);
 422	/* If too many ICMPs get dropped on busy
 423	 * servers this needs to be solved differently.
  424	 * We do take care of the PMTU discovery (RFC1191) special case:
  425	 * we can receive locally generated ICMP messages while the socket is held.
 426	 */
 427	if (sock_owned_by_user(sk)) {
 428		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
 429			__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
 430	}
 431	if (sk->sk_state == TCP_CLOSE)
 432		goto out;
 433
 434	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
 435		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
 436		goto out;
 437	}
 438
 439	icsk = inet_csk(sk);
 440	tp = tcp_sk(sk);
  441	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child()) */
 442	fastopen = tp->fastopen_rsk;
 443	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
 444	if (sk->sk_state != TCP_LISTEN &&
 445	    !between(seq, snd_una, tp->snd_nxt)) {
 446		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
 447		goto out;
 448	}
 449
 450	switch (type) {
 451	case ICMP_REDIRECT:
 452		if (!sock_owned_by_user(sk))
 453			do_redirect(icmp_skb, sk);
 454		goto out;
 455	case ICMP_SOURCE_QUENCH:
 456		/* Just silently ignore these. */
 457		goto out;
 458	case ICMP_PARAMETERPROB:
 459		err = EPROTO;
 460		break;
 461	case ICMP_DEST_UNREACH:
 462		if (code > NR_ICMP_UNREACH)
 463			goto out;
 464
 465		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
 466			/* We are not interested in TCP_LISTEN and open_requests
  467			 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
  468			 * they should go through unfragmented).
 469			 */
 470			if (sk->sk_state == TCP_LISTEN)
 471				goto out;
 472
 473			tp->mtu_info = info;
 474			if (!sock_owned_by_user(sk)) {
 475				tcp_v4_mtu_reduced(sk);
 476			} else {
 477				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &sk->sk_tsq_flags))
 478					sock_hold(sk);
 479			}
 480			goto out;
 481		}
 482
 483		err = icmp_err_convert[code].errno;
 484		/* check if icmp_skb allows revert of backoff
 485		 * (see draft-zimmermann-tcp-lcd) */
 486		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
 487			break;
 488		if (seq != tp->snd_una  || !icsk->icsk_retransmits ||
 489		    !icsk->icsk_backoff || fastopen)
 490			break;
 491
 492		if (sock_owned_by_user(sk))
 493			break;
 494
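		/* The unreachable error may reflect a transient blackhole:
		 * undo one backoff step and either re-arm the retransmit
		 * timer for the remaining time or retransmit immediately.
		 */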
 495		icsk->icsk_backoff--;
 496		icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
 497					       TCP_TIMEOUT_INIT;
 498		icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
 499
 500		skb = tcp_rtx_queue_head(sk);
 501		BUG_ON(!skb);
 502
 503		tcp_mstamp_refresh(tp);
 504		delta_us = (u32)(tp->tcp_mstamp - skb->skb_mstamp);
 505		remaining = icsk->icsk_rto -
 506			    usecs_to_jiffies(delta_us);
 507
 508		if (remaining > 0) {
 509			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
 510						  remaining, TCP_RTO_MAX);
 511		} else {
  512			/* The RTO revert clocked out the retransmission.
  513			 * Retransmit now. */
 514			tcp_retransmit_timer(sk);
 515		}
 516
 517		break;
 518	case ICMP_TIME_EXCEEDED:
 519		err = EHOSTUNREACH;
 520		break;
 521	default:
 522		goto out;
 523	}
 524
 525	switch (sk->sk_state) {
 526	case TCP_SYN_SENT:
 527	case TCP_SYN_RECV:
  528		/* Only in fast or simultaneous open. If a fast open socket
  529		 * is already accepted it is treated as a connected one below.
 530		 */
 531		if (fastopen && !fastopen->sk)
 532			break;
 533
 534		if (!sock_owned_by_user(sk)) {
 535			sk->sk_err = err;
 536
 537			sk->sk_error_report(sk);
 538
 539			tcp_done(sk);
 540		} else {
 541			sk->sk_err_soft = err;
 542		}
 543		goto out;
 544	}
 545
 546	/* If we've already connected we will keep trying
 547	 * until we time out, or the user gives up.
 548	 *
  549 * rfc1122 4.2.3.9 allows us to consider as hard errors
 550	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
 551	 * but it is obsoleted by pmtu discovery).
 552	 *
  553 * Note that in the modern internet, where routing is unreliable
  554 * and broken firewalls sit in every dark corner sending random
  555 * errors ordered by their masters, even these two messages finally lose
  556 * their original sense (even Linux sends invalid PORT_UNREACHs).
 557	 *
 558	 * Now we are in compliance with RFCs.
 559	 *							--ANK (980905)
 560	 */
 561
 562	inet = inet_sk(sk);
 563	if (!sock_owned_by_user(sk) && inet->recverr) {
 564		sk->sk_err = err;
 565		sk->sk_error_report(sk);
 566	} else	{ /* Only an error on timeout */
 567		sk->sk_err_soft = err;
 568	}
 569
 570out:
 571	bh_unlock_sock(sk);
 572	sock_put(sk);
 573}
 574
 575void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
 576{
 577	struct tcphdr *th = tcp_hdr(skb);
 578
 579	th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
 580	skb->csum_start = skb_transport_header(skb) - skb->head;
 581	skb->csum_offset = offsetof(struct tcphdr, check);
 582}
 583
 584/* This routine computes an IPv4 TCP checksum. */
 585void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
 586{
 587	const struct inet_sock *inet = inet_sk(sk);
 588
 589	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
 590}
 591EXPORT_SYMBOL(tcp_v4_send_check);
 592
 593/*
 594 *	This routine will send an RST to the other tcp.
 595 *
 596 *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 597 *		      for reset.
  598 *	Answer: if a packet caused the RST, it is not for a socket
  599 *		existing in our system; if it is matched to a socket,
  600 *		it is just a duplicate segment or a bug in the other side's TCP.
  601 *		So we build the reply based only on the parameters
  602 *		that arrived with the segment.
 603 *	Exception: precedence violation. We do not implement it in any case.
 604 */
 605
 606static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
 607{
 608	const struct tcphdr *th = tcp_hdr(skb);
 609	struct {
 610		struct tcphdr th;
 611#ifdef CONFIG_TCP_MD5SIG
 612		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
 613#endif
 614	} rep;
 615	struct ip_reply_arg arg;
 616#ifdef CONFIG_TCP_MD5SIG
 617	struct tcp_md5sig_key *key = NULL;
 618	const __u8 *hash_location = NULL;
 619	unsigned char newhash[16];
 620	int genhash;
 621	struct sock *sk1 = NULL;
 622#endif
 623	struct net *net;
 624
 625	/* Never send a reset in response to a reset. */
 626	if (th->rst)
 627		return;
 628
  629	/* If sk is not NULL, it means we did a successful lookup and the incoming
  630	 * route had to be correct. The prequeue might have dropped our dst.
 631	 */
 632	if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
 633		return;
 634
 635	/* Swap the send and the receive. */
 636	memset(&rep, 0, sizeof(rep));
 637	rep.th.dest   = th->source;
 638	rep.th.source = th->dest;
 639	rep.th.doff   = sizeof(struct tcphdr) / 4;
 640	rep.th.rst    = 1;
 641
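	/* Per RFC 793: if the offending segment carried an ACK, the RST
	 * uses that acknowledgment number as its sequence number; otherwise
	 * the RST acknowledges the segment's sequence space instead.
	 */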
 642	if (th->ack) {
 643		rep.th.seq = th->ack_seq;
 644	} else {
 645		rep.th.ack = 1;
 646		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
 647				       skb->len - (th->doff << 2));
 648	}
 649
 650	memset(&arg, 0, sizeof(arg));
 651	arg.iov[0].iov_base = (unsigned char *)&rep;
 652	arg.iov[0].iov_len  = sizeof(rep.th);
 653
 654	net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
 655#ifdef CONFIG_TCP_MD5SIG
 656	rcu_read_lock();
 657	hash_location = tcp_parse_md5sig_option(th);
 658	if (sk && sk_fullsock(sk)) {
 659		key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
 660					&ip_hdr(skb)->saddr, AF_INET);
 661	} else if (hash_location) {
 662		/*
  663		 * The active side is lost. Try to find the listening socket through
  664		 * the source port, and then find the md5 key through the listening socket.
  665		 * We do not lose security here:
  666		 * the incoming packet is checked with the md5 hash of the found key;
  667		 * no RST is generated if the md5 hash doesn't match.
 668		 */
 669		sk1 = __inet_lookup_listener(net, &tcp_hashinfo, NULL, 0,
 670					     ip_hdr(skb)->saddr,
 671					     th->source, ip_hdr(skb)->daddr,
 672					     ntohs(th->source), inet_iif(skb),
 673					     tcp_v4_sdif(skb));
  674		/* don't send an RST if we can't find a key */
 675		if (!sk1)
 676			goto out;
 677
 678		key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
 679					&ip_hdr(skb)->saddr, AF_INET);
 680		if (!key)
 681			goto out;
 682
 683
 684		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
 685		if (genhash || memcmp(hash_location, newhash, 16) != 0)
 686			goto out;
 687
 688	}
 689
 690	if (key) {
 691		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
 692				   (TCPOPT_NOP << 16) |
 693				   (TCPOPT_MD5SIG << 8) |
 694				   TCPOLEN_MD5SIG);
 695		/* Update length and the length the header thinks exists */
 696		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
 697		rep.th.doff = arg.iov[0].iov_len / 4;
 698
 699		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
 700				     key, ip_hdr(skb)->saddr,
 701				     ip_hdr(skb)->daddr, &rep.th);
 702	}
 703#endif
 704	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
 705				      ip_hdr(skb)->saddr, /* XXX */
 706				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
 707	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
 708	arg.flags = (sk && inet_sk_transparent(sk)) ? IP_REPLY_ARG_NOSRCCHECK : 0;
 709
  710	/* When the socket is gone, all binding information is lost.
  711	 * Routing might fail in this case. No choice here: if we choose to force
  712	 * the input interface, we will misroute in the case of an asymmetric route.
 713	 */
 714	if (sk) {
 715		arg.bound_dev_if = sk->sk_bound_dev_if;
 716		if (sk_fullsock(sk))
 717			trace_tcp_send_reset(sk, skb);
 718	}
 719
 720	BUILD_BUG_ON(offsetof(struct sock, sk_bound_dev_if) !=
 721		     offsetof(struct inet_timewait_sock, tw_bound_dev_if));
 722
 723	arg.tos = ip_hdr(skb)->tos;
 724	arg.uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
 725	local_bh_disable();
 726	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
 727			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
 728			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
 729			      &arg, arg.iov[0].iov_len);
 730
 731	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
 732	__TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
 733	local_bh_enable();
 734
 735#ifdef CONFIG_TCP_MD5SIG
 736out:
 737	rcu_read_unlock();
 738#endif
 739}
 740
  741/* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
  742   outside socket context, is certainly ugly. What can I do?
 743 */
 744
 745static void tcp_v4_send_ack(const struct sock *sk,
 746			    struct sk_buff *skb, u32 seq, u32 ack,
 747			    u32 win, u32 tsval, u32 tsecr, int oif,
 748			    struct tcp_md5sig_key *key,
 749			    int reply_flags, u8 tos)
 750{
 751	const struct tcphdr *th = tcp_hdr(skb);
 752	struct {
 753		struct tcphdr th;
 754		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
 755#ifdef CONFIG_TCP_MD5SIG
 756			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
 757#endif
 758			];
 759	} rep;
 760	struct net *net = sock_net(sk);
 761	struct ip_reply_arg arg;
 762
 763	memset(&rep.th, 0, sizeof(struct tcphdr));
 764	memset(&arg, 0, sizeof(arg));
 765
 766	arg.iov[0].iov_base = (unsigned char *)&rep;
 767	arg.iov[0].iov_len  = sizeof(rep.th);
 768	if (tsecr) {
 769		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
 770				   (TCPOPT_TIMESTAMP << 8) |
 771				   TCPOLEN_TIMESTAMP);
 772		rep.opt[1] = htonl(tsval);
 773		rep.opt[2] = htonl(tsecr);
 774		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
 775	}
 776
 777	/* Swap the send and the receive. */
 778	rep.th.dest    = th->source;
 779	rep.th.source  = th->dest;
 780	rep.th.doff    = arg.iov[0].iov_len / 4;
 781	rep.th.seq     = htonl(seq);
 782	rep.th.ack_seq = htonl(ack);
 783	rep.th.ack     = 1;
 784	rep.th.window  = htons(win);
 785
 786#ifdef CONFIG_TCP_MD5SIG
 787	if (key) {
 788		int offset = (tsecr) ? 3 : 0;
 789
 790		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
 791					  (TCPOPT_NOP << 16) |
 792					  (TCPOPT_MD5SIG << 8) |
 793					  TCPOLEN_MD5SIG);
 794		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
 795		rep.th.doff = arg.iov[0].iov_len/4;
 796
 797		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
 798				    key, ip_hdr(skb)->saddr,
 799				    ip_hdr(skb)->daddr, &rep.th);
 800	}
 801#endif
 802	arg.flags = reply_flags;
 803	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
 804				      ip_hdr(skb)->saddr, /* XXX */
 805				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
 806	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
 807	if (oif)
 808		arg.bound_dev_if = oif;
 809	arg.tos = tos;
 810	arg.uid = sock_net_uid(net, sk_fullsock(sk) ? sk : NULL);
 811	local_bh_disable();
 812	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
 813			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
 814			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
 815			      &arg, arg.iov[0].iov_len);
 816
 817	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
 818	local_bh_enable();
 819}
 820
 821static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
 822{
 823	struct inet_timewait_sock *tw = inet_twsk(sk);
 824	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
 825
 826	tcp_v4_send_ack(sk, skb,
 827			tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
 828			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
 829			tcp_time_stamp_raw() + tcptw->tw_ts_offset,
 830			tcptw->tw_ts_recent,
 831			tw->tw_bound_dev_if,
 832			tcp_twsk_md5_key(tcptw),
 833			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
 834			tw->tw_tos
 835			);
 836
 837	inet_twsk_put(tw);
 838}
 839
 840static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
 841				  struct request_sock *req)
 842{
 843	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
 844	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
 845	 */
 846	u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
 847					     tcp_sk(sk)->snd_nxt;
 848
 849	/* RFC 7323 2.3
 850	 * The window field (SEG.WND) of every outgoing segment, with the
 851	 * exception of <SYN> segments, MUST be right-shifted by
 852	 * Rcv.Wind.Shift bits:
 853	 */
 854	tcp_v4_send_ack(sk, skb, seq,
 855			tcp_rsk(req)->rcv_nxt,
 856			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
 857			tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
 858			req->ts_recent,
 859			0,
 860			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->saddr,
 861					  AF_INET),
 862			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
 863			ip_hdr(skb)->tos);
 864}
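/* Editor's note: a toy illustration of the RFC 7323 shift quoted above,
 * with made-up numbers. For a 256 KB receive window and
 * Rcv.Wind.Shift == 7, the 16-bit window field carries 262144 >> 7 ==
 * 2048, and the peer reconstructs the window by shifting left by the
 * same amount.
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t rcv_wnd = 262144;	/* hypothetical window, in bytes */
	unsigned int wscale = 7;	/* hypothetical Rcv.Wind.Shift */
	uint16_t seg_wnd = rcv_wnd >> wscale;

	assert(seg_wnd == 2048);
	assert(((uint32_t)seg_wnd << wscale) == rcv_wnd);
	return 0;
}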
 865
 866/*
 867 *	Send a SYN-ACK after having received a SYN.
 868 *	This still operates on a request_sock only, not on a big
 869 *	socket.
 870 */
 871static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
 872			      struct flowi *fl,
 873			      struct request_sock *req,
 874			      struct tcp_fastopen_cookie *foc,
 875			      enum tcp_synack_type synack_type)
 876{
 877	const struct inet_request_sock *ireq = inet_rsk(req);
 878	struct flowi4 fl4;
 879	int err = -1;
 880	struct sk_buff *skb;
 881
 882	/* First, grab a route. */
 883	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
 884		return -1;
 885
 886	skb = tcp_make_synack(sk, dst, req, foc, synack_type);
 887
 888	if (skb) {
 889		__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
 890
 891		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
 892					    ireq->ir_rmt_addr,
 893					    ireq_opt_deref(ireq));
 894		err = net_xmit_eval(err);
 895	}
 896
 897	return err;
 898}
 899
 900/*
 901 *	IPv4 request_sock destructor.
 902 */
 903static void tcp_v4_reqsk_destructor(struct request_sock *req)
 904{
 905	kfree(rcu_dereference_protected(inet_rsk(req)->ireq_opt, 1));
 906}
 907
 908#ifdef CONFIG_TCP_MD5SIG
 909/*
 910 * RFC2385 MD5 checksumming requires a mapping of
 911 * IP address->MD5 Key.
 912 * We need to maintain these in the sk structure.
 913 */
 914
 915/* Find the Key structure for an address.  */
 916struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
 917					 const union tcp_md5_addr *addr,
 918					 int family)
 919{
 920	const struct tcp_sock *tp = tcp_sk(sk);
 921	struct tcp_md5sig_key *key;
 922	const struct tcp_md5sig_info *md5sig;
 923	__be32 mask;
 924	struct tcp_md5sig_key *best_match = NULL;
 925	bool match;
 926
 927	/* caller either holds rcu_read_lock() or socket lock */
 928	md5sig = rcu_dereference_check(tp->md5sig_info,
 929				       lockdep_sock_is_held(sk));
 930	if (!md5sig)
 931		return NULL;
 932
 933	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
 934		if (key->family != family)
 935			continue;
 936
 937		if (family == AF_INET) {
 938			mask = inet_make_mask(key->prefixlen);
 939			match = (key->addr.a4.s_addr & mask) ==
 940				(addr->a4.s_addr & mask);
 941#if IS_ENABLED(CONFIG_IPV6)
 942		} else if (family == AF_INET6) {
 943			match = ipv6_prefix_equal(&key->addr.a6, &addr->a6,
 944						  key->prefixlen);
 945#endif
 946		} else {
 947			match = false;
 948		}
 949
 950		if (match && (!best_match ||
 951			      key->prefixlen > best_match->prefixlen))
 952			best_match = key;
 953	}
 954	return best_match;
 955}
 956EXPORT_SYMBOL(tcp_md5_do_lookup);
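/* Editor's note: a user-space sketch of the longest-prefix match that
 * tcp_md5_do_lookup() performs for AF_INET keys: a key matches when the
 * peer address equals the key address under the key's prefix mask, and
 * the longest matching prefix wins. The make_mask() helper and the tiny
 * key table are hypothetical stand-ins (the kernel uses inet_make_mask()
 * on big-endian addresses; this sketch works on host-order values).
 */
#include <stdint.h>
#include <stdio.h>

struct key { uint32_t addr; int prefixlen; };

static uint32_t make_mask(int prefixlen)
{
	return prefixlen ? ~((1u << (32 - prefixlen)) - 1) : 0;
}

int main(void)
{
	struct key keys[] = {
		{ 0x0A000000, 8 },	/* 10.0.0.0/8 */
		{ 0x0A010200, 24 },	/* 10.1.2.0/24 */
	};
	uint32_t peer = 0x0A010203;	/* 10.1.2.3 */
	const struct key *best = NULL;
	unsigned int i;

	for (i = 0; i < sizeof(keys) / sizeof(keys[0]); i++) {
		uint32_t mask = make_mask(keys[i].prefixlen);

		if ((keys[i].addr & mask) == (peer & mask) &&
		    (!best || keys[i].prefixlen > best->prefixlen))
			best = &keys[i];
	}
	printf("matched /%d\n", best ? best->prefixlen : -1);	/* "/24" */
	return 0;
}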
 957
 958static struct tcp_md5sig_key *tcp_md5_do_lookup_exact(const struct sock *sk,
 959						      const union tcp_md5_addr *addr,
 960						      int family, u8 prefixlen)
 961{
 962	const struct tcp_sock *tp = tcp_sk(sk);
 963	struct tcp_md5sig_key *key;
 964	unsigned int size = sizeof(struct in_addr);
 965	const struct tcp_md5sig_info *md5sig;
 966
 967	/* caller either holds rcu_read_lock() or socket lock */
 968	md5sig = rcu_dereference_check(tp->md5sig_info,
 969				       lockdep_sock_is_held(sk));
 970	if (!md5sig)
 971		return NULL;
 972#if IS_ENABLED(CONFIG_IPV6)
 973	if (family == AF_INET6)
 974		size = sizeof(struct in6_addr);
 975#endif
 976	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
 977		if (key->family != family)
 978			continue;
 979		if (!memcmp(&key->addr, addr, size) &&
 980		    key->prefixlen == prefixlen)
 981			return key;
 982	}
 983	return NULL;
 984}
 985
 986struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
 987					 const struct sock *addr_sk)
 988{
 989	const union tcp_md5_addr *addr;
 990
 991	addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
 992	return tcp_md5_do_lookup(sk, addr, AF_INET);
 993}
 994EXPORT_SYMBOL(tcp_v4_md5_lookup);
 995
 996/* This can be called on a newly created socket, from other files */
 997int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
 998		   int family, u8 prefixlen, const u8 *newkey, u8 newkeylen,
 999		   gfp_t gfp)
1000{
1001	/* Add Key to the list */
1002	struct tcp_md5sig_key *key;
1003	struct tcp_sock *tp = tcp_sk(sk);
1004	struct tcp_md5sig_info *md5sig;
1005
1006	key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen);
1007	if (key) {
1008		/* Pre-existing entry - just update that one. */
1009		memcpy(key->key, newkey, newkeylen);
1010		key->keylen = newkeylen;
1011		return 0;
1012	}
1013
1014	md5sig = rcu_dereference_protected(tp->md5sig_info,
1015					   lockdep_sock_is_held(sk));
1016	if (!md5sig) {
1017		md5sig = kmalloc(sizeof(*md5sig), gfp);
1018		if (!md5sig)
1019			return -ENOMEM;
1020
1021		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
1022		INIT_HLIST_HEAD(&md5sig->head);
1023		rcu_assign_pointer(tp->md5sig_info, md5sig);
1024	}
1025
1026	key = sock_kmalloc(sk, sizeof(*key), gfp);
1027	if (!key)
1028		return -ENOMEM;
1029	if (!tcp_alloc_md5sig_pool()) {
1030		sock_kfree_s(sk, key, sizeof(*key));
1031		return -ENOMEM;
1032	}
1033
1034	memcpy(key->key, newkey, newkeylen);
1035	key->keylen = newkeylen;
1036	key->family = family;
1037	key->prefixlen = prefixlen;
1038	memcpy(&key->addr, addr,
1039	       (family == AF_INET6) ? sizeof(struct in6_addr) :
1040				      sizeof(struct in_addr));
1041	hlist_add_head_rcu(&key->node, &md5sig->head);
1042	return 0;
1043}
1044EXPORT_SYMBOL(tcp_md5_do_add);
1045
1046int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family,
1047		   u8 prefixlen)
1048{
1049	struct tcp_md5sig_key *key;
1050
1051	key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen);
1052	if (!key)
1053		return -ENOENT;
1054	hlist_del_rcu(&key->node);
1055	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1056	kfree_rcu(key, rcu);
1057	return 0;
1058}
1059EXPORT_SYMBOL(tcp_md5_do_del);
1060
1061static void tcp_clear_md5_list(struct sock *sk)
1062{
1063	struct tcp_sock *tp = tcp_sk(sk);
1064	struct tcp_md5sig_key *key;
1065	struct hlist_node *n;
1066	struct tcp_md5sig_info *md5sig;
1067
1068	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
1069
1070	hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
1071		hlist_del_rcu(&key->node);
1072		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1073		kfree_rcu(key, rcu);
1074	}
1075}
1076
1077static int tcp_v4_parse_md5_keys(struct sock *sk, int optname,
1078				 char __user *optval, int optlen)
1079{
1080	struct tcp_md5sig cmd;
1081	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
1082	u8 prefixlen = 32;
1083
1084	if (optlen < sizeof(cmd))
1085		return -EINVAL;
1086
1087	if (copy_from_user(&cmd, optval, sizeof(cmd)))
1088		return -EFAULT;
1089
1090	if (sin->sin_family != AF_INET)
1091		return -EINVAL;
1092
1093	if (optname == TCP_MD5SIG_EXT &&
1094	    cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
1095		prefixlen = cmd.tcpm_prefixlen;
1096		if (prefixlen > 32)
1097			return -EINVAL;
1098	}
1099
1100	if (!cmd.tcpm_keylen)
1101		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1102				      AF_INET, prefixlen);
1103
1104	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
1105		return -EINVAL;
1106
1107	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1108			      AF_INET, prefixlen, cmd.tcpm_key, cmd.tcpm_keylen,
1109			      GFP_KERNEL);
1110}
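/* Editor's note: the user-space counterpart of the parser above, as a
 * minimal sketch. It installs an MD5 key for a single IPv4 peer on an
 * already-created socket fd; the peer address and secret are invented
 * for the example. struct tcp_md5sig and TCP_MD5SIG come from
 * <netinet/tcp.h> on reasonably recent libcs (otherwise <linux/tcp.h>).
 * Setting tcpm_keylen to 0 would instead delete the key, mirroring the
 * tcp_md5_do_del() path above.
 */
#include <string.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <netinet/tcp.h>

static int install_md5_key(int fd, const char *peer, const char *secret)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;

	memset(&cmd, 0, sizeof(cmd));
	sin->sin_family = AF_INET;
	if (inet_pton(AF_INET, peer, &sin->sin_addr) != 1)
		return -1;
	cmd.tcpm_keylen = strlen(secret);	/* must be <= TCP_MD5SIG_MAXKEYLEN */
	memcpy(cmd.tcpm_key, secret, cmd.tcpm_keylen);

	return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &cmd, sizeof(cmd));
}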
1111
1112static int tcp_v4_md5_hash_headers(struct tcp_md5sig_pool *hp,
1113				   __be32 daddr, __be32 saddr,
1114				   const struct tcphdr *th, int nbytes)
1115{
1116	struct tcp4_pseudohdr *bp;
1117	struct scatterlist sg;
1118	struct tcphdr *_th;
1119
1120	bp = hp->scratch;
1121	bp->saddr = saddr;
1122	bp->daddr = daddr;
1123	bp->pad = 0;
1124	bp->protocol = IPPROTO_TCP;
1125	bp->len = cpu_to_be16(nbytes);
1126
1127	_th = (struct tcphdr *)(bp + 1);
1128	memcpy(_th, th, sizeof(*th));
1129	_th->check = 0;
1130
1131	sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
1132	ahash_request_set_crypt(hp->md5_req, &sg, NULL,
1133				sizeof(*bp) + sizeof(*th));
1134	return crypto_ahash_update(hp->md5_req);
1135}
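/* Editor's note: for reference, hp->scratch above is laid out as
 * struct tcp4_pseudohdr (declared in include/net/tcp.h): saddr, daddr,
 * one zero pad byte, the protocol and the TCP length, followed here by
 * a copy of the TCP header with its checksum field cleared. Per
 * RFC 2385 the digest covers, in order: this pseudo-header, the TCP
 * header with a zeroed checksum, the segment data and finally the key,
 * which is the sequence of crypto_ahash_update() calls driven by this
 * helper together with tcp_md5_hash_skb_data() and tcp_md5_hash_key().
 */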
1136
1137static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
1138			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
1139{
1140	struct tcp_md5sig_pool *hp;
1141	struct ahash_request *req;
1142
1143	hp = tcp_get_md5sig_pool();
1144	if (!hp)
1145		goto clear_hash_noput;
1146	req = hp->md5_req;
1147
1148	if (crypto_ahash_init(req))
1149		goto clear_hash;
1150	if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
1151		goto clear_hash;
1152	if (tcp_md5_hash_key(hp, key))
1153		goto clear_hash;
1154	ahash_request_set_crypt(req, NULL, md5_hash, 0);
1155	if (crypto_ahash_final(req))
1156		goto clear_hash;
1157
1158	tcp_put_md5sig_pool();
1159	return 0;
1160
1161clear_hash:
1162	tcp_put_md5sig_pool();
1163clear_hash_noput:
1164	memset(md5_hash, 0, 16);
1165	return 1;
1166}
1167
1168int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
1169			const struct sock *sk,
1170			const struct sk_buff *skb)
1171{
1172	struct tcp_md5sig_pool *hp;
1173	struct ahash_request *req;
1174	const struct tcphdr *th = tcp_hdr(skb);
1175	__be32 saddr, daddr;
1176
1177	if (sk) { /* valid for establish/request sockets */
1178		saddr = sk->sk_rcv_saddr;
1179		daddr = sk->sk_daddr;
1180	} else {
1181		const struct iphdr *iph = ip_hdr(skb);
1182		saddr = iph->saddr;
1183		daddr = iph->daddr;
1184	}
1185
1186	hp = tcp_get_md5sig_pool();
1187	if (!hp)
1188		goto clear_hash_noput;
1189	req = hp->md5_req;
1190
1191	if (crypto_ahash_init(req))
1192		goto clear_hash;
1193
1194	if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, skb->len))
1195		goto clear_hash;
1196	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
1197		goto clear_hash;
1198	if (tcp_md5_hash_key(hp, key))
1199		goto clear_hash;
1200	ahash_request_set_crypt(req, NULL, md5_hash, 0);
1201	if (crypto_ahash_final(req))
1202		goto clear_hash;
1203
1204	tcp_put_md5sig_pool();
1205	return 0;
1206
1207clear_hash:
1208	tcp_put_md5sig_pool();
1209clear_hash_noput:
1210	memset(md5_hash, 0, 16);
1211	return 1;
1212}
1213EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
1214
1215#endif
1216
1217/* Called with rcu_read_lock() */
1218static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
1219				    const struct sk_buff *skb)
1220{
1221#ifdef CONFIG_TCP_MD5SIG
1222	/*
1223	 * This gets called for each TCP segment that arrives
1224	 * so we want to be efficient.
1225	 * We have 3 drop cases:
1226	 * o No MD5 hash and one expected.
1227	 * o MD5 hash and we're not expecting one.
 1228	 * o MD5 hash and it's wrong.
1229	 */
1230	const __u8 *hash_location = NULL;
1231	struct tcp_md5sig_key *hash_expected;
1232	const struct iphdr *iph = ip_hdr(skb);
1233	const struct tcphdr *th = tcp_hdr(skb);
1234	int genhash;
1235	unsigned char newhash[16];
1236
1237	hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
1238					  AF_INET);
1239	hash_location = tcp_parse_md5sig_option(th);
1240
1241	/* We've parsed the options - do we have a hash? */
1242	if (!hash_expected && !hash_location)
1243		return false;
1244
1245	if (hash_expected && !hash_location) {
1246		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
1247		return true;
1248	}
1249
1250	if (!hash_expected && hash_location) {
1251		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
1252		return true;
1253	}
1254
1255	/* Okay, so this is hash_expected and hash_location -
1256	 * so we need to calculate the checksum.
1257	 */
1258	genhash = tcp_v4_md5_hash_skb(newhash,
1259				      hash_expected,
1260				      NULL, skb);
1261
1262	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
1263		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
1264		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
1265				     &iph->saddr, ntohs(th->source),
1266				     &iph->daddr, ntohs(th->dest),
1267				     genhash ? " tcp_v4_calc_md5_hash failed"
1268				     : "");
1269		return true;
1270	}
1271	return false;
1272#endif
1273	return false;
1274}
1275
1276static void tcp_v4_init_req(struct request_sock *req,
1277			    const struct sock *sk_listener,
1278			    struct sk_buff *skb)
1279{
1280	struct inet_request_sock *ireq = inet_rsk(req);
1281	struct net *net = sock_net(sk_listener);
1282
1283	sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
1284	sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
1285	RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(net, skb));
1286}
1287
1288static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
1289					  struct flowi *fl,
1290					  const struct request_sock *req)
1291{
1292	return inet_csk_route_req(sk, &fl->u.ip4, req);
1293}
1294
1295struct request_sock_ops tcp_request_sock_ops __read_mostly = {
1296	.family		=	PF_INET,
1297	.obj_size	=	sizeof(struct tcp_request_sock),
1298	.rtx_syn_ack	=	tcp_rtx_synack,
1299	.send_ack	=	tcp_v4_reqsk_send_ack,
1300	.destructor	=	tcp_v4_reqsk_destructor,
1301	.send_reset	=	tcp_v4_send_reset,
1302	.syn_ack_timeout =	tcp_syn_ack_timeout,
1303};
1304
1305static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1306	.mss_clamp	=	TCP_MSS_DEFAULT,
1307#ifdef CONFIG_TCP_MD5SIG
1308	.req_md5_lookup	=	tcp_v4_md5_lookup,
1309	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
1310#endif
1311	.init_req	=	tcp_v4_init_req,
1312#ifdef CONFIG_SYN_COOKIES
1313	.cookie_init_seq =	cookie_v4_init_sequence,
1314#endif
1315	.route_req	=	tcp_v4_route_req,
1316	.init_seq	=	tcp_v4_init_seq,
1317	.init_ts_off	=	tcp_v4_init_ts_off,
1318	.send_synack	=	tcp_v4_send_synack,
1319};
1320
1321int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1322{
 1323	/* Never answer SYNs sent to broadcast or multicast */
1324	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
1325		goto drop;
1326
1327	return tcp_conn_request(&tcp_request_sock_ops,
1328				&tcp_request_sock_ipv4_ops, sk, skb);
1329
1330drop:
1331	tcp_listendrop(sk);
1332	return 0;
1333}
1334EXPORT_SYMBOL(tcp_v4_conn_request);
1335
1336
1337/*
1338 * The three way handshake has completed - we got a valid synack -
1339 * now create the new socket.
1340 */
1341struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
1342				  struct request_sock *req,
1343				  struct dst_entry *dst,
1344				  struct request_sock *req_unhash,
1345				  bool *own_req)
1346{
1347	struct inet_request_sock *ireq;
1348	struct inet_sock *newinet;
1349	struct tcp_sock *newtp;
1350	struct sock *newsk;
1351#ifdef CONFIG_TCP_MD5SIG
1352	struct tcp_md5sig_key *key;
1353#endif
1354	struct ip_options_rcu *inet_opt;
1355
1356	if (sk_acceptq_is_full(sk))
1357		goto exit_overflow;
1358
1359	newsk = tcp_create_openreq_child(sk, req, skb);
1360	if (!newsk)
1361		goto exit_nonewsk;
1362
1363	newsk->sk_gso_type = SKB_GSO_TCPV4;
1364	inet_sk_rx_dst_set(newsk, skb);
1365
1366	newtp		      = tcp_sk(newsk);
1367	newinet		      = inet_sk(newsk);
1368	ireq		      = inet_rsk(req);
1369	sk_daddr_set(newsk, ireq->ir_rmt_addr);
1370	sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
1371	newsk->sk_bound_dev_if = ireq->ir_iif;
1372	newinet->inet_saddr   = ireq->ir_loc_addr;
1373	inet_opt	      = rcu_dereference(ireq->ireq_opt);
1374	RCU_INIT_POINTER(newinet->inet_opt, inet_opt);
1375	newinet->mc_index     = inet_iif(skb);
1376	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
1377	newinet->rcv_tos      = ip_hdr(skb)->tos;
1378	inet_csk(newsk)->icsk_ext_hdr_len = 0;
1379	if (inet_opt)
1380		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
1381	newinet->inet_id = newtp->write_seq ^ jiffies;
1382
1383	if (!dst) {
1384		dst = inet_csk_route_child_sock(sk, newsk, req);
1385		if (!dst)
1386			goto put_and_exit;
1387	} else {
 1388		/* syncookie case: see end of cookie_v4_check() */
1389	}
1390	sk_setup_caps(newsk, dst);
1391
1392	tcp_ca_openreq_child(newsk, dst);
1393
1394	tcp_sync_mss(newsk, dst_mtu(dst));
1395	newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));
1396
1397	tcp_initialize_rcv_mss(newsk);
1398
1399#ifdef CONFIG_TCP_MD5SIG
1400	/* Copy over the MD5 key from the original socket */
1401	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
1402				AF_INET);
1403	if (key) {
1404		/*
1405		 * We're using one, so create a matching key
1406		 * on the newsk structure. If we fail to get
1407		 * memory, then we end up not copying the key
1408		 * across. Shucks.
1409		 */
1410		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
1411			       AF_INET, 32, key->key, key->keylen, GFP_ATOMIC);
1412		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
1413	}
1414#endif
1415
1416	if (__inet_inherit_port(sk, newsk) < 0)
1417		goto put_and_exit;
1418	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
1419	if (likely(*own_req)) {
1420		tcp_move_syn(newtp, req);
1421		ireq->ireq_opt = NULL;
1422	} else {
1423		newinet->inet_opt = NULL;
1424	}
1425	return newsk;
1426
1427exit_overflow:
1428	NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1429exit_nonewsk:
1430	dst_release(dst);
1431exit:
1432	tcp_listendrop(sk);
1433	return NULL;
1434put_and_exit:
1435	newinet->inet_opt = NULL;
1436	inet_csk_prepare_forced_close(newsk);
1437	tcp_done(newsk);
1438	goto exit;
1439}
1440EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
1441
1442static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
1443{
1444#ifdef CONFIG_SYN_COOKIES
1445	const struct tcphdr *th = tcp_hdr(skb);
1446
1447	if (!th->syn)
1448		sk = cookie_v4_check(sk, skb);
1449#endif
1450	return sk;
1451}
1452
 1453/* The socket must have its spinlock held when we get
1454 * here, unless it is a TCP_LISTEN socket.
1455 *
1456 * We have a potential double-lock case here, so even when
1457 * doing backlog processing we use the BH locking scheme.
1458 * This is because we cannot sleep with the original spinlock
1459 * held.
1460 */
1461int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1462{
1463	struct sock *rsk;
1464
1465	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1466		struct dst_entry *dst = sk->sk_rx_dst;
1467
1468		sock_rps_save_rxhash(sk, skb);
1469		sk_mark_napi_id(sk, skb);
1470		if (dst) {
1471			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1472			    !dst->ops->check(dst, 0)) {
1473				dst_release(dst);
1474				sk->sk_rx_dst = NULL;
1475			}
1476		}
1477		tcp_rcv_established(sk, skb, tcp_hdr(skb));
1478		return 0;
1479	}
1480
1481	if (tcp_checksum_complete(skb))
1482		goto csum_err;
1483
1484	if (sk->sk_state == TCP_LISTEN) {
1485		struct sock *nsk = tcp_v4_cookie_check(sk, skb);
1486
1487		if (!nsk)
1488			goto discard;
1489		if (nsk != sk) {
1490			if (tcp_child_process(sk, nsk, skb)) {
1491				rsk = nsk;
1492				goto reset;
1493			}
1494			return 0;
1495		}
1496	} else
1497		sock_rps_save_rxhash(sk, skb);
1498
1499	if (tcp_rcv_state_process(sk, skb)) {
1500		rsk = sk;
1501		goto reset;
1502	}
1503	return 0;
1504
1505reset:
1506	tcp_v4_send_reset(rsk, skb);
1507discard:
1508	kfree_skb(skb);
1509	/* Be careful here. If this function gets more complicated and
1510	 * gcc suffers from register pressure on the x86, sk (in %ebx)
1511	 * might be destroyed here. This current version compiles correctly,
1512	 * but you have been warned.
1513	 */
1514	return 0;
1515
1516csum_err:
1517	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1518	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
1519	goto discard;
1520}
1521EXPORT_SYMBOL(tcp_v4_do_rcv);
1522
1523int tcp_v4_early_demux(struct sk_buff *skb)
1524{
1525	const struct iphdr *iph;
1526	const struct tcphdr *th;
1527	struct sock *sk;
1528
1529	if (skb->pkt_type != PACKET_HOST)
1530		return 0;
1531
1532	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1533		return 0;
1534
1535	iph = ip_hdr(skb);
1536	th = tcp_hdr(skb);
1537
1538	if (th->doff < sizeof(struct tcphdr) / 4)
1539		return 0;
1540
1541	sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1542				       iph->saddr, th->source,
1543				       iph->daddr, ntohs(th->dest),
1544				       skb->skb_iif, inet_sdif(skb));
1545	if (sk) {
1546		skb->sk = sk;
1547		skb->destructor = sock_edemux;
1548		if (sk_fullsock(sk)) {
1549			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
1550
1551			if (dst)
1552				dst = dst_check(dst, 0);
1553			if (dst &&
1554			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1555				skb_dst_set_noref(skb, dst);
1556		}
1557	}
1558	return 0;
1559}
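/* Editor's note: early demux runs before the routing decision. It looks
 * the segment up in the established hash by the (saddr, sport, daddr,
 * dport, ifindex) tuple and, on a hit, attaches the socket and, for
 * full sockets, the cached rx dst to the skb, letting the input path
 * skip a route lookup for established flows.
 */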
1560
1561bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
1562{
1563	u32 limit = sk->sk_rcvbuf + sk->sk_sndbuf;
1564
 1565	/* Only the socket owner can try to collapse/prune the rx queues
 1566	 * to reduce memory overhead, so add a little headroom here.
 1567	 * Only a few socket backlogs are likely to be non-empty at the same time.
1568	 */
1569	limit += 64*1024;
1570
1571	/* In case all data was pulled from skb frags (in __pskb_pull_tail()),
1572	 * we can fix skb->truesize to its real value to avoid future drops.
1573	 * This is valid because skb is not yet charged to the socket.
 1574	 * It has been noticed that pure SACK packets were sometimes dropped
 1575	 * (if cooked by drivers without the copybreak feature).
1576	 */
1577	skb_condense(skb);
1578
1579	if (unlikely(sk_add_backlog(sk, skb, limit))) {
1580		bh_unlock_sock(sk);
1581		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPBACKLOGDROP);
1582		return true;
1583	}
1584	return false;
1585}
1586EXPORT_SYMBOL(tcp_add_backlog);
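/* Editor's note: a worked instance of the limit computed in
 * tcp_add_backlog(), using made-up but plausible buffer sizes. With
 * sk_rcvbuf == 87380 and sk_sndbuf == 16384, the backlog may hold up to
 * 87380 + 16384 + 64 * 1024 == 169300 bytes of skb truesize before
 * segments are dropped and TCPBacklogDrop is bumped.
 */
#include <assert.h>

int main(void)
{
	unsigned int rcvbuf = 87380, sndbuf = 16384;	/* hypothetical */
	unsigned int limit = rcvbuf + sndbuf + 64 * 1024;

	assert(limit == 169300);
	return 0;
}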
1587
1588int tcp_filter(struct sock *sk, struct sk_buff *skb)
1589{
1590	struct tcphdr *th = (struct tcphdr *)skb->data;
1591	unsigned int eaten = skb->len;
1592	int err;
1593
1594	err = sk_filter_trim_cap(sk, skb, th->doff * 4);
1595	if (!err) {
1596		eaten -= skb->len;
1597		TCP_SKB_CB(skb)->end_seq -= eaten;
1598	}
1599	return err;
1600}
1601EXPORT_SYMBOL(tcp_filter);
1602
1603static void tcp_v4_restore_cb(struct sk_buff *skb)
1604{
1605	memmove(IPCB(skb), &TCP_SKB_CB(skb)->header.h4,
1606		sizeof(struct inet_skb_parm));
1607}
1608
1609static void tcp_v4_fill_cb(struct sk_buff *skb, const struct iphdr *iph,
1610			   const struct tcphdr *th)
1611{
 1612	/* This is tricky: we move IPCB to its correct location inside TCP_SKB_CB().
 1613	 * barrier() makes sure the compiler won't play fool^Waliasing games.
1614	 */
1615	memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
1616		sizeof(struct inet_skb_parm));
1617	barrier();
1618
1619	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1620	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1621				    skb->len - th->doff * 4);
1622	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1623	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1624	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1625	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
1626	TCP_SKB_CB(skb)->sacked	 = 0;
1627	TCP_SKB_CB(skb)->has_rxtstamp =
1628			skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
1629}
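/* Editor's note: a small check of the end_seq arithmetic above. SYN and
 * FIN each consume one unit of sequence space and payload consumes its
 * length, so a pure SYN spans exactly one sequence number. The values
 * are illustrative.
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t seq = 1000;		/* ntohl(th->seq) */
	uint32_t syn = 1, fin = 0;
	uint32_t payload = 0;		/* skb->len - th->doff * 4 */

	assert(seq + syn + fin + payload == 1001);	/* end_seq */
	return 0;
}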
1630
1631/*
1632 *	From tcp_input.c
1633 */
1634
1635int tcp_v4_rcv(struct sk_buff *skb)
1636{
1637	struct net *net = dev_net(skb->dev);
1638	int sdif = inet_sdif(skb);
1639	const struct iphdr *iph;
1640	const struct tcphdr *th;
1641	bool refcounted;
1642	struct sock *sk;
1643	int ret;
1644
1645	if (skb->pkt_type != PACKET_HOST)
1646		goto discard_it;
1647
1648	/* Count it even if it's bad */
1649	__TCP_INC_STATS(net, TCP_MIB_INSEGS);
1650
1651	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1652		goto discard_it;
1653
1654	th = (const struct tcphdr *)skb->data;
1655
1656	if (unlikely(th->doff < sizeof(struct tcphdr) / 4))
1657		goto bad_packet;
1658	if (!pskb_may_pull(skb, th->doff * 4))
1659		goto discard_it;
1660
1661	/* An explanation is required here, I think.
1662	 * Packet length and doff are validated by header prediction,
 1663	 * provided the case of th->doff == 0 is eliminated.
1664	 * So, we defer the checks. */
1665
1666	if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
1667		goto csum_error;
1668
1669	th = (const struct tcphdr *)skb->data;
1670	iph = ip_hdr(skb);
1671lookup:
1672	sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source,
1673			       th->dest, sdif, &refcounted);
1674	if (!sk)
1675		goto no_tcp_socket;
1676
1677process:
1678	if (sk->sk_state == TCP_TIME_WAIT)
1679		goto do_time_wait;
1680
1681	if (sk->sk_state == TCP_NEW_SYN_RECV) {
1682		struct request_sock *req = inet_reqsk(sk);
1683		bool req_stolen = false;
1684		struct sock *nsk;
1685
1686		sk = req->rsk_listener;
1687		if (unlikely(tcp_v4_inbound_md5_hash(sk, skb))) {
1688			sk_drops_add(sk, skb);
1689			reqsk_put(req);
1690			goto discard_it;
1691		}
1692		if (unlikely(sk->sk_state != TCP_LISTEN)) {
1693			inet_csk_reqsk_queue_drop_and_put(sk, req);
1694			goto lookup;
1695		}
 1696		/* We own a reference on the listener; increase it again,
 1697		 * as we might lose it too soon.
1698		 */
1699		sock_hold(sk);
1700		refcounted = true;
1701		nsk = NULL;
1702		if (!tcp_filter(sk, skb)) {
1703			th = (const struct tcphdr *)skb->data;
1704			iph = ip_hdr(skb);
1705			tcp_v4_fill_cb(skb, iph, th);
1706			nsk = tcp_check_req(sk, skb, req, false, &req_stolen);
1707		}
1708		if (!nsk) {
1709			reqsk_put(req);
1710			if (req_stolen) {
1711				/* Another cpu got exclusive access to req
1712				 * and created a full blown socket.
1713				 * Try to feed this packet to this socket
1714				 * instead of discarding it.
1715				 */
1716				tcp_v4_restore_cb(skb);
1717				sock_put(sk);
1718				goto lookup;
1719			}
1720			goto discard_and_relse;
1721		}
1722		if (nsk == sk) {
1723			reqsk_put(req);
1724			tcp_v4_restore_cb(skb);
1725		} else if (tcp_child_process(sk, nsk, skb)) {
1726			tcp_v4_send_reset(nsk, skb);
1727			goto discard_and_relse;
1728		} else {
1729			sock_put(sk);
1730			return 0;
1731		}
1732	}
1733	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
1734		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
1735		goto discard_and_relse;
1736	}
1737
1738	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1739		goto discard_and_relse;
1740
1741	if (tcp_v4_inbound_md5_hash(sk, skb))
1742		goto discard_and_relse;
1743
1744	nf_reset(skb);
1745
1746	if (tcp_filter(sk, skb))
1747		goto discard_and_relse;
1748	th = (const struct tcphdr *)skb->data;
1749	iph = ip_hdr(skb);
1750	tcp_v4_fill_cb(skb, iph, th);
1751
1752	skb->dev = NULL;
1753
1754	if (sk->sk_state == TCP_LISTEN) {
1755		ret = tcp_v4_do_rcv(sk, skb);
1756		goto put_and_return;
1757	}
1758
1759	sk_incoming_cpu_update(sk);
1760
1761	bh_lock_sock_nested(sk);
1762	tcp_segs_in(tcp_sk(sk), skb);
1763	ret = 0;
1764	if (!sock_owned_by_user(sk)) {
1765		ret = tcp_v4_do_rcv(sk, skb);
1766	} else if (tcp_add_backlog(sk, skb)) {
1767		goto discard_and_relse;
1768	}
1769	bh_unlock_sock(sk);
1770
1771put_and_return:
1772	if (refcounted)
1773		sock_put(sk);
1774
1775	return ret;
1776
1777no_tcp_socket:
1778	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1779		goto discard_it;
1780
1781	tcp_v4_fill_cb(skb, iph, th);
1782
1783	if (tcp_checksum_complete(skb)) {
1784csum_error:
1785		__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
1786bad_packet:
1787		__TCP_INC_STATS(net, TCP_MIB_INERRS);
1788	} else {
1789		tcp_v4_send_reset(NULL, skb);
1790	}
1791
1792discard_it:
1793	/* Discard frame. */
1794	kfree_skb(skb);
1795	return 0;
1796
1797discard_and_relse:
1798	sk_drops_add(sk, skb);
1799	if (refcounted)
1800		sock_put(sk);
1801	goto discard_it;
1802
1803do_time_wait:
1804	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1805		inet_twsk_put(inet_twsk(sk));
1806		goto discard_it;
1807	}
1808
1809	tcp_v4_fill_cb(skb, iph, th);
1810
1811	if (tcp_checksum_complete(skb)) {
1812		inet_twsk_put(inet_twsk(sk));
1813		goto csum_error;
1814	}
1815	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1816	case TCP_TW_SYN: {
1817		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
1818							&tcp_hashinfo, skb,
1819							__tcp_hdrlen(th),
1820							iph->saddr, th->source,
1821							iph->daddr, th->dest,
1822							inet_iif(skb),
1823							sdif);
1824		if (sk2) {
1825			inet_twsk_deschedule_put(inet_twsk(sk));
1826			sk = sk2;
1827			tcp_v4_restore_cb(skb);
1828			refcounted = false;
1829			goto process;
1830		}
1831	}
1832		/* to ACK */
1833		/* fall through */
1834	case TCP_TW_ACK:
1835		tcp_v4_timewait_ack(sk, skb);
1836		break;
1837	case TCP_TW_RST:
1838		tcp_v4_send_reset(sk, skb);
1839		inet_twsk_deschedule_put(inet_twsk(sk));
1840		goto discard_it;
1841	case TCP_TW_SUCCESS:;
1842	}
1843	goto discard_it;
1844}
1845
1846static struct timewait_sock_ops tcp_timewait_sock_ops = {
1847	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
1848	.twsk_unique	= tcp_twsk_unique,
1849	.twsk_destructor= tcp_twsk_destructor,
1850};
1851
1852void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
1853{
1854	struct dst_entry *dst = skb_dst(skb);
1855
1856	if (dst && dst_hold_safe(dst)) {
1857		sk->sk_rx_dst = dst;
1858		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
1859	}
1860}
1861EXPORT_SYMBOL(inet_sk_rx_dst_set);
1862
1863const struct inet_connection_sock_af_ops ipv4_specific = {
1864	.queue_xmit	   = ip_queue_xmit,
1865	.send_check	   = tcp_v4_send_check,
1866	.rebuild_header	   = inet_sk_rebuild_header,
1867	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
1868	.conn_request	   = tcp_v4_conn_request,
1869	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
1870	.net_header_len	   = sizeof(struct iphdr),
1871	.setsockopt	   = ip_setsockopt,
1872	.getsockopt	   = ip_getsockopt,
1873	.addr2sockaddr	   = inet_csk_addr2sockaddr,
1874	.sockaddr_len	   = sizeof(struct sockaddr_in),
1875#ifdef CONFIG_COMPAT
1876	.compat_setsockopt = compat_ip_setsockopt,
1877	.compat_getsockopt = compat_ip_getsockopt,
1878#endif
1879	.mtu_reduced	   = tcp_v4_mtu_reduced,
1880};
1881EXPORT_SYMBOL(ipv4_specific);
1882
1883#ifdef CONFIG_TCP_MD5SIG
1884static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
1885	.md5_lookup		= tcp_v4_md5_lookup,
1886	.calc_md5_hash		= tcp_v4_md5_hash_skb,
1887	.md5_parse		= tcp_v4_parse_md5_keys,
1888};
1889#endif
1890
 1891/* NOTE: A lot of things are set to zero explicitly by the call to
 1892 *       sk_alloc(), so they need not be done here.
1893 */
1894static int tcp_v4_init_sock(struct sock *sk)
1895{
1896	struct inet_connection_sock *icsk = inet_csk(sk);
1897
1898	tcp_init_sock(sk);
1899
1900	icsk->icsk_af_ops = &ipv4_specific;
1901
1902#ifdef CONFIG_TCP_MD5SIG
1903	tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
1904#endif
1905
1906	return 0;
1907}
1908
1909void tcp_v4_destroy_sock(struct sock *sk)
1910{
1911	struct tcp_sock *tp = tcp_sk(sk);
1912
1913	trace_tcp_destroy_sock(sk);
1914
1915	tcp_clear_xmit_timers(sk);
1916
1917	tcp_cleanup_congestion_control(sk);
1918
1919	tcp_cleanup_ulp(sk);
1920
 1921	/* Clean up the write buffer. */
1922	tcp_write_queue_purge(sk);
1923
1924	/* Check if we want to disable active TFO */
1925	tcp_fastopen_active_disable_ofo_check(sk);
1926
1927	/* Cleans up our, hopefully empty, out_of_order_queue. */
1928	skb_rbtree_purge(&tp->out_of_order_queue);
1929
1930#ifdef CONFIG_TCP_MD5SIG
1931	/* Clean up the MD5 key list, if any */
1932	if (tp->md5sig_info) {
1933		tcp_clear_md5_list(sk);
1934		kfree_rcu(rcu_dereference_protected(tp->md5sig_info, 1), rcu);
1935		tp->md5sig_info = NULL;
1936	}
1937#endif
1938
1939	/* Clean up a referenced TCP bind bucket. */
1940	if (inet_csk(sk)->icsk_bind_hash)
1941		inet_put_port(sk);
1942
1943	BUG_ON(tp->fastopen_rsk);
1944
1945	/* If socket is aborted during connect operation */
1946	tcp_free_fastopen_req(tp);
1947	tcp_fastopen_destroy_cipher(sk);
1948	tcp_saved_syn_free(tp);
1949
1950	sk_sockets_allocated_dec(sk);
1951}
1952EXPORT_SYMBOL(tcp_v4_destroy_sock);
1953
1954#ifdef CONFIG_PROC_FS
1955/* Proc filesystem TCP sock list dumping. */
1956
1957/*
 1958 * Get the next listener socket following cur.  If cur is NULL, get the
 1959 * first socket starting from the bucket given in st->bucket; when
 1960 * st->bucket is zero the very first socket in the hash table is returned.
1961 */
1962static void *listening_get_next(struct seq_file *seq, void *cur)
1963{
1964	struct tcp_iter_state *st = seq->private;
1965	struct net *net = seq_file_net(seq);
1966	struct inet_listen_hashbucket *ilb;
1967	struct sock *sk = cur;
1968
1969	if (!sk) {
1970get_head:
1971		ilb = &tcp_hashinfo.listening_hash[st->bucket];
1972		spin_lock(&ilb->lock);
1973		sk = sk_head(&ilb->head);
1974		st->offset = 0;
1975		goto get_sk;
1976	}
1977	ilb = &tcp_hashinfo.listening_hash[st->bucket];
1978	++st->num;
1979	++st->offset;
1980
1981	sk = sk_next(sk);
1982get_sk:
1983	sk_for_each_from(sk) {
1984		if (!net_eq(sock_net(sk), net))
1985			continue;
1986		if (sk->sk_family == st->family)
1987			return sk;
1988	}
1989	spin_unlock(&ilb->lock);
1990	st->offset = 0;
1991	if (++st->bucket < INET_LHTABLE_SIZE)
1992		goto get_head;
1993	return NULL;
1994}
1995
1996static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
1997{
1998	struct tcp_iter_state *st = seq->private;
1999	void *rc;
2000
2001	st->bucket = 0;
2002	st->offset = 0;
2003	rc = listening_get_next(seq, NULL);
2004
2005	while (rc && *pos) {
2006		rc = listening_get_next(seq, rc);
2007		--*pos;
2008	}
2009	return rc;
2010}
2011
2012static inline bool empty_bucket(const struct tcp_iter_state *st)
2013{
2014	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
2015}
2016
2017/*
2018 * Get first established socket starting from bucket given in st->bucket.
2019 * If st->bucket is zero, the very first socket in the hash is returned.
2020 */
2021static void *established_get_first(struct seq_file *seq)
2022{
2023	struct tcp_iter_state *st = seq->private;
2024	struct net *net = seq_file_net(seq);
2025	void *rc = NULL;
2026
2027	st->offset = 0;
2028	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
2029		struct sock *sk;
2030		struct hlist_nulls_node *node;
2031		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
2032
2033		/* Lockless fast path for the common case of empty buckets */
2034		if (empty_bucket(st))
2035			continue;
2036
2037		spin_lock_bh(lock);
2038		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
2039			if (sk->sk_family != st->family ||
2040			    !net_eq(sock_net(sk), net)) {
2041				continue;
2042			}
2043			rc = sk;
2044			goto out;
2045		}
2046		spin_unlock_bh(lock);
2047	}
2048out:
2049	return rc;
2050}
2051
2052static void *established_get_next(struct seq_file *seq, void *cur)
2053{
2054	struct sock *sk = cur;
2055	struct hlist_nulls_node *node;
2056	struct tcp_iter_state *st = seq->private;
2057	struct net *net = seq_file_net(seq);
2058
2059	++st->num;
2060	++st->offset;
2061
2062	sk = sk_nulls_next(sk);
2063
2064	sk_nulls_for_each_from(sk, node) {
2065		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
2066			return sk;
2067	}
2068
2069	spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2070	++st->bucket;
2071	return established_get_first(seq);
2072}
2073
2074static void *established_get_idx(struct seq_file *seq, loff_t pos)
2075{
2076	struct tcp_iter_state *st = seq->private;
2077	void *rc;
2078
2079	st->bucket = 0;
2080	rc = established_get_first(seq);
2081
2082	while (rc && pos) {
2083		rc = established_get_next(seq, rc);
2084		--pos;
2085	}
2086	return rc;
2087}
2088
2089static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2090{
2091	void *rc;
2092	struct tcp_iter_state *st = seq->private;
2093
2094	st->state = TCP_SEQ_STATE_LISTENING;
2095	rc	  = listening_get_idx(seq, &pos);
2096
2097	if (!rc) {
2098		st->state = TCP_SEQ_STATE_ESTABLISHED;
2099		rc	  = established_get_idx(seq, pos);
2100	}
2101
2102	return rc;
2103}
2104
2105static void *tcp_seek_last_pos(struct seq_file *seq)
2106{
2107	struct tcp_iter_state *st = seq->private;
2108	int offset = st->offset;
2109	int orig_num = st->num;
2110	void *rc = NULL;
2111
2112	switch (st->state) {
2113	case TCP_SEQ_STATE_LISTENING:
2114		if (st->bucket >= INET_LHTABLE_SIZE)
2115			break;
2116		st->state = TCP_SEQ_STATE_LISTENING;
2117		rc = listening_get_next(seq, NULL);
2118		while (offset-- && rc)
2119			rc = listening_get_next(seq, rc);
2120		if (rc)
2121			break;
2122		st->bucket = 0;
2123		st->state = TCP_SEQ_STATE_ESTABLISHED;
2124		/* Fallthrough */
2125	case TCP_SEQ_STATE_ESTABLISHED:
2126		if (st->bucket > tcp_hashinfo.ehash_mask)
2127			break;
2128		rc = established_get_first(seq);
2129		while (offset-- && rc)
2130			rc = established_get_next(seq, rc);
2131	}
2132
2133	st->num = orig_num;
2134
2135	return rc;
2136}
2137
2138static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2139{
2140	struct tcp_iter_state *st = seq->private;
2141	void *rc;
2142
2143	if (*pos && *pos == st->last_pos) {
2144		rc = tcp_seek_last_pos(seq);
2145		if (rc)
2146			goto out;
2147	}
2148
2149	st->state = TCP_SEQ_STATE_LISTENING;
2150	st->num = 0;
2151	st->bucket = 0;
2152	st->offset = 0;
2153	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2154
2155out:
2156	st->last_pos = *pos;
2157	return rc;
2158}
2159
2160static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2161{
2162	struct tcp_iter_state *st = seq->private;
2163	void *rc = NULL;
2164
2165	if (v == SEQ_START_TOKEN) {
2166		rc = tcp_get_idx(seq, 0);
2167		goto out;
2168	}
2169
2170	switch (st->state) {
2171	case TCP_SEQ_STATE_LISTENING:
2172		rc = listening_get_next(seq, v);
2173		if (!rc) {
2174			st->state = TCP_SEQ_STATE_ESTABLISHED;
2175			st->bucket = 0;
2176			st->offset = 0;
2177			rc	  = established_get_first(seq);
2178		}
2179		break;
2180	case TCP_SEQ_STATE_ESTABLISHED:
2181		rc = established_get_next(seq, v);
2182		break;
2183	}
2184out:
2185	++*pos;
2186	st->last_pos = *pos;
2187	return rc;
2188}
2189
2190static void tcp_seq_stop(struct seq_file *seq, void *v)
2191{
2192	struct tcp_iter_state *st = seq->private;
2193
2194	switch (st->state) {
2195	case TCP_SEQ_STATE_LISTENING:
2196		if (v != SEQ_START_TOKEN)
2197			spin_unlock(&tcp_hashinfo.listening_hash[st->bucket].lock);
2198		break;
2199	case TCP_SEQ_STATE_ESTABLISHED:
2200		if (v)
2201			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2202		break;
2203	}
2204}
2205
2206int tcp_seq_open(struct inode *inode, struct file *file)
2207{
2208	struct tcp_seq_afinfo *afinfo = PDE_DATA(inode);
2209	struct tcp_iter_state *s;
2210	int err;
2211
2212	err = seq_open_net(inode, file, &afinfo->seq_ops,
2213			  sizeof(struct tcp_iter_state));
2214	if (err < 0)
2215		return err;
2216
2217	s = ((struct seq_file *)file->private_data)->private;
2218	s->family		= afinfo->family;
2219	s->last_pos		= 0;
2220	return 0;
2221}
2222EXPORT_SYMBOL(tcp_seq_open);
2223
2224int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
2225{
2226	int rc = 0;
2227	struct proc_dir_entry *p;
2228
2229	afinfo->seq_ops.start		= tcp_seq_start;
2230	afinfo->seq_ops.next		= tcp_seq_next;
2231	afinfo->seq_ops.stop		= tcp_seq_stop;
2232
2233	p = proc_create_data(afinfo->name, 0444, net->proc_net,
2234			     afinfo->seq_fops, afinfo);
2235	if (!p)
2236		rc = -ENOMEM;
2237	return rc;
2238}
2239EXPORT_SYMBOL(tcp_proc_register);
2240
2241void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
2242{
2243	remove_proc_entry(afinfo->name, net->proc_net);
2244}
2245EXPORT_SYMBOL(tcp_proc_unregister);
2246
2247static void get_openreq4(const struct request_sock *req,
2248			 struct seq_file *f, int i)
2249{
2250	const struct inet_request_sock *ireq = inet_rsk(req);
2251	long delta = req->rsk_timer.expires - jiffies;
2252
2253	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2254		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
2255		i,
2256		ireq->ir_loc_addr,
2257		ireq->ir_num,
2258		ireq->ir_rmt_addr,
2259		ntohs(ireq->ir_rmt_port),
2260		TCP_SYN_RECV,
2261		0, 0, /* could print option size, but that is af dependent. */
2262		1,    /* timers active (only the expire timer) */
2263		jiffies_delta_to_clock_t(delta),
2264		req->num_timeout,
2265		from_kuid_munged(seq_user_ns(f),
2266				 sock_i_uid(req->rsk_listener)),
2267		0,  /* non standard timer */
2268		0, /* open_requests have no inode */
2269		0,
2270		req);
2271}
2272
2273static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
2274{
2275	int timer_active;
2276	unsigned long timer_expires;
2277	const struct tcp_sock *tp = tcp_sk(sk);
2278	const struct inet_connection_sock *icsk = inet_csk(sk);
2279	const struct inet_sock *inet = inet_sk(sk);
2280	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
2281	__be32 dest = inet->inet_daddr;
2282	__be32 src = inet->inet_rcv_saddr;
2283	__u16 destp = ntohs(inet->inet_dport);
2284	__u16 srcp = ntohs(inet->inet_sport);
2285	int rx_queue;
2286	int state;
2287
2288	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
2289	    icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
2290	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
2291		timer_active	= 1;
2292		timer_expires	= icsk->icsk_timeout;
2293	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2294		timer_active	= 4;
2295		timer_expires	= icsk->icsk_timeout;
2296	} else if (timer_pending(&sk->sk_timer)) {
2297		timer_active	= 2;
2298		timer_expires	= sk->sk_timer.expires;
2299	} else {
2300		timer_active	= 0;
2301		timer_expires = jiffies;
2302	}
2303
2304	state = inet_sk_state_load(sk);
2305	if (state == TCP_LISTEN)
2306		rx_queue = sk->sk_ack_backlog;
2307	else
2308		/* Because we don't lock the socket,
2309		 * we might find a transient negative value.
2310		 */
2311		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
2312
2313	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2314			"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
2315		i, src, srcp, dest, destp, state,
2316		tp->write_seq - tp->snd_una,
2317		rx_queue,
2318		timer_active,
2319		jiffies_delta_to_clock_t(timer_expires - jiffies),
2320		icsk->icsk_retransmits,
2321		from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
2322		icsk->icsk_probes_out,
2323		sock_i_ino(sk),
2324		refcount_read(&sk->sk_refcnt), sk,
2325		jiffies_to_clock_t(icsk->icsk_rto),
2326		jiffies_to_clock_t(icsk->icsk_ack.ato),
2327		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
2328		tp->snd_cwnd,
2329		state == TCP_LISTEN ?
2330		    fastopenq->max_qlen :
2331		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
2332}
2333
2334static void get_timewait4_sock(const struct inet_timewait_sock *tw,
2335			       struct seq_file *f, int i)
2336{
2337	long delta = tw->tw_timer.expires - jiffies;
2338	__be32 dest, src;
2339	__u16 destp, srcp;
2340
2341	dest  = tw->tw_daddr;
2342	src   = tw->tw_rcv_saddr;
2343	destp = ntohs(tw->tw_dport);
2344	srcp  = ntohs(tw->tw_sport);
2345
2346	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2347		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
2348		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2349		3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
2350		refcount_read(&tw->tw_refcnt), tw);
2351}
2352
2353#define TMPSZ 150
2354
2355static int tcp4_seq_show(struct seq_file *seq, void *v)
2356{
2357	struct tcp_iter_state *st;
2358	struct sock *sk = v;
2359
2360	seq_setwidth(seq, TMPSZ - 1);
2361	if (v == SEQ_START_TOKEN) {
2362		seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
2363			   "rx_queue tr tm->when retrnsmt   uid  timeout "
2364			   "inode");
2365		goto out;
2366	}
2367	st = seq->private;
2368
2369	if (sk->sk_state == TCP_TIME_WAIT)
2370		get_timewait4_sock(v, seq, st->num);
2371	else if (sk->sk_state == TCP_NEW_SYN_RECV)
2372		get_openreq4(v, seq, st->num);
2373	else
2374		get_tcp4_sock(v, seq, st->num);
2375out:
2376	seq_pad(seq, '\n');
2377	return 0;
2378}
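/* Editor's note: an illustrative (not captured) /proc/net/tcp line, as
 * produced by get_tcp4_sock() above for a listener on 127.0.0.1:631.
 * Addresses and ports are hex; on little-endian hosts 127.0.0.1 prints
 * as 0100007F, port 631 as 0277, and "0A" is TCP_LISTEN:
 *
 *   0: 0100007F:0277 00000000:0000 0A 00000000:00000000 00:00000000 00000000     0        0 12345 1 0000000000000000 100 0 0 10 0
 *
 * The remaining columns follow the seq_printf() above: retransmits,
 * uid, probes, inode, refcount, socket pointer, rto, ato,
 * quick/pingpong, cwnd and ssthresh (max_qlen for listeners).
 */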
2379
2380static const struct file_operations tcp_afinfo_seq_fops = {
2381	.open    = tcp_seq_open,
2382	.read    = seq_read,
2383	.llseek  = seq_lseek,
2384	.release = seq_release_net
2385};
2386
2387static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2388	.name		= "tcp",
2389	.family		= AF_INET,
2390	.seq_fops	= &tcp_afinfo_seq_fops,
2391	.seq_ops	= {
2392		.show		= tcp4_seq_show,
2393	},
2394};
2395
2396static int __net_init tcp4_proc_init_net(struct net *net)
2397{
2398	return tcp_proc_register(net, &tcp4_seq_afinfo);
2399}
2400
2401static void __net_exit tcp4_proc_exit_net(struct net *net)
2402{
2403	tcp_proc_unregister(net, &tcp4_seq_afinfo);
2404}
2405
2406static struct pernet_operations tcp4_net_ops = {
2407	.init = tcp4_proc_init_net,
2408	.exit = tcp4_proc_exit_net,
2409};
2410
2411int __init tcp4_proc_init(void)
2412{
2413	return register_pernet_subsys(&tcp4_net_ops);
2414}
2415
2416void tcp4_proc_exit(void)
2417{
2418	unregister_pernet_subsys(&tcp4_net_ops);
2419}
2420#endif /* CONFIG_PROC_FS */
2421
2422struct proto tcp_prot = {
2423	.name			= "TCP",
2424	.owner			= THIS_MODULE,
2425	.close			= tcp_close,
2426	.pre_connect		= tcp_v4_pre_connect,
2427	.connect		= tcp_v4_connect,
2428	.disconnect		= tcp_disconnect,
2429	.accept			= inet_csk_accept,
2430	.ioctl			= tcp_ioctl,
2431	.init			= tcp_v4_init_sock,
2432	.destroy		= tcp_v4_destroy_sock,
2433	.shutdown		= tcp_shutdown,
2434	.setsockopt		= tcp_setsockopt,
2435	.getsockopt		= tcp_getsockopt,
2436	.keepalive		= tcp_set_keepalive,
2437	.recvmsg		= tcp_recvmsg,
2438	.sendmsg		= tcp_sendmsg,
2439	.sendpage		= tcp_sendpage,
2440	.backlog_rcv		= tcp_v4_do_rcv,
2441	.release_cb		= tcp_release_cb,
2442	.hash			= inet_hash,
2443	.unhash			= inet_unhash,
2444	.get_port		= inet_csk_get_port,
2445	.enter_memory_pressure	= tcp_enter_memory_pressure,
2446	.leave_memory_pressure	= tcp_leave_memory_pressure,
2447	.stream_memory_free	= tcp_stream_memory_free,
2448	.sockets_allocated	= &tcp_sockets_allocated,
2449	.orphan_count		= &tcp_orphan_count,
2450	.memory_allocated	= &tcp_memory_allocated,
2451	.memory_pressure	= &tcp_memory_pressure,
2452	.sysctl_mem		= sysctl_tcp_mem,
2453	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_wmem),
2454	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_rmem),
2455	.max_header		= MAX_TCP_HEADER,
2456	.obj_size		= sizeof(struct tcp_sock),
2457	.slab_flags		= SLAB_TYPESAFE_BY_RCU,
2458	.twsk_prot		= &tcp_timewait_sock_ops,
2459	.rsk_prot		= &tcp_request_sock_ops,
2460	.h.hashinfo		= &tcp_hashinfo,
2461	.no_autobind		= true,
2462#ifdef CONFIG_COMPAT
2463	.compat_setsockopt	= compat_tcp_setsockopt,
2464	.compat_getsockopt	= compat_tcp_getsockopt,
2465#endif
2466	.diag_destroy		= tcp_abort,
2467};
2468EXPORT_SYMBOL(tcp_prot);
2469
2470static void __net_exit tcp_sk_exit(struct net *net)
2471{
2472	int cpu;
2473
2474	module_put(net->ipv4.tcp_congestion_control->owner);
2475
2476	for_each_possible_cpu(cpu)
2477		inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
2478	free_percpu(net->ipv4.tcp_sk);
2479}
2480
2481static int __net_init tcp_sk_init(struct net *net)
2482{
2483	int res, cpu, cnt;
2484
2485	net->ipv4.tcp_sk = alloc_percpu(struct sock *);
2486	if (!net->ipv4.tcp_sk)
2487		return -ENOMEM;
2488
2489	for_each_possible_cpu(cpu) {
2490		struct sock *sk;
2491
2492		res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
2493					   IPPROTO_TCP, net);
2494		if (res)
2495			goto fail;
2496		sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
2497		*per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
2498	}
2499
2500	net->ipv4.sysctl_tcp_ecn = 2;
2501	net->ipv4.sysctl_tcp_ecn_fallback = 1;
2502
2503	net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
2504	net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
2505	net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;
2506
2507	net->ipv4.sysctl_tcp_keepalive_time = TCP_KEEPALIVE_TIME;
2508	net->ipv4.sysctl_tcp_keepalive_probes = TCP_KEEPALIVE_PROBES;
2509	net->ipv4.sysctl_tcp_keepalive_intvl = TCP_KEEPALIVE_INTVL;
2510
2511	net->ipv4.sysctl_tcp_syn_retries = TCP_SYN_RETRIES;
2512	net->ipv4.sysctl_tcp_synack_retries = TCP_SYNACK_RETRIES;
2513	net->ipv4.sysctl_tcp_syncookies = 1;
2514	net->ipv4.sysctl_tcp_reordering = TCP_FASTRETRANS_THRESH;
2515	net->ipv4.sysctl_tcp_retries1 = TCP_RETR1;
2516	net->ipv4.sysctl_tcp_retries2 = TCP_RETR2;
2517	net->ipv4.sysctl_tcp_orphan_retries = 0;
2518	net->ipv4.sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
2519	net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX;
2520	net->ipv4.sysctl_tcp_tw_reuse = 0;
2521
2522	cnt = tcp_hashinfo.ehash_mask + 1;
2523	net->ipv4.tcp_death_row.sysctl_max_tw_buckets = (cnt + 1) / 2;
2524	net->ipv4.tcp_death_row.hashinfo = &tcp_hashinfo;
2525
2526	net->ipv4.sysctl_max_syn_backlog = max(128, cnt / 256);
2527	net->ipv4.sysctl_tcp_sack = 1;
2528	net->ipv4.sysctl_tcp_window_scaling = 1;
2529	net->ipv4.sysctl_tcp_timestamps = 1;
2530	net->ipv4.sysctl_tcp_early_retrans = 3;
2531	net->ipv4.sysctl_tcp_recovery = TCP_RACK_LOSS_DETECTION;
2532	net->ipv4.sysctl_tcp_slow_start_after_idle = 1; /* By default, RFC2861 behavior.  */
2533	net->ipv4.sysctl_tcp_retrans_collapse = 1;
2534	net->ipv4.sysctl_tcp_max_reordering = 300;
2535	net->ipv4.sysctl_tcp_dsack = 1;
2536	net->ipv4.sysctl_tcp_app_win = 31;
2537	net->ipv4.sysctl_tcp_adv_win_scale = 1;
2538	net->ipv4.sysctl_tcp_frto = 2;
2539	net->ipv4.sysctl_tcp_moderate_rcvbuf = 1;
2540	/* This limits the percentage of the congestion window which we
2541	 * will allow a single TSO frame to consume.  Building TSO frames
2542	 * which are too large can cause TCP streams to be bursty.
2543	 */
2544	net->ipv4.sysctl_tcp_tso_win_divisor = 3;
2545	/* Default TSQ limit of four TSO segments */
2546	net->ipv4.sysctl_tcp_limit_output_bytes = 262144;
2547	/* rfc5961 challenge ack rate limiting */
2548	net->ipv4.sysctl_tcp_challenge_ack_limit = 1000;
2549	net->ipv4.sysctl_tcp_min_tso_segs = 2;
2550	net->ipv4.sysctl_tcp_min_rtt_wlen = 300;
2551	net->ipv4.sysctl_tcp_autocorking = 1;
2552	net->ipv4.sysctl_tcp_invalid_ratelimit = HZ/2;
2553	net->ipv4.sysctl_tcp_pacing_ss_ratio = 200;
2554	net->ipv4.sysctl_tcp_pacing_ca_ratio = 120;
2555	if (net != &init_net) {
2556		memcpy(net->ipv4.sysctl_tcp_rmem,
2557		       init_net.ipv4.sysctl_tcp_rmem,
2558		       sizeof(init_net.ipv4.sysctl_tcp_rmem));
2559		memcpy(net->ipv4.sysctl_tcp_wmem,
2560		       init_net.ipv4.sysctl_tcp_wmem,
2561		       sizeof(init_net.ipv4.sysctl_tcp_wmem));
2562	}
2563	net->ipv4.sysctl_tcp_fastopen = TFO_CLIENT_ENABLE;
2564	spin_lock_init(&net->ipv4.tcp_fastopen_ctx_lock);
2565	net->ipv4.sysctl_tcp_fastopen_blackhole_timeout = 60 * 60;
2566	atomic_set(&net->ipv4.tfo_active_disable_times, 0);
2567
2568	/* Reno is always built in */
2569	if (!net_eq(net, &init_net) &&
2570	    try_module_get(init_net.ipv4.tcp_congestion_control->owner))
2571		net->ipv4.tcp_congestion_control = init_net.ipv4.tcp_congestion_control;
2572	else
2573		net->ipv4.tcp_congestion_control = &tcp_reno;
2574
2575	return 0;
2576fail:
2577	tcp_sk_exit(net);
2578
2579	return res;
2580}
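/* Editor's note: the per-netns defaults installed above are exposed
 * under /proc/sys/net/ipv4/. A minimal sketch reading one of them back;
 * tcp_syncookies corresponds to sysctl_tcp_syncookies, set to 1 above.
 */
#include <stdio.h>

int main(void)
{
	char buf[16];
	FILE *f = fopen("/proc/sys/net/ipv4/tcp_syncookies", "r");

	if (f && fgets(buf, sizeof(buf), f))
		printf("tcp_syncookies = %s", buf);	/* "1\n" by default here */
	if (f)
		fclose(f);
	return f ? 0 : 1;
}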
2581
2582static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
2583{
2584	struct net *net;
2585
2586	inet_twsk_purge(&tcp_hashinfo, AF_INET);
2587
2588	list_for_each_entry(net, net_exit_list, exit_list)
2589		tcp_fastopen_ctx_destroy(net);
2590}
2591
2592static struct pernet_operations __net_initdata tcp_sk_ops = {
2593       .init	   = tcp_sk_init,
2594       .exit	   = tcp_sk_exit,
2595       .exit_batch = tcp_sk_exit_batch,
2596};
2597
2598void __init tcp_v4_init(void)
2599{
2600	if (register_pernet_subsys(&tcp_sk_ops))
2601		panic("Failed to create the TCP control socket.\n");
2602}