v6.2
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * INET		An implementation of the TCP/IP protocol suite for the LINUX
   4 *		operating system.  INET is implemented using the  BSD Socket
   5 *		interface as the means of communication with the user level.
   6 *
   7 *		Implementation of the Transmission Control Protocol(TCP).
   8 *
   9 *		IPv4 specific functions
  10 *
  11 *		code split from:
  12 *		linux/ipv4/tcp.c
  13 *		linux/ipv4/tcp_input.c
  14 *		linux/ipv4/tcp_output.c
  15 *
  16 *		See tcp.c for author information
  17 */
  18
  19/*
  20 * Changes:
  21 *		David S. Miller	:	New socket lookup architecture.
  22 *					This code is dedicated to John Dyson.
  23 *		David S. Miller :	Change semantics of established hash,
  24 *					half is devoted to TIME_WAIT sockets
  25 *					and the rest go in the other half.
  26 *		Andi Kleen :		Add support for syncookies and fixed
  27 *					some bugs: ip options weren't passed to
  28 *					the TCP layer, missed a check for an
  29 *					ACK bit.
  30 *		Andi Kleen :		Implemented fast path mtu discovery.
  31 *	     				Fixed many serious bugs in the
  32 *					request_sock handling and moved
  33 *					most of it into the af independent code.
  34 *					Added tail drop and some other bugfixes.
  35 *					Added new listen semantics.
  36 *		Mike McLagan	:	Routing by source
  37 *	Juan Jose Ciarlante:		ip_dynaddr bits
  38 *		Andi Kleen:		various fixes.
  39 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
  40 *					coma.
  41 *	Andi Kleen		:	Fix new listen.
  42 *	Andi Kleen		:	Fix accept error reporting.
  43 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
  44 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
  45 *					a single port at the same time.
  46 */
  47
  48#define pr_fmt(fmt) "TCP: " fmt
  49
  50#include <linux/bottom_half.h>
  51#include <linux/types.h>
  52#include <linux/fcntl.h>
  53#include <linux/module.h>
  54#include <linux/random.h>
  55#include <linux/cache.h>
  56#include <linux/jhash.h>
  57#include <linux/init.h>
  58#include <linux/times.h>
  59#include <linux/slab.h>
  60
  61#include <net/net_namespace.h>
  62#include <net/icmp.h>
  63#include <net/inet_hashtables.h>
  64#include <net/tcp.h>
  65#include <net/transp_v6.h>
  66#include <net/ipv6.h>
  67#include <net/inet_common.h>
  68#include <net/timewait_sock.h>
  69#include <net/xfrm.h>
  70#include <net/secure_seq.h>
  71#include <net/busy_poll.h>
  72
  73#include <linux/inet.h>
  74#include <linux/ipv6.h>
  75#include <linux/stddef.h>
  76#include <linux/proc_fs.h>
  77#include <linux/seq_file.h>
  78#include <linux/inetdevice.h>
  79#include <linux/btf_ids.h>
  80
  81#include <crypto/hash.h>
  82#include <linux/scatterlist.h>
  83
  84#include <trace/events/tcp.h>
  85
  86#ifdef CONFIG_TCP_MD5SIG
  87static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
  88			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
  89#endif
  90
  91struct inet_hashinfo tcp_hashinfo;
  92EXPORT_SYMBOL(tcp_hashinfo);
  93
  94static DEFINE_PER_CPU(struct sock *, ipv4_tcp_sk);
  95
  96static u32 tcp_v4_init_seq(const struct sk_buff *skb)
  97{
  98	return secure_tcp_seq(ip_hdr(skb)->daddr,
  99			      ip_hdr(skb)->saddr,
 100			      tcp_hdr(skb)->dest,
 101			      tcp_hdr(skb)->source);
 102}
 103
 104static u32 tcp_v4_init_ts_off(const struct net *net, const struct sk_buff *skb)
 105{
 106	return secure_tcp_ts_off(net, ip_hdr(skb)->daddr, ip_hdr(skb)->saddr);
 107}
 108
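Both helpers above feed the connection 4-tuple through a keyed hash so that initial sequence numbers and timestamp offsets are unpredictable to off-path attackers (RFC 6528). As a rough user-space analogue of what secure_tcp_seq() computes, and explicitly not the kernel's construction (which uses siphash over a boot-time secret plus a fine-grained clock term), a sketch might look like this; toy_isn() and its mixing constant are hypothetical:

#include <stdint.h>
#include <time.h>

/* Illustrative only: ISN = keyed_hash(saddr, daddr, sport, dport) + clock. */
static uint32_t toy_isn(uint32_t saddr, uint32_t daddr,
			uint16_t sport, uint16_t dport, uint64_t secret)
{
	uint64_t h = secret;

	h ^= ((uint64_t)saddr << 32) | daddr;
	h *= 0x9e3779b97f4a7c15ULL;	/* arbitrary odd constant, not the kernel's */
	h ^= ((uint64_t)sport << 16) | dport;
	h *= 0x9e3779b97f4a7c15ULL;
	h ^= h >> 32;

	return (uint32_t)h + (uint32_t)time(NULL);	/* coarse stand-in for the clock term */
}
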
 109int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
 110{
 111	int reuse = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tw_reuse);
 112	const struct inet_timewait_sock *tw = inet_twsk(sktw);
 113	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
 114	struct tcp_sock *tp = tcp_sk(sk);
 115
 116	if (reuse == 2) {
 117		/* Still does not detect *everything* that goes through
 118		 * lo, since we require a loopback src or dst address
 119		 * or direct binding to 'lo' interface.
 120		 */
 121		bool loopback = false;
 122		if (tw->tw_bound_dev_if == LOOPBACK_IFINDEX)
 123			loopback = true;
 124#if IS_ENABLED(CONFIG_IPV6)
 125		if (tw->tw_family == AF_INET6) {
 126			if (ipv6_addr_loopback(&tw->tw_v6_daddr) ||
 127			    ipv6_addr_v4mapped_loopback(&tw->tw_v6_daddr) ||
 128			    ipv6_addr_loopback(&tw->tw_v6_rcv_saddr) ||
 129			    ipv6_addr_v4mapped_loopback(&tw->tw_v6_rcv_saddr))
 130				loopback = true;
 131		} else
 132#endif
 133		{
 134			if (ipv4_is_loopback(tw->tw_daddr) ||
 135			    ipv4_is_loopback(tw->tw_rcv_saddr))
 136				loopback = true;
 137		}
 138		if (!loopback)
 139			reuse = 0;
 140	}
 141
 142	/* With PAWS, it is safe from the viewpoint
 143	   of data integrity. Even without PAWS it is safe provided sequence
 144	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.
 145
 146	   Actually, the idea is close to VJ's one, only timestamp cache is
 147	   held not per host, but per port pair and TW bucket is used as state
 148	   holder.
 149
 150	   If TW bucket has been already destroyed we fall back to VJ's scheme
 151	   and use initial timestamp retrieved from peer table.
 152	 */
 153	if (tcptw->tw_ts_recent_stamp &&
 154	    (!twp || (reuse && time_after32(ktime_get_seconds(),
 155					    tcptw->tw_ts_recent_stamp)))) {
 156		/* In case of repair and re-using TIME-WAIT sockets we still
 157		 * want to be sure that it is safe as above but honor the
 158		 * sequence numbers and time stamps set as part of the repair
 159		 * process.
 160		 *
 161		 * Without this check re-using a TIME-WAIT socket with TCP
 162		 * repair would accumulate a -1 on the repair assigned
 163		 * sequence number. The first time it is reused the sequence
 164		 * is -1, the second time -2, etc. This fixes that issue
 165		 * without appearing to create any others.
 166		 */
 167		if (likely(!tp->repair)) {
 168			u32 seq = tcptw->tw_snd_nxt + 65535 + 2;
 169
 170			if (!seq)
 171				seq = 1;
 172			WRITE_ONCE(tp->write_seq, seq);
 173			tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
 174			tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
 175		}
 176		sock_hold(sktw);
 177		return 1;
 178	}
 179
 180	return 0;
 181}
 182EXPORT_SYMBOL_GPL(tcp_twsk_unique);
 183
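tcp_twsk_unique() runs on connect() when the chosen 4-tuple collides with a local TIME-WAIT socket; whether reuse is even considered is gated by the net.ipv4.tcp_tw_reuse sysctl read at the top of the function above (0 = never, 1 = reuse when PAWS makes it safe, 2 = loopback traffic only, the default). A minimal user-space sketch of flipping that knob through procfs (requires privilege; assumes the usual /proc/sys layout):

#include <stdio.h>

int main(void)
{
	/* 2 restricts TIME-WAIT reuse to loopback connections. */
	FILE *f = fopen("/proc/sys/net/ipv4/tcp_tw_reuse", "w");

	if (!f) {
		perror("tcp_tw_reuse");
		return 1;
	}
	fprintf(f, "2\n");
	return fclose(f) ? 1 : 0;
}
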
 184static int tcp_v4_pre_connect(struct sock *sk, struct sockaddr *uaddr,
 185			      int addr_len)
 186{
 187	/* This check is replicated from tcp_v4_connect() and intended to
 188	 * prevent BPF program called below from accessing bytes that are out
 189	 * of the bound specified by user in addr_len.
 190	 */
 191	if (addr_len < sizeof(struct sockaddr_in))
 192		return -EINVAL;
 193
 194	sock_owned_by_me(sk);
 195
 196	return BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr);
 197}
 198
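tcp_v4_pre_connect() above and tcp_v4_connect() below are both reached through an ordinary connect() call, and both reject an address length shorter than struct sockaddr_in with -EINVAL before anything else happens. A minimal user-space sketch (192.0.2.1 is a documentation address; error handling trimmed):

#include <arpa/inet.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_in dst;
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0)
		return 1;

	memset(&dst, 0, sizeof(dst));
	dst.sin_family = AF_INET;
	dst.sin_port = htons(80);
	inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);

	/* The third argument must be >= sizeof(struct sockaddr_in),
	 * otherwise tcp_v4_connect() returns -EINVAL before any route lookup.
	 */
	if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) < 0)
		perror("connect");

	close(fd);
	return 0;
}
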
 199/* This will initiate an outgoing connection. */
 200int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 201{
 202	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
 203	struct inet_timewait_death_row *tcp_death_row;
 204	struct inet_sock *inet = inet_sk(sk);
 205	struct tcp_sock *tp = tcp_sk(sk);
 206	struct ip_options_rcu *inet_opt;
 207	struct net *net = sock_net(sk);
 208	__be16 orig_sport, orig_dport;
 209	__be32 daddr, nexthop;
 210	struct flowi4 *fl4;
 211	struct rtable *rt;
 212	int err;
 213
 214	if (addr_len < sizeof(struct sockaddr_in))
 215		return -EINVAL;
 216
 217	if (usin->sin_family != AF_INET)
 218		return -EAFNOSUPPORT;
 219
 220	nexthop = daddr = usin->sin_addr.s_addr;
 221	inet_opt = rcu_dereference_protected(inet->inet_opt,
 222					     lockdep_sock_is_held(sk));
 223	if (inet_opt && inet_opt->opt.srr) {
 224		if (!daddr)
 225			return -EINVAL;
 226		nexthop = inet_opt->opt.faddr;
 227	}
 228
 229	orig_sport = inet->inet_sport;
 230	orig_dport = usin->sin_port;
 231	fl4 = &inet->cork.fl.u.ip4;
 232	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
 233			      sk->sk_bound_dev_if, IPPROTO_TCP, orig_sport,
 234			      orig_dport, sk);
 235	if (IS_ERR(rt)) {
 236		err = PTR_ERR(rt);
 237		if (err == -ENETUNREACH)
 238			IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
 239		return err;
 240	}
 241
 242	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
 243		ip_rt_put(rt);
 244		return -ENETUNREACH;
 245	}
 246
 247	if (!inet_opt || !inet_opt->opt.srr)
 248		daddr = fl4->daddr;
 249
 250	tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;
 251
 252	if (!inet->inet_saddr) {
 253		err = inet_bhash2_update_saddr(sk,  &fl4->saddr, AF_INET);
 254		if (err) {
 255			ip_rt_put(rt);
 256			return err;
 257		}
 258	} else {
 259		sk_rcv_saddr_set(sk, inet->inet_saddr);
 260	}
 261
 262	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
 263		/* Reset inherited state */
 264		tp->rx_opt.ts_recent	   = 0;
 265		tp->rx_opt.ts_recent_stamp = 0;
 266		if (likely(!tp->repair))
 267			WRITE_ONCE(tp->write_seq, 0);
 268	}
 269
 270	inet->inet_dport = usin->sin_port;
 271	sk_daddr_set(sk, daddr);
 272
 273	inet_csk(sk)->icsk_ext_hdr_len = 0;
 274	if (inet_opt)
 275		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
 276
 277	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;
 278
 279	/* Socket identity is still unknown (sport may be zero).
  280	 * However we set state to SYN-SENT and, without releasing the socket
  281	 * lock, select a source port, enter ourselves into the hash tables and
  282	 * complete initialization after this.
 283	 */
 284	tcp_set_state(sk, TCP_SYN_SENT);
 285	err = inet_hash_connect(tcp_death_row, sk);
 286	if (err)
 287		goto failure;
 288
 289	sk_set_txhash(sk);
 290
 291	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
 292			       inet->inet_sport, inet->inet_dport, sk);
 293	if (IS_ERR(rt)) {
 294		err = PTR_ERR(rt);
 295		rt = NULL;
 296		goto failure;
 297	}
 298	/* OK, now commit destination to socket.  */
 299	sk->sk_gso_type = SKB_GSO_TCPV4;
 300	sk_setup_caps(sk, &rt->dst);
 301	rt = NULL;
 302
 303	if (likely(!tp->repair)) {
 304		if (!tp->write_seq)
 305			WRITE_ONCE(tp->write_seq,
 306				   secure_tcp_seq(inet->inet_saddr,
 307						  inet->inet_daddr,
 308						  inet->inet_sport,
 309						  usin->sin_port));
 310		tp->tsoffset = secure_tcp_ts_off(net, inet->inet_saddr,
 311						 inet->inet_daddr);
 312	}
 313
 314	inet->inet_id = get_random_u16();
 315
 316	if (tcp_fastopen_defer_connect(sk, &err))
 317		return err;
 318	if (err)
 319		goto failure;
 320
 321	err = tcp_connect(sk);
 322
 323	if (err)
 324		goto failure;
 325
 326	return 0;
 327
 328failure:
 329	/*
 330	 * This unhashes the socket and releases the local port,
 331	 * if necessary.
 332	 */
 333	tcp_set_state(sk, TCP_CLOSE);
 334	inet_bhash2_reset_saddr(sk);
 335	ip_rt_put(rt);
 336	sk->sk_route_caps = 0;
 337	inet->inet_dport = 0;
 338	return err;
 339}
 340EXPORT_SYMBOL(tcp_v4_connect);
 341
 342/*
 343 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
 344 * It can be called through tcp_release_cb() if socket was owned by user
 345 * at the time tcp_v4_err() was called to handle ICMP message.
 346 */
 347void tcp_v4_mtu_reduced(struct sock *sk)
 348{
 349	struct inet_sock *inet = inet_sk(sk);
 350	struct dst_entry *dst;
 351	u32 mtu;
 352
 353	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
 354		return;
 355	mtu = READ_ONCE(tcp_sk(sk)->mtu_info);
 356	dst = inet_csk_update_pmtu(sk, mtu);
 357	if (!dst)
 358		return;
 359
  360	/* Something is about to go wrong... Remember the soft error
  361	 * in case this connection is not able to recover.
 362	 */
 363	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
 364		sk->sk_err_soft = EMSGSIZE;
 365
 366	mtu = dst_mtu(dst);
 367
 368	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
 369	    ip_sk_accept_pmtu(sk) &&
 370	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
 371		tcp_sync_mss(sk, mtu);
 372
 373		/* Resend the TCP packet because it's
 374		 * clear that the old packet has been
 375		 * dropped. This is the new "fast" path mtu
 376		 * discovery.
 377		 */
 378		tcp_simple_retransmit(sk);
 379	} /* else let the usual retransmit timer handle it */
 380}
 381EXPORT_SYMBOL(tcp_v4_mtu_reduced);
 382
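Whether tcp_v4_mtu_reduced() is allowed to shrink the MSS depends on inet->pmtudisc, which user space controls with the IP_MTU_DISCOVER socket option; with IP_PMTUDISC_DONT the branch above is skipped and the usual retransmit timer handles the loss. A short sketch, with set_pmtu_discover() being a hypothetical helper name:

#include <netinet/in.h>
#include <sys/socket.h>

/* mode: IP_PMTUDISC_DO (always set DF), IP_PMTUDISC_WANT (per-route),
 * or IP_PMTUDISC_DONT (never set DF; skip the MSS adjustment above).
 */
static int set_pmtu_discover(int fd, int mode)
{
	return setsockopt(fd, IPPROTO_IP, IP_MTU_DISCOVER, &mode, sizeof(mode));
}
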
 383static void do_redirect(struct sk_buff *skb, struct sock *sk)
 384{
 385	struct dst_entry *dst = __sk_dst_check(sk, 0);
 386
 387	if (dst)
 388		dst->ops->redirect(dst, sk, skb);
 389}
 390
 391
 392/* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
 393void tcp_req_err(struct sock *sk, u32 seq, bool abort)
 394{
 395	struct request_sock *req = inet_reqsk(sk);
 396	struct net *net = sock_net(sk);
 397
 398	/* ICMPs are not backlogged, hence we cannot get
 399	 * an established socket here.
 400	 */
 401	if (seq != tcp_rsk(req)->snt_isn) {
 402		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
 403	} else if (abort) {
 404		/*
 405		 * Still in SYN_RECV, just remove it silently.
 406		 * There is no good way to pass the error to the newly
 407		 * created socket, and POSIX does not want network
 408		 * errors returned from accept().
 409		 */
 410		inet_csk_reqsk_queue_drop(req->rsk_listener, req);
 411		tcp_listendrop(req->rsk_listener);
 412	}
 413	reqsk_put(req);
 414}
 415EXPORT_SYMBOL(tcp_req_err);
 416
 417/* TCP-LD (RFC 6069) logic */
 418void tcp_ld_RTO_revert(struct sock *sk, u32 seq)
 419{
 420	struct inet_connection_sock *icsk = inet_csk(sk);
 421	struct tcp_sock *tp = tcp_sk(sk);
 422	struct sk_buff *skb;
 423	s32 remaining;
 424	u32 delta_us;
 425
 426	if (sock_owned_by_user(sk))
 427		return;
 428
 429	if (seq != tp->snd_una  || !icsk->icsk_retransmits ||
 430	    !icsk->icsk_backoff)
 431		return;
 432
 433	skb = tcp_rtx_queue_head(sk);
 434	if (WARN_ON_ONCE(!skb))
 435		return;
 436
 437	icsk->icsk_backoff--;
 438	icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) : TCP_TIMEOUT_INIT;
 439	icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
 440
 441	tcp_mstamp_refresh(tp);
 442	delta_us = (u32)(tp->tcp_mstamp - tcp_skb_timestamp_us(skb));
 443	remaining = icsk->icsk_rto - usecs_to_jiffies(delta_us);
 444
 445	if (remaining > 0) {
 446		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
 447					  remaining, TCP_RTO_MAX);
 448	} else {
 449		/* RTO revert clocked out retransmission.
 450		 * Will retransmit now.
 451		 */
 452		tcp_retransmit_timer(sk);
 453	}
 454}
 455EXPORT_SYMBOL(tcp_ld_RTO_revert);
 456
 457/*
 458 * This routine is called by the ICMP module when it gets some
 459 * sort of error condition.  If err < 0 then the socket should
 460 * be closed and the error returned to the user.  If err > 0
 461 * it's just the icmp type << 8 | icmp code.  After adjustment
 462 * header points to the first 8 bytes of the tcp header.  We need
 463 * to find the appropriate port.
 464 *
 465 * The locking strategy used here is very "optimistic". When
 466 * someone else accesses the socket the ICMP is just dropped
 467 * and for some paths there is no check at all.
 468 * A more general error queue to queue errors for later handling
 469 * is probably better.
 470 *
 471 */
 472
 473int tcp_v4_err(struct sk_buff *skb, u32 info)
 474{
 475	const struct iphdr *iph = (const struct iphdr *)skb->data;
 476	struct tcphdr *th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
 477	struct tcp_sock *tp;
 478	struct inet_sock *inet;
 479	const int type = icmp_hdr(skb)->type;
 480	const int code = icmp_hdr(skb)->code;
 481	struct sock *sk;
 482	struct request_sock *fastopen;
 483	u32 seq, snd_una;
 484	int err;
 485	struct net *net = dev_net(skb->dev);
 486
 487	sk = __inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
 488				       iph->daddr, th->dest, iph->saddr,
 489				       ntohs(th->source), inet_iif(skb), 0);
 490	if (!sk) {
 491		__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
 492		return -ENOENT;
 493	}
 494	if (sk->sk_state == TCP_TIME_WAIT) {
 495		inet_twsk_put(inet_twsk(sk));
 496		return 0;
 497	}
 498	seq = ntohl(th->seq);
 499	if (sk->sk_state == TCP_NEW_SYN_RECV) {
 500		tcp_req_err(sk, seq, type == ICMP_PARAMETERPROB ||
 501				     type == ICMP_TIME_EXCEEDED ||
 502				     (type == ICMP_DEST_UNREACH &&
 503				      (code == ICMP_NET_UNREACH ||
 504				       code == ICMP_HOST_UNREACH)));
 505		return 0;
 506	}
 507
 508	bh_lock_sock(sk);
 509	/* If too many ICMPs get dropped on busy
 510	 * servers this needs to be solved differently.
  511	 * We do take care of the PMTU discovery (RFC1191) special case:
  512	 * we can receive locally generated ICMP messages while the socket is held.
 513	 */
 514	if (sock_owned_by_user(sk)) {
 515		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
 516			__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
 517	}
 518	if (sk->sk_state == TCP_CLOSE)
 519		goto out;
 520
 521	if (static_branch_unlikely(&ip4_min_ttl)) {
 522		/* min_ttl can be changed concurrently from do_ip_setsockopt() */
 523		if (unlikely(iph->ttl < READ_ONCE(inet_sk(sk)->min_ttl))) {
 524			__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
 525			goto out;
 526		}
 527	}
 528
 529	tp = tcp_sk(sk);
 530	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
 531	fastopen = rcu_dereference(tp->fastopen_rsk);
 532	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
 533	if (sk->sk_state != TCP_LISTEN &&
 534	    !between(seq, snd_una, tp->snd_nxt)) {
 535		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
 536		goto out;
 537	}
 538
 539	switch (type) {
 540	case ICMP_REDIRECT:
 541		if (!sock_owned_by_user(sk))
 542			do_redirect(skb, sk);
 543		goto out;
 544	case ICMP_SOURCE_QUENCH:
 545		/* Just silently ignore these. */
 546		goto out;
 547	case ICMP_PARAMETERPROB:
 548		err = EPROTO;
 549		break;
 550	case ICMP_DEST_UNREACH:
 551		if (code > NR_ICMP_UNREACH)
 552			goto out;
 553
 554		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
 555			/* We are not interested in TCP_LISTEN and open_requests
  556			 * (SYN-ACKs sent out by Linux are always < 576 bytes so
 557			 * they should go through unfragmented).
 558			 */
 559			if (sk->sk_state == TCP_LISTEN)
 560				goto out;
 561
 562			WRITE_ONCE(tp->mtu_info, info);
 563			if (!sock_owned_by_user(sk)) {
 564				tcp_v4_mtu_reduced(sk);
 565			} else {
 566				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &sk->sk_tsq_flags))
 567					sock_hold(sk);
 568			}
 569			goto out;
 570		}
 571
 572		err = icmp_err_convert[code].errno;
 573		/* check if this ICMP message allows revert of backoff.
 574		 * (see RFC 6069)
 575		 */
 576		if (!fastopen &&
 577		    (code == ICMP_NET_UNREACH || code == ICMP_HOST_UNREACH))
 578			tcp_ld_RTO_revert(sk, seq);
 579		break;
 580	case ICMP_TIME_EXCEEDED:
 581		err = EHOSTUNREACH;
 582		break;
 583	default:
 584		goto out;
 585	}
 586
 587	switch (sk->sk_state) {
 588	case TCP_SYN_SENT:
 589	case TCP_SYN_RECV:
 590		/* Only in fast or simultaneous open. If a fast open socket is
 591		 * already accepted it is treated as a connected one below.
 592		 */
 593		if (fastopen && !fastopen->sk)
 594			break;
 595
 596		ip_icmp_error(sk, skb, err, th->dest, info, (u8 *)th);
 597
 598		if (!sock_owned_by_user(sk)) {
 599			sk->sk_err = err;
 600
 601			sk_error_report(sk);
 602
 603			tcp_done(sk);
 604		} else {
 605			sk->sk_err_soft = err;
 606		}
 607		goto out;
 608	}
 609
 610	/* If we've already connected we will keep trying
 611	 * until we time out, or the user gives up.
 612	 *
 613	 * rfc1122 4.2.3.9 allows to consider as hard errors
 614	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
 615	 * but it is obsoleted by pmtu discovery).
 616	 *
  617	 * Note that in the modern internet, where routing is unreliable
  618	 * and broken firewalls sit in every dark corner sending random
  619	 * errors ordered by their masters, even these two messages finally lose
 620	 * their original sense (even Linux sends invalid PORT_UNREACHs)
 621	 *
 622	 * Now we are in compliance with RFCs.
 623	 *							--ANK (980905)
 624	 */
 625
 626	inet = inet_sk(sk);
 627	if (!sock_owned_by_user(sk) && inet->recverr) {
 628		sk->sk_err = err;
 629		sk_error_report(sk);
 630	} else	{ /* Only an error on timeout */
 631		sk->sk_err_soft = err;
 632	}
 633
 634out:
 635	bh_unlock_sock(sk);
 636	sock_put(sk);
 637	return 0;
 638}
 639
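The hard and soft errors set above only become visible to the application as errno on a later call; the ICMP details themselves are delivered through the socket error queue that ip_icmp_error() fills, and only if IP_RECVERR is enabled. A minimal sketch of draining that queue (drain_icmp_errors() is a hypothetical helper; error handling trimmed):

#include <netinet/in.h>
#include <sys/socket.h>
#include <linux/errqueue.h>
#include <string.h>

static void drain_icmp_errors(int fd)
{
	int on = 1;
	char cbuf[512];
	struct msghdr msg;
	struct cmsghdr *cm;

	setsockopt(fd, IPPROTO_IP, IP_RECVERR, &on, sizeof(on));

	memset(&msg, 0, sizeof(msg));
	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);

	if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
		return;

	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
		if (cm->cmsg_level == IPPROTO_IP && cm->cmsg_type == IP_RECVERR) {
			struct sock_extended_err *ee =
				(struct sock_extended_err *)CMSG_DATA(cm);

			/* ee->ee_type / ee->ee_code carry the ICMP type and code. */
			(void)ee;
		}
	}
}
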
 640void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
 641{
 642	struct tcphdr *th = tcp_hdr(skb);
 643
 644	th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
 645	skb->csum_start = skb_transport_header(skb) - skb->head;
 646	skb->csum_offset = offsetof(struct tcphdr, check);
 647}
 648
 649/* This routine computes an IPv4 TCP checksum. */
 650void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
 651{
 652	const struct inet_sock *inet = inet_sk(sk);
 653
 654	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
 655}
 656EXPORT_SYMBOL(tcp_v4_send_check);
 657
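What gets written into th->check above is only the folded, uncomplemented sum of the IPv4 pseudo-header; the NIC or the software fallback later adds the TCP header and payload and folds again, which is how checksum offload works. A stand-alone sketch of that pseudo-header sum, assuming host-order inputs treated as 16-bit words rather than the kernel's csum helpers:

#include <stdint.h>
#include <netinet/in.h>

/* One's-complement sum of the TCP/IPv4 pseudo-header (RFC 793). */
static uint16_t tcp4_pseudo_csum(uint32_t saddr, uint32_t daddr, uint16_t tcp_len)
{
	uint32_t sum = 0;

	sum += (saddr >> 16) + (saddr & 0xffff);
	sum += (daddr >> 16) + (daddr & 0xffff);
	sum += IPPROTO_TCP;		/* zero byte + protocol number */
	sum += tcp_len;			/* TCP header plus payload length */

	while (sum >> 16)		/* fold the carries back in */
		sum = (sum & 0xffff) + (sum >> 16);

	return (uint16_t)sum;		/* complement is applied only over the full segment */
}
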
 658/*
 659 *	This routine will send an RST to the other tcp.
 660 *
 661 *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 662 *		      for reset.
  663 *	Answer: if a packet caused an RST, it is not for a socket
  664 *		existing in our system; if it did match a socket,
  665 *		it would just be a duplicate segment or a bug in the other
  666 *		side's TCP. So we build the reply based only on the
  667 *		parameters that arrived with the segment.
 668 *	Exception: precedence violation. We do not implement it in any case.
 669 */
 670
 671#ifdef CONFIG_TCP_MD5SIG
 672#define OPTION_BYTES TCPOLEN_MD5SIG_ALIGNED
 673#else
 674#define OPTION_BYTES sizeof(__be32)
 675#endif
 676
 677static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
 678{
 679	const struct tcphdr *th = tcp_hdr(skb);
 680	struct {
 681		struct tcphdr th;
 682		__be32 opt[OPTION_BYTES / sizeof(__be32)];
 683	} rep;
 684	struct ip_reply_arg arg;
 685#ifdef CONFIG_TCP_MD5SIG
 686	struct tcp_md5sig_key *key = NULL;
 687	const __u8 *hash_location = NULL;
 688	unsigned char newhash[16];
 689	int genhash;
 690	struct sock *sk1 = NULL;
 691#endif
 692	u64 transmit_time = 0;
 693	struct sock *ctl_sk;
 694	struct net *net;
 695
 696	/* Never send a reset in response to a reset. */
 697	if (th->rst)
 698		return;
 699
 700	/* If sk not NULL, it means we did a successful lookup and incoming
 701	 * route had to be correct. prequeue might have dropped our dst.
 702	 */
 703	if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
 704		return;
 705
 706	/* Swap the send and the receive. */
 707	memset(&rep, 0, sizeof(rep));
 708	rep.th.dest   = th->source;
 709	rep.th.source = th->dest;
 710	rep.th.doff   = sizeof(struct tcphdr) / 4;
 711	rep.th.rst    = 1;
 712
 713	if (th->ack) {
 714		rep.th.seq = th->ack_seq;
 715	} else {
 716		rep.th.ack = 1;
 717		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
 718				       skb->len - (th->doff << 2));
 719	}
 720
 721	memset(&arg, 0, sizeof(arg));
 722	arg.iov[0].iov_base = (unsigned char *)&rep;
 723	arg.iov[0].iov_len  = sizeof(rep.th);
 724
 725	net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
 726#ifdef CONFIG_TCP_MD5SIG
 727	rcu_read_lock();
 728	hash_location = tcp_parse_md5sig_option(th);
 729	if (sk && sk_fullsock(sk)) {
 730		const union tcp_md5_addr *addr;
 731		int l3index;
 732
 733		/* sdif set, means packet ingressed via a device
 734		 * in an L3 domain and inet_iif is set to it.
 735		 */
 736		l3index = tcp_v4_sdif(skb) ? inet_iif(skb) : 0;
 737		addr = (union tcp_md5_addr *)&ip_hdr(skb)->saddr;
 738		key = tcp_md5_do_lookup(sk, l3index, addr, AF_INET);
 739	} else if (hash_location) {
 740		const union tcp_md5_addr *addr;
 741		int sdif = tcp_v4_sdif(skb);
 742		int dif = inet_iif(skb);
 743		int l3index;
 744
 745		/*
  746		 * The active side is lost. Try to find the listening socket
  747		 * through the source port, and then find the md5 key through it.
  748		 * We do not loosen security here:
  749		 * the incoming packet is checked with the md5 hash of the found
  750		 * key, and no RST is generated if the hash doesn't match.
 751		 */
 752		sk1 = __inet_lookup_listener(net, net->ipv4.tcp_death_row.hashinfo,
 753					     NULL, 0, ip_hdr(skb)->saddr,
 754					     th->source, ip_hdr(skb)->daddr,
 755					     ntohs(th->source), dif, sdif);
 756		/* don't send rst if it can't find key */
 757		if (!sk1)
 758			goto out;
 759
 760		/* sdif set, means packet ingressed via a device
 761		 * in an L3 domain and dif is set to it.
 762		 */
 763		l3index = sdif ? dif : 0;
 764		addr = (union tcp_md5_addr *)&ip_hdr(skb)->saddr;
 765		key = tcp_md5_do_lookup(sk1, l3index, addr, AF_INET);
 766		if (!key)
 767			goto out;
 768
 769
 770		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
 771		if (genhash || memcmp(hash_location, newhash, 16) != 0)
 772			goto out;
 773
 774	}
 775
 776	if (key) {
 777		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
 778				   (TCPOPT_NOP << 16) |
 779				   (TCPOPT_MD5SIG << 8) |
 780				   TCPOLEN_MD5SIG);
 781		/* Update length and the length the header thinks exists */
 782		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
 783		rep.th.doff = arg.iov[0].iov_len / 4;
 784
 785		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
 786				     key, ip_hdr(skb)->saddr,
 787				     ip_hdr(skb)->daddr, &rep.th);
 788	}
 789#endif
 790	/* Can't co-exist with TCPMD5, hence check rep.opt[0] */
 791	if (rep.opt[0] == 0) {
 792		__be32 mrst = mptcp_reset_option(skb);
 793
 794		if (mrst) {
 795			rep.opt[0] = mrst;
 796			arg.iov[0].iov_len += sizeof(mrst);
 797			rep.th.doff = arg.iov[0].iov_len / 4;
 798		}
 799	}
 800
 801	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
 802				      ip_hdr(skb)->saddr, /* XXX */
 803				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
 804	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
 805	arg.flags = (sk && inet_sk_transparent(sk)) ? IP_REPLY_ARG_NOSRCCHECK : 0;
 806
 807	/* When socket is gone, all binding information is lost.
  808	 * Routing might fail in this case. No choice here: if we choose to force
  809	 * the input interface, we will misroute in case of an asymmetric route.
 810	 */
 811	if (sk) {
 812		arg.bound_dev_if = sk->sk_bound_dev_if;
 813		if (sk_fullsock(sk))
 814			trace_tcp_send_reset(sk, skb);
 815	}
 816
 817	BUILD_BUG_ON(offsetof(struct sock, sk_bound_dev_if) !=
 818		     offsetof(struct inet_timewait_sock, tw_bound_dev_if));
 819
 820	arg.tos = ip_hdr(skb)->tos;
 821	arg.uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
 822	local_bh_disable();
 823	ctl_sk = this_cpu_read(ipv4_tcp_sk);
 824	sock_net_set(ctl_sk, net);
 825	if (sk) {
 826		ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
 827				   inet_twsk(sk)->tw_mark : sk->sk_mark;
 828		ctl_sk->sk_priority = (sk->sk_state == TCP_TIME_WAIT) ?
 829				   inet_twsk(sk)->tw_priority : sk->sk_priority;
 830		transmit_time = tcp_transmit_time(sk);
 831		xfrm_sk_clone_policy(ctl_sk, sk);
 832	}
 833	ip_send_unicast_reply(ctl_sk,
 834			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
 835			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
 836			      &arg, arg.iov[0].iov_len,
 837			      transmit_time);
 838
 839	ctl_sk->sk_mark = 0;
 840	xfrm_sk_free_policy(ctl_sk);
 841	sock_net_set(ctl_sk, &init_net);
 842	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
 843	__TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
 844	local_bh_enable();
 845
 846#ifdef CONFIG_TCP_MD5SIG
 847out:
 848	rcu_read_unlock();
 849#endif
 850}
 851
  852/* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
  853   outside socket context, is certainly ugly. What can I do?
 854 */
 855
 856static void tcp_v4_send_ack(const struct sock *sk,
 857			    struct sk_buff *skb, u32 seq, u32 ack,
 858			    u32 win, u32 tsval, u32 tsecr, int oif,
 859			    struct tcp_md5sig_key *key,
 860			    int reply_flags, u8 tos)
 861{
 862	const struct tcphdr *th = tcp_hdr(skb);
 863	struct {
 864		struct tcphdr th;
 865		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
 866#ifdef CONFIG_TCP_MD5SIG
 867			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
 868#endif
 869			];
 870	} rep;
 871	struct net *net = sock_net(sk);
 872	struct ip_reply_arg arg;
 873	struct sock *ctl_sk;
 874	u64 transmit_time;
 875
 876	memset(&rep.th, 0, sizeof(struct tcphdr));
 877	memset(&arg, 0, sizeof(arg));
 878
 879	arg.iov[0].iov_base = (unsigned char *)&rep;
 880	arg.iov[0].iov_len  = sizeof(rep.th);
 881	if (tsecr) {
 882		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
 883				   (TCPOPT_TIMESTAMP << 8) |
 884				   TCPOLEN_TIMESTAMP);
 885		rep.opt[1] = htonl(tsval);
 886		rep.opt[2] = htonl(tsecr);
 887		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
 888	}
 889
 890	/* Swap the send and the receive. */
 891	rep.th.dest    = th->source;
 892	rep.th.source  = th->dest;
 893	rep.th.doff    = arg.iov[0].iov_len / 4;
 894	rep.th.seq     = htonl(seq);
 895	rep.th.ack_seq = htonl(ack);
 896	rep.th.ack     = 1;
 897	rep.th.window  = htons(win);
 898
 899#ifdef CONFIG_TCP_MD5SIG
 900	if (key) {
 901		int offset = (tsecr) ? 3 : 0;
 902
 903		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
 904					  (TCPOPT_NOP << 16) |
 905					  (TCPOPT_MD5SIG << 8) |
 906					  TCPOLEN_MD5SIG);
 907		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
 908		rep.th.doff = arg.iov[0].iov_len/4;
 909
 910		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
 911				    key, ip_hdr(skb)->saddr,
 912				    ip_hdr(skb)->daddr, &rep.th);
 913	}
 914#endif
 915	arg.flags = reply_flags;
 916	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
 917				      ip_hdr(skb)->saddr, /* XXX */
 918				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
 919	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
 920	if (oif)
 921		arg.bound_dev_if = oif;
 922	arg.tos = tos;
 923	arg.uid = sock_net_uid(net, sk_fullsock(sk) ? sk : NULL);
 924	local_bh_disable();
 925	ctl_sk = this_cpu_read(ipv4_tcp_sk);
 926	sock_net_set(ctl_sk, net);
 927	ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
 928			   inet_twsk(sk)->tw_mark : sk->sk_mark;
 929	ctl_sk->sk_priority = (sk->sk_state == TCP_TIME_WAIT) ?
 930			   inet_twsk(sk)->tw_priority : sk->sk_priority;
 931	transmit_time = tcp_transmit_time(sk);
 932	ip_send_unicast_reply(ctl_sk,
 933			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
 934			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
 935			      &arg, arg.iov[0].iov_len,
 936			      transmit_time);
 937
 938	ctl_sk->sk_mark = 0;
 939	sock_net_set(ctl_sk, &init_net);
 940	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
 941	local_bh_enable();
 942}
 943
 944static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
 945{
 946	struct inet_timewait_sock *tw = inet_twsk(sk);
 947	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
 948
 949	tcp_v4_send_ack(sk, skb,
 950			tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
 951			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
 952			tcp_time_stamp_raw() + tcptw->tw_ts_offset,
 953			tcptw->tw_ts_recent,
 954			tw->tw_bound_dev_if,
 955			tcp_twsk_md5_key(tcptw),
 956			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
 957			tw->tw_tos
 958			);
 959
 960	inet_twsk_put(tw);
 961}
 962
 963static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
 964				  struct request_sock *req)
 965{
 966	const union tcp_md5_addr *addr;
 967	int l3index;
 968
 969	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
 970	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
 971	 */
 972	u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
 973					     tcp_sk(sk)->snd_nxt;
 974
 975	/* RFC 7323 2.3
 976	 * The window field (SEG.WND) of every outgoing segment, with the
 977	 * exception of <SYN> segments, MUST be right-shifted by
 978	 * Rcv.Wind.Shift bits:
 979	 */
 980	addr = (union tcp_md5_addr *)&ip_hdr(skb)->saddr;
 981	l3index = tcp_v4_sdif(skb) ? inet_iif(skb) : 0;
 982	tcp_v4_send_ack(sk, skb, seq,
 983			tcp_rsk(req)->rcv_nxt,
 984			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
 985			tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
 986			req->ts_recent,
 987			0,
 988			tcp_md5_do_lookup(sk, l3index, addr, AF_INET),
 989			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
 990			ip_hdr(skb)->tos);
 991}
 992
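Concretely, the RFC 7323 shift applied above is just integer division by 2^Rcv.Wind.Shift: a 524288-byte receive window with rcv_wscale = 4 goes on the wire as 524288 >> 4 = 32768 in the 16-bit window field, and the peer multiplies it back up by the scale factor it learned from the SYN.
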
 993/*
 994 *	Send a SYN-ACK after having received a SYN.
 995 *	This still operates on a request_sock only, not on a big
 996 *	socket.
 997 */
 998static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
 999			      struct flowi *fl,
1000			      struct request_sock *req,
1001			      struct tcp_fastopen_cookie *foc,
1002			      enum tcp_synack_type synack_type,
1003			      struct sk_buff *syn_skb)
1004{
1005	const struct inet_request_sock *ireq = inet_rsk(req);
1006	struct flowi4 fl4;
1007	int err = -1;
1008	struct sk_buff *skb;
1009	u8 tos;
1010
1011	/* First, grab a route. */
1012	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
1013		return -1;
1014
1015	skb = tcp_make_synack(sk, dst, req, foc, synack_type, syn_skb);
1016
1017	if (skb) {
1018		__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
1019
1020		tos = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reflect_tos) ?
1021				(tcp_rsk(req)->syn_tos & ~INET_ECN_MASK) |
1022				(inet_sk(sk)->tos & INET_ECN_MASK) :
1023				inet_sk(sk)->tos;
1024
1025		if (!INET_ECN_is_capable(tos) &&
1026		    tcp_bpf_ca_needs_ecn((struct sock *)req))
1027			tos |= INET_ECN_ECT_0;
1028
1029		rcu_read_lock();
1030		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
1031					    ireq->ir_rmt_addr,
1032					    rcu_dereference(ireq->ireq_opt),
1033					    tos);
1034		rcu_read_unlock();
1035		err = net_xmit_eval(err);
1036	}
1037
1038	return err;
1039}
1040
1041/*
1042 *	IPv4 request_sock destructor.
1043 */
1044static void tcp_v4_reqsk_destructor(struct request_sock *req)
1045{
1046	kfree(rcu_dereference_protected(inet_rsk(req)->ireq_opt, 1));
1047}
1048
1049#ifdef CONFIG_TCP_MD5SIG
1050/*
1051 * RFC2385 MD5 checksumming requires a mapping of
1052 * IP address->MD5 Key.
1053 * We need to maintain these in the sk structure.
1054 */
1055
1056DEFINE_STATIC_KEY_DEFERRED_FALSE(tcp_md5_needed, HZ);
1057EXPORT_SYMBOL(tcp_md5_needed);
1058
1059static bool better_md5_match(struct tcp_md5sig_key *old, struct tcp_md5sig_key *new)
1060{
1061	if (!old)
1062		return true;
1063
1064	/* l3index always overrides non-l3index */
1065	if (old->l3index && new->l3index == 0)
1066		return false;
1067	if (old->l3index == 0 && new->l3index)
1068		return true;
1069
1070	return old->prefixlen < new->prefixlen;
1071}
1072
1073/* Find the Key structure for an address.  */
1074struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
1075					   const union tcp_md5_addr *addr,
1076					   int family)
1077{
1078	const struct tcp_sock *tp = tcp_sk(sk);
1079	struct tcp_md5sig_key *key;
1080	const struct tcp_md5sig_info *md5sig;
1081	__be32 mask;
1082	struct tcp_md5sig_key *best_match = NULL;
1083	bool match;
1084
1085	/* caller either holds rcu_read_lock() or socket lock */
1086	md5sig = rcu_dereference_check(tp->md5sig_info,
1087				       lockdep_sock_is_held(sk));
1088	if (!md5sig)
1089		return NULL;
1090
1091	hlist_for_each_entry_rcu(key, &md5sig->head, node,
1092				 lockdep_sock_is_held(sk)) {
1093		if (key->family != family)
1094			continue;
1095		if (key->flags & TCP_MD5SIG_FLAG_IFINDEX && key->l3index != l3index)
1096			continue;
1097		if (family == AF_INET) {
1098			mask = inet_make_mask(key->prefixlen);
1099			match = (key->addr.a4.s_addr & mask) ==
1100				(addr->a4.s_addr & mask);
1101#if IS_ENABLED(CONFIG_IPV6)
1102		} else if (family == AF_INET6) {
1103			match = ipv6_prefix_equal(&key->addr.a6, &addr->a6,
1104						  key->prefixlen);
1105#endif
1106		} else {
1107			match = false;
1108		}
1109
1110		if (match && better_md5_match(best_match, key))
1111			best_match = key;
1112	}
1113	return best_match;
1114}
1115EXPORT_SYMBOL(__tcp_md5_do_lookup);
1116
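A key may cover a whole prefix rather than a single peer: the loop above masks both the stored and the looked-up address with the key's prefix length and, via better_md5_match(), prefers longer prefixes and L3-scoped keys. A user-space sketch of the same IPv4 match (md5_key_covers() is a hypothetical name; addresses are in network byte order):

#include <stdbool.h>
#include <stdint.h>
#include <arpa/inet.h>

/* Does addr fall inside key_addr/prefixlen, as inet_make_mask() would decide? */
static bool md5_key_covers(uint32_t key_addr, uint8_t prefixlen, uint32_t addr)
{
	uint32_t mask = prefixlen ? htonl(~0u << (32 - prefixlen)) : 0;

	return (key_addr & mask) == (addr & mask);
}
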
1117static struct tcp_md5sig_key *tcp_md5_do_lookup_exact(const struct sock *sk,
1118						      const union tcp_md5_addr *addr,
1119						      int family, u8 prefixlen,
1120						      int l3index, u8 flags)
1121{
1122	const struct tcp_sock *tp = tcp_sk(sk);
1123	struct tcp_md5sig_key *key;
1124	unsigned int size = sizeof(struct in_addr);
1125	const struct tcp_md5sig_info *md5sig;
1126
1127	/* caller either holds rcu_read_lock() or socket lock */
1128	md5sig = rcu_dereference_check(tp->md5sig_info,
1129				       lockdep_sock_is_held(sk));
1130	if (!md5sig)
1131		return NULL;
1132#if IS_ENABLED(CONFIG_IPV6)
1133	if (family == AF_INET6)
1134		size = sizeof(struct in6_addr);
1135#endif
1136	hlist_for_each_entry_rcu(key, &md5sig->head, node,
1137				 lockdep_sock_is_held(sk)) {
1138		if (key->family != family)
1139			continue;
1140		if ((key->flags & TCP_MD5SIG_FLAG_IFINDEX) != (flags & TCP_MD5SIG_FLAG_IFINDEX))
1141			continue;
1142		if (key->l3index != l3index)
1143			continue;
1144		if (!memcmp(&key->addr, addr, size) &&
1145		    key->prefixlen == prefixlen)
1146			return key;
1147	}
1148	return NULL;
1149}
1150
1151struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
1152					 const struct sock *addr_sk)
1153{
1154	const union tcp_md5_addr *addr;
1155	int l3index;
1156
1157	l3index = l3mdev_master_ifindex_by_index(sock_net(sk),
1158						 addr_sk->sk_bound_dev_if);
1159	addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
1160	return tcp_md5_do_lookup(sk, l3index, addr, AF_INET);
1161}
1162EXPORT_SYMBOL(tcp_v4_md5_lookup);
1163
1164static int tcp_md5sig_info_add(struct sock *sk, gfp_t gfp)
1165{
1166	struct tcp_sock *tp = tcp_sk(sk);
1167	struct tcp_md5sig_info *md5sig;
1168
1169	md5sig = kmalloc(sizeof(*md5sig), gfp);
1170	if (!md5sig)
1171		return -ENOMEM;
1172
1173	sk_gso_disable(sk);
1174	INIT_HLIST_HEAD(&md5sig->head);
1175	rcu_assign_pointer(tp->md5sig_info, md5sig);
1176	return 0;
1177}
1178
1179/* This can be called on a newly created socket, from other files */
1180static int __tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
1181			    int family, u8 prefixlen, int l3index, u8 flags,
1182			    const u8 *newkey, u8 newkeylen, gfp_t gfp)
1183{
1184	/* Add Key to the list */
1185	struct tcp_md5sig_key *key;
1186	struct tcp_sock *tp = tcp_sk(sk);
1187	struct tcp_md5sig_info *md5sig;
1188
1189	key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen, l3index, flags);
1190	if (key) {
1191		/* Pre-existing entry - just update that one.
1192		 * Note that the key might be used concurrently.
 1193		 * data_race() is telling kcsan that we do not care about
1194		 * key mismatches, since changing MD5 key on live flows
1195		 * can lead to packet drops.
1196		 */
1197		data_race(memcpy(key->key, newkey, newkeylen));
1198
1199		/* Pairs with READ_ONCE() in tcp_md5_hash_key().
1200		 * Also note that a reader could catch new key->keylen value
1201		 * but old key->key[], this is the reason we use __GFP_ZERO
1202		 * at sock_kmalloc() time below these lines.
1203		 */
1204		WRITE_ONCE(key->keylen, newkeylen);
1205
1206		return 0;
1207	}
1208
1209	md5sig = rcu_dereference_protected(tp->md5sig_info,
1210					   lockdep_sock_is_held(sk));
1211
1212	key = sock_kmalloc(sk, sizeof(*key), gfp | __GFP_ZERO);
1213	if (!key)
1214		return -ENOMEM;
1215	if (!tcp_alloc_md5sig_pool()) {
1216		sock_kfree_s(sk, key, sizeof(*key));
1217		return -ENOMEM;
1218	}
1219
1220	memcpy(key->key, newkey, newkeylen);
1221	key->keylen = newkeylen;
1222	key->family = family;
1223	key->prefixlen = prefixlen;
1224	key->l3index = l3index;
1225	key->flags = flags;
1226	memcpy(&key->addr, addr,
1227	       (IS_ENABLED(CONFIG_IPV6) && family == AF_INET6) ? sizeof(struct in6_addr) :
1228								 sizeof(struct in_addr));
1229	hlist_add_head_rcu(&key->node, &md5sig->head);
1230	return 0;
1231}
1232
1233int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
1234		   int family, u8 prefixlen, int l3index, u8 flags,
1235		   const u8 *newkey, u8 newkeylen)
1236{
1237	struct tcp_sock *tp = tcp_sk(sk);
1238
1239	if (!rcu_dereference_protected(tp->md5sig_info, lockdep_sock_is_held(sk))) {
1240		if (tcp_md5sig_info_add(sk, GFP_KERNEL))
1241			return -ENOMEM;
1242
1243		if (!static_branch_inc(&tcp_md5_needed.key)) {
1244			struct tcp_md5sig_info *md5sig;
1245
1246			md5sig = rcu_dereference_protected(tp->md5sig_info, lockdep_sock_is_held(sk));
1247			rcu_assign_pointer(tp->md5sig_info, NULL);
1248			kfree_rcu(md5sig, rcu);
1249			return -EUSERS;
1250		}
1251	}
1252
1253	return __tcp_md5_do_add(sk, addr, family, prefixlen, l3index, flags,
1254				newkey, newkeylen, GFP_KERNEL);
1255}
1256EXPORT_SYMBOL(tcp_md5_do_add);
1257
1258int tcp_md5_key_copy(struct sock *sk, const union tcp_md5_addr *addr,
1259		     int family, u8 prefixlen, int l3index,
1260		     struct tcp_md5sig_key *key)
1261{
1262	struct tcp_sock *tp = tcp_sk(sk);
1263
1264	if (!rcu_dereference_protected(tp->md5sig_info, lockdep_sock_is_held(sk))) {
1265		if (tcp_md5sig_info_add(sk, sk_gfp_mask(sk, GFP_ATOMIC)))
1266			return -ENOMEM;
1267
1268		if (!static_key_fast_inc_not_disabled(&tcp_md5_needed.key.key)) {
1269			struct tcp_md5sig_info *md5sig;
1270
1271			md5sig = rcu_dereference_protected(tp->md5sig_info, lockdep_sock_is_held(sk));
1272			net_warn_ratelimited("Too many TCP-MD5 keys in the system\n");
1273			rcu_assign_pointer(tp->md5sig_info, NULL);
1274			kfree_rcu(md5sig, rcu);
1275			return -EUSERS;
1276		}
1277	}
1278
1279	return __tcp_md5_do_add(sk, addr, family, prefixlen, l3index,
1280				key->flags, key->key, key->keylen,
1281				sk_gfp_mask(sk, GFP_ATOMIC));
1282}
1283EXPORT_SYMBOL(tcp_md5_key_copy);
1284
1285int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family,
1286		   u8 prefixlen, int l3index, u8 flags)
1287{
1288	struct tcp_md5sig_key *key;
1289
1290	key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen, l3index, flags);
1291	if (!key)
1292		return -ENOENT;
1293	hlist_del_rcu(&key->node);
1294	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1295	kfree_rcu(key, rcu);
1296	return 0;
1297}
1298EXPORT_SYMBOL(tcp_md5_do_del);
1299
1300static void tcp_clear_md5_list(struct sock *sk)
1301{
1302	struct tcp_sock *tp = tcp_sk(sk);
1303	struct tcp_md5sig_key *key;
1304	struct hlist_node *n;
1305	struct tcp_md5sig_info *md5sig;
1306
1307	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
1308
1309	hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
1310		hlist_del_rcu(&key->node);
1311		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1312		kfree_rcu(key, rcu);
1313	}
1314}
1315
1316static int tcp_v4_parse_md5_keys(struct sock *sk, int optname,
1317				 sockptr_t optval, int optlen)
1318{
1319	struct tcp_md5sig cmd;
1320	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
1321	const union tcp_md5_addr *addr;
1322	u8 prefixlen = 32;
1323	int l3index = 0;
1324	u8 flags;
1325
1326	if (optlen < sizeof(cmd))
1327		return -EINVAL;
1328
1329	if (copy_from_sockptr(&cmd, optval, sizeof(cmd)))
1330		return -EFAULT;
1331
1332	if (sin->sin_family != AF_INET)
1333		return -EINVAL;
1334
1335	flags = cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX;
1336
1337	if (optname == TCP_MD5SIG_EXT &&
1338	    cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
1339		prefixlen = cmd.tcpm_prefixlen;
1340		if (prefixlen > 32)
1341			return -EINVAL;
1342	}
1343
1344	if (optname == TCP_MD5SIG_EXT && cmd.tcpm_ifindex &&
1345	    cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX) {
1346		struct net_device *dev;
1347
1348		rcu_read_lock();
1349		dev = dev_get_by_index_rcu(sock_net(sk), cmd.tcpm_ifindex);
1350		if (dev && netif_is_l3_master(dev))
1351			l3index = dev->ifindex;
1352
1353		rcu_read_unlock();
1354
1355		/* ok to reference set/not set outside of rcu;
1356		 * right now device MUST be an L3 master
1357		 */
1358		if (!dev || !l3index)
1359			return -EINVAL;
1360	}
1361
1362	addr = (union tcp_md5_addr *)&sin->sin_addr.s_addr;
1363
1364	if (!cmd.tcpm_keylen)
1365		return tcp_md5_do_del(sk, addr, AF_INET, prefixlen, l3index, flags);
1366
1367	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
1368		return -EINVAL;
1369
1370	return tcp_md5_do_add(sk, addr, AF_INET, prefixlen, l3index, flags,
1371			      cmd.tcpm_key, cmd.tcpm_keylen);
1372}
1373
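User space installs and removes these keys with the TCP_MD5SIG (or TCP_MD5SIG_EXT) socket option and the struct tcp_md5sig parsed above; a zero tcpm_keylen deletes the key. A minimal sketch for a single-peer key, assuming the uapi definitions exposed through <netinet/tcp.h> (add_md5_key() is a hypothetical helper):

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <string.h>
#include <sys/socket.h>

static int add_md5_key(int fd, const struct sockaddr_in *peer,
		       const void *key, int keylen)
{
	struct tcp_md5sig md5;

	if (keylen > TCP_MD5SIG_MAXKEYLEN)
		return -1;

	memset(&md5, 0, sizeof(md5));
	memcpy(&md5.tcpm_addr, peer, sizeof(*peer));
	md5.tcpm_keylen = keylen;
	memcpy(md5.tcpm_key, key, keylen);

	/* TCP_MD5SIG_EXT plus tcpm_flags/tcpm_prefixlen covers prefix-wide
	 * and ifindex-bound keys, mirroring the parsing above.
	 */
	return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
}
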
1374static int tcp_v4_md5_hash_headers(struct tcp_md5sig_pool *hp,
1375				   __be32 daddr, __be32 saddr,
1376				   const struct tcphdr *th, int nbytes)
1377{
1378	struct tcp4_pseudohdr *bp;
1379	struct scatterlist sg;
1380	struct tcphdr *_th;
1381
1382	bp = hp->scratch;
1383	bp->saddr = saddr;
1384	bp->daddr = daddr;
1385	bp->pad = 0;
1386	bp->protocol = IPPROTO_TCP;
1387	bp->len = cpu_to_be16(nbytes);
1388
1389	_th = (struct tcphdr *)(bp + 1);
1390	memcpy(_th, th, sizeof(*th));
1391	_th->check = 0;
1392
1393	sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
1394	ahash_request_set_crypt(hp->md5_req, &sg, NULL,
1395				sizeof(*bp) + sizeof(*th));
1396	return crypto_ahash_update(hp->md5_req);
1397}
1398
1399static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
1400			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
1401{
1402	struct tcp_md5sig_pool *hp;
1403	struct ahash_request *req;
1404
1405	hp = tcp_get_md5sig_pool();
1406	if (!hp)
1407		goto clear_hash_noput;
1408	req = hp->md5_req;
1409
1410	if (crypto_ahash_init(req))
1411		goto clear_hash;
1412	if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
1413		goto clear_hash;
1414	if (tcp_md5_hash_key(hp, key))
1415		goto clear_hash;
1416	ahash_request_set_crypt(req, NULL, md5_hash, 0);
1417	if (crypto_ahash_final(req))
1418		goto clear_hash;
1419
1420	tcp_put_md5sig_pool();
1421	return 0;
1422
1423clear_hash:
1424	tcp_put_md5sig_pool();
1425clear_hash_noput:
1426	memset(md5_hash, 0, 16);
1427	return 1;
1428}
1429
1430int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
1431			const struct sock *sk,
1432			const struct sk_buff *skb)
1433{
1434	struct tcp_md5sig_pool *hp;
1435	struct ahash_request *req;
1436	const struct tcphdr *th = tcp_hdr(skb);
1437	__be32 saddr, daddr;
1438
1439	if (sk) { /* valid for establish/request sockets */
1440		saddr = sk->sk_rcv_saddr;
1441		daddr = sk->sk_daddr;
1442	} else {
1443		const struct iphdr *iph = ip_hdr(skb);
1444		saddr = iph->saddr;
1445		daddr = iph->daddr;
1446	}
1447
1448	hp = tcp_get_md5sig_pool();
1449	if (!hp)
1450		goto clear_hash_noput;
1451	req = hp->md5_req;
1452
1453	if (crypto_ahash_init(req))
1454		goto clear_hash;
1455
1456	if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, skb->len))
1457		goto clear_hash;
1458	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
1459		goto clear_hash;
1460	if (tcp_md5_hash_key(hp, key))
1461		goto clear_hash;
1462	ahash_request_set_crypt(req, NULL, md5_hash, 0);
1463	if (crypto_ahash_final(req))
1464		goto clear_hash;
1465
1466	tcp_put_md5sig_pool();
1467	return 0;
1468
1469clear_hash:
1470	tcp_put_md5sig_pool();
1471clear_hash_noput:
1472	memset(md5_hash, 0, 16);
1473	return 1;
1474}
1475EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
1476
1477#endif
1478
1479static void tcp_v4_init_req(struct request_sock *req,
1480			    const struct sock *sk_listener,
1481			    struct sk_buff *skb)
1482{
1483	struct inet_request_sock *ireq = inet_rsk(req);
1484	struct net *net = sock_net(sk_listener);
1485
1486	sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
1487	sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
1488	RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(net, skb));
1489}
1490
1491static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
1492					  struct sk_buff *skb,
1493					  struct flowi *fl,
1494					  struct request_sock *req)
1495{
1496	tcp_v4_init_req(req, sk, skb);
1497
1498	if (security_inet_conn_request(sk, skb, req))
1499		return NULL;
1500
1501	return inet_csk_route_req(sk, &fl->u.ip4, req);
1502}
1503
1504struct request_sock_ops tcp_request_sock_ops __read_mostly = {
1505	.family		=	PF_INET,
1506	.obj_size	=	sizeof(struct tcp_request_sock),
1507	.rtx_syn_ack	=	tcp_rtx_synack,
1508	.send_ack	=	tcp_v4_reqsk_send_ack,
1509	.destructor	=	tcp_v4_reqsk_destructor,
1510	.send_reset	=	tcp_v4_send_reset,
1511	.syn_ack_timeout =	tcp_syn_ack_timeout,
1512};
1513
1514const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1515	.mss_clamp	=	TCP_MSS_DEFAULT,
1516#ifdef CONFIG_TCP_MD5SIG
1517	.req_md5_lookup	=	tcp_v4_md5_lookup,
1518	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
1519#endif
1520#ifdef CONFIG_SYN_COOKIES
1521	.cookie_init_seq =	cookie_v4_init_sequence,
1522#endif
1523	.route_req	=	tcp_v4_route_req,
1524	.init_seq	=	tcp_v4_init_seq,
1525	.init_ts_off	=	tcp_v4_init_ts_off,
1526	.send_synack	=	tcp_v4_send_synack,
1527};
1528
1529int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1530{
 1531	/* Never answer SYNs sent to broadcast or multicast */
1532	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
1533		goto drop;
1534
1535	return tcp_conn_request(&tcp_request_sock_ops,
1536				&tcp_request_sock_ipv4_ops, sk, skb);
1537
1538drop:
1539	tcp_listendrop(sk);
1540	return 0;
1541}
1542EXPORT_SYMBOL(tcp_v4_conn_request);
1543
1544
1545/*
1546 * The three way handshake has completed - we got a valid synack -
1547 * now create the new socket.
1548 */
1549struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
1550				  struct request_sock *req,
1551				  struct dst_entry *dst,
1552				  struct request_sock *req_unhash,
1553				  bool *own_req)
1554{
1555	struct inet_request_sock *ireq;
1556	bool found_dup_sk = false;
1557	struct inet_sock *newinet;
1558	struct tcp_sock *newtp;
1559	struct sock *newsk;
1560#ifdef CONFIG_TCP_MD5SIG
1561	const union tcp_md5_addr *addr;
1562	struct tcp_md5sig_key *key;
1563	int l3index;
1564#endif
1565	struct ip_options_rcu *inet_opt;
1566
1567	if (sk_acceptq_is_full(sk))
1568		goto exit_overflow;
1569
1570	newsk = tcp_create_openreq_child(sk, req, skb);
1571	if (!newsk)
1572		goto exit_nonewsk;
1573
1574	newsk->sk_gso_type = SKB_GSO_TCPV4;
1575	inet_sk_rx_dst_set(newsk, skb);
1576
1577	newtp		      = tcp_sk(newsk);
1578	newinet		      = inet_sk(newsk);
1579	ireq		      = inet_rsk(req);
1580	sk_daddr_set(newsk, ireq->ir_rmt_addr);
1581	sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
1582	newsk->sk_bound_dev_if = ireq->ir_iif;
1583	newinet->inet_saddr   = ireq->ir_loc_addr;
1584	inet_opt	      = rcu_dereference(ireq->ireq_opt);
1585	RCU_INIT_POINTER(newinet->inet_opt, inet_opt);
1586	newinet->mc_index     = inet_iif(skb);
1587	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
1588	newinet->rcv_tos      = ip_hdr(skb)->tos;
1589	inet_csk(newsk)->icsk_ext_hdr_len = 0;
1590	if (inet_opt)
1591		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
1592	newinet->inet_id = get_random_u16();
1593
1594	/* Set ToS of the new socket based upon the value of incoming SYN.
1595	 * ECT bits are set later in tcp_init_transfer().
1596	 */
1597	if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reflect_tos))
1598		newinet->tos = tcp_rsk(req)->syn_tos & ~INET_ECN_MASK;
1599
1600	if (!dst) {
1601		dst = inet_csk_route_child_sock(sk, newsk, req);
1602		if (!dst)
1603			goto put_and_exit;
1604	} else {
1605		/* syncookie case : see end of cookie_v4_check() */
1606	}
1607	sk_setup_caps(newsk, dst);
1608
1609	tcp_ca_openreq_child(newsk, dst);
1610
1611	tcp_sync_mss(newsk, dst_mtu(dst));
1612	newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));
1613
1614	tcp_initialize_rcv_mss(newsk);
1615
1616#ifdef CONFIG_TCP_MD5SIG
1617	l3index = l3mdev_master_ifindex_by_index(sock_net(sk), ireq->ir_iif);
1618	/* Copy over the MD5 key from the original socket */
1619	addr = (union tcp_md5_addr *)&newinet->inet_daddr;
1620	key = tcp_md5_do_lookup(sk, l3index, addr, AF_INET);
1621	if (key) {
1622		if (tcp_md5_key_copy(newsk, addr, AF_INET, 32, l3index, key))
1623			goto put_and_exit;
1624		sk_gso_disable(newsk);
1625	}
1626#endif
1627
1628	if (__inet_inherit_port(sk, newsk) < 0)
1629		goto put_and_exit;
1630	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash),
1631				       &found_dup_sk);
1632	if (likely(*own_req)) {
1633		tcp_move_syn(newtp, req);
1634		ireq->ireq_opt = NULL;
1635	} else {
1636		newinet->inet_opt = NULL;
1637
1638		if (!req_unhash && found_dup_sk) {
 1639			/* This code path should be executed only in the
 1640			 * syncookie case
1641			 */
1642			bh_unlock_sock(newsk);
1643			sock_put(newsk);
1644			newsk = NULL;
1645		}
1646	}
1647	return newsk;
1648
1649exit_overflow:
1650	NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1651exit_nonewsk:
1652	dst_release(dst);
1653exit:
1654	tcp_listendrop(sk);
1655	return NULL;
1656put_and_exit:
1657	newinet->inet_opt = NULL;
1658	inet_csk_prepare_forced_close(newsk);
1659	tcp_done(newsk);
1660	goto exit;
1661}
1662EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
1663
1664static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
1665{
1666#ifdef CONFIG_SYN_COOKIES
1667	const struct tcphdr *th = tcp_hdr(skb);
1668
1669	if (!th->syn)
1670		sk = cookie_v4_check(sk, skb);
1671#endif
1672	return sk;
1673}
1674
1675u16 tcp_v4_get_syncookie(struct sock *sk, struct iphdr *iph,
1676			 struct tcphdr *th, u32 *cookie)
1677{
1678	u16 mss = 0;
1679#ifdef CONFIG_SYN_COOKIES
1680	mss = tcp_get_syncookie_mss(&tcp_request_sock_ops,
1681				    &tcp_request_sock_ipv4_ops, sk, th);
1682	if (mss) {
1683		*cookie = __cookie_v4_init_sequence(iph, th, &mss);
1684		tcp_synq_overflow(sk);
1685	}
1686#endif
1687	return mss;
1688}
1689
1690INDIRECT_CALLABLE_DECLARE(struct dst_entry *ipv4_dst_check(struct dst_entry *,
1691							   u32));
 1692/* The socket must have its spinlock held when we get
1693 * here, unless it is a TCP_LISTEN socket.
1694 *
1695 * We have a potential double-lock case here, so even when
1696 * doing backlog processing we use the BH locking scheme.
1697 * This is because we cannot sleep with the original spinlock
1698 * held.
1699 */
1700int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1701{
1702	enum skb_drop_reason reason;
1703	struct sock *rsk;
1704
1705	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1706		struct dst_entry *dst;
1707
1708		dst = rcu_dereference_protected(sk->sk_rx_dst,
1709						lockdep_sock_is_held(sk));
1710
1711		sock_rps_save_rxhash(sk, skb);
1712		sk_mark_napi_id(sk, skb);
1713		if (dst) {
1714			if (sk->sk_rx_dst_ifindex != skb->skb_iif ||
1715			    !INDIRECT_CALL_1(dst->ops->check, ipv4_dst_check,
1716					     dst, 0)) {
1717				RCU_INIT_POINTER(sk->sk_rx_dst, NULL);
1718				dst_release(dst);
1719			}
1720		}
1721		tcp_rcv_established(sk, skb);
1722		return 0;
1723	}
1724
1725	reason = SKB_DROP_REASON_NOT_SPECIFIED;
1726	if (tcp_checksum_complete(skb))
1727		goto csum_err;
1728
1729	if (sk->sk_state == TCP_LISTEN) {
1730		struct sock *nsk = tcp_v4_cookie_check(sk, skb);
1731
1732		if (!nsk)
1733			goto discard;
1734		if (nsk != sk) {
1735			if (tcp_child_process(sk, nsk, skb)) {
1736				rsk = nsk;
1737				goto reset;
1738			}
1739			return 0;
1740		}
1741	} else
1742		sock_rps_save_rxhash(sk, skb);
1743
1744	if (tcp_rcv_state_process(sk, skb)) {
1745		rsk = sk;
1746		goto reset;
1747	}
1748	return 0;
1749
1750reset:
1751	tcp_v4_send_reset(rsk, skb);
1752discard:
1753	kfree_skb_reason(skb, reason);
1754	/* Be careful here. If this function gets more complicated and
1755	 * gcc suffers from register pressure on the x86, sk (in %ebx)
1756	 * might be destroyed here. This current version compiles correctly,
1757	 * but you have been warned.
1758	 */
1759	return 0;
1760
1761csum_err:
1762	reason = SKB_DROP_REASON_TCP_CSUM;
1763	trace_tcp_bad_csum(skb);
1764	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1765	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
1766	goto discard;
1767}
1768EXPORT_SYMBOL(tcp_v4_do_rcv);
1769
1770int tcp_v4_early_demux(struct sk_buff *skb)
1771{
1772	struct net *net = dev_net(skb->dev);
1773	const struct iphdr *iph;
1774	const struct tcphdr *th;
1775	struct sock *sk;
1776
1777	if (skb->pkt_type != PACKET_HOST)
1778		return 0;
1779
1780	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1781		return 0;
1782
1783	iph = ip_hdr(skb);
1784	th = tcp_hdr(skb);
1785
1786	if (th->doff < sizeof(struct tcphdr) / 4)
1787		return 0;
1788
1789	sk = __inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
1790				       iph->saddr, th->source,
1791				       iph->daddr, ntohs(th->dest),
1792				       skb->skb_iif, inet_sdif(skb));
1793	if (sk) {
1794		skb->sk = sk;
1795		skb->destructor = sock_edemux;
1796		if (sk_fullsock(sk)) {
1797			struct dst_entry *dst = rcu_dereference(sk->sk_rx_dst);
1798
1799			if (dst)
1800				dst = dst_check(dst, 0);
1801			if (dst &&
1802			    sk->sk_rx_dst_ifindex == skb->skb_iif)
1803				skb_dst_set_noref(skb, dst);
1804		}
1805	}
1806	return 0;
1807}
1808
1809bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
1810		     enum skb_drop_reason *reason)
1811{
1812	u32 limit, tail_gso_size, tail_gso_segs;
1813	struct skb_shared_info *shinfo;
1814	const struct tcphdr *th;
1815	struct tcphdr *thtail;
1816	struct sk_buff *tail;
1817	unsigned int hdrlen;
1818	bool fragstolen;
1819	u32 gso_segs;
1820	u32 gso_size;
1821	int delta;
1822
1823	/* In case all data was pulled from skb frags (in __pskb_pull_tail()),
1824	 * we can fix skb->truesize to its real value to avoid future drops.
1825	 * This is valid because skb is not yet charged to the socket.
1826	 * It has been noticed that pure SACK packets were sometimes dropped
1827	 * (if cooked by drivers without the copybreak feature).
1828	 */
1829	skb_condense(skb);
1830
1831	skb_dst_drop(skb);
 
1832
1833	if (unlikely(tcp_checksum_complete(skb))) {
1834		bh_unlock_sock(sk);
1835		trace_tcp_bad_csum(skb);
1836		*reason = SKB_DROP_REASON_TCP_CSUM;
1837		__TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1838		__TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
1839		return true;
1840	}
1841
1842	/* Attempt coalescing to last skb in backlog, even if we are
1843	 * above the limits.
1844	 * This is okay because skb capacity is limited to MAX_SKB_FRAGS.
1845	 */
1846	th = (const struct tcphdr *)skb->data;
1847	hdrlen = th->doff * 4;
1848
1849	tail = sk->sk_backlog.tail;
1850	if (!tail)
1851		goto no_coalesce;
1852	thtail = (struct tcphdr *)tail->data;
1853
1854	if (TCP_SKB_CB(tail)->end_seq != TCP_SKB_CB(skb)->seq ||
1855	    TCP_SKB_CB(tail)->ip_dsfield != TCP_SKB_CB(skb)->ip_dsfield ||
1856	    ((TCP_SKB_CB(tail)->tcp_flags |
1857	      TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_SYN | TCPHDR_RST | TCPHDR_URG)) ||
1858	    !((TCP_SKB_CB(tail)->tcp_flags &
1859	      TCP_SKB_CB(skb)->tcp_flags) & TCPHDR_ACK) ||
1860	    ((TCP_SKB_CB(tail)->tcp_flags ^
1861	      TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_ECE | TCPHDR_CWR)) ||
1862#ifdef CONFIG_TLS_DEVICE
1863	    tail->decrypted != skb->decrypted ||
1864#endif
1865	    thtail->doff != th->doff ||
1866	    memcmp(thtail + 1, th + 1, hdrlen - sizeof(*th)))
1867		goto no_coalesce;
1868
1869	__skb_pull(skb, hdrlen);
1870
1871	shinfo = skb_shinfo(skb);
1872	gso_size = shinfo->gso_size ?: skb->len;
1873	gso_segs = shinfo->gso_segs ?: 1;
1874
1875	shinfo = skb_shinfo(tail);
1876	tail_gso_size = shinfo->gso_size ?: (tail->len - hdrlen);
1877	tail_gso_segs = shinfo->gso_segs ?: 1;
1878
1879	if (skb_try_coalesce(tail, skb, &fragstolen, &delta)) {
1880		TCP_SKB_CB(tail)->end_seq = TCP_SKB_CB(skb)->end_seq;
1881
1882		if (likely(!before(TCP_SKB_CB(skb)->ack_seq, TCP_SKB_CB(tail)->ack_seq))) {
1883			TCP_SKB_CB(tail)->ack_seq = TCP_SKB_CB(skb)->ack_seq;
1884			thtail->window = th->window;
1885		}
1886
1887		/* We have to update both TCP_SKB_CB(tail)->tcp_flags and
1888		 * thtail->fin, so that the fast path in tcp_rcv_established()
1889		 * is not entered if we append a packet with a FIN.
1890		 * SYN, RST, URG are not present.
1891		 * ACK is set on both packets.
1892		 * PSH : we do not really care in TCP stack,
1893		 *       at least for 'GRO' packets.
1894		 */
1895		thtail->fin |= th->fin;
1896		TCP_SKB_CB(tail)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
1897
1898		if (TCP_SKB_CB(skb)->has_rxtstamp) {
1899			TCP_SKB_CB(tail)->has_rxtstamp = true;
1900			tail->tstamp = skb->tstamp;
1901			skb_hwtstamps(tail)->hwtstamp = skb_hwtstamps(skb)->hwtstamp;
1902		}
1903
1904		/* Not as strict as GRO. We only need to carry mss max value */
1905		shinfo->gso_size = max(gso_size, tail_gso_size);
1906		shinfo->gso_segs = min_t(u32, gso_segs + tail_gso_segs, 0xFFFF);
1907
1908		sk->sk_backlog.len += delta;
1909		__NET_INC_STATS(sock_net(sk),
1910				LINUX_MIB_TCPBACKLOGCOALESCE);
1911		kfree_skb_partial(skb, fragstolen);
1912		return false;
1913	}
1914	__skb_push(skb, hdrlen);
1915
1916no_coalesce:
1917	limit = (u32)READ_ONCE(sk->sk_rcvbuf) + (u32)(READ_ONCE(sk->sk_sndbuf) >> 1);
 
1918
1919	/* Only socket owner can try to collapse/prune rx queues
1920	 * to reduce memory overhead, so add a little headroom here.
1921	 * Only a few socket backlogs are likely to be non-empty concurrently.
1922	 */
1923	limit += 64 * 1024;
1924
1925	if (unlikely(sk_add_backlog(sk, skb, limit))) {
1926		bh_unlock_sock(sk);
1927		*reason = SKB_DROP_REASON_SOCKET_BACKLOG;
1928		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPBACKLOGDROP);
1929		return true;
1930	}
1931	return false;
1932}
1933EXPORT_SYMBOL(tcp_add_backlog);
1934
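/* Run the attached socket filter (if any) on @skb.  The filter may trim the
 * packet, but never below the TCP header (th->doff * 4 bytes).  Returns
 * non-zero if the packet should be dropped.
 */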
1935int tcp_filter(struct sock *sk, struct sk_buff *skb)
1936{
1937	struct tcphdr *th = (struct tcphdr *)skb->data;
1938
1939	return sk_filter_trim_cap(sk, skb, th->doff * 4);
1940}
1941EXPORT_SYMBOL(tcp_filter);
1942
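/* Undo tcp_v4_fill_cb(): move the IP control block back to its normal
 * location in skb->cb[] so the skb can be handed to another socket lookup
 * or to code that expects a valid IPCB.
 */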
1943static void tcp_v4_restore_cb(struct sk_buff *skb)
1944{
1945	memmove(IPCB(skb), &TCP_SKB_CB(skb)->header.h4,
1946		sizeof(struct inet_skb_parm));
1947}
1948
1949static void tcp_v4_fill_cb(struct sk_buff *skb, const struct iphdr *iph,
1950			   const struct tcphdr *th)
1951{
1952	/* This is tricky : We move IPCB at its correct location into TCP_SKB_CB()
1953	 * barrier() makes sure the compiler won't play fool^Waliasing games.
1954	 */
1955	memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
1956		sizeof(struct inet_skb_parm));
1957	barrier();
1958
1959	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1960	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1961				    skb->len - th->doff * 4);
1962	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1963	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1964	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1965	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
1966	TCP_SKB_CB(skb)->sacked	 = 0;
1967	TCP_SKB_CB(skb)->has_rxtstamp =
1968			skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
1969}
1970
1971/*
1972 *	From tcp_input.c
1973 */
1974
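/* Main IPv4 TCP receive routine: validate the header and checksum, look up
 * the owning socket (request, timewait or full socket), and either process
 * the segment directly or queue it on the socket backlog when the socket is
 * owned by user context.
 */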
1975int tcp_v4_rcv(struct sk_buff *skb)
1976{
1977	struct net *net = dev_net(skb->dev);
1978	enum skb_drop_reason drop_reason;
1979	int sdif = inet_sdif(skb);
1980	int dif = inet_iif(skb);
1981	const struct iphdr *iph;
1982	const struct tcphdr *th;
1983	bool refcounted;
1984	struct sock *sk;
1985	int ret;
1986
1987	drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;
1988	if (skb->pkt_type != PACKET_HOST)
1989		goto discard_it;
1990
1991	/* Count it even if it's bad */
1992	__TCP_INC_STATS(net, TCP_MIB_INSEGS);
1993
1994	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1995		goto discard_it;
1996
1997	th = (const struct tcphdr *)skb->data;
1998
1999	if (unlikely(th->doff < sizeof(struct tcphdr) / 4)) {
2000		drop_reason = SKB_DROP_REASON_PKT_TOO_SMALL;
2001		goto bad_packet;
2002	}
2003	if (!pskb_may_pull(skb, th->doff * 4))
2004		goto discard_it;
2005
2006	/* An explanation is required here, I think.
2007	 * Packet length and doff are validated by header prediction,
2008	 * provided the th->doff==0 case is eliminated.
2009	 * So, we defer the checks. */
2010
2011	if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
2012		goto csum_error;
2013
2014	th = (const struct tcphdr *)skb->data;
2015	iph = ip_hdr(skb);
2016lookup:
2017	sk = __inet_lookup_skb(net->ipv4.tcp_death_row.hashinfo,
2018			       skb, __tcp_hdrlen(th), th->source,
2019			       th->dest, sdif, &refcounted);
2020	if (!sk)
2021		goto no_tcp_socket;
2022
2023process:
2024	if (sk->sk_state == TCP_TIME_WAIT)
2025		goto do_time_wait;
2026
2027	if (sk->sk_state == TCP_NEW_SYN_RECV) {
2028		struct request_sock *req = inet_reqsk(sk);
2029		bool req_stolen = false;
2030		struct sock *nsk;
2031
2032		sk = req->rsk_listener;
2033		if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
2034			drop_reason = SKB_DROP_REASON_XFRM_POLICY;
2035		else
2036			drop_reason = tcp_inbound_md5_hash(sk, skb,
2037						   &iph->saddr, &iph->daddr,
2038						   AF_INET, dif, sdif);
2039		if (unlikely(drop_reason)) {
2040			sk_drops_add(sk, skb);
2041			reqsk_put(req);
2042			goto discard_it;
2043		}
2044		if (tcp_checksum_complete(skb)) {
2045			reqsk_put(req);
2046			goto csum_error;
2047		}
2048		if (unlikely(sk->sk_state != TCP_LISTEN)) {
2049			nsk = reuseport_migrate_sock(sk, req_to_sk(req), skb);
2050			if (!nsk) {
2051				inet_csk_reqsk_queue_drop_and_put(sk, req);
2052				goto lookup;
2053			}
2054			sk = nsk;
2055			/* reuseport_migrate_sock() has already held one sk_refcnt
2056			 * before returning.
2057			 */
2058		} else {
2059			/* We own a reference on the listener, increase it again
2060			 * as we might lose it too soon.
2061			 */
2062			sock_hold(sk);
2063		}
2064		refcounted = true;
2065		nsk = NULL;
2066		if (!tcp_filter(sk, skb)) {
2067			th = (const struct tcphdr *)skb->data;
2068			iph = ip_hdr(skb);
2069			tcp_v4_fill_cb(skb, iph, th);
2070			nsk = tcp_check_req(sk, skb, req, false, &req_stolen);
2071		} else {
2072			drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
2073		}
2074		if (!nsk) {
2075			reqsk_put(req);
2076			if (req_stolen) {
2077				/* Another cpu got exclusive access to req
2078				 * and created a full blown socket.
2079				 * Try to feed this packet to this socket
2080				 * instead of discarding it.
2081				 */
2082				tcp_v4_restore_cb(skb);
2083				sock_put(sk);
2084				goto lookup;
2085			}
2086			goto discard_and_relse;
2087		}
2088		nf_reset_ct(skb);
2089		if (nsk == sk) {
2090			reqsk_put(req);
2091			tcp_v4_restore_cb(skb);
2092		} else if (tcp_child_process(sk, nsk, skb)) {
2093			tcp_v4_send_reset(nsk, skb);
2094			goto discard_and_relse;
2095		} else {
2096			sock_put(sk);
2097			return 0;
2098		}
2099	}
2100
2101	if (static_branch_unlikely(&ip4_min_ttl)) {
2102		/* min_ttl can be changed concurrently from do_ip_setsockopt() */
2103		if (unlikely(iph->ttl < READ_ONCE(inet_sk(sk)->min_ttl))) {
2104			__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
2105			goto discard_and_relse;
2106		}
2107	}
2108
2109	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
2110		drop_reason = SKB_DROP_REASON_XFRM_POLICY;
2111		goto discard_and_relse;
2112	}
2113
2114	drop_reason = tcp_inbound_md5_hash(sk, skb, &iph->saddr,
2115					   &iph->daddr, AF_INET, dif, sdif);
2116	if (drop_reason)
2117		goto discard_and_relse;
2118
2119	nf_reset_ct(skb);
2120
2121	if (tcp_filter(sk, skb)) {
2122		drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
2123		goto discard_and_relse;
2124	}
2125	th = (const struct tcphdr *)skb->data;
2126	iph = ip_hdr(skb);
2127	tcp_v4_fill_cb(skb, iph, th);
2128
2129	skb->dev = NULL;
2130
2131	if (sk->sk_state == TCP_LISTEN) {
2132		ret = tcp_v4_do_rcv(sk, skb);
2133		goto put_and_return;
2134	}
2135
2136	sk_incoming_cpu_update(sk);
2137
2138	bh_lock_sock_nested(sk);
2139	tcp_segs_in(tcp_sk(sk), skb);
2140	ret = 0;
2141	if (!sock_owned_by_user(sk)) {
2142		ret = tcp_v4_do_rcv(sk, skb);
2143	} else {
2144		if (tcp_add_backlog(sk, skb, &drop_reason))
2145			goto discard_and_relse;
2146	}
2147	bh_unlock_sock(sk);
2148
2149put_and_return:
2150	if (refcounted)
2151		sock_put(sk);
2152
2153	return ret;
2154
2155no_tcp_socket:
2156	drop_reason = SKB_DROP_REASON_NO_SOCKET;
2157	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
2158		goto discard_it;
2159
2160	tcp_v4_fill_cb(skb, iph, th);
2161
2162	if (tcp_checksum_complete(skb)) {
2163csum_error:
2164		drop_reason = SKB_DROP_REASON_TCP_CSUM;
2165		trace_tcp_bad_csum(skb);
2166		__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
2167bad_packet:
2168		__TCP_INC_STATS(net, TCP_MIB_INERRS);
2169	} else {
2170		tcp_v4_send_reset(NULL, skb);
2171	}
2172
2173discard_it:
2174	SKB_DR_OR(drop_reason, NOT_SPECIFIED);
2175	/* Discard frame. */
2176	kfree_skb_reason(skb, drop_reason);
2177	return 0;
2178
2179discard_and_relse:
2180	sk_drops_add(sk, skb);
2181	if (refcounted)
2182		sock_put(sk);
2183	goto discard_it;
2184
2185do_time_wait:
2186	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
2187		drop_reason = SKB_DROP_REASON_XFRM_POLICY;
2188		inet_twsk_put(inet_twsk(sk));
2189		goto discard_it;
2190	}
2191
2192	tcp_v4_fill_cb(skb, iph, th);
2193
2194	if (tcp_checksum_complete(skb)) {
2195		inet_twsk_put(inet_twsk(sk));
2196		goto csum_error;
2197	}
2198	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
2199	case TCP_TW_SYN: {
2200		struct sock *sk2 = inet_lookup_listener(net,
2201							net->ipv4.tcp_death_row.hashinfo,
2202							skb, __tcp_hdrlen(th),
2203							iph->saddr, th->source,
2204							iph->daddr, th->dest,
2205							inet_iif(skb),
2206							sdif);
2207		if (sk2) {
2208			inet_twsk_deschedule_put(inet_twsk(sk));
2209			sk = sk2;
2210			tcp_v4_restore_cb(skb);
2211			refcounted = false;
2212			goto process;
2213		}
 
2214	}
2215		/* to ACK */
2216		fallthrough;
2217	case TCP_TW_ACK:
2218		tcp_v4_timewait_ack(sk, skb);
2219		break;
2220	case TCP_TW_RST:
2221		tcp_v4_send_reset(sk, skb);
2222		inet_twsk_deschedule_put(inet_twsk(sk));
2223		goto discard_it;
2224	case TCP_TW_SUCCESS:;
2225	}
2226	goto discard_it;
2227}
2228
2229static struct timewait_sock_ops tcp_timewait_sock_ops = {
2230	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
2231	.twsk_unique	= tcp_twsk_unique,
2232	.twsk_destructor= tcp_twsk_destructor,
2233};
2234
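/* Cache the input route of @skb on the socket, together with the incoming
 * interface index, so the established-state fast path and early demux can
 * reuse it for subsequent segments.
 */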
2235void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
2236{
2237	struct dst_entry *dst = skb_dst(skb);
2238
2239	if (dst && dst_hold_safe(dst)) {
2240		rcu_assign_pointer(sk->sk_rx_dst, dst);
2241		sk->sk_rx_dst_ifindex = skb->skb_iif;
2242	}
2243}
2244EXPORT_SYMBOL(inet_sk_rx_dst_set);
2245
2246const struct inet_connection_sock_af_ops ipv4_specific = {
2247	.queue_xmit	   = ip_queue_xmit,
2248	.send_check	   = tcp_v4_send_check,
2249	.rebuild_header	   = inet_sk_rebuild_header,
2250	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
2251	.conn_request	   = tcp_v4_conn_request,
2252	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
2253	.net_header_len	   = sizeof(struct iphdr),
2254	.setsockopt	   = ip_setsockopt,
2255	.getsockopt	   = ip_getsockopt,
2256	.addr2sockaddr	   = inet_csk_addr2sockaddr,
2257	.sockaddr_len	   = sizeof(struct sockaddr_in),
2258	.mtu_reduced	   = tcp_v4_mtu_reduced,
2259};
2260EXPORT_SYMBOL(ipv4_specific);
2261
2262#ifdef CONFIG_TCP_MD5SIG
2263static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
2264	.md5_lookup		= tcp_v4_md5_lookup,
2265	.calc_md5_hash		= tcp_v4_md5_hash_skb,
2266	.md5_parse		= tcp_v4_parse_md5_keys,
2267};
2268#endif
2269
2270/* NOTE: A lot of things are set to zero explicitly by the call to
2271 *       sk_alloc(), so they need not be done here.
2272 */
2273static int tcp_v4_init_sock(struct sock *sk)
2274{
2275	struct inet_connection_sock *icsk = inet_csk(sk);
2276
2277	tcp_init_sock(sk);
2278
2279	icsk->icsk_af_ops = &ipv4_specific;
2280
2281#ifdef CONFIG_TCP_MD5SIG
2282	tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
2283#endif
2284
2285	return 0;
2286}
2287
2288void tcp_v4_destroy_sock(struct sock *sk)
2289{
2290	struct tcp_sock *tp = tcp_sk(sk);
2291
2292	trace_tcp_destroy_sock(sk);
2293
2294	tcp_clear_xmit_timers(sk);
2295
2296	tcp_cleanup_congestion_control(sk);
2297
2298	tcp_cleanup_ulp(sk);
2299
2300	/* Clean up the write buffer. */
2301	tcp_write_queue_purge(sk);
2302
2303	/* Check if we want to disable active TFO */
2304	tcp_fastopen_active_disable_ofo_check(sk);
2305
2306	/* Cleans up our, hopefully empty, out_of_order_queue. */
2307	skb_rbtree_purge(&tp->out_of_order_queue);
2308
2309#ifdef CONFIG_TCP_MD5SIG
2310	/* Clean up the MD5 key list, if any */
2311	if (tp->md5sig_info) {
2312		tcp_clear_md5_list(sk);
2313		kfree_rcu(rcu_dereference_protected(tp->md5sig_info, 1), rcu);
2314		tp->md5sig_info = NULL;
2315		static_branch_slow_dec_deferred(&tcp_md5_needed);
2316	}
2317#endif
2318
2319	/* Clean up a referenced TCP bind bucket. */
2320	if (inet_csk(sk)->icsk_bind_hash)
2321		inet_put_port(sk);
2322
2323	BUG_ON(rcu_access_pointer(tp->fastopen_rsk));
2324
2325	/* If socket is aborted during connect operation */
2326	tcp_free_fastopen_req(tp);
2327	tcp_fastopen_destroy_cipher(sk);
2328	tcp_saved_syn_free(tp);
2329
 
2330	sk_sockets_allocated_dec(sk);
 
2331}
2332EXPORT_SYMBOL(tcp_v4_destroy_sock);
2333
2334#ifdef CONFIG_PROC_FS
2335/* Proc filesystem TCP sock list dumping. */
2336
2337static unsigned short seq_file_family(const struct seq_file *seq);
2338
2339static bool seq_sk_match(struct seq_file *seq, const struct sock *sk)
2340{
2341	unsigned short family = seq_file_family(seq);
2342
2343	/* AF_UNSPEC is used as a match all */
2344	return ((family == AF_UNSPEC || family == sk->sk_family) &&
2345		net_eq(sock_net(sk), seq_file_net(seq)));
2346}
2347
2348/* Find a non-empty bucket (starting from st->bucket)
2349 * and return the first sk from it.
2350 */
2351static void *listening_get_first(struct seq_file *seq)
2352{
2353	struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo;
2354	struct tcp_iter_state *st = seq->private;
2355
2356	st->offset = 0;
2357	for (; st->bucket <= hinfo->lhash2_mask; st->bucket++) {
2358		struct inet_listen_hashbucket *ilb2;
2359		struct hlist_nulls_node *node;
2360		struct sock *sk;
2361
2362		ilb2 = &hinfo->lhash2[st->bucket];
2363		if (hlist_nulls_empty(&ilb2->nulls_head))
2364			continue;
2365
2366		spin_lock(&ilb2->lock);
2367		sk_nulls_for_each(sk, node, &ilb2->nulls_head) {
2368			if (seq_sk_match(seq, sk))
2369				return sk;
2370		}
2371		spin_unlock(&ilb2->lock);
2372	}
2373
2374	return NULL;
2375}
2376
2377/* Find the next sk of "cur" within the same bucket (i.e. st->bucket).
2378 * If "cur" is the last one in the st->bucket,
2379 * call listening_get_first() to return the first sk of the next
2380 * non-empty bucket.
2381 */
2382static void *listening_get_next(struct seq_file *seq, void *cur)
2383{
2384	struct tcp_iter_state *st = seq->private;
2385	struct inet_listen_hashbucket *ilb2;
2386	struct hlist_nulls_node *node;
2387	struct inet_hashinfo *hinfo;
2388	struct sock *sk = cur;
2389
2390	++st->num;
2391	++st->offset;
2392
2393	sk = sk_nulls_next(sk);
2394	sk_nulls_for_each_from(sk, node) {
2395		if (seq_sk_match(seq, sk))
2396			return sk;
2397	}
2398
2399	hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo;
2400	ilb2 = &hinfo->lhash2[st->bucket];
2401	spin_unlock(&ilb2->lock);
2402	++st->bucket;
2403	return listening_get_first(seq);
2404}
2405
2406static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
2407{
2408	struct tcp_iter_state *st = seq->private;
2409	void *rc;
2410
2411	st->bucket = 0;
2412	st->offset = 0;
2413	rc = listening_get_first(seq);
2414
2415	while (rc && *pos) {
2416		rc = listening_get_next(seq, rc);
2417		--*pos;
2418	}
2419	return rc;
2420}
2421
2422static inline bool empty_bucket(struct inet_hashinfo *hinfo,
2423				const struct tcp_iter_state *st)
2424{
2425	return hlist_nulls_empty(&hinfo->ehash[st->bucket].chain);
2426}
2427
2428/*
2429 * Get first established socket starting from bucket given in st->bucket.
2430 * If st->bucket is zero, the very first socket in the hash is returned.
2431 */
2432static void *established_get_first(struct seq_file *seq)
2433{
2434	struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo;
2435	struct tcp_iter_state *st = seq->private;
2436
2437	st->offset = 0;
2438	for (; st->bucket <= hinfo->ehash_mask; ++st->bucket) {
2439		struct sock *sk;
2440		struct hlist_nulls_node *node;
2441		spinlock_t *lock = inet_ehash_lockp(hinfo, st->bucket);
2442
2443		/* Lockless fast path for the common case of empty buckets */
2444		if (empty_bucket(hinfo, st))
2445			continue;
2446
2447		spin_lock_bh(lock);
2448		sk_nulls_for_each(sk, node, &hinfo->ehash[st->bucket].chain) {
2449			if (seq_sk_match(seq, sk))
2450				return sk;
2451		}
2452		spin_unlock_bh(lock);
2453	}
2454
2455	return NULL;
2456}
2457
2458static void *established_get_next(struct seq_file *seq, void *cur)
2459{
2460	struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo;
2461	struct tcp_iter_state *st = seq->private;
2462	struct hlist_nulls_node *node;
2463	struct sock *sk = cur;
2464
2465	++st->num;
2466	++st->offset;
2467
2468	sk = sk_nulls_next(sk);
2469
2470	sk_nulls_for_each_from(sk, node) {
2471		if (seq_sk_match(seq, sk))
2472			return sk;
2473	}
2474
2475	spin_unlock_bh(inet_ehash_lockp(hinfo, st->bucket));
2476	++st->bucket;
2477	return established_get_first(seq);
2478}
2479
2480static void *established_get_idx(struct seq_file *seq, loff_t pos)
2481{
2482	struct tcp_iter_state *st = seq->private;
2483	void *rc;
2484
2485	st->bucket = 0;
2486	rc = established_get_first(seq);
2487
2488	while (rc && pos) {
2489		rc = established_get_next(seq, rc);
2490		--pos;
2491	}
2492	return rc;
2493}
2494
2495static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2496{
2497	void *rc;
2498	struct tcp_iter_state *st = seq->private;
2499
2500	st->state = TCP_SEQ_STATE_LISTENING;
2501	rc	  = listening_get_idx(seq, &pos);
2502
2503	if (!rc) {
2504		st->state = TCP_SEQ_STATE_ESTABLISHED;
2505		rc	  = established_get_idx(seq, pos);
2506	}
2507
2508	return rc;
2509}
2510
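/* Resume iteration at the bucket and in-bucket offset recorded by the
 * previous pass, so that a read restarted at the same position does not
 * rescan earlier buckets.
 */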
2511static void *tcp_seek_last_pos(struct seq_file *seq)
2512{
2513	struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo;
2514	struct tcp_iter_state *st = seq->private;
2515	int bucket = st->bucket;
2516	int offset = st->offset;
2517	int orig_num = st->num;
2518	void *rc = NULL;
2519
2520	switch (st->state) {
2521	case TCP_SEQ_STATE_LISTENING:
2522		if (st->bucket > hinfo->lhash2_mask)
2523			break;
2524		rc = listening_get_first(seq);
2525		while (offset-- && rc && bucket == st->bucket)
2526			rc = listening_get_next(seq, rc);
2527		if (rc)
2528			break;
2529		st->bucket = 0;
2530		st->state = TCP_SEQ_STATE_ESTABLISHED;
2531		fallthrough;
2532	case TCP_SEQ_STATE_ESTABLISHED:
2533		if (st->bucket > hinfo->ehash_mask)
2534			break;
2535		rc = established_get_first(seq);
2536		while (offset-- && rc && bucket == st->bucket)
2537			rc = established_get_next(seq, rc);
2538	}
2539
2540	st->num = orig_num;
2541
2542	return rc;
2543}
2544
2545void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2546{
2547	struct tcp_iter_state *st = seq->private;
2548	void *rc;
2549
2550	if (*pos && *pos == st->last_pos) {
2551		rc = tcp_seek_last_pos(seq);
2552		if (rc)
2553			goto out;
2554	}
2555
2556	st->state = TCP_SEQ_STATE_LISTENING;
2557	st->num = 0;
2558	st->bucket = 0;
2559	st->offset = 0;
2560	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2561
2562out:
2563	st->last_pos = *pos;
2564	return rc;
2565}
2566EXPORT_SYMBOL(tcp_seq_start);
2567
2568void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2569{
2570	struct tcp_iter_state *st = seq->private;
2571	void *rc = NULL;
2572
2573	if (v == SEQ_START_TOKEN) {
2574		rc = tcp_get_idx(seq, 0);
2575		goto out;
2576	}
2577
2578	switch (st->state) {
2579	case TCP_SEQ_STATE_LISTENING:
2580		rc = listening_get_next(seq, v);
2581		if (!rc) {
2582			st->state = TCP_SEQ_STATE_ESTABLISHED;
2583			st->bucket = 0;
2584			st->offset = 0;
2585			rc	  = established_get_first(seq);
2586		}
2587		break;
2588	case TCP_SEQ_STATE_ESTABLISHED:
2589		rc = established_get_next(seq, v);
2590		break;
2591	}
2592out:
2593	++*pos;
2594	st->last_pos = *pos;
2595	return rc;
2596}
2597EXPORT_SYMBOL(tcp_seq_next);
2598
2599void tcp_seq_stop(struct seq_file *seq, void *v)
2600{
2601	struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo;
2602	struct tcp_iter_state *st = seq->private;
2603
2604	switch (st->state) {
2605	case TCP_SEQ_STATE_LISTENING:
2606		if (v != SEQ_START_TOKEN)
2607			spin_unlock(&hinfo->lhash2[st->bucket].lock);
2608		break;
2609	case TCP_SEQ_STATE_ESTABLISHED:
2610		if (v)
2611			spin_unlock_bh(inet_ehash_lockp(hinfo, st->bucket));
2612		break;
2613	}
2614}
2615EXPORT_SYMBOL(tcp_seq_stop);
2616
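/* Format one /proc/net/tcp line for a SYN_RECV request socket (an embryonic
 * connection still owned by its listener).
 */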
2617static void get_openreq4(const struct request_sock *req,
2618			 struct seq_file *f, int i)
2619{
2620	const struct inet_request_sock *ireq = inet_rsk(req);
2621	long delta = req->rsk_timer.expires - jiffies;
2622
2623	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2624		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
2625		i,
2626		ireq->ir_loc_addr,
2627		ireq->ir_num,
2628		ireq->ir_rmt_addr,
2629		ntohs(ireq->ir_rmt_port),
2630		TCP_SYN_RECV,
2631		0, 0, /* could print option size, but that is af dependent. */
2632		1,    /* timers active (only the expire timer) */
2633		jiffies_delta_to_clock_t(delta),
2634		req->num_timeout,
2635		from_kuid_munged(seq_user_ns(f),
2636				 sock_i_uid(req->rsk_listener)),
2637		0,  /* non standard timer */
2638		0, /* open_requests have no inode */
2639		0,
2640		req);
2641}
2642
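/* Format one /proc/net/tcp line for a full socket (listening, established,
 * etc.), including queue sizes, timer state and congestion window.
 */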
2643static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
2644{
2645	int timer_active;
2646	unsigned long timer_expires;
2647	const struct tcp_sock *tp = tcp_sk(sk);
2648	const struct inet_connection_sock *icsk = inet_csk(sk);
2649	const struct inet_sock *inet = inet_sk(sk);
2650	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
2651	__be32 dest = inet->inet_daddr;
2652	__be32 src = inet->inet_rcv_saddr;
2653	__u16 destp = ntohs(inet->inet_dport);
2654	__u16 srcp = ntohs(inet->inet_sport);
2655	int rx_queue;
2656	int state;
2657
2658	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
2659	    icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
2660	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
2661		timer_active	= 1;
2662		timer_expires	= icsk->icsk_timeout;
2663	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2664		timer_active	= 4;
2665		timer_expires	= icsk->icsk_timeout;
2666	} else if (timer_pending(&sk->sk_timer)) {
2667		timer_active	= 2;
2668		timer_expires	= sk->sk_timer.expires;
2669	} else {
2670		timer_active	= 0;
2671		timer_expires = jiffies;
2672	}
2673
2674	state = inet_sk_state_load(sk);
2675	if (state == TCP_LISTEN)
2676		rx_queue = READ_ONCE(sk->sk_ack_backlog);
2677	else
2678		/* Because we don't lock the socket,
2679		 * we might find a transient negative value.
2680		 */
2681		rx_queue = max_t(int, READ_ONCE(tp->rcv_nxt) -
2682				      READ_ONCE(tp->copied_seq), 0);
2683
2684	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2685			"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
2686		i, src, srcp, dest, destp, state,
2687		READ_ONCE(tp->write_seq) - tp->snd_una,
2688		rx_queue,
2689		timer_active,
2690		jiffies_delta_to_clock_t(timer_expires - jiffies),
2691		icsk->icsk_retransmits,
2692		from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
2693		icsk->icsk_probes_out,
2694		sock_i_ino(sk),
2695		refcount_read(&sk->sk_refcnt), sk,
2696		jiffies_to_clock_t(icsk->icsk_rto),
2697		jiffies_to_clock_t(icsk->icsk_ack.ato),
2698		(icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(sk),
2699		tcp_snd_cwnd(tp),
2700		state == TCP_LISTEN ?
2701		    fastopenq->max_qlen :
2702		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
2703}
2704
2705static void get_timewait4_sock(const struct inet_timewait_sock *tw,
2706			       struct seq_file *f, int i)
2707{
2708	long delta = tw->tw_timer.expires - jiffies;
2709	__be32 dest, src;
2710	__u16 destp, srcp;
2711
2712	dest  = tw->tw_daddr;
2713	src   = tw->tw_rcv_saddr;
2714	destp = ntohs(tw->tw_dport);
2715	srcp  = ntohs(tw->tw_sport);
2716
2717	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2718		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
2719		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2720		3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
2721		refcount_read(&tw->tw_refcnt), tw);
2722}
2723
2724#define TMPSZ 150
2725
2726static int tcp4_seq_show(struct seq_file *seq, void *v)
2727{
2728	struct tcp_iter_state *st;
2729	struct sock *sk = v;
2730
2731	seq_setwidth(seq, TMPSZ - 1);
2732	if (v == SEQ_START_TOKEN) {
2733		seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
2734			   "rx_queue tr tm->when retrnsmt   uid  timeout "
2735			   "inode");
2736		goto out;
2737	}
2738	st = seq->private;
2739
2740	if (sk->sk_state == TCP_TIME_WAIT)
2741		get_timewait4_sock(v, seq, st->num);
2742	else if (sk->sk_state == TCP_NEW_SYN_RECV)
2743		get_openreq4(v, seq, st->num);
2744	else
2745		get_tcp4_sock(v, seq, st->num);
2746out:
2747	seq_pad(seq, '\n');
2748	return 0;
2749}
2750
2751#ifdef CONFIG_BPF_SYSCALL
2752struct bpf_tcp_iter_state {
2753	struct tcp_iter_state state;
2754	unsigned int cur_sk;
2755	unsigned int end_sk;
2756	unsigned int max_sk;
2757	struct sock **batch;
2758	bool st_bucket_done;
2759};
2760
2761struct bpf_iter__tcp {
2762	__bpf_md_ptr(struct bpf_iter_meta *, meta);
2763	__bpf_md_ptr(struct sock_common *, sk_common);
2764	uid_t uid __aligned(8);
2765};
2766
2767static int tcp_prog_seq_show(struct bpf_prog *prog, struct bpf_iter_meta *meta,
2768			     struct sock_common *sk_common, uid_t uid)
2769{
2770	struct bpf_iter__tcp ctx;
2771
2772	meta->seq_num--;  /* skip SEQ_START_TOKEN */
2773	ctx.meta = meta;
2774	ctx.sk_common = sk_common;
2775	ctx.uid = uid;
2776	return bpf_iter_run_prog(prog, &ctx);
2777}
2778
2779static void bpf_iter_tcp_put_batch(struct bpf_tcp_iter_state *iter)
2780{
2781	while (iter->cur_sk < iter->end_sk)
2782		sock_put(iter->batch[iter->cur_sk++]);
2783}
2784
2785static int bpf_iter_tcp_realloc_batch(struct bpf_tcp_iter_state *iter,
2786				      unsigned int new_batch_sz)
2787{
2788	struct sock **new_batch;
2789
2790	new_batch = kvmalloc(sizeof(*new_batch) * new_batch_sz,
2791			     GFP_USER | __GFP_NOWARN);
2792	if (!new_batch)
2793		return -ENOMEM;
2794
2795	bpf_iter_tcp_put_batch(iter);
2796	kvfree(iter->batch);
2797	iter->batch = new_batch;
2798	iter->max_sk = new_batch_sz;
2799
2800	return 0;
2801}
2802
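/* Under the bucket lock, copy up to iter->max_sk matching sockets from the
 * current listening bucket into iter->batch, holding a reference on each.
 * Returns the number of matching sockets found in the bucket so the caller
 * can grow the batch and retry if they did not all fit.
 */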
2803static unsigned int bpf_iter_tcp_listening_batch(struct seq_file *seq,
2804						 struct sock *start_sk)
2805{
2806	struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo;
2807	struct bpf_tcp_iter_state *iter = seq->private;
2808	struct tcp_iter_state *st = &iter->state;
2809	struct hlist_nulls_node *node;
2810	unsigned int expected = 1;
2811	struct sock *sk;
2812
2813	sock_hold(start_sk);
2814	iter->batch[iter->end_sk++] = start_sk;
2815
2816	sk = sk_nulls_next(start_sk);
2817	sk_nulls_for_each_from(sk, node) {
2818		if (seq_sk_match(seq, sk)) {
2819			if (iter->end_sk < iter->max_sk) {
2820				sock_hold(sk);
2821				iter->batch[iter->end_sk++] = sk;
2822			}
2823			expected++;
2824		}
2825	}
2826	spin_unlock(&hinfo->lhash2[st->bucket].lock);
2827
2828	return expected;
2829}
2830
2831static unsigned int bpf_iter_tcp_established_batch(struct seq_file *seq,
2832						   struct sock *start_sk)
2833{
2834	struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo;
2835	struct bpf_tcp_iter_state *iter = seq->private;
2836	struct tcp_iter_state *st = &iter->state;
2837	struct hlist_nulls_node *node;
2838	unsigned int expected = 1;
2839	struct sock *sk;
2840
2841	sock_hold(start_sk);
2842	iter->batch[iter->end_sk++] = start_sk;
2843
2844	sk = sk_nulls_next(start_sk);
2845	sk_nulls_for_each_from(sk, node) {
2846		if (seq_sk_match(seq, sk)) {
2847			if (iter->end_sk < iter->max_sk) {
2848				sock_hold(sk);
2849				iter->batch[iter->end_sk++] = sk;
2850			}
2851			expected++;
2852		}
2853	}
2854	spin_unlock_bh(inet_ehash_lockp(hinfo, st->bucket));
2855
2856	return expected;
2857}
2858
2859static struct sock *bpf_iter_tcp_batch(struct seq_file *seq)
2860{
2861	struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo;
2862	struct bpf_tcp_iter_state *iter = seq->private;
2863	struct tcp_iter_state *st = &iter->state;
2864	unsigned int expected;
2865	bool resized = false;
2866	struct sock *sk;
2867
2868	/* The st->bucket is done.  Directly advance to the next
2869	 * bucket instead of having tcp_seek_last_pos() skip the sockets
2870	 * one by one in the current bucket and eventually find out
2871	 * it has to advance to the next bucket.
2872	 */
2873	if (iter->st_bucket_done) {
2874		st->offset = 0;
2875		st->bucket++;
2876		if (st->state == TCP_SEQ_STATE_LISTENING &&
2877		    st->bucket > hinfo->lhash2_mask) {
2878			st->state = TCP_SEQ_STATE_ESTABLISHED;
2879			st->bucket = 0;
2880		}
2881	}
2882
2883again:
2884	/* Get a new batch */
2885	iter->cur_sk = 0;
2886	iter->end_sk = 0;
2887	iter->st_bucket_done = false;
2888
2889	sk = tcp_seek_last_pos(seq);
2890	if (!sk)
2891		return NULL; /* Done */
2892
2893	if (st->state == TCP_SEQ_STATE_LISTENING)
2894		expected = bpf_iter_tcp_listening_batch(seq, sk);
2895	else
2896		expected = bpf_iter_tcp_established_batch(seq, sk);
2897
2898	if (iter->end_sk == expected) {
2899		iter->st_bucket_done = true;
2900		return sk;
2901	}
2902
2903	if (!resized && !bpf_iter_tcp_realloc_batch(iter, expected * 3 / 2)) {
2904		resized = true;
2905		goto again;
2906	}
2907
2908	return sk;
2909}
2910
2911static void *bpf_iter_tcp_seq_start(struct seq_file *seq, loff_t *pos)
2912{
2913	/* bpf iter does not support lseek, so it always
2914	 * continues from where it was stop()-ped.
2915	 */
2916	if (*pos)
2917		return bpf_iter_tcp_batch(seq);
2918
2919	return SEQ_START_TOKEN;
2920}
2921
2922static void *bpf_iter_tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2923{
2924	struct bpf_tcp_iter_state *iter = seq->private;
2925	struct tcp_iter_state *st = &iter->state;
2926	struct sock *sk;
2927
2928	/* Whenever seq_next() is called, the iter->cur_sk is
2929	 * done with seq_show(), so advance to the next sk in
2930	 * the batch.
2931	 */
2932	if (iter->cur_sk < iter->end_sk) {
2933		/* Keeping st->num consistent in tcp_iter_state.
2934		 * bpf_iter_tcp does not use st->num.
2935		 * meta.seq_num is used instead.
2936		 */
2937		st->num++;
2938		/* Move st->offset to the next sk in the bucket such that
2939		 * the future start() will resume at st->offset in
2940		 * st->bucket.  See tcp_seek_last_pos().
2941		 */
2942		st->offset++;
2943		sock_put(iter->batch[iter->cur_sk++]);
2944	}
2945
2946	if (iter->cur_sk < iter->end_sk)
2947		sk = iter->batch[iter->cur_sk];
2948	else
2949		sk = bpf_iter_tcp_batch(seq);
2950
2951	++*pos;
2952	/* Keeping st->last_pos consistent in tcp_iter_state.
2953	 * bpf iter does not do lseek, so st->last_pos always equals *pos.
2954	 */
2955	st->last_pos = *pos;
2956	return sk;
2957}
2958
2959static int bpf_iter_tcp_seq_show(struct seq_file *seq, void *v)
2960{
2961	struct bpf_iter_meta meta;
2962	struct bpf_prog *prog;
2963	struct sock *sk = v;
2964	bool slow;
2965	uid_t uid;
2966	int ret;
2967
2968	if (v == SEQ_START_TOKEN)
2969		return 0;
2970
2971	if (sk_fullsock(sk))
2972		slow = lock_sock_fast(sk);
2973
2974	if (unlikely(sk_unhashed(sk))) {
2975		ret = SEQ_SKIP;
2976		goto unlock;
2977	}
2978
2979	if (sk->sk_state == TCP_TIME_WAIT) {
2980		uid = 0;
2981	} else if (sk->sk_state == TCP_NEW_SYN_RECV) {
2982		const struct request_sock *req = v;
2983
2984		uid = from_kuid_munged(seq_user_ns(seq),
2985				       sock_i_uid(req->rsk_listener));
2986	} else {
2987		uid = from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk));
2988	}
2989
2990	meta.seq = seq;
2991	prog = bpf_iter_get_info(&meta, false);
2992	ret = tcp_prog_seq_show(prog, &meta, v, uid);
2993
2994unlock:
2995	if (sk_fullsock(sk))
2996		unlock_sock_fast(sk, slow);
2997	return ret;
2998
2999}
3000
3001static void bpf_iter_tcp_seq_stop(struct seq_file *seq, void *v)
3002{
3003	struct bpf_tcp_iter_state *iter = seq->private;
3004	struct bpf_iter_meta meta;
3005	struct bpf_prog *prog;
3006
3007	if (!v) {
3008		meta.seq = seq;
3009		prog = bpf_iter_get_info(&meta, true);
3010		if (prog)
3011			(void)tcp_prog_seq_show(prog, &meta, v, 0);
3012	}
3013
3014	if (iter->cur_sk < iter->end_sk) {
3015		bpf_iter_tcp_put_batch(iter);
3016		iter->st_bucket_done = false;
3017	}
3018}
3019
3020static const struct seq_operations bpf_iter_tcp_seq_ops = {
3021	.show		= bpf_iter_tcp_seq_show,
3022	.start		= bpf_iter_tcp_seq_start,
3023	.next		= bpf_iter_tcp_seq_next,
3024	.stop		= bpf_iter_tcp_seq_stop,
3025};
3026#endif
3027static unsigned short seq_file_family(const struct seq_file *seq)
3028{
3029	const struct tcp_seq_afinfo *afinfo;
3030
3031#ifdef CONFIG_BPF_SYSCALL
3032	/* Iterated from bpf_iter.  Let the bpf prog filter instead. */
3033	if (seq->op == &bpf_iter_tcp_seq_ops)
3034		return AF_UNSPEC;
3035#endif
3036
3037	/* Iterated from proc fs */
3038	afinfo = pde_data(file_inode(seq->file));
3039	return afinfo->family;
3040}
3041
3042static const struct seq_operations tcp4_seq_ops = {
3043	.show		= tcp4_seq_show,
3044	.start		= tcp_seq_start,
3045	.next		= tcp_seq_next,
3046	.stop		= tcp_seq_stop,
3047};
3048
3049static struct tcp_seq_afinfo tcp4_seq_afinfo = {
3050	.family		= AF_INET,
3051};
3052
3053static int __net_init tcp4_proc_init_net(struct net *net)
3054{
3055	if (!proc_create_net_data("tcp", 0444, net->proc_net, &tcp4_seq_ops,
3056			sizeof(struct tcp_iter_state), &tcp4_seq_afinfo))
3057		return -ENOMEM;
3058	return 0;
3059}
3060
3061static void __net_exit tcp4_proc_exit_net(struct net *net)
3062{
3063	remove_proc_entry("tcp", net->proc_net);
3064}
3065
3066static struct pernet_operations tcp4_net_ops = {
3067	.init = tcp4_proc_init_net,
3068	.exit = tcp4_proc_exit_net,
3069};
3070
3071int __init tcp4_proc_init(void)
3072{
3073	return register_pernet_subsys(&tcp4_net_ops);
3074}
3075
3076void tcp4_proc_exit(void)
3077{
3078	unregister_pernet_subsys(&tcp4_net_ops);
3079}
3080#endif /* CONFIG_PROC_FS */
3081
3082/* @wake is one when sk_stream_write_space() calls us.
3083 * This sends EPOLLOUT only if notsent_bytes is below half the limit.
3084 * This mimics the strategy used in sock_def_write_space().
3085 */
3086bool tcp_stream_memory_free(const struct sock *sk, int wake)
3087{
3088	const struct tcp_sock *tp = tcp_sk(sk);
3089	u32 notsent_bytes = READ_ONCE(tp->write_seq) -
3090			    READ_ONCE(tp->snd_nxt);
3091
3092	return (notsent_bytes << wake) < tcp_notsent_lowat(tp);
3093}
3094EXPORT_SYMBOL(tcp_stream_memory_free);
3095
3096struct proto tcp_prot = {
3097	.name			= "TCP",
3098	.owner			= THIS_MODULE,
3099	.close			= tcp_close,
3100	.pre_connect		= tcp_v4_pre_connect,
3101	.connect		= tcp_v4_connect,
3102	.disconnect		= tcp_disconnect,
3103	.accept			= inet_csk_accept,
3104	.ioctl			= tcp_ioctl,
3105	.init			= tcp_v4_init_sock,
3106	.destroy		= tcp_v4_destroy_sock,
3107	.shutdown		= tcp_shutdown,
3108	.setsockopt		= tcp_setsockopt,
3109	.getsockopt		= tcp_getsockopt,
3110	.bpf_bypass_getsockopt	= tcp_bpf_bypass_getsockopt,
3111	.keepalive		= tcp_set_keepalive,
3112	.recvmsg		= tcp_recvmsg,
3113	.sendmsg		= tcp_sendmsg,
3114	.sendpage		= tcp_sendpage,
3115	.backlog_rcv		= tcp_v4_do_rcv,
3116	.release_cb		= tcp_release_cb,
3117	.hash			= inet_hash,
3118	.unhash			= inet_unhash,
3119	.get_port		= inet_csk_get_port,
3120	.put_port		= inet_put_port,
3121#ifdef CONFIG_BPF_SYSCALL
3122	.psock_update_sk_prot	= tcp_bpf_update_proto,
3123#endif
3124	.enter_memory_pressure	= tcp_enter_memory_pressure,
3125	.leave_memory_pressure	= tcp_leave_memory_pressure,
3126	.stream_memory_free	= tcp_stream_memory_free,
3127	.sockets_allocated	= &tcp_sockets_allocated,
3128	.orphan_count		= &tcp_orphan_count,
3129
3130	.memory_allocated	= &tcp_memory_allocated,
3131	.per_cpu_fw_alloc	= &tcp_memory_per_cpu_fw_alloc,
3132
3133	.memory_pressure	= &tcp_memory_pressure,
3134	.sysctl_mem		= sysctl_tcp_mem,
3135	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_wmem),
3136	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_rmem),
3137	.max_header		= MAX_TCP_HEADER,
3138	.obj_size		= sizeof(struct tcp_sock),
3139	.slab_flags		= SLAB_TYPESAFE_BY_RCU,
3140	.twsk_prot		= &tcp_timewait_sock_ops,
3141	.rsk_prot		= &tcp_request_sock_ops,
3142	.h.hashinfo		= NULL,
3143	.no_autobind		= true,
3144	.diag_destroy		= tcp_abort,
3145};
3146EXPORT_SYMBOL(tcp_prot);
3147
3148static void __net_exit tcp_sk_exit(struct net *net)
3149{
3150	if (net->ipv4.tcp_congestion_control)
3151		bpf_module_put(net->ipv4.tcp_congestion_control,
3152			       net->ipv4.tcp_congestion_control->owner);
3153}
3154
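/* Choose the ehash table for a new netns: allocate a dedicated table when
 * the creating netns requested one via sysctl_tcp_child_ehash_entries,
 * otherwise share the global tcp_hashinfo.  Related limits (max_tw_buckets,
 * max_syn_backlog) are scaled from the resulting table size.
 */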
3155static void __net_init tcp_set_hashinfo(struct net *net)
3156{
3157	struct inet_hashinfo *hinfo;
3158	unsigned int ehash_entries;
3159	struct net *old_net;
3160
3161	if (net_eq(net, &init_net))
3162		goto fallback;
3163
3164	old_net = current->nsproxy->net_ns;
3165	ehash_entries = READ_ONCE(old_net->ipv4.sysctl_tcp_child_ehash_entries);
3166	if (!ehash_entries)
3167		goto fallback;
3168
3169	ehash_entries = roundup_pow_of_two(ehash_entries);
3170	hinfo = inet_pernet_hashinfo_alloc(&tcp_hashinfo, ehash_entries);
3171	if (!hinfo) {
3172		pr_warn("Failed to allocate TCP ehash (entries: %u) "
3173			"for a netns, fallback to the global one\n",
3174			ehash_entries);
3175fallback:
3176		hinfo = &tcp_hashinfo;
3177		ehash_entries = tcp_hashinfo.ehash_mask + 1;
3178	}
3179
3180	net->ipv4.tcp_death_row.hashinfo = hinfo;
3181	net->ipv4.tcp_death_row.sysctl_max_tw_buckets = ehash_entries / 2;
3182	net->ipv4.sysctl_max_syn_backlog = max(128U, ehash_entries / 128);
3183}
3184
3185static int __net_init tcp_sk_init(struct net *net)
3186{
3187	net->ipv4.sysctl_tcp_ecn = 2;
3188	net->ipv4.sysctl_tcp_ecn_fallback = 1;
3189
3190	net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
3191	net->ipv4.sysctl_tcp_min_snd_mss = TCP_MIN_SND_MSS;
3192	net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
3193	net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;
3194	net->ipv4.sysctl_tcp_mtu_probe_floor = TCP_MIN_SND_MSS;
3195
3196	net->ipv4.sysctl_tcp_keepalive_time = TCP_KEEPALIVE_TIME;
3197	net->ipv4.sysctl_tcp_keepalive_probes = TCP_KEEPALIVE_PROBES;
3198	net->ipv4.sysctl_tcp_keepalive_intvl = TCP_KEEPALIVE_INTVL;
3199
3200	net->ipv4.sysctl_tcp_syn_retries = TCP_SYN_RETRIES;
3201	net->ipv4.sysctl_tcp_synack_retries = TCP_SYNACK_RETRIES;
3202	net->ipv4.sysctl_tcp_syncookies = 1;
3203	net->ipv4.sysctl_tcp_reordering = TCP_FASTRETRANS_THRESH;
3204	net->ipv4.sysctl_tcp_retries1 = TCP_RETR1;
3205	net->ipv4.sysctl_tcp_retries2 = TCP_RETR2;
3206	net->ipv4.sysctl_tcp_orphan_retries = 0;
3207	net->ipv4.sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
3208	net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX;
3209	net->ipv4.sysctl_tcp_tw_reuse = 2;
3210	net->ipv4.sysctl_tcp_no_ssthresh_metrics_save = 1;
3211
3212	refcount_set(&net->ipv4.tcp_death_row.tw_refcount, 1);
3213	tcp_set_hashinfo(net);
3214
3215	net->ipv4.sysctl_tcp_sack = 1;
3216	net->ipv4.sysctl_tcp_window_scaling = 1;
3217	net->ipv4.sysctl_tcp_timestamps = 1;
3218	net->ipv4.sysctl_tcp_early_retrans = 3;
3219	net->ipv4.sysctl_tcp_recovery = TCP_RACK_LOSS_DETECTION;
3220	net->ipv4.sysctl_tcp_slow_start_after_idle = 1; /* By default, RFC2861 behavior.  */
3221	net->ipv4.sysctl_tcp_retrans_collapse = 1;
3222	net->ipv4.sysctl_tcp_max_reordering = 300;
3223	net->ipv4.sysctl_tcp_dsack = 1;
3224	net->ipv4.sysctl_tcp_app_win = 31;
3225	net->ipv4.sysctl_tcp_adv_win_scale = 1;
3226	net->ipv4.sysctl_tcp_frto = 2;
3227	net->ipv4.sysctl_tcp_moderate_rcvbuf = 1;
3228	/* This limits the percentage of the congestion window which we
3229	 * will allow a single TSO frame to consume.  Building TSO frames
3230	 * which are too large can cause TCP streams to be bursty.
3231	 */
3232	net->ipv4.sysctl_tcp_tso_win_divisor = 3;
3233	/* Default TSQ limit of 16 TSO segments */
3234	net->ipv4.sysctl_tcp_limit_output_bytes = 16 * 65536;
3235
3236	/* rfc5961 challenge ack rate limiting, per net-ns, disabled by default. */
3237	net->ipv4.sysctl_tcp_challenge_ack_limit = INT_MAX;
3238
3239	net->ipv4.sysctl_tcp_min_tso_segs = 2;
3240	net->ipv4.sysctl_tcp_tso_rtt_log = 9;  /* 2^9 = 512 usec */
3241	net->ipv4.sysctl_tcp_min_rtt_wlen = 300;
3242	net->ipv4.sysctl_tcp_autocorking = 1;
3243	net->ipv4.sysctl_tcp_invalid_ratelimit = HZ/2;
3244	net->ipv4.sysctl_tcp_pacing_ss_ratio = 200;
3245	net->ipv4.sysctl_tcp_pacing_ca_ratio = 120;
3246	if (net != &init_net) {
3247		memcpy(net->ipv4.sysctl_tcp_rmem,
3248		       init_net.ipv4.sysctl_tcp_rmem,
3249		       sizeof(init_net.ipv4.sysctl_tcp_rmem));
3250		memcpy(net->ipv4.sysctl_tcp_wmem,
3251		       init_net.ipv4.sysctl_tcp_wmem,
3252		       sizeof(init_net.ipv4.sysctl_tcp_wmem));
3253	}
3254	net->ipv4.sysctl_tcp_comp_sack_delay_ns = NSEC_PER_MSEC;
3255	net->ipv4.sysctl_tcp_comp_sack_slack_ns = 100 * NSEC_PER_USEC;
3256	net->ipv4.sysctl_tcp_comp_sack_nr = 44;
3257	net->ipv4.sysctl_tcp_fastopen = TFO_CLIENT_ENABLE;
3258	net->ipv4.sysctl_tcp_fastopen_blackhole_timeout = 0;
3259	atomic_set(&net->ipv4.tfo_active_disable_times, 0);
3260
3261	/* Set default values for PLB */
3262	net->ipv4.sysctl_tcp_plb_enabled = 0; /* Disabled by default */
3263	net->ipv4.sysctl_tcp_plb_idle_rehash_rounds = 3;
3264	net->ipv4.sysctl_tcp_plb_rehash_rounds = 12;
3265	net->ipv4.sysctl_tcp_plb_suspend_rto_sec = 60;
3266	/* Default congestion threshold for PLB to mark a round is 50% */
3267	net->ipv4.sysctl_tcp_plb_cong_thresh = (1 << TCP_PLB_SCALE) / 2;
3268
3269	/* Reno is always built in */
3270	if (!net_eq(net, &init_net) &&
3271	    bpf_try_module_get(init_net.ipv4.tcp_congestion_control,
3272			       init_net.ipv4.tcp_congestion_control->owner))
3273		net->ipv4.tcp_congestion_control = init_net.ipv4.tcp_congestion_control;
3274	else
3275		net->ipv4.tcp_congestion_control = &tcp_reno;
3276
3277	return 0;
3278}
3279
3280static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
3281{
3282	struct net *net;
3283
3284	tcp_twsk_purge(net_exit_list, AF_INET);
3285
3286	list_for_each_entry(net, net_exit_list, exit_list) {
3287		inet_pernet_hashinfo_free(net->ipv4.tcp_death_row.hashinfo);
3288		WARN_ON_ONCE(!refcount_dec_and_test(&net->ipv4.tcp_death_row.tw_refcount));
3289		tcp_fastopen_ctx_destroy(net);
3290	}
3291}
3292
3293static struct pernet_operations __net_initdata tcp_sk_ops = {
3294       .init	   = tcp_sk_init,
3295       .exit	   = tcp_sk_exit,
3296       .exit_batch = tcp_sk_exit_batch,
3297};
3298
3299#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
3300DEFINE_BPF_ITER_FUNC(tcp, struct bpf_iter_meta *meta,
3301		     struct sock_common *sk_common, uid_t uid)
3302
3303#define INIT_BATCH_SZ 16
3304
3305static int bpf_iter_init_tcp(void *priv_data, struct bpf_iter_aux_info *aux)
3306{
3307	struct bpf_tcp_iter_state *iter = priv_data;
3308	int err;
3309
3310	err = bpf_iter_init_seq_net(priv_data, aux);
3311	if (err)
3312		return err;
3313
3314	err = bpf_iter_tcp_realloc_batch(iter, INIT_BATCH_SZ);
3315	if (err) {
3316		bpf_iter_fini_seq_net(priv_data);
3317		return err;
3318	}
3319
3320	return 0;
3321}
3322
3323static void bpf_iter_fini_tcp(void *priv_data)
3324{
3325	struct bpf_tcp_iter_state *iter = priv_data;
3326
3327	bpf_iter_fini_seq_net(priv_data);
3328	kvfree(iter->batch);
3329}
3330
3331static const struct bpf_iter_seq_info tcp_seq_info = {
3332	.seq_ops		= &bpf_iter_tcp_seq_ops,
3333	.init_seq_private	= bpf_iter_init_tcp,
3334	.fini_seq_private	= bpf_iter_fini_tcp,
3335	.seq_priv_size		= sizeof(struct bpf_tcp_iter_state),
3336};
3337
3338static const struct bpf_func_proto *
3339bpf_iter_tcp_get_func_proto(enum bpf_func_id func_id,
3340			    const struct bpf_prog *prog)
3341{
3342	switch (func_id) {
3343	case BPF_FUNC_setsockopt:
3344		return &bpf_sk_setsockopt_proto;
3345	case BPF_FUNC_getsockopt:
3346		return &bpf_sk_getsockopt_proto;
3347	default:
3348		return NULL;
3349	}
3350}
3351
3352static struct bpf_iter_reg tcp_reg_info = {
3353	.target			= "tcp",
3354	.ctx_arg_info_size	= 1,
3355	.ctx_arg_info		= {
3356		{ offsetof(struct bpf_iter__tcp, sk_common),
3357		  PTR_TO_BTF_ID_OR_NULL },
3358	},
3359	.get_func_proto		= bpf_iter_tcp_get_func_proto,
3360	.seq_info		= &tcp_seq_info,
3361};
3362
3363static void __init bpf_iter_register(void)
3364{
3365	tcp_reg_info.ctx_arg_info[0].btf_id = btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON];
3366	if (bpf_iter_reg_target(&tcp_reg_info))
3367		pr_warn("Warning: could not register bpf iterator tcp\n");
3368}
3369
3370#endif
3371
3372void __init tcp_v4_init(void)
3373{
3374	int cpu, res;
3375
3376	for_each_possible_cpu(cpu) {
3377		struct sock *sk;
3378
3379		res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
3380					   IPPROTO_TCP, &init_net);
3381		if (res)
3382			panic("Failed to create the TCP control socket.\n");
3383		sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
3384
3385		/* Please enforce IP_DF and IPID==0 for RST and
3386		 * ACK sent in SYN-RECV and TIME-WAIT state.
3387		 */
3388		inet_sk(sk)->pmtudisc = IP_PMTUDISC_DO;
3389
3390		per_cpu(ipv4_tcp_sk, cpu) = sk;
3391	}
3392	if (register_pernet_subsys(&tcp_sk_ops))
3393		panic("Failed to create the TCP control socket.\n");
3394
3395#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
3396	bpf_iter_register();
3397#endif
3398}
  52
  53#define pr_fmt(fmt) "TCP: " fmt
  54
  55#include <linux/bottom_half.h>
  56#include <linux/types.h>
  57#include <linux/fcntl.h>
  58#include <linux/module.h>
  59#include <linux/random.h>
  60#include <linux/cache.h>
  61#include <linux/jhash.h>
  62#include <linux/init.h>
  63#include <linux/times.h>
  64#include <linux/slab.h>
  65
  66#include <net/net_namespace.h>
  67#include <net/icmp.h>
  68#include <net/inet_hashtables.h>
  69#include <net/tcp.h>
  70#include <net/transp_v6.h>
  71#include <net/ipv6.h>
  72#include <net/inet_common.h>
  73#include <net/timewait_sock.h>
  74#include <net/xfrm.h>
  75#include <net/secure_seq.h>
  76#include <net/busy_poll.h>
  77
  78#include <linux/inet.h>
  79#include <linux/ipv6.h>
  80#include <linux/stddef.h>
  81#include <linux/proc_fs.h>
  82#include <linux/seq_file.h>
 
 
  83
  84#include <crypto/hash.h>
  85#include <linux/scatterlist.h>
  86
  87int sysctl_tcp_low_latency __read_mostly;
  88
  89#ifdef CONFIG_TCP_MD5SIG
  90static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
  91			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
  92#endif
  93
  94struct inet_hashinfo tcp_hashinfo;
  95EXPORT_SYMBOL(tcp_hashinfo);
  96
  97static u32 tcp_v4_init_sequence(const struct sk_buff *skb, u32 *tsoff)
 
 
 
 
 
 
 
 
 
 
  98{
  99	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
 100					  ip_hdr(skb)->saddr,
 101					  tcp_hdr(skb)->dest,
 102					  tcp_hdr(skb)->source, tsoff);
 103}
 104
 105int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
 106{
 
 
 107	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
 108	struct tcp_sock *tp = tcp_sk(sk);
 109
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 110	/* With PAWS, it is safe from the viewpoint
 111	   of data integrity. Even without PAWS it is safe provided sequence
 112	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.
 113
 114	   Actually, the idea is close to VJ's one, only timestamp cache is
 115	   held not per host, but per port pair and TW bucket is used as state
 116	   holder.
 117
 118	   If TW bucket has been already destroyed we fall back to VJ's scheme
 119	   and use initial timestamp retrieved from peer table.
 120	 */
 121	if (tcptw->tw_ts_recent_stamp &&
 122	    (!twp || (sock_net(sk)->ipv4.sysctl_tcp_tw_reuse &&
 123			     get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
 124		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
 125		if (tp->write_seq == 0)
 126			tp->write_seq = 1;
 127		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
 128		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 129		sock_hold(sktw);
 130		return 1;
 131	}
 132
 133	return 0;
 134}
 135EXPORT_SYMBOL_GPL(tcp_twsk_unique);
 136
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 137/* This will initiate an outgoing connection. */
 138int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 139{
 140	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
 
 141	struct inet_sock *inet = inet_sk(sk);
 142	struct tcp_sock *tp = tcp_sk(sk);
 
 
 143	__be16 orig_sport, orig_dport;
 144	__be32 daddr, nexthop;
 145	struct flowi4 *fl4;
 146	struct rtable *rt;
 147	int err;
 148	struct ip_options_rcu *inet_opt;
 149
 150	if (addr_len < sizeof(struct sockaddr_in))
 151		return -EINVAL;
 152
 153	if (usin->sin_family != AF_INET)
 154		return -EAFNOSUPPORT;
 155
 156	nexthop = daddr = usin->sin_addr.s_addr;
 157	inet_opt = rcu_dereference_protected(inet->inet_opt,
 158					     lockdep_sock_is_held(sk));
 159	if (inet_opt && inet_opt->opt.srr) {
 160		if (!daddr)
 161			return -EINVAL;
 162		nexthop = inet_opt->opt.faddr;
 163	}
 164
 165	orig_sport = inet->inet_sport;
 166	orig_dport = usin->sin_port;
 167	fl4 = &inet->cork.fl.u.ip4;
 168	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
 169			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
 170			      IPPROTO_TCP,
 171			      orig_sport, orig_dport, sk);
 172	if (IS_ERR(rt)) {
 173		err = PTR_ERR(rt);
 174		if (err == -ENETUNREACH)
 175			IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
 176		return err;
 177	}
 178
 179	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
 180		ip_rt_put(rt);
 181		return -ENETUNREACH;
 182	}
 183
 184	if (!inet_opt || !inet_opt->opt.srr)
 185		daddr = fl4->daddr;
 186
 187	if (!inet->inet_saddr)
 188		inet->inet_saddr = fl4->saddr;
 189	sk_rcv_saddr_set(sk, inet->inet_saddr);
 
 
 
 
 
 
 
 
 190
 191	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
 192		/* Reset inherited state */
 193		tp->rx_opt.ts_recent	   = 0;
 194		tp->rx_opt.ts_recent_stamp = 0;
 195		if (likely(!tp->repair))
 196			tp->write_seq	   = 0;
 197	}
 198
 199	if (tcp_death_row.sysctl_tw_recycle &&
 200	    !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
 201		tcp_fetch_timewait_stamp(sk, &rt->dst);
 202
 203	inet->inet_dport = usin->sin_port;
 204	sk_daddr_set(sk, daddr);
 205
 206	inet_csk(sk)->icsk_ext_hdr_len = 0;
 207	if (inet_opt)
 208		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
 209
 210	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;
 211
 212	/* Socket identity is still unknown (sport may be zero).
 213	 * However we set state to SYN-SENT and not releasing socket
 214	 * lock select source port, enter ourselves into the hash tables and
 215	 * complete initialization after this.
 216	 */
 217	tcp_set_state(sk, TCP_SYN_SENT);
 218	err = inet_hash_connect(&tcp_death_row, sk);
 219	if (err)
 220		goto failure;
 221
 222	sk_set_txhash(sk);
 223
 224	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
 225			       inet->inet_sport, inet->inet_dport, sk);
 226	if (IS_ERR(rt)) {
 227		err = PTR_ERR(rt);
 228		rt = NULL;
 229		goto failure;
 230	}
 231	/* OK, now commit destination to socket.  */
 232	sk->sk_gso_type = SKB_GSO_TCPV4;
 233	sk_setup_caps(sk, &rt->dst);
 
 234
 235	if (!tp->write_seq && likely(!tp->repair))
 236		tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
 237							   inet->inet_daddr,
 238							   inet->inet_sport,
 239							   usin->sin_port,
 240							   &tp->tsoffset);
 
 
 
 
 241
 242	inet->inet_id = tp->write_seq ^ jiffies;
 
 
 
 
 
 243
 244	err = tcp_connect(sk);
 245
 246	rt = NULL;
 247	if (err)
 248		goto failure;
 249
 250	return 0;
 251
 252failure:
 253	/*
 254	 * This unhashes the socket and releases the local port,
 255	 * if necessary.
 256	 */
 257	tcp_set_state(sk, TCP_CLOSE);
 
 258	ip_rt_put(rt);
 259	sk->sk_route_caps = 0;
 260	inet->inet_dport = 0;
 261	return err;
 262}
 263EXPORT_SYMBOL(tcp_v4_connect);
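/*
 * Illustrative user-space sketch of how tcp_v4_connect() above is reached;
 * the in-kernel call chain (approximately sys_connect() ->
 * inet_stream_connect() -> sk->sk_prot->connect) varies between versions.
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	struct sockaddr_in dst = {
 *		.sin_family = AF_INET,
 *		.sin_port   = htons(80),
 *	};
 *
 *	inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);
 *	connect(fd, (struct sockaddr *)&dst, sizeof(dst));
 */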
 264
 265/*
 266 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
 267 * It can be called through tcp_release_cb() if the socket was owned by user
 268 * at the time tcp_v4_err() was called to handle the ICMP message.
 269 */
 270void tcp_v4_mtu_reduced(struct sock *sk)
 271{
 272	struct inet_sock *inet = inet_sk(sk);
 273	struct dst_entry *dst;
 274	u32 mtu;
 275
 276	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
 277		return;
 278	mtu = tcp_sk(sk)->mtu_info;
 279	dst = inet_csk_update_pmtu(sk, mtu);
 280	if (!dst)
 281		return;
 282
 283	/* Something is about to go wrong... Remember the soft error
 284	 * in case this connection is not able to recover.
 285	 */
 286	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
 287		sk->sk_err_soft = EMSGSIZE;
 288
 289	mtu = dst_mtu(dst);
 290
 291	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
 292	    ip_sk_accept_pmtu(sk) &&
 293	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
 294		tcp_sync_mss(sk, mtu);
 295
 296		/* Resend the TCP packet because it's
 297		 * clear that the old packet has been
 298		 * dropped. This is the new "fast" path mtu
 299		 * discovery.
 300		 */
 301		tcp_simple_retransmit(sk);
 302	} /* else let the usual retransmit timer handle it */
 303}
 304EXPORT_SYMBOL(tcp_v4_mtu_reduced);
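/*
 * For reference: the deferral mentioned above works by tcp_v4_err() storing
 * the ICMP-reported MTU in tp->mtu_info and setting TCP_MTU_REDUCED_DEFERRED
 * in sk->sk_tsq_flags, so that tcp_release_cb() can invoke this handler once
 * the socket lock is released (see the ICMP_FRAG_NEEDED branch further down).
 */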
 305
 306static void do_redirect(struct sk_buff *skb, struct sock *sk)
 307{
 308	struct dst_entry *dst = __sk_dst_check(sk, 0);
 309
 310	if (dst)
 311		dst->ops->redirect(dst, sk, skb);
 312}
 313
 314
 315/* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
 316void tcp_req_err(struct sock *sk, u32 seq, bool abort)
 317{
 318	struct request_sock *req = inet_reqsk(sk);
 319	struct net *net = sock_net(sk);
 320
 321	/* ICMPs are not backlogged, hence we cannot get
 322	 * an established socket here.
 323	 */
 324	if (seq != tcp_rsk(req)->snt_isn) {
 325		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
 326	} else if (abort) {
 327		/*
 328		 * Still in SYN_RECV, just remove it silently.
 329		 * There is no good way to pass the error to the newly
 330		 * created socket, and POSIX does not want network
 331		 * errors returned from accept().
 332		 */
 333		inet_csk_reqsk_queue_drop(req->rsk_listener, req);
 334		tcp_listendrop(req->rsk_listener);
 335	}
 336	reqsk_put(req);
 337}
 338EXPORT_SYMBOL(tcp_req_err);
 339
 340/*
 341 * This routine is called by the ICMP module when it gets some
 342 * sort of error condition.  If err < 0 then the socket should
 343 * be closed and the error returned to the user.  If err > 0
 344 * it's just the icmp type << 8 | icmp code.  After adjustment
 345 * header points to the first 8 bytes of the tcp header.  We need
 346 * to find the appropriate port.
 347 *
 348 * The locking strategy used here is very "optimistic". When
 349 * someone else accesses the socket the ICMP is just dropped
 350 * and for some paths there is no check at all.
 351 * A more general error queue to queue errors for later handling
 352 * is probably better.
 353 *
 354 */
 355
 356void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
 357{
 358	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
 359	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
 360	struct inet_connection_sock *icsk;
 361	struct tcp_sock *tp;
 362	struct inet_sock *inet;
 363	const int type = icmp_hdr(icmp_skb)->type;
 364	const int code = icmp_hdr(icmp_skb)->code;
 365	struct sock *sk;
 366	struct sk_buff *skb;
 367	struct request_sock *fastopen;
 368	__u32 seq, snd_una;
 369	__u32 remaining;
 370	int err;
 371	struct net *net = dev_net(icmp_skb->dev);
 372
 373	sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
 374				       th->dest, iph->saddr, ntohs(th->source),
 375				       inet_iif(icmp_skb));
 376	if (!sk) {
 377		__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
 378		return;
 379	}
 380	if (sk->sk_state == TCP_TIME_WAIT) {
 381		inet_twsk_put(inet_twsk(sk));
 382		return;
 383	}
 384	seq = ntohl(th->seq);
 385	if (sk->sk_state == TCP_NEW_SYN_RECV)
 386		return tcp_req_err(sk, seq,
 387				  type == ICMP_PARAMETERPROB ||
 388				  type == ICMP_TIME_EXCEEDED ||
 389				  (type == ICMP_DEST_UNREACH &&
 390				   (code == ICMP_NET_UNREACH ||
 391				    code == ICMP_HOST_UNREACH)));
 
 392
 393	bh_lock_sock(sk);
 394	/* If too many ICMPs get dropped on busy
 395	 * servers this needs to be solved differently.
 396	 * We do take care of the PMTU discovery (RFC1191) special case:
 397	 * we can receive locally generated ICMP messages while socket is held.
 398	 */
 399	if (sock_owned_by_user(sk)) {
 400		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
 401			__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
 402	}
 403	if (sk->sk_state == TCP_CLOSE)
 404		goto out;
 405
 406	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
 407		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
 408		goto out;
 
 
 
 409	}
 410
 411	icsk = inet_csk(sk);
 412	tp = tcp_sk(sk);
 413	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
 414	fastopen = tp->fastopen_rsk;
 415	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
 416	if (sk->sk_state != TCP_LISTEN &&
 417	    !between(seq, snd_una, tp->snd_nxt)) {
 418		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
 419		goto out;
 420	}
 421
 422	switch (type) {
 423	case ICMP_REDIRECT:
 424		if (!sock_owned_by_user(sk))
 425			do_redirect(icmp_skb, sk);
 426		goto out;
 427	case ICMP_SOURCE_QUENCH:
 428		/* Just silently ignore these. */
 429		goto out;
 430	case ICMP_PARAMETERPROB:
 431		err = EPROTO;
 432		break;
 433	case ICMP_DEST_UNREACH:
 434		if (code > NR_ICMP_UNREACH)
 435			goto out;
 436
 437		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
 438			/* We are not interested in TCP_LISTEN and open_requests
 439			 * (SYN-ACKs sent out by Linux are always < 576 bytes so
 440			 * they should go through unfragmented).
 441			 */
 442			if (sk->sk_state == TCP_LISTEN)
 443				goto out;
 444
 445			tp->mtu_info = info;
 446			if (!sock_owned_by_user(sk)) {
 447				tcp_v4_mtu_reduced(sk);
 448			} else {
 449				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &sk->sk_tsq_flags))
 450					sock_hold(sk);
 451			}
 452			goto out;
 453		}
 454
 455		err = icmp_err_convert[code].errno;
 456		/* check if icmp_skb allows revert of backoff
 457		 * (see draft-zimmermann-tcp-lcd) */
 458		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
 459			break;
 460		if (seq != tp->snd_una  || !icsk->icsk_retransmits ||
 461		    !icsk->icsk_backoff || fastopen)
 462			break;
 463
 464		if (sock_owned_by_user(sk))
 465			break;
 466
 467		icsk->icsk_backoff--;
 468		icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
 469					       TCP_TIMEOUT_INIT;
 470		icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
 471
 472		skb = tcp_write_queue_head(sk);
 473		BUG_ON(!skb);
 474
 475		remaining = icsk->icsk_rto -
 476			    min(icsk->icsk_rto,
 477				tcp_time_stamp - tcp_skb_timestamp(skb));
 478
 479		if (remaining) {
 480			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
 481						  remaining, TCP_RTO_MAX);
 482		} else {
 483			/* RTO revert clocked out retransmission.
 484			 * Will retransmit now */
 485			tcp_retransmit_timer(sk);
 486		}
 487
 488		break;
 489	case ICMP_TIME_EXCEEDED:
 490		err = EHOSTUNREACH;
 491		break;
 492	default:
 493		goto out;
 494	}
 495
 496	switch (sk->sk_state) {
 497	case TCP_SYN_SENT:
 498	case TCP_SYN_RECV:
 499		/* Only in fast or simultaneous open. If a fast open socket
 500		 * is already accepted it is treated as a connected one below.
 501		 */
 502		if (fastopen && !fastopen->sk)
 503			break;
 504
 
 
 505		if (!sock_owned_by_user(sk)) {
 506			sk->sk_err = err;
 507
 508			sk->sk_error_report(sk);
 509
 510			tcp_done(sk);
 511		} else {
 512			sk->sk_err_soft = err;
 513		}
 514		goto out;
 515	}
 516
 517	/* If we've already connected we will keep trying
 518	 * until we time out, or the user gives up.
 519	 *
 520	 * rfc1122 4.2.3.9 allows us to consider as hard errors
 521	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
 522	 * but it is obsoleted by pmtu discovery).
 523	 *
 524	 * Note that in the modern internet, where routing is unreliable
 525	 * and broken firewalls sit in every dark corner, sending random
 526	 * errors ordered by their masters, even these two messages finally lose
 527	 * their original sense (even Linux sends invalid PORT_UNREACHs)
 528	 *
 529	 * Now we are in compliance with RFCs.
 530	 *							--ANK (980905)
 531	 */
 532
 533	inet = inet_sk(sk);
 534	if (!sock_owned_by_user(sk) && inet->recverr) {
 535		sk->sk_err = err;
 536		sk->sk_error_report(sk);
 537	} else	{ /* Only an error on timeout */
 538		sk->sk_err_soft = err;
 539	}
 540
 541out:
 542	bh_unlock_sock(sk);
 543	sock_put(sk);
 
 544}
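/*
 * Rough worked example of the RTO revert above (draft-zimmermann-tcp-lcd):
 * the backoff counter is decreased by one, the RTO is recomputed from srtt
 * and the remaining backoff, and only the unexpired part of that interval is
 * re-armed. For instance, with a recomputed RTO of 800 ms and 300 ms already
 * elapsed since the head skb was last (re)transmitted, the retransmit timer
 * is reset to fire in 500 ms; if the interval has already expired, the
 * segment is retransmitted immediately.
 */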
 545
 546void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
 547{
 548	struct tcphdr *th = tcp_hdr(skb);
 549
 550	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 551		th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
 552		skb->csum_start = skb_transport_header(skb) - skb->head;
 553		skb->csum_offset = offsetof(struct tcphdr, check);
 554	} else {
 555		th->check = tcp_v4_check(skb->len, saddr, daddr,
 556					 csum_partial(th,
 557						      th->doff << 2,
 558						      skb->csum));
 559	}
 560}
 561
 562/* This routine computes an IPv4 TCP checksum. */
 563void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
 564{
 565	const struct inet_sock *inet = inet_sk(sk);
 566
 567	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
 568}
 569EXPORT_SYMBOL(tcp_v4_send_check);
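/*
 * For reference, the checksum above is the standard one's-complement TCP
 * checksum over the IPv4 pseudo-header (saddr, daddr, zero, IPPROTO_TCP,
 * TCP length) followed by the TCP header and payload. In the
 * CHECKSUM_PARTIAL case only the pseudo-header sum is folded in here, and
 * the device (or the skb_checksum_help() software fallback) typically
 * completes the rest using csum_start/csum_offset.
 */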
 570
 571/*
 572 *	This routine will send an RST to the other tcp.
 573 *
 574 *	Someone asks: why do I NEVER use socket parameters (TOS, TTL etc.)
 575 *		      for the reset?
 576 *	Answer: if a packet caused an RST, it is not for a socket
 577 *		existing in our system; if it is matched to a socket,
 578 *		it is just a duplicate segment or a bug in the other side's TCP.
 579 *		So we build the reply based only on parameters that
 580 *		arrived with the segment.
 581 *	Exception: precedence violation. We do not implement it in any case.
 582 */
 583
 584static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
 585{
 586	const struct tcphdr *th = tcp_hdr(skb);
 587	struct {
 588		struct tcphdr th;
 589#ifdef CONFIG_TCP_MD5SIG
 590		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
 591#endif
 592	} rep;
 593	struct ip_reply_arg arg;
 594#ifdef CONFIG_TCP_MD5SIG
 595	struct tcp_md5sig_key *key = NULL;
 596	const __u8 *hash_location = NULL;
 597	unsigned char newhash[16];
 598	int genhash;
 599	struct sock *sk1 = NULL;
 600#endif
 
 
 601	struct net *net;
 602
 603	/* Never send a reset in response to a reset. */
 604	if (th->rst)
 605		return;
 606
 607	/* If sk not NULL, it means we did a successful lookup and incoming
 608	 * route had to be correct. prequeue might have dropped our dst.
 609	 */
 610	if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
 611		return;
 612
 613	/* Swap the send and the receive. */
 614	memset(&rep, 0, sizeof(rep));
 615	rep.th.dest   = th->source;
 616	rep.th.source = th->dest;
 617	rep.th.doff   = sizeof(struct tcphdr) / 4;
 618	rep.th.rst    = 1;
 619
 620	if (th->ack) {
 621		rep.th.seq = th->ack_seq;
 622	} else {
 623		rep.th.ack = 1;
 624		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
 625				       skb->len - (th->doff << 2));
 626	}
 627
 628	memset(&arg, 0, sizeof(arg));
 629	arg.iov[0].iov_base = (unsigned char *)&rep;
 630	arg.iov[0].iov_len  = sizeof(rep.th);
 631
 632	net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
 633#ifdef CONFIG_TCP_MD5SIG
 634	rcu_read_lock();
 635	hash_location = tcp_parse_md5sig_option(th);
 636	if (sk && sk_fullsock(sk)) {
 637		key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
 638					&ip_hdr(skb)->saddr, AF_INET);
 639	} else if (hash_location) {
 640		/*
 641		 * active side is lost. Try to find listening socket through
 642		 * source port, and then find md5 key through listening socket.
 643		 * we do not lose security here:
 644		 * Incoming packet is checked with the md5 hash of the found key;
 645		 * no RST generated if md5 hash doesn't match.
 646		 */
 647		sk1 = __inet_lookup_listener(net, &tcp_hashinfo, NULL, 0,
 648					     ip_hdr(skb)->saddr,
 649					     th->source, ip_hdr(skb)->daddr,
 650					     ntohs(th->source), inet_iif(skb));
 651		/* don't send rst if it can't find key */
 652		if (!sk1)
 653			goto out;
 654
 655		key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
 656					&ip_hdr(skb)->saddr, AF_INET);
 657		if (!key)
 658			goto out;
 659
 660
 661		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
 662		if (genhash || memcmp(hash_location, newhash, 16) != 0)
 663			goto out;
 664
 665	}
 666
 667	if (key) {
 668		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
 669				   (TCPOPT_NOP << 16) |
 670				   (TCPOPT_MD5SIG << 8) |
 671				   TCPOLEN_MD5SIG);
 672		/* Update length and the length the header thinks exists */
 673		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
 674		rep.th.doff = arg.iov[0].iov_len / 4;
 675
 676		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
 677				     key, ip_hdr(skb)->saddr,
 678				     ip_hdr(skb)->daddr, &rep.th);
 679	}
 680#endif
 681	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
 682				      ip_hdr(skb)->saddr, /* XXX */
 683				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
 684	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
 685	arg.flags = (sk && inet_sk_transparent(sk)) ? IP_REPLY_ARG_NOSRCCHECK : 0;
 686
 687	/* When the socket is gone, all binding information is lost.
 688	 * Routing might fail in this case. No choice here: if we choose to force
 689	 * the input interface, we will misroute in case of an asymmetric route.
 690	 */
 691	if (sk)
 692		arg.bound_dev_if = sk->sk_bound_dev_if;
 
 
 
 693
 694	BUILD_BUG_ON(offsetof(struct sock, sk_bound_dev_if) !=
 695		     offsetof(struct inet_timewait_sock, tw_bound_dev_if));
 696
 697	arg.tos = ip_hdr(skb)->tos;
 698	arg.uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
 699	local_bh_disable();
 700	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
 701			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
 702			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
 703			      &arg, arg.iov[0].iov_len);
 
 704
 
 
 
 705	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
 706	__TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
 707	local_bh_enable();
 708
 709#ifdef CONFIG_TCP_MD5SIG
 710out:
 711	rcu_read_unlock();
 712#endif
 713}
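/*
 * Layout note for the MD5 case above: the signature option is emitted as
 * NOP, NOP, TCPOPT_MD5SIG, length TCPOLEN_MD5SIG (18), followed by the
 * 16-byte digest, i.e. one 20-byte (TCPOLEN_MD5SIG_ALIGNED) block appended
 * after the bare TCP header, which is why arg.iov[0].iov_len grows by
 * TCPOLEN_MD5SIG_ALIGNED and rep.th.doff is recomputed from it.
 */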
 714
 715/* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
 716   outside socket context, is ugly, certainly. What can I do?
 717 */
 718
 719static void tcp_v4_send_ack(const struct sock *sk,
 720			    struct sk_buff *skb, u32 seq, u32 ack,
 721			    u32 win, u32 tsval, u32 tsecr, int oif,
 722			    struct tcp_md5sig_key *key,
 723			    int reply_flags, u8 tos)
 724{
 725	const struct tcphdr *th = tcp_hdr(skb);
 726	struct {
 727		struct tcphdr th;
 728		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
 729#ifdef CONFIG_TCP_MD5SIG
 730			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
 731#endif
 732			];
 733	} rep;
 734	struct net *net = sock_net(sk);
 735	struct ip_reply_arg arg;
 
 
 736
 737	memset(&rep.th, 0, sizeof(struct tcphdr));
 738	memset(&arg, 0, sizeof(arg));
 739
 740	arg.iov[0].iov_base = (unsigned char *)&rep;
 741	arg.iov[0].iov_len  = sizeof(rep.th);
 742	if (tsecr) {
 743		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
 744				   (TCPOPT_TIMESTAMP << 8) |
 745				   TCPOLEN_TIMESTAMP);
 746		rep.opt[1] = htonl(tsval);
 747		rep.opt[2] = htonl(tsecr);
 748		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
 749	}
 750
 751	/* Swap the send and the receive. */
 752	rep.th.dest    = th->source;
 753	rep.th.source  = th->dest;
 754	rep.th.doff    = arg.iov[0].iov_len / 4;
 755	rep.th.seq     = htonl(seq);
 756	rep.th.ack_seq = htonl(ack);
 757	rep.th.ack     = 1;
 758	rep.th.window  = htons(win);
 759
 760#ifdef CONFIG_TCP_MD5SIG
 761	if (key) {
 762		int offset = (tsecr) ? 3 : 0;
 763
 764		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
 765					  (TCPOPT_NOP << 16) |
 766					  (TCPOPT_MD5SIG << 8) |
 767					  TCPOLEN_MD5SIG);
 768		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
 769		rep.th.doff = arg.iov[0].iov_len/4;
 770
 771		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
 772				    key, ip_hdr(skb)->saddr,
 773				    ip_hdr(skb)->daddr, &rep.th);
 774	}
 775#endif
 776	arg.flags = reply_flags;
 777	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
 778				      ip_hdr(skb)->saddr, /* XXX */
 779				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
 780	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
 781	if (oif)
 782		arg.bound_dev_if = oif;
 783	arg.tos = tos;
 784	arg.uid = sock_net_uid(net, sk_fullsock(sk) ? sk : NULL);
 785	local_bh_disable();
 786	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
 787			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
 788			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
 789			      &arg, arg.iov[0].iov_len);
 
 790
 
 
 791	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
 792	local_bh_enable();
 793}
 794
 795static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
 796{
 797	struct inet_timewait_sock *tw = inet_twsk(sk);
 798	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
 799
 800	tcp_v4_send_ack(sk, skb,
 801			tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
 802			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
 803			tcp_time_stamp + tcptw->tw_ts_offset,
 804			tcptw->tw_ts_recent,
 805			tw->tw_bound_dev_if,
 806			tcp_twsk_md5_key(tcptw),
 807			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
 808			tw->tw_tos
 809			);
 810
 811	inet_twsk_put(tw);
 812}
 813
 814static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
 815				  struct request_sock *req)
 816{
 
 
 
 817	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
 818	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
 819	 */
 820	u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
 821					     tcp_sk(sk)->snd_nxt;
 822
 823	/* RFC 7323 2.3
 824	 * The window field (SEG.WND) of every outgoing segment, with the
 825	 * exception of <SYN> segments, MUST be right-shifted by
 826	 * Rcv.Wind.Shift bits:
 827	 */
 
 
 828	tcp_v4_send_ack(sk, skb, seq,
 829			tcp_rsk(req)->rcv_nxt,
 830			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
 831			tcp_time_stamp + tcp_rsk(req)->ts_off,
 832			req->ts_recent,
 833			0,
 834			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
 835					  AF_INET),
 836			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
 837			ip_hdr(skb)->tos);
 838}
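/*
 * Worked example for the RFC 7323 shift above: with a receive window of
 * 131072 bytes and rcv_wscale of 7, the ACK advertises 131072 >> 7 = 1024
 * in the 16-bit window field, and the peer scales it back up on receipt.
 */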
 839
 840/*
 841 *	Send a SYN-ACK after having received a SYN.
 842 *	This still operates on a request_sock only, not on a big
 843 *	socket.
 844 */
 845static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
 846			      struct flowi *fl,
 847			      struct request_sock *req,
 848			      struct tcp_fastopen_cookie *foc,
 849			      enum tcp_synack_type synack_type)
 
 850{
 851	const struct inet_request_sock *ireq = inet_rsk(req);
 852	struct flowi4 fl4;
 853	int err = -1;
 854	struct sk_buff *skb;
 
 855
 856	/* First, grab a route. */
 857	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
 858		return -1;
 859
 860	skb = tcp_make_synack(sk, dst, req, foc, synack_type);
 861
 862	if (skb) {
 863		__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
 864
 865		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
 866					    ireq->ir_rmt_addr,
 867					    ireq->opt);
 
 
 868		err = net_xmit_eval(err);
 869	}
 870
 871	return err;
 872}
 873
 874/*
 875 *	IPv4 request_sock destructor.
 876 */
 877static void tcp_v4_reqsk_destructor(struct request_sock *req)
 878{
 879	kfree(inet_rsk(req)->opt);
 880}
 881
 882#ifdef CONFIG_TCP_MD5SIG
 883/*
 884 * RFC2385 MD5 checksumming requires a mapping of
 885 * IP address->MD5 Key.
 886 * We need to maintain these in the sk structure.
 887 */
 888
 889/* Find the Key structure for an address.  */
 890struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
 891					 const union tcp_md5_addr *addr,
 892					 int family)
 893{
 894	const struct tcp_sock *tp = tcp_sk(sk);
 895	struct tcp_md5sig_key *key;
 896	unsigned int size = sizeof(struct in_addr);
 897	const struct tcp_md5sig_info *md5sig;
 898
 899	/* caller either holds rcu_read_lock() or socket lock */
 900	md5sig = rcu_dereference_check(tp->md5sig_info,
 901				       lockdep_sock_is_held(sk));
 902	if (!md5sig)
 903		return NULL;
 904#if IS_ENABLED(CONFIG_IPV6)
 905	if (family == AF_INET6)
 906		size = sizeof(struct in6_addr);
 907#endif
 908	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
 
 909		if (key->family != family)
 910			continue;
 911		if (!memcmp(&key->addr, addr, size))
 912			return key;
 913	}
 914	return NULL;
 915}
 916EXPORT_SYMBOL(tcp_md5_do_lookup);
 917
 918struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
 919					 const struct sock *addr_sk)
 920{
 921	const union tcp_md5_addr *addr;
 
 922
 
 
 923	addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
 924	return tcp_md5_do_lookup(sk, addr, AF_INET);
 925}
 926EXPORT_SYMBOL(tcp_v4_md5_lookup);
 927
 928/* This can be called on a newly created socket, from other files */
 929int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
 930		   int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
 
 931{
 932	/* Add Key to the list */
 933	struct tcp_md5sig_key *key;
 934	struct tcp_sock *tp = tcp_sk(sk);
 935	struct tcp_md5sig_info *md5sig;
 936
 937	key = tcp_md5_do_lookup(sk, addr, family);
 938	if (key) {
 939		/* Pre-existing entry - just update that one. */
 940		memcpy(key->key, newkey, newkeylen);
 941		key->keylen = newkeylen;
 942		return 0;
 943	}
 944
 945	md5sig = rcu_dereference_protected(tp->md5sig_info,
 946					   lockdep_sock_is_held(sk));
 947	if (!md5sig) {
 948		md5sig = kmalloc(sizeof(*md5sig), gfp);
 949		if (!md5sig)
 950			return -ENOMEM;
 951
 952		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
 953		INIT_HLIST_HEAD(&md5sig->head);
 954		rcu_assign_pointer(tp->md5sig_info, md5sig);
 955	}
 956
 957	key = sock_kmalloc(sk, sizeof(*key), gfp);
 958	if (!key)
 959		return -ENOMEM;
 960	if (!tcp_alloc_md5sig_pool()) {
 961		sock_kfree_s(sk, key, sizeof(*key));
 962		return -ENOMEM;
 963	}
 964
 965	memcpy(key->key, newkey, newkeylen);
 966	key->keylen = newkeylen;
 967	key->family = family;
 
 
 
 968	memcpy(&key->addr, addr,
 969	       (family == AF_INET6) ? sizeof(struct in6_addr) :
 970				      sizeof(struct in_addr));
 971	hlist_add_head_rcu(&key->node, &md5sig->head);
 972	return 0;
 973}
 974EXPORT_SYMBOL(tcp_md5_do_add);
 975
 976int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
 977{
 978	struct tcp_md5sig_key *key;
 979
 980	key = tcp_md5_do_lookup(sk, addr, family);
 981	if (!key)
 982		return -ENOENT;
 983	hlist_del_rcu(&key->node);
 984	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
 985	kfree_rcu(key, rcu);
 986	return 0;
 987}
 988EXPORT_SYMBOL(tcp_md5_do_del);
 989
 990static void tcp_clear_md5_list(struct sock *sk)
 991{
 992	struct tcp_sock *tp = tcp_sk(sk);
 993	struct tcp_md5sig_key *key;
 994	struct hlist_node *n;
 995	struct tcp_md5sig_info *md5sig;
 996
 997	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
 998
 999	hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
1000		hlist_del_rcu(&key->node);
1001		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1002		kfree_rcu(key, rcu);
1003	}
1004}
1005
1006static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
1007				 int optlen)
1008{
1009	struct tcp_md5sig cmd;
1010	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
1011
1012	if (optlen < sizeof(cmd))
1013		return -EINVAL;
1014
1015	if (copy_from_user(&cmd, optval, sizeof(cmd)))
1016		return -EFAULT;
1017
1018	if (sin->sin_family != AF_INET)
1019		return -EINVAL;
1020
1021	if (!cmd.tcpm_keylen)
1022		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1023				      AF_INET);
1024
1025	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
1026		return -EINVAL;
1027
1028	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1029			      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
1030			      GFP_KERNEL);
1031}
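/*
 * User-space sketch of how the handler above is reached (illustrative only;
 * field names are from the uapi struct tcp_md5sig, and fd is assumed to be
 * an existing AF_INET TCP socket): a key is installed per peer address with
 * setsockopt(TCP_MD5SIG), and a zero key length deletes it.
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *	struct sockaddr_in *peer = (struct sockaddr_in *)&md5.tcpm_addr;
 *
 *	peer->sin_family = AF_INET;
 *	inet_pton(AF_INET, "192.0.2.1", &peer->sin_addr);
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 */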
1032
1033static int tcp_v4_md5_hash_headers(struct tcp_md5sig_pool *hp,
1034				   __be32 daddr, __be32 saddr,
1035				   const struct tcphdr *th, int nbytes)
1036{
1037	struct tcp4_pseudohdr *bp;
1038	struct scatterlist sg;
1039	struct tcphdr *_th;
1040
1041	bp = hp->scratch;
1042	bp->saddr = saddr;
1043	bp->daddr = daddr;
1044	bp->pad = 0;
1045	bp->protocol = IPPROTO_TCP;
1046	bp->len = cpu_to_be16(nbytes);
1047
1048	_th = (struct tcphdr *)(bp + 1);
1049	memcpy(_th, th, sizeof(*th));
1050	_th->check = 0;
1051
1052	sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
1053	ahash_request_set_crypt(hp->md5_req, &sg, NULL,
1054				sizeof(*bp) + sizeof(*th));
1055	return crypto_ahash_update(hp->md5_req);
1056}
1057
1058static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
1059			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
1060{
1061	struct tcp_md5sig_pool *hp;
1062	struct ahash_request *req;
1063
1064	hp = tcp_get_md5sig_pool();
1065	if (!hp)
1066		goto clear_hash_noput;
1067	req = hp->md5_req;
1068
1069	if (crypto_ahash_init(req))
1070		goto clear_hash;
1071	if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
1072		goto clear_hash;
1073	if (tcp_md5_hash_key(hp, key))
1074		goto clear_hash;
1075	ahash_request_set_crypt(req, NULL, md5_hash, 0);
1076	if (crypto_ahash_final(req))
1077		goto clear_hash;
1078
1079	tcp_put_md5sig_pool();
1080	return 0;
1081
1082clear_hash:
1083	tcp_put_md5sig_pool();
1084clear_hash_noput:
1085	memset(md5_hash, 0, 16);
1086	return 1;
1087}
1088
1089int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
1090			const struct sock *sk,
1091			const struct sk_buff *skb)
1092{
1093	struct tcp_md5sig_pool *hp;
1094	struct ahash_request *req;
1095	const struct tcphdr *th = tcp_hdr(skb);
1096	__be32 saddr, daddr;
1097
1098	if (sk) { /* valid for establish/request sockets */
1099		saddr = sk->sk_rcv_saddr;
1100		daddr = sk->sk_daddr;
1101	} else {
1102		const struct iphdr *iph = ip_hdr(skb);
1103		saddr = iph->saddr;
1104		daddr = iph->daddr;
1105	}
1106
1107	hp = tcp_get_md5sig_pool();
1108	if (!hp)
1109		goto clear_hash_noput;
1110	req = hp->md5_req;
1111
1112	if (crypto_ahash_init(req))
1113		goto clear_hash;
1114
1115	if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, skb->len))
1116		goto clear_hash;
1117	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
1118		goto clear_hash;
1119	if (tcp_md5_hash_key(hp, key))
1120		goto clear_hash;
1121	ahash_request_set_crypt(req, NULL, md5_hash, 0);
1122	if (crypto_ahash_final(req))
1123		goto clear_hash;
1124
1125	tcp_put_md5sig_pool();
1126	return 0;
1127
1128clear_hash:
1129	tcp_put_md5sig_pool();
1130clear_hash_noput:
1131	memset(md5_hash, 0, 16);
1132	return 1;
1133}
1134EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
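/*
 * Summary of the two helpers above: per RFC 2385 the digest is computed, in
 * order, over the IPv4 pseudo-header, the 20-byte TCP header with its
 * checksum field zeroed (options excluded), the segment payload (skb
 * variant only), and finally the key itself.
 */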
1135
1136#endif
1137
1138/* Called with rcu_read_lock() */
1139static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
1140				    const struct sk_buff *skb)
1141{
1142#ifdef CONFIG_TCP_MD5SIG
1143	/*
1144	 * This gets called for each TCP segment that arrives
1145	 * so we want to be efficient.
1146	 * We have 3 drop cases:
1147	 * o No MD5 hash and one expected.
1148	 * o MD5 hash and we're not expecting one.
1149	 * o MD5 hash and it's wrong.
1150	 */
1151	const __u8 *hash_location = NULL;
1152	struct tcp_md5sig_key *hash_expected;
1153	const struct iphdr *iph = ip_hdr(skb);
1154	const struct tcphdr *th = tcp_hdr(skb);
1155	int genhash;
1156	unsigned char newhash[16];
1157
1158	hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
1159					  AF_INET);
1160	hash_location = tcp_parse_md5sig_option(th);
1161
1162	/* We've parsed the options - do we have a hash? */
1163	if (!hash_expected && !hash_location)
1164		return false;
1165
1166	if (hash_expected && !hash_location) {
1167		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
1168		return true;
1169	}
1170
1171	if (!hash_expected && hash_location) {
1172		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
1173		return true;
1174	}
1175
1176	/* Okay, so this is hash_expected and hash_location -
1177	 * so we need to calculate the checksum.
1178	 */
1179	genhash = tcp_v4_md5_hash_skb(newhash,
1180				      hash_expected,
1181				      NULL, skb);
1182
1183	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
1184		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
1185		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
1186				     &iph->saddr, ntohs(th->source),
1187				     &iph->daddr, ntohs(th->dest),
1188				     genhash ? " tcp_v4_calc_md5_hash failed"
1189				     : "");
1190		return true;
1191	}
1192	return false;
1193#endif
1194	return false;
1195}
1196
1197static void tcp_v4_init_req(struct request_sock *req,
1198			    const struct sock *sk_listener,
1199			    struct sk_buff *skb)
1200{
1201	struct inet_request_sock *ireq = inet_rsk(req);
 
1202
1203	sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
1204	sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
1205	ireq->opt = tcp_v4_save_options(skb);
1206}
1207
1208static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
 
1209					  struct flowi *fl,
1210					  const struct request_sock *req,
1211					  bool *strict)
1212{
1213	struct dst_entry *dst = inet_csk_route_req(sk, &fl->u.ip4, req);
1214
1215	if (strict) {
1216		if (fl->u.ip4.daddr == inet_rsk(req)->ir_rmt_addr)
1217			*strict = true;
1218		else
1219			*strict = false;
1220	}
1221
1222	return dst;
1223}
1224
1225struct request_sock_ops tcp_request_sock_ops __read_mostly = {
1226	.family		=	PF_INET,
1227	.obj_size	=	sizeof(struct tcp_request_sock),
1228	.rtx_syn_ack	=	tcp_rtx_synack,
1229	.send_ack	=	tcp_v4_reqsk_send_ack,
1230	.destructor	=	tcp_v4_reqsk_destructor,
1231	.send_reset	=	tcp_v4_send_reset,
1232	.syn_ack_timeout =	tcp_syn_ack_timeout,
1233};
1234
1235static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1236	.mss_clamp	=	TCP_MSS_DEFAULT,
1237#ifdef CONFIG_TCP_MD5SIG
1238	.req_md5_lookup	=	tcp_v4_md5_lookup,
1239	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
1240#endif
1241	.init_req	=	tcp_v4_init_req,
1242#ifdef CONFIG_SYN_COOKIES
1243	.cookie_init_seq =	cookie_v4_init_sequence,
1244#endif
1245	.route_req	=	tcp_v4_route_req,
1246	.init_seq	=	tcp_v4_init_sequence,
 
1247	.send_synack	=	tcp_v4_send_synack,
1248};
1249
1250int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1251{
1252	/* Never answer to SYNs sent to broadcast or multicast */
1253	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
1254		goto drop;
1255
1256	return tcp_conn_request(&tcp_request_sock_ops,
1257				&tcp_request_sock_ipv4_ops, sk, skb);
1258
1259drop:
1260	tcp_listendrop(sk);
1261	return 0;
1262}
1263EXPORT_SYMBOL(tcp_v4_conn_request);
1264
1265
1266/*
1267 * The three way handshake has completed - we got a valid ACK -
1268 * now create the new socket.
1269 */
1270struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
1271				  struct request_sock *req,
1272				  struct dst_entry *dst,
1273				  struct request_sock *req_unhash,
1274				  bool *own_req)
1275{
1276	struct inet_request_sock *ireq;
 
1277	struct inet_sock *newinet;
1278	struct tcp_sock *newtp;
1279	struct sock *newsk;
1280#ifdef CONFIG_TCP_MD5SIG
 
1281	struct tcp_md5sig_key *key;
 
1282#endif
1283	struct ip_options_rcu *inet_opt;
1284
1285	if (sk_acceptq_is_full(sk))
1286		goto exit_overflow;
1287
1288	newsk = tcp_create_openreq_child(sk, req, skb);
1289	if (!newsk)
1290		goto exit_nonewsk;
1291
1292	newsk->sk_gso_type = SKB_GSO_TCPV4;
1293	inet_sk_rx_dst_set(newsk, skb);
1294
1295	newtp		      = tcp_sk(newsk);
1296	newinet		      = inet_sk(newsk);
1297	ireq		      = inet_rsk(req);
1298	sk_daddr_set(newsk, ireq->ir_rmt_addr);
1299	sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
1300	newsk->sk_bound_dev_if = ireq->ir_iif;
1301	newinet->inet_saddr	      = ireq->ir_loc_addr;
1302	inet_opt	      = ireq->opt;
1303	rcu_assign_pointer(newinet->inet_opt, inet_opt);
1304	ireq->opt	      = NULL;
1305	newinet->mc_index     = inet_iif(skb);
1306	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
1307	newinet->rcv_tos      = ip_hdr(skb)->tos;
1308	inet_csk(newsk)->icsk_ext_hdr_len = 0;
1309	if (inet_opt)
1310		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
1311	newinet->inet_id = newtp->write_seq ^ jiffies;
1312
1313	if (!dst) {
1314		dst = inet_csk_route_child_sock(sk, newsk, req);
1315		if (!dst)
1316			goto put_and_exit;
1317	} else {
1318		/* syncookie case : see end of cookie_v4_check() */
1319	}
1320	sk_setup_caps(newsk, dst);
1321
1322	tcp_ca_openreq_child(newsk, dst);
1323
1324	tcp_sync_mss(newsk, dst_mtu(dst));
1325	newtp->advmss = dst_metric_advmss(dst);
1326	if (tcp_sk(sk)->rx_opt.user_mss &&
1327	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1328		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1329
1330	tcp_initialize_rcv_mss(newsk);
1331
1332#ifdef CONFIG_TCP_MD5SIG
 
1333	/* Copy over the MD5 key from the original socket */
1334	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
1335				AF_INET);
1336	if (key) {
1337		/*
1338		 * We're using one, so create a matching key
1339		 * on the newsk structure. If we fail to get
1340		 * memory, then we end up not copying the key
1341		 * across. Shucks.
1342		 */
1343		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
1344			       AF_INET, key->key, key->keylen, GFP_ATOMIC);
1345		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
1346	}
1347#endif
1348
1349	if (__inet_inherit_port(sk, newsk) < 0)
1350		goto put_and_exit;
1351	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
1352	if (*own_req)
1353		tcp_move_syn(newtp, req);
1354
1355	return newsk;
1356
1357exit_overflow:
1358	NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1359exit_nonewsk:
1360	dst_release(dst);
1361exit:
1362	tcp_listendrop(sk);
1363	return NULL;
1364put_and_exit:
 
1365	inet_csk_prepare_forced_close(newsk);
1366	tcp_done(newsk);
1367	goto exit;
1368}
1369EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
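/*
 * Passive-open overview (a simplified summary of the surrounding code): an
 * incoming SYN goes through tcp_v4_conn_request() / tcp_conn_request(),
 * which creates a request_sock and sends the SYN-ACK via
 * tcp_v4_send_synack(); when the handshake-completing ACK arrives,
 * tcp_check_req() calls into tcp_v4_syn_recv_sock() above, which clones the
 * listener into the new, fully established child socket.
 */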
1370
1371static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
1372{
1373#ifdef CONFIG_SYN_COOKIES
1374	const struct tcphdr *th = tcp_hdr(skb);
1375
1376	if (!th->syn)
1377		sk = cookie_v4_check(sk, skb);
1378#endif
1379	return sk;
1380}
1381
1382/* The socket must have its spinlock held when we get
1383 * here, unless it is a TCP_LISTEN socket.
1384 *
1385 * We have a potential double-lock case here, so even when
1386 * doing backlog processing we use the BH locking scheme.
1387 * This is because we cannot sleep with the original spinlock
1388 * held.
1389 */
1390int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1391{
 
1392	struct sock *rsk;
1393
1394	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1395		struct dst_entry *dst = sk->sk_rx_dst;
 
 
 
1396
1397		sock_rps_save_rxhash(sk, skb);
1398		sk_mark_napi_id(sk, skb);
1399		if (dst) {
1400			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1401			    !dst->ops->check(dst, 0)) {
 
 
1402				dst_release(dst);
1403				sk->sk_rx_dst = NULL;
1404			}
1405		}
1406		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
1407		return 0;
1408	}
1409
 
1410	if (tcp_checksum_complete(skb))
1411		goto csum_err;
1412
1413	if (sk->sk_state == TCP_LISTEN) {
1414		struct sock *nsk = tcp_v4_cookie_check(sk, skb);
1415
1416		if (!nsk)
1417			goto discard;
1418		if (nsk != sk) {
1419			sock_rps_save_rxhash(nsk, skb);
1420			sk_mark_napi_id(nsk, skb);
1421			if (tcp_child_process(sk, nsk, skb)) {
1422				rsk = nsk;
1423				goto reset;
1424			}
1425			return 0;
1426		}
1427	} else
1428		sock_rps_save_rxhash(sk, skb);
1429
1430	if (tcp_rcv_state_process(sk, skb)) {
1431		rsk = sk;
1432		goto reset;
1433	}
1434	return 0;
1435
1436reset:
1437	tcp_v4_send_reset(rsk, skb);
1438discard:
1439	kfree_skb(skb);
1440	/* Be careful here. If this function gets more complicated and
1441	 * gcc suffers from register pressure on the x86, sk (in %ebx)
1442	 * might be destroyed here. This current version compiles correctly,
1443	 * but you have been warned.
1444	 */
1445	return 0;
1446
1447csum_err:
 
 
1448	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1449	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
1450	goto discard;
1451}
1452EXPORT_SYMBOL(tcp_v4_do_rcv);
1453
1454void tcp_v4_early_demux(struct sk_buff *skb)
1455{
 
1456	const struct iphdr *iph;
1457	const struct tcphdr *th;
1458	struct sock *sk;
1459
1460	if (skb->pkt_type != PACKET_HOST)
1461		return;
1462
1463	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1464		return;
1465
1466	iph = ip_hdr(skb);
1467	th = tcp_hdr(skb);
1468
1469	if (th->doff < sizeof(struct tcphdr) / 4)
1470		return;
1471
1472	sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1473				       iph->saddr, th->source,
1474				       iph->daddr, ntohs(th->dest),
1475				       skb->skb_iif);
1476	if (sk) {
1477		skb->sk = sk;
1478		skb->destructor = sock_edemux;
1479		if (sk_fullsock(sk)) {
1480			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
1481
1482			if (dst)
1483				dst = dst_check(dst, 0);
1484			if (dst &&
1485			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1486				skb_dst_set_noref(skb, dst);
1487		}
1488	}
 
1489}
1490
1491/* Packet is added to VJ-style prequeue for processing in process
1492 * context, if a reader task is waiting. Apparently, this exciting
1493 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
1494 * failed somewhere. Latency? Burstiness? Well, at least now we will
1495 * see, why it failed. 8)8)				  --ANK
1496 *
1497 */
1498bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
1499{
1500	struct tcp_sock *tp = tcp_sk(sk);
1501
1502	if (sysctl_tcp_low_latency || !tp->ucopy.task)
1503		return false;
1504
1505	if (skb->len <= tcp_hdrlen(skb) &&
1506	    skb_queue_len(&tp->ucopy.prequeue) == 0)
1507		return false;
1508
1509	/* Before escaping RCU protected region, we need to take care of skb
1510	 * dst. Prequeue is only enabled for established sockets.
1511	 * For such sockets, we might need the skb dst only to set sk->sk_rx_dst
1512	 * Instead of doing full sk_rx_dst validity here, let's perform
1513	 * an optimistic check.
1514	 */
1515	if (likely(sk->sk_rx_dst))
1516		skb_dst_drop(skb);
1517	else
1518		skb_dst_force_safe(skb);
1519
1520	__skb_queue_tail(&tp->ucopy.prequeue, skb);
1521	tp->ucopy.memory += skb->truesize;
1522	if (skb_queue_len(&tp->ucopy.prequeue) >= 32 ||
1523	    tp->ucopy.memory + atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf) {
1524		struct sk_buff *skb1;
1525
1526		BUG_ON(sock_owned_by_user(sk));
1527		__NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPPREQUEUEDROPPED,
1528				skb_queue_len(&tp->ucopy.prequeue));
1529
1530		while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
1531			sk_backlog_rcv(sk, skb1);
 
 
 
1532
1533		tp->ucopy.memory = 0;
1534	} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
1535		wake_up_interruptible_sync_poll(sk_sleep(sk),
1536					   POLLIN | POLLRDNORM | POLLRDBAND);
1537		if (!inet_csk_ack_scheduled(sk))
1538			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
1539						  (3 * tcp_rto_min(sk)) / 4,
1540						  TCP_RTO_MAX);
 
1541	}
1542	return true;
1543}
1544EXPORT_SYMBOL(tcp_prequeue);
1545
1546bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
1547{
1548	u32 limit = sk->sk_rcvbuf + sk->sk_sndbuf;
1549
1550	/* Only socket owner can try to collapse/prune rx queues
1551	 * to reduce memory overhead, so add a little headroom here.
1552	 * Only a few socket backlogs are likely to be non-empty concurrently.
1553	 */
1554	limit += 64*1024;
1555
1556	/* In case all data was pulled from skb frags (in __pskb_pull_tail()),
1557	 * we can fix skb->truesize to its real value to avoid future drops.
1558	 * This is valid because skb is not yet charged to the socket.
1559	 * It has been noticed pure SACK packets were sometimes dropped
1560	 * (if cooked by drivers without copybreak feature).
1561	 */
1562	if (!skb->data_len)
1563		skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
1564
1565	if (unlikely(sk_add_backlog(sk, skb, limit))) {
1566		bh_unlock_sock(sk);
 
1567		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPBACKLOGDROP);
1568		return true;
1569	}
1570	return false;
1571}
1572EXPORT_SYMBOL(tcp_add_backlog);
1573
1574int tcp_filter(struct sock *sk, struct sk_buff *skb)
1575{
1576	struct tcphdr *th = (struct tcphdr *)skb->data;
1577	unsigned int eaten = skb->len;
1578	int err;
1579
1580	err = sk_filter_trim_cap(sk, skb, th->doff * 4);
1581	if (!err) {
1582		eaten -= skb->len;
1583		TCP_SKB_CB(skb)->end_seq -= eaten;
1584	}
1585	return err;
1586}
1587EXPORT_SYMBOL(tcp_filter);
1588
1589/*
1590 *	From tcp_input.c
1591 */
1592
1593int tcp_v4_rcv(struct sk_buff *skb)
1594{
1595	struct net *net = dev_net(skb->dev);
 
 
 
1596	const struct iphdr *iph;
1597	const struct tcphdr *th;
1598	bool refcounted;
1599	struct sock *sk;
1600	int ret;
1601
 
1602	if (skb->pkt_type != PACKET_HOST)
1603		goto discard_it;
1604
1605	/* Count it even if it's bad */
1606	__TCP_INC_STATS(net, TCP_MIB_INSEGS);
1607
1608	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1609		goto discard_it;
1610
1611	th = (const struct tcphdr *)skb->data;
1612
1613	if (unlikely(th->doff < sizeof(struct tcphdr) / 4))
 
1614		goto bad_packet;
 
1615	if (!pskb_may_pull(skb, th->doff * 4))
1616		goto discard_it;
1617
1618	/* An explanation is required here, I think.
1619	 * Packet length and doff are validated by header prediction,
1620	 * provided case of th->doff==0 is eliminated.
1621	 * So, we defer the checks. */
1622
1623	if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
1624		goto csum_error;
1625
1626	th = (const struct tcphdr *)skb->data;
1627	iph = ip_hdr(skb);
1628	/* This is tricky : We move IPCB at its correct location into TCP_SKB_CB()
1629	 * barrier() makes sure the compiler won't play fool^Waliasing games.
1630	 */
1631	memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
1632		sizeof(struct inet_skb_parm));
1633	barrier();
1634
1635	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1636	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1637				    skb->len - th->doff * 4);
1638	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1639	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1640	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1641	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
1642	TCP_SKB_CB(skb)->sacked	 = 0;
1643
1644lookup:
1645	sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source,
1646			       th->dest, &refcounted);
 
1647	if (!sk)
1648		goto no_tcp_socket;
1649
1650process:
1651	if (sk->sk_state == TCP_TIME_WAIT)
1652		goto do_time_wait;
1653
1654	if (sk->sk_state == TCP_NEW_SYN_RECV) {
1655		struct request_sock *req = inet_reqsk(sk);
 
1656		struct sock *nsk;
1657
1658		sk = req->rsk_listener;
1659		if (unlikely(tcp_v4_inbound_md5_hash(sk, skb))) {
1660			sk_drops_add(sk, skb);
1661			reqsk_put(req);
1662			goto discard_it;
1663		}
1664		if (unlikely(sk->sk_state != TCP_LISTEN)) {
1665			inet_csk_reqsk_queue_drop_and_put(sk, req);
1666			goto lookup;
1667		}
1668		/* We own a reference on the listener, increase it again
1669		 * as we might lose it too soon.
1670		 */
1671		sock_hold(sk);
1672		refcounted = true;
1673		nsk = tcp_check_req(sk, skb, req, false);
1674		if (!nsk) {
1675			reqsk_put(req);
1676			goto discard_and_relse;
1677		}
 
1678		if (nsk == sk) {
1679			reqsk_put(req);
 
1680		} else if (tcp_child_process(sk, nsk, skb)) {
1681			tcp_v4_send_reset(nsk, skb);
1682			goto discard_and_relse;
1683		} else {
1684			sock_put(sk);
1685			return 0;
1686		}
1687	}
1688	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
1689		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
1690		goto discard_and_relse;
1691	}
1692
1693	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
 
1694		goto discard_and_relse;
 
1695
1696	if (tcp_v4_inbound_md5_hash(sk, skb))
 
 
1697		goto discard_and_relse;
1698
1699	nf_reset(skb);
1700
1701	if (tcp_filter(sk, skb))
 
1702		goto discard_and_relse;
 
1703	th = (const struct tcphdr *)skb->data;
1704	iph = ip_hdr(skb);
 
1705
1706	skb->dev = NULL;
1707
1708	if (sk->sk_state == TCP_LISTEN) {
1709		ret = tcp_v4_do_rcv(sk, skb);
1710		goto put_and_return;
1711	}
1712
1713	sk_incoming_cpu_update(sk);
1714
1715	bh_lock_sock_nested(sk);
1716	tcp_segs_in(tcp_sk(sk), skb);
1717	ret = 0;
1718	if (!sock_owned_by_user(sk)) {
1719		if (!tcp_prequeue(sk, skb))
1720			ret = tcp_v4_do_rcv(sk, skb);
1721	} else if (tcp_add_backlog(sk, skb)) {
1722		goto discard_and_relse;
1723	}
1724	bh_unlock_sock(sk);
1725
1726put_and_return:
1727	if (refcounted)
1728		sock_put(sk);
1729
1730	return ret;
1731
1732no_tcp_socket:
 
1733	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1734		goto discard_it;
1735
 
 
1736	if (tcp_checksum_complete(skb)) {
1737csum_error:
 
 
1738		__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
1739bad_packet:
1740		__TCP_INC_STATS(net, TCP_MIB_INERRS);
1741	} else {
1742		tcp_v4_send_reset(NULL, skb);
1743	}
1744
1745discard_it:
 
1746	/* Discard frame. */
1747	kfree_skb(skb);
1748	return 0;
1749
1750discard_and_relse:
1751	sk_drops_add(sk, skb);
1752	if (refcounted)
1753		sock_put(sk);
1754	goto discard_it;
1755
1756do_time_wait:
1757	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
 
1758		inet_twsk_put(inet_twsk(sk));
1759		goto discard_it;
1760	}
1761
 
 
1762	if (tcp_checksum_complete(skb)) {
1763		inet_twsk_put(inet_twsk(sk));
1764		goto csum_error;
1765	}
1766	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1767	case TCP_TW_SYN: {
1768		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
1769							&tcp_hashinfo, skb,
1770							__tcp_hdrlen(th),
1771							iph->saddr, th->source,
1772							iph->daddr, th->dest,
1773							inet_iif(skb));
 
1774		if (sk2) {
1775			inet_twsk_deschedule_put(inet_twsk(sk));
1776			sk = sk2;
 
1777			refcounted = false;
1778			goto process;
1779		}
1780		/* Fall through to ACK */
1781	}
 
 
1782	case TCP_TW_ACK:
1783		tcp_v4_timewait_ack(sk, skb);
1784		break;
1785	case TCP_TW_RST:
1786		tcp_v4_send_reset(sk, skb);
1787		inet_twsk_deschedule_put(inet_twsk(sk));
1788		goto discard_it;
1789	case TCP_TW_SUCCESS:;
1790	}
1791	goto discard_it;
1792}
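/*
 * Dispatch summary for tcp_v4_rcv() above (simplified): after header and
 * checksum validation the socket lookup decides the path. TIME_WAIT sockets
 * go through tcp_timewait_state_process(), TCP_NEW_SYN_RECV request sockets
 * through tcp_check_req(), and everything else is fed to tcp_v4_do_rcv(),
 * either directly, through the prequeue, or via the socket backlog when the
 * socket is owned by the user.
 */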
1793
1794static struct timewait_sock_ops tcp_timewait_sock_ops = {
1795	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
1796	.twsk_unique	= tcp_twsk_unique,
1797	.twsk_destructor= tcp_twsk_destructor,
1798};
1799
1800void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
1801{
1802	struct dst_entry *dst = skb_dst(skb);
1803
1804	if (dst && dst_hold_safe(dst)) {
1805		sk->sk_rx_dst = dst;
1806		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
1807	}
1808}
1809EXPORT_SYMBOL(inet_sk_rx_dst_set);
1810
1811const struct inet_connection_sock_af_ops ipv4_specific = {
1812	.queue_xmit	   = ip_queue_xmit,
1813	.send_check	   = tcp_v4_send_check,
1814	.rebuild_header	   = inet_sk_rebuild_header,
1815	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
1816	.conn_request	   = tcp_v4_conn_request,
1817	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
1818	.net_header_len	   = sizeof(struct iphdr),
1819	.setsockopt	   = ip_setsockopt,
1820	.getsockopt	   = ip_getsockopt,
1821	.addr2sockaddr	   = inet_csk_addr2sockaddr,
1822	.sockaddr_len	   = sizeof(struct sockaddr_in),
1823	.bind_conflict	   = inet_csk_bind_conflict,
1824#ifdef CONFIG_COMPAT
1825	.compat_setsockopt = compat_ip_setsockopt,
1826	.compat_getsockopt = compat_ip_getsockopt,
1827#endif
1828	.mtu_reduced	   = tcp_v4_mtu_reduced,
1829};
1830EXPORT_SYMBOL(ipv4_specific);
1831
1832#ifdef CONFIG_TCP_MD5SIG
1833static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
1834	.md5_lookup		= tcp_v4_md5_lookup,
1835	.calc_md5_hash		= tcp_v4_md5_hash_skb,
1836	.md5_parse		= tcp_v4_parse_md5_keys,
1837};
1838#endif
1839
1840/* NOTE: A lot of things set to zero explicitly by call to
1841 *       sk_alloc() so need not be done here.
1842 */
1843static int tcp_v4_init_sock(struct sock *sk)
1844{
1845	struct inet_connection_sock *icsk = inet_csk(sk);
1846
1847	tcp_init_sock(sk);
1848
1849	icsk->icsk_af_ops = &ipv4_specific;
1850
1851#ifdef CONFIG_TCP_MD5SIG
1852	tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
1853#endif
1854
1855	return 0;
1856}
1857
1858void tcp_v4_destroy_sock(struct sock *sk)
1859{
1860	struct tcp_sock *tp = tcp_sk(sk);
1861
 
 
1862	tcp_clear_xmit_timers(sk);
1863
1864	tcp_cleanup_congestion_control(sk);
1865
 
 
1866	/* Cleanup up the write buffer. */
1867	tcp_write_queue_purge(sk);
1868
 
 
 
1869	/* Cleans up our, hopefully empty, out_of_order_queue. */
1870	skb_rbtree_purge(&tp->out_of_order_queue);
1871
1872#ifdef CONFIG_TCP_MD5SIG
1873	/* Clean up the MD5 key list, if any */
1874	if (tp->md5sig_info) {
1875		tcp_clear_md5_list(sk);
1876		kfree_rcu(tp->md5sig_info, rcu);
1877		tp->md5sig_info = NULL;
 
1878	}
1879#endif
1880
1881	/* Clean prequeue, it must be empty really */
1882	__skb_queue_purge(&tp->ucopy.prequeue);
1883
1884	/* Clean up a referenced TCP bind bucket. */
1885	if (inet_csk(sk)->icsk_bind_hash)
1886		inet_put_port(sk);
1887
1888	BUG_ON(tp->fastopen_rsk);
1889
1890	/* If socket is aborted during connect operation */
1891	tcp_free_fastopen_req(tp);
 
1892	tcp_saved_syn_free(tp);
1893
1894	local_bh_disable();
1895	sk_sockets_allocated_dec(sk);
1896	local_bh_enable();
1897}
1898EXPORT_SYMBOL(tcp_v4_destroy_sock);
1899
1900#ifdef CONFIG_PROC_FS
1901/* Proc filesystem TCP sock list dumping. */
1902
1903/*
1904 * Get the next listener socket following cur.  If cur is NULL, get the first socket
1905 * starting from bucket given in st->bucket; when st->bucket is zero the
1906 * very first socket in the hash table is returned.
1907 */
1908static void *listening_get_next(struct seq_file *seq, void *cur)
1909{
1910	struct tcp_iter_state *st = seq->private;
1911	struct net *net = seq_file_net(seq);
1912	struct inet_listen_hashbucket *ilb;
 
1913	struct sock *sk = cur;
1914
1915	if (!sk) {
1916get_head:
1917		ilb = &tcp_hashinfo.listening_hash[st->bucket];
1918		spin_lock(&ilb->lock);
1919		sk = sk_head(&ilb->head);
1920		st->offset = 0;
1921		goto get_sk;
1922	}
1923	ilb = &tcp_hashinfo.listening_hash[st->bucket];
1924	++st->num;
1925	++st->offset;
1926
1927	sk = sk_next(sk);
1928get_sk:
1929	sk_for_each_from(sk) {
1930		if (!net_eq(sock_net(sk), net))
1931			continue;
1932		if (sk->sk_family == st->family)
1933			return sk;
1934	}
1935	spin_unlock(&ilb->lock);
1936	st->offset = 0;
1937	if (++st->bucket < INET_LHTABLE_SIZE)
1938		goto get_head;
1939	return NULL;
 
1940}
1941
1942static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
1943{
1944	struct tcp_iter_state *st = seq->private;
1945	void *rc;
1946
1947	st->bucket = 0;
1948	st->offset = 0;
1949	rc = listening_get_next(seq, NULL);
1950
1951	while (rc && *pos) {
1952		rc = listening_get_next(seq, rc);
1953		--*pos;
1954	}
1955	return rc;
1956}
1957
1958static inline bool empty_bucket(const struct tcp_iter_state *st)
 
1959{
1960	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
1961}
1962
1963/*
1964 * Get first established socket starting from bucket given in st->bucket.
1965 * If st->bucket is zero, the very first socket in the hash is returned.
1966 */
1967static void *established_get_first(struct seq_file *seq)
1968{
 
1969	struct tcp_iter_state *st = seq->private;
1970	struct net *net = seq_file_net(seq);
1971	void *rc = NULL;
1972
1973	st->offset = 0;
1974	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
1975		struct sock *sk;
1976		struct hlist_nulls_node *node;
1977		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
1978
1979		/* Lockless fast path for the common case of empty buckets */
1980		if (empty_bucket(st))
1981			continue;
1982
1983		spin_lock_bh(lock);
1984		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
1985			if (sk->sk_family != st->family ||
1986			    !net_eq(sock_net(sk), net)) {
1987				continue;
1988			}
1989			rc = sk;
1990			goto out;
1991		}
1992		spin_unlock_bh(lock);
1993	}
1994out:
1995	return rc;
1996}
1997
1998static void *established_get_next(struct seq_file *seq, void *cur)
1999{
 
 
 
2000	struct sock *sk = cur;
2001	struct hlist_nulls_node *node;
2002	struct tcp_iter_state *st = seq->private;
2003	struct net *net = seq_file_net(seq);
2004
2005	++st->num;
2006	++st->offset;
2007
2008	sk = sk_nulls_next(sk);
2009
2010	sk_nulls_for_each_from(sk, node) {
2011		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
2012			return sk;
2013	}
2014
2015	spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2016	++st->bucket;
2017	return established_get_first(seq);
2018}
2019
2020static void *established_get_idx(struct seq_file *seq, loff_t pos)
2021{
2022	struct tcp_iter_state *st = seq->private;
2023	void *rc;
2024
2025	st->bucket = 0;
2026	rc = established_get_first(seq);
2027
2028	while (rc && pos) {
2029		rc = established_get_next(seq, rc);
2030		--pos;
2031	}
2032	return rc;
2033}
2034
2035static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2036{
2037	void *rc;
2038	struct tcp_iter_state *st = seq->private;
2039
2040	st->state = TCP_SEQ_STATE_LISTENING;
2041	rc	  = listening_get_idx(seq, &pos);
2042
2043	if (!rc) {
2044		st->state = TCP_SEQ_STATE_ESTABLISHED;
2045		rc	  = established_get_idx(seq, pos);
2046	}
2047
2048	return rc;
2049}
2050
2051static void *tcp_seek_last_pos(struct seq_file *seq)
2052{
 
2053	struct tcp_iter_state *st = seq->private;
 
2054	int offset = st->offset;
2055	int orig_num = st->num;
2056	void *rc = NULL;
2057
2058	switch (st->state) {
2059	case TCP_SEQ_STATE_LISTENING:
2060		if (st->bucket >= INET_LHTABLE_SIZE)
2061			break;
2062		st->state = TCP_SEQ_STATE_LISTENING;
2063		rc = listening_get_next(seq, NULL);
2064		while (offset-- && rc)
2065			rc = listening_get_next(seq, rc);
2066		if (rc)
2067			break;
2068		st->bucket = 0;
2069		st->state = TCP_SEQ_STATE_ESTABLISHED;
2070		/* Fallthrough */
2071	case TCP_SEQ_STATE_ESTABLISHED:
2072		if (st->bucket > tcp_hashinfo.ehash_mask)
2073			break;
2074		rc = established_get_first(seq);
2075		while (offset-- && rc)
2076			rc = established_get_next(seq, rc);
2077	}
2078
2079	st->num = orig_num;
2080
2081	return rc;
2082}
2083
2084static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2085{
2086	struct tcp_iter_state *st = seq->private;
2087	void *rc;
2088
2089	if (*pos && *pos == st->last_pos) {
2090		rc = tcp_seek_last_pos(seq);
2091		if (rc)
2092			goto out;
2093	}
2094
2095	st->state = TCP_SEQ_STATE_LISTENING;
2096	st->num = 0;
2097	st->bucket = 0;
2098	st->offset = 0;
2099	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2100
2101out:
2102	st->last_pos = *pos;
2103	return rc;
2104}
 
2105
2106static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2107{
2108	struct tcp_iter_state *st = seq->private;
2109	void *rc = NULL;
2110
2111	if (v == SEQ_START_TOKEN) {
2112		rc = tcp_get_idx(seq, 0);
2113		goto out;
2114	}
2115
2116	switch (st->state) {
2117	case TCP_SEQ_STATE_LISTENING:
2118		rc = listening_get_next(seq, v);
2119		if (!rc) {
2120			st->state = TCP_SEQ_STATE_ESTABLISHED;
2121			st->bucket = 0;
2122			st->offset = 0;
2123			rc	  = established_get_first(seq);
2124		}
2125		break;
2126	case TCP_SEQ_STATE_ESTABLISHED:
2127		rc = established_get_next(seq, v);
2128		break;
2129	}
2130out:
2131	++*pos;
2132	st->last_pos = *pos;
2133	return rc;
2134	}
2135
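/*
 * seq_file ->stop(): release whichever bucket lock the interrupted walk
 * is still holding (listening_hash spinlock or ehash lock).
 */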
2136static void tcp_seq_stop(struct seq_file *seq, void *v)
2137	{
2138	struct tcp_iter_state *st = seq->private;
2139
2140	switch (st->state) {
2141	case TCP_SEQ_STATE_LISTENING:
2142		if (v != SEQ_START_TOKEN)
2143			spin_unlock(&tcp_hashinfo.listening_hash[st->bucket].lock);
2144		break;
2145	case TCP_SEQ_STATE_ESTABLISHED:
2146		if (v)
2147			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2148		break;
2149	}
2150}
2151
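/*
 * ->open() for the per-af /proc entries: seq_open_net() allocates the
 * tcp_iter_state, and the address family is taken from the afinfo that
 * was attached to the proc entry at registration time.
 */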
2152int tcp_seq_open(struct inode *inode, struct file *file)
2153{
2154	struct tcp_seq_afinfo *afinfo = PDE_DATA(inode);
2155	struct tcp_iter_state *s;
2156	int err;
2157
2158	err = seq_open_net(inode, file, &afinfo->seq_ops,
2159			  sizeof(struct tcp_iter_state));
2160	if (err < 0)
2161		return err;
2162
2163	s = ((struct seq_file *)file->private_data)->private;
2164	s->family		= afinfo->family;
2165	s->last_pos		= 0;
2166	return 0;
2167}
2168EXPORT_SYMBOL(tcp_seq_open);
2169
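/*
 * Hook up the common start/next/stop iterators and create the per-net
 * /proc/net/<name> entry (e.g. "tcp" for AF_INET).  The afinfo pointer
 * is stored as the proc entry's data so tcp_seq_open() can find it.
 */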
2170int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
2171{
2172	int rc = 0;
2173	struct proc_dir_entry *p;
2174
2175	afinfo->seq_ops.start		= tcp_seq_start;
2176	afinfo->seq_ops.next		= tcp_seq_next;
2177	afinfo->seq_ops.stop		= tcp_seq_stop;
2178
2179	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
2180			     afinfo->seq_fops, afinfo);
2181	if (!p)
2182		rc = -ENOMEM;
2183	return rc;
2184}
2185EXPORT_SYMBOL(tcp_proc_register);
2186
2187void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
2188{
2189	remove_proc_entry(afinfo->name, net->proc_net);
2190}
2191EXPORT_SYMBOL(tcp_proc_unregister);
2192
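/*
 * Emit one line for an embryonic connection (SYN_RECV request sock):
 * addresses and ports, time left on the SYN-ACK timer, retransmit count
 * and the listener's uid.  Fields that do not apply are printed as zero
 * to keep the column layout fixed.
 */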
2193static void get_openreq4(const struct request_sock *req,
2194			 struct seq_file *f, int i)
2195{
2196	const struct inet_request_sock *ireq = inet_rsk(req);
2197	long delta = req->rsk_timer.expires - jiffies;
2198
2199	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2200		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
2201		i,
2202		ireq->ir_loc_addr,
2203		ireq->ir_num,
2204		ireq->ir_rmt_addr,
2205		ntohs(ireq->ir_rmt_port),
2206		TCP_SYN_RECV,
2207		0, 0, /* could print option size, but that is af dependent. */
2208		1,    /* timers active (only the expire timer) */
2209		jiffies_delta_to_clock_t(delta),
2210		req->num_timeout,
2211		from_kuid_munged(seq_user_ns(f),
2212				 sock_i_uid(req->rsk_listener)),
2213		0,  /* non standard timer */
2214		0, /* open_requests have no inode */
2215		0,
2216		req);
2217}
2218
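/*
 * Emit one line for a full socket.  The timer code ("tr" column) is
 * 1 = retransmit/loss probe, 2 = keepalive, 4 = zero-window probe,
 * 0 = none.  rx_queue is the accept backlog for listeners and the amount
 * of received-but-unread data otherwise.
 */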
2219static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
2220{
2221	int timer_active;
2222	unsigned long timer_expires;
2223	const struct tcp_sock *tp = tcp_sk(sk);
2224	const struct inet_connection_sock *icsk = inet_csk(sk);
2225	const struct inet_sock *inet = inet_sk(sk);
2226	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
2227	__be32 dest = inet->inet_daddr;
2228	__be32 src = inet->inet_rcv_saddr;
2229	__u16 destp = ntohs(inet->inet_dport);
2230	__u16 srcp = ntohs(inet->inet_sport);
2231	int rx_queue;
2232	int state;
2233
2234	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
2235	    icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
2236	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
2237		timer_active	= 1;
2238		timer_expires	= icsk->icsk_timeout;
2239	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2240		timer_active	= 4;
2241		timer_expires	= icsk->icsk_timeout;
2242	} else if (timer_pending(&sk->sk_timer)) {
2243		timer_active	= 2;
2244		timer_expires	= sk->sk_timer.expires;
2245	} else {
2246		timer_active	= 0;
2247		timer_expires = jiffies;
2248	}
2249
2250	state = sk_state_load(sk);
2251	if (state == TCP_LISTEN)
2252		rx_queue = sk->sk_ack_backlog;
2253	else
2254		/* Because we don't lock the socket,
2255		 * we might find a transient negative value.
2256		 */
2257		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
2258
2259	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2260			"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
2261		i, src, srcp, dest, destp, state,
2262		tp->write_seq - tp->snd_una,
2263		rx_queue,
2264		timer_active,
2265		jiffies_delta_to_clock_t(timer_expires - jiffies),
2266		icsk->icsk_retransmits,
2267		from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
2268		icsk->icsk_probes_out,
2269		sock_i_ino(sk),
2270		atomic_read(&sk->sk_refcnt), sk,
2271		jiffies_to_clock_t(icsk->icsk_rto),
2272		jiffies_to_clock_t(icsk->icsk_ack.ato),
2273		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
2274		tp->snd_cwnd,
2275		state == TCP_LISTEN ?
2276		    fastopenq->max_qlen :
2277		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
2278}
2279
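/*
 * Emit one line for a TIME_WAIT/FIN_WAIT2 minisock: addresses, the
 * substate and the time left until the timewait timer fires.
 */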
2280static void get_timewait4_sock(const struct inet_timewait_sock *tw,
2281			       struct seq_file *f, int i)
2282{
2283	long delta = tw->tw_timer.expires - jiffies;
2284	__be32 dest, src;
2285	__u16 destp, srcp;
2286
2287	dest  = tw->tw_daddr;
2288	src   = tw->tw_rcv_saddr;
2289	destp = ntohs(tw->tw_dport);
2290	srcp  = ntohs(tw->tw_sport);
2291
2292	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2293		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
2294		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2295		3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
2296		atomic_read(&tw->tw_refcnt), tw);
2297}
2298
2299#define TMPSZ 150
2300
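/*
 * show() callback for /proc/net/tcp.  Every record is padded to
 * TMPSZ - 1 characters so userspace can rely on fixed-width lines.
 * Purely illustrative (actual values depend on the socket), a LISTEN
 * socket on 0.0.0.0:22 would render roughly as:
 *
 *   0: 00000000:0016 00000000:0000 0A 00000000:00000000 00:00000000 00000000     0        0 12345 1 0000000000000000 100 0 0 10 0
 */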
2301static int tcp4_seq_show(struct seq_file *seq, void *v)
2302{
2303	struct tcp_iter_state *st;
2304	struct sock *sk = v;
2305
2306	seq_setwidth(seq, TMPSZ - 1);
2307	if (v == SEQ_START_TOKEN) {
2308		seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
2309			   "rx_queue tr tm->when retrnsmt   uid  timeout "
2310			   "inode");
2311		goto out;
2312	}
2313	st = seq->private;
2314
2315	if (sk->sk_state == TCP_TIME_WAIT)
2316		get_timewait4_sock(v, seq, st->num);
2317	else if (sk->sk_state == TCP_NEW_SYN_RECV)
2318		get_openreq4(v, seq, st->num);
2319	else
2320		get_tcp4_sock(v, seq, st->num);
2321out:
2322	seq_pad(seq, '\n');
2323	return 0;
2324}
2325
2326static const struct file_operations tcp_afinfo_seq_fops = {
2327	.owner   = THIS_MODULE,
2328	.open    = tcp_seq_open,
2329	.read    = seq_read,
2330	.llseek  = seq_lseek,
2331	.release = seq_release_net
2332};
2333
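/*
 * AF_INET descriptor that ties the generic iterator to the IPv4 show()
 * routine; registered per network namespace below as /proc/net/tcp.
 */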
2334static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2335	.name		= "tcp",
2336	.family		= AF_INET,
2337	.seq_fops	= &tcp_afinfo_seq_fops,
2338	.seq_ops	= {
2339		.show		= tcp4_seq_show,
2340	},
2341};
2342
2343static int __net_init tcp4_proc_init_net(struct net *net)
2344{
2345	return tcp_proc_register(net, &tcp4_seq_afinfo);
2346}
2347
2348static void __net_exit tcp4_proc_exit_net(struct net *net)
2349{
2350	tcp_proc_unregister(net, &tcp4_seq_afinfo);
2351}
2352
2353static struct pernet_operations tcp4_net_ops = {
2354	.init = tcp4_proc_init_net,
2355	.exit = tcp4_proc_exit_net,
2356};
2357
2358int __init tcp4_proc_init(void)
2359{
2360	return register_pernet_subsys(&tcp4_net_ops);
2361}
2362
2363void tcp4_proc_exit(void)
2364{
2365	unregister_pernet_subsys(&tcp4_net_ops);
2366}
2367#endif /* CONFIG_PROC_FS */
2368
2369struct proto tcp_prot = {
2370	.name			= "TCP",
2371	.owner			= THIS_MODULE,
2372	.close			= tcp_close,
2373	.connect		= tcp_v4_connect,
2374	.disconnect		= tcp_disconnect,
2375	.accept			= inet_csk_accept,
2376	.ioctl			= tcp_ioctl,
2377	.init			= tcp_v4_init_sock,
2378	.destroy		= tcp_v4_destroy_sock,
2379	.shutdown		= tcp_shutdown,
2380	.setsockopt		= tcp_setsockopt,
2381	.getsockopt		= tcp_getsockopt,
2382	.recvmsg		= tcp_recvmsg,
2383	.sendmsg		= tcp_sendmsg,
2384	.sendpage		= tcp_sendpage,
2385	.backlog_rcv		= tcp_v4_do_rcv,
2386	.release_cb		= tcp_release_cb,
2387	.hash			= inet_hash,
2388	.unhash			= inet_unhash,
2389	.get_port		= inet_csk_get_port,
2390	.enter_memory_pressure	= tcp_enter_memory_pressure,
2391	.stream_memory_free	= tcp_stream_memory_free,
2392	.sockets_allocated	= &tcp_sockets_allocated,
2393	.orphan_count		= &tcp_orphan_count,
2394	.memory_allocated	= &tcp_memory_allocated,
2395	.memory_pressure	= &tcp_memory_pressure,
2396	.sysctl_mem		= sysctl_tcp_mem,
2397	.sysctl_wmem		= sysctl_tcp_wmem,
2398	.sysctl_rmem		= sysctl_tcp_rmem,
2399	.max_header		= MAX_TCP_HEADER,
2400	.obj_size		= sizeof(struct tcp_sock),
2401	.slab_flags		= SLAB_DESTROY_BY_RCU,
2402	.twsk_prot		= &tcp_timewait_sock_ops,
2403	.rsk_prot		= &tcp_request_sock_ops,
2404	.h.hashinfo		= &tcp_hashinfo,
2405	.no_autobind		= true,
2406#ifdef CONFIG_COMPAT
2407	.compat_setsockopt	= compat_tcp_setsockopt,
2408	.compat_getsockopt	= compat_tcp_getsockopt,
2409#endif
2410	.diag_destroy		= tcp_abort,
2411};
2412EXPORT_SYMBOL(tcp_prot);
2413
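/*
 * Per-namespace teardown: destroy the per-cpu kernel control sockets
 * used to transmit resets and ACKs on behalf of this namespace.
 */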
2414static void __net_exit tcp_sk_exit(struct net *net)
2415{
2416	int cpu;
2417
2418	for_each_possible_cpu(cpu)
2419		inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
2420	free_percpu(net->ipv4.tcp_sk);
2421}
2422
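/*
 * Per-namespace init: allocate one kernel control socket per possible
 * CPU (used when sending RSTs/ACKs without a local socket) and seed the
 * namespace's TCP sysctl defaults.
 */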
2423static int __net_init tcp_sk_init(struct net *net)
2424{
2425	int res, cpu;
2426
2427	net->ipv4.tcp_sk = alloc_percpu(struct sock *);
2428	if (!net->ipv4.tcp_sk)
2429		return -ENOMEM;
2430
2431	for_each_possible_cpu(cpu) {
2432		struct sock *sk;
2433
2434		res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
2435					   IPPROTO_TCP, net);
2436		if (res)
2437			goto fail;
2438		sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
2439		*per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
2440	}
2441
2442	net->ipv4.sysctl_tcp_ecn = 2;
2443	net->ipv4.sysctl_tcp_ecn_fallback = 1;
2444
2445	net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
2446	net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
2447	net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;
2448
2449	net->ipv4.sysctl_tcp_keepalive_time = TCP_KEEPALIVE_TIME;
2450	net->ipv4.sysctl_tcp_keepalive_probes = TCP_KEEPALIVE_PROBES;
2451	net->ipv4.sysctl_tcp_keepalive_intvl = TCP_KEEPALIVE_INTVL;
2452
2453	net->ipv4.sysctl_tcp_syn_retries = TCP_SYN_RETRIES;
2454	net->ipv4.sysctl_tcp_synack_retries = TCP_SYNACK_RETRIES;
2455	net->ipv4.sysctl_tcp_syncookies = 1;
2456	net->ipv4.sysctl_tcp_reordering = TCP_FASTRETRANS_THRESH;
2457	net->ipv4.sysctl_tcp_retries1 = TCP_RETR1;
2458	net->ipv4.sysctl_tcp_retries2 = TCP_RETR2;
2459	net->ipv4.sysctl_tcp_orphan_retries = 0;
2460	net->ipv4.sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
2461	net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX;
2462	net->ipv4.sysctl_tcp_tw_reuse = 0;
2463
2464	return 0;
2465fail:
2466	tcp_sk_exit(net);
2467
2468	return res;
2469}
2470
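/*
 * Batched namespace teardown: flush any remaining TIME_WAIT sockets
 * belonging to the dying namespaces before their resources go away.
 */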
2471static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
2472{
2473	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
2474}
2475
2476static struct pernet_operations __net_initdata tcp_sk_ops = {
2477       .init	   = tcp_sk_init,
2478       .exit	   = tcp_sk_exit,
2479       .exit_batch = tcp_sk_exit_batch,
2480};
2481
2482void __init tcp_v4_init(void)
2483{
2484	inet_hashinfo_init(&tcp_hashinfo);
2485	if (register_pernet_subsys(&tcp_sk_ops))
2486		panic("Failed to create the TCP control socket.\n");
2487}