   1/*
   2 * INET		An implementation of the TCP/IP protocol suite for the LINUX
   3 *		operating system.  INET is implemented using the  BSD Socket
   4 *		interface as the means of communication with the user level.
   5 *
   6 *		Implementation of the Transmission Control Protocol(TCP).
   7 *
   8 *		IPv4 specific functions
   9 *
  10 *
  11 *		code split from:
  12 *		linux/ipv4/tcp.c
  13 *		linux/ipv4/tcp_input.c
  14 *		linux/ipv4/tcp_output.c
  15 *
  16 *		See tcp.c for author information
  17 *
  18 *	This program is free software; you can redistribute it and/or
  19 *      modify it under the terms of the GNU General Public License
  20 *      as published by the Free Software Foundation; either version
  21 *      2 of the License, or (at your option) any later version.
  22 */
  23
  24/*
  25 * Changes:
  26 *		David S. Miller	:	New socket lookup architecture.
  27 *					This code is dedicated to John Dyson.
  28 *		David S. Miller :	Change semantics of established hash,
  29 *					half is devoted to TIME_WAIT sockets
   30 *					and the rest goes in the other half.
  31 *		Andi Kleen :		Add support for syncookies and fixed
  32 *					some bugs: ip options weren't passed to
  33 *					the TCP layer, missed a check for an
  34 *					ACK bit.
  35 *		Andi Kleen :		Implemented fast path mtu discovery.
  36 *	     				Fixed many serious bugs in the
  37 *					request_sock handling and moved
  38 *					most of it into the af independent code.
  39 *					Added tail drop and some other bugfixes.
  40 *					Added new listen semantics.
  41 *		Mike McLagan	:	Routing by source
  42 *	Juan Jose Ciarlante:		ip_dynaddr bits
  43 *		Andi Kleen:		various fixes.
   44 *	Vitaly E. Lavrov	:	Transparent proxy revived after a
   45 *					year-long coma.
  46 *	Andi Kleen		:	Fix new listen.
  47 *	Andi Kleen		:	Fix accept error reporting.
  48 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
   49 *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
  50 *					a single port at the same time.
  51 */
  52
  53#define pr_fmt(fmt) "TCP: " fmt
  54
  55#include <linux/bottom_half.h>
  56#include <linux/types.h>
  57#include <linux/fcntl.h>
  58#include <linux/module.h>
  59#include <linux/random.h>
  60#include <linux/cache.h>
  61#include <linux/jhash.h>
  62#include <linux/init.h>
  63#include <linux/times.h>
  64#include <linux/slab.h>
  65
  66#include <net/net_namespace.h>
  67#include <net/icmp.h>
  68#include <net/inet_hashtables.h>
  69#include <net/tcp.h>
  70#include <net/transp_v6.h>
  71#include <net/ipv6.h>
  72#include <net/inet_common.h>
  73#include <net/timewait_sock.h>
  74#include <net/xfrm.h>
  75#include <net/secure_seq.h>
  76#include <net/busy_poll.h>
  77
  78#include <linux/inet.h>
  79#include <linux/ipv6.h>
  80#include <linux/stddef.h>
  81#include <linux/proc_fs.h>
  82#include <linux/seq_file.h>
  83
  84#include <crypto/hash.h>
  85#include <linux/scatterlist.h>
  86
  87int sysctl_tcp_tw_reuse __read_mostly;
  88int sysctl_tcp_low_latency __read_mostly;
  89EXPORT_SYMBOL(sysctl_tcp_low_latency);
  90
  91#ifdef CONFIG_TCP_MD5SIG
  92static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
  93			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
  94#endif
  95
  96struct inet_hashinfo tcp_hashinfo;
  97EXPORT_SYMBOL(tcp_hashinfo);
  98
  99static  __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
 100{
 101	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
 102					  ip_hdr(skb)->saddr,
 103					  tcp_hdr(skb)->dest,
 104					  tcp_hdr(skb)->source);
 105}
 106
 107int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
 108{
 109	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
 110	struct tcp_sock *tp = tcp_sk(sk);
 111
 112	/* With PAWS, it is safe from the viewpoint
 113	   of data integrity. Even without PAWS it is safe provided sequence
 114	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.
 115
  116	   Actually, the idea is close to VJ's, only the timestamp cache is
  117	   held not per host but per port pair, and the TW bucket is used as
  118	   the state holder.
  119
  120	   If the TW bucket has already been destroyed we fall back to VJ's
  121	   scheme and use the initial timestamp retrieved from the peer table.
 122	 */
 123	if (tcptw->tw_ts_recent_stamp &&
 124	    (!twp || (sysctl_tcp_tw_reuse &&
 125			     get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
 126		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
 127		if (tp->write_seq == 0)
 128			tp->write_seq = 1;
 129		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
 130		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
 131		sock_hold(sktw);
 132		return 1;
 133	}
 134
 135	return 0;
 136}
 137EXPORT_SYMBOL_GPL(tcp_twsk_unique);
 138
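/*
 * A minimal userspace sketch (not part of this file; assumes procfs is
 * mounted at /proc): the reuse path above only fires when the
 * tcp_tw_reuse sysctl is enabled, which can be done like this:
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		FILE *f = fopen("/proc/sys/net/ipv4/tcp_tw_reuse", "w");
 *
 *		if (!f)
 *			return 1;
 *		fputs("1", f);	// allow reusing TIME-WAIT ports for new connections
 *		return fclose(f) ? 1 : 0;
 *	}
 */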
 139/* This will initiate an outgoing connection. */
 140int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 141{
 142	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
 143	struct inet_sock *inet = inet_sk(sk);
 144	struct tcp_sock *tp = tcp_sk(sk);
 145	__be16 orig_sport, orig_dport;
 146	__be32 daddr, nexthop;
 147	struct flowi4 *fl4;
 148	struct rtable *rt;
 149	int err;
 150	struct ip_options_rcu *inet_opt;
 151
 152	if (addr_len < sizeof(struct sockaddr_in))
 153		return -EINVAL;
 154
 155	if (usin->sin_family != AF_INET)
 156		return -EAFNOSUPPORT;
 157
 158	nexthop = daddr = usin->sin_addr.s_addr;
 159	inet_opt = rcu_dereference_protected(inet->inet_opt,
 160					     sock_owned_by_user(sk));
 161	if (inet_opt && inet_opt->opt.srr) {
 162		if (!daddr)
 163			return -EINVAL;
 164		nexthop = inet_opt->opt.faddr;
 165	}
 166
 167	orig_sport = inet->inet_sport;
 168	orig_dport = usin->sin_port;
 169	fl4 = &inet->cork.fl.u.ip4;
 170	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
 171			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
 172			      IPPROTO_TCP,
 173			      orig_sport, orig_dport, sk);
 174	if (IS_ERR(rt)) {
 175		err = PTR_ERR(rt);
 176		if (err == -ENETUNREACH)
 177			IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
 178		return err;
 179	}
 180
 181	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
 182		ip_rt_put(rt);
 183		return -ENETUNREACH;
 184	}
 185
 186	if (!inet_opt || !inet_opt->opt.srr)
 187		daddr = fl4->daddr;
 188
 189	if (!inet->inet_saddr)
 190		inet->inet_saddr = fl4->saddr;
 191	sk_rcv_saddr_set(sk, inet->inet_saddr);
 192
 193	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
 194		/* Reset inherited state */
 195		tp->rx_opt.ts_recent	   = 0;
 196		tp->rx_opt.ts_recent_stamp = 0;
 197		if (likely(!tp->repair))
 198			tp->write_seq	   = 0;
 199	}
 200
 201	if (tcp_death_row.sysctl_tw_recycle &&
 202	    !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
 203		tcp_fetch_timewait_stamp(sk, &rt->dst);
 204
 205	inet->inet_dport = usin->sin_port;
 206	sk_daddr_set(sk, daddr);
 207
 208	inet_csk(sk)->icsk_ext_hdr_len = 0;
 209	if (inet_opt)
 210		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
 211
 212	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;
 213
 214	/* Socket identity is still unknown (sport may be zero).
  215	 * However we set state to SYN-SENT and, without releasing the socket
  216	 * lock, select a source port, enter ourselves into the hash tables
  217	 * and complete initialization after this.
 218	 */
 219	tcp_set_state(sk, TCP_SYN_SENT);
 220	err = inet_hash_connect(&tcp_death_row, sk);
 221	if (err)
 222		goto failure;
 223
 224	sk_set_txhash(sk);
 225
 226	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
 227			       inet->inet_sport, inet->inet_dport, sk);
 228	if (IS_ERR(rt)) {
 229		err = PTR_ERR(rt);
 230		rt = NULL;
 231		goto failure;
 232	}
 233	/* OK, now commit destination to socket.  */
 234	sk->sk_gso_type = SKB_GSO_TCPV4;
 235	sk_setup_caps(sk, &rt->dst);
 236
 237	if (!tp->write_seq && likely(!tp->repair))
 238		tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
 239							   inet->inet_daddr,
 240							   inet->inet_sport,
 241							   usin->sin_port);
 242
 243	inet->inet_id = tp->write_seq ^ jiffies;
 244
 245	err = tcp_connect(sk);
 246
 247	rt = NULL;
 248	if (err)
 249		goto failure;
 250
 251	return 0;
 252
 253failure:
 254	/*
 255	 * This unhashes the socket and releases the local port,
 256	 * if necessary.
 257	 */
 258	tcp_set_state(sk, TCP_CLOSE);
 259	ip_rt_put(rt);
 260	sk->sk_route_caps = 0;
 261	inet->inet_dport = 0;
 262	return err;
 263}
 264EXPORT_SYMBOL(tcp_v4_connect);
 265
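/*
 * Illustrative userspace sketch (address and port below are placeholders,
 * not taken from this file): the addr_len and sin_family checks at the top
 * of tcp_v4_connect() correspond directly to what a caller passes to
 * connect(2):
 *
 *	#include <arpa/inet.h>
 *	#include <sys/socket.h>
 *	#include <unistd.h>
 *
 *	int connect_v4(void)
 *	{
 *		struct sockaddr_in sin = {
 *			.sin_family = AF_INET,		// else -EAFNOSUPPORT
 *			.sin_port   = htons(8080),
 *		};
 *		int fd = socket(AF_INET, SOCK_STREAM, 0);
 *
 *		if (fd < 0)
 *			return -1;
 *		inet_pton(AF_INET, "192.0.2.1", &sin.sin_addr);
 *		// passing less than sizeof(sin) would yield -EINVAL
 *		if (connect(fd, (struct sockaddr *)&sin, sizeof(sin)) < 0) {
 *			close(fd);
 *			return -1;
 *		}
 *		return fd;
 *	}
 */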
 266/*
 267 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
  268 * It can be called through tcp_release_cb() if the socket was owned by
  269 * the user at the time tcp_v4_err() was called to handle the ICMP message.
 270 */
 271void tcp_v4_mtu_reduced(struct sock *sk)
 272{
 273	struct dst_entry *dst;
 274	struct inet_sock *inet = inet_sk(sk);
 275	u32 mtu = tcp_sk(sk)->mtu_info;
 276
 277	dst = inet_csk_update_pmtu(sk, mtu);
 278	if (!dst)
 279		return;
 280
  281	/* Something is about to go wrong... Remember the soft error
  282	 * in case this connection is not able to recover.
 283	 */
 284	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
 285		sk->sk_err_soft = EMSGSIZE;
 286
 287	mtu = dst_mtu(dst);
 288
 289	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
 290	    ip_sk_accept_pmtu(sk) &&
 291	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
 292		tcp_sync_mss(sk, mtu);
 293
 294		/* Resend the TCP packet because it's
 295		 * clear that the old packet has been
 296		 * dropped. This is the new "fast" path mtu
 297		 * discovery.
 298		 */
 299		tcp_simple_retransmit(sk);
 300	} /* else let the usual retransmit timer handle it */
 301}
 302EXPORT_SYMBOL(tcp_v4_mtu_reduced);
 303
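/*
 * Illustrative userspace sketch: the inet->pmtudisc test in
 * tcp_v4_mtu_reduced() is what the IP_MTU_DISCOVER socket option feeds;
 * with IP_PMTUDISC_DONT the fast-path MSS shrink above is skipped:
 *
 *	#include <netinet/in.h>
 *	#include <sys/socket.h>
 *
 *	static int disable_pmtu_discovery(int fd)
 *	{
 *		int val = IP_PMTUDISC_DONT;	// don't set DF, ignore PMTU hints
 *
 *		return setsockopt(fd, IPPROTO_IP, IP_MTU_DISCOVER,
 *				  &val, sizeof(val));
 *	}
 */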
 304static void do_redirect(struct sk_buff *skb, struct sock *sk)
 305{
 306	struct dst_entry *dst = __sk_dst_check(sk, 0);
 307
 308	if (dst)
 309		dst->ops->redirect(dst, sk, skb);
 310}
 311
 312
 313/* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
 314void tcp_req_err(struct sock *sk, u32 seq, bool abort)
 315{
 316	struct request_sock *req = inet_reqsk(sk);
 317	struct net *net = sock_net(sk);
 318
 319	/* ICMPs are not backlogged, hence we cannot get
 320	 * an established socket here.
 321	 */
 322	if (seq != tcp_rsk(req)->snt_isn) {
 323		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
 324	} else if (abort) {
 325		/*
 326		 * Still in SYN_RECV, just remove it silently.
 327		 * There is no good way to pass the error to the newly
 328		 * created socket, and POSIX does not want network
 329		 * errors returned from accept().
 330		 */
 331		inet_csk_reqsk_queue_drop(req->rsk_listener, req);
 332		NET_INC_STATS_BH(net, LINUX_MIB_LISTENDROPS);
 333	}
 334	reqsk_put(req);
 335}
 336EXPORT_SYMBOL(tcp_req_err);
 337
 338/*
 339 * This routine is called by the ICMP module when it gets some
 340 * sort of error condition.  If err < 0 then the socket should
 341 * be closed and the error returned to the user.  If err > 0
 342 * it's just the icmp type << 8 | icmp code.  After adjustment
 343 * header points to the first 8 bytes of the tcp header.  We need
 344 * to find the appropriate port.
 345 *
 346 * The locking strategy used here is very "optimistic". When
 347 * someone else accesses the socket the ICMP is just dropped
 348 * and for some paths there is no check at all.
 349 * A more general error queue to queue errors for later handling
 350 * is probably better.
 351 *
 352 */
 353
 354void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
 355{
 356	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
 357	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
 358	struct inet_connection_sock *icsk;
 359	struct tcp_sock *tp;
 360	struct inet_sock *inet;
 361	const int type = icmp_hdr(icmp_skb)->type;
 362	const int code = icmp_hdr(icmp_skb)->code;
 363	struct sock *sk;
 364	struct sk_buff *skb;
 365	struct request_sock *fastopen;
 366	__u32 seq, snd_una;
 367	__u32 remaining;
 368	int err;
 369	struct net *net = dev_net(icmp_skb->dev);
 370
 371	sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
 372				       th->dest, iph->saddr, ntohs(th->source),
 373				       inet_iif(icmp_skb));
 374	if (!sk) {
 375		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
 376		return;
 377	}
 378	if (sk->sk_state == TCP_TIME_WAIT) {
 379		inet_twsk_put(inet_twsk(sk));
 380		return;
 381	}
 382	seq = ntohl(th->seq);
 383	if (sk->sk_state == TCP_NEW_SYN_RECV)
 384		return tcp_req_err(sk, seq,
 385				  type == ICMP_PARAMETERPROB ||
 386				  type == ICMP_TIME_EXCEEDED ||
 387				  (type == ICMP_DEST_UNREACH &&
 388				   (code == ICMP_NET_UNREACH ||
 389				    code == ICMP_HOST_UNREACH)));
 390
 391	bh_lock_sock(sk);
 392	/* If too many ICMPs get dropped on busy
 393	 * servers this needs to be solved differently.
  394	 * We do take care of the PMTU discovery (RFC1191) special case:
  395	 * we can receive locally generated ICMP messages while the socket is held.
 396	 */
 397	if (sock_owned_by_user(sk)) {
 398		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
 399			NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
 400	}
 401	if (sk->sk_state == TCP_CLOSE)
 402		goto out;
 403
 404	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
 405		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
 406		goto out;
 407	}
 408
 409	icsk = inet_csk(sk);
 410	tp = tcp_sk(sk);
  411	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child()) */
 412	fastopen = tp->fastopen_rsk;
 413	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
 414	if (sk->sk_state != TCP_LISTEN &&
 415	    !between(seq, snd_una, tp->snd_nxt)) {
 416		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
 417		goto out;
 418	}
 419
 420	switch (type) {
 421	case ICMP_REDIRECT:
 422		do_redirect(icmp_skb, sk);
 423		goto out;
 424	case ICMP_SOURCE_QUENCH:
 425		/* Just silently ignore these. */
 426		goto out;
 427	case ICMP_PARAMETERPROB:
 428		err = EPROTO;
 429		break;
 430	case ICMP_DEST_UNREACH:
 431		if (code > NR_ICMP_UNREACH)
 432			goto out;
 433
 434		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
 435			/* We are not interested in TCP_LISTEN and open_requests
  436			 * (SYN-ACKs sent out by Linux are always < 576 bytes so
 437			 * they should go through unfragmented).
 438			 */
 439			if (sk->sk_state == TCP_LISTEN)
 440				goto out;
 441
 442			tp->mtu_info = info;
 443			if (!sock_owned_by_user(sk)) {
 444				tcp_v4_mtu_reduced(sk);
 445			} else {
 446				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
 447					sock_hold(sk);
 448			}
 449			goto out;
 450		}
 451
 452		err = icmp_err_convert[code].errno;
 453		/* check if icmp_skb allows revert of backoff
 454		 * (see draft-zimmermann-tcp-lcd) */
 455		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
 456			break;
 457		if (seq != tp->snd_una  || !icsk->icsk_retransmits ||
 458		    !icsk->icsk_backoff || fastopen)
 459			break;
 460
 461		if (sock_owned_by_user(sk))
 462			break;
 463
 464		icsk->icsk_backoff--;
 465		icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
 466					       TCP_TIMEOUT_INIT;
 467		icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
 468
 469		skb = tcp_write_queue_head(sk);
 470		BUG_ON(!skb);
 471
 472		remaining = icsk->icsk_rto -
 473			    min(icsk->icsk_rto,
 474				tcp_time_stamp - tcp_skb_timestamp(skb));
 475
 476		if (remaining) {
 477			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
 478						  remaining, TCP_RTO_MAX);
 479		} else {
 480			/* RTO revert clocked out retransmission.
 481			 * Will retransmit now */
 482			tcp_retransmit_timer(sk);
 483		}
 484
 485		break;
 486	case ICMP_TIME_EXCEEDED:
 487		err = EHOSTUNREACH;
 488		break;
 489	default:
 490		goto out;
 491	}
 492
 493	switch (sk->sk_state) {
 494	case TCP_SYN_SENT:
 495	case TCP_SYN_RECV:
 496		/* Only in fast or simultaneous open. If a fast open socket is
  497		 * already accepted it is treated as a connected one below.
 498		 */
 499		if (fastopen && !fastopen->sk)
 500			break;
  501
 502		if (!sock_owned_by_user(sk)) {
 503			sk->sk_err = err;
 504
 505			sk->sk_error_report(sk);
 506
 507			tcp_done(sk);
 508		} else {
 509			sk->sk_err_soft = err;
 510		}
 511		goto out;
 512	}
 513
 514	/* If we've already connected we will keep trying
 515	 * until we time out, or the user gives up.
 516	 *
  517	 * rfc1122 4.2.3.9 allows us to consider only PROTO_UNREACH and
  518	 * PORT_UNREACH as hard errors (well, FRAG_FAILED too,
  519	 * but it is obsoleted by pmtu discovery).
  520	 *
  521	 * Note that in the modern internet, where routing is unreliable
  522	 * and broken firewalls sit in every dark corner sending random
  523	 * errors ordered by their masters, even these two messages finally
  524	 * lose their original sense (even Linux sends invalid PORT_UNREACHs)
 525	 *
 526	 * Now we are in compliance with RFCs.
 527	 *							--ANK (980905)
 528	 */
 529
 530	inet = inet_sk(sk);
 531	if (!sock_owned_by_user(sk) && inet->recverr) {
 532		sk->sk_err = err;
 533		sk->sk_error_report(sk);
 534	} else	{ /* Only an error on timeout */
 535		sk->sk_err_soft = err;
 536	}
 537
 538out:
 539	bh_unlock_sock(sk);
 540	sock_put(sk);
 541}
 542
 543void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
 544{
 545	struct tcphdr *th = tcp_hdr(skb);
 546
 547	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 548		th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
 549		skb->csum_start = skb_transport_header(skb) - skb->head;
 550		skb->csum_offset = offsetof(struct tcphdr, check);
 551	} else {
 552		th->check = tcp_v4_check(skb->len, saddr, daddr,
 553					 csum_partial(th,
 554						      th->doff << 2,
 555						      skb->csum));
 556	}
 557}
 558
 559/* This routine computes an IPv4 TCP checksum. */
 560void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
 561{
 562	const struct inet_sock *inet = inet_sk(sk);
 563
 564	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
 565}
 566EXPORT_SYMBOL(tcp_v4_send_check);
 567
 568/*
 569 *	This routine will send an RST to the other tcp.
 570 *
 571 *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 572 *		      for reset.
  573 *	Answer: if a packet caused an RST, it is not for a socket
  574 *		existing in our system; if it is matched to a socket,
  575 *		it is just a duplicate segment or a bug in the other
  576 *		side's TCP. So we build the reply based only on the
  577 *		parameters that arrived with the segment.
 578 *	Exception: precedence violation. We do not implement it in any case.
 579 */
 580
 581static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
 582{
 583	const struct tcphdr *th = tcp_hdr(skb);
 584	struct {
 585		struct tcphdr th;
 586#ifdef CONFIG_TCP_MD5SIG
 587		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
 588#endif
 589	} rep;
 590	struct ip_reply_arg arg;
 591#ifdef CONFIG_TCP_MD5SIG
 592	struct tcp_md5sig_key *key = NULL;
 593	const __u8 *hash_location = NULL;
 594	unsigned char newhash[16];
 595	int genhash;
 596	struct sock *sk1 = NULL;
 597#endif
 598	struct net *net;
 599
 600	/* Never send a reset in response to a reset. */
 601	if (th->rst)
 602		return;
 603
  604	/* If sk is not NULL, it means we did a successful lookup and the
  605	 * incoming route had to be correct. prequeue might have dropped our dst.
 606	 */
 607	if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
 608		return;
 609
 610	/* Swap the send and the receive. */
 611	memset(&rep, 0, sizeof(rep));
 612	rep.th.dest   = th->source;
 613	rep.th.source = th->dest;
 614	rep.th.doff   = sizeof(struct tcphdr) / 4;
 615	rep.th.rst    = 1;
 616
 617	if (th->ack) {
 618		rep.th.seq = th->ack_seq;
 619	} else {
 620		rep.th.ack = 1;
 621		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
 622				       skb->len - (th->doff << 2));
 623	}
 624
 625	memset(&arg, 0, sizeof(arg));
 626	arg.iov[0].iov_base = (unsigned char *)&rep;
 627	arg.iov[0].iov_len  = sizeof(rep.th);
 628
 629	net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
 630#ifdef CONFIG_TCP_MD5SIG
 631	hash_location = tcp_parse_md5sig_option(th);
 632	if (sk && sk_fullsock(sk)) {
 633		key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
 634					&ip_hdr(skb)->saddr, AF_INET);
 635	} else if (hash_location) {
  636		/*
  637		 * The active side is lost. Try to find the listening socket
  638		 * through the source port, and then find the md5 key through
  639		 * the listening socket. We do not lose security here:
  640		 * the incoming packet is checked against the md5 hash of the
  641		 * key we find; no RST is generated if the md5 hash doesn't match.
  642		 */
 643		sk1 = __inet_lookup_listener(net, &tcp_hashinfo, NULL, 0,
 644					     ip_hdr(skb)->saddr,
 645					     th->source, ip_hdr(skb)->daddr,
 646					     ntohs(th->source), inet_iif(skb));
 647		/* don't send rst if it can't find key */
 648		if (!sk1)
 649			return;
 650		rcu_read_lock();
 651		key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
 652					&ip_hdr(skb)->saddr, AF_INET);
 653		if (!key)
 654			goto release_sk1;
 655
 656		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
 657		if (genhash || memcmp(hash_location, newhash, 16) != 0)
 658			goto release_sk1;
 659	}
 660
 661	if (key) {
 662		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
 663				   (TCPOPT_NOP << 16) |
 664				   (TCPOPT_MD5SIG << 8) |
 665				   TCPOLEN_MD5SIG);
 666		/* Update length and the length the header thinks exists */
 667		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
 668		rep.th.doff = arg.iov[0].iov_len / 4;
 669
 670		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
 671				     key, ip_hdr(skb)->saddr,
 672				     ip_hdr(skb)->daddr, &rep.th);
 673	}
 674#endif
 675	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
 676				      ip_hdr(skb)->saddr, /* XXX */
 677				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
 678	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
 679	arg.flags = (sk && inet_sk_transparent(sk)) ? IP_REPLY_ARG_NOSRCCHECK : 0;
 680
  681	/* When the socket is gone, all binding information is lost and
  682	 * routing might fail. No choice here: if we choose to force the
  683	 * input interface, we will misroute in case of an asymmetric route.
 684	 */
 685	if (sk)
 686		arg.bound_dev_if = sk->sk_bound_dev_if;
 687
 688	BUILD_BUG_ON(offsetof(struct sock, sk_bound_dev_if) !=
 689		     offsetof(struct inet_timewait_sock, tw_bound_dev_if));
 690
 691	arg.tos = ip_hdr(skb)->tos;
 692	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
 693			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
 694			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
 695			      &arg, arg.iov[0].iov_len);
 696
 697	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
 698	TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
 699
 700#ifdef CONFIG_TCP_MD5SIG
 701release_sk1:
 702	if (sk1) {
 703		rcu_read_unlock();
 704		sock_put(sk1);
 705	}
 706#endif
 707}
 708
  709/* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
  710   outside of socket context, is certainly ugly. What can I do?
 711 */
 712
 713static void tcp_v4_send_ack(struct net *net,
 714			    struct sk_buff *skb, u32 seq, u32 ack,
 715			    u32 win, u32 tsval, u32 tsecr, int oif,
 716			    struct tcp_md5sig_key *key,
 717			    int reply_flags, u8 tos)
 718{
 719	const struct tcphdr *th = tcp_hdr(skb);
 720	struct {
 721		struct tcphdr th;
 722		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
 723#ifdef CONFIG_TCP_MD5SIG
 724			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
 725#endif
 726			];
 727	} rep;
 728	struct ip_reply_arg arg;
 729
 730	memset(&rep.th, 0, sizeof(struct tcphdr));
 731	memset(&arg, 0, sizeof(arg));
 732
 733	arg.iov[0].iov_base = (unsigned char *)&rep;
 734	arg.iov[0].iov_len  = sizeof(rep.th);
 735	if (tsecr) {
 736		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
 737				   (TCPOPT_TIMESTAMP << 8) |
 738				   TCPOLEN_TIMESTAMP);
 739		rep.opt[1] = htonl(tsval);
 740		rep.opt[2] = htonl(tsecr);
 741		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
 742	}
 743
 744	/* Swap the send and the receive. */
 745	rep.th.dest    = th->source;
 746	rep.th.source  = th->dest;
 747	rep.th.doff    = arg.iov[0].iov_len / 4;
 748	rep.th.seq     = htonl(seq);
 749	rep.th.ack_seq = htonl(ack);
 750	rep.th.ack     = 1;
 751	rep.th.window  = htons(win);
 752
 753#ifdef CONFIG_TCP_MD5SIG
 754	if (key) {
 755		int offset = (tsecr) ? 3 : 0;
 756
 757		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
 758					  (TCPOPT_NOP << 16) |
 759					  (TCPOPT_MD5SIG << 8) |
 760					  TCPOLEN_MD5SIG);
 761		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
 762		rep.th.doff = arg.iov[0].iov_len/4;
 763
 764		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
 765				    key, ip_hdr(skb)->saddr,
 766				    ip_hdr(skb)->daddr, &rep.th);
 767	}
 768#endif
 769	arg.flags = reply_flags;
 770	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
 771				      ip_hdr(skb)->saddr, /* XXX */
 772				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
 773	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
 774	if (oif)
 775		arg.bound_dev_if = oif;
 776	arg.tos = tos;
 777	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
 778			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
 779			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
 780			      &arg, arg.iov[0].iov_len);
 781
 782	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
 783}
 784
 785static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
 786{
 787	struct inet_timewait_sock *tw = inet_twsk(sk);
 788	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
 789
 790	tcp_v4_send_ack(sock_net(sk), skb,
 791			tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
 792			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
 793			tcp_time_stamp + tcptw->tw_ts_offset,
 794			tcptw->tw_ts_recent,
 795			tw->tw_bound_dev_if,
 796			tcp_twsk_md5_key(tcptw),
 797			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
 798			tw->tw_tos
 799			);
 800
 801	inet_twsk_put(tw);
 802}
 803
 804static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
 805				  struct request_sock *req)
 806{
 807	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
 808	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
 809	 */
 810	u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
 811					     tcp_sk(sk)->snd_nxt;
 812
 813	tcp_v4_send_ack(sock_net(sk), skb, seq,
 814			tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd,
 815			tcp_time_stamp,
 816			req->ts_recent,
 817			0,
 818			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
 819					  AF_INET),
 820			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
 821			ip_hdr(skb)->tos);
 822}
 823
 824/*
 825 *	Send a SYN-ACK after having received a SYN.
 826 *	This still operates on a request_sock only, not on a big
 827 *	socket.
 828 */
 829static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
 830			      struct flowi *fl,
 831			      struct request_sock *req,
 832			      struct tcp_fastopen_cookie *foc,
 833				  bool attach_req)
 834{
 835	const struct inet_request_sock *ireq = inet_rsk(req);
 836	struct flowi4 fl4;
 837	int err = -1;
 838	struct sk_buff *skb;
 839
 840	/* First, grab a route. */
 841	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
 842		return -1;
 843
 844	skb = tcp_make_synack(sk, dst, req, foc, attach_req);
 845
 846	if (skb) {
 847		__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
 848
 849		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
 850					    ireq->ir_rmt_addr,
 851					    ireq->opt);
 852		err = net_xmit_eval(err);
 853	}
 854
 855	return err;
 856}
 857
 858/*
 859 *	IPv4 request_sock destructor.
 860 */
 861static void tcp_v4_reqsk_destructor(struct request_sock *req)
 862{
 863	kfree(inet_rsk(req)->opt);
 864}
 865
 866#ifdef CONFIG_TCP_MD5SIG
 867/*
 868 * RFC2385 MD5 checksumming requires a mapping of
 869 * IP address->MD5 Key.
 870 * We need to maintain these in the sk structure.
 871 */
 872
 873/* Find the Key structure for an address.  */
 874struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
 875					 const union tcp_md5_addr *addr,
 876					 int family)
 877{
 878	const struct tcp_sock *tp = tcp_sk(sk);
 879	struct tcp_md5sig_key *key;
 880	unsigned int size = sizeof(struct in_addr);
 881	const struct tcp_md5sig_info *md5sig;
 882
 883	/* caller either holds rcu_read_lock() or socket lock */
 884	md5sig = rcu_dereference_check(tp->md5sig_info,
 885				       sock_owned_by_user(sk) ||
 886				       lockdep_is_held((spinlock_t *)&sk->sk_lock.slock));
 887	if (!md5sig)
 888		return NULL;
 889#if IS_ENABLED(CONFIG_IPV6)
 890	if (family == AF_INET6)
 891		size = sizeof(struct in6_addr);
 892#endif
 893	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
 894		if (key->family != family)
 895			continue;
 896		if (!memcmp(&key->addr, addr, size))
 897			return key;
 898	}
 899	return NULL;
 900}
 901EXPORT_SYMBOL(tcp_md5_do_lookup);
 902
 903struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
 904					 const struct sock *addr_sk)
 905{
 906	const union tcp_md5_addr *addr;
 907
 908	addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
 909	return tcp_md5_do_lookup(sk, addr, AF_INET);
 910}
 911EXPORT_SYMBOL(tcp_v4_md5_lookup);
 912
 913/* This can be called on a newly created socket, from other files */
 914int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
 915		   int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
 916{
 917	/* Add Key to the list */
 918	struct tcp_md5sig_key *key;
 919	struct tcp_sock *tp = tcp_sk(sk);
 920	struct tcp_md5sig_info *md5sig;
 921
 922	key = tcp_md5_do_lookup(sk, addr, family);
 923	if (key) {
 924		/* Pre-existing entry - just update that one. */
 925		memcpy(key->key, newkey, newkeylen);
 926		key->keylen = newkeylen;
 927		return 0;
 928	}
 929
 930	md5sig = rcu_dereference_protected(tp->md5sig_info,
 931					   sock_owned_by_user(sk) ||
 932					   lockdep_is_held(&sk->sk_lock.slock));
 933	if (!md5sig) {
 934		md5sig = kmalloc(sizeof(*md5sig), gfp);
 935		if (!md5sig)
 936			return -ENOMEM;
 937
 938		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
 939		INIT_HLIST_HEAD(&md5sig->head);
 940		rcu_assign_pointer(tp->md5sig_info, md5sig);
 941	}
 942
 943	key = sock_kmalloc(sk, sizeof(*key), gfp);
 944	if (!key)
 945		return -ENOMEM;
 946	if (!tcp_alloc_md5sig_pool()) {
 947		sock_kfree_s(sk, key, sizeof(*key));
 948		return -ENOMEM;
 949	}
 950
 951	memcpy(key->key, newkey, newkeylen);
 952	key->keylen = newkeylen;
 953	key->family = family;
 954	memcpy(&key->addr, addr,
 955	       (family == AF_INET6) ? sizeof(struct in6_addr) :
 956				      sizeof(struct in_addr));
 957	hlist_add_head_rcu(&key->node, &md5sig->head);
 958	return 0;
 959}
 960EXPORT_SYMBOL(tcp_md5_do_add);
 961
 962int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
 963{
  964	struct tcp_md5sig_key *key;
 965
 966	key = tcp_md5_do_lookup(sk, addr, family);
 967	if (!key)
 968		return -ENOENT;
 969	hlist_del_rcu(&key->node);
 970	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
 971	kfree_rcu(key, rcu);
 972	return 0;
 973}
 974EXPORT_SYMBOL(tcp_md5_do_del);
 975
 976static void tcp_clear_md5_list(struct sock *sk)
 977{
 978	struct tcp_sock *tp = tcp_sk(sk);
 979	struct tcp_md5sig_key *key;
 980	struct hlist_node *n;
 981	struct tcp_md5sig_info *md5sig;
 982
 983	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
 984
 985	hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
 986		hlist_del_rcu(&key->node);
 987		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
 988		kfree_rcu(key, rcu);
 989	}
 990}
 991
 992static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
 993				 int optlen)
 994{
 995	struct tcp_md5sig cmd;
 996	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
 997
 998	if (optlen < sizeof(cmd))
 999		return -EINVAL;
1000
1001	if (copy_from_user(&cmd, optval, sizeof(cmd)))
1002		return -EFAULT;
1003
1004	if (sin->sin_family != AF_INET)
1005		return -EINVAL;
1006
1007	if (!cmd.tcpm_keylen)
1008		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1009				      AF_INET);
1010
1011	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
1012		return -EINVAL;
1013
1014	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1015			      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
1016			      GFP_KERNEL);
1017}
1018
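/*
 * Illustrative userspace sketch (the key below is a placeholder):
 * tcp_v4_parse_md5_keys() above is the kernel side of the TCP_MD5SIG
 * socket option; the caller supplies the peer address plus a key of at
 * most TCP_MD5SIG_MAXKEYLEN bytes:
 *
 *	#include <linux/tcp.h>
 *	#include <netinet/in.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *
 *	static int add_md5_key(int fd, const struct sockaddr_in *peer)
 *	{
 *		struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *
 *		memcpy(&md5.tcpm_addr, peer, sizeof(*peer));
 *		memcpy(md5.tcpm_key, "secret", 6);
 *		return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG,
 *				  &md5, sizeof(md5));
 *	}
 */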
1019static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
1020					__be32 daddr, __be32 saddr, int nbytes)
1021{
1022	struct tcp4_pseudohdr *bp;
1023	struct scatterlist sg;
1024
1025	bp = &hp->md5_blk.ip4;
1026
1027	/*
1028	 * 1. the TCP pseudo-header (in the order: source IP address,
1029	 * destination IP address, zero-padded protocol number, and
1030	 * segment length)
1031	 */
1032	bp->saddr = saddr;
1033	bp->daddr = daddr;
1034	bp->pad = 0;
1035	bp->protocol = IPPROTO_TCP;
1036	bp->len = cpu_to_be16(nbytes);
1037
1038	sg_init_one(&sg, bp, sizeof(*bp));
1039	ahash_request_set_crypt(hp->md5_req, &sg, NULL, sizeof(*bp));
1040	return crypto_ahash_update(hp->md5_req);
1041}
1042
1043static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
1044			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
1045{
1046	struct tcp_md5sig_pool *hp;
1047	struct ahash_request *req;
1048
1049	hp = tcp_get_md5sig_pool();
1050	if (!hp)
1051		goto clear_hash_noput;
1052	req = hp->md5_req;
1053
1054	if (crypto_ahash_init(req))
1055		goto clear_hash;
1056	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
1057		goto clear_hash;
1058	if (tcp_md5_hash_header(hp, th))
1059		goto clear_hash;
1060	if (tcp_md5_hash_key(hp, key))
1061		goto clear_hash;
1062	ahash_request_set_crypt(req, NULL, md5_hash, 0);
1063	if (crypto_ahash_final(req))
1064		goto clear_hash;
1065
1066	tcp_put_md5sig_pool();
1067	return 0;
1068
1069clear_hash:
1070	tcp_put_md5sig_pool();
1071clear_hash_noput:
1072	memset(md5_hash, 0, 16);
1073	return 1;
1074}
1075
1076int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
1077			const struct sock *sk,
1078			const struct sk_buff *skb)
1079{
1080	struct tcp_md5sig_pool *hp;
1081	struct ahash_request *req;
1082	const struct tcphdr *th = tcp_hdr(skb);
1083	__be32 saddr, daddr;
1084
1085	if (sk) { /* valid for establish/request sockets */
1086		saddr = sk->sk_rcv_saddr;
1087		daddr = sk->sk_daddr;
1088	} else {
1089		const struct iphdr *iph = ip_hdr(skb);
1090		saddr = iph->saddr;
1091		daddr = iph->daddr;
1092	}
1093
1094	hp = tcp_get_md5sig_pool();
1095	if (!hp)
1096		goto clear_hash_noput;
1097	req = hp->md5_req;
1098
1099	if (crypto_ahash_init(req))
1100		goto clear_hash;
1101
1102	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
1103		goto clear_hash;
1104	if (tcp_md5_hash_header(hp, th))
1105		goto clear_hash;
1106	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
1107		goto clear_hash;
1108	if (tcp_md5_hash_key(hp, key))
1109		goto clear_hash;
1110	ahash_request_set_crypt(req, NULL, md5_hash, 0);
1111	if (crypto_ahash_final(req))
1112		goto clear_hash;
1113
1114	tcp_put_md5sig_pool();
1115	return 0;
1116
1117clear_hash:
1118	tcp_put_md5sig_pool();
1119clear_hash_noput:
1120	memset(md5_hash, 0, 16);
1121	return 1;
1122}
1123EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
1124
1125#endif
1126
1127/* Called with rcu_read_lock() */
1128static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
1129				    const struct sk_buff *skb)
1130{
1131#ifdef CONFIG_TCP_MD5SIG
1132	/*
1133	 * This gets called for each TCP segment that arrives
1134	 * so we want to be efficient.
1135	 * We have 3 drop cases:
1136	 * o No MD5 hash and one expected.
1137	 * o MD5 hash and we're not expecting one.
 1138	 * o MD5 hash and it's wrong.
1139	 */
1140	const __u8 *hash_location = NULL;
1141	struct tcp_md5sig_key *hash_expected;
1142	const struct iphdr *iph = ip_hdr(skb);
1143	const struct tcphdr *th = tcp_hdr(skb);
1144	int genhash;
1145	unsigned char newhash[16];
1146
1147	hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
1148					  AF_INET);
1149	hash_location = tcp_parse_md5sig_option(th);
1150
1151	/* We've parsed the options - do we have a hash? */
1152	if (!hash_expected && !hash_location)
1153		return false;
1154
1155	if (hash_expected && !hash_location) {
1156		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
1157		return true;
1158	}
1159
1160	if (!hash_expected && hash_location) {
1161		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
1162		return true;
1163	}
1164
1165	/* Okay, so this is hash_expected and hash_location -
1166	 * so we need to calculate the checksum.
1167	 */
1168	genhash = tcp_v4_md5_hash_skb(newhash,
1169				      hash_expected,
1170				      NULL, skb);
1171
1172	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
1173		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
1174				     &iph->saddr, ntohs(th->source),
1175				     &iph->daddr, ntohs(th->dest),
1176				     genhash ? " tcp_v4_calc_md5_hash failed"
1177				     : "");
1178		return true;
1179	}
1180	return false;
1181#endif
1182	return false;
1183}
1184
1185static void tcp_v4_init_req(struct request_sock *req,
1186			    const struct sock *sk_listener,
1187			    struct sk_buff *skb)
1188{
1189	struct inet_request_sock *ireq = inet_rsk(req);
1190
1191	sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
1192	sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
1193	ireq->no_srccheck = inet_sk(sk_listener)->transparent;
1194	ireq->opt = tcp_v4_save_options(skb);
1195}
1196
1197static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
1198					  struct flowi *fl,
1199					  const struct request_sock *req,
1200					  bool *strict)
1201{
1202	struct dst_entry *dst = inet_csk_route_req(sk, &fl->u.ip4, req);
1203
1204	if (strict) {
1205		if (fl->u.ip4.daddr == inet_rsk(req)->ir_rmt_addr)
1206			*strict = true;
1207		else
1208			*strict = false;
1209	}
1210
1211	return dst;
1212}
1213
1214struct request_sock_ops tcp_request_sock_ops __read_mostly = {
1215	.family		=	PF_INET,
1216	.obj_size	=	sizeof(struct tcp_request_sock),
1217	.rtx_syn_ack	=	tcp_rtx_synack,
1218	.send_ack	=	tcp_v4_reqsk_send_ack,
1219	.destructor	=	tcp_v4_reqsk_destructor,
1220	.send_reset	=	tcp_v4_send_reset,
1221	.syn_ack_timeout =	tcp_syn_ack_timeout,
1222};
1223
1224static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1225	.mss_clamp	=	TCP_MSS_DEFAULT,
1226#ifdef CONFIG_TCP_MD5SIG
1227	.req_md5_lookup	=	tcp_v4_md5_lookup,
1228	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
1229#endif
1230	.init_req	=	tcp_v4_init_req,
1231#ifdef CONFIG_SYN_COOKIES
1232	.cookie_init_seq =	cookie_v4_init_sequence,
1233#endif
1234	.route_req	=	tcp_v4_route_req,
1235	.init_seq	=	tcp_v4_init_sequence,
1236	.send_synack	=	tcp_v4_send_synack,
1237};
1238
1239int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1240{
 1241	/* Never answer to SYNs sent to broadcast or multicast */
1242	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
1243		goto drop;
1244
1245	return tcp_conn_request(&tcp_request_sock_ops,
1246				&tcp_request_sock_ipv4_ops, sk, skb);
 1247
1248drop:
1249	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1250	return 0;
1251}
1252EXPORT_SYMBOL(tcp_v4_conn_request);
1253
1254
1255/*
1256 * The three way handshake has completed - we got a valid synack -
1257 * now create the new socket.
1258 */
1259struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
1260				  struct request_sock *req,
1261				  struct dst_entry *dst,
1262				  struct request_sock *req_unhash,
1263				  bool *own_req)
1264{
1265	struct inet_request_sock *ireq;
1266	struct inet_sock *newinet;
1267	struct tcp_sock *newtp;
1268	struct sock *newsk;
1269#ifdef CONFIG_TCP_MD5SIG
1270	struct tcp_md5sig_key *key;
1271#endif
1272	struct ip_options_rcu *inet_opt;
1273
1274	if (sk_acceptq_is_full(sk))
1275		goto exit_overflow;
1276
1277	newsk = tcp_create_openreq_child(sk, req, skb);
1278	if (!newsk)
1279		goto exit_nonewsk;
1280
1281	newsk->sk_gso_type = SKB_GSO_TCPV4;
1282	inet_sk_rx_dst_set(newsk, skb);
1283
1284	newtp		      = tcp_sk(newsk);
1285	newinet		      = inet_sk(newsk);
1286	ireq		      = inet_rsk(req);
1287	sk_daddr_set(newsk, ireq->ir_rmt_addr);
1288	sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
1289	newsk->sk_bound_dev_if = ireq->ir_iif;
1290	newinet->inet_saddr	      = ireq->ir_loc_addr;
1291	inet_opt	      = ireq->opt;
1292	rcu_assign_pointer(newinet->inet_opt, inet_opt);
1293	ireq->opt	      = NULL;
1294	newinet->mc_index     = inet_iif(skb);
1295	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
1296	newinet->rcv_tos      = ip_hdr(skb)->tos;
1297	inet_csk(newsk)->icsk_ext_hdr_len = 0;
1298	if (inet_opt)
1299		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
1300	newinet->inet_id = newtp->write_seq ^ jiffies;
1301
1302	if (!dst) {
1303		dst = inet_csk_route_child_sock(sk, newsk, req);
1304		if (!dst)
1305			goto put_and_exit;
1306	} else {
1307		/* syncookie case : see end of cookie_v4_check() */
1308	}
1309	sk_setup_caps(newsk, dst);
1310
1311	tcp_ca_openreq_child(newsk, dst);
1312
1313	tcp_sync_mss(newsk, dst_mtu(dst));
1314	newtp->advmss = dst_metric_advmss(dst);
1315	if (tcp_sk(sk)->rx_opt.user_mss &&
1316	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1317		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1318
1319	tcp_initialize_rcv_mss(newsk);
1320
1321#ifdef CONFIG_TCP_MD5SIG
1322	/* Copy over the MD5 key from the original socket */
1323	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
1324				AF_INET);
1325	if (key) {
1326		/*
1327		 * We're using one, so create a matching key
1328		 * on the newsk structure. If we fail to get
1329		 * memory, then we end up not copying the key
1330		 * across. Shucks.
1331		 */
1332		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
1333			       AF_INET, key->key, key->keylen, GFP_ATOMIC);
1334		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
1335	}
1336#endif
1337
1338	if (__inet_inherit_port(sk, newsk) < 0)
1339		goto put_and_exit;
1340	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
1341	if (*own_req)
1342		tcp_move_syn(newtp, req);
1343
1344	return newsk;
1345
1346exit_overflow:
1347	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1348exit_nonewsk:
1349	dst_release(dst);
1350exit:
1351	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1352	return NULL;
1353put_and_exit:
1354	inet_csk_prepare_forced_close(newsk);
1355	tcp_done(newsk);
1356	goto exit;
1357}
1358EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
1359
1360static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
1361{
1362#ifdef CONFIG_SYN_COOKIES
1363	const struct tcphdr *th = tcp_hdr(skb);
 1364
1365	if (!th->syn)
1366		sk = cookie_v4_check(sk, skb);
1367#endif
1368	return sk;
1369}
1370
 1371/* The socket must have its spinlock held when we get
1372 * here, unless it is a TCP_LISTEN socket.
1373 *
1374 * We have a potential double-lock case here, so even when
1375 * doing backlog processing we use the BH locking scheme.
1376 * This is because we cannot sleep with the original spinlock
1377 * held.
1378 */
1379int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1380{
1381	struct sock *rsk;
1382
1383	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1384		struct dst_entry *dst = sk->sk_rx_dst;
1385
1386		sock_rps_save_rxhash(sk, skb);
1387		sk_mark_napi_id(sk, skb);
1388		if (dst) {
1389			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1390			    !dst->ops->check(dst, 0)) {
1391				dst_release(dst);
1392				sk->sk_rx_dst = NULL;
1393			}
1394		}
1395		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
1396		return 0;
1397	}
1398
1399	if (tcp_checksum_complete(skb))
1400		goto csum_err;
1401
1402	if (sk->sk_state == TCP_LISTEN) {
1403		struct sock *nsk = tcp_v4_cookie_check(sk, skb);
1404
1405		if (!nsk)
1406			goto discard;
1407		if (nsk != sk) {
1408			sock_rps_save_rxhash(nsk, skb);
1409			sk_mark_napi_id(nsk, skb);
1410			if (tcp_child_process(sk, nsk, skb)) {
1411				rsk = nsk;
1412				goto reset;
1413			}
1414			return 0;
1415		}
1416	} else
1417		sock_rps_save_rxhash(sk, skb);
1418
1419	if (tcp_rcv_state_process(sk, skb)) {
1420		rsk = sk;
1421		goto reset;
1422	}
1423	return 0;
1424
1425reset:
1426	tcp_v4_send_reset(rsk, skb);
1427discard:
1428	kfree_skb(skb);
1429	/* Be careful here. If this function gets more complicated and
1430	 * gcc suffers from register pressure on the x86, sk (in %ebx)
1431	 * might be destroyed here. This current version compiles correctly,
1432	 * but you have been warned.
1433	 */
1434	return 0;
1435
1436csum_err:
1437	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
1438	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1439	goto discard;
1440}
1441EXPORT_SYMBOL(tcp_v4_do_rcv);
1442
1443void tcp_v4_early_demux(struct sk_buff *skb)
1444{
1445	const struct iphdr *iph;
1446	const struct tcphdr *th;
1447	struct sock *sk;
1448
1449	if (skb->pkt_type != PACKET_HOST)
1450		return;
1451
1452	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1453		return;
1454
1455	iph = ip_hdr(skb);
1456	th = tcp_hdr(skb);
1457
1458	if (th->doff < sizeof(struct tcphdr) / 4)
1459		return;
1460
1461	sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1462				       iph->saddr, th->source,
1463				       iph->daddr, ntohs(th->dest),
1464				       skb->skb_iif);
1465	if (sk) {
1466		skb->sk = sk;
1467		skb->destructor = sock_edemux;
1468		if (sk_fullsock(sk)) {
1469			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
1470
1471			if (dst)
1472				dst = dst_check(dst, 0);
1473			if (dst &&
1474			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1475				skb_dst_set_noref(skb, dst);
1476		}
1477	}
1478}
1479
1480/* Packet is added to VJ-style prequeue for processing in process
1481 * context, if a reader task is waiting. Apparently, this exciting
1482 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
1483 * failed somewhere. Latency? Burstiness? Well, at least now we will
1484 * see, why it failed. 8)8)				  --ANK
1485 *
1486 */
1487bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
1488{
1489	struct tcp_sock *tp = tcp_sk(sk);
1490
1491	if (sysctl_tcp_low_latency || !tp->ucopy.task)
1492		return false;
1493
1494	if (skb->len <= tcp_hdrlen(skb) &&
1495	    skb_queue_len(&tp->ucopy.prequeue) == 0)
1496		return false;
1497
1498	/* Before escaping RCU protected region, we need to take care of skb
1499	 * dst. Prequeue is only enabled for established sockets.
 1500	 * For such sockets, we might need the skb dst only to set sk->sk_rx_dst.
 1501	 * Instead of doing a full sk_rx_dst validity check here, let's perform
1502	 * an optimistic check.
1503	 */
1504	if (likely(sk->sk_rx_dst))
1505		skb_dst_drop(skb);
1506	else
1507		skb_dst_force_safe(skb);
1508
1509	__skb_queue_tail(&tp->ucopy.prequeue, skb);
1510	tp->ucopy.memory += skb->truesize;
1511	if (tp->ucopy.memory > sk->sk_rcvbuf) {
1512		struct sk_buff *skb1;
1513
1514		BUG_ON(sock_owned_by_user(sk));
1515
1516		while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
1517			sk_backlog_rcv(sk, skb1);
1518			NET_INC_STATS_BH(sock_net(sk),
1519					 LINUX_MIB_TCPPREQUEUEDROPPED);
1520		}
1521
1522		tp->ucopy.memory = 0;
1523	} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
1524		wake_up_interruptible_sync_poll(sk_sleep(sk),
1525					   POLLIN | POLLRDNORM | POLLRDBAND);
1526		if (!inet_csk_ack_scheduled(sk))
1527			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
1528						  (3 * tcp_rto_min(sk)) / 4,
1529						  TCP_RTO_MAX);
1530	}
1531	return true;
1532}
1533EXPORT_SYMBOL(tcp_prequeue);
1534
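/*
 * Illustrative sketch (assumes procfs at /proc): the sysctl_tcp_low_latency
 * test above means the whole prequeue can be bypassed system-wide when an
 * administrator prefers latency over CPU efficiency:
 *
 *	#include <stdio.h>
 *
 *	static int prefer_low_latency(void)
 *	{
 *		FILE *f = fopen("/proc/sys/net/ipv4/tcp_low_latency", "w");
 *
 *		if (!f)
 *			return -1;
 *		fputs("1", f);	// skip the prequeue, process in softirq context
 *		return fclose(f);
 *	}
 */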
1535/*
1536 *	From tcp_input.c
1537 */
1538
1539int tcp_v4_rcv(struct sk_buff *skb)
1540{
1541	const struct iphdr *iph;
1542	const struct tcphdr *th;
1543	struct sock *sk;
1544	int ret;
1545	struct net *net = dev_net(skb->dev);
1546
1547	if (skb->pkt_type != PACKET_HOST)
1548		goto discard_it;
1549
1550	/* Count it even if it's bad */
1551	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1552
1553	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1554		goto discard_it;
1555
1556	th = tcp_hdr(skb);
1557
1558	if (th->doff < sizeof(struct tcphdr) / 4)
1559		goto bad_packet;
1560	if (!pskb_may_pull(skb, th->doff * 4))
1561		goto discard_it;
1562
1563	/* An explanation is required here, I think.
1564	 * Packet length and doff are validated by header prediction,
 1565	 * provided the case of th->doff == 0 is eliminated.
1566	 * So, we defer the checks. */
1567
1568	if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
1569		goto csum_error;
1570
1571	th = tcp_hdr(skb);
1572	iph = ip_hdr(skb);
 1573	/* This is tricky: we move the IPCB to its correct location inside TCP_SKB_CB().
 1574	 * barrier() makes sure the compiler won't play fool^Waliasing games.
1575	 */
1576	memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
1577		sizeof(struct inet_skb_parm));
1578	barrier();
1579
1580	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1581	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1582				    skb->len - th->doff * 4);
1583	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1584	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1585	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1586	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
1587	TCP_SKB_CB(skb)->sacked	 = 0;
1588
1589lookup:
1590	sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source,
1591			       th->dest);
1592	if (!sk)
1593		goto no_tcp_socket;
1594
1595process:
1596	if (sk->sk_state == TCP_TIME_WAIT)
1597		goto do_time_wait;
1598
1599	if (sk->sk_state == TCP_NEW_SYN_RECV) {
1600		struct request_sock *req = inet_reqsk(sk);
1601		struct sock *nsk;
1602
1603		sk = req->rsk_listener;
1604		if (unlikely(tcp_v4_inbound_md5_hash(sk, skb))) {
1605			reqsk_put(req);
1606			goto discard_it;
1607		}
1608		if (unlikely(sk->sk_state != TCP_LISTEN)) {
1609			inet_csk_reqsk_queue_drop_and_put(sk, req);
1610			goto lookup;
1611		}
1612		sock_hold(sk);
1613		nsk = tcp_check_req(sk, skb, req, false);
1614		if (!nsk) {
1615			reqsk_put(req);
1616			goto discard_and_relse;
1617		}
1618		if (nsk == sk) {
1619			reqsk_put(req);
1620		} else if (tcp_child_process(sk, nsk, skb)) {
1621			tcp_v4_send_reset(nsk, skb);
1622			goto discard_and_relse;
1623		} else {
1624			sock_put(sk);
1625			return 0;
1626		}
1627	}
1628	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
1629		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1630		goto discard_and_relse;
1631	}
1632
1633	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1634		goto discard_and_relse;
1635
1636	if (tcp_v4_inbound_md5_hash(sk, skb))
1637		goto discard_and_relse;
1638
1639	nf_reset(skb);
1640
1641	if (sk_filter(sk, skb))
1642		goto discard_and_relse;
1643
1644	skb->dev = NULL;
1645
1646	if (sk->sk_state == TCP_LISTEN) {
1647		ret = tcp_v4_do_rcv(sk, skb);
1648		goto put_and_return;
1649	}
1650
1651	sk_incoming_cpu_update(sk);
1652
1653	bh_lock_sock_nested(sk);
1654	tcp_segs_in(tcp_sk(sk), skb);
1655	ret = 0;
1656	if (!sock_owned_by_user(sk)) {
1657		if (!tcp_prequeue(sk, skb))
 1658			ret = tcp_v4_do_rcv(sk, skb);
1659	} else if (unlikely(sk_add_backlog(sk, skb,
1660					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
1661		bh_unlock_sock(sk);
1662		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1663		goto discard_and_relse;
1664	}
1665	bh_unlock_sock(sk);
1666
1667put_and_return:
1668	sock_put(sk);
1669
1670	return ret;
1671
1672no_tcp_socket:
1673	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1674		goto discard_it;
1675
1676	if (tcp_checksum_complete(skb)) {
1677csum_error:
1678		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
1679bad_packet:
1680		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1681	} else {
1682		tcp_v4_send_reset(NULL, skb);
1683	}
1684
1685discard_it:
1686	/* Discard frame. */
1687	kfree_skb(skb);
1688	return 0;
1689
1690discard_and_relse:
1691	sock_put(sk);
1692	goto discard_it;
1693
1694do_time_wait:
1695	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1696		inet_twsk_put(inet_twsk(sk));
1697		goto discard_it;
1698	}
1699
1700	if (tcp_checksum_complete(skb)) {
1701		inet_twsk_put(inet_twsk(sk));
1702		goto csum_error;
1703	}
1704	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1705	case TCP_TW_SYN: {
1706		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
1707							&tcp_hashinfo, skb,
1708							__tcp_hdrlen(th),
1709							iph->saddr, th->source,
1710							iph->daddr, th->dest,
1711							inet_iif(skb));
1712		if (sk2) {
1713			inet_twsk_deschedule_put(inet_twsk(sk));
1714			sk = sk2;
1715			goto process;
1716		}
1717		/* Fall through to ACK */
1718	}
1719	case TCP_TW_ACK:
1720		tcp_v4_timewait_ack(sk, skb);
1721		break;
1722	case TCP_TW_RST:
1723		tcp_v4_send_reset(sk, skb);
1724		inet_twsk_deschedule_put(inet_twsk(sk));
1725		goto discard_it;
1726	case TCP_TW_SUCCESS:;
1727	}
1728	goto discard_it;
1729}
1730
1731static struct timewait_sock_ops tcp_timewait_sock_ops = {
1732	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
1733	.twsk_unique	= tcp_twsk_unique,
1734	.twsk_destructor= tcp_twsk_destructor,
1735};
1736
1737void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
1738{
1739	struct dst_entry *dst = skb_dst(skb);
1740
1741	if (dst && dst_hold_safe(dst)) {
1742		sk->sk_rx_dst = dst;
1743		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
1744	}
1745}
1746EXPORT_SYMBOL(inet_sk_rx_dst_set);
1747
1748const struct inet_connection_sock_af_ops ipv4_specific = {
1749	.queue_xmit	   = ip_queue_xmit,
1750	.send_check	   = tcp_v4_send_check,
1751	.rebuild_header	   = inet_sk_rebuild_header,
1752	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
1753	.conn_request	   = tcp_v4_conn_request,
1754	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
1755	.net_header_len	   = sizeof(struct iphdr),
1756	.setsockopt	   = ip_setsockopt,
1757	.getsockopt	   = ip_getsockopt,
1758	.addr2sockaddr	   = inet_csk_addr2sockaddr,
1759	.sockaddr_len	   = sizeof(struct sockaddr_in),
1760	.bind_conflict	   = inet_csk_bind_conflict,
1761#ifdef CONFIG_COMPAT
1762	.compat_setsockopt = compat_ip_setsockopt,
1763	.compat_getsockopt = compat_ip_getsockopt,
1764#endif
1765	.mtu_reduced	   = tcp_v4_mtu_reduced,
1766};
1767EXPORT_SYMBOL(ipv4_specific);
1768
1769#ifdef CONFIG_TCP_MD5SIG
1770static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
1771	.md5_lookup		= tcp_v4_md5_lookup,
1772	.calc_md5_hash		= tcp_v4_md5_hash_skb,
1773	.md5_parse		= tcp_v4_parse_md5_keys,
1774};
1775#endif
1776
 1777/* NOTE: A lot of things are set to zero explicitly by the call to
 1778 *       sk_alloc() and so need not be done here.
1779 */
1780static int tcp_v4_init_sock(struct sock *sk)
1781{
1782	struct inet_connection_sock *icsk = inet_csk(sk);
1783
1784	tcp_init_sock(sk);
1785
1786	icsk->icsk_af_ops = &ipv4_specific;
1787
1788#ifdef CONFIG_TCP_MD5SIG
1789	tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
1790#endif
1791
1792	return 0;
1793}
1794
1795void tcp_v4_destroy_sock(struct sock *sk)
1796{
1797	struct tcp_sock *tp = tcp_sk(sk);
1798
1799	tcp_clear_xmit_timers(sk);
1800
1801	tcp_cleanup_congestion_control(sk);
1802
 1803	/* Clean up the write buffer. */
1804	tcp_write_queue_purge(sk);
1805
1806	/* Cleans up our, hopefully empty, out_of_order_queue. */
1807	__skb_queue_purge(&tp->out_of_order_queue);
1808
1809#ifdef CONFIG_TCP_MD5SIG
1810	/* Clean up the MD5 key list, if any */
1811	if (tp->md5sig_info) {
1812		tcp_clear_md5_list(sk);
1813		kfree_rcu(tp->md5sig_info, rcu);
1814		tp->md5sig_info = NULL;
1815	}
1816#endif
1817
1818	/* Clean prequeue, it must be empty really */
1819	__skb_queue_purge(&tp->ucopy.prequeue);
1820
1821	/* Clean up a referenced TCP bind bucket. */
1822	if (inet_csk(sk)->icsk_bind_hash)
1823		inet_put_port(sk);
1824
1825	BUG_ON(tp->fastopen_rsk);
1826
1827	/* If socket is aborted during connect operation */
1828	tcp_free_fastopen_req(tp);
1829	tcp_saved_syn_free(tp);
1830
1831	sk_sockets_allocated_dec(sk);
1832
1833	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
1834		sock_release_memcg(sk);
1835}
1836EXPORT_SYMBOL(tcp_v4_destroy_sock);
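/*
 * Note that the MD5 teardown above uses kfree_rcu(), so lockless readers
 * that found md5sig_info under rcu_read_lock() can finish safely before
 * the memory goes away.  The reader side of that pattern, as a rough
 * sketch (lookup_md5_key() is a hypothetical stand-in for the hlist walk
 * done by tcp_md5_do_lookup()):
 */
#if 0
	rcu_read_lock();
	md5sig = rcu_dereference(tcp_sk(sk)->md5sig_info);
	if (md5sig)
		key = lookup_md5_key(md5sig, addr);	/* hypothetical helper */
	rcu_read_unlock();
#endif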
1837
1838#ifdef CONFIG_PROC_FS
1839/* Proc filesystem TCP sock list dumping. */
1840
1841/*
1842 * Get the next listener socket following cur (see the sketch after this
1843 * function).  If cur is NULL, get the first socket in the bucket given by
1844 * st->bucket; a zero st->bucket means the very first socket in the table.
1845 */
1846static void *listening_get_next(struct seq_file *seq, void *cur)
1847{
1848	struct inet_connection_sock *icsk;
1849	struct hlist_nulls_node *node;
1850	struct sock *sk = cur;
1851	struct inet_listen_hashbucket *ilb;
1852	struct tcp_iter_state *st = seq->private;
1853	struct net *net = seq_file_net(seq);
1854
1855	if (!sk) {
1856		ilb = &tcp_hashinfo.listening_hash[st->bucket];
1857		spin_lock_bh(&ilb->lock);
1858		sk = sk_nulls_head(&ilb->head);
1859		st->offset = 0;
1860		goto get_sk;
1861	}
1862	ilb = &tcp_hashinfo.listening_hash[st->bucket];
1863	++st->num;
1864	++st->offset;
1865
1866	sk = sk_nulls_next(sk);
1867get_sk:
1868	sk_nulls_for_each_from(sk, node) {
1869		if (!net_eq(sock_net(sk), net))
1870			continue;
1871		if (sk->sk_family == st->family) {
1872			cur = sk;
1873			goto out;
1874		}
1875		icsk = inet_csk(sk);
1876	}
1877	spin_unlock_bh(&ilb->lock);
1878	st->offset = 0;
1879	if (++st->bucket < INET_LHTABLE_SIZE) {
1880		ilb = &tcp_hashinfo.listening_hash[st->bucket];
1881		spin_lock_bh(&ilb->lock);
1882		sk = sk_nulls_head(&ilb->head);
1883		goto get_sk;
1884	}
1885	cur = NULL;
1886out:
1887	return cur;
1888}
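/*
 * st->bucket and st->offset exist so this walk can resume after the
 * seq_file buffer fills mid-table.  The effect is visible from userspace:
 * reading /proc/net/tcp in tiny chunks still yields one consistent
 * listing.  Illustrative sketch, not part of the kernel build:
 */
#if 0
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	char buf[256];
	ssize_t n;
	int fd = open("/proc/net/tcp", O_RDONLY);

	if (fd < 0)
		return 1;
	/* each read() re-enters tcp_seq_start(); last_pos, bucket and
	 * offset let the kernel resume where the previous read stopped */
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);
	close(fd);
	return 0;
}
#endif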
1889
1890static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
1891{
1892	struct tcp_iter_state *st = seq->private;
1893	void *rc;
1894
1895	st->bucket = 0;
1896	st->offset = 0;
1897	rc = listening_get_next(seq, NULL);
1898
1899	while (rc && *pos) {
1900		rc = listening_get_next(seq, rc);
1901		--*pos;
1902	}
1903	return rc;
1904}
1905
1906static inline bool empty_bucket(const struct tcp_iter_state *st)
1907{
1908	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
1909}
1910
1911/*
1912 * Get the first established socket, starting from the bucket given in
1913 * st->bucket; a zero st->bucket yields the very first socket in the hash.
1914 */
1915static void *established_get_first(struct seq_file *seq)
1916{
1917	struct tcp_iter_state *st = seq->private;
1918	struct net *net = seq_file_net(seq);
1919	void *rc = NULL;
1920
1921	st->offset = 0;
1922	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
1923		struct sock *sk;
1924		struct hlist_nulls_node *node;
1925		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
1926
1927		/* Lockless fast path for the common case of empty buckets */
1928		if (empty_bucket(st))
1929			continue;
1930
1931		spin_lock_bh(lock);
1932		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
1933			if (sk->sk_family != st->family ||
1934			    !net_eq(sock_net(sk), net)) {
1935				continue;
1936			}
1937			rc = sk;
1938			goto out;
1939		}
1940		spin_unlock_bh(lock);
1941	}
1942out:
1943	return rc;
1944}
1945
1946static void *established_get_next(struct seq_file *seq, void *cur)
1947{
1948	struct sock *sk = cur;
1949	struct hlist_nulls_node *node;
1950	struct tcp_iter_state *st = seq->private;
1951	struct net *net = seq_file_net(seq);
1952
1953	++st->num;
1954	++st->offset;
1955
1956	sk = sk_nulls_next(sk);
1957
1958	sk_nulls_for_each_from(sk, node) {
1959		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
1960			return sk;
1961	}
1962
1963	spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
1964	++st->bucket;
1965	return established_get_first(seq);
1966}
1967
1968static void *established_get_idx(struct seq_file *seq, loff_t pos)
1969{
1970	struct tcp_iter_state *st = seq->private;
1971	void *rc;
1972
1973	st->bucket = 0;
1974	rc = established_get_first(seq);
1975
1976	while (rc && pos) {
1977		rc = established_get_next(seq, rc);
1978		--pos;
1979	}
1980	return rc;
1981}
1982
1983static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
1984{
1985	void *rc;
1986	struct tcp_iter_state *st = seq->private;
1987
1988	st->state = TCP_SEQ_STATE_LISTENING;
1989	rc	  = listening_get_idx(seq, &pos);
1990
1991	if (!rc) {
1992		st->state = TCP_SEQ_STATE_ESTABLISHED;
1993		rc	  = established_get_idx(seq, pos);
1994	}
1995
1996	return rc;
1997}
1998
1999static void *tcp_seek_last_pos(struct seq_file *seq)
2000{
2001	struct tcp_iter_state *st = seq->private;
2002	int offset = st->offset;
2003	int orig_num = st->num;
2004	void *rc = NULL;
2005
2006	switch (st->state) {
2007	case TCP_SEQ_STATE_LISTENING:
2008		if (st->bucket >= INET_LHTABLE_SIZE)
2009			break;
2010		st->state = TCP_SEQ_STATE_LISTENING;
2011		rc = listening_get_next(seq, NULL);
2012		while (offset-- && rc)
2013			rc = listening_get_next(seq, rc);
2014		if (rc)
2015			break;
2016		st->bucket = 0;
2017		st->state = TCP_SEQ_STATE_ESTABLISHED;
2018		/* Fallthrough */
2019	case TCP_SEQ_STATE_ESTABLISHED:
2020		if (st->bucket > tcp_hashinfo.ehash_mask)
2021			break;
2022		rc = established_get_first(seq);
2023		while (offset-- && rc)
2024			rc = established_get_next(seq, rc);
2025	}
2026
2027	st->num = orig_num;
2028
2029	return rc;
2030}
2031
2032static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2033{
2034	struct tcp_iter_state *st = seq->private;
2035	void *rc;
2036
2037	if (*pos && *pos == st->last_pos) {
2038		rc = tcp_seek_last_pos(seq);
2039		if (rc)
2040			goto out;
2041	}
2042
2043	st->state = TCP_SEQ_STATE_LISTENING;
2044	st->num = 0;
2045	st->bucket = 0;
2046	st->offset = 0;
2047	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2048
2049out:
2050	st->last_pos = *pos;
2051	return rc;
2052}
2053
2054static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2055{
2056	struct tcp_iter_state *st = seq->private;
2057	void *rc = NULL;
2058
2059	if (v == SEQ_START_TOKEN) {
2060		rc = tcp_get_idx(seq, 0);
2061		goto out;
2062	}
2063
2064	switch (st->state) {
2065	case TCP_SEQ_STATE_LISTENING:
2066		rc = listening_get_next(seq, v);
2067		if (!rc) {
2068			st->state = TCP_SEQ_STATE_ESTABLISHED;
2069			st->bucket = 0;
2070			st->offset = 0;
2071			rc	  = established_get_first(seq);
2072		}
2073		break;
2074	case TCP_SEQ_STATE_ESTABLISHED:
2075		rc = established_get_next(seq, v);
2076		break;
2077	}
2078out:
2079	++*pos;
2080	st->last_pos = *pos;
2081	return rc;
2082}
2083
2084static void tcp_seq_stop(struct seq_file *seq, void *v)
2085{
2086	struct tcp_iter_state *st = seq->private;
2087
2088	switch (st->state) {
2089	case TCP_SEQ_STATE_LISTENING:
2090		if (v != SEQ_START_TOKEN)
2091			spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
2092		break;
2093	case TCP_SEQ_STATE_ESTABLISHED:
2094		if (v)
2095			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2096		break;
2097	}
2098}
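/*
 * tcp_seq_start/next/stop follow the usual seq_file contract.  The core
 * loop in fs/seq_file.c behaves roughly like this paraphrase (not
 * verbatim):
 */
#if 0
	void *p;
	int err;

	p = m->op->start(m, &m->index);
	while (p) {
		err = m->op->show(m, p);  /* show() also sees SEQ_START_TOKEN */
		if (err)
			break;
		p = m->op->next(m, p, &m->index);
	}
	m->op->stop(m, p);	/* drops any bucket lock still held */
#endif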
2099
2100int tcp_seq_open(struct inode *inode, struct file *file)
2101{
2102	struct tcp_seq_afinfo *afinfo = PDE_DATA(inode);
2103	struct tcp_iter_state *s;
2104	int err;
2105
2106	err = seq_open_net(inode, file, &afinfo->seq_ops,
2107			  sizeof(struct tcp_iter_state));
2108	if (err < 0)
2109		return err;
2110
2111	s = ((struct seq_file *)file->private_data)->private;
2112	s->family		= afinfo->family;
2113	s->last_pos		= 0;
2114	return 0;
2115}
2116EXPORT_SYMBOL(tcp_seq_open);
2117
2118int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
2119{
2120	int rc = 0;
2121	struct proc_dir_entry *p;
2122
2123	afinfo->seq_ops.start		= tcp_seq_start;
2124	afinfo->seq_ops.next		= tcp_seq_next;
2125	afinfo->seq_ops.stop		= tcp_seq_stop;
2126
2127	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
2128			     afinfo->seq_fops, afinfo);
2129	if (!p)
2130		rc = -ENOMEM;
2131	return rc;
2132}
2133EXPORT_SYMBOL(tcp_proc_register);
2134
2135void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
2136{
2137	remove_proc_entry(afinfo->name, net->proc_net);
2138}
2139EXPORT_SYMBOL(tcp_proc_unregister);
2140
2141static void get_openreq4(const struct request_sock *req,
2142			 struct seq_file *f, int i)
2143{
2144	const struct inet_request_sock *ireq = inet_rsk(req);
2145	long delta = req->rsk_timer.expires - jiffies;
2146
2147	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2148		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
2149		i,
2150		ireq->ir_loc_addr,
2151		ireq->ir_num,
2152		ireq->ir_rmt_addr,
2153		ntohs(ireq->ir_rmt_port),
2154		TCP_SYN_RECV,
2155		0, 0, /* could print option size, but that is af dependent. */
2156		1,    /* timers active (only the expire timer) */
2157		jiffies_delta_to_clock_t(delta),
2158		req->num_timeout,
2159		from_kuid_munged(seq_user_ns(f),
2160				 sock_i_uid(req->rsk_listener)),
2161		0,  /* non standard timer */
2162		0, /* open_requests have no inode */
2163		0,
2164		req);
2165}
2166
2167static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
2168{
2169	int timer_active;
2170	unsigned long timer_expires;
2171	const struct tcp_sock *tp = tcp_sk(sk);
2172	const struct inet_connection_sock *icsk = inet_csk(sk);
2173	const struct inet_sock *inet = inet_sk(sk);
2174	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
2175	__be32 dest = inet->inet_daddr;
2176	__be32 src = inet->inet_rcv_saddr;
2177	__u16 destp = ntohs(inet->inet_dport);
2178	__u16 srcp = ntohs(inet->inet_sport);
2179	int rx_queue;
2180	int state;
2181
2182	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
2183	    icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
2184	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
2185		timer_active	= 1;
2186		timer_expires	= icsk->icsk_timeout;
2187	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2188		timer_active	= 4;
2189		timer_expires	= icsk->icsk_timeout;
2190	} else if (timer_pending(&sk->sk_timer)) {
2191		timer_active	= 2;
2192		timer_expires	= sk->sk_timer.expires;
2193	} else {
2194		timer_active	= 0;
2195		timer_expires = jiffies;
2196	}
2197
2198	state = sk_state_load(sk);
2199	if (state == TCP_LISTEN)
2200		rx_queue = sk->sk_ack_backlog;
2201	else
2202		/* Because we don't lock the socket,
2203		 * we might find a transient negative value.
2204		 */
2205		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
2206
2207	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2208			"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
2209		i, src, srcp, dest, destp, state,
2210		tp->write_seq - tp->snd_una,
2211		rx_queue,
2212		timer_active,
2213		jiffies_delta_to_clock_t(timer_expires - jiffies),
2214		icsk->icsk_retransmits,
2215		from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
2216		icsk->icsk_probes_out,
2217		sock_i_ino(sk),
2218		atomic_read(&sk->sk_refcnt), sk,
2219		jiffies_to_clock_t(icsk->icsk_rto),
2220		jiffies_to_clock_t(icsk->icsk_ack.ato),
2221		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
2222		tp->snd_cwnd,
2223		state == TCP_LISTEN ?
2224		    fastopenq->max_qlen :
2225		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
2226}
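/*
 * The format string above defines one row of /proc/net/tcp.  A userspace
 * sketch pulling out the leading fields; note the addresses are raw
 * __be32 values printed as hex, so they appear byte-swapped on
 * little-endian hosts:
 */
#if 0
#include <stdio.h>

static void parse_row(const char *line)
{
	unsigned int sl, local_ip, local_port, remote_ip, remote_port, state;

	if (sscanf(line, "%u: %8X:%4X %8X:%4X %2X",
		   &sl, &local_ip, &local_port,
		   &remote_ip, &remote_port, &state) == 6)
		printf("slot %u state %#x local port %u\n",
		       sl, state, local_port);
}
#endif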
2227
2228static void get_timewait4_sock(const struct inet_timewait_sock *tw,
2229			       struct seq_file *f, int i)
2230{
2231	long delta = tw->tw_timer.expires - jiffies;
2232	__be32 dest, src;
2233	__u16 destp, srcp;
2234
2235	dest  = tw->tw_daddr;
2236	src   = tw->tw_rcv_saddr;
2237	destp = ntohs(tw->tw_dport);
2238	srcp  = ntohs(tw->tw_sport);
2239
2240	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2241		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
2242		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2243		3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
2244		atomic_read(&tw->tw_refcnt), tw);
2245}
2246
2247#define TMPSZ 150
2248
2249static int tcp4_seq_show(struct seq_file *seq, void *v)
2250{
2251	struct tcp_iter_state *st;
2252	struct sock *sk = v;
2253
2254	seq_setwidth(seq, TMPSZ - 1);
2255	if (v == SEQ_START_TOKEN) {
2256		seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
2257			   "rx_queue tr tm->when retrnsmt   uid  timeout "
2258			   "inode");
2259		goto out;
2260	}
2261	st = seq->private;
2262
2263	if (sk->sk_state == TCP_TIME_WAIT)
2264		get_timewait4_sock(v, seq, st->num);
2265	else if (sk->sk_state == TCP_NEW_SYN_RECV)
2266		get_openreq4(v, seq, st->num);
2267	else
2268		get_tcp4_sock(v, seq, st->num);
2269out:
2270	seq_pad(seq, '\n');
2271	return 0;
2272}
2273
2274static const struct file_operations tcp_afinfo_seq_fops = {
2275	.owner   = THIS_MODULE,
2276	.open    = tcp_seq_open,
2277	.read    = seq_read,
2278	.llseek  = seq_lseek,
2279	.release = seq_release_net
2280};
2281
2282static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2283	.name		= "tcp",
2284	.family		= AF_INET,
2285	.seq_fops	= &tcp_afinfo_seq_fops,
2286	.seq_ops	= {
2287		.show		= tcp4_seq_show,
2288	},
2289};
2290
2291static int __net_init tcp4_proc_init_net(struct net *net)
2292{
2293	return tcp_proc_register(net, &tcp4_seq_afinfo);
2294}
2295
2296static void __net_exit tcp4_proc_exit_net(struct net *net)
2297{
2298	tcp_proc_unregister(net, &tcp4_seq_afinfo);
2299}
2300
2301static struct pernet_operations tcp4_net_ops = {
2302	.init = tcp4_proc_init_net,
2303	.exit = tcp4_proc_exit_net,
2304};
2305
2306int __init tcp4_proc_init(void)
2307{
2308	return register_pernet_subsys(&tcp4_net_ops);
2309}
2310
2311void tcp4_proc_exit(void)
2312{
2313	unregister_pernet_subsys(&tcp4_net_ops);
2314}
2315#endif /* CONFIG_PROC_FS */
2316
2317struct proto tcp_prot = {
2318	.name			= "TCP",
2319	.owner			= THIS_MODULE,
2320	.close			= tcp_close,
2321	.connect		= tcp_v4_connect,
2322	.disconnect		= tcp_disconnect,
2323	.accept			= inet_csk_accept,
2324	.ioctl			= tcp_ioctl,
2325	.init			= tcp_v4_init_sock,
2326	.destroy		= tcp_v4_destroy_sock,
2327	.shutdown		= tcp_shutdown,
2328	.setsockopt		= tcp_setsockopt,
2329	.getsockopt		= tcp_getsockopt,
2330	.recvmsg		= tcp_recvmsg,
2331	.sendmsg		= tcp_sendmsg,
2332	.sendpage		= tcp_sendpage,
2333	.backlog_rcv		= tcp_v4_do_rcv,
2334	.release_cb		= tcp_release_cb,
2335	.hash			= inet_hash,
2336	.unhash			= inet_unhash,
2337	.get_port		= inet_csk_get_port,
2338	.enter_memory_pressure	= tcp_enter_memory_pressure,
2339	.stream_memory_free	= tcp_stream_memory_free,
2340	.sockets_allocated	= &tcp_sockets_allocated,
2341	.orphan_count		= &tcp_orphan_count,
2342	.memory_allocated	= &tcp_memory_allocated,
2343	.memory_pressure	= &tcp_memory_pressure,
2344	.sysctl_mem		= sysctl_tcp_mem,
2345	.sysctl_wmem		= sysctl_tcp_wmem,
2346	.sysctl_rmem		= sysctl_tcp_rmem,
2347	.max_header		= MAX_TCP_HEADER,
2348	.obj_size		= sizeof(struct tcp_sock),
2349	.slab_flags		= SLAB_DESTROY_BY_RCU,
2350	.twsk_prot		= &tcp_timewait_sock_ops,
2351	.rsk_prot		= &tcp_request_sock_ops,
2352	.h.hashinfo		= &tcp_hashinfo,
2353	.no_autobind		= true,
2354#ifdef CONFIG_COMPAT
2355	.compat_setsockopt	= compat_tcp_setsockopt,
2356	.compat_getsockopt	= compat_tcp_getsockopt,
2357#endif
2358	.diag_destroy		= tcp_abort,
2359};
2360EXPORT_SYMBOL(tcp_prot);
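/*
 * struct proto is the glue between the socket syscalls and this file.
 * How a plain client exercises the hooks above, as a hedged sketch:
 */
#if 0
#include <string.h>
#include <unistd.h>
#include <netinet/in.h>
#include <sys/socket.h>

int main(void)
{
	struct sockaddr_in dst;
	int fd = socket(AF_INET, SOCK_STREAM, 0);  /* .init -> tcp_v4_init_sock */

	memset(&dst, 0, sizeof(dst));
	dst.sin_family = AF_INET;
	dst.sin_port = htons(80);
	dst.sin_addr.s_addr = htonl(INADDR_LOOPBACK);

	connect(fd, (struct sockaddr *)&dst, sizeof(dst)); /* .connect -> tcp_v4_connect */
	send(fd, "x", 1, 0);	/* .sendmsg -> tcp_sendmsg */
	close(fd);		/* .close   -> tcp_close   */
	return 0;
}
#endif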
2361
2362static void __net_exit tcp_sk_exit(struct net *net)
2363{
2364	int cpu;
2365
2366	for_each_possible_cpu(cpu)
2367		inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
2368	free_percpu(net->ipv4.tcp_sk);
2369}
2370
2371static int __net_init tcp_sk_init(struct net *net)
2372{
2373	int res, cpu;
2374
2375	net->ipv4.tcp_sk = alloc_percpu(struct sock *);
2376	if (!net->ipv4.tcp_sk)
2377		return -ENOMEM;
2378
2379	for_each_possible_cpu(cpu) {
2380		struct sock *sk;
2381
2382		res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
2383					   IPPROTO_TCP, net);
2384		if (res)
2385			goto fail;
2386		*per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
2387	}
2388
2389	net->ipv4.sysctl_tcp_ecn = 2;
2390	net->ipv4.sysctl_tcp_ecn_fallback = 1;
2391
2392	net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
2393	net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
2394	net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;
2395
2396	net->ipv4.sysctl_tcp_keepalive_time = TCP_KEEPALIVE_TIME;
2397	net->ipv4.sysctl_tcp_keepalive_probes = TCP_KEEPALIVE_PROBES;
2398	net->ipv4.sysctl_tcp_keepalive_intvl = TCP_KEEPALIVE_INTVL;
2399
2400	net->ipv4.sysctl_tcp_syn_retries = TCP_SYN_RETRIES;
2401	net->ipv4.sysctl_tcp_synack_retries = TCP_SYNACK_RETRIES;
2402	net->ipv4.sysctl_tcp_syncookies = 1;
2403	net->ipv4.sysctl_tcp_reordering = TCP_FASTRETRANS_THRESH;
2404	net->ipv4.sysctl_tcp_retries1 = TCP_RETR1;
2405	net->ipv4.sysctl_tcp_retries2 = TCP_RETR2;
2406	net->ipv4.sysctl_tcp_orphan_retries = 0;
2407	net->ipv4.sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
2408	net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX;
2409
2410	return 0;
2411fail:
2412	tcp_sk_exit(net);
2413
2414	return res;
2415}
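/*
 * The per-namespace defaults chosen above surface as the usual sysctls
 * under /proc/sys/net/ipv4/.  A sketch reading two of them back (paths
 * assumed from the standard sysctl layout):
 */
#if 0
#include <stdio.h>

static long read_sysctl(const char *path)
{
	long val = -1;
	FILE *f = fopen(path, "r");

	if (f) {
		if (fscanf(f, "%ld", &val) != 1)
			val = -1;
		fclose(f);
	}
	return val;
}

int main(void)
{
	/* expected defaults per tcp_sk_init(): syncookies = 1, ecn = 2 */
	printf("tcp_syncookies = %ld\n",
	       read_sysctl("/proc/sys/net/ipv4/tcp_syncookies"));
	printf("tcp_ecn        = %ld\n",
	       read_sysctl("/proc/sys/net/ipv4/tcp_ecn"));
	return 0;
}
#endif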
2416
2417static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
2418{
2419	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
2420}
2421
2422static struct pernet_operations __net_initdata tcp_sk_ops = {
2423       .init	   = tcp_sk_init,
2424       .exit	   = tcp_sk_exit,
2425       .exit_batch = tcp_sk_exit_batch,
2426};
2427
2428void __init tcp_v4_init(void)
2429{
2430	inet_hashinfo_init(&tcp_hashinfo);
2431	if (register_pernet_subsys(&tcp_sk_ops))
2432		panic("Failed to create the TCP control socket.\n");
2433}
v3.5.6
   1/*
   2 * INET		An implementation of the TCP/IP protocol suite for the LINUX
   3 *		operating system.  INET is implemented using the  BSD Socket
   4 *		interface as the means of communication with the user level.
   5 *
   6 *		Implementation of the Transmission Control Protocol(TCP).
   7 *
   8 *		IPv4 specific functions
   9 *
  10 *
  11 *		code split from:
  12 *		linux/ipv4/tcp.c
  13 *		linux/ipv4/tcp_input.c
  14 *		linux/ipv4/tcp_output.c
  15 *
  16 *		See tcp.c for author information
  17 *
  18 *	This program is free software; you can redistribute it and/or
  19 *      modify it under the terms of the GNU General Public License
  20 *      as published by the Free Software Foundation; either version
  21 *      2 of the License, or (at your option) any later version.
  22 */
  23
  24/*
  25 * Changes:
  26 *		David S. Miller	:	New socket lookup architecture.
  27 *					This code is dedicated to John Dyson.
  28 *		David S. Miller :	Change semantics of established hash,
  29 *					half is devoted to TIME_WAIT sockets
  30 *					and the rest go in the other half.
  31 *		Andi Kleen :		Add support for syncookies and fixed
  32 *					some bugs: ip options weren't passed to
  33 *					the TCP layer, missed a check for an
  34 *					ACK bit.
  35 *		Andi Kleen :		Implemented fast path mtu discovery.
  36 *	     				Fixed many serious bugs in the
  37 *					request_sock handling and moved
  38 *					most of it into the af independent code.
  39 *					Added tail drop and some other bugfixes.
  40 *					Added new listen semantics.
  41 *		Mike McLagan	:	Routing by source
  42 *	Juan Jose Ciarlante:		ip_dynaddr bits
  43 *		Andi Kleen:		various fixes.
  44 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
  45 *					coma.
  46 *	Andi Kleen		:	Fix new listen.
  47 *	Andi Kleen		:	Fix accept error reporting.
  48 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
  49 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
  50 *					a single port at the same time.
  51 */
  52
  53#define pr_fmt(fmt) "TCP: " fmt
  54
  55#include <linux/bottom_half.h>
  56#include <linux/types.h>
  57#include <linux/fcntl.h>
  58#include <linux/module.h>
  59#include <linux/random.h>
  60#include <linux/cache.h>
  61#include <linux/jhash.h>
  62#include <linux/init.h>
  63#include <linux/times.h>
  64#include <linux/slab.h>
  65
  66#include <net/net_namespace.h>
  67#include <net/icmp.h>
  68#include <net/inet_hashtables.h>
  69#include <net/tcp.h>
  70#include <net/transp_v6.h>
  71#include <net/ipv6.h>
  72#include <net/inet_common.h>
  73#include <net/timewait_sock.h>
  74#include <net/xfrm.h>
  75#include <net/netdma.h>
  76#include <net/secure_seq.h>
  77#include <net/tcp_memcontrol.h>
  78
  79#include <linux/inet.h>
  80#include <linux/ipv6.h>
  81#include <linux/stddef.h>
  82#include <linux/proc_fs.h>
  83#include <linux/seq_file.h>
  84
  85#include <linux/crypto.h>
  86#include <linux/scatterlist.h>
  87
  88int sysctl_tcp_tw_reuse __read_mostly;
  89int sysctl_tcp_low_latency __read_mostly;
  90EXPORT_SYMBOL(sysctl_tcp_low_latency);
  91
  92
  93#ifdef CONFIG_TCP_MD5SIG
  94static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
  95			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
  96#endif
  97
  98struct inet_hashinfo tcp_hashinfo;
  99EXPORT_SYMBOL(tcp_hashinfo);
 100
 101static inline __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
 102{
 103	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
 104					  ip_hdr(skb)->saddr,
 105					  tcp_hdr(skb)->dest,
 106					  tcp_hdr(skb)->source);
 107}
 108
 109int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
 110{
 111	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
 112	struct tcp_sock *tp = tcp_sk(sk);
 113
 114	/* With PAWS, it is safe from the viewpoint
 115	   of data integrity. Even without PAWS it is safe provided sequence
 116	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.
 117
 118	   Actually, the idea is close to VJ's one, only timestamp cache is
 119	   held not per host, but per port pair and TW bucket is used as state
 120	   holder.
 121
 122	   If TW bucket has been already destroyed we fall back to VJ's scheme
 123	   and use initial timestamp retrieved from peer table.
 124	 */
 125	if (tcptw->tw_ts_recent_stamp &&
 126	    (twp == NULL || (sysctl_tcp_tw_reuse &&
 127			     get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
 128		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
 129		if (tp->write_seq == 0)
 130			tp->write_seq = 1;
 131		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
 132		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
 133		sock_hold(sktw);
 134		return 1;
 135	}
 136
 137	return 0;
 138}
 139EXPORT_SYMBOL_GPL(tcp_twsk_unique);
 140
 141static int tcp_repair_connect(struct sock *sk)
 142{
 143	tcp_connect_init(sk);
 144	tcp_finish_connect(sk, NULL);
 145
 146	return 0;
 147}
 148
 149/* This will initiate an outgoing connection. */
 150int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 151{
 152	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
 153	struct inet_sock *inet = inet_sk(sk);
 154	struct tcp_sock *tp = tcp_sk(sk);
 155	__be16 orig_sport, orig_dport;
 156	__be32 daddr, nexthop;
 157	struct flowi4 *fl4;
 158	struct rtable *rt;
 159	int err;
 160	struct ip_options_rcu *inet_opt;
 161
 162	if (addr_len < sizeof(struct sockaddr_in))
 163		return -EINVAL;
 164
 165	if (usin->sin_family != AF_INET)
 166		return -EAFNOSUPPORT;
 167
 168	nexthop = daddr = usin->sin_addr.s_addr;
 169	inet_opt = rcu_dereference_protected(inet->inet_opt,
 170					     sock_owned_by_user(sk));
 171	if (inet_opt && inet_opt->opt.srr) {
 172		if (!daddr)
 173			return -EINVAL;
 174		nexthop = inet_opt->opt.faddr;
 175	}
 176
 177	orig_sport = inet->inet_sport;
 178	orig_dport = usin->sin_port;
 179	fl4 = &inet->cork.fl.u.ip4;
 180	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
 181			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
 182			      IPPROTO_TCP,
 183			      orig_sport, orig_dport, sk, true);
 184	if (IS_ERR(rt)) {
 185		err = PTR_ERR(rt);
 186		if (err == -ENETUNREACH)
 187			IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
 188		return err;
 189	}
 190
 191	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
 192		ip_rt_put(rt);
 193		return -ENETUNREACH;
 194	}
 195
 196	if (!inet_opt || !inet_opt->opt.srr)
 197		daddr = fl4->daddr;
 198
 199	if (!inet->inet_saddr)
 200		inet->inet_saddr = fl4->saddr;
 201	inet->inet_rcv_saddr = inet->inet_saddr;
 202
 203	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
 204		/* Reset inherited state */
 205		tp->rx_opt.ts_recent	   = 0;
 206		tp->rx_opt.ts_recent_stamp = 0;
 207		if (likely(!tp->repair))
 208			tp->write_seq	   = 0;
 209	}
 210
 211	if (tcp_death_row.sysctl_tw_recycle &&
 212	    !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr) {
 213		struct inet_peer *peer = rt_get_peer(rt, fl4->daddr);
 214		/*
 215		 * VJ's idea. We save last timestamp seen from
 216		 * the destination in peer table, when entering state
 217		 * TIME-WAIT * and initialize rx_opt.ts_recent from it,
 218		 * when trying new connection.
 219		 */
 220		if (peer) {
 221			inet_peer_refcheck(peer);
 222			if ((u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
 223				tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
 224				tp->rx_opt.ts_recent = peer->tcp_ts;
 225			}
 226		}
 227	}
 228
 229	inet->inet_dport = usin->sin_port;
 230	inet->inet_daddr = daddr;
 231
 232	inet_csk(sk)->icsk_ext_hdr_len = 0;
 233	if (inet_opt)
 234		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
 235
 236	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;
 237
 238	/* Socket identity is still unknown (sport may be zero).
 239	 * However we set state to SYN-SENT and not releasing socket
 240	 * lock select source port, enter ourselves into the hash tables and
 241	 * complete initialization after this.
 242	 */
 243	tcp_set_state(sk, TCP_SYN_SENT);
 244	err = inet_hash_connect(&tcp_death_row, sk);
 245	if (err)
 246		goto failure;
 247
 
 
 248	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
 249			       inet->inet_sport, inet->inet_dport, sk);
 250	if (IS_ERR(rt)) {
 251		err = PTR_ERR(rt);
 252		rt = NULL;
 253		goto failure;
 254	}
 255	/* OK, now commit destination to socket.  */
 256	sk->sk_gso_type = SKB_GSO_TCPV4;
 257	sk_setup_caps(sk, &rt->dst);
 258
 259	if (!tp->write_seq && likely(!tp->repair))
 260		tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
 261							   inet->inet_daddr,
 262							   inet->inet_sport,
 263							   usin->sin_port);
 264
 265	inet->inet_id = tp->write_seq ^ jiffies;
 266
 267	if (likely(!tp->repair))
 268		err = tcp_connect(sk);
 269	else
 270		err = tcp_repair_connect(sk);
 271
 272	rt = NULL;
 273	if (err)
 274		goto failure;
 275
 276	return 0;
 277
 278failure:
 279	/*
 280	 * This unhashes the socket and releases the local port,
 281	 * if necessary.
 282	 */
 283	tcp_set_state(sk, TCP_CLOSE);
 284	ip_rt_put(rt);
 285	sk->sk_route_caps = 0;
 286	inet->inet_dport = 0;
 287	return err;
 288}
 289EXPORT_SYMBOL(tcp_v4_connect);
 290
 291/*
 292 * This routine does path mtu discovery as defined in RFC1191.
 
 
 293 */
 294static void do_pmtu_discovery(struct sock *sk, const struct iphdr *iph, u32 mtu)
 295{
 296	struct dst_entry *dst;
 297	struct inet_sock *inet = inet_sk(sk);
 
 298
 299	/* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
 300	 * send out by Linux are always <576bytes so they should go through
 301	 * unfragmented).
 302	 */
 303	if (sk->sk_state == TCP_LISTEN)
 304		return;
 305
 306	/* We don't check in the destentry if pmtu discovery is forbidden
 307	 * on this route. We just assume that no packet_to_big packets
 308	 * are send back when pmtu discovery is not active.
 309	 * There is a small race when the user changes this flag in the
 310	 * route, but I think that's acceptable.
 311	 */
 312	if ((dst = __sk_dst_check(sk, 0)) == NULL)
 313		return;
 314
 315	dst->ops->update_pmtu(dst, mtu);
 316
 317	/* Something is about to be wrong... Remember soft error
 318	 * for the case, if this connection will not able to recover.
 319	 */
 320	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
 321		sk->sk_err_soft = EMSGSIZE;
 322
 323	mtu = dst_mtu(dst);
 324
 325	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
 
 326	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
 327		tcp_sync_mss(sk, mtu);
 328
 329		/* Resend the TCP packet because it's
 330		 * clear that the old packet has been
 331		 * dropped. This is the new "fast" path mtu
 332		 * discovery.
 333		 */
 334		tcp_simple_retransmit(sk);
 335	} /* else let the usual retransmit timer handle it */
 336}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 337
 338/*
 339 * This routine is called by the ICMP module when it gets some
 340 * sort of error condition.  If err < 0 then the socket should
 341 * be closed and the error returned to the user.  If err > 0
 342 * it's just the icmp type << 8 | icmp code.  After adjustment
 343 * header points to the first 8 bytes of the tcp header.  We need
 344 * to find the appropriate port.
 345 *
 346 * The locking strategy used here is very "optimistic". When
 347 * someone else accesses the socket the ICMP is just dropped
 348 * and for some paths there is no check at all.
 349 * A more general error queue to queue errors for later handling
 350 * is probably better.
 351 *
 352 */
 353
 354void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
 355{
 356	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
 357	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
 358	struct inet_connection_sock *icsk;
 359	struct tcp_sock *tp;
 360	struct inet_sock *inet;
 361	const int type = icmp_hdr(icmp_skb)->type;
 362	const int code = icmp_hdr(icmp_skb)->code;
 363	struct sock *sk;
 364	struct sk_buff *skb;
 365	__u32 seq;
 
 366	__u32 remaining;
 367	int err;
 368	struct net *net = dev_net(icmp_skb->dev);
 369
 370	if (icmp_skb->len < (iph->ihl << 2) + 8) {
 371		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
 372		return;
 373	}
 374
 375	sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
 376			iph->saddr, th->source, inet_iif(icmp_skb));
 377	if (!sk) {
 378		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
 379		return;
 380	}
 381	if (sk->sk_state == TCP_TIME_WAIT) {
 382		inet_twsk_put(inet_twsk(sk));
 383		return;
 384	}
 
 
 
 
 
 
 
 
 385
 386	bh_lock_sock(sk);
 387	/* If too many ICMPs get dropped on busy
 388	 * servers this needs to be solved differently.
 
 
 389	 */
 390	if (sock_owned_by_user(sk))
 391		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
 392
 
 393	if (sk->sk_state == TCP_CLOSE)
 394		goto out;
 395
 396	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
 397		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
 398		goto out;
 399	}
 400
 401	icsk = inet_csk(sk);
 402	tp = tcp_sk(sk);
 403	seq = ntohl(th->seq);
 
 
 404	if (sk->sk_state != TCP_LISTEN &&
 405	    !between(seq, tp->snd_una, tp->snd_nxt)) {
 406		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
 407		goto out;
 408	}
 409
 410	switch (type) {
 
 
 
 411	case ICMP_SOURCE_QUENCH:
 412		/* Just silently ignore these. */
 413		goto out;
 414	case ICMP_PARAMETERPROB:
 415		err = EPROTO;
 416		break;
 417	case ICMP_DEST_UNREACH:
 418		if (code > NR_ICMP_UNREACH)
 419			goto out;
 420
 421		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
 422			if (!sock_owned_by_user(sk))
 423				do_pmtu_discovery(sk, iph, info);
 
 
 
 
 
 
 
 
 
 
 
 
 424			goto out;
 425		}
 426
 427		err = icmp_err_convert[code].errno;
 428		/* check if icmp_skb allows revert of backoff
 429		 * (see draft-zimmermann-tcp-lcd) */
 430		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
 431			break;
 432		if (seq != tp->snd_una  || !icsk->icsk_retransmits ||
 433		    !icsk->icsk_backoff)
 434			break;
 435
 436		if (sock_owned_by_user(sk))
 437			break;
 438
 439		icsk->icsk_backoff--;
 440		inet_csk(sk)->icsk_rto = (tp->srtt ? __tcp_set_rto(tp) :
 441			TCP_TIMEOUT_INIT) << icsk->icsk_backoff;
 442		tcp_bound_rto(sk);
 443
 444		skb = tcp_write_queue_head(sk);
 445		BUG_ON(!skb);
 446
 447		remaining = icsk->icsk_rto - min(icsk->icsk_rto,
 448				tcp_time_stamp - TCP_SKB_CB(skb)->when);
 
 449
 450		if (remaining) {
 451			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
 452						  remaining, TCP_RTO_MAX);
 453		} else {
 454			/* RTO revert clocked out retransmission.
 455			 * Will retransmit now */
 456			tcp_retransmit_timer(sk);
 457		}
 458
 459		break;
 460	case ICMP_TIME_EXCEEDED:
 461		err = EHOSTUNREACH;
 462		break;
 463	default:
 464		goto out;
 465	}
 466
 467	switch (sk->sk_state) {
 468		struct request_sock *req, **prev;
 469	case TCP_LISTEN:
 470		if (sock_owned_by_user(sk))
 471			goto out;
 472
 473		req = inet_csk_search_req(sk, &prev, th->dest,
 474					  iph->daddr, iph->saddr);
 475		if (!req)
 476			goto out;
 477
 478		/* ICMPs are not backlogged, hence we cannot get
 479		   an established socket here.
 480		 */
 481		WARN_ON(req->sk);
 482
 483		if (seq != tcp_rsk(req)->snt_isn) {
 484			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
 485			goto out;
 486		}
 487
 488		/*
 489		 * Still in SYN_RECV, just remove it silently.
 490		 * There is no good way to pass the error to the newly
 491		 * created socket, and POSIX does not want network
 492		 * errors returned from accept().
 493		 */
 494		inet_csk_reqsk_queue_drop(sk, req, prev);
 495		goto out;
 496
 497	case TCP_SYN_SENT:
 498	case TCP_SYN_RECV:  /* Cannot happen.
 499			       It can f.e. if SYNs crossed.
 500			     */
 501		if (!sock_owned_by_user(sk)) {
 502			sk->sk_err = err;
 503
 504			sk->sk_error_report(sk);
 505
 506			tcp_done(sk);
 507		} else {
 508			sk->sk_err_soft = err;
 509		}
 510		goto out;
 511	}
 512
 513	/* If we've already connected we will keep trying
 514	 * until we time out, or the user gives up.
 515	 *
 516	 * rfc1122 4.2.3.9 allows to consider as hard errors
 517	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
 518	 * but it is obsoleted by pmtu discovery).
 519	 *
 520	 * Note, that in modern internet, where routing is unreliable
 521	 * and in each dark corner broken firewalls sit, sending random
 522	 * errors ordered by their masters even this two messages finally lose
 523	 * their original sense (even Linux sends invalid PORT_UNREACHs)
 524	 *
 525	 * Now we are in compliance with RFCs.
 526	 *							--ANK (980905)
 527	 */
 528
 529	inet = inet_sk(sk);
 530	if (!sock_owned_by_user(sk) && inet->recverr) {
 531		sk->sk_err = err;
 532		sk->sk_error_report(sk);
 533	} else	{ /* Only an error on timeout */
 534		sk->sk_err_soft = err;
 535	}
 536
 537out:
 538	bh_unlock_sock(sk);
 539	sock_put(sk);
 540}
 541
 542static void __tcp_v4_send_check(struct sk_buff *skb,
 543				__be32 saddr, __be32 daddr)
 544{
 545	struct tcphdr *th = tcp_hdr(skb);
 546
 547	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 548		th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
 549		skb->csum_start = skb_transport_header(skb) - skb->head;
 550		skb->csum_offset = offsetof(struct tcphdr, check);
 551	} else {
 552		th->check = tcp_v4_check(skb->len, saddr, daddr,
 553					 csum_partial(th,
 554						      th->doff << 2,
 555						      skb->csum));
 556	}
 557}
 558
 559/* This routine computes an IPv4 TCP checksum. */
 560void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
 561{
 562	const struct inet_sock *inet = inet_sk(sk);
 563
 564	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
 565}
 566EXPORT_SYMBOL(tcp_v4_send_check);
 567
 568int tcp_v4_gso_send_check(struct sk_buff *skb)
 569{
 570	const struct iphdr *iph;
 571	struct tcphdr *th;
 572
 573	if (!pskb_may_pull(skb, sizeof(*th)))
 574		return -EINVAL;
 575
 576	iph = ip_hdr(skb);
 577	th = tcp_hdr(skb);
 578
 579	th->check = 0;
 580	skb->ip_summed = CHECKSUM_PARTIAL;
 581	__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
 582	return 0;
 583}
 584
 585/*
 586 *	This routine will send an RST to the other tcp.
 587 *
 588 *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 589 *		      for reset.
 590 *	Answer: if a packet caused RST, it is not for a socket
 591 *		existing in our system, if it is matched to a socket,
 592 *		it is just duplicate segment or bug in other side's TCP.
 593 *		So that we build reply only basing on parameters
 594 *		arrived with segment.
 595 *	Exception: precedence violation. We do not implement it in any case.
 596 */
 597
 598static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
 599{
 600	const struct tcphdr *th = tcp_hdr(skb);
 601	struct {
 602		struct tcphdr th;
 603#ifdef CONFIG_TCP_MD5SIG
 604		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
 605#endif
 606	} rep;
 607	struct ip_reply_arg arg;
 608#ifdef CONFIG_TCP_MD5SIG
 609	struct tcp_md5sig_key *key;
 610	const __u8 *hash_location = NULL;
 611	unsigned char newhash[16];
 612	int genhash;
 613	struct sock *sk1 = NULL;
 614#endif
 615	struct net *net;
 616
 617	/* Never send a reset in response to a reset. */
 618	if (th->rst)
 619		return;
 620
 621	if (skb_rtable(skb)->rt_type != RTN_LOCAL)
 
 
 
 622		return;
 623
 624	/* Swap the send and the receive. */
 625	memset(&rep, 0, sizeof(rep));
 626	rep.th.dest   = th->source;
 627	rep.th.source = th->dest;
 628	rep.th.doff   = sizeof(struct tcphdr) / 4;
 629	rep.th.rst    = 1;
 630
 631	if (th->ack) {
 632		rep.th.seq = th->ack_seq;
 633	} else {
 634		rep.th.ack = 1;
 635		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
 636				       skb->len - (th->doff << 2));
 637	}
 638
 639	memset(&arg, 0, sizeof(arg));
 640	arg.iov[0].iov_base = (unsigned char *)&rep;
 641	arg.iov[0].iov_len  = sizeof(rep.th);
 642
 
 643#ifdef CONFIG_TCP_MD5SIG
 644	hash_location = tcp_parse_md5sig_option(th);
 645	if (!sk && hash_location) {
 
 
 
 646		/*
 647		 * active side is lost. Try to find listening socket through
 648		 * source port, and then find md5 key through listening socket.
 649		 * we are not loose security here:
 650		 * Incoming packet is checked with md5 hash with finding key,
 651		 * no RST generated if md5 hash doesn't match.
 652		 */
 653		sk1 = __inet_lookup_listener(dev_net(skb_dst(skb)->dev),
 654					     &tcp_hashinfo, ip_hdr(skb)->daddr,
 
 655					     ntohs(th->source), inet_iif(skb));
 656		/* don't send rst if it can't find key */
 657		if (!sk1)
 658			return;
 659		rcu_read_lock();
 660		key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
 661					&ip_hdr(skb)->saddr, AF_INET);
 662		if (!key)
 663			goto release_sk1;
 664
 665		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, NULL, skb);
 666		if (genhash || memcmp(hash_location, newhash, 16) != 0)
 667			goto release_sk1;
 668	} else {
 669		key = sk ? tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
 670					     &ip_hdr(skb)->saddr,
 671					     AF_INET) : NULL;
 672	}
 673
 674	if (key) {
 675		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
 676				   (TCPOPT_NOP << 16) |
 677				   (TCPOPT_MD5SIG << 8) |
 678				   TCPOLEN_MD5SIG);
 679		/* Update length and the length the header thinks exists */
 680		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
 681		rep.th.doff = arg.iov[0].iov_len / 4;
 682
 683		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
 684				     key, ip_hdr(skb)->saddr,
 685				     ip_hdr(skb)->daddr, &rep.th);
 686	}
 687#endif
 688	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
 689				      ip_hdr(skb)->saddr, /* XXX */
 690				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
 691	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
 692	arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
 
 693	/* When socket is gone, all binding information is lost.
 694	 * routing might fail in this case. using iif for oif to
 695	 * make sure we can deliver it
 696	 */
 697	arg.bound_dev_if = sk ? sk->sk_bound_dev_if : inet_iif(skb);
 
 
 
 
 698
 699	net = dev_net(skb_dst(skb)->dev);
 700	arg.tos = ip_hdr(skb)->tos;
 701	ip_send_reply(net->ipv4.tcp_sock, skb, ip_hdr(skb)->saddr,
 702		      &arg, arg.iov[0].iov_len);
 
 
 703
 704	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
 705	TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
 706
 707#ifdef CONFIG_TCP_MD5SIG
 708release_sk1:
 709	if (sk1) {
 710		rcu_read_unlock();
 711		sock_put(sk1);
 712	}
 713#endif
 714}
 715
 716/* The code following below sending ACKs in SYN-RECV and TIME-WAIT states
 717   outside socket context is ugly, certainly. What can I do?
 718 */
 719
 720static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
 721			    u32 win, u32 ts, int oif,
 
 722			    struct tcp_md5sig_key *key,
 723			    int reply_flags, u8 tos)
 724{
 725	const struct tcphdr *th = tcp_hdr(skb);
 726	struct {
 727		struct tcphdr th;
 728		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
 729#ifdef CONFIG_TCP_MD5SIG
 730			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
 731#endif
 732			];
 733	} rep;
 734	struct ip_reply_arg arg;
 735	struct net *net = dev_net(skb_dst(skb)->dev);
 736
 737	memset(&rep.th, 0, sizeof(struct tcphdr));
 738	memset(&arg, 0, sizeof(arg));
 739
 740	arg.iov[0].iov_base = (unsigned char *)&rep;
 741	arg.iov[0].iov_len  = sizeof(rep.th);
 742	if (ts) {
 743		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
 744				   (TCPOPT_TIMESTAMP << 8) |
 745				   TCPOLEN_TIMESTAMP);
 746		rep.opt[1] = htonl(tcp_time_stamp);
 747		rep.opt[2] = htonl(ts);
 748		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
 749	}
 750
 751	/* Swap the send and the receive. */
 752	rep.th.dest    = th->source;
 753	rep.th.source  = th->dest;
 754	rep.th.doff    = arg.iov[0].iov_len / 4;
 755	rep.th.seq     = htonl(seq);
 756	rep.th.ack_seq = htonl(ack);
 757	rep.th.ack     = 1;
 758	rep.th.window  = htons(win);
 759
 760#ifdef CONFIG_TCP_MD5SIG
 761	if (key) {
 762		int offset = (ts) ? 3 : 0;
 763
 764		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
 765					  (TCPOPT_NOP << 16) |
 766					  (TCPOPT_MD5SIG << 8) |
 767					  TCPOLEN_MD5SIG);
 768		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
 769		rep.th.doff = arg.iov[0].iov_len/4;
 770
 771		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
 772				    key, ip_hdr(skb)->saddr,
 773				    ip_hdr(skb)->daddr, &rep.th);
 774	}
 775#endif
 776	arg.flags = reply_flags;
 777	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
 778				      ip_hdr(skb)->saddr, /* XXX */
 779				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
 780	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
 781	if (oif)
 782		arg.bound_dev_if = oif;
 783	arg.tos = tos;
 784	ip_send_reply(net->ipv4.tcp_sock, skb, ip_hdr(skb)->saddr,
 785		      &arg, arg.iov[0].iov_len);
 
 
 786
 787	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
 788}
 789
 790static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
 791{
 792	struct inet_timewait_sock *tw = inet_twsk(sk);
 793	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
 794
 795	tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
 
 796			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
 
 797			tcptw->tw_ts_recent,
 798			tw->tw_bound_dev_if,
 799			tcp_twsk_md5_key(tcptw),
 800			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
 801			tw->tw_tos
 802			);
 803
 804	inet_twsk_put(tw);
 805}
 806
 807static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
 808				  struct request_sock *req)
 809{
 810	tcp_v4_send_ack(skb, tcp_rsk(req)->snt_isn + 1,
 811			tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
 
 
 
 
 
 
 
 812			req->ts_recent,
 813			0,
 814			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
 815					  AF_INET),
 816			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
 817			ip_hdr(skb)->tos);
 818}
 819
 820/*
 821 *	Send a SYN-ACK after having received a SYN.
 822 *	This still operates on a request_sock only, not on a big
 823 *	socket.
 824 */
 825static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
 
 826			      struct request_sock *req,
 827			      struct request_values *rvp,
 828			      u16 queue_mapping)
 829{
 830	const struct inet_request_sock *ireq = inet_rsk(req);
 831	struct flowi4 fl4;
 832	int err = -1;
 833	struct sk_buff * skb;
 834
 835	/* First, grab a route. */
 836	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
 837		return -1;
 838
 839	skb = tcp_make_synack(sk, dst, req, rvp);
 840
 841	if (skb) {
 842		__tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr);
 843
 844		skb_set_queue_mapping(skb, queue_mapping);
 845		err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
 846					    ireq->rmt_addr,
 847					    ireq->opt);
 848		err = net_xmit_eval(err);
 849	}
 850
 851	dst_release(dst);
 852	return err;
 853}
 854
 855static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req,
 856			      struct request_values *rvp)
 857{
 858	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
 859	return tcp_v4_send_synack(sk, NULL, req, rvp, 0);
 860}
 861
 862/*
 863 *	IPv4 request_sock destructor.
 864 */
 865static void tcp_v4_reqsk_destructor(struct request_sock *req)
 866{
 867	kfree(inet_rsk(req)->opt);
 868}
 869
 870/*
 871 * Return true if a syncookie should be sent
 872 */
 873bool tcp_syn_flood_action(struct sock *sk,
 874			 const struct sk_buff *skb,
 875			 const char *proto)
 876{
 877	const char *msg = "Dropping request";
 878	bool want_cookie = false;
 879	struct listen_sock *lopt;
 880
 881
 882
 883#ifdef CONFIG_SYN_COOKIES
 884	if (sysctl_tcp_syncookies) {
 885		msg = "Sending cookies";
 886		want_cookie = true;
 887		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
 888	} else
 889#endif
 890		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);
 891
 892	lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
 893	if (!lopt->synflood_warned) {
 894		lopt->synflood_warned = 1;
 895		pr_info("%s: Possible SYN flooding on port %d. %s.  Check SNMP counters.\n",
 896			proto, ntohs(tcp_hdr(skb)->dest), msg);
 897	}
 898	return want_cookie;
 899}
 900EXPORT_SYMBOL(tcp_syn_flood_action);
 901
 902/*
 903 * Save and compile IPv4 options into the request_sock if needed.
 904 */
 905static struct ip_options_rcu *tcp_v4_save_options(struct sock *sk,
 906						  struct sk_buff *skb)
 907{
 908	const struct ip_options *opt = &(IPCB(skb)->opt);
 909	struct ip_options_rcu *dopt = NULL;
 910
 911	if (opt && opt->optlen) {
 912		int opt_size = sizeof(*dopt) + opt->optlen;
 913
 914		dopt = kmalloc(opt_size, GFP_ATOMIC);
 915		if (dopt) {
 916			if (ip_options_echo(&dopt->opt, skb)) {
 917				kfree(dopt);
 918				dopt = NULL;
 919			}
 920		}
 921	}
 922	return dopt;
 923}
 924
 925#ifdef CONFIG_TCP_MD5SIG
 926/*
 927 * RFC2385 MD5 checksumming requires a mapping of
 928 * IP address->MD5 Key.
 929 * We need to maintain these in the sk structure.
 930 */
 931
 932/* Find the Key structure for an address.  */
 933struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
 934					 const union tcp_md5_addr *addr,
 935					 int family)
 936{
 937	struct tcp_sock *tp = tcp_sk(sk);
 938	struct tcp_md5sig_key *key;
 939	struct hlist_node *pos;
 940	unsigned int size = sizeof(struct in_addr);
 941	struct tcp_md5sig_info *md5sig;
 942
 943	/* caller either holds rcu_read_lock() or socket lock */
 944	md5sig = rcu_dereference_check(tp->md5sig_info,
 945				       sock_owned_by_user(sk) ||
 946				       lockdep_is_held(&sk->sk_lock.slock));
 947	if (!md5sig)
 948		return NULL;
 949#if IS_ENABLED(CONFIG_IPV6)
 950	if (family == AF_INET6)
 951		size = sizeof(struct in6_addr);
 952#endif
 953	hlist_for_each_entry_rcu(key, pos, &md5sig->head, node) {
 954		if (key->family != family)
 955			continue;
 956		if (!memcmp(&key->addr, addr, size))
 957			return key;
 958	}
 959	return NULL;
 960}
 961EXPORT_SYMBOL(tcp_md5_do_lookup);
 962
 963struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
 964					 struct sock *addr_sk)
 965{
 966	union tcp_md5_addr *addr;
 967
 968	addr = (union tcp_md5_addr *)&inet_sk(addr_sk)->inet_daddr;
 969	return tcp_md5_do_lookup(sk, addr, AF_INET);
 970}
 971EXPORT_SYMBOL(tcp_v4_md5_lookup);
 972
 973static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
 974						      struct request_sock *req)
 975{
 976	union tcp_md5_addr *addr;
 977
 978	addr = (union tcp_md5_addr *)&inet_rsk(req)->rmt_addr;
 979	return tcp_md5_do_lookup(sk, addr, AF_INET);
 980}
 981
 982/* This can be called on a newly created socket, from other files */
 983int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
 984		   int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
 985{
 986	/* Add Key to the list */
 987	struct tcp_md5sig_key *key;
 988	struct tcp_sock *tp = tcp_sk(sk);
 989	struct tcp_md5sig_info *md5sig;
 990
 991	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&addr, AF_INET);
 992	if (key) {
 993		/* Pre-existing entry - just update that one. */
 994		memcpy(key->key, newkey, newkeylen);
 995		key->keylen = newkeylen;
 996		return 0;
 997	}
 998
 999	md5sig = rcu_dereference_protected(tp->md5sig_info,
1000					   sock_owned_by_user(sk));
 
1001	if (!md5sig) {
1002		md5sig = kmalloc(sizeof(*md5sig), gfp);
1003		if (!md5sig)
1004			return -ENOMEM;
1005
1006		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
1007		INIT_HLIST_HEAD(&md5sig->head);
1008		rcu_assign_pointer(tp->md5sig_info, md5sig);
1009	}
1010
1011	key = sock_kmalloc(sk, sizeof(*key), gfp);
1012	if (!key)
1013		return -ENOMEM;
1014	if (hlist_empty(&md5sig->head) && !tcp_alloc_md5sig_pool(sk)) {
1015		sock_kfree_s(sk, key, sizeof(*key));
1016		return -ENOMEM;
1017	}
1018
1019	memcpy(key->key, newkey, newkeylen);
1020	key->keylen = newkeylen;
1021	key->family = family;
1022	memcpy(&key->addr, addr,
1023	       (family == AF_INET6) ? sizeof(struct in6_addr) :
1024				      sizeof(struct in_addr));
1025	hlist_add_head_rcu(&key->node, &md5sig->head);
1026	return 0;
1027}
1028EXPORT_SYMBOL(tcp_md5_do_add);
1029
1030int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
1031{
1032	struct tcp_sock *tp = tcp_sk(sk);
1033	struct tcp_md5sig_key *key;
1034	struct tcp_md5sig_info *md5sig;
1035
1036	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&addr, AF_INET);
1037	if (!key)
1038		return -ENOENT;
1039	hlist_del_rcu(&key->node);
1040	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1041	kfree_rcu(key, rcu);
1042	md5sig = rcu_dereference_protected(tp->md5sig_info,
1043					   sock_owned_by_user(sk));
1044	if (hlist_empty(&md5sig->head))
1045		tcp_free_md5sig_pool();
1046	return 0;
1047}
1048EXPORT_SYMBOL(tcp_md5_do_del);
1049
1050void tcp_clear_md5_list(struct sock *sk)
1051{
1052	struct tcp_sock *tp = tcp_sk(sk);
1053	struct tcp_md5sig_key *key;
1054	struct hlist_node *pos, *n;
1055	struct tcp_md5sig_info *md5sig;
1056
1057	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
1058
1059	if (!hlist_empty(&md5sig->head))
1060		tcp_free_md5sig_pool();
1061	hlist_for_each_entry_safe(key, pos, n, &md5sig->head, node) {
1062		hlist_del_rcu(&key->node);
1063		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1064		kfree_rcu(key, rcu);
1065	}
1066}
1067
1068static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
1069				 int optlen)
1070{
1071	struct tcp_md5sig cmd;
1072	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
1073
1074	if (optlen < sizeof(cmd))
1075		return -EINVAL;
1076
1077	if (copy_from_user(&cmd, optval, sizeof(cmd)))
1078		return -EFAULT;
1079
1080	if (sin->sin_family != AF_INET)
1081		return -EINVAL;
1082
1083	if (!cmd.tcpm_key || !cmd.tcpm_keylen)
1084		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1085				      AF_INET);
1086
1087	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
1088		return -EINVAL;
1089
1090	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1091			      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
1092			      GFP_KERNEL);
1093}
1094
1095static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
1096					__be32 daddr, __be32 saddr, int nbytes)
1097{
1098	struct tcp4_pseudohdr *bp;
1099	struct scatterlist sg;
1100
1101	bp = &hp->md5_blk.ip4;
1102
1103	/*
1104	 * 1. the TCP pseudo-header (in the order: source IP address,
1105	 * destination IP address, zero-padded protocol number, and
1106	 * segment length)
1107	 */
1108	bp->saddr = saddr;
1109	bp->daddr = daddr;
1110	bp->pad = 0;
1111	bp->protocol = IPPROTO_TCP;
1112	bp->len = cpu_to_be16(nbytes);
1113
1114	sg_init_one(&sg, bp, sizeof(*bp));
1115	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
 
1116}
1117
1118static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
1119			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
1120{
1121	struct tcp_md5sig_pool *hp;
1122	struct hash_desc *desc;
1123
1124	hp = tcp_get_md5sig_pool();
1125	if (!hp)
1126		goto clear_hash_noput;
1127	desc = &hp->md5_desc;
1128
1129	if (crypto_hash_init(desc))
1130		goto clear_hash;
1131	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
1132		goto clear_hash;
1133	if (tcp_md5_hash_header(hp, th))
1134		goto clear_hash;
1135	if (tcp_md5_hash_key(hp, key))
1136		goto clear_hash;
1137	if (crypto_hash_final(desc, md5_hash))
 
1138		goto clear_hash;
1139
1140	tcp_put_md5sig_pool();
1141	return 0;
1142
1143clear_hash:
1144	tcp_put_md5sig_pool();
1145clear_hash_noput:
1146	memset(md5_hash, 0, 16);
1147	return 1;
1148}
1149
1150int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
1151			const struct sock *sk, const struct request_sock *req,
1152			const struct sk_buff *skb)
1153{
1154	struct tcp_md5sig_pool *hp;
1155	struct hash_desc *desc;
1156	const struct tcphdr *th = tcp_hdr(skb);
1157	__be32 saddr, daddr;
1158
1159	if (sk) {
1160		saddr = inet_sk(sk)->inet_saddr;
1161		daddr = inet_sk(sk)->inet_daddr;
1162	} else if (req) {
1163		saddr = inet_rsk(req)->loc_addr;
1164		daddr = inet_rsk(req)->rmt_addr;
1165	} else {
1166		const struct iphdr *iph = ip_hdr(skb);
1167		saddr = iph->saddr;
1168		daddr = iph->daddr;
1169	}
1170
1171	hp = tcp_get_md5sig_pool();
1172	if (!hp)
1173		goto clear_hash_noput;
1174	desc = &hp->md5_desc;
1175
1176	if (crypto_hash_init(desc))
1177		goto clear_hash;
1178
1179	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
1180		goto clear_hash;
1181	if (tcp_md5_hash_header(hp, th))
1182		goto clear_hash;
1183	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
1184		goto clear_hash;
1185	if (tcp_md5_hash_key(hp, key))
1186		goto clear_hash;
1187	if (crypto_hash_final(desc, md5_hash))
 
1188		goto clear_hash;
1189
1190	tcp_put_md5sig_pool();
1191	return 0;
1192
1193clear_hash:
1194	tcp_put_md5sig_pool();
1195clear_hash_noput:
1196	memset(md5_hash, 0, 16);
1197	return 1;
1198}
1199EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
1200
1201static bool tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
 
 
 
 
1202{
 
1203	/*
1204	 * This gets called for each TCP segment that arrives
1205	 * so we want to be efficient.
1206	 * We have 3 drop cases:
1207	 * o No MD5 hash and one expected.
1208	 * o MD5 hash and we're not expecting one.
1209	 * o MD5 hash and its wrong.
1210	 */
1211	const __u8 *hash_location = NULL;
1212	struct tcp_md5sig_key *hash_expected;
1213	const struct iphdr *iph = ip_hdr(skb);
1214	const struct tcphdr *th = tcp_hdr(skb);
1215	int genhash;
1216	unsigned char newhash[16];
1217
1218	hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
1219					  AF_INET);
1220	hash_location = tcp_parse_md5sig_option(th);
1221
1222	/* We've parsed the options - do we have a hash? */
1223	if (!hash_expected && !hash_location)
1224		return false;
1225
1226	if (hash_expected && !hash_location) {
1227		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
1228		return true;
1229	}
1230
1231	if (!hash_expected && hash_location) {
1232		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
1233		return true;
1234	}
1235
1236	/* Okay, so this is hash_expected and hash_location -
1237	 * so we need to calculate the checksum.
1238	 */
1239	genhash = tcp_v4_md5_hash_skb(newhash,
1240				      hash_expected,
1241				      NULL, NULL, skb);
1242
1243	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
1244		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
1245				     &iph->saddr, ntohs(th->source),
1246				     &iph->daddr, ntohs(th->dest),
1247				     genhash ? " tcp_v4_calc_md5_hash failed"
1248				     : "");
1249		return true;
1250	}
1251	return false;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1252}
1253
1254#endif
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1255
1256struct request_sock_ops tcp_request_sock_ops __read_mostly = {
1257	.family		=	PF_INET,
1258	.obj_size	=	sizeof(struct tcp_request_sock),
1259	.rtx_syn_ack	=	tcp_v4_rtx_synack,
1260	.send_ack	=	tcp_v4_reqsk_send_ack,
1261	.destructor	=	tcp_v4_reqsk_destructor,
1262	.send_reset	=	tcp_v4_send_reset,
1263	.syn_ack_timeout = 	tcp_syn_ack_timeout,
1264};
1265
 
 
1266#ifdef CONFIG_TCP_MD5SIG
1267static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1268	.md5_lookup	=	tcp_v4_reqsk_md5_lookup,
1269	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
 
 
 
 
 
 
 
 
1270};
1271#endif
1272
1273int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1274{
1275	struct tcp_extend_values tmp_ext;
1276	struct tcp_options_received tmp_opt;
1277	const u8 *hash_location;
1278	struct request_sock *req;
1279	struct inet_request_sock *ireq;
1280	struct tcp_sock *tp = tcp_sk(sk);
1281	struct dst_entry *dst = NULL;
1282	__be32 saddr = ip_hdr(skb)->saddr;
1283	__be32 daddr = ip_hdr(skb)->daddr;
1284	__u32 isn = TCP_SKB_CB(skb)->when;
1285	bool want_cookie = false;
1286
1287	/* Never answer SYNs sent to broadcast or multicast */
1288	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
1289		goto drop;
1290
1291	/* TW buckets are converted to open requests without
1292	 * limitation: they conserve resources and the peer is
1293	 * evidently a real one.
1294	 */
1295	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
1296		want_cookie = tcp_syn_flood_action(sk, skb, "TCP");
1297		if (!want_cookie)
1298			goto drop;
1299	}
1300
1301	/* Accept backlog is full. If we have already queued enough
1302	 * warm entries in the syn queue, drop the request. That is
1303	 * better than clogging the syn queue with openreqs whose
1304	 * timeouts increase exponentially.
1305	 */
1306	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
1307		goto drop;
1308
1309	req = inet_reqsk_alloc(&tcp_request_sock_ops);
1310	if (!req)
1311		goto drop;
1312
1313#ifdef CONFIG_TCP_MD5SIG
1314	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
1315#endif
1316
1317	tcp_clear_options(&tmp_opt);
1318	tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
1319	tmp_opt.user_mss  = tp->rx_opt.user_mss;
1320	tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
1321
1322	if (tmp_opt.cookie_plus > 0 &&
1323	    tmp_opt.saw_tstamp &&
1324	    !tp->rx_opt.cookie_out_never &&
1325	    (sysctl_tcp_cookie_size > 0 ||
1326	     (tp->cookie_values != NULL &&
1327	      tp->cookie_values->cookie_desired > 0))) {
1328		u8 *c;
1329		u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
1330		int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;
1331
1332		if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
1333			goto drop_and_release;
1334
1335		/* Secret recipe starts with IP addresses */
1336		*mess++ ^= (__force u32)daddr;
1337		*mess++ ^= (__force u32)saddr;
1338
1339		/* plus variable length Initiator Cookie */
1340		c = (u8 *)mess;
1341		while (l-- > 0)
1342			*c++ ^= *hash_location++;
1343
1344		want_cookie = false;	/* not our kind of cookie */
1345		tmp_ext.cookie_out_never = 0; /* false */
1346		tmp_ext.cookie_plus = tmp_opt.cookie_plus;
1347	} else if (!tp->rx_opt.cookie_in_always) {
1348		/* redundant indications, but ensure initialization. */
1349		tmp_ext.cookie_out_never = 1; /* true */
1350		tmp_ext.cookie_plus = 0;
1351	} else {
1352		goto drop_and_release;
1353	}
1354	tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;
1355
1356	if (want_cookie && !tmp_opt.saw_tstamp)
1357		tcp_clear_options(&tmp_opt);
1358
1359	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
1360	tcp_openreq_init(req, &tmp_opt, skb);
1361
1362	ireq = inet_rsk(req);
1363	ireq->loc_addr = daddr;
1364	ireq->rmt_addr = saddr;
1365	ireq->no_srccheck = inet_sk(sk)->transparent;
1366	ireq->opt = tcp_v4_save_options(sk, skb);
1367
1368	if (security_inet_conn_request(sk, skb, req))
1369		goto drop_and_free;
1370
1371	if (!want_cookie || tmp_opt.tstamp_ok)
1372		TCP_ECN_create_request(req, skb);
1373
1374	if (want_cookie) {
1375		isn = cookie_v4_init_sequence(sk, skb, &req->mss);
1376		req->cookie_ts = tmp_opt.tstamp_ok;
1377	} else if (!isn) {
1378		struct inet_peer *peer = NULL;
1379		struct flowi4 fl4;
1380
1381		/* VJ's idea. We save the last timestamp seen
1382		 * from the destination in the peer table when entering
1383		 * TIME-WAIT state, and check against it before
1384		 * accepting a new connection request.
1385		 *
1386		 * If "isn" is not zero, this request hit a live
1387		 * timewait bucket, so all the necessary checks
1388		 * are made in the function processing the timewait state.
1389		 */
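		/* Concretely: with TCP_PAWS_MSL == 60 and TCP_PAWS_WINDOW == 1
		 * (as defined in tcp.h), the check below rejects a SYN when the
		 * peer entry was refreshed less than 60 seconds ago and the
		 * incoming timestamp lags the recorded one by more than 1.
		 */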
1390		if (tmp_opt.saw_tstamp &&
1391		    tcp_death_row.sysctl_tw_recycle &&
1392		    (dst = inet_csk_route_req(sk, &fl4, req)) != NULL &&
1393		    fl4.daddr == saddr &&
1394		    (peer = rt_get_peer((struct rtable *)dst, fl4.daddr)) != NULL) {
1395			inet_peer_refcheck(peer);
1396			if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
1397			    (s32)(peer->tcp_ts - req->ts_recent) >
1398							TCP_PAWS_WINDOW) {
1399				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
1400				goto drop_and_release;
1401			}
1402		}
1403		/* Kill the following clause, if you dislike this way. */
1404		else if (!sysctl_tcp_syncookies &&
1405			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
1406			  (sysctl_max_syn_backlog >> 2)) &&
1407			 (!peer || !peer->tcp_ts_stamp) &&
1408			 (!dst || !dst_metric(dst, RTAX_RTT))) {
1409			/* Without syncookies, the last quarter of
1410			 * the backlog is reserved for destinations
1411			 * proven to be alive.
1412			 * It means that we keep communicating with
1413			 * destinations that were already remembered
1414			 * by the moment the synflood started.
1415			 */
1416			LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("drop open request from %pI4/%u\n"),
1417				       &saddr, ntohs(tcp_hdr(skb)->source));
1418			goto drop_and_release;
1419		}
1420
1421		isn = tcp_v4_init_sequence(skb);
1422	}
1423	tcp_rsk(req)->snt_isn = isn;
1424	tcp_rsk(req)->snt_synack = tcp_time_stamp;
1425
1426	if (tcp_v4_send_synack(sk, dst, req,
1427			       (struct request_values *)&tmp_ext,
1428			       skb_get_queue_mapping(skb)) ||
1429	    want_cookie)
1430		goto drop_and_free;
1431
1432	inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1433	return 0;
1434
1435drop_and_release:
1436	dst_release(dst);
1437drop_and_free:
1438	reqsk_free(req);
1439drop:
1440	return 0;
1441}
1442EXPORT_SYMBOL(tcp_v4_conn_request);
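/*
 * Both queue-pressure tests above are bounded by the backlog that user
 * space passes to listen(), capped by the net.core and tcp sysctl limits.
 * A minimal sketch ('fd' and 'addr' are illustrative):
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *
 *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 *	listen(fd, 128);	// 128 bounds the accept queue that
 *				// sk_acceptq_is_full() checks above
 */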
1443
1444
1445/*
1446 * The three way handshake has completed - we got a valid synack -
1447 * now create the new socket.
1448 */
1449struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1450				  struct request_sock *req,
1451				  struct dst_entry *dst)
1452{
1453	struct inet_request_sock *ireq;
1454	struct inet_sock *newinet;
1455	struct tcp_sock *newtp;
1456	struct sock *newsk;
1457#ifdef CONFIG_TCP_MD5SIG
1458	struct tcp_md5sig_key *key;
1459#endif
1460	struct ip_options_rcu *inet_opt;
1461
1462	if (sk_acceptq_is_full(sk))
1463		goto exit_overflow;
1464
1465	newsk = tcp_create_openreq_child(sk, req, skb);
1466	if (!newsk)
1467		goto exit_nonewsk;
1468
1469	newsk->sk_gso_type = SKB_GSO_TCPV4;
1470
1471	newtp		      = tcp_sk(newsk);
1472	newinet		      = inet_sk(newsk);
1473	ireq		      = inet_rsk(req);
1474	newinet->inet_daddr   = ireq->rmt_addr;
1475	newinet->inet_rcv_saddr = ireq->loc_addr;
1476	newinet->inet_saddr	      = ireq->loc_addr;
1477	inet_opt	      = ireq->opt;
1478	rcu_assign_pointer(newinet->inet_opt, inet_opt);
1479	ireq->opt	      = NULL;
1480	newinet->mc_index     = inet_iif(skb);
1481	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
1482	newinet->rcv_tos      = ip_hdr(skb)->tos;
1483	inet_csk(newsk)->icsk_ext_hdr_len = 0;
1484	if (inet_opt)
1485		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
1486	newinet->inet_id = newtp->write_seq ^ jiffies;
1487
1488	if (!dst) {
1489		dst = inet_csk_route_child_sock(sk, newsk, req);
1490		if (!dst)
1491			goto put_and_exit;
1492	} else {
1493		/* syncookie case: see end of cookie_v4_check() */
1494	}
1495	sk_setup_caps(newsk, dst);
1496
1497	tcp_mtup_init(newsk);
1498	tcp_sync_mss(newsk, dst_mtu(dst));
1499	newtp->advmss = dst_metric_advmss(dst);
1500	if (tcp_sk(sk)->rx_opt.user_mss &&
1501	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1502		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1503
1504	tcp_initialize_rcv_mss(newsk);
1505	if (tcp_rsk(req)->snt_synack)
1506		tcp_valid_rtt_meas(newsk,
1507		    tcp_time_stamp - tcp_rsk(req)->snt_synack);
1508	newtp->total_retrans = req->retrans;
1509
1510#ifdef CONFIG_TCP_MD5SIG
1511	/* Copy over the MD5 key from the original socket */
1512	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
1513				AF_INET);
1514	if (key != NULL) {
1515		/*
1516		 * We're using one, so create a matching key
1517		 * on the newsk structure. If we fail to get
1518		 * memory, then we end up not copying the key
1519		 * across. Shucks.
1520		 */
1521		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
1522			       AF_INET, key->key, key->keylen, GFP_ATOMIC);
1523		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
1524	}
1525#endif
1526
1527	if (__inet_inherit_port(sk, newsk) < 0)
1528		goto put_and_exit;
1529	__inet_hash_nolisten(newsk, NULL);
1530
1531	return newsk;
1532
1533exit_overflow:
1534	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1535exit_nonewsk:
1536	dst_release(dst);
1537exit:
1538	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1539	return NULL;
1540put_and_exit:
1541	tcp_clear_xmit_timers(newsk);
1542	tcp_cleanup_congestion_control(newsk);
1543	bh_unlock_sock(newsk);
1544	sock_put(newsk);
1545	goto exit;
1546}
1547EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
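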
1548
1549static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
1550{
1551	struct tcphdr *th = tcp_hdr(skb);
1552	const struct iphdr *iph = ip_hdr(skb);
1553	struct sock *nsk;
1554	struct request_sock **prev;
1555	/* Find possible connection requests. */
1556	struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
1557						       iph->saddr, iph->daddr);
1558	if (req)
1559		return tcp_check_req(sk, skb, req, prev);
1560
1561	nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
1562			th->source, iph->daddr, th->dest, inet_iif(skb));
1563
1564	if (nsk) {
1565		if (nsk->sk_state != TCP_TIME_WAIT) {
1566			bh_lock_sock(nsk);
1567			return nsk;
1568		}
1569		inet_twsk_put(inet_twsk(nsk));
1570		return NULL;
1571	}
1572
1573#ifdef CONFIG_SYN_COOKIES
1574	if (!th->syn)
1575		sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
1576#endif
1577	return sk;
1578}
1579
1580static __sum16 tcp_v4_checksum_init(struct sk_buff *skb)
1581{
1582	const struct iphdr *iph = ip_hdr(skb);
1583
1584	if (skb->ip_summed == CHECKSUM_COMPLETE) {
1585		if (!tcp_v4_check(skb->len, iph->saddr,
1586				  iph->daddr, skb->csum)) {
1587			skb->ip_summed = CHECKSUM_UNNECESSARY;
1588			return 0;
1589		}
1590	}
1591
1592	skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
1593				       skb->len, IPPROTO_TCP, 0);
1594
1595	if (skb->len <= 76) {
1596		return __skb_checksum_complete(skb);
1597	}
1598	return 0;
1599}
1600
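/*
 * For illustration, a stand-alone sketch of the one's-complement sum that
 * csum_tcpudp_nofold() seeds and __skb_checksum_complete() finishes. It
 * assumes a contiguous buffer; the real helpers also walk fragmented skbs
 * and use optimized per-arch code:
 *
 *	static u16 csum16(const u8 *buf, size_t len, u32 sum)
 *	{
 *		while (len > 1) {		// sum 16-bit words
 *			sum += (buf[0] << 8) | buf[1];
 *			buf += 2;
 *			len -= 2;
 *		}
 *		if (len)			// pad a trailing odd byte
 *			sum += buf[0] << 8;
 *		while (sum >> 16)		// fold carries back in
 *			sum = (sum & 0xffff) + (sum >> 16);
 *		return ~sum & 0xffff;
 *	}
 */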
1601
1602/* The socket must have its spinlock held when we get
1603 * here.
1604 *
1605 * We have a potential double-lock case here, so even when
1606 * doing backlog processing we use the BH locking scheme.
1607 * This is because we cannot sleep with the original spinlock
1608 * held.
1609 */
1610int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1611{
1612	struct sock *rsk;
1613#ifdef CONFIG_TCP_MD5SIG
1614	/*
1615	 * We really want to reject the packet as early as possible
1616	 * if:
1617	 *  o We're expecting an MD5'd packet and there is no MD5 tcp option
1618	 *  o There is an MD5 option and we're not expecting one
1619	 */
1620	if (tcp_v4_inbound_md5_hash(sk, skb))
1621		goto discard;
1622#endif
1623
1624	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1625		sock_rps_save_rxhash(sk, skb);
1626		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
1627			rsk = sk;
1628			goto reset;
1629		}
1630		return 0;
1631	}
1632
1633	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1634		goto csum_err;
1635
1636	if (sk->sk_state == TCP_LISTEN) {
1637		struct sock *nsk = tcp_v4_hnd_req(sk, skb);
1638		if (!nsk)
1639			goto discard;
1640
1641		if (nsk != sk) {
1642			sock_rps_save_rxhash(nsk, skb);
1643			if (tcp_child_process(sk, nsk, skb)) {
1644				rsk = nsk;
1645				goto reset;
1646			}
1647			return 0;
1648		}
1649	} else
1650		sock_rps_save_rxhash(sk, skb);
1651
1652	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
1653		rsk = sk;
1654		goto reset;
1655	}
1656	return 0;
1657
1658reset:
1659	tcp_v4_send_reset(rsk, skb);
1660discard:
1661	kfree_skb(skb);
1662	/* Be careful here. If this function gets more complicated and
1663	 * gcc suffers from register pressure on the x86, sk (in %ebx)
1664	 * might be destroyed here. This current version compiles correctly,
1665	 * but you have been warned.
1666	 */
1667	return 0;
1668
1669csum_err:
1670	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1671	goto discard;
1672}
1673EXPORT_SYMBOL(tcp_v4_do_rcv);
1674
1675/*
1676 *	From tcp_input.c
1677 */
1678
1679int tcp_v4_rcv(struct sk_buff *skb)
1680{
1681	const struct iphdr *iph;
1682	const struct tcphdr *th;
1683	struct sock *sk;
1684	int ret;
1685	struct net *net = dev_net(skb->dev);
1686
1687	if (skb->pkt_type != PACKET_HOST)
1688		goto discard_it;
1689
1690	/* Count it even if it's bad */
1691	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1692
1693	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1694		goto discard_it;
1695
1696	th = tcp_hdr(skb);
1697
1698	if (th->doff < sizeof(struct tcphdr) / 4)
1699		goto bad_packet;
1700	if (!pskb_may_pull(skb, th->doff * 4))
1701		goto discard_it;
1702
1703	/* An explanation is required here, I think.
1704	 * Packet length and doff are validated by header prediction,
1705	 * provided the case of th->doff==0 is eliminated.
1706	 * So, we defer the checks. */
1707	if (!skb_csum_unnecessary(skb) && tcp_v4_checksum_init(skb))
1708		goto bad_packet;
1709
1710	th = tcp_hdr(skb);
1711	iph = ip_hdr(skb);
1712	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1713	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1714				    skb->len - th->doff * 4);
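	/* For example, a SYN segment with doff == 5 (20 byte header) and
	 * 100 bytes of payload (skb->len == 120) yields
	 * end_seq = seq + 1 + 0 + (120 - 20) = seq + 101.
	 */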
1715	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1716	TCP_SKB_CB(skb)->when	 = 0;
1717	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
1718	TCP_SKB_CB(skb)->sacked	 = 0;
1719
1720	sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
1721	if (!sk)
1722		goto no_tcp_socket;
1723
1724process:
1725	if (sk->sk_state == TCP_TIME_WAIT)
1726		goto do_time_wait;
1727
1728	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
1729		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1730		goto discard_and_relse;
1731	}
1732
1733	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1734		goto discard_and_relse;
1735	nf_reset(skb);
1736
1737	if (sk_filter(sk, skb))
1738		goto discard_and_relse;
1739
1740	skb->dev = NULL;
1741
1742	bh_lock_sock_nested(sk);
1743	ret = 0;
1744	if (!sock_owned_by_user(sk)) {
1745#ifdef CONFIG_NET_DMA
1746		struct tcp_sock *tp = tcp_sk(sk);
1747		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1748			tp->ucopy.dma_chan = net_dma_find_channel();
1749		if (tp->ucopy.dma_chan)
1750			ret = tcp_v4_do_rcv(sk, skb);
1751		else
1752#endif
1753		{
1754			if (!tcp_prequeue(sk, skb))
1755				ret = tcp_v4_do_rcv(sk, skb);
1756		}
1757	} else if (unlikely(sk_add_backlog(sk, skb,
1758					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
1759		bh_unlock_sock(sk);
1760		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1761		goto discard_and_relse;
1762	}
1763	bh_unlock_sock(sk);
1764
1765	sock_put(sk);
1766
1767	return ret;
1768
1769no_tcp_socket:
1770	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1771		goto discard_it;
1772
1773	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1774bad_packet:
1775		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1776	} else {
1777		tcp_v4_send_reset(NULL, skb);
1778	}
1779
1780discard_it:
1781	/* Discard frame. */
1782	kfree_skb(skb);
1783	return 0;
1784
1785discard_and_relse:
1786	sock_put(sk);
1787	goto discard_it;
1788
1789do_time_wait:
1790	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1791		inet_twsk_put(inet_twsk(sk));
1792		goto discard_it;
1793	}
1794
1795	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1796		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1797		inet_twsk_put(inet_twsk(sk));
1798		goto discard_it;
1799	}
1800	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1801	case TCP_TW_SYN: {
1802		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
1803							&tcp_hashinfo,
1804							iph->daddr, th->dest,
1805							inet_iif(skb));
1806		if (sk2) {
1807			inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
1808			inet_twsk_put(inet_twsk(sk));
1809			sk = sk2;
1810			goto process;
1811		}
1812		/* Fall through to ACK */
1813	}
1814	case TCP_TW_ACK:
1815		tcp_v4_timewait_ack(sk, skb);
1816		break;
1817	case TCP_TW_RST:
1818		goto no_tcp_socket;
1819	case TCP_TW_SUCCESS:;
1820	}
1821	goto discard_it;
1822}
1823
1824struct inet_peer *tcp_v4_get_peer(struct sock *sk, bool *release_it)
1825{
1826	struct rtable *rt = (struct rtable *) __sk_dst_get(sk);
1827	struct inet_sock *inet = inet_sk(sk);
1828	struct inet_peer *peer;
1829
1830	if (!rt ||
1831	    inet->cork.fl.u.ip4.daddr != inet->inet_daddr) {
1832		peer = inet_getpeer_v4(inet->inet_daddr, 1);
1833		*release_it = true;
1834	} else {
1835		if (!rt->peer)
1836			rt_bind_peer(rt, inet->inet_daddr, 1);
1837		peer = rt->peer;
1838		*release_it = false;
1839	}
1840
1841	return peer;
1842}
1843EXPORT_SYMBOL(tcp_v4_get_peer);
1844
1845void *tcp_v4_tw_get_peer(struct sock *sk)
1846{
1847	const struct inet_timewait_sock *tw = inet_twsk(sk);
1848
1849	return inet_getpeer_v4(tw->tw_daddr, 1);
1850}
1851EXPORT_SYMBOL(tcp_v4_tw_get_peer);
1852
1853static struct timewait_sock_ops tcp_timewait_sock_ops = {
1854	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
1855	.twsk_unique	= tcp_twsk_unique,
1856	.twsk_destructor= tcp_twsk_destructor,
1857	.twsk_getpeer	= tcp_v4_tw_get_peer,
1858};
1859
1860const struct inet_connection_sock_af_ops ipv4_specific = {
1861	.queue_xmit	   = ip_queue_xmit,
1862	.send_check	   = tcp_v4_send_check,
1863	.rebuild_header	   = inet_sk_rebuild_header,
1864	.conn_request	   = tcp_v4_conn_request,
1865	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
1866	.get_peer	   = tcp_v4_get_peer,
1867	.net_header_len	   = sizeof(struct iphdr),
1868	.setsockopt	   = ip_setsockopt,
1869	.getsockopt	   = ip_getsockopt,
1870	.addr2sockaddr	   = inet_csk_addr2sockaddr,
1871	.sockaddr_len	   = sizeof(struct sockaddr_in),
1872	.bind_conflict	   = inet_csk_bind_conflict,
1873#ifdef CONFIG_COMPAT
1874	.compat_setsockopt = compat_ip_setsockopt,
1875	.compat_getsockopt = compat_ip_getsockopt,
1876#endif
1877};
1878EXPORT_SYMBOL(ipv4_specific);
1879
1880#ifdef CONFIG_TCP_MD5SIG
1881static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
1882	.md5_lookup		= tcp_v4_md5_lookup,
1883	.calc_md5_hash		= tcp_v4_md5_hash_skb,
1884	.md5_parse		= tcp_v4_parse_md5_keys,
1885};
1886#endif
1887
1888/* NOTE: A lot of things set to zero explicitly by call to
1889 *       sk_alloc() so need not be done here.
1890 */
1891static int tcp_v4_init_sock(struct sock *sk)
1892{
1893	struct inet_connection_sock *icsk = inet_csk(sk);
1894
1895	tcp_init_sock(sk);
1896
1897	icsk->icsk_af_ops = &ipv4_specific;
1898
1899#ifdef CONFIG_TCP_MD5SIG
1900	tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
1901#endif
1902
1903	return 0;
1904}
1905
1906void tcp_v4_destroy_sock(struct sock *sk)
1907{
1908	struct tcp_sock *tp = tcp_sk(sk);
1909
1910	tcp_clear_xmit_timers(sk);
1911
1912	tcp_cleanup_congestion_control(sk);
1913
1914	/* Clean up the write buffer. */
1915	tcp_write_queue_purge(sk);
1916
1917	/* Cleans up our, hopefully empty, out_of_order_queue. */
1918	__skb_queue_purge(&tp->out_of_order_queue);
1919
1920#ifdef CONFIG_TCP_MD5SIG
1921	/* Clean up the MD5 key list, if any */
1922	if (tp->md5sig_info) {
1923		tcp_clear_md5_list(sk);
1924		kfree_rcu(tp->md5sig_info, rcu);
1925		tp->md5sig_info = NULL;
1926	}
1927#endif
1928
1929#ifdef CONFIG_NET_DMA
1930	/* Cleans up our sk_async_wait_queue */
1931	__skb_queue_purge(&sk->sk_async_wait_queue);
1932#endif
1933
1934	/* Clean up the prequeue; it really should be empty. */
1935	__skb_queue_purge(&tp->ucopy.prequeue);
1936
1937	/* Clean up a referenced TCP bind bucket. */
1938	if (inet_csk(sk)->icsk_bind_hash)
1939		inet_put_port(sk);
1940
1941	/*
1942	 * If sendmsg cached page exists, toss it.
1943	 */
1944	if (sk->sk_sndmsg_page) {
1945		__free_page(sk->sk_sndmsg_page);
1946		sk->sk_sndmsg_page = NULL;
1947	}
1948
1949	/* TCP Cookie Transactions */
1950	if (tp->cookie_values != NULL) {
1951		kref_put(&tp->cookie_values->kref,
1952			 tcp_cookie_values_release);
1953		tp->cookie_values = NULL;
1954	}
1955
1956	sk_sockets_allocated_dec(sk);
1957	sock_release_memcg(sk);
1958}
1959EXPORT_SYMBOL(tcp_v4_destroy_sock);
1960
1961#ifdef CONFIG_PROC_FS
1962/* Proc filesystem TCP sock list dumping. */
1963
1964static inline struct inet_timewait_sock *tw_head(struct hlist_nulls_head *head)
1965{
1966	return hlist_nulls_empty(head) ? NULL :
1967		list_entry(head->first, struct inet_timewait_sock, tw_node);
1968}
1969
1970static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
1971{
1972	return !is_a_nulls(tw->tw_node.next) ?
1973		hlist_nulls_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
1974}
1975
1976/*
1977 * Get the next listener socket following cur.  If cur is NULL, get the first socket
1978 * starting from bucket given in st->bucket; when st->bucket is zero the
1979 * very first socket in the hash table is returned.
1980 */
1981static void *listening_get_next(struct seq_file *seq, void *cur)
1982{
1983	struct inet_connection_sock *icsk;
1984	struct hlist_nulls_node *node;
1985	struct sock *sk = cur;
1986	struct inet_listen_hashbucket *ilb;
1987	struct tcp_iter_state *st = seq->private;
1988	struct net *net = seq_file_net(seq);
1989
1990	if (!sk) {
1991		ilb = &tcp_hashinfo.listening_hash[st->bucket];
1992		spin_lock_bh(&ilb->lock);
1993		sk = sk_nulls_head(&ilb->head);
1994		st->offset = 0;
1995		goto get_sk;
1996	}
1997	ilb = &tcp_hashinfo.listening_hash[st->bucket];
1998	++st->num;
1999	++st->offset;
2000
2001	if (st->state == TCP_SEQ_STATE_OPENREQ) {
2002		struct request_sock *req = cur;
2003
2004		icsk = inet_csk(st->syn_wait_sk);
2005		req = req->dl_next;
2006		while (1) {
2007			while (req) {
2008				if (req->rsk_ops->family == st->family) {
2009					cur = req;
2010					goto out;
2011				}
2012				req = req->dl_next;
2013			}
2014			if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
2015				break;
2016get_req:
2017			req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
2018		}
2019		sk	  = sk_nulls_next(st->syn_wait_sk);
2020		st->state = TCP_SEQ_STATE_LISTENING;
2021		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2022	} else {
2023		icsk = inet_csk(sk);
2024		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2025		if (reqsk_queue_len(&icsk->icsk_accept_queue))
2026			goto start_req;
2027		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2028		sk = sk_nulls_next(sk);
2029	}
2030get_sk:
2031	sk_nulls_for_each_from(sk, node) {
2032		if (!net_eq(sock_net(sk), net))
2033			continue;
2034		if (sk->sk_family == st->family) {
2035			cur = sk;
2036			goto out;
2037		}
2038		icsk = inet_csk(sk);
2039		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2040		if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
2041start_req:
2042			st->uid		= sock_i_uid(sk);
2043			st->syn_wait_sk = sk;
2044			st->state	= TCP_SEQ_STATE_OPENREQ;
2045			st->sbucket	= 0;
2046			goto get_req;
2047		}
2048		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2049	}
2050	spin_unlock_bh(&ilb->lock);
2051	st->offset = 0;
2052	if (++st->bucket < INET_LHTABLE_SIZE) {
2053		ilb = &tcp_hashinfo.listening_hash[st->bucket];
2054		spin_lock_bh(&ilb->lock);
2055		sk = sk_nulls_head(&ilb->head);
2056		goto get_sk;
2057	}
2058	cur = NULL;
2059out:
2060	return cur;
2061}
2062
2063static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
2064{
2065	struct tcp_iter_state *st = seq->private;
2066	void *rc;
2067
2068	st->bucket = 0;
2069	st->offset = 0;
2070	rc = listening_get_next(seq, NULL);
2071
2072	while (rc && *pos) {
2073		rc = listening_get_next(seq, rc);
2074		--*pos;
2075	}
2076	return rc;
2077}
2078
2079static inline bool empty_bucket(struct tcp_iter_state *st)
2080{
2081	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain) &&
2082		hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
2083}
2084
2085/*
2086 * Get first established socket starting from bucket given in st->bucket.
2087 * If st->bucket is zero, the very first socket in the hash is returned.
2088 */
2089static void *established_get_first(struct seq_file *seq)
2090{
2091	struct tcp_iter_state *st = seq->private;
2092	struct net *net = seq_file_net(seq);
2093	void *rc = NULL;
2094
2095	st->offset = 0;
2096	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
2097		struct sock *sk;
2098		struct hlist_nulls_node *node;
2099		struct inet_timewait_sock *tw;
2100		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
2101
2102		/* Lockless fast path for the common case of empty buckets */
2103		if (empty_bucket(st))
2104			continue;
2105
2106		spin_lock_bh(lock);
2107		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
2108			if (sk->sk_family != st->family ||
2109			    !net_eq(sock_net(sk), net)) {
2110				continue;
2111			}
2112			rc = sk;
2113			goto out;
2114		}
2115		st->state = TCP_SEQ_STATE_TIME_WAIT;
2116		inet_twsk_for_each(tw, node,
2117				   &tcp_hashinfo.ehash[st->bucket].twchain) {
2118			if (tw->tw_family != st->family ||
2119			    !net_eq(twsk_net(tw), net)) {
2120				continue;
2121			}
2122			rc = tw;
2123			goto out;
2124		}
2125		spin_unlock_bh(lock);
2126		st->state = TCP_SEQ_STATE_ESTABLISHED;
2127	}
2128out:
2129	return rc;
2130}
2131
2132static void *established_get_next(struct seq_file *seq, void *cur)
2133{
2134	struct sock *sk = cur;
2135	struct inet_timewait_sock *tw;
2136	struct hlist_nulls_node *node;
2137	struct tcp_iter_state *st = seq->private;
2138	struct net *net = seq_file_net(seq);
2139
2140	++st->num;
2141	++st->offset;
2142
2143	if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
2144		tw = cur;
2145		tw = tw_next(tw);
2146get_tw:
2147		while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) {
2148			tw = tw_next(tw);
2149		}
2150		if (tw) {
2151			cur = tw;
2152			goto out;
2153		}
2154		spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2155		st->state = TCP_SEQ_STATE_ESTABLISHED;
2156
2157		/* Look for the next non-empty bucket */
2158		st->offset = 0;
2159		while (++st->bucket <= tcp_hashinfo.ehash_mask &&
2160				empty_bucket(st))
2161			;
2162		if (st->bucket > tcp_hashinfo.ehash_mask)
2163			return NULL;
2164
2165		spin_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2166		sk = sk_nulls_head(&tcp_hashinfo.ehash[st->bucket].chain);
2167	} else
2168		sk = sk_nulls_next(sk);
2169
2170	sk_nulls_for_each_from(sk, node) {
2171		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
2172			goto found;
2173	}
2174
2175	st->state = TCP_SEQ_STATE_TIME_WAIT;
2176	tw = tw_head(&tcp_hashinfo.ehash[st->bucket].twchain);
2177	goto get_tw;
2178found:
2179	cur = sk;
2180out:
2181	return cur;
2182}
2183
2184static void *established_get_idx(struct seq_file *seq, loff_t pos)
2185{
2186	struct tcp_iter_state *st = seq->private;
2187	void *rc;
2188
2189	st->bucket = 0;
2190	rc = established_get_first(seq);
2191
2192	while (rc && pos) {
2193		rc = established_get_next(seq, rc);
2194		--pos;
2195	}
2196	return rc;
2197}
2198
2199static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2200{
2201	void *rc;
2202	struct tcp_iter_state *st = seq->private;
2203
2204	st->state = TCP_SEQ_STATE_LISTENING;
2205	rc	  = listening_get_idx(seq, &pos);
2206
2207	if (!rc) {
2208		st->state = TCP_SEQ_STATE_ESTABLISHED;
2209		rc	  = established_get_idx(seq, pos);
2210	}
2211
2212	return rc;
2213}
2214
2215static void *tcp_seek_last_pos(struct seq_file *seq)
2216{
2217	struct tcp_iter_state *st = seq->private;
2218	int offset = st->offset;
2219	int orig_num = st->num;
2220	void *rc = NULL;
2221
2222	switch (st->state) {
2223	case TCP_SEQ_STATE_OPENREQ:
2224	case TCP_SEQ_STATE_LISTENING:
2225		if (st->bucket >= INET_LHTABLE_SIZE)
2226			break;
2227		st->state = TCP_SEQ_STATE_LISTENING;
2228		rc = listening_get_next(seq, NULL);
2229		while (offset-- && rc)
2230			rc = listening_get_next(seq, rc);
2231		if (rc)
2232			break;
2233		st->bucket = 0;
2234		/* Fallthrough */
2235	case TCP_SEQ_STATE_ESTABLISHED:
2236	case TCP_SEQ_STATE_TIME_WAIT:
2237		st->state = TCP_SEQ_STATE_ESTABLISHED;
2238		if (st->bucket > tcp_hashinfo.ehash_mask)
2239			break;
2240		rc = established_get_first(seq);
2241		while (offset-- && rc)
2242			rc = established_get_next(seq, rc);
2243	}
2244
2245	st->num = orig_num;
2246
2247	return rc;
2248}
2249
2250static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2251{
2252	struct tcp_iter_state *st = seq->private;
2253	void *rc;
2254
2255	if (*pos && *pos == st->last_pos) {
2256		rc = tcp_seek_last_pos(seq);
2257		if (rc)
2258			goto out;
2259	}
2260
2261	st->state = TCP_SEQ_STATE_LISTENING;
2262	st->num = 0;
2263	st->bucket = 0;
2264	st->offset = 0;
2265	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2266
2267out:
2268	st->last_pos = *pos;
2269	return rc;
2270}
2271
2272static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2273{
2274	struct tcp_iter_state *st = seq->private;
2275	void *rc = NULL;
2276
2277	if (v == SEQ_START_TOKEN) {
2278		rc = tcp_get_idx(seq, 0);
2279		goto out;
2280	}
2281
2282	switch (st->state) {
2283	case TCP_SEQ_STATE_OPENREQ:
2284	case TCP_SEQ_STATE_LISTENING:
2285		rc = listening_get_next(seq, v);
2286		if (!rc) {
2287			st->state = TCP_SEQ_STATE_ESTABLISHED;
2288			st->bucket = 0;
2289			st->offset = 0;
2290			rc	  = established_get_first(seq);
2291		}
2292		break;
2293	case TCP_SEQ_STATE_ESTABLISHED:
2294	case TCP_SEQ_STATE_TIME_WAIT:
2295		rc = established_get_next(seq, v);
2296		break;
2297	}
2298out:
2299	++*pos;
2300	st->last_pos = *pos;
2301	return rc;
2302}
2303
2304static void tcp_seq_stop(struct seq_file *seq, void *v)
2305{
2306	struct tcp_iter_state *st = seq->private;
2307
2308	switch (st->state) {
2309	case TCP_SEQ_STATE_OPENREQ:
2310		if (v) {
2311			struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
2312			read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2313		}
2314	case TCP_SEQ_STATE_LISTENING:
2315		if (v != SEQ_START_TOKEN)
2316			spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
2317		break;
2318	case TCP_SEQ_STATE_TIME_WAIT:
2319	case TCP_SEQ_STATE_ESTABLISHED:
2320		if (v)
2321			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2322		break;
2323	}
2324}
2325
2326int tcp_seq_open(struct inode *inode, struct file *file)
2327{
2328	struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
2329	struct tcp_iter_state *s;
2330	int err;
2331
2332	err = seq_open_net(inode, file, &afinfo->seq_ops,
2333			  sizeof(struct tcp_iter_state));
2334	if (err < 0)
2335		return err;
2336
2337	s = ((struct seq_file *)file->private_data)->private;
2338	s->family		= afinfo->family;
2339	s->last_pos 		= 0;
2340	return 0;
2341}
2342EXPORT_SYMBOL(tcp_seq_open);
2343
2344int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
2345{
2346	int rc = 0;
2347	struct proc_dir_entry *p;
2348
2349	afinfo->seq_ops.start		= tcp_seq_start;
2350	afinfo->seq_ops.next		= tcp_seq_next;
2351	afinfo->seq_ops.stop		= tcp_seq_stop;
2352
2353	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
2354			     afinfo->seq_fops, afinfo);
2355	if (!p)
2356		rc = -ENOMEM;
2357	return rc;
2358}
2359EXPORT_SYMBOL(tcp_proc_register);
2360
2361void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
2362{
2363	proc_net_remove(net, afinfo->name);
2364}
2365EXPORT_SYMBOL(tcp_proc_unregister);
2366
2367static void get_openreq4(const struct sock *sk, const struct request_sock *req,
2368			 struct seq_file *f, int i, int uid, int *len)
2369{
2370	const struct inet_request_sock *ireq = inet_rsk(req);
2371	int ttd = req->expires - jiffies;
2372
2373	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2374		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %pK%n",
2375		i,
2376		ireq->loc_addr,
2377		ntohs(inet_sk(sk)->inet_sport),
2378		ireq->rmt_addr,
2379		ntohs(ireq->rmt_port),
2380		TCP_SYN_RECV,
2381		0, 0, /* could print option size, but that is af dependent. */
2382		1,    /* timers active (only the expire timer) */
2383		jiffies_to_clock_t(ttd),
2384		req->retrans,
2385		uid,
2386		0,  /* non standard timer */
2387		0, /* open_requests have no inode */
2388		atomic_read(&sk->sk_refcnt),
2389		req,
2390		len);
2391}
2392
2393static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
2394{
2395	int timer_active;
2396	unsigned long timer_expires;
2397	const struct tcp_sock *tp = tcp_sk(sk);
2398	const struct inet_connection_sock *icsk = inet_csk(sk);
2399	const struct inet_sock *inet = inet_sk(sk);
2400	__be32 dest = inet->inet_daddr;
2401	__be32 src = inet->inet_rcv_saddr;
2402	__u16 destp = ntohs(inet->inet_dport);
2403	__u16 srcp = ntohs(inet->inet_sport);
2404	int rx_queue;
2405
2406	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
2407		timer_active	= 1;
2408		timer_expires	= icsk->icsk_timeout;
2409	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2410		timer_active	= 4;
2411		timer_expires	= icsk->icsk_timeout;
2412	} else if (timer_pending(&sk->sk_timer)) {
2413		timer_active	= 2;
2414		timer_expires	= sk->sk_timer.expires;
2415	} else {
2416		timer_active	= 0;
2417		timer_expires = jiffies;
2418	}
2419
2420	if (sk->sk_state == TCP_LISTEN)
2421		rx_queue = sk->sk_ack_backlog;
2422	else
2423		/*
2424		 * Because we don't lock the socket, we might find a transient negative value.
2425		 */
2426		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
2427
2428	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2429			"%08X %5d %8d %lu %d %pK %lu %lu %u %u %d%n",
2430		i, src, srcp, dest, destp, sk->sk_state,
2431		tp->write_seq - tp->snd_una,
2432		rx_queue,
2433		timer_active,
2434		jiffies_to_clock_t(timer_expires - jiffies),
2435		icsk->icsk_retransmits,
2436		sock_i_uid(sk),
2437		icsk->icsk_probes_out,
2438		sock_i_ino(sk),
2439		atomic_read(&sk->sk_refcnt), sk,
2440		jiffies_to_clock_t(icsk->icsk_rto),
2441		jiffies_to_clock_t(icsk->icsk_ack.ato),
2442		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
2443		tp->snd_cwnd,
2444		tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh,
2445		len);
2446}
2447
2448static void get_timewait4_sock(const struct inet_timewait_sock *tw,
2449			       struct seq_file *f, int i, int *len)
2450{
2451	__be32 dest, src;
2452	__u16 destp, srcp;
2453	int ttd = tw->tw_ttd - jiffies;
2454
2455	if (ttd < 0)
2456		ttd = 0;
2457
2458	dest  = tw->tw_daddr;
2459	src   = tw->tw_rcv_saddr;
2460	destp = ntohs(tw->tw_dport);
2461	srcp  = ntohs(tw->tw_sport);
2462
2463	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2464		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
2465		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2466		3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
2467		atomic_read(&tw->tw_refcnt), tw, len);
2468}
2469
2470#define TMPSZ 150
2471
2472static int tcp4_seq_show(struct seq_file *seq, void *v)
2473{
2474	struct tcp_iter_state *st;
2475	int len;
2476	int len;
2477	if (v == SEQ_START_TOKEN) {
2478		seq_printf(seq, "%-*s\n", TMPSZ - 1,
2479			   "  sl  local_address rem_address   st tx_queue "
2480			   "rx_queue tr tm->when retrnsmt   uid  timeout "
2481			   "inode");
2482		goto out;
2483	}
2484	st = seq->private;
2485
2486	switch (st->state) {
2487	case TCP_SEQ_STATE_LISTENING:
2488	case TCP_SEQ_STATE_ESTABLISHED:
2489		get_tcp4_sock(v, seq, st->num, &len);
2490		break;
2491	case TCP_SEQ_STATE_OPENREQ:
2492		get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid, &len);
2493		break;
2494	case TCP_SEQ_STATE_TIME_WAIT:
2495		get_timewait4_sock(v, seq, st->num, &len);
2496		break;
2497	}
2498	seq_printf(seq, "%*s\n", TMPSZ - 1 - len, "");
2499out:
2500	return 0;
2501}
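/*
 * The rows emitted above form /proc/net/tcp. A minimal user-space sketch
 * that extracts the local port and state columns (the sscanf format is an
 * assumption matching the seq_printf format above):
 *
 *	#include <stdio.h>
 *
 *	FILE *f = fopen("/proc/net/tcp", "r");
 *	char line[256];
 *	unsigned int lport, st;
 *
 *	fgets(line, sizeof(line), f);		// skip the header row
 *	while (fgets(line, sizeof(line), f))
 *		if (sscanf(line, "%*d: %*8x:%x %*8x:%*x %x",
 *			   &lport, &st) == 2)
 *			printf("port %u state %#x\n", lport, st);
 *	fclose(f);
 */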
2502
2503static const struct file_operations tcp_afinfo_seq_fops = {
2504	.owner   = THIS_MODULE,
2505	.open    = tcp_seq_open,
2506	.read    = seq_read,
2507	.llseek  = seq_lseek,
2508	.release = seq_release_net
2509};
2510
2511static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2512	.name		= "tcp",
2513	.family		= AF_INET,
2514	.seq_fops	= &tcp_afinfo_seq_fops,
2515	.seq_ops	= {
2516		.show		= tcp4_seq_show,
2517	},
2518};
2519
2520static int __net_init tcp4_proc_init_net(struct net *net)
2521{
2522	return tcp_proc_register(net, &tcp4_seq_afinfo);
2523}
2524
2525static void __net_exit tcp4_proc_exit_net(struct net *net)
2526{
2527	tcp_proc_unregister(net, &tcp4_seq_afinfo);
2528}
2529
2530static struct pernet_operations tcp4_net_ops = {
2531	.init = tcp4_proc_init_net,
2532	.exit = tcp4_proc_exit_net,
2533};
2534
2535int __init tcp4_proc_init(void)
2536{
2537	return register_pernet_subsys(&tcp4_net_ops);
2538}
2539
2540void tcp4_proc_exit(void)
2541{
2542	unregister_pernet_subsys(&tcp4_net_ops);
2543}
2544#endif /* CONFIG_PROC_FS */
2545
2546struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2547{
2548	const struct iphdr *iph = skb_gro_network_header(skb);
2549
2550	switch (skb->ip_summed) {
2551	case CHECKSUM_COMPLETE:
2552		if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
2553				  skb->csum)) {
2554			skb->ip_summed = CHECKSUM_UNNECESSARY;
2555			break;
2556		}
2557
2558		/* fall through */
2559	case CHECKSUM_NONE:
2560		NAPI_GRO_CB(skb)->flush = 1;
2561		return NULL;
2562	}
2563
2564	return tcp_gro_receive(head, skb);
2565}
2566
2567int tcp4_gro_complete(struct sk_buff *skb)
2568{
2569	const struct iphdr *iph = ip_hdr(skb);
2570	struct tcphdr *th = tcp_hdr(skb);
2571
2572	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
2573				  iph->saddr, iph->daddr, 0);
2574	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
2575
2576	return tcp_gro_complete(skb);
2577}
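/*
 * Note the 0 seed passed to tcp_v4_check() above: the stored th->check is
 * then the complemented pseudo-header sum, which is what CHECKSUM_PARTIAL
 * offload hardware expects to complete over the resegmented packets.
 */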
2578
2579struct proto tcp_prot = {
2580	.name			= "TCP",
2581	.owner			= THIS_MODULE,
2582	.close			= tcp_close,
2583	.connect		= tcp_v4_connect,
2584	.disconnect		= tcp_disconnect,
2585	.accept			= inet_csk_accept,
2586	.ioctl			= tcp_ioctl,
2587	.init			= tcp_v4_init_sock,
2588	.destroy		= tcp_v4_destroy_sock,
2589	.shutdown		= tcp_shutdown,
2590	.setsockopt		= tcp_setsockopt,
2591	.getsockopt		= tcp_getsockopt,
2592	.recvmsg		= tcp_recvmsg,
2593	.sendmsg		= tcp_sendmsg,
2594	.sendpage		= tcp_sendpage,
2595	.backlog_rcv		= tcp_v4_do_rcv,
2596	.hash			= inet_hash,
2597	.unhash			= inet_unhash,
2598	.get_port		= inet_csk_get_port,
2599	.enter_memory_pressure	= tcp_enter_memory_pressure,
2600	.sockets_allocated	= &tcp_sockets_allocated,
2601	.orphan_count		= &tcp_orphan_count,
2602	.memory_allocated	= &tcp_memory_allocated,
2603	.memory_pressure	= &tcp_memory_pressure,
2604	.sysctl_wmem		= sysctl_tcp_wmem,
2605	.sysctl_rmem		= sysctl_tcp_rmem,
2606	.max_header		= MAX_TCP_HEADER,
2607	.obj_size		= sizeof(struct tcp_sock),
2608	.slab_flags		= SLAB_DESTROY_BY_RCU,
2609	.twsk_prot		= &tcp_timewait_sock_ops,
2610	.rsk_prot		= &tcp_request_sock_ops,
2611	.h.hashinfo		= &tcp_hashinfo,
2612	.no_autobind		= true,
2613#ifdef CONFIG_COMPAT
2614	.compat_setsockopt	= compat_tcp_setsockopt,
2615	.compat_getsockopt	= compat_tcp_getsockopt,
2616#endif
2617#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
2618	.init_cgroup		= tcp_init_cgroup,
2619	.destroy_cgroup		= tcp_destroy_cgroup,
2620	.proto_cgroup		= tcp_proto_cgroup,
2621#endif
2622};
2623EXPORT_SYMBOL(tcp_prot);
2624
2625static int __net_init tcp_sk_init(struct net *net)
2626{
2627	return inet_ctl_sock_create(&net->ipv4.tcp_sock,
2628				    PF_INET, SOCK_RAW, IPPROTO_TCP, net);
2629}
2630
2631static void __net_exit tcp_sk_exit(struct net *net)
2632{
2633	inet_ctl_sock_destroy(net->ipv4.tcp_sock);
2634}
2635
2636static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
2637{
2638	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
2639}
2640
2641static struct pernet_operations __net_initdata tcp_sk_ops = {
2642       .init	   = tcp_sk_init,
2643       .exit	   = tcp_sk_exit,
2644       .exit_batch = tcp_sk_exit_batch,
2645};
2646
2647void __init tcp_v4_init(void)
2648{
2649	inet_hashinfo_init(&tcp_hashinfo);
2650	if (register_pernet_subsys(&tcp_sk_ops))
2651		panic("Failed to create the TCP control socket.\n");
2652}