Linux kernel v4.6: net/ipv6/tcp_ipv6.c
   1/*
   2 *	TCP over IPv6
   3 *	Linux INET6 implementation
   4 *
   5 *	Authors:
   6 *	Pedro Roque		<roque@di.fc.ul.pt>
   7 *
   8 *	Based on:
   9 *	linux/net/ipv4/tcp.c
  10 *	linux/net/ipv4/tcp_input.c
  11 *	linux/net/ipv4/tcp_output.c
  12 *
  13 *	Fixes:
  14 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
  15 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
  16 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
  17 *					a single port at the same time.
  18 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
  19 *
  20 *	This program is free software; you can redistribute it and/or
  21 *      modify it under the terms of the GNU General Public License
  22 *      as published by the Free Software Foundation; either version
  23 *      2 of the License, or (at your option) any later version.
  24 */
  25
  26#include <linux/bottom_half.h>
  27#include <linux/module.h>
  28#include <linux/errno.h>
  29#include <linux/types.h>
  30#include <linux/socket.h>
  31#include <linux/sockios.h>
  32#include <linux/net.h>
  33#include <linux/jiffies.h>
  34#include <linux/in.h>
  35#include <linux/in6.h>
  36#include <linux/netdevice.h>
  37#include <linux/init.h>
  38#include <linux/jhash.h>
  39#include <linux/ipsec.h>
  40#include <linux/times.h>
  41#include <linux/slab.h>
  42#include <linux/uaccess.h>
  43#include <linux/ipv6.h>
  44#include <linux/icmpv6.h>
  45#include <linux/random.h>
  46
  47#include <net/tcp.h>
  48#include <net/ndisc.h>
  49#include <net/inet6_hashtables.h>
  50#include <net/inet6_connection_sock.h>
  51#include <net/ipv6.h>
  52#include <net/transp_v6.h>
  53#include <net/addrconf.h>
  54#include <net/ip6_route.h>
  55#include <net/ip6_checksum.h>
  56#include <net/inet_ecn.h>
  57#include <net/protocol.h>
  58#include <net/xfrm.h>
  59#include <net/snmp.h>
  60#include <net/dsfield.h>
  61#include <net/timewait_sock.h>
  62#include <net/inet_common.h>
  63#include <net/secure_seq.h>
  64#include <net/busy_poll.h>
  65
  66#include <linux/proc_fs.h>
  67#include <linux/seq_file.h>
  68
  69#include <crypto/hash.h>
  70#include <linux/scatterlist.h>
  71
  72static void	tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
  73static void	tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
  74				      struct request_sock *req);
  75
  76static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
  77
  78static const struct inet_connection_sock_af_ops ipv6_mapped;
  79static const struct inet_connection_sock_af_ops ipv6_specific;
  80#ifdef CONFIG_TCP_MD5SIG
  81static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
  82static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
  83#else
  84static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
  85						   const struct in6_addr *addr)
  86{
  87	return NULL;
  88}
  89#endif
  90
  91static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
  92{
  93	struct dst_entry *dst = skb_dst(skb);
  94
  95	if (dst && dst_hold_safe(dst)) {
  96		const struct rt6_info *rt = (const struct rt6_info *)dst;
  97
  98		sk->sk_rx_dst = dst;
  99		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
 100		inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
 101	}
 102}
 103
 104static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
 105{
 106	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
 107					    ipv6_hdr(skb)->saddr.s6_addr32,
 108					    tcp_hdr(skb)->dest,
 109					    tcp_hdr(skb)->source);
 110}
 111
 112static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 113			  int addr_len)
 114{
 115	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
 116	struct inet_sock *inet = inet_sk(sk);
 117	struct inet_connection_sock *icsk = inet_csk(sk);
 118	struct ipv6_pinfo *np = inet6_sk(sk);
 119	struct tcp_sock *tp = tcp_sk(sk);
 120	struct in6_addr *saddr = NULL, *final_p, final;
 121	struct ipv6_txoptions *opt;
 122	struct flowi6 fl6;
 123	struct dst_entry *dst;
 124	int addr_type;
 125	int err;
 126
 127	if (addr_len < SIN6_LEN_RFC2133)
 128		return -EINVAL;
 129
 130	if (usin->sin6_family != AF_INET6)
 131		return -EAFNOSUPPORT;
 132
 133	memset(&fl6, 0, sizeof(fl6));
 134
 135	if (np->sndflow) {
 136		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
 137		IP6_ECN_flow_init(fl6.flowlabel);
 138		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
 139			struct ip6_flowlabel *flowlabel;
 140			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
 141			if (!flowlabel)
 142				return -EINVAL;
 143			fl6_sock_release(flowlabel);
 144		}
 145	}
 146
 147	/*
 148	 *	connect() to INADDR_ANY means loopback (BSD'ism).
 149	 */
 150
 151	if (ipv6_addr_any(&usin->sin6_addr))
 152		usin->sin6_addr.s6_addr[15] = 0x1;
 153
 154	addr_type = ipv6_addr_type(&usin->sin6_addr);
 155
 156	if (addr_type & IPV6_ADDR_MULTICAST)
 157		return -ENETUNREACH;
 158
 159	if (addr_type&IPV6_ADDR_LINKLOCAL) {
 160		if (addr_len >= sizeof(struct sockaddr_in6) &&
 161		    usin->sin6_scope_id) {
 162			/* If interface is set while binding, indices
 163			 * must coincide.
 164			 */
 165			if (sk->sk_bound_dev_if &&
 166			    sk->sk_bound_dev_if != usin->sin6_scope_id)
 167				return -EINVAL;
 168
 169			sk->sk_bound_dev_if = usin->sin6_scope_id;
 170		}
 171
 172		/* Connect to link-local address requires an interface */
 173		if (!sk->sk_bound_dev_if)
 174			return -EINVAL;
 175	}
 176
 177	if (tp->rx_opt.ts_recent_stamp &&
 178	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
 179		tp->rx_opt.ts_recent = 0;
 180		tp->rx_opt.ts_recent_stamp = 0;
 181		tp->write_seq = 0;
 182	}
 183
 184	sk->sk_v6_daddr = usin->sin6_addr;
 185	np->flow_label = fl6.flowlabel;
 186
 187	/*
 188	 *	TCP over IPv4
 189	 */
 190
 191	if (addr_type == IPV6_ADDR_MAPPED) {
 192		u32 exthdrlen = icsk->icsk_ext_hdr_len;
 193		struct sockaddr_in sin;
 194
 195		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
 196
 197		if (__ipv6_only_sock(sk))
 198			return -ENETUNREACH;
 199
 200		sin.sin_family = AF_INET;
 201		sin.sin_port = usin->sin6_port;
 202		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
 203
 204		icsk->icsk_af_ops = &ipv6_mapped;
 205		sk->sk_backlog_rcv = tcp_v4_do_rcv;
 206#ifdef CONFIG_TCP_MD5SIG
 207		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
 208#endif
 209
 210		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
 211
 212		if (err) {
 213			icsk->icsk_ext_hdr_len = exthdrlen;
 214			icsk->icsk_af_ops = &ipv6_specific;
 215			sk->sk_backlog_rcv = tcp_v6_do_rcv;
 216#ifdef CONFIG_TCP_MD5SIG
 217			tp->af_specific = &tcp_sock_ipv6_specific;
 218#endif
 219			goto failure;
 220		}
 221		np->saddr = sk->sk_v6_rcv_saddr;
 222
 223		return err;
 224	}
 225
 226	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
 227		saddr = &sk->sk_v6_rcv_saddr;
 228
 229	fl6.flowi6_proto = IPPROTO_TCP;
 230	fl6.daddr = sk->sk_v6_daddr;
 231	fl6.saddr = saddr ? *saddr : np->saddr;
 232	fl6.flowi6_oif = sk->sk_bound_dev_if;
 233	fl6.flowi6_mark = sk->sk_mark;
 234	fl6.fl6_dport = usin->sin6_port;
 235	fl6.fl6_sport = inet->inet_sport;
 236
 237	opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
 238	final_p = fl6_update_dst(&fl6, opt, &final);
 239
 240	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
 241
 242	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
 243	if (IS_ERR(dst)) {
 244		err = PTR_ERR(dst);
 245		goto failure;
 246	}
 247
 248	if (!saddr) {
 249		saddr = &fl6.saddr;
 250		sk->sk_v6_rcv_saddr = *saddr;
 251	}
 252
 253	/* set the source address */
 254	np->saddr = *saddr;
 255	inet->inet_rcv_saddr = LOOPBACK4_IPV6;
 256
 257	sk->sk_gso_type = SKB_GSO_TCPV6;
 258	ip6_dst_store(sk, dst, NULL, NULL);
 259
 260	if (tcp_death_row.sysctl_tw_recycle &&
 261	    !tp->rx_opt.ts_recent_stamp &&
 262	    ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr))
 263		tcp_fetch_timewait_stamp(sk, dst);
 264
 265	icsk->icsk_ext_hdr_len = 0;
 266	if (opt)
 267		icsk->icsk_ext_hdr_len = opt->opt_flen +
 268					 opt->opt_nflen;
 269
 270	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
 271
 272	inet->inet_dport = usin->sin6_port;
 273
 274	tcp_set_state(sk, TCP_SYN_SENT);
 275	err = inet6_hash_connect(&tcp_death_row, sk);
 276	if (err)
 277		goto late_failure;
 278
 279	sk_set_txhash(sk);
 280
 281	if (!tp->write_seq && likely(!tp->repair))
 282		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
 283							     sk->sk_v6_daddr.s6_addr32,
 284							     inet->inet_sport,
 285							     inet->inet_dport);
 286
 287	err = tcp_connect(sk);
 288	if (err)
 289		goto late_failure;
 290
 291	return 0;
 292
 293late_failure:
 294	tcp_set_state(sk, TCP_CLOSE);
 295	__sk_dst_reset(sk);
 296failure:
 297	inet->inet_dport = 0;
 298	sk->sk_route_caps = 0;
 299	return err;
 300}
 301
 302static void tcp_v6_mtu_reduced(struct sock *sk)
 303{
 304	struct dst_entry *dst;
 305
 306	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
 307		return;
 308
 309	dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
 310	if (!dst)
 311		return;
 312
 313	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
 314		tcp_sync_mss(sk, dst_mtu(dst));
 315		tcp_simple_retransmit(sk);
 316	}
 317}
 318
 319static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 320		u8 type, u8 code, int offset, __be32 info)
 321{
 322	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
 323	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
 324	struct net *net = dev_net(skb->dev);
 325	struct request_sock *fastopen;
 326	struct ipv6_pinfo *np;
 327	struct tcp_sock *tp;
 328	__u32 seq, snd_una;
 329	struct sock *sk;
 330	bool fatal;
 331	int err;
 332
 333	sk = __inet6_lookup_established(net, &tcp_hashinfo,
 334					&hdr->daddr, th->dest,
 335					&hdr->saddr, ntohs(th->source),
 336					skb->dev->ifindex);
 337
 338	if (!sk) {
 339		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
 340				   ICMP6_MIB_INERRORS);
 341		return;
 342	}
 343
 344	if (sk->sk_state == TCP_TIME_WAIT) {
 345		inet_twsk_put(inet_twsk(sk));
 346		return;
 347	}
 348	seq = ntohl(th->seq);
 349	fatal = icmpv6_err_convert(type, code, &err);
 350	if (sk->sk_state == TCP_NEW_SYN_RECV)
 351		return tcp_req_err(sk, seq, fatal);
 352
 353	bh_lock_sock(sk);
 354	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
 355		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
 356
 357	if (sk->sk_state == TCP_CLOSE)
 358		goto out;
 359
 360	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
 361		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
 362		goto out;
 363	}
 364
 365	tp = tcp_sk(sk);
 366	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
 367	fastopen = tp->fastopen_rsk;
 368	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
 369	if (sk->sk_state != TCP_LISTEN &&
 370	    !between(seq, snd_una, tp->snd_nxt)) {
 371		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
 372		goto out;
 373	}
 374
 375	np = inet6_sk(sk);
 376
 377	if (type == NDISC_REDIRECT) {
 378		struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
 379
 380		if (dst)
 381			dst->ops->redirect(dst, sk, skb);
 382		goto out;
 383	}
 384
 385	if (type == ICMPV6_PKT_TOOBIG) {
 386		/* We are not interested in TCP_LISTEN and open_requests
  387		 * (SYN-ACKs sent out by Linux are always <576 bytes so
 388		 * they should go through unfragmented).
 389		 */
 390		if (sk->sk_state == TCP_LISTEN)
 391			goto out;
 392
 393		if (!ip6_sk_accept_pmtu(sk))
 394			goto out;
 395
 396		tp->mtu_info = ntohl(info);
 397		if (!sock_owned_by_user(sk))
 398			tcp_v6_mtu_reduced(sk);
 399		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
 400					   &tp->tsq_flags))
 401			sock_hold(sk);
 402		goto out;
 403	}
 404
 405
  406	/* Might be for a request_sock */
 407	switch (sk->sk_state) {
 408	case TCP_SYN_SENT:
 409	case TCP_SYN_RECV:
  410		/* Only in fast or simultaneous open. If a fast open socket
  411		 * is already accepted it is treated as a connected one below.
 412		 */
 413		if (fastopen && !fastopen->sk)
 414			break;
 415
 416		if (!sock_owned_by_user(sk)) {
 417			sk->sk_err = err;
 418			sk->sk_error_report(sk);		/* Wake people up to see the error (see connect in sock.c) */
 419
 420			tcp_done(sk);
 421		} else
 422			sk->sk_err_soft = err;
 423		goto out;
 424	}
 425
 426	if (!sock_owned_by_user(sk) && np->recverr) {
 427		sk->sk_err = err;
 428		sk->sk_error_report(sk);
 429	} else
 430		sk->sk_err_soft = err;
 431
 432out:
 433	bh_unlock_sock(sk);
 434	sock_put(sk);
 435}
 436
 437
 438static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
 439			      struct flowi *fl,
 440			      struct request_sock *req,
 441			      struct tcp_fastopen_cookie *foc,
 442			      bool attach_req)
 443{
 444	struct inet_request_sock *ireq = inet_rsk(req);
 445	struct ipv6_pinfo *np = inet6_sk(sk);
 446	struct flowi6 *fl6 = &fl->u.ip6;
 447	struct sk_buff *skb;
 448	int err = -ENOMEM;
 449
 450	/* First, grab a route. */
 451	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
 452					       IPPROTO_TCP)) == NULL)
 453		goto done;
 454
 455	skb = tcp_make_synack(sk, dst, req, foc, attach_req);
 456
 457	if (skb) {
 458		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
 459				    &ireq->ir_v6_rmt_addr);
 460
 461		fl6->daddr = ireq->ir_v6_rmt_addr;
 462		if (np->repflow && ireq->pktopts)
 463			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
 464
 465		rcu_read_lock();
 466		err = ip6_xmit(sk, skb, fl6, rcu_dereference(np->opt),
 467			       np->tclass);
 468		rcu_read_unlock();
 469		err = net_xmit_eval(err);
 470	}
 471
 472done:
 473	return err;
 474}
 475
 476
 477static void tcp_v6_reqsk_destructor(struct request_sock *req)
 478{
 479	kfree_skb(inet_rsk(req)->pktopts);
 480}
 481
 482#ifdef CONFIG_TCP_MD5SIG
 483static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
 484						   const struct in6_addr *addr)
 485{
 486	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
 487}
 488
 489static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
 490						const struct sock *addr_sk)
 491{
 492	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
 493}
 494
 495static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
 496				 int optlen)
 497{
 498	struct tcp_md5sig cmd;
 499	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
 500
 501	if (optlen < sizeof(cmd))
 502		return -EINVAL;
 503
 504	if (copy_from_user(&cmd, optval, sizeof(cmd)))
 505		return -EFAULT;
 506
 507	if (sin6->sin6_family != AF_INET6)
 508		return -EINVAL;
 509
 510	if (!cmd.tcpm_keylen) {
 511		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
 512			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
 513					      AF_INET);
 514		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
 515				      AF_INET6);
 516	}
 517
 518	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
 519		return -EINVAL;
 520
 521	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
 522		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
 523				      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
 524
 525	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
 526			      AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
 527}
 528
 529static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
 530					const struct in6_addr *daddr,
 531					const struct in6_addr *saddr, int nbytes)
 532{
 533	struct tcp6_pseudohdr *bp;
 534	struct scatterlist sg;
 535
 536	bp = &hp->md5_blk.ip6;
 537	/* 1. TCP pseudo-header (RFC2460) */
 538	bp->saddr = *saddr;
 539	bp->daddr = *daddr;
 540	bp->protocol = cpu_to_be32(IPPROTO_TCP);
 541	bp->len = cpu_to_be32(nbytes);
 542
 543	sg_init_one(&sg, bp, sizeof(*bp));
 544	ahash_request_set_crypt(hp->md5_req, &sg, NULL, sizeof(*bp));
 545	return crypto_ahash_update(hp->md5_req);
 546}
 547
 548static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
 549			       const struct in6_addr *daddr, struct in6_addr *saddr,
 550			       const struct tcphdr *th)
 551{
 552	struct tcp_md5sig_pool *hp;
 553	struct ahash_request *req;
 554
 555	hp = tcp_get_md5sig_pool();
 556	if (!hp)
 557		goto clear_hash_noput;
 558	req = hp->md5_req;
 559
 560	if (crypto_ahash_init(req))
 561		goto clear_hash;
 562	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
 563		goto clear_hash;
 564	if (tcp_md5_hash_header(hp, th))
 565		goto clear_hash;
 566	if (tcp_md5_hash_key(hp, key))
 567		goto clear_hash;
 568	ahash_request_set_crypt(req, NULL, md5_hash, 0);
 569	if (crypto_ahash_final(req))
 570		goto clear_hash;
 571
 572	tcp_put_md5sig_pool();
 573	return 0;
 574
 575clear_hash:
 576	tcp_put_md5sig_pool();
 577clear_hash_noput:
 578	memset(md5_hash, 0, 16);
 579	return 1;
 580}
 581
 582static int tcp_v6_md5_hash_skb(char *md5_hash,
 583			       const struct tcp_md5sig_key *key,
 584			       const struct sock *sk,
 585			       const struct sk_buff *skb)
 586{
 587	const struct in6_addr *saddr, *daddr;
 588	struct tcp_md5sig_pool *hp;
 589	struct ahash_request *req;
 590	const struct tcphdr *th = tcp_hdr(skb);
 591
 592	if (sk) { /* valid for establish/request sockets */
 593		saddr = &sk->sk_v6_rcv_saddr;
 594		daddr = &sk->sk_v6_daddr;
 595	} else {
 596		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
 597		saddr = &ip6h->saddr;
 598		daddr = &ip6h->daddr;
 599	}
 600
 601	hp = tcp_get_md5sig_pool();
 602	if (!hp)
 603		goto clear_hash_noput;
 604	req = hp->md5_req;
 605
 606	if (crypto_ahash_init(req))
 607		goto clear_hash;
 608
 609	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
 610		goto clear_hash;
 611	if (tcp_md5_hash_header(hp, th))
 612		goto clear_hash;
 613	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
 614		goto clear_hash;
 615	if (tcp_md5_hash_key(hp, key))
 616		goto clear_hash;
 617	ahash_request_set_crypt(req, NULL, md5_hash, 0);
 618	if (crypto_ahash_final(req))
 619		goto clear_hash;
 620
 621	tcp_put_md5sig_pool();
 622	return 0;
 623
 624clear_hash:
 625	tcp_put_md5sig_pool();
 626clear_hash_noput:
 627	memset(md5_hash, 0, 16);
 628	return 1;
 629}
 630
 631#endif
 632
 633static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
 634				    const struct sk_buff *skb)
 635{
 636#ifdef CONFIG_TCP_MD5SIG
 637	const __u8 *hash_location = NULL;
 638	struct tcp_md5sig_key *hash_expected;
 639	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
 640	const struct tcphdr *th = tcp_hdr(skb);
 641	int genhash;
 642	u8 newhash[16];
 643
 644	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
 645	hash_location = tcp_parse_md5sig_option(th);
 646
 647	/* We've parsed the options - do we have a hash? */
 648	if (!hash_expected && !hash_location)
 649		return false;
 650
 651	if (hash_expected && !hash_location) {
 652		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
 653		return true;
 654	}
 655
 656	if (!hash_expected && hash_location) {
 657		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
 658		return true;
 659	}
 660
 661	/* check the signature */
 662	genhash = tcp_v6_md5_hash_skb(newhash,
 663				      hash_expected,
 664				      NULL, skb);
 665
 666	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
 667		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
 668				     genhash ? "failed" : "mismatch",
 669				     &ip6h->saddr, ntohs(th->source),
 670				     &ip6h->daddr, ntohs(th->dest));
 671		return true;
 672	}
 673#endif
 674	return false;
 675}
 676
 677static void tcp_v6_init_req(struct request_sock *req,
 678			    const struct sock *sk_listener,
 679			    struct sk_buff *skb)
 680{
 681	struct inet_request_sock *ireq = inet_rsk(req);
 682	const struct ipv6_pinfo *np = inet6_sk(sk_listener);
 683
 684	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
 685	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
 686
 687	/* So that link locals have meaning */
 688	if (!sk_listener->sk_bound_dev_if &&
 689	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
 690		ireq->ir_iif = tcp_v6_iif(skb);
 691
 692	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
 693	    (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
 694	     np->rxopt.bits.rxinfo ||
 695	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
 696	     np->rxopt.bits.rxohlim || np->repflow)) {
 697		atomic_inc(&skb->users);
 698		ireq->pktopts = skb;
 699	}
 700}
 701
 702static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
 703					  struct flowi *fl,
 704					  const struct request_sock *req,
 705					  bool *strict)
 706{
 707	if (strict)
 708		*strict = true;
 709	return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
 710}
 711
 712struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
 713	.family		=	AF_INET6,
 714	.obj_size	=	sizeof(struct tcp6_request_sock),
 715	.rtx_syn_ack	=	tcp_rtx_synack,
 716	.send_ack	=	tcp_v6_reqsk_send_ack,
 717	.destructor	=	tcp_v6_reqsk_destructor,
 718	.send_reset	=	tcp_v6_send_reset,
 719	.syn_ack_timeout =	tcp_syn_ack_timeout,
 720};
 721
 722static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
 723	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
 724				sizeof(struct ipv6hdr),
 725#ifdef CONFIG_TCP_MD5SIG
 726	.req_md5_lookup	=	tcp_v6_md5_lookup,
 727	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
 728#endif
 729	.init_req	=	tcp_v6_init_req,
 730#ifdef CONFIG_SYN_COOKIES
 731	.cookie_init_seq =	cookie_v6_init_sequence,
 732#endif
 733	.route_req	=	tcp_v6_route_req,
 734	.init_seq	=	tcp_v6_init_sequence,
 735	.send_synack	=	tcp_v6_send_synack,
 736};
 737
 738static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
 739				 u32 ack, u32 win, u32 tsval, u32 tsecr,
 740				 int oif, struct tcp_md5sig_key *key, int rst,
 741				 u8 tclass, u32 label)
 742{
 743	const struct tcphdr *th = tcp_hdr(skb);
 744	struct tcphdr *t1;
 745	struct sk_buff *buff;
 746	struct flowi6 fl6;
 747	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
 748	struct sock *ctl_sk = net->ipv6.tcp_sk;
 749	unsigned int tot_len = sizeof(struct tcphdr);
 750	struct dst_entry *dst;
 751	__be32 *topt;
 752
 753	if (tsecr)
 754		tot_len += TCPOLEN_TSTAMP_ALIGNED;
 755#ifdef CONFIG_TCP_MD5SIG
 756	if (key)
 757		tot_len += TCPOLEN_MD5SIG_ALIGNED;
 758#endif
 759
 760	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
 761			 GFP_ATOMIC);
 762	if (!buff)
 763		return;
 764
 765	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
 766
 767	t1 = (struct tcphdr *) skb_push(buff, tot_len);
 768	skb_reset_transport_header(buff);
 769
 770	/* Swap the send and the receive. */
 771	memset(t1, 0, sizeof(*t1));
 772	t1->dest = th->source;
 773	t1->source = th->dest;
 774	t1->doff = tot_len / 4;
 775	t1->seq = htonl(seq);
 776	t1->ack_seq = htonl(ack);
 777	t1->ack = !rst || !th->ack;
 778	t1->rst = rst;
 779	t1->window = htons(win);
 780
 781	topt = (__be32 *)(t1 + 1);
 782
 783	if (tsecr) {
 784		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
 785				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
 786		*topt++ = htonl(tsval);
 787		*topt++ = htonl(tsecr);
 788	}
 789
 790#ifdef CONFIG_TCP_MD5SIG
 791	if (key) {
 792		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
 793				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
 794		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
 795				    &ipv6_hdr(skb)->saddr,
 796				    &ipv6_hdr(skb)->daddr, t1);
 797	}
 798#endif
 799
 800	memset(&fl6, 0, sizeof(fl6));
 801	fl6.daddr = ipv6_hdr(skb)->saddr;
 802	fl6.saddr = ipv6_hdr(skb)->daddr;
 803	fl6.flowlabel = label;
 804
 805	buff->ip_summed = CHECKSUM_PARTIAL;
 806	buff->csum = 0;
 807
 808	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
 809
 810	fl6.flowi6_proto = IPPROTO_TCP;
 811	if (rt6_need_strict(&fl6.daddr) && !oif)
 812		fl6.flowi6_oif = tcp_v6_iif(skb);
 813	else {
 814		if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
 815			oif = skb->skb_iif;
 816
 817		fl6.flowi6_oif = oif;
 818	}
 819
 820	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
 821	fl6.fl6_dport = t1->dest;
 822	fl6.fl6_sport = t1->source;
 823	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
 824
 825	/* Pass a socket to ip6_dst_lookup either it is for RST
 826	 * Underlying function will use this to retrieve the network
 827	 * namespace
 828	 */
 829	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
 830	if (!IS_ERR(dst)) {
 831		skb_dst_set(buff, dst);
 832		ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
 833		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
 834		if (rst)
 835			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
 836		return;
 837	}
 838
 839	kfree_skb(buff);
 840}
 841
 842static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
 843{
 844	const struct tcphdr *th = tcp_hdr(skb);
 845	u32 seq = 0, ack_seq = 0;
 846	struct tcp_md5sig_key *key = NULL;
 847#ifdef CONFIG_TCP_MD5SIG
 848	const __u8 *hash_location = NULL;
 849	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
 850	unsigned char newhash[16];
 851	int genhash;
 852	struct sock *sk1 = NULL;
 853#endif
 854	int oif;
 855
 856	if (th->rst)
 857		return;
 858
 859	/* If sk not NULL, it means we did a successful lookup and incoming
 860	 * route had to be correct. prequeue might have dropped our dst.
 861	 */
 862	if (!sk && !ipv6_unicast_destination(skb))
 863		return;
 864
 865#ifdef CONFIG_TCP_MD5SIG
 866	hash_location = tcp_parse_md5sig_option(th);
 867	if (sk && sk_fullsock(sk)) {
 868		key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr);
 869	} else if (hash_location) {
 870		/*
 871		 * active side is lost. Try to find listening socket through
 872		 * source port, and then find md5 key through listening socket.
  873		 * we are not loosening security here:
 874		 * Incoming packet is checked with md5 hash with finding key,
 875		 * no RST generated if md5 hash doesn't match.
 876		 */
 877		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
 878					   &tcp_hashinfo, NULL, 0,
 879					   &ipv6h->saddr,
 880					   th->source, &ipv6h->daddr,
 881					   ntohs(th->source), tcp_v6_iif(skb));
 882		if (!sk1)
 883			return;
 884
 885		rcu_read_lock();
 886		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
 887		if (!key)
 888			goto release_sk1;
 889
 890		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
 891		if (genhash || memcmp(hash_location, newhash, 16) != 0)
 892			goto release_sk1;
 893	}
 894#endif
 895
 896	if (th->ack)
 897		seq = ntohl(th->ack_seq);
 898	else
 899		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
 900			  (th->doff << 2);
 901
 902	oif = sk ? sk->sk_bound_dev_if : 0;
 903	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);
 904
 905#ifdef CONFIG_TCP_MD5SIG
 906release_sk1:
 907	if (sk1) {
 908		rcu_read_unlock();
 909		sock_put(sk1);
 910	}
 911#endif
 912}
 913
 914static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
 915			    u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
 916			    struct tcp_md5sig_key *key, u8 tclass,
 917			    u32 label)
 918{
 919	tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
 920			     tclass, label);
 921}
 922
 923static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
 924{
 925	struct inet_timewait_sock *tw = inet_twsk(sk);
 926	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
 927
 928	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
 929			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
 930			tcp_time_stamp + tcptw->tw_ts_offset,
 931			tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
 932			tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));
 933
 934	inet_twsk_put(tw);
 935}
 936
 937static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
 938				  struct request_sock *req)
 939{
 940	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
 941	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
 942	 */
 943	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
 944			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
 945			tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd,
 946			tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
 947			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
 948			0, 0);
 949}
 950
 951
 952static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
 953{
 954#ifdef CONFIG_SYN_COOKIES
 955	const struct tcphdr *th = tcp_hdr(skb);
 956
 957	if (!th->syn)
 958		sk = cookie_v6_check(sk, skb);
 959#endif
 960	return sk;
 961}
 962
 963static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 964{
 965	if (skb->protocol == htons(ETH_P_IP))
 966		return tcp_v4_conn_request(sk, skb);
 967
 968	if (!ipv6_unicast_destination(skb))
 969		goto drop;
 970
 971	return tcp_conn_request(&tcp6_request_sock_ops,
 972				&tcp_request_sock_ipv6_ops, sk, skb);
 973
 974drop:
 975	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
 976	return 0; /* don't send reset */
 977}
 978
 979static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
 980					 struct request_sock *req,
 981					 struct dst_entry *dst,
 982					 struct request_sock *req_unhash,
 983					 bool *own_req)
 984{
 985	struct inet_request_sock *ireq;
 986	struct ipv6_pinfo *newnp;
 987	const struct ipv6_pinfo *np = inet6_sk(sk);
 988	struct ipv6_txoptions *opt;
 989	struct tcp6_sock *newtcp6sk;
 990	struct inet_sock *newinet;
 991	struct tcp_sock *newtp;
 992	struct sock *newsk;
 993#ifdef CONFIG_TCP_MD5SIG
 994	struct tcp_md5sig_key *key;
 995#endif
 996	struct flowi6 fl6;
 997
 998	if (skb->protocol == htons(ETH_P_IP)) {
 999		/*
1000		 *	v6 mapped
1001		 */
1002
1003		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
1004					     req_unhash, own_req);
1005
1006		if (!newsk)
1007			return NULL;
1008
1009		newtcp6sk = (struct tcp6_sock *)newsk;
1010		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1011
1012		newinet = inet_sk(newsk);
1013		newnp = inet6_sk(newsk);
1014		newtp = tcp_sk(newsk);
1015
1016		memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1017
1018		newnp->saddr = newsk->sk_v6_rcv_saddr;
1019
1020		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1021		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
1022#ifdef CONFIG_TCP_MD5SIG
1023		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1024#endif
1025
1026		newnp->ipv6_ac_list = NULL;
1027		newnp->ipv6_fl_list = NULL;
1028		newnp->pktoptions  = NULL;
1029		newnp->opt	   = NULL;
1030		newnp->mcast_oif   = tcp_v6_iif(skb);
1031		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
1032		newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
1033		if (np->repflow)
1034			newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
1035
1036		/*
1037		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1038		 * here, tcp_create_openreq_child now does this for us, see the comment in
1039		 * that function for the gory details. -acme
1040		 */
1041
1042		/* It is tricky place. Until this moment IPv4 tcp
1043		   worked with IPv6 icsk.icsk_af_ops.
1044		   Sync it now.
1045		 */
1046		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
1047
1048		return newsk;
1049	}
1050
1051	ireq = inet_rsk(req);
1052
1053	if (sk_acceptq_is_full(sk))
1054		goto out_overflow;
1055
1056	if (!dst) {
1057		dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
1058		if (!dst)
1059			goto out;
1060	}
1061
1062	newsk = tcp_create_openreq_child(sk, req, skb);
1063	if (!newsk)
1064		goto out_nonewsk;
1065
1066	/*
1067	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1068	 * count here, tcp_create_openreq_child now does this for us, see the
1069	 * comment in that function for the gory details. -acme
1070	 */
1071
1072	newsk->sk_gso_type = SKB_GSO_TCPV6;
1073	ip6_dst_store(newsk, dst, NULL, NULL);
1074	inet6_sk_rx_dst_set(newsk, skb);
1075
1076	newtcp6sk = (struct tcp6_sock *)newsk;
1077	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1078
1079	newtp = tcp_sk(newsk);
1080	newinet = inet_sk(newsk);
1081	newnp = inet6_sk(newsk);
1082
1083	memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1084
1085	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
1086	newnp->saddr = ireq->ir_v6_loc_addr;
1087	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
1088	newsk->sk_bound_dev_if = ireq->ir_iif;
1089
1090	/* Now IPv6 options...
1091
1092	   First: no IPv4 options.
1093	 */
1094	newinet->inet_opt = NULL;
1095	newnp->ipv6_ac_list = NULL;
1096	newnp->ipv6_fl_list = NULL;
1097
1098	/* Clone RX bits */
1099	newnp->rxopt.all = np->rxopt.all;
1100
1101	newnp->pktoptions = NULL;
1102	newnp->opt	  = NULL;
1103	newnp->mcast_oif  = tcp_v6_iif(skb);
1104	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1105	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
1106	if (np->repflow)
1107		newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
1108
1109	/* Clone native IPv6 options from listening socket (if any)
1110
1111	   Yes, keeping reference count would be much more clever,
 1112	   but we do one more thing here: reattach optmem
1113	   to newsk.
1114	 */
1115	opt = rcu_dereference(np->opt);
1116	if (opt) {
1117		opt = ipv6_dup_options(newsk, opt);
1118		RCU_INIT_POINTER(newnp->opt, opt);
1119	}
1120	inet_csk(newsk)->icsk_ext_hdr_len = 0;
1121	if (opt)
1122		inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
1123						    opt->opt_flen;
1124
1125	tcp_ca_openreq_child(newsk, dst);
1126
1127	tcp_sync_mss(newsk, dst_mtu(dst));
1128	newtp->advmss = dst_metric_advmss(dst);
1129	if (tcp_sk(sk)->rx_opt.user_mss &&
1130	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1131		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1132
1133	tcp_initialize_rcv_mss(newsk);
1134
1135	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
1136	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
1137
1138#ifdef CONFIG_TCP_MD5SIG
1139	/* Copy over the MD5 key from the original socket */
1140	key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
1141	if (key) {
1142		/* We're using one, so create a matching key
1143		 * on the newsk structure. If we fail to get
1144		 * memory, then we end up not copying the key
1145		 * across. Shucks.
1146		 */
1147		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
1148			       AF_INET6, key->key, key->keylen,
1149			       sk_gfp_mask(sk, GFP_ATOMIC));
1150	}
1151#endif
1152
1153	if (__inet_inherit_port(sk, newsk) < 0) {
1154		inet_csk_prepare_forced_close(newsk);
1155		tcp_done(newsk);
1156		goto out;
1157	}
1158	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
1159	if (*own_req) {
1160		tcp_move_syn(newtp, req);
1161
1162		/* Clone pktoptions received with SYN, if we own the req */
1163		if (ireq->pktopts) {
1164			newnp->pktoptions = skb_clone(ireq->pktopts,
1165						      sk_gfp_mask(sk, GFP_ATOMIC));
1166			consume_skb(ireq->pktopts);
1167			ireq->pktopts = NULL;
1168			if (newnp->pktoptions)
1169				skb_set_owner_r(newnp->pktoptions, newsk);
1170		}
1171	}
1172
1173	return newsk;
1174
1175out_overflow:
1176	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1177out_nonewsk:
1178	dst_release(dst);
1179out:
1180	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1181	return NULL;
1182}
1183
 1184/* The socket must have its spinlock held when we get
1185 * here, unless it is a TCP_LISTEN socket.
1186 *
1187 * We have a potential double-lock case here, so even when
1188 * doing backlog processing we use the BH locking scheme.
1189 * This is because we cannot sleep with the original spinlock
1190 * held.
1191 */
1192static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1193{
1194	struct ipv6_pinfo *np = inet6_sk(sk);
1195	struct tcp_sock *tp;
1196	struct sk_buff *opt_skb = NULL;
1197
1198	/* Imagine: socket is IPv6. IPv4 packet arrives,
1199	   goes to IPv4 receive handler and backlogged.
1200	   From backlog it always goes here. Kerboom...
1201	   Fortunately, tcp_rcv_established and rcv_established
 1202	   handle them correctly, but it is not the case with
1203	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
1204	 */
1205
1206	if (skb->protocol == htons(ETH_P_IP))
1207		return tcp_v4_do_rcv(sk, skb);
1208
1209	if (sk_filter(sk, skb))
1210		goto discard;
1211
1212	/*
1213	 *	socket locking is here for SMP purposes as backlog rcv
1214	 *	is currently called with bh processing disabled.
1215	 */
1216
1217	/* Do Stevens' IPV6_PKTOPTIONS.
1218
1219	   Yes, guys, it is the only place in our code, where we
1220	   may make it not affecting IPv4.
1221	   The rest of code is protocol independent,
1222	   and I do not like idea to uglify IPv4.
1223
1224	   Actually, all the idea behind IPV6_PKTOPTIONS
1225	   looks not very well thought. For now we latch
1226	   options, received in the last packet, enqueued
1227	   by tcp. Feel free to propose better solution.
1228					       --ANK (980728)
1229	 */
1230	if (np->rxopt.all)
1231		opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));
1232
1233	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1234		struct dst_entry *dst = sk->sk_rx_dst;
1235
1236		sock_rps_save_rxhash(sk, skb);
1237		sk_mark_napi_id(sk, skb);
1238		if (dst) {
1239			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1240			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
1241				dst_release(dst);
1242				sk->sk_rx_dst = NULL;
1243			}
1244		}
1245
1246		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
1247		if (opt_skb)
1248			goto ipv6_pktoptions;
1249		return 0;
1250	}
1251
1252	if (tcp_checksum_complete(skb))
1253		goto csum_err;
1254
1255	if (sk->sk_state == TCP_LISTEN) {
1256		struct sock *nsk = tcp_v6_cookie_check(sk, skb);
1257
1258		if (!nsk)
1259			goto discard;
1260
1261		if (nsk != sk) {
1262			sock_rps_save_rxhash(nsk, skb);
1263			sk_mark_napi_id(nsk, skb);
1264			if (tcp_child_process(sk, nsk, skb))
1265				goto reset;
1266			if (opt_skb)
1267				__kfree_skb(opt_skb);
1268			return 0;
1269		}
1270	} else
1271		sock_rps_save_rxhash(sk, skb);
1272
1273	if (tcp_rcv_state_process(sk, skb))
1274		goto reset;
1275	if (opt_skb)
1276		goto ipv6_pktoptions;
1277	return 0;
1278
1279reset:
1280	tcp_v6_send_reset(sk, skb);
1281discard:
1282	if (opt_skb)
1283		__kfree_skb(opt_skb);
1284	kfree_skb(skb);
1285	return 0;
1286csum_err:
1287	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
1288	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1289	goto discard;
1290
1291
1292ipv6_pktoptions:
1293	/* Do you ask, what is it?
1294
1295	   1. skb was enqueued by tcp.
1296	   2. skb is added to tail of read queue, rather than out of order.
1297	   3. socket is not in passive state.
1298	   4. Finally, it really contains options, which user wants to receive.
1299	 */
1300	tp = tcp_sk(sk);
1301	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1302	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
1303		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
1304			np->mcast_oif = tcp_v6_iif(opt_skb);
1305		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1306			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
1307		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
1308			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
1309		if (np->repflow)
1310			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
1311		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
1312			skb_set_owner_r(opt_skb, sk);
1313			opt_skb = xchg(&np->pktoptions, opt_skb);
1314		} else {
1315			__kfree_skb(opt_skb);
1316			opt_skb = xchg(&np->pktoptions, NULL);
1317		}
1318	}
1319
1320	kfree_skb(opt_skb);
1321	return 0;
1322}
1323
1324static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
1325			   const struct tcphdr *th)
1326{
1327	/* This is tricky: we move IP6CB at its correct location into
1328	 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
1329	 * _decode_session6() uses IP6CB().
1330	 * barrier() makes sure compiler won't play aliasing games.
1331	 */
1332	memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
1333		sizeof(struct inet6_skb_parm));
1334	barrier();
1335
1336	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1337	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1338				    skb->len - th->doff*4);
1339	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1340	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1341	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1342	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1343	TCP_SKB_CB(skb)->sacked = 0;
1344}
1345
1346static void tcp_v6_restore_cb(struct sk_buff *skb)
1347{
1348	/* We need to move header back to the beginning if xfrm6_policy_check()
1349	 * and tcp_v6_fill_cb() are going to be called again.
1350	 */
1351	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
1352		sizeof(struct inet6_skb_parm));
1353}
1354
1355static int tcp_v6_rcv(struct sk_buff *skb)
1356{
1357	const struct tcphdr *th;
1358	const struct ipv6hdr *hdr;
1359	struct sock *sk;
1360	int ret;
1361	struct net *net = dev_net(skb->dev);
1362
1363	if (skb->pkt_type != PACKET_HOST)
1364		goto discard_it;
1365
1366	/*
1367	 *	Count it even if it's bad.
1368	 */
1369	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1370
1371	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1372		goto discard_it;
1373
1374	th = tcp_hdr(skb);
1375
1376	if (th->doff < sizeof(struct tcphdr)/4)
1377		goto bad_packet;
1378	if (!pskb_may_pull(skb, th->doff*4))
1379		goto discard_it;
1380
1381	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
1382		goto csum_error;
1383
1384	th = tcp_hdr(skb);
1385	hdr = ipv6_hdr(skb);
1386
1387lookup:
1388	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
1389				th->source, th->dest, inet6_iif(skb));
1390	if (!sk)
1391		goto no_tcp_socket;
1392
1393process:
1394	if (sk->sk_state == TCP_TIME_WAIT)
1395		goto do_time_wait;
1396
1397	if (sk->sk_state == TCP_NEW_SYN_RECV) {
1398		struct request_sock *req = inet_reqsk(sk);
1399		struct sock *nsk;
1400
1401		sk = req->rsk_listener;
1402		tcp_v6_fill_cb(skb, hdr, th);
1403		if (tcp_v6_inbound_md5_hash(sk, skb)) {
1404			reqsk_put(req);
1405			goto discard_it;
1406		}
1407		if (unlikely(sk->sk_state != TCP_LISTEN)) {
1408			inet_csk_reqsk_queue_drop_and_put(sk, req);
1409			goto lookup;
1410		}
1411		sock_hold(sk);
1412		nsk = tcp_check_req(sk, skb, req, false);
1413		if (!nsk) {
1414			reqsk_put(req);
1415			goto discard_and_relse;
1416		}
1417		if (nsk == sk) {
1418			reqsk_put(req);
1419			tcp_v6_restore_cb(skb);
1420		} else if (tcp_child_process(sk, nsk, skb)) {
1421			tcp_v6_send_reset(nsk, skb);
1422			goto discard_and_relse;
1423		} else {
1424			sock_put(sk);
1425			return 0;
1426		}
1427	}
1428	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
1429		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1430		goto discard_and_relse;
1431	}
1432
1433	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1434		goto discard_and_relse;
1435
1436	tcp_v6_fill_cb(skb, hdr, th);
1437
1438	if (tcp_v6_inbound_md5_hash(sk, skb))
1439		goto discard_and_relse;
1440
1441	if (sk_filter(sk, skb))
1442		goto discard_and_relse;
1443
1444	skb->dev = NULL;
1445
1446	if (sk->sk_state == TCP_LISTEN) {
1447		ret = tcp_v6_do_rcv(sk, skb);
1448		goto put_and_return;
1449	}
1450
1451	sk_incoming_cpu_update(sk);
1452
1453	bh_lock_sock_nested(sk);
1454	tcp_segs_in(tcp_sk(sk), skb);
1455	ret = 0;
1456	if (!sock_owned_by_user(sk)) {
1457		if (!tcp_prequeue(sk, skb))
1458			ret = tcp_v6_do_rcv(sk, skb);
1459	} else if (unlikely(sk_add_backlog(sk, skb,
1460					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
1461		bh_unlock_sock(sk);
1462		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1463		goto discard_and_relse;
1464	}
1465	bh_unlock_sock(sk);
1466
1467put_and_return:
1468	sock_put(sk);
1469	return ret ? -1 : 0;
1470
1471no_tcp_socket:
1472	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1473		goto discard_it;
1474
1475	tcp_v6_fill_cb(skb, hdr, th);
1476
1477	if (tcp_checksum_complete(skb)) {
1478csum_error:
1479		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
1480bad_packet:
1481		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1482	} else {
1483		tcp_v6_send_reset(NULL, skb);
1484	}
1485
1486discard_it:
1487	kfree_skb(skb);
1488	return 0;
1489
1490discard_and_relse:
1491	sock_put(sk);
1492	goto discard_it;
1493
1494do_time_wait:
1495	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1496		inet_twsk_put(inet_twsk(sk));
1497		goto discard_it;
1498	}
1499
1500	tcp_v6_fill_cb(skb, hdr, th);
1501
1502	if (tcp_checksum_complete(skb)) {
1503		inet_twsk_put(inet_twsk(sk));
1504		goto csum_error;
1505	}
1506
1507	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1508	case TCP_TW_SYN:
1509	{
1510		struct sock *sk2;
1511
1512		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
1513					    skb, __tcp_hdrlen(th),
1514					    &ipv6_hdr(skb)->saddr, th->source,
1515					    &ipv6_hdr(skb)->daddr,
1516					    ntohs(th->dest), tcp_v6_iif(skb));
1517		if (sk2) {
1518			struct inet_timewait_sock *tw = inet_twsk(sk);
1519			inet_twsk_deschedule_put(tw);
1520			sk = sk2;
1521			tcp_v6_restore_cb(skb);
1522			goto process;
1523		}
1524		/* Fall through to ACK */
1525	}
1526	case TCP_TW_ACK:
1527		tcp_v6_timewait_ack(sk, skb);
1528		break;
1529	case TCP_TW_RST:
1530		tcp_v6_restore_cb(skb);
1531		tcp_v6_send_reset(sk, skb);
1532		inet_twsk_deschedule_put(inet_twsk(sk));
1533		goto discard_it;
1534	case TCP_TW_SUCCESS:
1535		;
1536	}
1537	goto discard_it;
1538}
1539
1540static void tcp_v6_early_demux(struct sk_buff *skb)
1541{
1542	const struct ipv6hdr *hdr;
1543	const struct tcphdr *th;
1544	struct sock *sk;
1545
1546	if (skb->pkt_type != PACKET_HOST)
1547		return;
1548
1549	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1550		return;
1551
1552	hdr = ipv6_hdr(skb);
1553	th = tcp_hdr(skb);
1554
1555	if (th->doff < sizeof(struct tcphdr) / 4)
1556		return;
1557
1558	/* Note : We use inet6_iif() here, not tcp_v6_iif() */
1559	sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1560					&hdr->saddr, th->source,
1561					&hdr->daddr, ntohs(th->dest),
1562					inet6_iif(skb));
1563	if (sk) {
1564		skb->sk = sk;
1565		skb->destructor = sock_edemux;
1566		if (sk_fullsock(sk)) {
1567			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
1568
1569			if (dst)
1570				dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
1571			if (dst &&
1572			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1573				skb_dst_set_noref(skb, dst);
1574		}
1575	}
1576}
1577
1578static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1579	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
1580	.twsk_unique	= tcp_twsk_unique,
1581	.twsk_destructor = tcp_twsk_destructor,
1582};
1583
1584static const struct inet_connection_sock_af_ops ipv6_specific = {
1585	.queue_xmit	   = inet6_csk_xmit,
1586	.send_check	   = tcp_v6_send_check,
1587	.rebuild_header	   = inet6_sk_rebuild_header,
1588	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
1589	.conn_request	   = tcp_v6_conn_request,
1590	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
1591	.net_header_len	   = sizeof(struct ipv6hdr),
1592	.net_frag_header_len = sizeof(struct frag_hdr),
1593	.setsockopt	   = ipv6_setsockopt,
1594	.getsockopt	   = ipv6_getsockopt,
1595	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
1596	.sockaddr_len	   = sizeof(struct sockaddr_in6),
1597	.bind_conflict	   = inet6_csk_bind_conflict,
1598#ifdef CONFIG_COMPAT
1599	.compat_setsockopt = compat_ipv6_setsockopt,
1600	.compat_getsockopt = compat_ipv6_getsockopt,
1601#endif
1602	.mtu_reduced	   = tcp_v6_mtu_reduced,
1603};
1604
1605#ifdef CONFIG_TCP_MD5SIG
1606static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
1607	.md5_lookup	=	tcp_v6_md5_lookup,
1608	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
1609	.md5_parse	=	tcp_v6_parse_md5_keys,
1610};
1611#endif
1612
1613/*
1614 *	TCP over IPv4 via INET6 API
1615 */
1616static const struct inet_connection_sock_af_ops ipv6_mapped = {
1617	.queue_xmit	   = ip_queue_xmit,
1618	.send_check	   = tcp_v4_send_check,
1619	.rebuild_header	   = inet_sk_rebuild_header,
1620	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
1621	.conn_request	   = tcp_v6_conn_request,
1622	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
1623	.net_header_len	   = sizeof(struct iphdr),
1624	.setsockopt	   = ipv6_setsockopt,
1625	.getsockopt	   = ipv6_getsockopt,
1626	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
1627	.sockaddr_len	   = sizeof(struct sockaddr_in6),
1628	.bind_conflict	   = inet6_csk_bind_conflict,
1629#ifdef CONFIG_COMPAT
1630	.compat_setsockopt = compat_ipv6_setsockopt,
1631	.compat_getsockopt = compat_ipv6_getsockopt,
1632#endif
1633	.mtu_reduced	   = tcp_v4_mtu_reduced,
1634};
1635
1636#ifdef CONFIG_TCP_MD5SIG
1637static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
1638	.md5_lookup	=	tcp_v4_md5_lookup,
1639	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
1640	.md5_parse	=	tcp_v6_parse_md5_keys,
1641};
1642#endif
1643
1644/* NOTE: A lot of things set to zero explicitly by call to
1645 *       sk_alloc() so need not be done here.
1646 */
1647static int tcp_v6_init_sock(struct sock *sk)
1648{
1649	struct inet_connection_sock *icsk = inet_csk(sk);
1650
1651	tcp_init_sock(sk);
1652
1653	icsk->icsk_af_ops = &ipv6_specific;
1654
1655#ifdef CONFIG_TCP_MD5SIG
1656	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
1657#endif
1658
1659	return 0;
1660}
1661
1662static void tcp_v6_destroy_sock(struct sock *sk)
1663{
1664	tcp_v4_destroy_sock(sk);
1665	inet6_destroy_sock(sk);
1666}
1667
1668#ifdef CONFIG_PROC_FS
1669/* Proc filesystem TCPv6 sock list dumping. */
1670static void get_openreq6(struct seq_file *seq,
1671			 const struct request_sock *req, int i)
1672{
1673	long ttd = req->rsk_timer.expires - jiffies;
1674	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
1675	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;
1676
1677	if (ttd < 0)
1678		ttd = 0;
1679
1680	seq_printf(seq,
1681		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1682		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
1683		   i,
1684		   src->s6_addr32[0], src->s6_addr32[1],
1685		   src->s6_addr32[2], src->s6_addr32[3],
1686		   inet_rsk(req)->ir_num,
1687		   dest->s6_addr32[0], dest->s6_addr32[1],
1688		   dest->s6_addr32[2], dest->s6_addr32[3],
1689		   ntohs(inet_rsk(req)->ir_rmt_port),
1690		   TCP_SYN_RECV,
1691		   0, 0, /* could print option size, but that is af dependent. */
1692		   1,   /* timers active (only the expire timer) */
1693		   jiffies_to_clock_t(ttd),
1694		   req->num_timeout,
1695		   from_kuid_munged(seq_user_ns(seq),
1696				    sock_i_uid(req->rsk_listener)),
1697		   0,  /* non standard timer */
1698		   0, /* open_requests have no inode */
1699		   0, req);
1700}
1701
1702static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1703{
1704	const struct in6_addr *dest, *src;
1705	__u16 destp, srcp;
1706	int timer_active;
1707	unsigned long timer_expires;
1708	const struct inet_sock *inet = inet_sk(sp);
1709	const struct tcp_sock *tp = tcp_sk(sp);
1710	const struct inet_connection_sock *icsk = inet_csk(sp);
1711	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
1712	int rx_queue;
1713	int state;
1714
1715	dest  = &sp->sk_v6_daddr;
1716	src   = &sp->sk_v6_rcv_saddr;
1717	destp = ntohs(inet->inet_dport);
1718	srcp  = ntohs(inet->inet_sport);
1719
1720	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
1721		timer_active	= 1;
1722		timer_expires	= icsk->icsk_timeout;
1723	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1724		timer_active	= 4;
1725		timer_expires	= icsk->icsk_timeout;
1726	} else if (timer_pending(&sp->sk_timer)) {
1727		timer_active	= 2;
1728		timer_expires	= sp->sk_timer.expires;
1729	} else {
1730		timer_active	= 0;
1731		timer_expires = jiffies;
1732	}
1733
1734	state = sk_state_load(sp);
1735	if (state == TCP_LISTEN)
1736		rx_queue = sp->sk_ack_backlog;
1737	else
1738		/* Because we don't lock the socket,
1739		 * we might find a transient negative value.
1740		 */
1741		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
1742
1743	seq_printf(seq,
1744		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1745		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
1746		   i,
1747		   src->s6_addr32[0], src->s6_addr32[1],
1748		   src->s6_addr32[2], src->s6_addr32[3], srcp,
1749		   dest->s6_addr32[0], dest->s6_addr32[1],
1750		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
1751		   state,
1752		   tp->write_seq - tp->snd_una,
1753		   rx_queue,
1754		   timer_active,
1755		   jiffies_delta_to_clock_t(timer_expires - jiffies),
1756		   icsk->icsk_retransmits,
1757		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
1758		   icsk->icsk_probes_out,
1759		   sock_i_ino(sp),
1760		   atomic_read(&sp->sk_refcnt), sp,
1761		   jiffies_to_clock_t(icsk->icsk_rto),
1762		   jiffies_to_clock_t(icsk->icsk_ack.ato),
1763		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
1764		   tp->snd_cwnd,
1765		   state == TCP_LISTEN ?
1766			fastopenq->max_qlen :
1767			(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
1768		   );
1769}
1770
1771static void get_timewait6_sock(struct seq_file *seq,
1772			       struct inet_timewait_sock *tw, int i)
1773{
1774	long delta = tw->tw_timer.expires - jiffies;
1775	const struct in6_addr *dest, *src;
1776	__u16 destp, srcp;
1777
1778	dest = &tw->tw_v6_daddr;
1779	src  = &tw->tw_v6_rcv_saddr;
1780	destp = ntohs(tw->tw_dport);
1781	srcp  = ntohs(tw->tw_sport);
1782
1783	seq_printf(seq,
1784		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1785		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
1786		   i,
1787		   src->s6_addr32[0], src->s6_addr32[1],
1788		   src->s6_addr32[2], src->s6_addr32[3], srcp,
1789		   dest->s6_addr32[0], dest->s6_addr32[1],
1790		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
1791		   tw->tw_substate, 0, 0,
1792		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
1793		   atomic_read(&tw->tw_refcnt), tw);
1794}
1795
1796static int tcp6_seq_show(struct seq_file *seq, void *v)
1797{
1798	struct tcp_iter_state *st;
1799	struct sock *sk = v;
1800
1801	if (v == SEQ_START_TOKEN) {
1802		seq_puts(seq,
1803			 "  sl  "
1804			 "local_address                         "
1805			 "remote_address                        "
1806			 "st tx_queue rx_queue tr tm->when retrnsmt"
1807			 "   uid  timeout inode\n");
1808		goto out;
1809	}
1810	st = seq->private;
1811
1812	if (sk->sk_state == TCP_TIME_WAIT)
1813		get_timewait6_sock(seq, v, st->num);
1814	else if (sk->sk_state == TCP_NEW_SYN_RECV)
1815		get_openreq6(seq, v, st->num);
1816	else
1817		get_tcp6_sock(seq, v, st->num);
1818out:
1819	return 0;
1820}
1821
1822static const struct file_operations tcp6_afinfo_seq_fops = {
1823	.owner   = THIS_MODULE,
1824	.open    = tcp_seq_open,
1825	.read    = seq_read,
1826	.llseek  = seq_lseek,
1827	.release = seq_release_net
1828};
1829
1830static struct tcp_seq_afinfo tcp6_seq_afinfo = {
1831	.name		= "tcp6",
1832	.family		= AF_INET6,
1833	.seq_fops	= &tcp6_afinfo_seq_fops,
1834	.seq_ops	= {
1835		.show		= tcp6_seq_show,
1836	},
1837};
1838
1839int __net_init tcp6_proc_init(struct net *net)
1840{
1841	return tcp_proc_register(net, &tcp6_seq_afinfo);
1842}
1843
1844void tcp6_proc_exit(struct net *net)
1845{
1846	tcp_proc_unregister(net, &tcp6_seq_afinfo);
1847}
1848#endif
1849
1850static void tcp_v6_clear_sk(struct sock *sk, int size)
1851{
1852	struct inet_sock *inet = inet_sk(sk);
1853
1854	/* we do not want to clear pinet6 field, because of RCU lookups */
1855	sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6));
1856
1857	size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
1858	memset(&inet->pinet6 + 1, 0, size);
1859}
1860
1861struct proto tcpv6_prot = {
1862	.name			= "TCPv6",
1863	.owner			= THIS_MODULE,
1864	.close			= tcp_close,
1865	.connect		= tcp_v6_connect,
1866	.disconnect		= tcp_disconnect,
1867	.accept			= inet_csk_accept,
1868	.ioctl			= tcp_ioctl,
1869	.init			= tcp_v6_init_sock,
1870	.destroy		= tcp_v6_destroy_sock,
1871	.shutdown		= tcp_shutdown,
1872	.setsockopt		= tcp_setsockopt,
1873	.getsockopt		= tcp_getsockopt,
1874	.recvmsg		= tcp_recvmsg,
1875	.sendmsg		= tcp_sendmsg,
1876	.sendpage		= tcp_sendpage,
1877	.backlog_rcv		= tcp_v6_do_rcv,
1878	.release_cb		= tcp_release_cb,
1879	.hash			= inet6_hash,
1880	.unhash			= inet_unhash,
1881	.get_port		= inet_csk_get_port,
1882	.enter_memory_pressure	= tcp_enter_memory_pressure,
1883	.stream_memory_free	= tcp_stream_memory_free,
1884	.sockets_allocated	= &tcp_sockets_allocated,
1885	.memory_allocated	= &tcp_memory_allocated,
1886	.memory_pressure	= &tcp_memory_pressure,
1887	.orphan_count		= &tcp_orphan_count,
1888	.sysctl_mem		= sysctl_tcp_mem,
1889	.sysctl_wmem		= sysctl_tcp_wmem,
1890	.sysctl_rmem		= sysctl_tcp_rmem,
1891	.max_header		= MAX_TCP_HEADER,
1892	.obj_size		= sizeof(struct tcp6_sock),
1893	.slab_flags		= SLAB_DESTROY_BY_RCU,
1894	.twsk_prot		= &tcp6_timewait_sock_ops,
1895	.rsk_prot		= &tcp6_request_sock_ops,
1896	.h.hashinfo		= &tcp_hashinfo,
1897	.no_autobind		= true,
1898#ifdef CONFIG_COMPAT
1899	.compat_setsockopt	= compat_tcp_setsockopt,
1900	.compat_getsockopt	= compat_tcp_getsockopt,
1901#endif
1902	.clear_sk		= tcp_v6_clear_sk,
1903	.diag_destroy		= tcp_abort,
1904};
1905
1906static const struct inet6_protocol tcpv6_protocol = {
1907	.early_demux	=	tcp_v6_early_demux,
1908	.handler	=	tcp_v6_rcv,
1909	.err_handler	=	tcp_v6_err,
1910	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
1911};
1912
1913static struct inet_protosw tcpv6_protosw = {
1914	.type		=	SOCK_STREAM,
1915	.protocol	=	IPPROTO_TCP,
1916	.prot		=	&tcpv6_prot,
1917	.ops		=	&inet6_stream_ops,
1918	.flags		=	INET_PROTOSW_PERMANENT |
1919				INET_PROTOSW_ICSK,
1920};
1921
1922static int __net_init tcpv6_net_init(struct net *net)
1923{
1924	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
1925				    SOCK_RAW, IPPROTO_TCP, net);
1926}
1927
1928static void __net_exit tcpv6_net_exit(struct net *net)
1929{
1930	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
1931}
1932
1933static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
1934{
1935	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
1936}
1937
1938static struct pernet_operations tcpv6_net_ops = {
1939	.init	    = tcpv6_net_init,
1940	.exit	    = tcpv6_net_exit,
1941	.exit_batch = tcpv6_net_exit_batch,
1942};
1943
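/* Module init: register the IPv6 protocol handler, the SOCK_STREAM
 * protosw entry and the per-namespace control sockets, unwinding in
 * reverse order if any step fails.
 */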
1944int __init tcpv6_init(void)
1945{
1946	int ret;
1947
1948	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
1949	if (ret)
1950		goto out;
1951
1952	/* register inet6 protocol */
1953	ret = inet6_register_protosw(&tcpv6_protosw);
1954	if (ret)
1955		goto out_tcpv6_protocol;
1956
1957	ret = register_pernet_subsys(&tcpv6_net_ops);
1958	if (ret)
1959		goto out_tcpv6_protosw;
1960out:
1961	return ret;
1962
1963out_tcpv6_protosw:
1964	inet6_unregister_protosw(&tcpv6_protosw);
1965out_tcpv6_protocol:
1966	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
1967	goto out;
1968}
1969
1970void tcpv6_exit(void)
1971{
1972	unregister_pernet_subsys(&tcpv6_net_ops);
1973	inet6_unregister_protosw(&tcpv6_protosw);
1974	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
1975}
v4.10.11
   1/*
   2 *	TCP over IPv6
   3 *	Linux INET6 implementation
   4 *
   5 *	Authors:
   6 *	Pedro Roque		<roque@di.fc.ul.pt>
   7 *
   8 *	Based on:
   9 *	linux/net/ipv4/tcp.c
  10 *	linux/net/ipv4/tcp_input.c
  11 *	linux/net/ipv4/tcp_output.c
  12 *
  13 *	Fixes:
  14 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
  15 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
  16 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
  17 *					a single port at the same time.
  18 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
  19 *
  20 *	This program is free software; you can redistribute it and/or
  21 *      modify it under the terms of the GNU General Public License
  22 *      as published by the Free Software Foundation; either version
  23 *      2 of the License, or (at your option) any later version.
  24 */
  25
  26#include <linux/bottom_half.h>
  27#include <linux/module.h>
  28#include <linux/errno.h>
  29#include <linux/types.h>
  30#include <linux/socket.h>
  31#include <linux/sockios.h>
  32#include <linux/net.h>
  33#include <linux/jiffies.h>
  34#include <linux/in.h>
  35#include <linux/in6.h>
  36#include <linux/netdevice.h>
  37#include <linux/init.h>
  38#include <linux/jhash.h>
  39#include <linux/ipsec.h>
  40#include <linux/times.h>
  41#include <linux/slab.h>
  42#include <linux/uaccess.h>
  43#include <linux/ipv6.h>
  44#include <linux/icmpv6.h>
  45#include <linux/random.h>
  46
  47#include <net/tcp.h>
  48#include <net/ndisc.h>
  49#include <net/inet6_hashtables.h>
  50#include <net/inet6_connection_sock.h>
  51#include <net/ipv6.h>
  52#include <net/transp_v6.h>
  53#include <net/addrconf.h>
  54#include <net/ip6_route.h>
  55#include <net/ip6_checksum.h>
  56#include <net/inet_ecn.h>
  57#include <net/protocol.h>
  58#include <net/xfrm.h>
  59#include <net/snmp.h>
  60#include <net/dsfield.h>
  61#include <net/timewait_sock.h>
  62#include <net/inet_common.h>
  63#include <net/secure_seq.h>
  64#include <net/busy_poll.h>
  65
  66#include <linux/proc_fs.h>
  67#include <linux/seq_file.h>
  68
  69#include <crypto/hash.h>
  70#include <linux/scatterlist.h>
  71
  72static void	tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
  73static void	tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
  74				      struct request_sock *req);
  75
  76static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
  77
  78static const struct inet_connection_sock_af_ops ipv6_mapped;
  79static const struct inet_connection_sock_af_ops ipv6_specific;
  80#ifdef CONFIG_TCP_MD5SIG
  81static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
  82static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
  83#else
  84static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
  85						   const struct in6_addr *addr)
  86{
  87	return NULL;
  88}
  89#endif
  90
  91static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
  92{
  93	struct dst_entry *dst = skb_dst(skb);
  94
  95	if (dst && dst_hold_safe(dst)) {
  96		const struct rt6_info *rt = (const struct rt6_info *)dst;
  97
  98		sk->sk_rx_dst = dst;
  99		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
 100		inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
 101	}
 102}
 103
 104static u32 tcp_v6_init_sequence(const struct sk_buff *skb, u32 *tsoff)
 105{
 106	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
 107					    ipv6_hdr(skb)->saddr.s6_addr32,
 108					    tcp_hdr(skb)->dest,
 109					    tcp_hdr(skb)->source, tsoff);
 110}
 111
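/* Active open (connect()) for an IPv6 TCP socket: validate the address,
 * honour flow labels and scope ids, fall back to tcp_v4_connect() for
 * v4-mapped destinations, route the flow, choose a source address, hash
 * the socket and finally send the SYN via tcp_connect().
 */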
 112static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 113			  int addr_len)
 114{
 115	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
 116	struct inet_sock *inet = inet_sk(sk);
 117	struct inet_connection_sock *icsk = inet_csk(sk);
 118	struct ipv6_pinfo *np = inet6_sk(sk);
 119	struct tcp_sock *tp = tcp_sk(sk);
 120	struct in6_addr *saddr = NULL, *final_p, final;
 121	struct ipv6_txoptions *opt;
 122	struct flowi6 fl6;
 123	struct dst_entry *dst;
 124	int addr_type;
 125	int err;
 126
 127	if (addr_len < SIN6_LEN_RFC2133)
 128		return -EINVAL;
 129
 130	if (usin->sin6_family != AF_INET6)
 131		return -EAFNOSUPPORT;
 132
 133	memset(&fl6, 0, sizeof(fl6));
 134
 135	if (np->sndflow) {
 136		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
 137		IP6_ECN_flow_init(fl6.flowlabel);
 138		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
 139			struct ip6_flowlabel *flowlabel;
 140			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
 141			if (!flowlabel)
 142				return -EINVAL;
 143			fl6_sock_release(flowlabel);
 144		}
 145	}
 146
 147	/*
 148	 *	connect() to INADDR_ANY means loopback (BSD'ism).
 149	 */
 150
 151	if (ipv6_addr_any(&usin->sin6_addr)) {
 152		if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
 153			ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
 154					       &usin->sin6_addr);
 155		else
 156			usin->sin6_addr = in6addr_loopback;
 157	}
 158
 159	addr_type = ipv6_addr_type(&usin->sin6_addr);
 160
 161	if (addr_type & IPV6_ADDR_MULTICAST)
 162		return -ENETUNREACH;
 163
 164	if (addr_type&IPV6_ADDR_LINKLOCAL) {
 165		if (addr_len >= sizeof(struct sockaddr_in6) &&
 166		    usin->sin6_scope_id) {
 167			/* If interface is set while binding, indices
 168			 * must coincide.
 169			 */
 170			if (sk->sk_bound_dev_if &&
 171			    sk->sk_bound_dev_if != usin->sin6_scope_id)
 172				return -EINVAL;
 173
 174			sk->sk_bound_dev_if = usin->sin6_scope_id;
 175		}
 176
 177		/* Connecting to a link-local address requires an interface */
 178		if (!sk->sk_bound_dev_if)
 179			return -EINVAL;
 180	}
 181
 182	if (tp->rx_opt.ts_recent_stamp &&
 183	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
 184		tp->rx_opt.ts_recent = 0;
 185		tp->rx_opt.ts_recent_stamp = 0;
 186		tp->write_seq = 0;
 187	}
 188
 189	sk->sk_v6_daddr = usin->sin6_addr;
 190	np->flow_label = fl6.flowlabel;
 191
 192	/*
 193	 *	TCP over IPv4
 194	 */
 195
 196	if (addr_type & IPV6_ADDR_MAPPED) {
 197		u32 exthdrlen = icsk->icsk_ext_hdr_len;
 198		struct sockaddr_in sin;
 199
 200		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
 201
 202		if (__ipv6_only_sock(sk))
 203			return -ENETUNREACH;
 204
 205		sin.sin_family = AF_INET;
 206		sin.sin_port = usin->sin6_port;
 207		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
 208
 209		icsk->icsk_af_ops = &ipv6_mapped;
 210		sk->sk_backlog_rcv = tcp_v4_do_rcv;
 211#ifdef CONFIG_TCP_MD5SIG
 212		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
 213#endif
 214
 215		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
 216
 217		if (err) {
 218			icsk->icsk_ext_hdr_len = exthdrlen;
 219			icsk->icsk_af_ops = &ipv6_specific;
 220			sk->sk_backlog_rcv = tcp_v6_do_rcv;
 221#ifdef CONFIG_TCP_MD5SIG
 222			tp->af_specific = &tcp_sock_ipv6_specific;
 223#endif
 224			goto failure;
 225		}
 226		np->saddr = sk->sk_v6_rcv_saddr;
 227
 228		return err;
 229	}
 230
 231	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
 232		saddr = &sk->sk_v6_rcv_saddr;
 233
 234	fl6.flowi6_proto = IPPROTO_TCP;
 235	fl6.daddr = sk->sk_v6_daddr;
 236	fl6.saddr = saddr ? *saddr : np->saddr;
 237	fl6.flowi6_oif = sk->sk_bound_dev_if;
 238	fl6.flowi6_mark = sk->sk_mark;
 239	fl6.fl6_dport = usin->sin6_port;
 240	fl6.fl6_sport = inet->inet_sport;
 241	fl6.flowi6_uid = sk->sk_uid;
 242
 243	opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
 244	final_p = fl6_update_dst(&fl6, opt, &final);
 245
 246	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
 247
 248	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
 249	if (IS_ERR(dst)) {
 250		err = PTR_ERR(dst);
 251		goto failure;
 252	}
 253
 254	if (!saddr) {
 255		saddr = &fl6.saddr;
 256		sk->sk_v6_rcv_saddr = *saddr;
 257	}
 258
 259	/* set the source address */
 260	np->saddr = *saddr;
 261	inet->inet_rcv_saddr = LOOPBACK4_IPV6;
 262
 263	sk->sk_gso_type = SKB_GSO_TCPV6;
 264	ip6_dst_store(sk, dst, NULL, NULL);
 265
 266	if (tcp_death_row.sysctl_tw_recycle &&
 267	    !tp->rx_opt.ts_recent_stamp &&
 268	    ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr))
 269		tcp_fetch_timewait_stamp(sk, dst);
 270
 271	icsk->icsk_ext_hdr_len = 0;
 272	if (opt)
 273		icsk->icsk_ext_hdr_len = opt->opt_flen +
 274					 opt->opt_nflen;
 275
 276	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
 277
 278	inet->inet_dport = usin->sin6_port;
 279
 280	tcp_set_state(sk, TCP_SYN_SENT);
 281	err = inet6_hash_connect(&tcp_death_row, sk);
 282	if (err)
 283		goto late_failure;
 284
 285	sk_set_txhash(sk);
 286
 287	if (!tp->write_seq && likely(!tp->repair))
 288		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
 289							     sk->sk_v6_daddr.s6_addr32,
 290							     inet->inet_sport,
 291							     inet->inet_dport,
 292							     &tp->tsoffset);
 293
 294	err = tcp_connect(sk);
 295	if (err)
 296		goto late_failure;
 297
 298	return 0;
 299
 300late_failure:
 301	tcp_set_state(sk, TCP_CLOSE);
 302	__sk_dst_reset(sk);
 303failure:
 304	inet->inet_dport = 0;
 305	sk->sk_route_caps = 0;
 306	return err;
 307}
 308
 309static void tcp_v6_mtu_reduced(struct sock *sk)
 310{
 311	struct dst_entry *dst;
 312
 313	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
 314		return;
 315
 316	dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
 317	if (!dst)
 318		return;
 319
 320	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
 321		tcp_sync_mss(sk, dst_mtu(dst));
 322		tcp_simple_retransmit(sk);
 323	}
 324}
 325
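/* ICMPv6 error handler: look up the socket the offending segment belongs
 * to and react to PMTU changes, redirects and the errors translated by
 * icmpv6_err_convert().
 */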
 326static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 327		u8 type, u8 code, int offset, __be32 info)
 328{
 329	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
 330	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
 331	struct net *net = dev_net(skb->dev);
 332	struct request_sock *fastopen;
 333	struct ipv6_pinfo *np;
 334	struct tcp_sock *tp;
 335	__u32 seq, snd_una;
 336	struct sock *sk;
 337	bool fatal;
 338	int err;
 339
 340	sk = __inet6_lookup_established(net, &tcp_hashinfo,
 341					&hdr->daddr, th->dest,
 342					&hdr->saddr, ntohs(th->source),
 343					skb->dev->ifindex);
 344
 345	if (!sk) {
 346		__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
 347				  ICMP6_MIB_INERRORS);
 348		return;
 349	}
 350
 351	if (sk->sk_state == TCP_TIME_WAIT) {
 352		inet_twsk_put(inet_twsk(sk));
 353		return;
 354	}
 355	seq = ntohl(th->seq);
 356	fatal = icmpv6_err_convert(type, code, &err);
 357	if (sk->sk_state == TCP_NEW_SYN_RECV)
 358		return tcp_req_err(sk, seq, fatal);
 359
 360	bh_lock_sock(sk);
 361	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
 362		__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
 363
 364	if (sk->sk_state == TCP_CLOSE)
 365		goto out;
 366
 367	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
 368		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
 369		goto out;
 370	}
 371
 372	tp = tcp_sk(sk);
 373	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child()) */
 374	fastopen = tp->fastopen_rsk;
 375	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
 376	if (sk->sk_state != TCP_LISTEN &&
 377	    !between(seq, snd_una, tp->snd_nxt)) {
 378		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
 379		goto out;
 380	}
 381
 382	np = inet6_sk(sk);
 383
 384	if (type == NDISC_REDIRECT) {
 385		if (!sock_owned_by_user(sk)) {
 386			struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
 387
 388			if (dst)
 389				dst->ops->redirect(dst, sk, skb);
 390		}
 391		goto out;
 392	}
 393
 394	if (type == ICMPV6_PKT_TOOBIG) {
 395		/* We are not interested in TCP_LISTEN and open_requests
 396		 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
 397		 * they should go through unfragmented).
 398		 */
 399		if (sk->sk_state == TCP_LISTEN)
 400			goto out;
 401
 402		if (!ip6_sk_accept_pmtu(sk))
 403			goto out;
 404
 405		tp->mtu_info = ntohl(info);
 406		if (!sock_owned_by_user(sk))
 407			tcp_v6_mtu_reduced(sk);
 408		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
 409					   &sk->sk_tsq_flags))
 410			sock_hold(sk);
 411		goto out;
 412	}
 413
 414
 415	/* Might be for a request_sock */
 416	switch (sk->sk_state) {
 417	case TCP_SYN_SENT:
 418	case TCP_SYN_RECV:
 419		/* Only in fast or simultaneous open. If a fast open socket
 420		 * is already accepted it is treated as a connected one below.
 421		 */
 422		if (fastopen && !fastopen->sk)
 423			break;
 424
 425		if (!sock_owned_by_user(sk)) {
 426			sk->sk_err = err;
 427			sk->sk_error_report(sk);		/* Wake people up to see the error (see connect in sock.c) */
 428
 429			tcp_done(sk);
 430		} else
 431			sk->sk_err_soft = err;
 432		goto out;
 433	}
 434
 435	if (!sock_owned_by_user(sk) && np->recverr) {
 436		sk->sk_err = err;
 437		sk->sk_error_report(sk);
 438	} else
 439		sk->sk_err_soft = err;
 440
 441out:
 442	bh_unlock_sock(sk);
 443	sock_put(sk);
 444}
 445
 446
 447static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
 448			      struct flowi *fl,
 449			      struct request_sock *req,
 450			      struct tcp_fastopen_cookie *foc,
 451			      enum tcp_synack_type synack_type)
 452{
 453	struct inet_request_sock *ireq = inet_rsk(req);
 454	struct ipv6_pinfo *np = inet6_sk(sk);
 455	struct ipv6_txoptions *opt;
 456	struct flowi6 *fl6 = &fl->u.ip6;
 457	struct sk_buff *skb;
 458	int err = -ENOMEM;
 459
 460	/* First, grab a route. */
 461	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
 462					       IPPROTO_TCP)) == NULL)
 463		goto done;
 464
 465	skb = tcp_make_synack(sk, dst, req, foc, synack_type);
 466
 467	if (skb) {
 468		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
 469				    &ireq->ir_v6_rmt_addr);
 470
 471		fl6->daddr = ireq->ir_v6_rmt_addr;
 472		if (np->repflow && ireq->pktopts)
 473			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
 474
 475		rcu_read_lock();
 476		opt = ireq->ipv6_opt;
 477		if (!opt)
 478			opt = rcu_dereference(np->opt);
 479		err = ip6_xmit(sk, skb, fl6, sk->sk_mark, opt, np->tclass);
 480		rcu_read_unlock();
 481		err = net_xmit_eval(err);
 482	}
 483
 484done:
 485	return err;
 486}
 487
 488
 489static void tcp_v6_reqsk_destructor(struct request_sock *req)
 490{
 491	kfree(inet_rsk(req)->ipv6_opt);
 492	kfree_skb(inet_rsk(req)->pktopts);
 493}
 494
 495#ifdef CONFIG_TCP_MD5SIG
 496static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
 497						   const struct in6_addr *addr)
 498{
 499	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
 500}
 501
 502static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
 503						const struct sock *addr_sk)
 504{
 505	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
 506}
 507
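/* TCP_MD5SIG setsockopt handler for IPv6 sockets: a zero key length
 * deletes the key, and v4-mapped peer addresses are stored as AF_INET
 * keys.
 */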
 508static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
 509				 int optlen)
 510{
 511	struct tcp_md5sig cmd;
 512	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
 513
 514	if (optlen < sizeof(cmd))
 515		return -EINVAL;
 516
 517	if (copy_from_user(&cmd, optval, sizeof(cmd)))
 518		return -EFAULT;
 519
 520	if (sin6->sin6_family != AF_INET6)
 521		return -EINVAL;
 522
 523	if (!cmd.tcpm_keylen) {
 524		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
 525			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
 526					      AF_INET);
 527		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
 528				      AF_INET6);
 529	}
 530
 531	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
 532		return -EINVAL;
 533
 534	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
 535		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
 536				      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
 537
 538	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
 539			      AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
 540}
 541
 542static int tcp_v6_md5_hash_headers(struct tcp_md5sig_pool *hp,
 543				   const struct in6_addr *daddr,
 544				   const struct in6_addr *saddr,
 545				   const struct tcphdr *th, int nbytes)
 546{
 547	struct tcp6_pseudohdr *bp;
 548	struct scatterlist sg;
 549	struct tcphdr *_th;
 550
 551	bp = hp->scratch;
 552	/* 1. TCP pseudo-header (RFC2460) */
 553	bp->saddr = *saddr;
 554	bp->daddr = *daddr;
 555	bp->protocol = cpu_to_be32(IPPROTO_TCP);
 556	bp->len = cpu_to_be32(nbytes);
 557
 558	_th = (struct tcphdr *)(bp + 1);
 559	memcpy(_th, th, sizeof(*th));
 560	_th->check = 0;
 561
 562	sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
 563	ahash_request_set_crypt(hp->md5_req, &sg, NULL,
 564				sizeof(*bp) + sizeof(*th));
 565	return crypto_ahash_update(hp->md5_req);
 566}
 567
 568static int tcp_v6_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
 569			       const struct in6_addr *daddr, struct in6_addr *saddr,
 570			       const struct tcphdr *th)
 571{
 572	struct tcp_md5sig_pool *hp;
 573	struct ahash_request *req;
 574
 575	hp = tcp_get_md5sig_pool();
 576	if (!hp)
 577		goto clear_hash_noput;
 578	req = hp->md5_req;
 579
 580	if (crypto_ahash_init(req))
 581		goto clear_hash;
 582	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
 583		goto clear_hash;
 584	if (tcp_md5_hash_key(hp, key))
 585		goto clear_hash;
 586	ahash_request_set_crypt(req, NULL, md5_hash, 0);
 587	if (crypto_ahash_final(req))
 588		goto clear_hash;
 589
 590	tcp_put_md5sig_pool();
 591	return 0;
 592
 593clear_hash:
 594	tcp_put_md5sig_pool();
 595clear_hash_noput:
 596	memset(md5_hash, 0, 16);
 597	return 1;
 598}
 599
 600static int tcp_v6_md5_hash_skb(char *md5_hash,
 601			       const struct tcp_md5sig_key *key,
 602			       const struct sock *sk,
 603			       const struct sk_buff *skb)
 604{
 605	const struct in6_addr *saddr, *daddr;
 606	struct tcp_md5sig_pool *hp;
 607	struct ahash_request *req;
 608	const struct tcphdr *th = tcp_hdr(skb);
 609
 610	if (sk) { /* valid for establish/request sockets */
 611		saddr = &sk->sk_v6_rcv_saddr;
 612		daddr = &sk->sk_v6_daddr;
 613	} else {
 614		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
 615		saddr = &ip6h->saddr;
 616		daddr = &ip6h->daddr;
 617	}
 618
 619	hp = tcp_get_md5sig_pool();
 620	if (!hp)
 621		goto clear_hash_noput;
 622	req = hp->md5_req;
 623
 624	if (crypto_ahash_init(req))
 625		goto clear_hash;
 626
 627	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, skb->len))
 628		goto clear_hash;
 629	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
 630		goto clear_hash;
 631	if (tcp_md5_hash_key(hp, key))
 632		goto clear_hash;
 633	ahash_request_set_crypt(req, NULL, md5_hash, 0);
 634	if (crypto_ahash_final(req))
 635		goto clear_hash;
 636
 637	tcp_put_md5sig_pool();
 638	return 0;
 639
 640clear_hash:
 641	tcp_put_md5sig_pool();
 642clear_hash_noput:
 643	memset(md5_hash, 0, 16);
 644	return 1;
 645}
 646
 647#endif
 648
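/* Validate the TCP MD5 option of an incoming segment against the key
 * configured for the peer address (if any). Returns true when the
 * segment should be dropped: missing, unexpected or invalid signature.
 */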
 649static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
 650				    const struct sk_buff *skb)
 651{
 652#ifdef CONFIG_TCP_MD5SIG
 653	const __u8 *hash_location = NULL;
 654	struct tcp_md5sig_key *hash_expected;
 655	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
 656	const struct tcphdr *th = tcp_hdr(skb);
 657	int genhash;
 658	u8 newhash[16];
 659
 660	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
 661	hash_location = tcp_parse_md5sig_option(th);
 662
 663	/* We've parsed the options - do we have a hash? */
 664	if (!hash_expected && !hash_location)
 665		return false;
 666
 667	if (hash_expected && !hash_location) {
 668		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
 669		return true;
 670	}
 671
 672	if (!hash_expected && hash_location) {
 673		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
 674		return true;
 675	}
 676
 677	/* check the signature */
 678	genhash = tcp_v6_md5_hash_skb(newhash,
 679				      hash_expected,
 680				      NULL, skb);
 681
 682	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
 683		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
 684		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
 685				     genhash ? "failed" : "mismatch",
 686				     &ip6h->saddr, ntohs(th->source),
 687				     &ip6h->daddr, ntohs(th->dest));
 688		return true;
 689	}
 690#endif
 691	return false;
 692}
 693
 694static void tcp_v6_init_req(struct request_sock *req,
 695			    const struct sock *sk_listener,
 696			    struct sk_buff *skb)
 697{
 698	struct inet_request_sock *ireq = inet_rsk(req);
 699	const struct ipv6_pinfo *np = inet6_sk(sk_listener);
 700
 701	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
 702	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
 703
 704	/* So that link locals have meaning */
 705	if (!sk_listener->sk_bound_dev_if &&
 706	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
 707		ireq->ir_iif = tcp_v6_iif(skb);
 708
 709	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
 710	    (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
 711	     np->rxopt.bits.rxinfo ||
 712	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
 713	     np->rxopt.bits.rxohlim || np->repflow)) {
 714		atomic_inc(&skb->users);
 715		ireq->pktopts = skb;
 716	}
 717}
 718
 719static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
 720					  struct flowi *fl,
 721					  const struct request_sock *req,
 722					  bool *strict)
 723{
 724	if (strict)
 725		*strict = true;
 726	return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
 727}
 728
 729struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
 730	.family		=	AF_INET6,
 731	.obj_size	=	sizeof(struct tcp6_request_sock),
 732	.rtx_syn_ack	=	tcp_rtx_synack,
 733	.send_ack	=	tcp_v6_reqsk_send_ack,
 734	.destructor	=	tcp_v6_reqsk_destructor,
 735	.send_reset	=	tcp_v6_send_reset,
 736	.syn_ack_timeout =	tcp_syn_ack_timeout,
 737};
 738
 739static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
 740	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
 741				sizeof(struct ipv6hdr),
 742#ifdef CONFIG_TCP_MD5SIG
 743	.req_md5_lookup	=	tcp_v6_md5_lookup,
 744	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
 745#endif
 746	.init_req	=	tcp_v6_init_req,
 747#ifdef CONFIG_SYN_COOKIES
 748	.cookie_init_seq =	cookie_v6_init_sequence,
 749#endif
 750	.route_req	=	tcp_v6_route_req,
 751	.init_seq	=	tcp_v6_init_sequence,
 752	.send_synack	=	tcp_v6_send_synack,
 753};
 754
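/* Build and transmit an unattached reply segment (a RST when @rst is set,
 * otherwise a bare ACK) on the per-namespace TCP control socket,
 * optionally carrying timestamp and MD5 options.
 */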
 755static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
 756				 u32 ack, u32 win, u32 tsval, u32 tsecr,
 757				 int oif, struct tcp_md5sig_key *key, int rst,
 758				 u8 tclass, __be32 label)
 759{
 760	const struct tcphdr *th = tcp_hdr(skb);
 761	struct tcphdr *t1;
 762	struct sk_buff *buff;
 763	struct flowi6 fl6;
 764	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
 765	struct sock *ctl_sk = net->ipv6.tcp_sk;
 766	unsigned int tot_len = sizeof(struct tcphdr);
 767	struct dst_entry *dst;
 768	__be32 *topt;
 769
 770	if (tsecr)
 771		tot_len += TCPOLEN_TSTAMP_ALIGNED;
 772#ifdef CONFIG_TCP_MD5SIG
 773	if (key)
 774		tot_len += TCPOLEN_MD5SIG_ALIGNED;
 775#endif
 776
 777	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
 778			 GFP_ATOMIC);
 779	if (!buff)
 780		return;
 781
 782	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
 783
 784	t1 = (struct tcphdr *) skb_push(buff, tot_len);
 785	skb_reset_transport_header(buff);
 786
 787	/* Swap the send and the receive. */
 788	memset(t1, 0, sizeof(*t1));
 789	t1->dest = th->source;
 790	t1->source = th->dest;
 791	t1->doff = tot_len / 4;
 792	t1->seq = htonl(seq);
 793	t1->ack_seq = htonl(ack);
 794	t1->ack = !rst || !th->ack;
 795	t1->rst = rst;
 796	t1->window = htons(win);
 797
 798	topt = (__be32 *)(t1 + 1);
 799
 800	if (tsecr) {
 801		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
 802				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
 803		*topt++ = htonl(tsval);
 804		*topt++ = htonl(tsecr);
 805	}
 806
 807#ifdef CONFIG_TCP_MD5SIG
 808	if (key) {
 809		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
 810				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
 811		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
 812				    &ipv6_hdr(skb)->saddr,
 813				    &ipv6_hdr(skb)->daddr, t1);
 814	}
 815#endif
 816
 817	memset(&fl6, 0, sizeof(fl6));
 818	fl6.daddr = ipv6_hdr(skb)->saddr;
 819	fl6.saddr = ipv6_hdr(skb)->daddr;
 820	fl6.flowlabel = label;
 821
 822	buff->ip_summed = CHECKSUM_PARTIAL;
 823	buff->csum = 0;
 824
 825	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
 826
 827	fl6.flowi6_proto = IPPROTO_TCP;
 828	if (rt6_need_strict(&fl6.daddr) && !oif)
 829		fl6.flowi6_oif = tcp_v6_iif(skb);
 830	else {
 831		if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
 832			oif = skb->skb_iif;
 833
 834		fl6.flowi6_oif = oif;
 835	}
 836
 837	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
 838	fl6.fl6_dport = t1->dest;
 839	fl6.fl6_sport = t1->source;
 840	fl6.flowi6_uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
 841	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
 842
 843	/* Pass a socket to ip6_dst_lookup even if it is for a RST;
 844	 * the underlying function will use it to retrieve the network
 845	 * namespace.
 846	 */
 847	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
 848	if (!IS_ERR(dst)) {
 849		skb_dst_set(buff, dst);
 850		ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass);
 851		TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
 852		if (rst)
 853			TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
 854		return;
 855	}
 856
 857	kfree_skb(buff);
 858}
 859
 860static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
 861{
 862	const struct tcphdr *th = tcp_hdr(skb);
 863	u32 seq = 0, ack_seq = 0;
 864	struct tcp_md5sig_key *key = NULL;
 865#ifdef CONFIG_TCP_MD5SIG
 866	const __u8 *hash_location = NULL;
 867	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
 868	unsigned char newhash[16];
 869	int genhash;
 870	struct sock *sk1 = NULL;
 871#endif
 872	int oif;
 873
 874	if (th->rst)
 875		return;
 876
 877	/* If sk is not NULL, it means we did a successful lookup and the incoming
 878	 * route had to be correct. prequeue might have dropped our dst.
 879	 */
 880	if (!sk && !ipv6_unicast_destination(skb))
 881		return;
 882
 883#ifdef CONFIG_TCP_MD5SIG
 884	rcu_read_lock();
 885	hash_location = tcp_parse_md5sig_option(th);
 886	if (sk && sk_fullsock(sk)) {
 887		key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr);
 888	} else if (hash_location) {
 889		/*
 890		 * The active side is gone. Try to find the listening socket
 891		 * through the source port, and then the MD5 key through that
 892		 * listening socket. We do not lose security here:
 893		 * the incoming packet is checked against the MD5 hash of the
 894		 * key we find, and no RST is generated if the hash doesn't match.
 895		 */
 896		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
 897					   &tcp_hashinfo, NULL, 0,
 898					   &ipv6h->saddr,
 899					   th->source, &ipv6h->daddr,
 900					   ntohs(th->source), tcp_v6_iif(skb));
 901		if (!sk1)
 902			goto out;
 903
 904		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
 905		if (!key)
 906			goto out;
 907
 908		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
 909		if (genhash || memcmp(hash_location, newhash, 16) != 0)
 910			goto out;
 911	}
 912#endif
 913
 914	if (th->ack)
 915		seq = ntohl(th->ack_seq);
 916	else
 917		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
 918			  (th->doff << 2);
 919
 920	oif = sk ? sk->sk_bound_dev_if : 0;
 921	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);
 922
 923#ifdef CONFIG_TCP_MD5SIG
 924out:
 925	rcu_read_unlock();
 926#endif
 927}
 928
 929static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
 930			    u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
 931			    struct tcp_md5sig_key *key, u8 tclass,
 932			    __be32 label)
 933{
 934	tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
 935			     tclass, label);
 936}
 937
 938static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
 939{
 940	struct inet_timewait_sock *tw = inet_twsk(sk);
 941	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
 942
 943	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
 944			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
 945			tcp_time_stamp + tcptw->tw_ts_offset,
 946			tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
 947			tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));
 948
 949	inet_twsk_put(tw);
 950}
 951
 952static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
 953				  struct request_sock *req)
 954{
 955	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
 956	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
 957	 */
 958	/* RFC 7323 2.3
 959	 * The window field (SEG.WND) of every outgoing segment, with the
 960	 * exception of <SYN> segments, MUST be right-shifted by
 961	 * Rcv.Wind.Shift bits:
 962	 */
 963	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
 964			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
 965			tcp_rsk(req)->rcv_nxt,
 966			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
 967			tcp_time_stamp + tcp_rsk(req)->ts_off,
 968			req->ts_recent, sk->sk_bound_dev_if,
 969			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
 970			0, 0);
 971}
 972
 973
 974static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
 975{
 976#ifdef CONFIG_SYN_COOKIES
 977	const struct tcphdr *th = tcp_hdr(skb);
 978
 979	if (!th->syn)
 980		sk = cookie_v6_check(sk, skb);
 981#endif
 982	return sk;
 983}
 984
 985static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 986{
 987	if (skb->protocol == htons(ETH_P_IP))
 988		return tcp_v4_conn_request(sk, skb);
 989
 990	if (!ipv6_unicast_destination(skb))
 991		goto drop;
 992
 993	return tcp_conn_request(&tcp6_request_sock_ops,
 994				&tcp_request_sock_ipv6_ops, sk, skb);
 995
 996drop:
 997	tcp_listendrop(sk);
 998	return 0; /* don't send reset */
 999}
1000
1001static void tcp_v6_restore_cb(struct sk_buff *skb)
1002{
1003	/* We need to move header back to the beginning if xfrm6_policy_check()
1004	 * and tcp_v6_fill_cb() are going to be called again.
1005	 * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
1006	 */
1007	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
1008		sizeof(struct inet6_skb_parm));
1009}
1010
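/* Create the child socket once the 3-way handshake completes. For a
 * v4-mapped peer the work is delegated to tcp_v4_syn_recv_sock() and the
 * child is switched to the ipv6_mapped af_ops; otherwise the IPv6 route,
 * options and (optionally) MD5 key are copied onto the new socket.
 */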
1011static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
1012					 struct request_sock *req,
1013					 struct dst_entry *dst,
1014					 struct request_sock *req_unhash,
1015					 bool *own_req)
1016{
1017	struct inet_request_sock *ireq;
1018	struct ipv6_pinfo *newnp;
1019	const struct ipv6_pinfo *np = inet6_sk(sk);
1020	struct ipv6_txoptions *opt;
1021	struct tcp6_sock *newtcp6sk;
1022	struct inet_sock *newinet;
1023	struct tcp_sock *newtp;
1024	struct sock *newsk;
1025#ifdef CONFIG_TCP_MD5SIG
1026	struct tcp_md5sig_key *key;
1027#endif
1028	struct flowi6 fl6;
1029
1030	if (skb->protocol == htons(ETH_P_IP)) {
1031		/*
1032		 *	v6 mapped
1033		 */
1034
1035		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
1036					     req_unhash, own_req);
1037
1038		if (!newsk)
1039			return NULL;
1040
1041		newtcp6sk = (struct tcp6_sock *)newsk;
1042		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1043
1044		newinet = inet_sk(newsk);
1045		newnp = inet6_sk(newsk);
1046		newtp = tcp_sk(newsk);
1047
1048		memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1049
1050		newnp->saddr = newsk->sk_v6_rcv_saddr;
1051
1052		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1053		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
1054#ifdef CONFIG_TCP_MD5SIG
1055		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1056#endif
1057
1058		newnp->ipv6_ac_list = NULL;
1059		newnp->ipv6_fl_list = NULL;
1060		newnp->pktoptions  = NULL;
1061		newnp->opt	   = NULL;
1062		newnp->mcast_oif   = tcp_v6_iif(skb);
1063		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
1064		newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
1065		if (np->repflow)
1066			newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
1067
1068		/*
1069		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1070		 * here, tcp_create_openreq_child now does this for us, see the comment in
1071		 * that function for the gory details. -acme
1072		 */
1073
1074		/* This is a tricky place. Until this moment the IPv4 tcp
1075		   code worked with the IPv6 icsk.icsk_af_ops.
1076		   Sync it now.
1077		 */
1078		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
1079
1080		return newsk;
1081	}
1082
1083	ireq = inet_rsk(req);
1084
1085	if (sk_acceptq_is_full(sk))
1086		goto out_overflow;
1087
1088	if (!dst) {
1089		dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
1090		if (!dst)
1091			goto out;
1092	}
1093
1094	newsk = tcp_create_openreq_child(sk, req, skb);
1095	if (!newsk)
1096		goto out_nonewsk;
1097
1098	/*
1099	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1100	 * count here, tcp_create_openreq_child now does this for us, see the
1101	 * comment in that function for the gory details. -acme
1102	 */
1103
1104	newsk->sk_gso_type = SKB_GSO_TCPV6;
1105	ip6_dst_store(newsk, dst, NULL, NULL);
1106	inet6_sk_rx_dst_set(newsk, skb);
1107
1108	newtcp6sk = (struct tcp6_sock *)newsk;
1109	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1110
1111	newtp = tcp_sk(newsk);
1112	newinet = inet_sk(newsk);
1113	newnp = inet6_sk(newsk);
1114
1115	memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1116
1117	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
1118	newnp->saddr = ireq->ir_v6_loc_addr;
1119	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
1120	newsk->sk_bound_dev_if = ireq->ir_iif;
1121
1122	/* Now IPv6 options...
1123
1124	   First: no IPv4 options.
1125	 */
1126	newinet->inet_opt = NULL;
1127	newnp->ipv6_ac_list = NULL;
1128	newnp->ipv6_fl_list = NULL;
1129
1130	/* Clone RX bits */
1131	newnp->rxopt.all = np->rxopt.all;
1132
1133	newnp->pktoptions = NULL;
1134	newnp->opt	  = NULL;
1135	newnp->mcast_oif  = tcp_v6_iif(skb);
1136	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1137	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
1138	if (np->repflow)
1139		newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
1140
1141	/* Clone native IPv6 options from the listening socket (if any).
1142
1143	   Yes, keeping a reference count would be much more clever,
1144	   but we do one more thing here: reattach optmem
1145	   to newsk.
1146	 */
1147	opt = ireq->ipv6_opt;
1148	if (!opt)
1149		opt = rcu_dereference(np->opt);
1150	if (opt) {
1151		opt = ipv6_dup_options(newsk, opt);
1152		RCU_INIT_POINTER(newnp->opt, opt);
1153	}
1154	inet_csk(newsk)->icsk_ext_hdr_len = 0;
1155	if (opt)
1156		inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
1157						    opt->opt_flen;
1158
1159	tcp_ca_openreq_child(newsk, dst);
1160
1161	tcp_sync_mss(newsk, dst_mtu(dst));
1162	newtp->advmss = dst_metric_advmss(dst);
1163	if (tcp_sk(sk)->rx_opt.user_mss &&
1164	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1165		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1166
1167	tcp_initialize_rcv_mss(newsk);
1168
1169	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
1170	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
1171
1172#ifdef CONFIG_TCP_MD5SIG
1173	/* Copy over the MD5 key from the original socket */
1174	key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
1175	if (key) {
1176		/* We're using one, so create a matching key
1177		 * on the newsk structure. If we fail to get
1178		 * memory, then we end up not copying the key
1179		 * across. Shucks.
1180		 */
1181		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
1182			       AF_INET6, key->key, key->keylen,
1183			       sk_gfp_mask(sk, GFP_ATOMIC));
1184	}
1185#endif
1186
1187	if (__inet_inherit_port(sk, newsk) < 0) {
1188		inet_csk_prepare_forced_close(newsk);
1189		tcp_done(newsk);
1190		goto out;
1191	}
1192	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
1193	if (*own_req) {
1194		tcp_move_syn(newtp, req);
1195
1196		/* Clone pktoptions received with SYN, if we own the req */
1197		if (ireq->pktopts) {
1198			newnp->pktoptions = skb_clone(ireq->pktopts,
1199						      sk_gfp_mask(sk, GFP_ATOMIC));
1200			consume_skb(ireq->pktopts);
1201			ireq->pktopts = NULL;
1202			if (newnp->pktoptions) {
1203				tcp_v6_restore_cb(newnp->pktoptions);
1204				skb_set_owner_r(newnp->pktoptions, newsk);
1205			}
1206		}
1207	}
1208
1209	return newsk;
1210
1211out_overflow:
1212	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1213out_nonewsk:
1214	dst_release(dst);
1215out:
1216	tcp_listendrop(sk);
1217	return NULL;
1218}
1219
1220/* The socket must have its spinlock held when we get
1221 * here, unless it is a TCP_LISTEN socket.
1222 *
1223 * We have a potential double-lock case here, so even when
1224 * doing backlog processing we use the BH locking scheme.
1225 * This is because we cannot sleep with the original spinlock
1226 * held.
1227 */
1228static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1229{
1230	struct ipv6_pinfo *np = inet6_sk(sk);
1231	struct tcp_sock *tp;
1232	struct sk_buff *opt_skb = NULL;
1233
1234	/* Imagine: the socket is IPv6, but an IPv4 packet arrives,
1235	   goes to the IPv4 receive handler and is backlogged.
1236	   From the backlog it always ends up here. Kerboom...
1237	   Fortunately, tcp_rcv_established and rcv_established
1238	   handle them correctly, but that is not the case with
1239	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
1240	 */
1241
1242	if (skb->protocol == htons(ETH_P_IP))
1243		return tcp_v4_do_rcv(sk, skb);
1244
1245	if (tcp_filter(sk, skb))
1246		goto discard;
1247
1248	/*
1249	 *	socket locking is here for SMP purposes as backlog rcv
1250	 *	is currently called with bh processing disabled.
1251	 */
1252
1253	/* Do Stevens' IPV6_PKTOPTIONS.
1254
1255	   Yes, guys, it is the only place in our code where we
1256	   can make it not affect IPv4.
1257	   The rest of the code is protocol independent,
1258	   and I do not like the idea of uglifying IPv4.
1259
1260	   Actually, the whole idea behind IPV6_PKTOPTIONS
1261	   does not look very well thought out. For now we latch
1262	   the options received in the last packet enqueued
1263	   by tcp. Feel free to propose a better solution.
1264					       --ANK (980728)
1265	 */
1266	if (np->rxopt.all)
1267		opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));
1268
1269	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1270		struct dst_entry *dst = sk->sk_rx_dst;
1271
1272		sock_rps_save_rxhash(sk, skb);
1273		sk_mark_napi_id(sk, skb);
1274		if (dst) {
1275			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1276			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
1277				dst_release(dst);
1278				sk->sk_rx_dst = NULL;
1279			}
1280		}
1281
1282		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
1283		if (opt_skb)
1284			goto ipv6_pktoptions;
1285		return 0;
1286	}
1287
1288	if (tcp_checksum_complete(skb))
1289		goto csum_err;
1290
1291	if (sk->sk_state == TCP_LISTEN) {
1292		struct sock *nsk = tcp_v6_cookie_check(sk, skb);
1293
1294		if (!nsk)
1295			goto discard;
1296
1297		if (nsk != sk) {
1298			sock_rps_save_rxhash(nsk, skb);
1299			sk_mark_napi_id(nsk, skb);
1300			if (tcp_child_process(sk, nsk, skb))
1301				goto reset;
1302			if (opt_skb)
1303				__kfree_skb(opt_skb);
1304			return 0;
1305		}
1306	} else
1307		sock_rps_save_rxhash(sk, skb);
1308
1309	if (tcp_rcv_state_process(sk, skb))
1310		goto reset;
1311	if (opt_skb)
1312		goto ipv6_pktoptions;
1313	return 0;
1314
1315reset:
1316	tcp_v6_send_reset(sk, skb);
1317discard:
1318	if (opt_skb)
1319		__kfree_skb(opt_skb);
1320	kfree_skb(skb);
1321	return 0;
1322csum_err:
1323	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1324	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
1325	goto discard;
1326
1327
1328ipv6_pktoptions:
1329	/* What is this about?
1330
1331	   1. skb was enqueued by tcp.
1332	   2. skb is added to the tail of the read queue, not out of order.
1333	   3. The socket is not in a passive state.
1334	   4. Finally, it really contains options which the user wants to receive.
1335	 */
1336	tp = tcp_sk(sk);
1337	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1338	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
1339		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
1340			np->mcast_oif = tcp_v6_iif(opt_skb);
1341		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1342			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
1343		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
1344			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
1345		if (np->repflow)
1346			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
1347		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
1348			skb_set_owner_r(opt_skb, sk);
1349			tcp_v6_restore_cb(opt_skb);
1350			opt_skb = xchg(&np->pktoptions, opt_skb);
1351		} else {
1352			__kfree_skb(opt_skb);
1353			opt_skb = xchg(&np->pktoptions, NULL);
1354		}
1355	}
1356
1357	kfree_skb(opt_skb);
1358	return 0;
1359}
1360
1361static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
1362			   const struct tcphdr *th)
1363{
1364	/* This is tricky: we move IP6CB at its correct location into
1365	 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
1366	 * _decode_session6() uses IP6CB().
1367	 * barrier() makes sure compiler won't play aliasing games.
1368	 */
1369	memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
1370		sizeof(struct inet6_skb_parm));
1371	barrier();
1372
1373	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1374	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1375				    skb->len - th->doff*4);
1376	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1377	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1378	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1379	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1380	TCP_SKB_CB(skb)->sacked = 0;
1381}
1382
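/* Main receive path, registered as the inet6 protocol handler for
 * IPPROTO_TCP: validate the TCP header and checksum, look up the owning
 * socket (established, request or timewait), and either process the
 * segment directly or queue it to the socket backlog when the owner
 * holds the lock.
 */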
1383static int tcp_v6_rcv(struct sk_buff *skb)
1384{
1385	const struct tcphdr *th;
1386	const struct ipv6hdr *hdr;
1387	bool refcounted;
1388	struct sock *sk;
1389	int ret;
1390	struct net *net = dev_net(skb->dev);
1391
1392	if (skb->pkt_type != PACKET_HOST)
1393		goto discard_it;
1394
1395	/*
1396	 *	Count it even if it's bad.
1397	 */
1398	__TCP_INC_STATS(net, TCP_MIB_INSEGS);
1399
1400	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1401		goto discard_it;
1402
1403	th = (const struct tcphdr *)skb->data;
1404
1405	if (unlikely(th->doff < sizeof(struct tcphdr)/4))
1406		goto bad_packet;
1407	if (!pskb_may_pull(skb, th->doff*4))
1408		goto discard_it;
1409
1410	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
1411		goto csum_error;
1412
1413	th = (const struct tcphdr *)skb->data;
1414	hdr = ipv6_hdr(skb);
1415
1416lookup:
1417	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
1418				th->source, th->dest, inet6_iif(skb),
1419				&refcounted);
1420	if (!sk)
1421		goto no_tcp_socket;
1422
1423process:
1424	if (sk->sk_state == TCP_TIME_WAIT)
1425		goto do_time_wait;
1426
1427	if (sk->sk_state == TCP_NEW_SYN_RECV) {
1428		struct request_sock *req = inet_reqsk(sk);
1429		struct sock *nsk;
1430
1431		sk = req->rsk_listener;
1432		tcp_v6_fill_cb(skb, hdr, th);
1433		if (tcp_v6_inbound_md5_hash(sk, skb)) {
1434			sk_drops_add(sk, skb);
1435			reqsk_put(req);
1436			goto discard_it;
1437		}
1438		if (unlikely(sk->sk_state != TCP_LISTEN)) {
1439			inet_csk_reqsk_queue_drop_and_put(sk, req);
1440			goto lookup;
1441		}
1442		sock_hold(sk);
1443		refcounted = true;
1444		nsk = tcp_check_req(sk, skb, req, false);
1445		if (!nsk) {
1446			reqsk_put(req);
1447			goto discard_and_relse;
1448		}
1449		if (nsk == sk) {
1450			reqsk_put(req);
1451			tcp_v6_restore_cb(skb);
1452		} else if (tcp_child_process(sk, nsk, skb)) {
1453			tcp_v6_send_reset(nsk, skb);
1454			goto discard_and_relse;
1455		} else {
1456			sock_put(sk);
1457			return 0;
1458		}
1459	}
1460	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
1461		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
1462		goto discard_and_relse;
1463	}
1464
1465	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1466		goto discard_and_relse;
1467
1468	tcp_v6_fill_cb(skb, hdr, th);
1469
1470	if (tcp_v6_inbound_md5_hash(sk, skb))
1471		goto discard_and_relse;
1472
1473	if (tcp_filter(sk, skb))
1474		goto discard_and_relse;
1475	th = (const struct tcphdr *)skb->data;
1476	hdr = ipv6_hdr(skb);
1477
1478	skb->dev = NULL;
1479
1480	if (sk->sk_state == TCP_LISTEN) {
1481		ret = tcp_v6_do_rcv(sk, skb);
1482		goto put_and_return;
1483	}
1484
1485	sk_incoming_cpu_update(sk);
1486
1487	bh_lock_sock_nested(sk);
1488	tcp_segs_in(tcp_sk(sk), skb);
1489	ret = 0;
1490	if (!sock_owned_by_user(sk)) {
1491		if (!tcp_prequeue(sk, skb))
1492			ret = tcp_v6_do_rcv(sk, skb);
1493	} else if (tcp_add_backlog(sk, skb)) {
1494		goto discard_and_relse;
1495	}
1496	bh_unlock_sock(sk);
1497
1498put_and_return:
1499	if (refcounted)
1500		sock_put(sk);
1501	return ret ? -1 : 0;
1502
1503no_tcp_socket:
1504	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1505		goto discard_it;
1506
1507	tcp_v6_fill_cb(skb, hdr, th);
1508
1509	if (tcp_checksum_complete(skb)) {
1510csum_error:
1511		__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
1512bad_packet:
1513		__TCP_INC_STATS(net, TCP_MIB_INERRS);
1514	} else {
1515		tcp_v6_send_reset(NULL, skb);
1516	}
1517
1518discard_it:
1519	kfree_skb(skb);
1520	return 0;
1521
1522discard_and_relse:
1523	sk_drops_add(sk, skb);
1524	if (refcounted)
1525		sock_put(sk);
1526	goto discard_it;
1527
1528do_time_wait:
1529	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1530		inet_twsk_put(inet_twsk(sk));
1531		goto discard_it;
1532	}
1533
1534	tcp_v6_fill_cb(skb, hdr, th);
1535
1536	if (tcp_checksum_complete(skb)) {
1537		inet_twsk_put(inet_twsk(sk));
1538		goto csum_error;
1539	}
1540
1541	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1542	case TCP_TW_SYN:
1543	{
1544		struct sock *sk2;
1545
1546		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
1547					    skb, __tcp_hdrlen(th),
1548					    &ipv6_hdr(skb)->saddr, th->source,
1549					    &ipv6_hdr(skb)->daddr,
1550					    ntohs(th->dest), tcp_v6_iif(skb));
1551		if (sk2) {
1552			struct inet_timewait_sock *tw = inet_twsk(sk);
1553			inet_twsk_deschedule_put(tw);
1554			sk = sk2;
1555			tcp_v6_restore_cb(skb);
1556			refcounted = false;
1557			goto process;
1558		}
1559		/* Fall through to ACK */
1560	}
1561	case TCP_TW_ACK:
1562		tcp_v6_timewait_ack(sk, skb);
1563		break;
1564	case TCP_TW_RST:
1565		tcp_v6_restore_cb(skb);
1566		tcp_v6_send_reset(sk, skb);
1567		inet_twsk_deschedule_put(inet_twsk(sk));
1568		goto discard_it;
1569	case TCP_TW_SUCCESS:
1570		;
1571	}
1572	goto discard_it;
1573}
1574
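/* Early demux: try to match an established socket before routing so its
 * cached receive dst can be reused for this skb.
 */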
1575static void tcp_v6_early_demux(struct sk_buff *skb)
1576{
1577	const struct ipv6hdr *hdr;
1578	const struct tcphdr *th;
1579	struct sock *sk;
1580
1581	if (skb->pkt_type != PACKET_HOST)
1582		return;
1583
1584	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1585		return;
1586
1587	hdr = ipv6_hdr(skb);
1588	th = tcp_hdr(skb);
1589
1590	if (th->doff < sizeof(struct tcphdr) / 4)
1591		return;
1592
1593	/* Note : We use inet6_iif() here, not tcp_v6_iif() */
1594	sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1595					&hdr->saddr, th->source,
1596					&hdr->daddr, ntohs(th->dest),
1597					inet6_iif(skb));
1598	if (sk) {
1599		skb->sk = sk;
1600		skb->destructor = sock_edemux;
1601		if (sk_fullsock(sk)) {
1602			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
1603
1604			if (dst)
1605				dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
1606			if (dst &&
1607			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1608				skb_dst_set_noref(skb, dst);
1609		}
1610	}
1611}
1612
1613static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1614	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
1615	.twsk_unique	= tcp_twsk_unique,
1616	.twsk_destructor = tcp_twsk_destructor,
1617};
1618
1619static const struct inet_connection_sock_af_ops ipv6_specific = {
1620	.queue_xmit	   = inet6_csk_xmit,
1621	.send_check	   = tcp_v6_send_check,
1622	.rebuild_header	   = inet6_sk_rebuild_header,
1623	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
1624	.conn_request	   = tcp_v6_conn_request,
1625	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
1626	.net_header_len	   = sizeof(struct ipv6hdr),
1627	.net_frag_header_len = sizeof(struct frag_hdr),
1628	.setsockopt	   = ipv6_setsockopt,
1629	.getsockopt	   = ipv6_getsockopt,
1630	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
1631	.sockaddr_len	   = sizeof(struct sockaddr_in6),
1632	.bind_conflict	   = inet6_csk_bind_conflict,
1633#ifdef CONFIG_COMPAT
1634	.compat_setsockopt = compat_ipv6_setsockopt,
1635	.compat_getsockopt = compat_ipv6_getsockopt,
1636#endif
1637	.mtu_reduced	   = tcp_v6_mtu_reduced,
1638};
1639
1640#ifdef CONFIG_TCP_MD5SIG
1641static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
1642	.md5_lookup	=	tcp_v6_md5_lookup,
1643	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
1644	.md5_parse	=	tcp_v6_parse_md5_keys,
1645};
1646#endif
1647
1648/*
1649 *	TCP over IPv4 via INET6 API
1650 */
1651static const struct inet_connection_sock_af_ops ipv6_mapped = {
1652	.queue_xmit	   = ip_queue_xmit,
1653	.send_check	   = tcp_v4_send_check,
1654	.rebuild_header	   = inet_sk_rebuild_header,
1655	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
1656	.conn_request	   = tcp_v6_conn_request,
1657	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
1658	.net_header_len	   = sizeof(struct iphdr),
1659	.setsockopt	   = ipv6_setsockopt,
1660	.getsockopt	   = ipv6_getsockopt,
1661	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
1662	.sockaddr_len	   = sizeof(struct sockaddr_in6),
1663	.bind_conflict	   = inet6_csk_bind_conflict,
1664#ifdef CONFIG_COMPAT
1665	.compat_setsockopt = compat_ipv6_setsockopt,
1666	.compat_getsockopt = compat_ipv6_getsockopt,
1667#endif
1668	.mtu_reduced	   = tcp_v4_mtu_reduced,
1669};
1670
1671#ifdef CONFIG_TCP_MD5SIG
1672static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
1673	.md5_lookup	=	tcp_v4_md5_lookup,
1674	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
1675	.md5_parse	=	tcp_v6_parse_md5_keys,
1676};
1677#endif
1678
1679/* NOTE: A lot of things set to zero explicitly by call to
1680 *       sk_alloc() so need not be done here.
1681 */
1682static int tcp_v6_init_sock(struct sock *sk)
1683{
1684	struct inet_connection_sock *icsk = inet_csk(sk);
1685
1686	tcp_init_sock(sk);
1687
1688	icsk->icsk_af_ops = &ipv6_specific;
1689
1690#ifdef CONFIG_TCP_MD5SIG
1691	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
1692#endif
1693
1694	return 0;
1695}
1696
1697static void tcp_v6_destroy_sock(struct sock *sk)
1698{
1699	tcp_v4_destroy_sock(sk);
1700	inet6_destroy_sock(sk);
1701}
1702
1703#ifdef CONFIG_PROC_FS
1704/* Proc filesystem TCPv6 sock list dumping. */
1705static void get_openreq6(struct seq_file *seq,
1706			 const struct request_sock *req, int i)
1707{
1708	long ttd = req->rsk_timer.expires - jiffies;
1709	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
1710	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;
1711
1712	if (ttd < 0)
1713		ttd = 0;
1714
1715	seq_printf(seq,
1716		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1717		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
1718		   i,
1719		   src->s6_addr32[0], src->s6_addr32[1],
1720		   src->s6_addr32[2], src->s6_addr32[3],
1721		   inet_rsk(req)->ir_num,
1722		   dest->s6_addr32[0], dest->s6_addr32[1],
1723		   dest->s6_addr32[2], dest->s6_addr32[3],
1724		   ntohs(inet_rsk(req)->ir_rmt_port),
1725		   TCP_SYN_RECV,
1726		   0, 0, /* could print option size, but that is af dependent. */
1727		   1,   /* timers active (only the expire timer) */
1728		   jiffies_to_clock_t(ttd),
1729		   req->num_timeout,
1730		   from_kuid_munged(seq_user_ns(seq),
1731				    sock_i_uid(req->rsk_listener)),
1732		   0,  /* non standard timer */
1733		   0, /* open_requests have no inode */
1734		   0, req);
1735}
1736
1737static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1738{
1739	const struct in6_addr *dest, *src;
1740	__u16 destp, srcp;
1741	int timer_active;
1742	unsigned long timer_expires;
1743	const struct inet_sock *inet = inet_sk(sp);
1744	const struct tcp_sock *tp = tcp_sk(sp);
1745	const struct inet_connection_sock *icsk = inet_csk(sp);
1746	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
1747	int rx_queue;
1748	int state;
1749
1750	dest  = &sp->sk_v6_daddr;
1751	src   = &sp->sk_v6_rcv_saddr;
1752	destp = ntohs(inet->inet_dport);
1753	srcp  = ntohs(inet->inet_sport);
1754
1755	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
1756	    icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
1757	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
1758		timer_active	= 1;
1759		timer_expires	= icsk->icsk_timeout;
1760	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1761		timer_active	= 4;
1762		timer_expires	= icsk->icsk_timeout;
1763	} else if (timer_pending(&sp->sk_timer)) {
1764		timer_active	= 2;
1765		timer_expires	= sp->sk_timer.expires;
1766	} else {
1767		timer_active	= 0;
1768		timer_expires = jiffies;
1769	}
1770
1771	state = sk_state_load(sp);
1772	if (state == TCP_LISTEN)
1773		rx_queue = sp->sk_ack_backlog;
1774	else
1775		/* Because we don't lock the socket,
1776		 * we might find a transient negative value.
1777		 */
1778		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
1779
1780	seq_printf(seq,
1781		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1782		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
1783		   i,
1784		   src->s6_addr32[0], src->s6_addr32[1],
1785		   src->s6_addr32[2], src->s6_addr32[3], srcp,
1786		   dest->s6_addr32[0], dest->s6_addr32[1],
1787		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
1788		   state,
1789		   tp->write_seq - tp->snd_una,
1790		   rx_queue,
1791		   timer_active,
1792		   jiffies_delta_to_clock_t(timer_expires - jiffies),
1793		   icsk->icsk_retransmits,
1794		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
1795		   icsk->icsk_probes_out,
1796		   sock_i_ino(sp),
1797		   atomic_read(&sp->sk_refcnt), sp,
1798		   jiffies_to_clock_t(icsk->icsk_rto),
1799		   jiffies_to_clock_t(icsk->icsk_ack.ato),
1800		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
1801		   tp->snd_cwnd,
1802		   state == TCP_LISTEN ?
1803			fastopenq->max_qlen :
1804			(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
1805		   );
1806}
1807
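/* Format one /proc/net/tcp6 row for a TIME_WAIT minisock.  The substate is
 * reported as the socket state, the timer column is fixed at 3 for the
 * TIME_WAIT timer, and the queue, uid and inode columns are 0 because the
 * minisock keeps none of that information.
 */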
1808static void get_timewait6_sock(struct seq_file *seq,
1809			       struct inet_timewait_sock *tw, int i)
1810{
1811	long delta = tw->tw_timer.expires - jiffies;
1812	const struct in6_addr *dest, *src;
1813	__u16 destp, srcp;
1814
1815	dest = &tw->tw_v6_daddr;
1816	src  = &tw->tw_v6_rcv_saddr;
1817	destp = ntohs(tw->tw_dport);
1818	srcp  = ntohs(tw->tw_sport);
1819
1820	seq_printf(seq,
1821		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1822		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
1823		   i,
1824		   src->s6_addr32[0], src->s6_addr32[1],
1825		   src->s6_addr32[2], src->s6_addr32[3], srcp,
1826		   dest->s6_addr32[0], dest->s6_addr32[1],
1827		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
1828		   tw->tw_substate, 0, 0,
1829		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
1830		   atomic_read(&tw->tw_refcnt), tw);
1831}
1832
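/* seq_file ->show callback for /proc/net/tcp6.  The first invocation sees
 * SEQ_START_TOKEN and emits the header line; subsequent entries dispatch on
 * sk_state, since TIME_WAIT and NEW_SYN_RECV entries are minisocks rather
 * than full tcp_socks.
 */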
1833static int tcp6_seq_show(struct seq_file *seq, void *v)
1834{
1835	struct tcp_iter_state *st;
1836	struct sock *sk = v;
1837
1838	if (v == SEQ_START_TOKEN) {
1839		seq_puts(seq,
1840			 "  sl  "
1841			 "local_address                         "
1842			 "remote_address                        "
1843			 "st tx_queue rx_queue tr tm->when retrnsmt"
1844			 "   uid  timeout inode\n");
1845		goto out;
1846	}
1847	st = seq->private;
1848
1849	if (sk->sk_state == TCP_TIME_WAIT)
1850		get_timewait6_sock(seq, v, st->num);
1851	else if (sk->sk_state == TCP_NEW_SYN_RECV)
1852		get_openreq6(seq, v, st->num);
1853	else
1854		get_tcp6_sock(seq, v, st->num);
1855out:
1856	return 0;
1857}
1858
1859static const struct file_operations tcp6_afinfo_seq_fops = {
1860	.owner   = THIS_MODULE,
1861	.open    = tcp_seq_open,
1862	.read    = seq_read,
1863	.llseek  = seq_lseek,
1864	.release = seq_release_net
1865};
1866
1867static struct tcp_seq_afinfo tcp6_seq_afinfo = {
1868	.name		= "tcp6",
1869	.family		= AF_INET6,
1870	.seq_fops	= &tcp6_afinfo_seq_fops,
1871	.seq_ops	= {
1872		.show		= tcp6_seq_show,
1873	},
1874};
1875
1876int __net_init tcp6_proc_init(struct net *net)
1877{
1878	return tcp_proc_register(net, &tcp6_seq_afinfo);
1879}
1880
1881void tcp6_proc_exit(struct net *net)
1882{
1883	tcp_proc_unregister(net, &tcp6_seq_afinfo);
1884}
1885#endif
1886
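/* Protocol method table for TCP over IPv6.  The socket core dispatches
 * through these hooks; most of them reuse the address-family independent
 * tcp_* helpers, while connect, backlog_rcv and hashing use the
 * IPv6-specific implementations.
 */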
1887struct proto tcpv6_prot = {
1888	.name			= "TCPv6",
1889	.owner			= THIS_MODULE,
1890	.close			= tcp_close,
1891	.connect		= tcp_v6_connect,
1892	.disconnect		= tcp_disconnect,
1893	.accept			= inet_csk_accept,
1894	.ioctl			= tcp_ioctl,
1895	.init			= tcp_v6_init_sock,
1896	.destroy		= tcp_v6_destroy_sock,
1897	.shutdown		= tcp_shutdown,
1898	.setsockopt		= tcp_setsockopt,
1899	.getsockopt		= tcp_getsockopt,
1900	.recvmsg		= tcp_recvmsg,
1901	.sendmsg		= tcp_sendmsg,
1902	.sendpage		= tcp_sendpage,
1903	.backlog_rcv		= tcp_v6_do_rcv,
1904	.release_cb		= tcp_release_cb,
1905	.hash			= inet6_hash,
1906	.unhash			= inet_unhash,
1907	.get_port		= inet_csk_get_port,
1908	.enter_memory_pressure	= tcp_enter_memory_pressure,
1909	.stream_memory_free	= tcp_stream_memory_free,
1910	.sockets_allocated	= &tcp_sockets_allocated,
1911	.memory_allocated	= &tcp_memory_allocated,
1912	.memory_pressure	= &tcp_memory_pressure,
1913	.orphan_count		= &tcp_orphan_count,
1914	.sysctl_mem		= sysctl_tcp_mem,
1915	.sysctl_wmem		= sysctl_tcp_wmem,
1916	.sysctl_rmem		= sysctl_tcp_rmem,
1917	.max_header		= MAX_TCP_HEADER,
1918	.obj_size		= sizeof(struct tcp6_sock),
1919	.slab_flags		= SLAB_DESTROY_BY_RCU,
1920	.twsk_prot		= &tcp6_timewait_sock_ops,
1921	.rsk_prot		= &tcp6_request_sock_ops,
1922	.h.hashinfo		= &tcp_hashinfo,
1923	.no_autobind		= true,
1924#ifdef CONFIG_COMPAT
1925	.compat_setsockopt	= compat_tcp_setsockopt,
1926	.compat_getsockopt	= compat_tcp_getsockopt,
1927#endif
1928	.diag_destroy		= tcp_abort,
1929};
1930
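/* Handlers the IPv6 input path invokes for IPPROTO_TCP segments.
 * INET6_PROTO_NOPOLICY defers the xfrm policy check to TCP itself and
 * INET6_PROTO_FINAL marks this as a terminal (upper-layer) protocol.
 */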
1931static const struct inet6_protocol tcpv6_protocol = {
1932	.early_demux	=	tcp_v6_early_demux,
1933	.handler	=	tcp_v6_rcv,
1934	.err_handler	=	tcp_v6_err,
1935	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
1936};
1937
1938static struct inet_protosw tcpv6_protosw = {
1939	.type		=	SOCK_STREAM,
1940	.protocol	=	IPPROTO_TCP,
1941	.prot		=	&tcpv6_prot,
1942	.ops		=	&inet6_stream_ops,
1943	.flags		=	INET_PROTOSW_PERMANENT |
1944				INET_PROTOSW_ICSK,
1945};
1946
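/* Per-namespace setup: create the kernel control socket used when TCP needs
 * to send packets (e.g. RSTs) that are not tied to a full socket, and purge
 * IPv6 timewait sockets when a batch of namespaces is torn down.
 */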
1947static int __net_init tcpv6_net_init(struct net *net)
1948{
1949	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
1950				    SOCK_RAW, IPPROTO_TCP, net);
1951}
1952
1953static void __net_exit tcpv6_net_exit(struct net *net)
1954{
1955	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
1956}
1957
1958static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
1959{
1960	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
1961}
1962
1963static struct pernet_operations tcpv6_net_ops = {
1964	.init	    = tcpv6_net_init,
1965	.exit	    = tcpv6_net_exit,
1966	.exit_batch = tcpv6_net_exit_batch,
1967};
1968
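/* Module init: register the IPPROTO_TCP handler with the IPv6 stack, then
 * the SOCK_STREAM protosw so that socket(AF_INET6, SOCK_STREAM, ...) maps
 * to tcpv6_prot, and finally the pernet operations.  Each step is unwound
 * in reverse order on failure.
 */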
1969int __init tcpv6_init(void)
1970{
1971	int ret;
1972
1973	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
1974	if (ret)
1975		goto out;
1976
1977	/* register inet6 protocol */
1978	ret = inet6_register_protosw(&tcpv6_protosw);
1979	if (ret)
1980		goto out_tcpv6_protocol;
1981
1982	ret = register_pernet_subsys(&tcpv6_net_ops);
1983	if (ret)
1984		goto out_tcpv6_protosw;
1985out:
1986	return ret;
1987
1988out_tcpv6_protosw:
1989	inet6_unregister_protosw(&tcpv6_protosw);
1990out_tcpv6_protocol:
1991	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
1992	goto out;
1993}
1994
1995void tcpv6_exit(void)
1996{
1997	unregister_pernet_subsys(&tcpv6_net_ops);
1998	inet6_unregister_protosw(&tcpv6_protosw);
1999	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2000}