   1/*
   2 *	TCP over IPv6
   3 *	Linux INET6 implementation
   4 *
   5 *	Authors:
   6 *	Pedro Roque		<roque@di.fc.ul.pt>
   7 *
   8 *	Based on:
   9 *	linux/net/ipv4/tcp.c
  10 *	linux/net/ipv4/tcp_input.c
  11 *	linux/net/ipv4/tcp_output.c
  12 *
  13 *	Fixes:
  14 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
  15 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
  16 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
  17 *					a single port at the same time.
  18 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
  19 *
  20 *	This program is free software; you can redistribute it and/or
  21 *      modify it under the terms of the GNU General Public License
  22 *      as published by the Free Software Foundation; either version
  23 *      2 of the License, or (at your option) any later version.
  24 */
  25
  26#include <linux/bottom_half.h>
  27#include <linux/module.h>
  28#include <linux/errno.h>
  29#include <linux/types.h>
  30#include <linux/socket.h>
  31#include <linux/sockios.h>
  32#include <linux/net.h>
  33#include <linux/jiffies.h>
  34#include <linux/in.h>
  35#include <linux/in6.h>
  36#include <linux/netdevice.h>
  37#include <linux/init.h>
  38#include <linux/jhash.h>
  39#include <linux/ipsec.h>
  40#include <linux/times.h>
  41#include <linux/slab.h>
  42#include <linux/uaccess.h>
  43#include <linux/ipv6.h>
  44#include <linux/icmpv6.h>
  45#include <linux/random.h>
  46
  47#include <net/tcp.h>
  48#include <net/ndisc.h>
  49#include <net/inet6_hashtables.h>
  50#include <net/inet6_connection_sock.h>
  51#include <net/ipv6.h>
  52#include <net/transp_v6.h>
  53#include <net/addrconf.h>
  54#include <net/ip6_route.h>
  55#include <net/ip6_checksum.h>
  56#include <net/inet_ecn.h>
  57#include <net/protocol.h>
  58#include <net/xfrm.h>
  59#include <net/snmp.h>
  60#include <net/dsfield.h>
  61#include <net/timewait_sock.h>
  62#include <net/inet_common.h>
  63#include <net/secure_seq.h>
  64#include <net/busy_poll.h>
  65
  66#include <linux/proc_fs.h>
  67#include <linux/seq_file.h>
  68
  69#include <crypto/hash.h>
  70#include <linux/scatterlist.h>
  71
  72static void	tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
  73static void	tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
  74				      struct request_sock *req);
  75
  76static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
  77
  78static const struct inet_connection_sock_af_ops ipv6_mapped;
  79static const struct inet_connection_sock_af_ops ipv6_specific;
  80#ifdef CONFIG_TCP_MD5SIG
  81static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
  82static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
  83#else
  84static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
  85						   const struct in6_addr *addr)
  86{
  87	return NULL;
  88}
  89#endif
  90
  91static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
  92{
  93	struct dst_entry *dst = skb_dst(skb);
  94
  95	if (dst && dst_hold_safe(dst)) {
  96		const struct rt6_info *rt = (const struct rt6_info *)dst;
  97
  98		sk->sk_rx_dst = dst;
  99		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
 100		inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
 101	}
 102}
 103
 104static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
 105{
 106	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
 107					    ipv6_hdr(skb)->saddr.s6_addr32,
 108					    tcp_hdr(skb)->dest,
 109					    tcp_hdr(skb)->source);
 110}
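/*
 * Editor's sketch (illustrative, not kernel code): conceptually the
 * secure ISN computed above follows the RFC 6528 recipe
 *
 *	ISN = M + F(saddr, daddr, sport, dport, secretkey)
 *
 * where M is a fine-grained clock and F is a keyed hash, keeping
 * sequence numbers monotonic per 4-tuple while unpredictable to
 * off-path attackers. secure_tcpv6_sequence_number() provides M + F
 * for the IPv6 4-tuple shown above.
 */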
 111
 112static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 113			  int addr_len)
 114{
 115	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
 116	struct inet_sock *inet = inet_sk(sk);
 117	struct inet_connection_sock *icsk = inet_csk(sk);
 118	struct ipv6_pinfo *np = inet6_sk(sk);
 119	struct tcp_sock *tp = tcp_sk(sk);
 120	struct in6_addr *saddr = NULL, *final_p, final;
 121	struct ipv6_txoptions *opt;
 122	struct flowi6 fl6;
 123	struct dst_entry *dst;
 124	int addr_type;
 125	int err;
 126
 127	if (addr_len < SIN6_LEN_RFC2133)
 128		return -EINVAL;
 129
 130	if (usin->sin6_family != AF_INET6)
 131		return -EAFNOSUPPORT;
 132
 133	memset(&fl6, 0, sizeof(fl6));
 134
 135	if (np->sndflow) {
 136		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
 137		IP6_ECN_flow_init(fl6.flowlabel);
 138		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
 139			struct ip6_flowlabel *flowlabel;
 140			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
 141			if (!flowlabel)
 142				return -EINVAL;
 143			fl6_sock_release(flowlabel);
 144		}
 145	}
 146
 147	/*
 148	 *	connect() to INADDR_ANY means loopback (BSD'ism).
 149	 */
 150
 151	if (ipv6_addr_any(&usin->sin6_addr))
 152		usin->sin6_addr.s6_addr[15] = 0x1;
 153
 154	addr_type = ipv6_addr_type(&usin->sin6_addr);
 155
 156	if (addr_type & IPV6_ADDR_MULTICAST)
 157		return -ENETUNREACH;
 158
 159	if (addr_type&IPV6_ADDR_LINKLOCAL) {
 160		if (addr_len >= sizeof(struct sockaddr_in6) &&
 161		    usin->sin6_scope_id) {
 162			/* If interface is set while binding, indices
 163			 * must coincide.
 164			 */
 165			if (sk->sk_bound_dev_if &&
 166			    sk->sk_bound_dev_if != usin->sin6_scope_id)
 167				return -EINVAL;
 168
 169			sk->sk_bound_dev_if = usin->sin6_scope_id;
 170		}
 171
 172		/* Connecting to a link-local address requires an interface. */
 173		if (!sk->sk_bound_dev_if)
 174			return -EINVAL;
 175	}
 176
 177	if (tp->rx_opt.ts_recent_stamp &&
 178	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
 179		tp->rx_opt.ts_recent = 0;
 180		tp->rx_opt.ts_recent_stamp = 0;
 181		tp->write_seq = 0;
 182	}
 183
 184	sk->sk_v6_daddr = usin->sin6_addr;
 185	np->flow_label = fl6.flowlabel;
 186
 187	/*
 188	 *	TCP over IPv4
 189	 */
 190
 191	if (addr_type == IPV6_ADDR_MAPPED) {
 192		u32 exthdrlen = icsk->icsk_ext_hdr_len;
 193		struct sockaddr_in sin;
 194
 195		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
 196
 197		if (__ipv6_only_sock(sk))
 198			return -ENETUNREACH;
 199
 200		sin.sin_family = AF_INET;
 201		sin.sin_port = usin->sin6_port;
 202		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
 203
 204		icsk->icsk_af_ops = &ipv6_mapped;
 205		sk->sk_backlog_rcv = tcp_v4_do_rcv;
 206#ifdef CONFIG_TCP_MD5SIG
 207		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
 208#endif
 209
 210		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
 211
 212		if (err) {
 213			icsk->icsk_ext_hdr_len = exthdrlen;
 214			icsk->icsk_af_ops = &ipv6_specific;
 215			sk->sk_backlog_rcv = tcp_v6_do_rcv;
 216#ifdef CONFIG_TCP_MD5SIG
 217			tp->af_specific = &tcp_sock_ipv6_specific;
 218#endif
 219			goto failure;
 220		}
 221		np->saddr = sk->sk_v6_rcv_saddr;
 222
 223		return err;
 224	}
 225
 226	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
 227		saddr = &sk->sk_v6_rcv_saddr;
 228
 229	fl6.flowi6_proto = IPPROTO_TCP;
 230	fl6.daddr = sk->sk_v6_daddr;
 231	fl6.saddr = saddr ? *saddr : np->saddr;
 232	fl6.flowi6_oif = sk->sk_bound_dev_if;
 233	fl6.flowi6_mark = sk->sk_mark;
 234	fl6.fl6_dport = usin->sin6_port;
 235	fl6.fl6_sport = inet->inet_sport;
 236
 237	opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
 238	final_p = fl6_update_dst(&fl6, opt, &final);
 239
 240	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
 241
 242	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
 243	if (IS_ERR(dst)) {
 244		err = PTR_ERR(dst);
 245		goto failure;
 246	}
 247
 248	if (!saddr) {
 249		saddr = &fl6.saddr;
 250		sk->sk_v6_rcv_saddr = *saddr;
 251	}
 252
 253	/* set the source address */
 254	np->saddr = *saddr;
 255	inet->inet_rcv_saddr = LOOPBACK4_IPV6;
 256
 257	sk->sk_gso_type = SKB_GSO_TCPV6;
 258	ip6_dst_store(sk, dst, NULL, NULL);
 259
 260	if (tcp_death_row.sysctl_tw_recycle &&
 261	    !tp->rx_opt.ts_recent_stamp &&
 262	    ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr))
 263		tcp_fetch_timewait_stamp(sk, dst);
 264
 265	icsk->icsk_ext_hdr_len = 0;
 266	if (opt)
 267		icsk->icsk_ext_hdr_len = opt->opt_flen +
 268					 opt->opt_nflen;
 269
 270	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
 271
 272	inet->inet_dport = usin->sin6_port;
 273
 274	tcp_set_state(sk, TCP_SYN_SENT);
 275	err = inet6_hash_connect(&tcp_death_row, sk);
 276	if (err)
 277		goto late_failure;
 278
 279	sk_set_txhash(sk);
 280
 281	if (!tp->write_seq && likely(!tp->repair))
 282		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
 283							     sk->sk_v6_daddr.s6_addr32,
 284							     inet->inet_sport,
 285							     inet->inet_dport);
 286
 287	err = tcp_connect(sk);
 288	if (err)
 289		goto late_failure;
 290
 291	return 0;
 292
 293late_failure:
 294	tcp_set_state(sk, TCP_CLOSE);
 295	__sk_dst_reset(sk);
 296failure:
 297	inet->inet_dport = 0;
 298	sk->sk_route_caps = 0;
 299	return err;
 300}
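/*
 * Editor's sketch (userspace, not kernel code): the IPV6_ADDR_MAPPED
 * branch above is what an application reaches when it connects an
 * AF_INET6 socket to an IPv4-mapped address, e.g.:
 *
 *	#include <arpa/inet.h>
 *	#include <sys/socket.h>
 *
 *	int fd = socket(AF_INET6, SOCK_STREAM, 0);
 *	struct sockaddr_in6 a = { .sin6_family = AF_INET6,
 *				  .sin6_port   = htons(80) };
 *	inet_pton(AF_INET6, "::ffff:192.0.2.1", &a.sin6_addr);
 *	connect(fd, (struct sockaddr *)&a, sizeof(a));
 *
 * The socket is then switched to the ipv6_mapped ops and the connect is
 * handed to tcp_v4_connect(); with IPV6_V6ONLY set, the same call fails
 * with ENETUNREACH, matching the __ipv6_only_sock() check above.
 */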
 301
 302static void tcp_v6_mtu_reduced(struct sock *sk)
 303{
 304	struct dst_entry *dst;
 305
 306	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
 307		return;
 308
 309	dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
 310	if (!dst)
 311		return;
 312
 313	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
 314		tcp_sync_mss(sk, dst_mtu(dst));
 315		tcp_simple_retransmit(sk);
 316	}
 317}
 318
 319static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 320		u8 type, u8 code, int offset, __be32 info)
 321{
 322	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
 323	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
 324	struct net *net = dev_net(skb->dev);
 325	struct request_sock *fastopen;
 326	struct ipv6_pinfo *np;
 327	struct tcp_sock *tp;
 328	__u32 seq, snd_una;
 329	struct sock *sk;
 330	bool fatal;
 331	int err;
 332
 333	sk = __inet6_lookup_established(net, &tcp_hashinfo,
 334					&hdr->daddr, th->dest,
 335					&hdr->saddr, ntohs(th->source),
 336					skb->dev->ifindex);
 337
 338	if (!sk) {
 339		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
 340				   ICMP6_MIB_INERRORS);
 341		return;
 342	}
 343
 344	if (sk->sk_state == TCP_TIME_WAIT) {
 345		inet_twsk_put(inet_twsk(sk));
 346		return;
 347	}
 348	seq = ntohl(th->seq);
 349	fatal = icmpv6_err_convert(type, code, &err);
 350	if (sk->sk_state == TCP_NEW_SYN_RECV)
 351		return tcp_req_err(sk, seq, fatal);
 352
 353	bh_lock_sock(sk);
 354	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
 355		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
 356
 357	if (sk->sk_state == TCP_CLOSE)
 358		goto out;
 359
 360	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
 361		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
 362		goto out;
 363	}
 364
 365	tp = tcp_sk(sk);
 366	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child()) */
 367	fastopen = tp->fastopen_rsk;
 368	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
 369	if (sk->sk_state != TCP_LISTEN &&
 370	    !between(seq, snd_una, tp->snd_nxt)) {
 371		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
 372		goto out;
 373	}
 374
 375	np = inet6_sk(sk);
 376
 377	if (type == NDISC_REDIRECT) {
 378		struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
 379
 380		if (dst)
 381			dst->ops->redirect(dst, sk, skb);
 382		goto out;
 383	}
 384
 385	if (type == ICMPV6_PKT_TOOBIG) {
 386		/* We are not interested in TCP_LISTEN and open_requests
 387		 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
 388		 * they should go through unfragmented).
 389		 */
 390		if (sk->sk_state == TCP_LISTEN)
 391			goto out;
 392
 393		if (!ip6_sk_accept_pmtu(sk))
 394			goto out;
 395
 396		tp->mtu_info = ntohl(info);
 397		if (!sock_owned_by_user(sk))
 398			tcp_v6_mtu_reduced(sk);
 399		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
 400					   &tp->tsq_flags))
 401			sock_hold(sk);
 402		goto out;
 403	}
 404
 405
 406	/* Might be for a request_sock */
 407	switch (sk->sk_state) {
 408	case TCP_SYN_SENT:
 409	case TCP_SYN_RECV:
 410		/* Only in fast or simultaneous open. If a fast open socket
 411		 * is already accepted it is treated as a connected one below.
 412		 */
 413		if (fastopen && !fastopen->sk)
 414			break;
 415
 416		if (!sock_owned_by_user(sk)) {
 417			sk->sk_err = err;
 418			sk->sk_error_report(sk);		/* Wake people up to see the error (see connect in sock.c) */
 419
 420			tcp_done(sk);
 421		} else
 422			sk->sk_err_soft = err;
 423		goto out;
 424	}
 425
 426	if (!sock_owned_by_user(sk) && np->recverr) {
 427		sk->sk_err = err;
 428		sk->sk_error_report(sk);
 429	} else
 430		sk->sk_err_soft = err;
 431
 432out:
 433	bh_unlock_sock(sk);
 434	sock_put(sk);
 435}
 436
 437
 438static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
 439			      struct flowi *fl,
 440			      struct request_sock *req,
 441			      struct tcp_fastopen_cookie *foc,
 442			      bool attach_req)
 443{
 444	struct inet_request_sock *ireq = inet_rsk(req);
 445	struct ipv6_pinfo *np = inet6_sk(sk);
 446	struct flowi6 *fl6 = &fl->u.ip6;
 447	struct sk_buff *skb;
 448	int err = -ENOMEM;
 449
 450	/* First, grab a route. */
 451	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
 452					       IPPROTO_TCP)) == NULL)
 453		goto done;
 454
 455	skb = tcp_make_synack(sk, dst, req, foc, attach_req);
 456
 457	if (skb) {
 458		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
 459				    &ireq->ir_v6_rmt_addr);
 460
 461		fl6->daddr = ireq->ir_v6_rmt_addr;
 462		if (np->repflow && ireq->pktopts)
 463			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
 464
 465		rcu_read_lock();
 466		err = ip6_xmit(sk, skb, fl6, rcu_dereference(np->opt),
 467			       np->tclass);
 468		rcu_read_unlock();
 469		err = net_xmit_eval(err);
 470	}
 471
 472done:
 473	return err;
 474}
 475
 476
 477static void tcp_v6_reqsk_destructor(struct request_sock *req)
 478{
 479	kfree_skb(inet_rsk(req)->pktopts);
 480}
 481
 482#ifdef CONFIG_TCP_MD5SIG
 483static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
 484						   const struct in6_addr *addr)
 485{
 486	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
 487}
 488
 489static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
 490						const struct sock *addr_sk)
 491{
 492	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
 493}
 494
 495static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
 496				 int optlen)
 497{
 498	struct tcp_md5sig cmd;
 499	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
 500
 501	if (optlen < sizeof(cmd))
 502		return -EINVAL;
 503
 504	if (copy_from_user(&cmd, optval, sizeof(cmd)))
 505		return -EFAULT;
 506
 507	if (sin6->sin6_family != AF_INET6)
 508		return -EINVAL;
 509
 510	if (!cmd.tcpm_keylen) {
 511		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
 512			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
 513					      AF_INET);
 514		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
 515				      AF_INET6);
 516	}
 517
 518	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
 519		return -EINVAL;
 520
 521	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
 522		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
 523				      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
 524
 525	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
 526			      AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
 527}
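/*
 * Editor's sketch (userspace, not kernel code): the parser above is
 * driven by setsockopt(TCP_MD5SIG). A minimal caller, assuming
 * <netinet/tcp.h> exposes struct tcp_md5sig:
 *
 *	#include <netinet/tcp.h>
 *	#include <arpa/inet.h>
 *	#include <string.h>
 *
 *	struct tcp_md5sig md5;
 *	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&md5.tcpm_addr;
 *
 *	memset(&md5, 0, sizeof(md5));
 *	sin6->sin6_family = AF_INET6;
 *	inet_pton(AF_INET6, "2001:db8::1", &sin6->sin6_addr);
 *	md5.tcpm_keylen = 6;
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 *
 * A zero tcpm_keylen deletes the key for that peer, matching the
 * !cmd.tcpm_keylen branch above; a v4-mapped sin6_addr manages the
 * AF_INET key instead.
 */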
 528
 529static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
 530					const struct in6_addr *daddr,
 531					const struct in6_addr *saddr, int nbytes)
 532{
 533	struct tcp6_pseudohdr *bp;
 534	struct scatterlist sg;
 535
 536	bp = &hp->md5_blk.ip6;
 537	/* 1. TCP pseudo-header (RFC2460) */
 538	bp->saddr = *saddr;
 539	bp->daddr = *daddr;
 540	bp->protocol = cpu_to_be32(IPPROTO_TCP);
 541	bp->len = cpu_to_be32(nbytes);
 542
 543	sg_init_one(&sg, bp, sizeof(*bp));
 544	ahash_request_set_crypt(hp->md5_req, &sg, NULL, sizeof(*bp));
 545	return crypto_ahash_update(hp->md5_req);
 546}
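/*
 * Editor's note: the block hashed above is the TCP/IPv6 pseudo-header
 * of RFC 2460 section 8.1, laid out (sketch; see struct tcp6_pseudohdr
 * in include/net/tcp.h) as:
 *
 *	struct in6_addr saddr;		16 bytes
 *	struct in6_addr daddr;		16 bytes
 *	__be32 len;			upper-layer packet length
 *	__be32 protocol;		zero-padded, IPPROTO_TCP
 *
 * so the MD5 signature binds each segment to its addresses and length,
 * exactly like the checksum pseudo-header.
 */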
 547
 548static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
 549			       const struct in6_addr *daddr, struct in6_addr *saddr,
 550			       const struct tcphdr *th)
 551{
 552	struct tcp_md5sig_pool *hp;
 553	struct ahash_request *req;
 554
 555	hp = tcp_get_md5sig_pool();
 556	if (!hp)
 557		goto clear_hash_noput;
 558	req = hp->md5_req;
 559
 560	if (crypto_ahash_init(req))
 561		goto clear_hash;
 562	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
 563		goto clear_hash;
 564	if (tcp_md5_hash_header(hp, th))
 565		goto clear_hash;
 566	if (tcp_md5_hash_key(hp, key))
 567		goto clear_hash;
 568	ahash_request_set_crypt(req, NULL, md5_hash, 0);
 569	if (crypto_ahash_final(req))
 570		goto clear_hash;
 571
 572	tcp_put_md5sig_pool();
 573	return 0;
 574
 575clear_hash:
 576	tcp_put_md5sig_pool();
 577clear_hash_noput:
 578	memset(md5_hash, 0, 16);
 579	return 1;
 580}
 581
 582static int tcp_v6_md5_hash_skb(char *md5_hash,
 583			       const struct tcp_md5sig_key *key,
 584			       const struct sock *sk,
 585			       const struct sk_buff *skb)
 586{
 587	const struct in6_addr *saddr, *daddr;
 588	struct tcp_md5sig_pool *hp;
 589	struct ahash_request *req;
 590	const struct tcphdr *th = tcp_hdr(skb);
 591
 592	if (sk) { /* valid for establish/request sockets */
 593		saddr = &sk->sk_v6_rcv_saddr;
 594		daddr = &sk->sk_v6_daddr;
 595	} else {
 596		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
 597		saddr = &ip6h->saddr;
 598		daddr = &ip6h->daddr;
 599	}
 600
 601	hp = tcp_get_md5sig_pool();
 602	if (!hp)
 603		goto clear_hash_noput;
 604	req = hp->md5_req;
 605
 606	if (crypto_ahash_init(req))
 607		goto clear_hash;
 608
 609	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
 610		goto clear_hash;
 611	if (tcp_md5_hash_header(hp, th))
 612		goto clear_hash;
 613	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
 614		goto clear_hash;
 615	if (tcp_md5_hash_key(hp, key))
 616		goto clear_hash;
 617	ahash_request_set_crypt(req, NULL, md5_hash, 0);
 618	if (crypto_ahash_final(req))
 619		goto clear_hash;
 620
 621	tcp_put_md5sig_pool();
 622	return 0;
 623
 624clear_hash:
 625	tcp_put_md5sig_pool();
 626clear_hash_noput:
 627	memset(md5_hash, 0, 16);
 628	return 1;
 629}
 630
 631#endif
 632
 633static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
 634				    const struct sk_buff *skb)
 635{
 636#ifdef CONFIG_TCP_MD5SIG
 637	const __u8 *hash_location = NULL;
 638	struct tcp_md5sig_key *hash_expected;
 639	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
 640	const struct tcphdr *th = tcp_hdr(skb);
 641	int genhash;
 642	u8 newhash[16];
 643
 644	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
 645	hash_location = tcp_parse_md5sig_option(th);
 646
 647	/* We've parsed the options - do we have a hash? */
 648	if (!hash_expected && !hash_location)
 649		return false;
 650
 651	if (hash_expected && !hash_location) {
 652		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
 653		return true;
 654	}
 655
 656	if (!hash_expected && hash_location) {
 657		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
 658		return true;
 659	}
 660
 661	/* check the signature */
 662	genhash = tcp_v6_md5_hash_skb(newhash,
 663				      hash_expected,
 664				      NULL, skb);
 665
 666	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
 667		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
 668				     genhash ? "failed" : "mismatch",
 669				     &ip6h->saddr, ntohs(th->source),
 670				     &ip6h->daddr, ntohs(th->dest));
 671		return true;
 672	}
 673#endif
 674	return false;
 675}
 676
 677static void tcp_v6_init_req(struct request_sock *req,
 678			    const struct sock *sk_listener,
 679			    struct sk_buff *skb)
 680{
 681	struct inet_request_sock *ireq = inet_rsk(req);
 682	const struct ipv6_pinfo *np = inet6_sk(sk_listener);
 683
 684	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
 685	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
 686
 687	/* So that link locals have meaning */
 688	if (!sk_listener->sk_bound_dev_if &&
 689	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
 690		ireq->ir_iif = tcp_v6_iif(skb);
 691
 692	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
 693	    (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
 694	     np->rxopt.bits.rxinfo ||
 695	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
 696	     np->rxopt.bits.rxohlim || np->repflow)) {
 697		atomic_inc(&skb->users);
 698		ireq->pktopts = skb;
 699	}
 700}
 701
 702static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
 703					  struct flowi *fl,
 704					  const struct request_sock *req,
 705					  bool *strict)
 706{
 707	if (strict)
 708		*strict = true;
 709	return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
 710}
 711
 712struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
 713	.family		=	AF_INET6,
 714	.obj_size	=	sizeof(struct tcp6_request_sock),
 715	.rtx_syn_ack	=	tcp_rtx_synack,
 716	.send_ack	=	tcp_v6_reqsk_send_ack,
 717	.destructor	=	tcp_v6_reqsk_destructor,
 718	.send_reset	=	tcp_v6_send_reset,
 719	.syn_ack_timeout =	tcp_syn_ack_timeout,
 720};
 721
 722static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
 723	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
 724				sizeof(struct ipv6hdr),
 725#ifdef CONFIG_TCP_MD5SIG
 726	.req_md5_lookup	=	tcp_v6_md5_lookup,
 727	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
 728#endif
 729	.init_req	=	tcp_v6_init_req,
 730#ifdef CONFIG_SYN_COOKIES
 731	.cookie_init_seq =	cookie_v6_init_sequence,
 732#endif
 733	.route_req	=	tcp_v6_route_req,
 734	.init_seq	=	tcp_v6_init_sequence,
 735	.send_synack	=	tcp_v6_send_synack,
 736};
 737
 738static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
 739				 u32 ack, u32 win, u32 tsval, u32 tsecr,
 740				 int oif, struct tcp_md5sig_key *key, int rst,
 741				 u8 tclass, u32 label)
 742{
 743	const struct tcphdr *th = tcp_hdr(skb);
 744	struct tcphdr *t1;
 745	struct sk_buff *buff;
 746	struct flowi6 fl6;
 747	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
 748	struct sock *ctl_sk = net->ipv6.tcp_sk;
 749	unsigned int tot_len = sizeof(struct tcphdr);
 750	struct dst_entry *dst;
 751	__be32 *topt;
 752
 753	if (tsecr)
 754		tot_len += TCPOLEN_TSTAMP_ALIGNED;
 755#ifdef CONFIG_TCP_MD5SIG
 756	if (key)
 757		tot_len += TCPOLEN_MD5SIG_ALIGNED;
 758#endif
 759
 760	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
 761			 GFP_ATOMIC);
 762	if (!buff)
 763		return;
 764
 765	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
 766
 767	t1 = (struct tcphdr *) skb_push(buff, tot_len);
 768	skb_reset_transport_header(buff);
 769
 770	/* Swap the send and the receive. */
 771	memset(t1, 0, sizeof(*t1));
 772	t1->dest = th->source;
 773	t1->source = th->dest;
 774	t1->doff = tot_len / 4;
 775	t1->seq = htonl(seq);
 776	t1->ack_seq = htonl(ack);
 777	t1->ack = !rst || !th->ack;
 778	t1->rst = rst;
 779	t1->window = htons(win);
 780
 781	topt = (__be32 *)(t1 + 1);
 782
 783	if (tsecr) {
 784		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
 785				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
 786		*topt++ = htonl(tsval);
 787		*topt++ = htonl(tsecr);
 788	}
 789
 790#ifdef CONFIG_TCP_MD5SIG
 791	if (key) {
 792		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
 793				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
 794		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
 795				    &ipv6_hdr(skb)->saddr,
 796				    &ipv6_hdr(skb)->daddr, t1);
 797	}
 798#endif
 799
 800	memset(&fl6, 0, sizeof(fl6));
 801	fl6.daddr = ipv6_hdr(skb)->saddr;
 802	fl6.saddr = ipv6_hdr(skb)->daddr;
 803	fl6.flowlabel = label;
 804
 805	buff->ip_summed = CHECKSUM_PARTIAL;
 806	buff->csum = 0;
 807
 808	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
 809
 810	fl6.flowi6_proto = IPPROTO_TCP;
 811	if (rt6_need_strict(&fl6.daddr) && !oif)
 812		fl6.flowi6_oif = tcp_v6_iif(skb);
 813	else {
 814		if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
 815			oif = skb->skb_iif;
 816
 817		fl6.flowi6_oif = oif;
 818	}
 819
 820	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
 821	fl6.fl6_dport = t1->dest;
 822	fl6.fl6_sport = t1->source;
 823	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
 824
 825	/* Pass the control socket to ip6_dst_lookup even when sending an
 826	 * RST; the underlying function uses it to retrieve the network
 827	 * namespace.
 828	 */
 829	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
 830	if (!IS_ERR(dst)) {
 831		skb_dst_set(buff, dst);
 832		ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
 833		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
 834		if (rst)
 835			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
 836		return;
 837	}
 838
 839	kfree_skb(buff);
 840}
 841
 842static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
 843{
 844	const struct tcphdr *th = tcp_hdr(skb);
 845	u32 seq = 0, ack_seq = 0;
 846	struct tcp_md5sig_key *key = NULL;
 847#ifdef CONFIG_TCP_MD5SIG
 848	const __u8 *hash_location = NULL;
 849	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
 850	unsigned char newhash[16];
 851	int genhash;
 852	struct sock *sk1 = NULL;
 853#endif
 854	int oif;
 855
 856	if (th->rst)
 857		return;
 858
 859	/* If sk is not NULL, it means we did a successful lookup and the
 860	 * incoming route had to be correct. prequeue might have dropped our dst.
 861	 */
 862	if (!sk && !ipv6_unicast_destination(skb))
 863		return;
 864
 865#ifdef CONFIG_TCP_MD5SIG
 866	hash_location = tcp_parse_md5sig_option(th);
 867	if (sk && sk_fullsock(sk)) {
 868		key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr);
 869	} else if (hash_location) {
 870		/*
 871		 * The active side is lost. Try to find the listening socket
 872		 * through the source port, and then find the md5 key through
 873		 * the listening socket. We are not losing security here:
 874		 * the incoming packet is checked against the md5 hash of the
 875		 * key we find, and no RST is generated if the hash doesn't match.
 876		 */
 877		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
 878					   &tcp_hashinfo, NULL, 0,
 879					   &ipv6h->saddr,
 880					   th->source, &ipv6h->daddr,
 881					   ntohs(th->source), tcp_v6_iif(skb));
 882		if (!sk1)
 883			return;
 884
 885		rcu_read_lock();
 886		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
 887		if (!key)
 888			goto release_sk1;
 889
 890		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
 891		if (genhash || memcmp(hash_location, newhash, 16) != 0)
 892			goto release_sk1;
 893	}
 894#endif
 895
 896	if (th->ack)
 897		seq = ntohl(th->ack_seq);
 898	else
 899		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
 900			  (th->doff << 2);
 901
 902	oif = sk ? sk->sk_bound_dev_if : 0;
 903	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);
 904
 905#ifdef CONFIG_TCP_MD5SIG
 906release_sk1:
 907	if (sk1) {
 908		rcu_read_unlock();
 909		sock_put(sk1);
 910	}
 911#endif
 912}
 913
 914static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
 915			    u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
 916			    struct tcp_md5sig_key *key, u8 tclass,
 917			    u32 label)
 918{
 919	tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
 920			     tclass, label);
 921}
 922
 923static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
 924{
 925	struct inet_timewait_sock *tw = inet_twsk(sk);
 926	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
 927
 928	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
 929			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
 930			tcp_time_stamp + tcptw->tw_ts_offset,
 931			tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
 932			tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));
 933
 934	inet_twsk_put(tw);
 935}
 936
 937static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
 938				  struct request_sock *req)
 939{
 940	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
 941	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
 942	 */
 943	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
 944			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
 945			tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd,
 946			tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
 947			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
 948			0, 0);
 949}
 950
 951
 952static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
 953{
 954#ifdef CONFIG_SYN_COOKIES
 955	const struct tcphdr *th = tcp_hdr(skb);
 956
 957	if (!th->syn)
 958		sk = cookie_v6_check(sk, skb);
 959#endif
 960	return sk;
 961}
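/*
 * Editor's note: the !th->syn test above is intentional. A SYN cookie
 * is validated on the returning ACK (which carries no SYN), not on the
 * original SYN; cookie_v6_check() then reconstructs the request sock
 * from the sequence-number cookie after the SYN queue has overflowed.
 */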
 962
 963static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 964{
 965	if (skb->protocol == htons(ETH_P_IP))
 966		return tcp_v4_conn_request(sk, skb);
 967
 968	if (!ipv6_unicast_destination(skb))
 969		goto drop;
 970
 971	return tcp_conn_request(&tcp6_request_sock_ops,
 972				&tcp_request_sock_ipv6_ops, sk, skb);
 973
 974drop:
 975	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
 976	return 0; /* don't send reset */
 977}
 978
 979static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
 980					 struct request_sock *req,
 981					 struct dst_entry *dst,
 982					 struct request_sock *req_unhash,
 983					 bool *own_req)
 984{
 985	struct inet_request_sock *ireq;
 986	struct ipv6_pinfo *newnp;
 987	const struct ipv6_pinfo *np = inet6_sk(sk);
 988	struct ipv6_txoptions *opt;
 989	struct tcp6_sock *newtcp6sk;
 990	struct inet_sock *newinet;
 991	struct tcp_sock *newtp;
 992	struct sock *newsk;
 993#ifdef CONFIG_TCP_MD5SIG
 994	struct tcp_md5sig_key *key;
 995#endif
 996	struct flowi6 fl6;
 997
 998	if (skb->protocol == htons(ETH_P_IP)) {
 999		/*
1000		 *	v6 mapped
1001		 */
1002
1003		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
1004					     req_unhash, own_req);
1005
1006		if (!newsk)
1007			return NULL;
1008
1009		newtcp6sk = (struct tcp6_sock *)newsk;
1010		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1011
1012		newinet = inet_sk(newsk);
1013		newnp = inet6_sk(newsk);
1014		newtp = tcp_sk(newsk);
1015
1016		memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1017
1018		newnp->saddr = newsk->sk_v6_rcv_saddr;
1019
1020		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1021		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
1022#ifdef CONFIG_TCP_MD5SIG
1023		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1024#endif
1025
1026		newnp->ipv6_ac_list = NULL;
1027		newnp->ipv6_fl_list = NULL;
1028		newnp->pktoptions  = NULL;
1029		newnp->opt	   = NULL;
1030		newnp->mcast_oif   = tcp_v6_iif(skb);
1031		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
1032		newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
1033		if (np->repflow)
1034			newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
1035
1036		/*
1037		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1038		 * here, tcp_create_openreq_child now does this for us, see the comment in
1039		 * that function for the gory details. -acme
1040		 */
1041
1042		/* It is tricky place. Until this moment IPv4 tcp
1043		   worked with IPv6 icsk.icsk_af_ops.
1044		   Sync it now.
1045		 */
1046		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
1047
1048		return newsk;
1049	}
1050
1051	ireq = inet_rsk(req);
1052
1053	if (sk_acceptq_is_full(sk))
1054		goto out_overflow;
1055
1056	if (!dst) {
1057		dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
1058		if (!dst)
1059			goto out;
1060	}
1061
1062	newsk = tcp_create_openreq_child(sk, req, skb);
1063	if (!newsk)
1064		goto out_nonewsk;
1065
1066	/*
1067	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1068	 * count here, tcp_create_openreq_child now does this for us, see the
1069	 * comment in that function for the gory details. -acme
1070	 */
1071
1072	newsk->sk_gso_type = SKB_GSO_TCPV6;
1073	ip6_dst_store(newsk, dst, NULL, NULL);
1074	inet6_sk_rx_dst_set(newsk, skb);
1075
1076	newtcp6sk = (struct tcp6_sock *)newsk;
1077	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1078
1079	newtp = tcp_sk(newsk);
1080	newinet = inet_sk(newsk);
1081	newnp = inet6_sk(newsk);
1082
1083	memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1084
1085	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
1086	newnp->saddr = ireq->ir_v6_loc_addr;
1087	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
1088	newsk->sk_bound_dev_if = ireq->ir_iif;
1089
1090	/* Now IPv6 options...
1091
1092	   First: no IPv4 options.
1093	 */
1094	newinet->inet_opt = NULL;
1095	newnp->ipv6_ac_list = NULL;
1096	newnp->ipv6_fl_list = NULL;
1097
1098	/* Clone RX bits */
1099	newnp->rxopt.all = np->rxopt.all;
1100
1101	newnp->pktoptions = NULL;
1102	newnp->opt	  = NULL;
1103	newnp->mcast_oif  = tcp_v6_iif(skb);
1104	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1105	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
1106	if (np->repflow)
1107		newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
1108
1109	/* Clone native IPv6 options from listening socket (if any)
1110
1111	   Yes, keeping a reference count would be much more clever,
1112	   but we do one more thing here: reattach optmem
1113	   to newsk.
1114	 */
1115	opt = rcu_dereference(np->opt);
1116	if (opt) {
1117		opt = ipv6_dup_options(newsk, opt);
1118		RCU_INIT_POINTER(newnp->opt, opt);
1119	}
1120	inet_csk(newsk)->icsk_ext_hdr_len = 0;
1121	if (opt)
1122		inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
1123						    opt->opt_flen;
1124
1125	tcp_ca_openreq_child(newsk, dst);
1126
1127	tcp_sync_mss(newsk, dst_mtu(dst));
1128	newtp->advmss = dst_metric_advmss(dst);
1129	if (tcp_sk(sk)->rx_opt.user_mss &&
1130	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1131		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1132
1133	tcp_initialize_rcv_mss(newsk);
1134
1135	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
1136	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
1137
1138#ifdef CONFIG_TCP_MD5SIG
1139	/* Copy over the MD5 key from the original socket */
1140	key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
1141	if (key) {
1142		/* We're using one, so create a matching key
1143		 * on the newsk structure. If we fail to get
1144		 * memory, then we end up not copying the key
1145		 * across. Shucks.
1146		 */
1147		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
1148			       AF_INET6, key->key, key->keylen,
1149			       sk_gfp_mask(sk, GFP_ATOMIC));
1150	}
1151#endif
1152
1153	if (__inet_inherit_port(sk, newsk) < 0) {
1154		inet_csk_prepare_forced_close(newsk);
1155		tcp_done(newsk);
1156		goto out;
1157	}
1158	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
1159	if (*own_req) {
1160		tcp_move_syn(newtp, req);
1161
1162		/* Clone pktoptions received with SYN, if we own the req */
1163		if (ireq->pktopts) {
1164			newnp->pktoptions = skb_clone(ireq->pktopts,
1165						      sk_gfp_mask(sk, GFP_ATOMIC));
1166			consume_skb(ireq->pktopts);
1167			ireq->pktopts = NULL;
1168			if (newnp->pktoptions)
1169				skb_set_owner_r(newnp->pktoptions, newsk);
1170		}
1171	}
1172
1173	return newsk;
1174
1175out_overflow:
1176	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1177out_nonewsk:
1178	dst_release(dst);
1179out:
1180	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1181	return NULL;
1182}
1183
1184/* The socket must have its spinlock held when we get
1185 * here, unless it is a TCP_LISTEN socket.
1186 *
1187 * We have a potential double-lock case here, so even when
1188 * doing backlog processing we use the BH locking scheme.
1189 * This is because we cannot sleep with the original spinlock
1190 * held.
1191 */
1192static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1193{
1194	struct ipv6_pinfo *np = inet6_sk(sk);
1195	struct tcp_sock *tp;
1196	struct sk_buff *opt_skb = NULL;
1197
1198	/* Imagine: socket is IPv6. IPv4 packet arrives,
1199	   goes to IPv4 receive handler and is backlogged.
1200	   From backlog it always goes here. Kerboom...
1201	   Fortunately, tcp_rcv_established and rcv_established
1202	   handle them correctly, but it is not the case with
1203	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
1204	 */
1205
1206	if (skb->protocol == htons(ETH_P_IP))
1207		return tcp_v4_do_rcv(sk, skb);
1208
1209	if (sk_filter(sk, skb))
1210		goto discard;
1211
1212	/*
1213	 *	socket locking is here for SMP purposes as backlog rcv
1214	 *	is currently called with bh processing disabled.
1215	 */
1216
1217	/* Do Stevens' IPV6_PKTOPTIONS.
1218
1219	   Yes, guys, it is the only place in our code, where we
1220	   may make it not affecting IPv4.
1221	   The rest of code is protocol independent,
1222	   and I do not like idea to uglify IPv4.
1223
1224	   Actually, the whole idea behind IPV6_PKTOPTIONS
1225	   does not look very well thought out. For now we latch
1226	   options, received in the last packet, enqueued
1227	   by tcp. Feel free to propose better solution.
1228					       --ANK (980728)
1229	 */
1230	if (np->rxopt.all)
1231		opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));
1232
1233	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1234		struct dst_entry *dst = sk->sk_rx_dst;
1235
1236		sock_rps_save_rxhash(sk, skb);
1237		sk_mark_napi_id(sk, skb);
1238		if (dst) {
1239			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1240			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
1241				dst_release(dst);
1242				sk->sk_rx_dst = NULL;
1243			}
1244		}
1245
1246		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
1247		if (opt_skb)
1248			goto ipv6_pktoptions;
1249		return 0;
1250	}
1251
1252	if (tcp_checksum_complete(skb))
1253		goto csum_err;
1254
1255	if (sk->sk_state == TCP_LISTEN) {
1256		struct sock *nsk = tcp_v6_cookie_check(sk, skb);
1257
1258		if (!nsk)
1259			goto discard;
1260
1261		if (nsk != sk) {
1262			sock_rps_save_rxhash(nsk, skb);
1263			sk_mark_napi_id(nsk, skb);
1264			if (tcp_child_process(sk, nsk, skb))
1265				goto reset;
1266			if (opt_skb)
1267				__kfree_skb(opt_skb);
1268			return 0;
1269		}
1270	} else
1271		sock_rps_save_rxhash(sk, skb);
1272
1273	if (tcp_rcv_state_process(sk, skb))
1274		goto reset;
1275	if (opt_skb)
1276		goto ipv6_pktoptions;
1277	return 0;
1278
1279reset:
1280	tcp_v6_send_reset(sk, skb);
1281discard:
1282	if (opt_skb)
1283		__kfree_skb(opt_skb);
1284	kfree_skb(skb);
1285	return 0;
1286csum_err:
1287	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
1288	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1289	goto discard;
1290
1291
1292ipv6_pktoptions:
1293	/* Do you ask, what is it?
1294
1295	   1. skb was enqueued by tcp.
1296	   2. skb is added to tail of read queue, rather than out of order.
1297	   3. socket is not in passive state.
1298	   4. Finally, it really contains options, which user wants to receive.
1299	 */
1300	tp = tcp_sk(sk);
1301	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1302	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
1303		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
1304			np->mcast_oif = tcp_v6_iif(opt_skb);
1305		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1306			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
1307		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
1308			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
1309		if (np->repflow)
1310			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
1311		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
1312			skb_set_owner_r(opt_skb, sk);
1313			opt_skb = xchg(&np->pktoptions, opt_skb);
1314		} else {
1315			__kfree_skb(opt_skb);
1316			opt_skb = xchg(&np->pktoptions, NULL);
1317		}
1318	}
1319
1320	kfree_skb(opt_skb);
1321	return 0;
1322}
1323
1324static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
1325			   const struct tcphdr *th)
1326{
1327	/* This is tricky: we move IP6CB to its correct location in
1328	 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
1329	 * _decode_session6() uses IP6CB().
1330	 * barrier() makes sure compiler won't play aliasing games.
1331	 */
1332	memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
1333		sizeof(struct inet6_skb_parm));
1334	barrier();
1335
1336	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1337	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1338				    skb->len - th->doff*4);
1339	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1340	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1341	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1342	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1343	TCP_SKB_CB(skb)->sacked = 0;
1344}
1345
1346static void tcp_v6_restore_cb(struct sk_buff *skb)
1347{
1348	/* We need to move header back to the beginning if xfrm6_policy_check()
1349	 * and tcp_v6_fill_cb() are going to be called again.
1350	 */
1351	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
1352		sizeof(struct inet6_skb_parm));
1353}
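/*
 * Editor's note (sketch): both views of skb->cb[] must fit in the same
 * buffer, i.e. roughly
 *
 *	BUILD_BUG_ON(sizeof(struct tcp_skb_cb) >
 *		     sizeof(((struct sk_buff *)0)->cb));
 *
 * tcp_v6_fill_cb() relocates the IPv6 control block (IP6CB) into
 * TCP_SKB_CB()->header.h6 so TCP may reuse the rest of cb[], and
 * tcp_v6_restore_cb() copies it back before IP6CB() is consulted again.
 */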
1354
1355static int tcp_v6_rcv(struct sk_buff *skb)
1356{
1357	const struct tcphdr *th;
1358	const struct ipv6hdr *hdr;
1359	struct sock *sk;
1360	int ret;
1361	struct net *net = dev_net(skb->dev);
1362
1363	if (skb->pkt_type != PACKET_HOST)
1364		goto discard_it;
1365
1366	/*
1367	 *	Count it even if it's bad.
1368	 */
1369	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1370
1371	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1372		goto discard_it;
1373
1374	th = tcp_hdr(skb);
1375
1376	if (th->doff < sizeof(struct tcphdr)/4)
1377		goto bad_packet;
1378	if (!pskb_may_pull(skb, th->doff*4))
1379		goto discard_it;
1380
1381	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
1382		goto csum_error;
1383
1384	th = tcp_hdr(skb);
1385	hdr = ipv6_hdr(skb);
1386
1387lookup:
1388	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
1389				th->source, th->dest, inet6_iif(skb));
1390	if (!sk)
1391		goto no_tcp_socket;
1392
1393process:
1394	if (sk->sk_state == TCP_TIME_WAIT)
1395		goto do_time_wait;
1396
1397	if (sk->sk_state == TCP_NEW_SYN_RECV) {
1398		struct request_sock *req = inet_reqsk(sk);
1399		struct sock *nsk;
1400
1401		sk = req->rsk_listener;
1402		tcp_v6_fill_cb(skb, hdr, th);
1403		if (tcp_v6_inbound_md5_hash(sk, skb)) {
1404			reqsk_put(req);
1405			goto discard_it;
1406		}
1407		if (unlikely(sk->sk_state != TCP_LISTEN)) {
1408			inet_csk_reqsk_queue_drop_and_put(sk, req);
1409			goto lookup;
1410		}
1411		sock_hold(sk);
1412		nsk = tcp_check_req(sk, skb, req, false);
1413		if (!nsk) {
1414			reqsk_put(req);
1415			goto discard_and_relse;
1416		}
1417		if (nsk == sk) {
1418			reqsk_put(req);
1419			tcp_v6_restore_cb(skb);
1420		} else if (tcp_child_process(sk, nsk, skb)) {
1421			tcp_v6_send_reset(nsk, skb);
1422			goto discard_and_relse;
1423		} else {
1424			sock_put(sk);
1425			return 0;
1426		}
1427	}
1428	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
1429		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1430		goto discard_and_relse;
1431	}
1432
1433	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1434		goto discard_and_relse;
1435
1436	tcp_v6_fill_cb(skb, hdr, th);
1437
1438	if (tcp_v6_inbound_md5_hash(sk, skb))
1439		goto discard_and_relse;
1440
1441	if (sk_filter(sk, skb))
1442		goto discard_and_relse;
1443
1444	skb->dev = NULL;
1445
1446	if (sk->sk_state == TCP_LISTEN) {
1447		ret = tcp_v6_do_rcv(sk, skb);
1448		goto put_and_return;
1449	}
1450
1451	sk_incoming_cpu_update(sk);
1452
1453	bh_lock_sock_nested(sk);
1454	tcp_segs_in(tcp_sk(sk), skb);
1455	ret = 0;
1456	if (!sock_owned_by_user(sk)) {
1457		if (!tcp_prequeue(sk, skb))
1458			ret = tcp_v6_do_rcv(sk, skb);
1459	} else if (unlikely(sk_add_backlog(sk, skb,
1460					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
1461		bh_unlock_sock(sk);
1462		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1463		goto discard_and_relse;
1464	}
1465	bh_unlock_sock(sk);
1466
1467put_and_return:
1468	sock_put(sk);
1469	return ret ? -1 : 0;
1470
1471no_tcp_socket:
1472	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1473		goto discard_it;
1474
1475	tcp_v6_fill_cb(skb, hdr, th);
1476
1477	if (tcp_checksum_complete(skb)) {
1478csum_error:
1479		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
1480bad_packet:
1481		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1482	} else {
1483		tcp_v6_send_reset(NULL, skb);
1484	}
1485
1486discard_it:
1487	kfree_skb(skb);
1488	return 0;
1489
1490discard_and_relse:
1491	sock_put(sk);
1492	goto discard_it;
1493
1494do_time_wait:
1495	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1496		inet_twsk_put(inet_twsk(sk));
1497		goto discard_it;
1498	}
1499
1500	tcp_v6_fill_cb(skb, hdr, th);
1501
1502	if (tcp_checksum_complete(skb)) {
1503		inet_twsk_put(inet_twsk(sk));
1504		goto csum_error;
1505	}
1506
1507	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1508	case TCP_TW_SYN:
1509	{
1510		struct sock *sk2;
1511
1512		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
1513					    skb, __tcp_hdrlen(th),
1514					    &ipv6_hdr(skb)->saddr, th->source,
1515					    &ipv6_hdr(skb)->daddr,
1516					    ntohs(th->dest), tcp_v6_iif(skb));
1517		if (sk2) {
1518			struct inet_timewait_sock *tw = inet_twsk(sk);
1519			inet_twsk_deschedule_put(tw);
1520			sk = sk2;
1521			tcp_v6_restore_cb(skb);
1522			goto process;
1523		}
1524		/* Fall through to ACK */
1525	}
1526	case TCP_TW_ACK:
1527		tcp_v6_timewait_ack(sk, skb);
1528		break;
1529	case TCP_TW_RST:
1530		tcp_v6_restore_cb(skb);
1531		tcp_v6_send_reset(sk, skb);
1532		inet_twsk_deschedule_put(inet_twsk(sk));
1533		goto discard_it;
1534	case TCP_TW_SUCCESS:
1535		;
1536	}
1537	goto discard_it;
1538}
1539
1540static void tcp_v6_early_demux(struct sk_buff *skb)
1541{
1542	const struct ipv6hdr *hdr;
1543	const struct tcphdr *th;
1544	struct sock *sk;
1545
1546	if (skb->pkt_type != PACKET_HOST)
1547		return;
1548
1549	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1550		return;
1551
1552	hdr = ipv6_hdr(skb);
1553	th = tcp_hdr(skb);
1554
1555	if (th->doff < sizeof(struct tcphdr) / 4)
1556		return;
1557
1558	/* Note : We use inet6_iif() here, not tcp_v6_iif() */
1559	sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1560					&hdr->saddr, th->source,
1561					&hdr->daddr, ntohs(th->dest),
1562					inet6_iif(skb));
1563	if (sk) {
1564		skb->sk = sk;
1565		skb->destructor = sock_edemux;
1566		if (sk_fullsock(sk)) {
1567			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
1568
1569			if (dst)
1570				dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
1571			if (dst &&
1572			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1573				skb_dst_set_noref(skb, dst);
1574		}
1575	}
1576}
1577
1578static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1579	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
1580	.twsk_unique	= tcp_twsk_unique,
1581	.twsk_destructor = tcp_twsk_destructor,
1582};
1583
1584static const struct inet_connection_sock_af_ops ipv6_specific = {
1585	.queue_xmit	   = inet6_csk_xmit,
1586	.send_check	   = tcp_v6_send_check,
1587	.rebuild_header	   = inet6_sk_rebuild_header,
1588	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
1589	.conn_request	   = tcp_v6_conn_request,
1590	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
1591	.net_header_len	   = sizeof(struct ipv6hdr),
1592	.net_frag_header_len = sizeof(struct frag_hdr),
1593	.setsockopt	   = ipv6_setsockopt,
1594	.getsockopt	   = ipv6_getsockopt,
1595	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
1596	.sockaddr_len	   = sizeof(struct sockaddr_in6),
1597	.bind_conflict	   = inet6_csk_bind_conflict,
1598#ifdef CONFIG_COMPAT
1599	.compat_setsockopt = compat_ipv6_setsockopt,
1600	.compat_getsockopt = compat_ipv6_getsockopt,
1601#endif
1602	.mtu_reduced	   = tcp_v6_mtu_reduced,
1603};
1604
1605#ifdef CONFIG_TCP_MD5SIG
1606static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
1607	.md5_lookup	=	tcp_v6_md5_lookup,
1608	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
1609	.md5_parse	=	tcp_v6_parse_md5_keys,
1610};
1611#endif
1612
1613/*
1614 *	TCP over IPv4 via INET6 API
1615 */
1616static const struct inet_connection_sock_af_ops ipv6_mapped = {
1617	.queue_xmit	   = ip_queue_xmit,
1618	.send_check	   = tcp_v4_send_check,
1619	.rebuild_header	   = inet_sk_rebuild_header,
1620	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
1621	.conn_request	   = tcp_v6_conn_request,
1622	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
1623	.net_header_len	   = sizeof(struct iphdr),
1624	.setsockopt	   = ipv6_setsockopt,
1625	.getsockopt	   = ipv6_getsockopt,
1626	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
1627	.sockaddr_len	   = sizeof(struct sockaddr_in6),
1628	.bind_conflict	   = inet6_csk_bind_conflict,
1629#ifdef CONFIG_COMPAT
1630	.compat_setsockopt = compat_ipv6_setsockopt,
1631	.compat_getsockopt = compat_ipv6_getsockopt,
1632#endif
1633	.mtu_reduced	   = tcp_v4_mtu_reduced,
1634};
1635
1636#ifdef CONFIG_TCP_MD5SIG
1637static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
1638	.md5_lookup	=	tcp_v4_md5_lookup,
1639	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
1640	.md5_parse	=	tcp_v6_parse_md5_keys,
1641};
1642#endif
1643
1644/* NOTE: A lot of things are set to zero explicitly by the call to
1645 *       sk_alloc(), so they need not be done here.
1646 */
1647static int tcp_v6_init_sock(struct sock *sk)
1648{
1649	struct inet_connection_sock *icsk = inet_csk(sk);
1650
1651	tcp_init_sock(sk);
1652
1653	icsk->icsk_af_ops = &ipv6_specific;
1654
1655#ifdef CONFIG_TCP_MD5SIG
1656	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
1657#endif
1658
1659	return 0;
1660}
1661
1662static void tcp_v6_destroy_sock(struct sock *sk)
1663{
1664	tcp_v4_destroy_sock(sk);
1665	inet6_destroy_sock(sk);
1666}
1667
1668#ifdef CONFIG_PROC_FS
1669/* Proc filesystem TCPv6 sock list dumping. */
1670static void get_openreq6(struct seq_file *seq,
1671			 const struct request_sock *req, int i)
1672{
1673	long ttd = req->rsk_timer.expires - jiffies;
1674	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
1675	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;
1676
1677	if (ttd < 0)
1678		ttd = 0;
1679
1680	seq_printf(seq,
1681		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1682		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
1683		   i,
1684		   src->s6_addr32[0], src->s6_addr32[1],
1685		   src->s6_addr32[2], src->s6_addr32[3],
1686		   inet_rsk(req)->ir_num,
1687		   dest->s6_addr32[0], dest->s6_addr32[1],
1688		   dest->s6_addr32[2], dest->s6_addr32[3],
1689		   ntohs(inet_rsk(req)->ir_rmt_port),
1690		   TCP_SYN_RECV,
1691		   0, 0, /* could print option size, but that is af dependent. */
1692		   1,   /* timers active (only the expire timer) */
1693		   jiffies_to_clock_t(ttd),
1694		   req->num_timeout,
1695		   from_kuid_munged(seq_user_ns(seq),
1696				    sock_i_uid(req->rsk_listener)),
1697		   0,  /* non standard timer */
1698		   0, /* open_requests have no inode */
1699		   0, req);
1700}
1701
1702static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1703{
1704	const struct in6_addr *dest, *src;
1705	__u16 destp, srcp;
1706	int timer_active;
1707	unsigned long timer_expires;
1708	const struct inet_sock *inet = inet_sk(sp);
1709	const struct tcp_sock *tp = tcp_sk(sp);
1710	const struct inet_connection_sock *icsk = inet_csk(sp);
1711	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
1712	int rx_queue;
1713	int state;
1714
1715	dest  = &sp->sk_v6_daddr;
1716	src   = &sp->sk_v6_rcv_saddr;
1717	destp = ntohs(inet->inet_dport);
1718	srcp  = ntohs(inet->inet_sport);
1719
1720	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
1721		timer_active	= 1;
1722		timer_expires	= icsk->icsk_timeout;
1723	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1724		timer_active	= 4;
1725		timer_expires	= icsk->icsk_timeout;
1726	} else if (timer_pending(&sp->sk_timer)) {
1727		timer_active	= 2;
1728		timer_expires	= sp->sk_timer.expires;
1729	} else {
1730		timer_active	= 0;
1731		timer_expires = jiffies;
1732	}
1733
1734	state = sk_state_load(sp);
1735	if (state == TCP_LISTEN)
1736		rx_queue = sp->sk_ack_backlog;
1737	else
1738		/* Because we don't lock the socket,
1739		 * we might find a transient negative value.
1740		 */
1741		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
1742
1743	seq_printf(seq,
1744		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1745		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
1746		   i,
1747		   src->s6_addr32[0], src->s6_addr32[1],
1748		   src->s6_addr32[2], src->s6_addr32[3], srcp,
1749		   dest->s6_addr32[0], dest->s6_addr32[1],
1750		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
1751		   state,
1752		   tp->write_seq - tp->snd_una,
1753		   rx_queue,
1754		   timer_active,
1755		   jiffies_delta_to_clock_t(timer_expires - jiffies),
1756		   icsk->icsk_retransmits,
1757		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
1758		   icsk->icsk_probes_out,
1759		   sock_i_ino(sp),
1760		   atomic_read(&sp->sk_refcnt), sp,
1761		   jiffies_to_clock_t(icsk->icsk_rto),
1762		   jiffies_to_clock_t(icsk->icsk_ack.ato),
1763		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
1764		   tp->snd_cwnd,
1765		   state == TCP_LISTEN ?
1766			fastopenq->max_qlen :
1767			(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
1768		   );
1769}
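/*
 * Editor's sketch: a row emitted above looks roughly like this for a
 * listener on [::]:22 (all values illustrative):
 *
 *	0: 00000000000000000000000000000000:0016
 *	     00000000000000000000000000000000:0000
 *	     0A 00000000:00000000 00:00000000 00000000 0 0 12345 ...
 *
 * Each address is printed as four %08X words of s6_addr32, so the hex
 * grouping follows the host's 32-bit word layout rather than the
 * RFC 5952 presentation form.
 */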
1770
1771static void get_timewait6_sock(struct seq_file *seq,
1772			       struct inet_timewait_sock *tw, int i)
1773{
1774	long delta = tw->tw_timer.expires - jiffies;
1775	const struct in6_addr *dest, *src;
1776	__u16 destp, srcp;
1777
1778	dest = &tw->tw_v6_daddr;
1779	src  = &tw->tw_v6_rcv_saddr;
1780	destp = ntohs(tw->tw_dport);
1781	srcp  = ntohs(tw->tw_sport);
1782
1783	seq_printf(seq,
1784		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1785		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
1786		   i,
1787		   src->s6_addr32[0], src->s6_addr32[1],
1788		   src->s6_addr32[2], src->s6_addr32[3], srcp,
1789		   dest->s6_addr32[0], dest->s6_addr32[1],
1790		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
1791		   tw->tw_substate, 0, 0,
1792		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
1793		   atomic_read(&tw->tw_refcnt), tw);
1794}
1795
1796static int tcp6_seq_show(struct seq_file *seq, void *v)
1797{
1798	struct tcp_iter_state *st;
1799	struct sock *sk = v;
1800
1801	if (v == SEQ_START_TOKEN) {
1802		seq_puts(seq,
1803			 "  sl  "
1804			 "local_address                         "
1805			 "remote_address                        "
1806			 "st tx_queue rx_queue tr tm->when retrnsmt"
1807			 "   uid  timeout inode\n");
1808		goto out;
1809	}
1810	st = seq->private;
1811
1812	if (sk->sk_state == TCP_TIME_WAIT)
1813		get_timewait6_sock(seq, v, st->num);
1814	else if (sk->sk_state == TCP_NEW_SYN_RECV)
1815		get_openreq6(seq, v, st->num);
1816	else
1817		get_tcp6_sock(seq, v, st->num);
1818out:
1819	return 0;
1820}
1821
1822static const struct file_operations tcp6_afinfo_seq_fops = {
1823	.owner   = THIS_MODULE,
1824	.open    = tcp_seq_open,
1825	.read    = seq_read,
1826	.llseek  = seq_lseek,
1827	.release = seq_release_net
1828};
1829
1830static struct tcp_seq_afinfo tcp6_seq_afinfo = {
1831	.name		= "tcp6",
1832	.family		= AF_INET6,
1833	.seq_fops	= &tcp6_afinfo_seq_fops,
1834	.seq_ops	= {
1835		.show		= tcp6_seq_show,
1836	},
1837};
1838
1839int __net_init tcp6_proc_init(struct net *net)
1840{
1841	return tcp_proc_register(net, &tcp6_seq_afinfo);
1842}
1843
1844void tcp6_proc_exit(struct net *net)
1845{
1846	tcp_proc_unregister(net, &tcp6_seq_afinfo);
1847}
1848#endif
1849
1850static void tcp_v6_clear_sk(struct sock *sk, int size)
1851{
1852	struct inet_sock *inet = inet_sk(sk);
1853
1854	/* we do not want to clear pinet6 field, because of RCU lookups */
1855	sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6));
1856
1857	size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
1858	memset(&inet->pinet6 + 1, 0, size);
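	/* Net effect: every field except pinet6 is now zeroed;
	 * sk_prot_clear_nulls() cleared up to pinet6 and the memset cleared
	 * everything past it, so concurrent RCU lookups still see a valid
	 * pinet6 pointer.
	 */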
1859}
1860
1861struct proto tcpv6_prot = {
1862	.name			= "TCPv6",
1863	.owner			= THIS_MODULE,
1864	.close			= tcp_close,
1865	.connect		= tcp_v6_connect,
1866	.disconnect		= tcp_disconnect,
1867	.accept			= inet_csk_accept,
1868	.ioctl			= tcp_ioctl,
1869	.init			= tcp_v6_init_sock,
1870	.destroy		= tcp_v6_destroy_sock,
1871	.shutdown		= tcp_shutdown,
1872	.setsockopt		= tcp_setsockopt,
1873	.getsockopt		= tcp_getsockopt,
1874	.recvmsg		= tcp_recvmsg,
1875	.sendmsg		= tcp_sendmsg,
1876	.sendpage		= tcp_sendpage,
1877	.backlog_rcv		= tcp_v6_do_rcv,
1878	.release_cb		= tcp_release_cb,
1879	.hash			= inet6_hash,
1880	.unhash			= inet_unhash,
1881	.get_port		= inet_csk_get_port,
1882	.enter_memory_pressure	= tcp_enter_memory_pressure,
1883	.stream_memory_free	= tcp_stream_memory_free,
1884	.sockets_allocated	= &tcp_sockets_allocated,
1885	.memory_allocated	= &tcp_memory_allocated,
1886	.memory_pressure	= &tcp_memory_pressure,
1887	.orphan_count		= &tcp_orphan_count,
1888	.sysctl_mem		= sysctl_tcp_mem,
1889	.sysctl_wmem		= sysctl_tcp_wmem,
1890	.sysctl_rmem		= sysctl_tcp_rmem,
1891	.max_header		= MAX_TCP_HEADER,
1892	.obj_size		= sizeof(struct tcp6_sock),
1893	.slab_flags		= SLAB_DESTROY_BY_RCU,
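	/* With SLAB_DESTROY_BY_RCU, a freed socket may be recycled while RCU
	 * readers still hold a reference; lookups revalidate their match
	 * after taking a refcount, and .clear_sk below preserves pinet6 for
	 * the same reason.
	 */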
1894	.twsk_prot		= &tcp6_timewait_sock_ops,
1895	.rsk_prot		= &tcp6_request_sock_ops,
1896	.h.hashinfo		= &tcp_hashinfo,
1897	.no_autobind		= true,
1898#ifdef CONFIG_COMPAT
1899	.compat_setsockopt	= compat_tcp_setsockopt,
1900	.compat_getsockopt	= compat_tcp_getsockopt,
1901#endif
1902	.clear_sk		= tcp_v6_clear_sk,
1903	.diag_destroy		= tcp_abort,
1904};
1905
1906static const struct inet6_protocol tcpv6_protocol = {
1907	.early_demux	=	tcp_v6_early_demux,
1908	.handler	=	tcp_v6_rcv,
1909	.err_handler	=	tcp_v6_err,
1910	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
1911};
1912
1913static struct inet_protosw tcpv6_protosw = {
1914	.type		=	SOCK_STREAM,
1915	.protocol	=	IPPROTO_TCP,
1916	.prot		=	&tcpv6_prot,
1917	.ops		=	&inet6_stream_ops,
1918	.flags		=	INET_PROTOSW_PERMANENT |
1919				INET_PROTOSW_ICSK,
1920};
1921
1922static int __net_init tcpv6_net_init(struct net *net)
1923{
1924	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
1925				    SOCK_RAW, IPPROTO_TCP, net);
1926}
1927
1928static void __net_exit tcpv6_net_exit(struct net *net)
1929{
1930	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
1931}
1932
1933static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
1934{
1935	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
1936}
1937
1938static struct pernet_operations tcpv6_net_ops = {
1939	.init	    = tcpv6_net_init,
1940	.exit	    = tcpv6_net_exit,
1941	.exit_batch = tcpv6_net_exit_batch,
1942};
1943
1944int __init tcpv6_init(void)
1945{
1946	int ret;
1947
1948	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
1949	if (ret)
1950		goto out;
1951
1952	/* register inet6 protocol */
1953	ret = inet6_register_protosw(&tcpv6_protosw);
1954	if (ret)
1955		goto out_tcpv6_protocol;
1956
1957	ret = register_pernet_subsys(&tcpv6_net_ops);
1958	if (ret)
1959		goto out_tcpv6_protosw;
1960out:
1961	return ret;
1962
1963out_tcpv6_protosw:
1964	inet6_unregister_protosw(&tcpv6_protosw);
1965out_tcpv6_protocol:
1966	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
1967	goto out;
1968}
1969
1970void tcpv6_exit(void)
1971{
1972	unregister_pernet_subsys(&tcpv6_net_ops);
1973	inet6_unregister_protosw(&tcpv6_protosw);
1974	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
1975}
v6.8
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 *	TCP over IPv6
   4 *	Linux INET6 implementation
   5 *
   6 *	Authors:
   7 *	Pedro Roque		<roque@di.fc.ul.pt>
   8 *
   9 *	Based on:
  10 *	linux/net/ipv4/tcp.c
  11 *	linux/net/ipv4/tcp_input.c
  12 *	linux/net/ipv4/tcp_output.c
  13 *
  14 *	Fixes:
  15 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
  16 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
  17 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
  18 *					a single port at the same time.
  19 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
  20 */
  21
  22#include <linux/bottom_half.h>
  23#include <linux/module.h>
  24#include <linux/errno.h>
  25#include <linux/types.h>
  26#include <linux/socket.h>
  27#include <linux/sockios.h>
  28#include <linux/net.h>
  29#include <linux/jiffies.h>
  30#include <linux/in.h>
  31#include <linux/in6.h>
  32#include <linux/netdevice.h>
  33#include <linux/init.h>
  34#include <linux/jhash.h>
  35#include <linux/ipsec.h>
  36#include <linux/times.h>
  37#include <linux/slab.h>
  38#include <linux/uaccess.h>
  39#include <linux/ipv6.h>
  40#include <linux/icmpv6.h>
  41#include <linux/random.h>
  42#include <linux/indirect_call_wrapper.h>
  43
  44#include <net/tcp.h>
  45#include <net/ndisc.h>
  46#include <net/inet6_hashtables.h>
  47#include <net/inet6_connection_sock.h>
  48#include <net/ipv6.h>
  49#include <net/transp_v6.h>
  50#include <net/addrconf.h>
  51#include <net/ip6_route.h>
  52#include <net/ip6_checksum.h>
  53#include <net/inet_ecn.h>
  54#include <net/protocol.h>
  55#include <net/xfrm.h>
  56#include <net/snmp.h>
  57#include <net/dsfield.h>
  58#include <net/timewait_sock.h>
  59#include <net/inet_common.h>
  60#include <net/secure_seq.h>
  61#include <net/busy_poll.h>
  62
  63#include <linux/proc_fs.h>
  64#include <linux/seq_file.h>
  65
  66#include <crypto/hash.h>
  67#include <linux/scatterlist.h>
  68
  69#include <trace/events/tcp.h>
  70
  71static void	tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
  72static void	tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
  73				      struct request_sock *req);
  74
  75INDIRECT_CALLABLE_SCOPE int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
  76
  77static const struct inet_connection_sock_af_ops ipv6_mapped;
  78const struct inet_connection_sock_af_ops ipv6_specific;
  79#if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
  80static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
  81static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
  82#endif
  83
  84/* Helper returning the inet6 address from a given tcp socket.
  85 * It can be used in TCP stack instead of inet6_sk(sk).
  86 * This avoids a dereference and allow compiler optimizations.
  87 * It is a specialized version of inet6_sk_generic().
  88 */
  89#define tcp_inet6_sk(sk) (&container_of_const(tcp_sk(sk), \
  90					      struct tcp6_sock, tcp)->inet6)
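/* Typical use in the TCP fast path:
 *
 *	struct ipv6_pinfo *np = tcp_inet6_sk(sk);
 *
 * which yields the same pointer as inet6_sk(sk) without loading
 * inet_sk(sk)->pinet6.
 */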
  91
  92static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
  93{
  94	struct dst_entry *dst = skb_dst(skb);
  95
  96	if (dst && dst_hold_safe(dst)) {
  97		const struct rt6_info *rt = (const struct rt6_info *)dst;
  98
  99		rcu_assign_pointer(sk->sk_rx_dst, dst);
 100		sk->sk_rx_dst_ifindex = skb->skb_iif;
 101		sk->sk_rx_dst_cookie = rt6_get_cookie(rt);
 102	}
 103}
 104
 105static u32 tcp_v6_init_seq(const struct sk_buff *skb)
 106{
 107	return secure_tcpv6_seq(ipv6_hdr(skb)->daddr.s6_addr32,
 108				ipv6_hdr(skb)->saddr.s6_addr32,
 109				tcp_hdr(skb)->dest,
 110				tcp_hdr(skb)->source);
 111}
 112
 113static u32 tcp_v6_init_ts_off(const struct net *net, const struct sk_buff *skb)
 114{
 115	return secure_tcpv6_ts_off(net, ipv6_hdr(skb)->daddr.s6_addr32,
 116				   ipv6_hdr(skb)->saddr.s6_addr32);
 117}
 118
 119static int tcp_v6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
 120			      int addr_len)
 121{
 122	/* This check is replicated from tcp_v6_connect() and intended to
 123	 * prevent the BPF program called below from accessing bytes that
 124	 * are out of the bounds specified by the user in addr_len.
 125	 */
 126	if (addr_len < SIN6_LEN_RFC2133)
 127		return -EINVAL;
 128
 129	sock_owned_by_me(sk);
 130
 131	return BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr, &addr_len);
 132}
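/* This is wired up as the struct proto ->pre_connect hook, so it runs under
 * the socket lock just before tcp_v6_connect(); hence the sock_owned_by_me()
 * assertion above.
 */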
 133
 134static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 135			  int addr_len)
 136{
 137	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
 138	struct inet_connection_sock *icsk = inet_csk(sk);
 139	struct in6_addr *saddr = NULL, *final_p, final;
 140	struct inet_timewait_death_row *tcp_death_row;
 141	struct ipv6_pinfo *np = tcp_inet6_sk(sk);
 142	struct inet_sock *inet = inet_sk(sk);
 143	struct tcp_sock *tp = tcp_sk(sk);
 144	struct net *net = sock_net(sk);
 145	struct ipv6_txoptions *opt;
 146	struct dst_entry *dst;
 147	struct flowi6 fl6;
 148	int addr_type;
 149	int err;
 150
 151	if (addr_len < SIN6_LEN_RFC2133)
 152		return -EINVAL;
 153
 154	if (usin->sin6_family != AF_INET6)
 155		return -EAFNOSUPPORT;
 156
 157	memset(&fl6, 0, sizeof(fl6));
 158
 159	if (inet6_test_bit(SNDFLOW, sk)) {
 160		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
 161		IP6_ECN_flow_init(fl6.flowlabel);
 162		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
 163			struct ip6_flowlabel *flowlabel;
 164			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
 165			if (IS_ERR(flowlabel))
 166				return -EINVAL;
 167			fl6_sock_release(flowlabel);
 168		}
 169	}
 170
 171	/*
 172	 *	connect() to INADDR_ANY means loopback (BSD'ism).
 173	 */
 174
 175	if (ipv6_addr_any(&usin->sin6_addr)) {
 176		if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
 177			ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
 178					       &usin->sin6_addr);
 179		else
 180			usin->sin6_addr = in6addr_loopback;
 181	}
 182
 183	addr_type = ipv6_addr_type(&usin->sin6_addr);
 184
 185	if (addr_type & IPV6_ADDR_MULTICAST)
 186		return -ENETUNREACH;
 187
 188	if (addr_type&IPV6_ADDR_LINKLOCAL) {
 189		if (addr_len >= sizeof(struct sockaddr_in6) &&
 190		    usin->sin6_scope_id) {
 191			/* If interface is set while binding, indices
 192			 * must coincide.
 193			 */
 194			if (!sk_dev_equal_l3scope(sk, usin->sin6_scope_id))
 195				return -EINVAL;
 196
 197			sk->sk_bound_dev_if = usin->sin6_scope_id;
 198		}
 199
 200		/* Connect to link-local address requires an interface */
 201		if (!sk->sk_bound_dev_if)
 202			return -EINVAL;
 203	}
 204
 205	if (tp->rx_opt.ts_recent_stamp &&
 206	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
 207		tp->rx_opt.ts_recent = 0;
 208		tp->rx_opt.ts_recent_stamp = 0;
 209		WRITE_ONCE(tp->write_seq, 0);
 210	}
 211
 212	sk->sk_v6_daddr = usin->sin6_addr;
 213	np->flow_label = fl6.flowlabel;
 214
 215	/*
 216	 *	TCP over IPv4
 217	 */
 218
 219	if (addr_type & IPV6_ADDR_MAPPED) {
 220		u32 exthdrlen = icsk->icsk_ext_hdr_len;
 221		struct sockaddr_in sin;
 222
 223		if (ipv6_only_sock(sk))
 224			return -ENETUNREACH;
 225
 226		sin.sin_family = AF_INET;
 227		sin.sin_port = usin->sin6_port;
 228		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
 229
 230		/* Paired with READ_ONCE() in tcp_(get|set)sockopt() */
 231		WRITE_ONCE(icsk->icsk_af_ops, &ipv6_mapped);
 232		if (sk_is_mptcp(sk))
 233			mptcpv6_handle_mapped(sk, true);
 234		sk->sk_backlog_rcv = tcp_v4_do_rcv;
 235#if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
 236		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
 237#endif
 238
 239		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
 240
 241		if (err) {
 242			icsk->icsk_ext_hdr_len = exthdrlen;
 243			/* Paired with READ_ONCE() in tcp_(get|set)sockopt() */
 244			WRITE_ONCE(icsk->icsk_af_ops, &ipv6_specific);
 245			if (sk_is_mptcp(sk))
 246				mptcpv6_handle_mapped(sk, false);
 247			sk->sk_backlog_rcv = tcp_v6_do_rcv;
 248#if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
 249			tp->af_specific = &tcp_sock_ipv6_specific;
 250#endif
 251			goto failure;
 252		}
 253		np->saddr = sk->sk_v6_rcv_saddr;
 254
 255		return err;
 256	}
 257
 258	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
 259		saddr = &sk->sk_v6_rcv_saddr;
 260
 261	fl6.flowi6_proto = IPPROTO_TCP;
 262	fl6.daddr = sk->sk_v6_daddr;
 263	fl6.saddr = saddr ? *saddr : np->saddr;
 264	fl6.flowlabel = ip6_make_flowinfo(np->tclass, np->flow_label);
 265	fl6.flowi6_oif = sk->sk_bound_dev_if;
 266	fl6.flowi6_mark = sk->sk_mark;
 267	fl6.fl6_dport = usin->sin6_port;
 268	fl6.fl6_sport = inet->inet_sport;
 269	fl6.flowi6_uid = sk->sk_uid;
 270
 271	opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
 272	final_p = fl6_update_dst(&fl6, opt, &final);
 273
 274	security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6));
 275
 276	dst = ip6_dst_lookup_flow(net, sk, &fl6, final_p);
 277	if (IS_ERR(dst)) {
 278		err = PTR_ERR(dst);
 279		goto failure;
 280	}
 281
 282	tp->tcp_usec_ts = dst_tcp_usec_ts(dst);
 283	tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;
 284
 285	if (!saddr) {
 286		saddr = &fl6.saddr;
 287
 288		err = inet_bhash2_update_saddr(sk, saddr, AF_INET6);
 289		if (err)
 290			goto failure;
 291	}
 292
 293	/* set the source address */
 294	np->saddr = *saddr;
 295	inet->inet_rcv_saddr = LOOPBACK4_IPV6;
 296
 297	sk->sk_gso_type = SKB_GSO_TCPV6;
 298	ip6_dst_store(sk, dst, NULL, NULL);
 299
 300	icsk->icsk_ext_hdr_len = 0;
 301	if (opt)
 302		icsk->icsk_ext_hdr_len = opt->opt_flen +
 303					 opt->opt_nflen;
 304
 305	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
 306
 307	inet->inet_dport = usin->sin6_port;
 308
 309	tcp_set_state(sk, TCP_SYN_SENT);
 310	err = inet6_hash_connect(tcp_death_row, sk);
 311	if (err)
 312		goto late_failure;
 313
 314	sk_set_txhash(sk);
 315
 316	if (likely(!tp->repair)) {
 317		if (!tp->write_seq)
 318			WRITE_ONCE(tp->write_seq,
 319				   secure_tcpv6_seq(np->saddr.s6_addr32,
 320						    sk->sk_v6_daddr.s6_addr32,
 321						    inet->inet_sport,
 322						    inet->inet_dport));
 323		tp->tsoffset = secure_tcpv6_ts_off(net, np->saddr.s6_addr32,
 324						   sk->sk_v6_daddr.s6_addr32);
 325	}
 326
 327	if (tcp_fastopen_defer_connect(sk, &err))
 328		return err;
 329	if (err)
 330		goto late_failure;
 331
 332	err = tcp_connect(sk);
 333	if (err)
 334		goto late_failure;
 335
 336	return 0;
 337
 338late_failure:
 339	tcp_set_state(sk, TCP_CLOSE);
 340	inet_bhash2_reset_saddr(sk);
 341failure:
 342	inet->inet_dport = 0;
 343	sk->sk_route_caps = 0;
 344	return err;
 345}
 346
 347static void tcp_v6_mtu_reduced(struct sock *sk)
 348{
 349	struct dst_entry *dst;
 350	u32 mtu;
 351
 352	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
 353		return;
 354
 355	mtu = READ_ONCE(tcp_sk(sk)->mtu_info);
 356
 357	/* Drop requests trying to increase our current mss.
 358	 * Check done in __ip6_rt_update_pmtu() is too late.
 359	 */
 360	if (tcp_mtu_to_mss(sk, mtu) >= tcp_sk(sk)->mss_cache)
 361		return;
 362
 363	dst = inet6_csk_update_pmtu(sk, mtu);
 364	if (!dst)
 365		return;
 366
 367	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
 368		tcp_sync_mss(sk, dst_mtu(dst));
 369		tcp_simple_retransmit(sk);
 370	}
 371}
 372
 373static int tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 374		u8 type, u8 code, int offset, __be32 info)
 375{
 376	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
 377	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
 378	struct net *net = dev_net(skb->dev);
 379	struct request_sock *fastopen;
 380	struct ipv6_pinfo *np;
 381	struct tcp_sock *tp;
 382	__u32 seq, snd_una;
 383	struct sock *sk;
 384	bool fatal;
 385	int err;
 386
 387	sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
 388					&hdr->daddr, th->dest,
 389					&hdr->saddr, ntohs(th->source),
 390					skb->dev->ifindex, inet6_sdif(skb));
 391
 392	if (!sk) {
 393		__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
 394				  ICMP6_MIB_INERRORS);
 395		return -ENOENT;
 396	}
 397
 398	if (sk->sk_state == TCP_TIME_WAIT) {
 399		/* To increase the counter of ignored icmps for TCP-AO */
 400		tcp_ao_ignore_icmp(sk, AF_INET6, type, code);
 401		inet_twsk_put(inet_twsk(sk));
 402		return 0;
 403	}
 404	seq = ntohl(th->seq);
 405	fatal = icmpv6_err_convert(type, code, &err);
 406	if (sk->sk_state == TCP_NEW_SYN_RECV) {
 407		tcp_req_err(sk, seq, fatal);
 408		return 0;
 409	}
 410
 411	if (tcp_ao_ignore_icmp(sk, AF_INET6, type, code)) {
 412		sock_put(sk);
 413		return 0;
 414	}
 415
 416	bh_lock_sock(sk);
 417	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
 418		__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
 419
 420	if (sk->sk_state == TCP_CLOSE)
 421		goto out;
 422
 423	if (static_branch_unlikely(&ip6_min_hopcount)) {
 424		/* min_hopcount can be changed concurrently from do_ipv6_setsockopt() */
 425		if (ipv6_hdr(skb)->hop_limit < READ_ONCE(tcp_inet6_sk(sk)->min_hopcount)) {
 426			__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
 427			goto out;
 428		}
 429	}
 430
 431	tp = tcp_sk(sk);
 432	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
 433	fastopen = rcu_dereference(tp->fastopen_rsk);
 434	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
 435	if (sk->sk_state != TCP_LISTEN &&
 436	    !between(seq, snd_una, tp->snd_nxt)) {
 437		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
 438		goto out;
 439	}
 440
 441	np = tcp_inet6_sk(sk);
 442
 443	if (type == NDISC_REDIRECT) {
 444		if (!sock_owned_by_user(sk)) {
 445			struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
 446
 447			if (dst)
 448				dst->ops->redirect(dst, sk, skb);
 449		}
 450		goto out;
 451	}
 452
 453	if (type == ICMPV6_PKT_TOOBIG) {
 454		u32 mtu = ntohl(info);
 455
 456		/* We are not interested in TCP_LISTEN and open_requests
 457		 * (SYN-ACKs sent out by Linux are always < 576 bytes so
 458		 * they should go through unfragmented).
 459		 */
 460		if (sk->sk_state == TCP_LISTEN)
 461			goto out;
 462
 463		if (!ip6_sk_accept_pmtu(sk))
 464			goto out;
 465
 466		if (mtu < IPV6_MIN_MTU)
 467			goto out;
 468
 469		WRITE_ONCE(tp->mtu_info, mtu);
 470
 471		if (!sock_owned_by_user(sk))
 472			tcp_v6_mtu_reduced(sk);
 473		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
 474					   &sk->sk_tsq_flags))
 475			sock_hold(sk);
 476		goto out;
 477	}
 478
 479
 480	/* Might be for a request_sock */
 481	switch (sk->sk_state) {
 482	case TCP_SYN_SENT:
 483	case TCP_SYN_RECV:
 484		/* Only in fast or simultaneous open. If a fast open socket is
 485		 * already accepted it is treated as a connected one below.
 486		 */
 487		if (fastopen && !fastopen->sk)
 488			break;
 489
 490		ipv6_icmp_error(sk, skb, err, th->dest, ntohl(info), (u8 *)th);
 491
 492		if (!sock_owned_by_user(sk)) {
 493			WRITE_ONCE(sk->sk_err, err);
 494			sk_error_report(sk);		/* Wake people up to see the error (see connect in sock.c) */
 495
 496			tcp_done(sk);
 497		} else {
 498			WRITE_ONCE(sk->sk_err_soft, err);
 499		}
 500		goto out;
 501	case TCP_LISTEN:
 502		break;
 503	default:
 504		/* check if this ICMP message allows revert of backoff.
 505		 * (see RFC 6069)
 506		 */
 507		if (!fastopen && type == ICMPV6_DEST_UNREACH &&
 508		    code == ICMPV6_NOROUTE)
 509			tcp_ld_RTO_revert(sk, seq);
 510	}
 511
 512	if (!sock_owned_by_user(sk) && inet6_test_bit(RECVERR6, sk)) {
 513		WRITE_ONCE(sk->sk_err, err);
 514		sk_error_report(sk);
 515	} else {
 516		WRITE_ONCE(sk->sk_err_soft, err);
 517	}
 518out:
 519	bh_unlock_sock(sk);
 520	sock_put(sk);
 521	return 0;
 522}
 523
 524
 525static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
 526			      struct flowi *fl,
 527			      struct request_sock *req,
 528			      struct tcp_fastopen_cookie *foc,
 529			      enum tcp_synack_type synack_type,
 530			      struct sk_buff *syn_skb)
 531{
 532	struct inet_request_sock *ireq = inet_rsk(req);
 533	const struct ipv6_pinfo *np = tcp_inet6_sk(sk);
 534	struct ipv6_txoptions *opt;
 535	struct flowi6 *fl6 = &fl->u.ip6;
 536	struct sk_buff *skb;
 537	int err = -ENOMEM;
 538	u8 tclass;
 539
 540	/* First, grab a route. */
 541	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
 542					       IPPROTO_TCP)) == NULL)
 543		goto done;
 544
 545	skb = tcp_make_synack(sk, dst, req, foc, synack_type, syn_skb);
 546
 547	if (skb) {
 548		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
 549				    &ireq->ir_v6_rmt_addr);
 550
 551		fl6->daddr = ireq->ir_v6_rmt_addr;
 552		if (inet6_test_bit(REPFLOW, sk) && ireq->pktopts)
 553			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
 554
 555		tclass = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reflect_tos) ?
 556				(tcp_rsk(req)->syn_tos & ~INET_ECN_MASK) |
 557				(np->tclass & INET_ECN_MASK) :
 558				np->tclass;
 559
 560		if (!INET_ECN_is_capable(tclass) &&
 561		    tcp_bpf_ca_needs_ecn((struct sock *)req))
 562			tclass |= INET_ECN_ECT_0;
 563
 564		rcu_read_lock();
 565		opt = ireq->ipv6_opt;
 566		if (!opt)
 567			opt = rcu_dereference(np->opt);
 568		err = ip6_xmit(sk, skb, fl6, skb->mark ? : READ_ONCE(sk->sk_mark),
 569			       opt, tclass, READ_ONCE(sk->sk_priority));
 570		rcu_read_unlock();
 571		err = net_xmit_eval(err);
 572	}
 573
 574done:
 575	return err;
 576}
 577
 578
 579static void tcp_v6_reqsk_destructor(struct request_sock *req)
 580{
 581	kfree(inet_rsk(req)->ipv6_opt);
 582	consume_skb(inet_rsk(req)->pktopts);
 583}
 584
 585#ifdef CONFIG_TCP_MD5SIG
 586static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
 587						   const struct in6_addr *addr,
 588						   int l3index)
 589{
 590	return tcp_md5_do_lookup(sk, l3index,
 591				 (union tcp_md5_addr *)addr, AF_INET6);
 592}
 593
 594static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
 595						const struct sock *addr_sk)
 596{
 597	int l3index;
 598
 599	l3index = l3mdev_master_ifindex_by_index(sock_net(sk),
 600						 addr_sk->sk_bound_dev_if);
 601	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr,
 602				    l3index);
 603}
 604
 605static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
 606				 sockptr_t optval, int optlen)
 607{
 608	struct tcp_md5sig cmd;
 609	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
 610	union tcp_ao_addr *addr;
 611	int l3index = 0;
 612	u8 prefixlen;
 613	bool l3flag;
 614	u8 flags;
 615
 616	if (optlen < sizeof(cmd))
 617		return -EINVAL;
 618
 619	if (copy_from_sockptr(&cmd, optval, sizeof(cmd)))
 620		return -EFAULT;
 621
 622	if (sin6->sin6_family != AF_INET6)
 623		return -EINVAL;
 624
 625	flags = cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX;
 626	l3flag = cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX;
 627
 628	if (optname == TCP_MD5SIG_EXT &&
 629	    cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
 630		prefixlen = cmd.tcpm_prefixlen;
 631		if (prefixlen > 128 || (ipv6_addr_v4mapped(&sin6->sin6_addr) &&
 632					prefixlen > 32))
 633			return -EINVAL;
 634	} else {
 635		prefixlen = ipv6_addr_v4mapped(&sin6->sin6_addr) ? 32 : 128;
 636	}
 637
 638	if (optname == TCP_MD5SIG_EXT && cmd.tcpm_ifindex &&
 639	    cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX) {
 640		struct net_device *dev;
 641
 642		rcu_read_lock();
 643		dev = dev_get_by_index_rcu(sock_net(sk), cmd.tcpm_ifindex);
 644		if (dev && netif_is_l3_master(dev))
 645			l3index = dev->ifindex;
 646		rcu_read_unlock();
 647
 648		/* ok to check the set/not-set result outside of rcu;
 649		 * right now the device MUST be an L3 master
 650		 */
 651		if (!dev || !l3index)
 652			return -EINVAL;
 653	}
 654
 655	if (!cmd.tcpm_keylen) {
 656		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
 657			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
 658					      AF_INET, prefixlen,
 659					      l3index, flags);
 660		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
 661				      AF_INET6, prefixlen, l3index, flags);
 662	}
 663
 664	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
 665		return -EINVAL;
 666
 667	if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
 668		addr = (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3];
 669
 670		/* Don't allow keys for peers that have a matching TCP-AO key.
 671		 * See the comment in tcp_ao_add_cmd()
 672		 */
 673		if (tcp_ao_required(sk, addr, AF_INET,
 674				    l3flag ? l3index : -1, false))
 675			return -EKEYREJECTED;
 676		return tcp_md5_do_add(sk, addr,
 677				      AF_INET, prefixlen, l3index, flags,
 678				      cmd.tcpm_key, cmd.tcpm_keylen);
 679	}
 680
 681	addr = (union tcp_md5_addr *)&sin6->sin6_addr;
 682
 683	/* Don't allow keys for peers that have a matching TCP-AO key.
 684	 * See the comment in tcp_ao_add_cmd()
 685	 */
 686	if (tcp_ao_required(sk, addr, AF_INET6, l3flag ? l3index : -1, false))
 687		return -EKEYREJECTED;
 688
 689	return tcp_md5_do_add(sk, addr, AF_INET6, prefixlen, l3index, flags,
 690			      cmd.tcpm_key, cmd.tcpm_keylen);
 691}
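/* Minimal userspace sketch for installing a key through this path
 * (illustrative only, error handling omitted):
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 4 };
 *	struct sockaddr_in6 *a = (struct sockaddr_in6 *)&md5.tcpm_addr;
 *
 *	a->sin6_family = AF_INET6;
 *	inet_pton(AF_INET6, "2001:db8::1", &a->sin6_addr);
 *	memcpy(md5.tcpm_key, "key1", 4);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 */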
 692
 693static int tcp_v6_md5_hash_headers(struct tcp_sigpool *hp,
 694				   const struct in6_addr *daddr,
 695				   const struct in6_addr *saddr,
 696				   const struct tcphdr *th, int nbytes)
 697{
 698	struct tcp6_pseudohdr *bp;
 699	struct scatterlist sg;
 700	struct tcphdr *_th;
 701
 702	bp = hp->scratch;
 703	/* 1. TCP pseudo-header (RFC2460) */
 704	bp->saddr = *saddr;
 705	bp->daddr = *daddr;
 706	bp->protocol = cpu_to_be32(IPPROTO_TCP);
 707	bp->len = cpu_to_be32(nbytes);
 708
 709	_th = (struct tcphdr *)(bp + 1);
 710	memcpy(_th, th, sizeof(*th));
 711	_th->check = 0;
 712
 713	sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
 714	ahash_request_set_crypt(hp->req, &sg, NULL,
 715				sizeof(*bp) + sizeof(*th));
 716	return crypto_ahash_update(hp->req);
 717}
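/* The scratch buffer hashed above carries the RFC 2460 pseudo-header:
 *
 *	struct tcp6_pseudohdr {
 *		struct in6_addr	saddr;
 *		struct in6_addr	daddr;
 *		__be32		len;		TCP length (header + payload)
 *		__be32		protocol;	IPPROTO_TCP
 *	};
 *
 * followed by a copy of the TCP header with its checksum field zeroed.
 */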
 718
 719static int tcp_v6_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
 720			       const struct in6_addr *daddr, struct in6_addr *saddr,
 721			       const struct tcphdr *th)
 722{
 723	struct tcp_sigpool hp;
 724
 725	if (tcp_sigpool_start(tcp_md5_sigpool_id, &hp))
 726		goto clear_hash_nostart;
 727
 728	if (crypto_ahash_init(hp.req))
 729		goto clear_hash;
 730	if (tcp_v6_md5_hash_headers(&hp, daddr, saddr, th, th->doff << 2))
 731		goto clear_hash;
 732	if (tcp_md5_hash_key(&hp, key))
 733		goto clear_hash;
 734	ahash_request_set_crypt(hp.req, NULL, md5_hash, 0);
 735	if (crypto_ahash_final(hp.req))
 736		goto clear_hash;
 737
 738	tcp_sigpool_end(&hp);
 739	return 0;
 740
 741clear_hash:
 742	tcp_sigpool_end(&hp);
 743clear_hash_nostart:
 744	memset(md5_hash, 0, 16);
 745	return 1;
 746}
 747
 748static int tcp_v6_md5_hash_skb(char *md5_hash,
 749			       const struct tcp_md5sig_key *key,
 750			       const struct sock *sk,
 751			       const struct sk_buff *skb)
 752{
 753	const struct tcphdr *th = tcp_hdr(skb);
 754	const struct in6_addr *saddr, *daddr;
 755	struct tcp_sigpool hp;
 756
 757	if (sk) { /* valid for establish/request sockets */
 758		saddr = &sk->sk_v6_rcv_saddr;
 759		daddr = &sk->sk_v6_daddr;
 760	} else {
 761		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
 762		saddr = &ip6h->saddr;
 763		daddr = &ip6h->daddr;
 764	}
 765
 766	if (tcp_sigpool_start(tcp_md5_sigpool_id, &hp))
 767		goto clear_hash_nostart;
 768
 769	if (crypto_ahash_init(hp.req))
 770		goto clear_hash;
 771
 772	if (tcp_v6_md5_hash_headers(&hp, daddr, saddr, th, skb->len))
 773		goto clear_hash;
 774	if (tcp_sigpool_hash_skb_data(&hp, skb, th->doff << 2))
 775		goto clear_hash;
 776	if (tcp_md5_hash_key(&hp, key))
 777		goto clear_hash;
 778	ahash_request_set_crypt(hp.req, NULL, md5_hash, 0);
 779	if (crypto_ahash_final(hp.req))
 780		goto clear_hash;
 781
 782	tcp_sigpool_end(&hp);
 783	return 0;
 784
 785clear_hash:
 786	tcp_sigpool_end(&hp);
 787clear_hash_nostart:
 788	memset(md5_hash, 0, 16);
 789	return 1;
 790}
 791#endif
 792
 793static void tcp_v6_init_req(struct request_sock *req,
 794			    const struct sock *sk_listener,
 795			    struct sk_buff *skb)
 796{
 797	bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);
 798	struct inet_request_sock *ireq = inet_rsk(req);
 799	const struct ipv6_pinfo *np = tcp_inet6_sk(sk_listener);
 800
 801	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
 802	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
 803
 804	/* So that link locals have meaning */
 805	if ((!sk_listener->sk_bound_dev_if || l3_slave) &&
 806	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
 807		ireq->ir_iif = tcp_v6_iif(skb);
 808
 809	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
 810	    (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
 811	     np->rxopt.bits.rxinfo ||
 812	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
 813	     np->rxopt.bits.rxohlim || inet6_test_bit(REPFLOW, sk_listener))) {
 814		refcount_inc(&skb->users);
 815		ireq->pktopts = skb;
 816	}
 817}
 818
 819static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
 820					  struct sk_buff *skb,
 821					  struct flowi *fl,
 822					  struct request_sock *req)
 823{
 824	tcp_v6_init_req(req, sk, skb);
 825
 826	if (security_inet_conn_request(sk, skb, req))
 827		return NULL;
 828
 829	return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
 830}
 831
 832struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
 833	.family		=	AF_INET6,
 834	.obj_size	=	sizeof(struct tcp6_request_sock),
 835	.rtx_syn_ack	=	tcp_rtx_synack,
 836	.send_ack	=	tcp_v6_reqsk_send_ack,
 837	.destructor	=	tcp_v6_reqsk_destructor,
 838	.send_reset	=	tcp_v6_send_reset,
 839	.syn_ack_timeout =	tcp_syn_ack_timeout,
 840};
 841
 842const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
 843	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
 844				sizeof(struct ipv6hdr),
 845#ifdef CONFIG_TCP_MD5SIG
 846	.req_md5_lookup	=	tcp_v6_md5_lookup,
 847	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
 848#endif
 849#ifdef CONFIG_TCP_AO
 850	.ao_lookup	=	tcp_v6_ao_lookup_rsk,
 851	.ao_calc_key	=	tcp_v6_ao_calc_key_rsk,
 852	.ao_synack_hash =	tcp_v6_ao_synack_hash,
 853#endif
 854#ifdef CONFIG_SYN_COOKIES
 855	.cookie_init_seq =	cookie_v6_init_sequence,
 856#endif
 857	.route_req	=	tcp_v6_route_req,
 858	.init_seq	=	tcp_v6_init_seq,
 859	.init_ts_off	=	tcp_v6_init_ts_off,
 860	.send_synack	=	tcp_v6_send_synack,
 861};
 862
 863static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
 864				 u32 ack, u32 win, u32 tsval, u32 tsecr,
 865				 int oif, int rst, u8 tclass, __be32 label,
 866				 u32 priority, u32 txhash, struct tcp_key *key)
 867{
 868	const struct tcphdr *th = tcp_hdr(skb);
 869	struct tcphdr *t1;
 870	struct sk_buff *buff;
 871	struct flowi6 fl6;
 872	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
 873	struct sock *ctl_sk = net->ipv6.tcp_sk;
 874	unsigned int tot_len = sizeof(struct tcphdr);
 875	__be32 mrst = 0, *topt;
 876	struct dst_entry *dst;
 877	__u32 mark = 0;
 878
 879	if (tsecr)
 880		tot_len += TCPOLEN_TSTAMP_ALIGNED;
 881	if (tcp_key_is_md5(key))
 882		tot_len += TCPOLEN_MD5SIG_ALIGNED;
 883	if (tcp_key_is_ao(key))
 884		tot_len += tcp_ao_len_aligned(key->ao_key);
 885
 886#ifdef CONFIG_MPTCP
 887	if (rst && !tcp_key_is_md5(key)) {
 888		mrst = mptcp_reset_option(skb);
 889
 890		if (mrst)
 891			tot_len += sizeof(__be32);
 892	}
 893#endif
 894
 895	buff = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
 896	if (!buff)
 897		return;
 898
 899	skb_reserve(buff, MAX_TCP_HEADER);
 900
 901	t1 = skb_push(buff, tot_len);
 902	skb_reset_transport_header(buff);
 903
 904	/* Swap the send and the receive. */
 905	memset(t1, 0, sizeof(*t1));
 906	t1->dest = th->source;
 907	t1->source = th->dest;
 908	t1->doff = tot_len / 4;
 909	t1->seq = htonl(seq);
 910	t1->ack_seq = htonl(ack);
 911	t1->ack = !rst || !th->ack;
 912	t1->rst = rst;
 913	t1->window = htons(win);
 914
 915	topt = (__be32 *)(t1 + 1);
 916
 917	if (tsecr) {
 918		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
 919				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
 920		*topt++ = htonl(tsval);
 921		*topt++ = htonl(tsecr);
 922	}
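	/* Wire format written by the branch above: two NOPs for 32-bit
	 * alignment, then kind 8 (TCPOPT_TIMESTAMP), length 10, TSval, TSecr.
	 */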
 923
 924	if (mrst)
 925		*topt++ = mrst;
 926
 927#ifdef CONFIG_TCP_MD5SIG
 928	if (tcp_key_is_md5(key)) {
 929		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
 930				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
 931		tcp_v6_md5_hash_hdr((__u8 *)topt, key->md5_key,
 932				    &ipv6_hdr(skb)->saddr,
 933				    &ipv6_hdr(skb)->daddr, t1);
 934	}
 935#endif
 936#ifdef CONFIG_TCP_AO
 937	if (tcp_key_is_ao(key)) {
 938		*topt++ = htonl((TCPOPT_AO << 24) |
 939				(tcp_ao_len(key->ao_key) << 16) |
 940				(key->ao_key->sndid << 8) |
 941				(key->rcv_next));
 942
 943		tcp_ao_hash_hdr(AF_INET6, (char *)topt, key->ao_key,
 944				key->traffic_key,
 945				(union tcp_ao_addr *)&ipv6_hdr(skb)->saddr,
 946				(union tcp_ao_addr *)&ipv6_hdr(skb)->daddr,
 947				t1, key->sne);
 948	}
 949#endif
 950
 951	memset(&fl6, 0, sizeof(fl6));
 952	fl6.daddr = ipv6_hdr(skb)->saddr;
 953	fl6.saddr = ipv6_hdr(skb)->daddr;
 954	fl6.flowlabel = label;
 955
 956	buff->ip_summed = CHECKSUM_PARTIAL;
 957
 958	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
 959
 960	fl6.flowi6_proto = IPPROTO_TCP;
 961	if (rt6_need_strict(&fl6.daddr) && !oif)
 962		fl6.flowi6_oif = tcp_v6_iif(skb);
 963	else {
 964		if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
 965			oif = skb->skb_iif;
 966
 967		fl6.flowi6_oif = oif;
 968	}
 969
 970	if (sk) {
 971		if (sk->sk_state == TCP_TIME_WAIT)
 972			mark = inet_twsk(sk)->tw_mark;
 973		else
 974			mark = READ_ONCE(sk->sk_mark);
 975		skb_set_delivery_time(buff, tcp_transmit_time(sk), true);
 976	}
 977	if (txhash) {
 978		/* autoflowlabel/skb_get_hash_flowi6 rely on buff->hash */
 979		skb_set_hash(buff, txhash, PKT_HASH_TYPE_L4);
 980	}
 981	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark) ?: mark;
 982	fl6.fl6_dport = t1->dest;
 983	fl6.fl6_sport = t1->source;
 984	fl6.flowi6_uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
 985	security_skb_classify_flow(skb, flowi6_to_flowi_common(&fl6));
 986
 987	/* Pass a socket to ip6_dst_lookup: either it is for an RST, or the
 988	 * underlying function will use it to retrieve the network
 989	 * namespace.
 990	 */
 991	if (sk && sk->sk_state != TCP_TIME_WAIT)
 992		dst = ip6_dst_lookup_flow(net, sk, &fl6, NULL); /*sk's xfrm_policy can be referred*/
 993	else
 994		dst = ip6_dst_lookup_flow(net, ctl_sk, &fl6, NULL);
 995	if (!IS_ERR(dst)) {
 996		skb_dst_set(buff, dst);
 997		ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL,
 998			 tclass & ~INET_ECN_MASK, priority);
 999		TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
1000		if (rst)
1001			TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
1002		return;
1003	}
1004
1005	kfree_skb(buff);
1006}
1007
1008static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
1009{
1010	const struct tcphdr *th = tcp_hdr(skb);
1011	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
1012	const __u8 *md5_hash_location = NULL;
1013#if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
1014	bool allocated_traffic_key = false;
1015#endif
1016	const struct tcp_ao_hdr *aoh;
1017	struct tcp_key key = {};
1018	u32 seq = 0, ack_seq = 0;
1019	__be32 label = 0;
1020	u32 priority = 0;
1021	struct net *net;
1022	u32 txhash = 0;
1023	int oif = 0;
1024#ifdef CONFIG_TCP_MD5SIG
1025	unsigned char newhash[16];
1026	int genhash;
1027	struct sock *sk1 = NULL;
1028#endif
1029
1030	if (th->rst)
1031		return;
1032
1033	/* If sk is not NULL, it means we did a successful lookup and the
1034	 * incoming route had to be correct. prequeue might have dropped our dst.
1035	 */
1036	if (!sk && !ipv6_unicast_destination(skb))
1037		return;
1038
1039	net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
1040	/* Invalid TCP option size or twice included auth */
1041	if (tcp_parse_auth_options(th, &md5_hash_location, &aoh))
1042		return;
1043#if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
1044	rcu_read_lock();
1045#endif
1046#ifdef CONFIG_TCP_MD5SIG
1047	if (sk && sk_fullsock(sk)) {
1048		int l3index;
1049
1050		/* sdif set, means packet ingressed via a device
1051		 * in an L3 domain and inet_iif is set to it.
1052		 */
1053		l3index = tcp_v6_sdif(skb) ? tcp_v6_iif_l3_slave(skb) : 0;
1054		key.md5_key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr, l3index);
1055		if (key.md5_key)
1056			key.type = TCP_KEY_MD5;
1057	} else if (md5_hash_location) {
1058		int dif = tcp_v6_iif_l3_slave(skb);
1059		int sdif = tcp_v6_sdif(skb);
1060		int l3index;
1061
1062		/*
1063		 * active side is lost. Try to find the listening socket through
1064		 * the source port, and then find the md5 key through it.
1065		 * We do not lose security here:
1066		 * the incoming packet is checked against the md5 hash of the
1067		 * found key; no RST is generated if the hashes don't match.
1068		 */
1069		sk1 = inet6_lookup_listener(net, net->ipv4.tcp_death_row.hashinfo,
1070					    NULL, 0, &ipv6h->saddr, th->source,
1071					    &ipv6h->daddr, ntohs(th->source),
1072					    dif, sdif);
1073		if (!sk1)
1074			goto out;
1075
1076		/* sdif set, means packet ingressed via a device
1077		 * in an L3 domain and dif is set to it.
1078		 */
1079		l3index = tcp_v6_sdif(skb) ? dif : 0;
1080
1081		key.md5_key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr, l3index);
1082		if (!key.md5_key)
1083			goto out;
1084		key.type = TCP_KEY_MD5;
1085
1086		genhash = tcp_v6_md5_hash_skb(newhash, key.md5_key, NULL, skb);
1087		if (genhash || memcmp(md5_hash_location, newhash, 16) != 0)
1088			goto out;
1089	}
1090#endif
1091
1092	if (th->ack)
1093		seq = ntohl(th->ack_seq);
1094	else
1095		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
1096			  (th->doff << 2);
1097
1098#ifdef CONFIG_TCP_AO
1099	if (aoh) {
1100		int l3index;
1101
1102		l3index = tcp_v6_sdif(skb) ? tcp_v6_iif_l3_slave(skb) : 0;
1103		if (tcp_ao_prepare_reset(sk, skb, aoh, l3index, seq,
1104					 &key.ao_key, &key.traffic_key,
1105					 &allocated_traffic_key,
1106					 &key.rcv_next, &key.sne))
1107			goto out;
1108		key.type = TCP_KEY_AO;
1109	}
1110#endif
1111
1112	if (sk) {
1113		oif = sk->sk_bound_dev_if;
1114		if (sk_fullsock(sk)) {
1115			trace_tcp_send_reset(sk, skb);
1116			if (inet6_test_bit(REPFLOW, sk))
1117				label = ip6_flowlabel(ipv6h);
1118			priority = READ_ONCE(sk->sk_priority);
1119			txhash = sk->sk_txhash;
1120		}
1121		if (sk->sk_state == TCP_TIME_WAIT) {
1122			label = cpu_to_be32(inet_twsk(sk)->tw_flowlabel);
1123			priority = inet_twsk(sk)->tw_priority;
1124			txhash = inet_twsk(sk)->tw_txhash;
1125		}
1126	} else {
1127		if (net->ipv6.sysctl.flowlabel_reflect & FLOWLABEL_REFLECT_TCP_RESET)
1128			label = ip6_flowlabel(ipv6h);
1129	}
1130
1131	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, 1,
1132			     ipv6_get_dsfield(ipv6h), label, priority, txhash,
1133			     &key);
1134
1135#if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
1136out:
1137	if (allocated_traffic_key)
1138		kfree(key.traffic_key);
1139	rcu_read_unlock();
1140#endif
1141}
1142
1143static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
1144			    u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
1145			    struct tcp_key *key, u8 tclass,
1146			    __be32 label, u32 priority, u32 txhash)
1147{
1148	tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, 0,
1149			     tclass, label, priority, txhash, key);
1150}
1151
1152static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
1153{
1154	struct inet_timewait_sock *tw = inet_twsk(sk);
1155	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
1156	struct tcp_key key = {};
1157#ifdef CONFIG_TCP_AO
1158	struct tcp_ao_info *ao_info;
1159
1160	if (static_branch_unlikely(&tcp_ao_needed.key)) {
1161
1162		/* FIXME: the segment to-be-acked is not verified yet */
1163		ao_info = rcu_dereference(tcptw->ao_info);
1164		if (ao_info) {
1165			const struct tcp_ao_hdr *aoh;
1166
1167			/* Invalid TCP option size or twice included auth */
1168			if (tcp_parse_auth_options(tcp_hdr(skb), NULL, &aoh))
1169				goto out;
1170			if (aoh)
1171				key.ao_key = tcp_ao_established_key(ao_info,
1172						aoh->rnext_keyid, -1);
1173		}
1174	}
1175	if (key.ao_key) {
1176		struct tcp_ao_key *rnext_key;
1177
1178		key.traffic_key = snd_other_key(key.ao_key);
1179		/* rcv_next switches to our rcv_next */
1180		rnext_key = READ_ONCE(ao_info->rnext_key);
1181		key.rcv_next = rnext_key->rcvid;
1182		key.sne = READ_ONCE(ao_info->snd_sne);
1183		key.type = TCP_KEY_AO;
1184#else
1185	if (0) {
1186#endif
1187#ifdef CONFIG_TCP_MD5SIG
1188	} else if (static_branch_unlikely(&tcp_md5_needed.key)) {
1189		key.md5_key = tcp_twsk_md5_key(tcptw);
1190		if (key.md5_key)
1191			key.type = TCP_KEY_MD5;
1192#endif
1193	}
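	/* The #else / if (0) pairing above keeps this else-if chain
	 * syntactically intact whichever of CONFIG_TCP_AO and
	 * CONFIG_TCP_MD5SIG is built in.
	 */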
1194
1195	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
1196			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
1197			tcp_tw_tsval(tcptw),
1198			tcptw->tw_ts_recent, tw->tw_bound_dev_if, &key,
1199			tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel), tw->tw_priority,
1200			tw->tw_txhash);
1201
1202#ifdef CONFIG_TCP_AO
1203out:
1204#endif
1205	inet_twsk_put(tw);
1206}
1207
1208static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
1209				  struct request_sock *req)
1210{
1211	struct tcp_key key = {};
1212
1213#ifdef CONFIG_TCP_AO
1214	if (static_branch_unlikely(&tcp_ao_needed.key) &&
1215	    tcp_rsk_used_ao(req)) {
1216		const struct in6_addr *addr = &ipv6_hdr(skb)->saddr;
1217		const struct tcp_ao_hdr *aoh;
1218		int l3index;
1219
1220		l3index = tcp_v6_sdif(skb) ? tcp_v6_iif_l3_slave(skb) : 0;
1221		/* Invalid TCP option size or twice included auth */
1222		if (tcp_parse_auth_options(tcp_hdr(skb), NULL, &aoh))
1223			return;
1224		if (!aoh)
1225			return;
1226		key.ao_key = tcp_ao_do_lookup(sk, l3index,
1227					      (union tcp_ao_addr *)addr,
1228					      AF_INET6, aoh->rnext_keyid, -1);
1229		if (unlikely(!key.ao_key)) {
1230			/* Send ACK with any matching MKT for the peer */
1231			key.ao_key = tcp_ao_do_lookup(sk, l3index,
1232						      (union tcp_ao_addr *)addr,
1233						      AF_INET6, -1, -1);
1234			/* Matching key disappeared (user removed the key?)
1235			 * let the handshake time out.
1236			 */
1237			if (!key.ao_key) {
1238				net_info_ratelimited("TCP-AO key for (%pI6, %d)->(%pI6, %d) suddenly disappeared, won't ACK new connection\n",
1239						     addr,
1240						     ntohs(tcp_hdr(skb)->source),
1241						     &ipv6_hdr(skb)->daddr,
1242						     ntohs(tcp_hdr(skb)->dest));
1243				return;
1244			}
1245		}
1246		key.traffic_key = kmalloc(tcp_ao_digest_size(key.ao_key), GFP_ATOMIC);
1247		if (!key.traffic_key)
1248			return;
1249
1250		key.type = TCP_KEY_AO;
1251		key.rcv_next = aoh->keyid;
1252		tcp_v6_ao_calc_key_rsk(key.ao_key, key.traffic_key, req);
1253#else
1254	if (0) {
1255#endif
1256#ifdef CONFIG_TCP_MD5SIG
1257	} else if (static_branch_unlikely(&tcp_md5_needed.key)) {
1258		int l3index = tcp_v6_sdif(skb) ? tcp_v6_iif_l3_slave(skb) : 0;
1259
1260		key.md5_key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr,
1261						   l3index);
1262		if (key.md5_key)
1263			key.type = TCP_KEY_MD5;
1264#endif
1265	}
1266
1267	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
1268	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
1269	 */
1270	/* RFC 7323 2.3
1271	 * The window field (SEG.WND) of every outgoing segment, with the
1272	 * exception of <SYN> segments, MUST be right-shifted by
1273	 * Rcv.Wind.Shift bits:
1274	 */
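	/* e.g. a 65536-byte receive window with Rcv.Wind.Shift == 7 is
	 * advertised as 512.
	 */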
1275	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
1276			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
1277			tcp_rsk(req)->rcv_nxt,
1278			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
1279			tcp_rsk_tsval(tcp_rsk(req)),
1280			READ_ONCE(req->ts_recent), sk->sk_bound_dev_if,
1281			&key, ipv6_get_dsfield(ipv6_hdr(skb)), 0,
1282			READ_ONCE(sk->sk_priority),
1283			READ_ONCE(tcp_rsk(req)->txhash));
1284	if (tcp_key_is_ao(&key))
1285		kfree(key.traffic_key);
1286}
1287
1288
1289static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
1290{
1291#ifdef CONFIG_SYN_COOKIES
1292	const struct tcphdr *th = tcp_hdr(skb);
1293
1294	if (!th->syn)
1295		sk = cookie_v6_check(sk, skb);
1296#endif
1297	return sk;
1298}
1299
1300u16 tcp_v6_get_syncookie(struct sock *sk, struct ipv6hdr *iph,
1301			 struct tcphdr *th, u32 *cookie)
1302{
1303	u16 mss = 0;
1304#ifdef CONFIG_SYN_COOKIES
1305	mss = tcp_get_syncookie_mss(&tcp6_request_sock_ops,
1306				    &tcp_request_sock_ipv6_ops, sk, th);
1307	if (mss) {
1308		*cookie = __cookie_v6_init_sequence(iph, th, &mss);
1309		tcp_synq_overflow(sk);
1310	}
1311#endif
1312	return mss;
1313}
1314
1315static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1316{
1317	if (skb->protocol == htons(ETH_P_IP))
1318		return tcp_v4_conn_request(sk, skb);
1319
1320	if (!ipv6_unicast_destination(skb))
1321		goto drop;
1322
1323	if (ipv6_addr_v4mapped(&ipv6_hdr(skb)->saddr)) {
1324		__IP6_INC_STATS(sock_net(sk), NULL, IPSTATS_MIB_INHDRERRORS);
1325		return 0;
1326	}
1327
1328	return tcp_conn_request(&tcp6_request_sock_ops,
1329				&tcp_request_sock_ipv6_ops, sk, skb);
1330
1331drop:
1332	tcp_listendrop(sk);
1333	return 0; /* don't send reset */
1334}
1335
1336static void tcp_v6_restore_cb(struct sk_buff *skb)
1337{
1338	/* We need to move header back to the beginning if xfrm6_policy_check()
1339	 * and tcp_v6_fill_cb() are going to be called again.
1340	 * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
1341	 */
1342	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
1343		sizeof(struct inet6_skb_parm));
1344}
1345
1346static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
1347					 struct request_sock *req,
1348					 struct dst_entry *dst,
1349					 struct request_sock *req_unhash,
1350					 bool *own_req)
1351{
1352	struct inet_request_sock *ireq;
1353	struct ipv6_pinfo *newnp;
1354	const struct ipv6_pinfo *np = tcp_inet6_sk(sk);
1355	struct ipv6_txoptions *opt;
1356	struct inet_sock *newinet;
1357	bool found_dup_sk = false;
1358	struct tcp_sock *newtp;
1359	struct sock *newsk;
1360#ifdef CONFIG_TCP_MD5SIG
1361	struct tcp_md5sig_key *key;
1362	int l3index;
1363#endif
1364	struct flowi6 fl6;
1365
1366	if (skb->protocol == htons(ETH_P_IP)) {
1367		/*
1368		 *	v6 mapped
1369		 */
1370
1371		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
1372					     req_unhash, own_req);
1373
1374		if (!newsk)
1375			return NULL;
1376
1377		inet_sk(newsk)->pinet6 = tcp_inet6_sk(newsk);
1378
1379		newnp = tcp_inet6_sk(newsk);
1380		newtp = tcp_sk(newsk);
1381
1382		memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1383
1384		newnp->saddr = newsk->sk_v6_rcv_saddr;
1385
1386		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1387		if (sk_is_mptcp(newsk))
1388			mptcpv6_handle_mapped(newsk, true);
1389		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
1390#if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
1391		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1392#endif
1393
1394		newnp->ipv6_mc_list = NULL;
1395		newnp->ipv6_ac_list = NULL;
1396		newnp->ipv6_fl_list = NULL;
1397		newnp->pktoptions  = NULL;
1398		newnp->opt	   = NULL;
1399		newnp->mcast_oif   = inet_iif(skb);
1400		newnp->mcast_hops  = ip_hdr(skb)->ttl;
1401		newnp->rcv_flowinfo = 0;
1402		if (inet6_test_bit(REPFLOW, sk))
1403			newnp->flow_label = 0;
1404
1405		/*
1406		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1407		 * here, tcp_create_openreq_child now does this for us, see the comment in
1408		 * that function for the gory details. -acme
1409		 */
1410
1411		/* It is a tricky place. Until this moment IPv4 tcp
1412		   worked with IPv6 icsk.icsk_af_ops.
1413		   Sync it now.
1414		 */
1415		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
1416
1417		return newsk;
1418	}
1419
1420	ireq = inet_rsk(req);
1421
1422	if (sk_acceptq_is_full(sk))
1423		goto out_overflow;
1424
1425	if (!dst) {
1426		dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
1427		if (!dst)
1428			goto out;
1429	}
1430
1431	newsk = tcp_create_openreq_child(sk, req, skb);
1432	if (!newsk)
1433		goto out_nonewsk;
1434
1435	/*
1436	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1437	 * count here, tcp_create_openreq_child now does this for us, see the
1438	 * comment in that function for the gory details. -acme
1439	 */
1440
1441	newsk->sk_gso_type = SKB_GSO_TCPV6;
1442	ip6_dst_store(newsk, dst, NULL, NULL);
1443	inet6_sk_rx_dst_set(newsk, skb);
1444
1445	inet_sk(newsk)->pinet6 = tcp_inet6_sk(newsk);
1446
1447	newtp = tcp_sk(newsk);
1448	newinet = inet_sk(newsk);
1449	newnp = tcp_inet6_sk(newsk);
1450
1451	memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1452
1453	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
1454	newnp->saddr = ireq->ir_v6_loc_addr;
1455	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
1456	newsk->sk_bound_dev_if = ireq->ir_iif;
1457
1458	/* Now IPv6 options...
1459
1460	   First: no IPv4 options.
1461	 */
1462	newinet->inet_opt = NULL;
1463	newnp->ipv6_mc_list = NULL;
1464	newnp->ipv6_ac_list = NULL;
1465	newnp->ipv6_fl_list = NULL;
1466
1467	/* Clone RX bits */
1468	newnp->rxopt.all = np->rxopt.all;
1469
1470	newnp->pktoptions = NULL;
1471	newnp->opt	  = NULL;
1472	newnp->mcast_oif  = tcp_v6_iif(skb);
1473	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1474	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
1475	if (inet6_test_bit(REPFLOW, sk))
1476		newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
1477
1478	/* Set ToS of the new socket based upon the value of incoming SYN.
1479	 * ECT bits are set later in tcp_init_transfer().
1480	 */
1481	if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reflect_tos))
1482		newnp->tclass = tcp_rsk(req)->syn_tos & ~INET_ECN_MASK;
1483
1484	/* Clone native IPv6 options from listening socket (if any)
1485
1486	   Yes, keeping a reference count would be much more clever,
1487	   but we do one more thing here: reattach optmem
1488	   to newsk.
1489	 */
1490	opt = ireq->ipv6_opt;
1491	if (!opt)
1492		opt = rcu_dereference(np->opt);
1493	if (opt) {
1494		opt = ipv6_dup_options(newsk, opt);
1495		RCU_INIT_POINTER(newnp->opt, opt);
1496	}
1497	inet_csk(newsk)->icsk_ext_hdr_len = 0;
1498	if (opt)
1499		inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
1500						    opt->opt_flen;
1501
1502	tcp_ca_openreq_child(newsk, dst);
1503
1504	tcp_sync_mss(newsk, dst_mtu(dst));
1505	newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));
1506
1507	tcp_initialize_rcv_mss(newsk);
1508
1509	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
1510	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
1511
1512#ifdef CONFIG_TCP_MD5SIG
1513	l3index = l3mdev_master_ifindex_by_index(sock_net(sk), ireq->ir_iif);
1514
1515	if (!tcp_rsk_used_ao(req)) {
1516		/* Copy over the MD5 key from the original socket */
1517		key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr, l3index);
1518		if (key) {
1519			const union tcp_md5_addr *addr;
1520
1521			addr = (union tcp_md5_addr *)&newsk->sk_v6_daddr;
1522			if (tcp_md5_key_copy(newsk, addr, AF_INET6, 128, l3index, key)) {
1523				inet_csk_prepare_forced_close(newsk);
1524				tcp_done(newsk);
1525				goto out;
1526			}
1527		}
1528	}
1529#endif
1530#ifdef CONFIG_TCP_AO
1531	/* Copy over tcp_ao_info if any */
1532	if (tcp_ao_copy_all_matching(sk, newsk, req, skb, AF_INET6))
1533		goto out; /* OOM */
1534#endif
1535
1536	if (__inet_inherit_port(sk, newsk) < 0) {
1537		inet_csk_prepare_forced_close(newsk);
1538		tcp_done(newsk);
1539		goto out;
1540	}
1541	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash),
1542				       &found_dup_sk);
1543	if (*own_req) {
1544		tcp_move_syn(newtp, req);
1545
1546		/* Clone pktoptions received with SYN, if we own the req */
1547		if (ireq->pktopts) {
1548			newnp->pktoptions = skb_clone_and_charge_r(ireq->pktopts, newsk);
1549			consume_skb(ireq->pktopts);
1550			ireq->pktopts = NULL;
1551			if (newnp->pktoptions)
1552				tcp_v6_restore_cb(newnp->pktoptions);
1553		}
1554	} else {
1555		if (!req_unhash && found_dup_sk) {
1556			/* This code path should only be executed in the
1557			 * syncookie case
1558			 */
1559			bh_unlock_sock(newsk);
1560			sock_put(newsk);
1561			newsk = NULL;
1562		}
1563	}
1564
1565	return newsk;
1566
1567out_overflow:
1568	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1569out_nonewsk:
1570	dst_release(dst);
1571out:
1572	tcp_listendrop(sk);
1573	return NULL;
1574}
1575
1576INDIRECT_CALLABLE_DECLARE(struct dst_entry *ipv4_dst_check(struct dst_entry *,
1577							   u32));
1578/* The socket must have its spinlock held when we get
1579 * here, unless it is a TCP_LISTEN socket.
1580 *
1581 * We have a potential double-lock case here, so even when
1582 * doing backlog processing we use the BH locking scheme.
1583 * This is because we cannot sleep with the original spinlock
1584 * held.
1585 */
1586INDIRECT_CALLABLE_SCOPE
1587int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1588{
1589	struct ipv6_pinfo *np = tcp_inet6_sk(sk);
1590	struct sk_buff *opt_skb = NULL;
1591	enum skb_drop_reason reason;
1592	struct tcp_sock *tp;
1593
1594	/* Imagine: socket is IPv6. IPv4 packet arrives,
1595	   goes to IPv4 receive handler and backlogged.
1596	   From backlog it always goes here. Kerboom...
1597	   Fortunately, tcp_rcv_established and rcv_established
1598	   handle them correctly, but that is not the case with
1599	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
1600	 */
1601
1602	if (skb->protocol == htons(ETH_P_IP))
1603		return tcp_v4_do_rcv(sk, skb);
1604
1605	/*
1606	 *	socket locking is here for SMP purposes as backlog rcv
1607	 *	is currently called with bh processing disabled.
1608	 */
1609
1610	/* Do Stevens' IPV6_PKTOPTIONS.
1611
1612	   Yes, guys, it is the only place in our code, where we
1613	   may make it not affecting IPv4.
1614	   The rest of code is protocol independent,
1615	   and I do not like idea to uglify IPv4.
1616
1617	   Actually, the whole idea behind IPV6_PKTOPTIONS
1618	   does not look very well thought out. For now we latch
1619	   the options received in the last packet enqueued
1620	   by tcp. Feel free to propose a better solution.
1621					       --ANK (980728)
1622	 */
1623	if (np->rxopt.all)
1624		opt_skb = skb_clone_and_charge_r(skb, sk);
1625
1626	reason = SKB_DROP_REASON_NOT_SPECIFIED;
1627	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1628		struct dst_entry *dst;
1629
1630		dst = rcu_dereference_protected(sk->sk_rx_dst,
1631						lockdep_sock_is_held(sk));
1632
1633		sock_rps_save_rxhash(sk, skb);
1634		sk_mark_napi_id(sk, skb);
1635		if (dst) {
1636			if (sk->sk_rx_dst_ifindex != skb->skb_iif ||
1637			    INDIRECT_CALL_1(dst->ops->check, ip6_dst_check,
1638					    dst, sk->sk_rx_dst_cookie) == NULL) {
1639				RCU_INIT_POINTER(sk->sk_rx_dst, NULL);
1640				dst_release(dst);
1641			}
1642		}
1643
1644		tcp_rcv_established(sk, skb);
1645		if (opt_skb)
1646			goto ipv6_pktoptions;
1647		return 0;
1648	}
1649
1650	if (tcp_checksum_complete(skb))
1651		goto csum_err;
1652
1653	if (sk->sk_state == TCP_LISTEN) {
1654		struct sock *nsk = tcp_v6_cookie_check(sk, skb);
1655
1656		if (!nsk)
1657			goto discard;
1658
1659		if (nsk != sk) {
1660			if (tcp_child_process(sk, nsk, skb))
1661				goto reset;
1662			if (opt_skb)
1663				__kfree_skb(opt_skb);
1664			return 0;
1665		}
1666	} else
1667		sock_rps_save_rxhash(sk, skb);
1668
1669	if (tcp_rcv_state_process(sk, skb))
1670		goto reset;
1671	if (opt_skb)
1672		goto ipv6_pktoptions;
1673	return 0;
1674
1675reset:
1676	tcp_v6_send_reset(sk, skb);
1677discard:
1678	if (opt_skb)
1679		__kfree_skb(opt_skb);
1680	kfree_skb_reason(skb, reason);
1681	return 0;
1682csum_err:
1683	reason = SKB_DROP_REASON_TCP_CSUM;
1684	trace_tcp_bad_csum(skb);
1685	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1686	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
1687	goto discard;
1688
1689
1690ipv6_pktoptions:
1691	/* Do you ask, what is it?
1692
1693	   1. skb was enqueued by tcp.
1694	   2. skb is added to tail of read queue, rather than out of order.
1695	   3. socket is not in passive state.
1696	   4. Finally, it really contains options, which user wants to receive.
1697	 */
1698	tp = tcp_sk(sk);
1699	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1700	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
1701		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
1702			WRITE_ONCE(np->mcast_oif, tcp_v6_iif(opt_skb));
1703		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1704			WRITE_ONCE(np->mcast_hops,
1705				   ipv6_hdr(opt_skb)->hop_limit);
1706		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
1707			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
1708		if (inet6_test_bit(REPFLOW, sk))
1709			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
1710		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
1711			tcp_v6_restore_cb(opt_skb);
1712			opt_skb = xchg(&np->pktoptions, opt_skb);
1713		} else {
1714			__kfree_skb(opt_skb);
1715			opt_skb = xchg(&np->pktoptions, NULL);
1716		}
1717	}
1718
1719	consume_skb(opt_skb);
1720	return 0;
1721}
1722
1723static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
1724			   const struct tcphdr *th)
1725{
1726	/* This is tricky: we move IP6CB to its correct location inside
1727	 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
1728	 * _decode_session6() uses IP6CB().
1729	 * barrier() makes sure the compiler won't play aliasing games.
1730	 */
1731	memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
1732		sizeof(struct inet6_skb_parm));
1733	barrier();
1734
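	/* SYN and FIN each consume one unit of sequence space, so both flags
	 * are counted along with the payload length when computing end_seq.
	 */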
1735	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1736	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1737				    skb->len - th->doff*4);
1738	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1739	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1740	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1741	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1742	TCP_SKB_CB(skb)->sacked = 0;
1743	TCP_SKB_CB(skb)->has_rxtstamp =
1744			skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
1745}
1746
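/* Main IPv6 TCP receive routine: validates the TCP header and checksum,
 * looks up the owning socket, and dispatches on its state. Request
 * sockets are handled in their listener's context, established sockets
 * are processed directly or via the socket backlog, and TIME_WAIT
 * sockets take the do_time_wait path below.
 */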
1747INDIRECT_CALLABLE_SCOPE int tcp_v6_rcv(struct sk_buff *skb)
1748{
1749	enum skb_drop_reason drop_reason;
1750	int sdif = inet6_sdif(skb);
1751	int dif = inet6_iif(skb);
1752	const struct tcphdr *th;
1753	const struct ipv6hdr *hdr;
1754	bool refcounted;
1755	struct sock *sk;
1756	int ret;
1757	struct net *net = dev_net(skb->dev);
1758
1759	drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;
1760	if (skb->pkt_type != PACKET_HOST)
1761		goto discard_it;
1762
1763	/*
1764	 *	Count it even if it's bad.
1765	 */
1766	__TCP_INC_STATS(net, TCP_MIB_INSEGS);
1767
1768	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1769		goto discard_it;
1770
1771	th = (const struct tcphdr *)skb->data;
1772
1773	if (unlikely(th->doff < sizeof(struct tcphdr) / 4)) {
1774		drop_reason = SKB_DROP_REASON_PKT_TOO_SMALL;
1775		goto bad_packet;
1776	}
1777	if (!pskb_may_pull(skb, th->doff*4))
1778		goto discard_it;
1779
1780	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
1781		goto csum_error;
1782
1783	th = (const struct tcphdr *)skb->data;
1784	hdr = ipv6_hdr(skb);
1785
1786lookup:
1787	sk = __inet6_lookup_skb(net->ipv4.tcp_death_row.hashinfo, skb, __tcp_hdrlen(th),
1788				th->source, th->dest, inet6_iif(skb), sdif,
1789				&refcounted);
1790	if (!sk)
1791		goto no_tcp_socket;
1792
1793process:
1794	if (sk->sk_state == TCP_TIME_WAIT)
1795		goto do_time_wait;
1796
1797	if (sk->sk_state == TCP_NEW_SYN_RECV) {
1798		struct request_sock *req = inet_reqsk(sk);
1799		bool req_stolen = false;
1800		struct sock *nsk;
1801
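		/* The segment matched a request socket (mid-handshake).
		 * Switch to its listener and re-validate policy and signing
		 * keys in the listener's context before completing the
		 * handshake.
		 */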
1802		sk = req->rsk_listener;
1803		if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1804			drop_reason = SKB_DROP_REASON_XFRM_POLICY;
1805		else
1806			drop_reason = tcp_inbound_hash(sk, req, skb,
1807						       &hdr->saddr, &hdr->daddr,
1808						       AF_INET6, dif, sdif);
1809		if (drop_reason) {
1810			sk_drops_add(sk, skb);
1811			reqsk_put(req);
1812			goto discard_it;
1813		}
1814		if (tcp_checksum_complete(skb)) {
1815			reqsk_put(req);
1816			goto csum_error;
1817		}
1818		if (unlikely(sk->sk_state != TCP_LISTEN)) {
1819			nsk = reuseport_migrate_sock(sk, req_to_sk(req), skb);
1820			if (!nsk) {
1821				inet_csk_reqsk_queue_drop_and_put(sk, req);
1822				goto lookup;
1823			}
1824			sk = nsk;
1825			/* reuseport_migrate_sock() has already taken one
1826			 * sk_refcnt before returning.
1827			 */
1828		} else {
1829			sock_hold(sk);
1830		}
1831		refcounted = true;
1832		nsk = NULL;
1833		if (!tcp_filter(sk, skb)) {
1834			th = (const struct tcphdr *)skb->data;
1835			hdr = ipv6_hdr(skb);
1836			tcp_v6_fill_cb(skb, hdr, th);
1837			nsk = tcp_check_req(sk, skb, req, false, &req_stolen);
1838		} else {
1839			drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
1840		}
1841		if (!nsk) {
1842			reqsk_put(req);
1843			if (req_stolen) {
1844			/* Another CPU got exclusive access to req
1845			 * and created a full-blown socket.
1846			 * Try to feed this packet to that socket
1847			 * instead of discarding it.
1848			 */
1849				tcp_v6_restore_cb(skb);
1850				sock_put(sk);
1851				goto lookup;
1852			}
1853			goto discard_and_relse;
1854		}
1855		nf_reset_ct(skb);
1856		if (nsk == sk) {
1857			reqsk_put(req);
1858			tcp_v6_restore_cb(skb);
1859		} else if (tcp_child_process(sk, nsk, skb)) {
1860			tcp_v6_send_reset(nsk, skb);
1861			goto discard_and_relse;
1862		} else {
1863			sock_put(sk);
1864			return 0;
1865		}
1866	}
1867
1868	if (static_branch_unlikely(&ip6_min_hopcount)) {
1869		/* min_hopcount can be changed concurrently from do_ipv6_setsockopt() */
1870		if (unlikely(hdr->hop_limit < READ_ONCE(tcp_inet6_sk(sk)->min_hopcount))) {
1871			__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
1872			drop_reason = SKB_DROP_REASON_TCP_MINTTL;
1873			goto discard_and_relse;
1874		}
1875	}
1876
1877	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
1878		drop_reason = SKB_DROP_REASON_XFRM_POLICY;
1879		goto discard_and_relse;
1880	}
1881
1882	drop_reason = tcp_inbound_hash(sk, NULL, skb, &hdr->saddr, &hdr->daddr,
1883				       AF_INET6, dif, sdif);
1884	if (drop_reason)
1885		goto discard_and_relse;
1886
1887	nf_reset_ct(skb);
1888
1889	if (tcp_filter(sk, skb)) {
1890		drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
1891		goto discard_and_relse;
1892	}
1893	th = (const struct tcphdr *)skb->data;
1894	hdr = ipv6_hdr(skb);
1895	tcp_v6_fill_cb(skb, hdr, th);
1896
1897	skb->dev = NULL;
1898
1899	if (sk->sk_state == TCP_LISTEN) {
1900		ret = tcp_v6_do_rcv(sk, skb);
1901		goto put_and_return;
1902	}
1903
1904	sk_incoming_cpu_update(sk);
1905
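	/* If no user-space caller owns the socket, process the segment now
	 * in softirq context; otherwise queue it to the backlog, which is
	 * drained from release_sock() via tcp_v6_do_rcv().
	 */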
1906	bh_lock_sock_nested(sk);
1907	tcp_segs_in(tcp_sk(sk), skb);
1908	ret = 0;
1909	if (!sock_owned_by_user(sk)) {
1910		ret = tcp_v6_do_rcv(sk, skb);
1911	} else {
1912		if (tcp_add_backlog(sk, skb, &drop_reason))
1913			goto discard_and_relse;
1914	}
1915	bh_unlock_sock(sk);
1916put_and_return:
1917	if (refcounted)
1918		sock_put(sk);
1919	return ret ? -1 : 0;
1920
1921no_tcp_socket:
1922	drop_reason = SKB_DROP_REASON_NO_SOCKET;
1923	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1924		goto discard_it;
1925
1926	tcp_v6_fill_cb(skb, hdr, th);
1927
1928	if (tcp_checksum_complete(skb)) {
1929csum_error:
1930		drop_reason = SKB_DROP_REASON_TCP_CSUM;
1931		trace_tcp_bad_csum(skb);
1932		__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
1933bad_packet:
1934		__TCP_INC_STATS(net, TCP_MIB_INERRS);
1935	} else {
1936		tcp_v6_send_reset(NULL, skb);
1937	}
1938
1939discard_it:
1940	SKB_DR_OR(drop_reason, NOT_SPECIFIED);
1941	kfree_skb_reason(skb, drop_reason);
1942	return 0;
1943
1944discard_and_relse:
1945	sk_drops_add(sk, skb);
1946	if (refcounted)
1947		sock_put(sk);
1948	goto discard_it;
1949
1950do_time_wait:
1951	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1952		drop_reason = SKB_DROP_REASON_XFRM_POLICY;
1953		inet_twsk_put(inet_twsk(sk));
1954		goto discard_it;
1955	}
1956
1957	tcp_v6_fill_cb(skb, hdr, th);
1958
1959	if (tcp_checksum_complete(skb)) {
1960		inet_twsk_put(inet_twsk(sk));
1961		goto csum_error;
1962	}
1963
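	/* tcp_timewait_state_process() decides the fate of a segment that
	 * hits a TIME_WAIT socket: TCP_TW_SYN may revive the connection via
	 * a matching listener, TCP_TW_ACK re-sends an ACK, TCP_TW_RST
	 * sends a reset, and TCP_TW_SUCCESS silently discards.
	 */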
1964	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1965	case TCP_TW_SYN:
1966	{
1967		struct sock *sk2;
1968
1969		sk2 = inet6_lookup_listener(net, net->ipv4.tcp_death_row.hashinfo,
1970					    skb, __tcp_hdrlen(th),
1971					    &ipv6_hdr(skb)->saddr, th->source,
1972					    &ipv6_hdr(skb)->daddr,
1973					    ntohs(th->dest),
1974					    tcp_v6_iif_l3_slave(skb),
1975					    sdif);
1976		if (sk2) {
1977			struct inet_timewait_sock *tw = inet_twsk(sk);
1978			inet_twsk_deschedule_put(tw);
1979			sk = sk2;
1980			tcp_v6_restore_cb(skb);
1981			refcounted = false;
1982			goto process;
1983		}
1984	}
1985		/* to ACK */
1986		fallthrough;
1987	case TCP_TW_ACK:
1988		tcp_v6_timewait_ack(sk, skb);
1989		break;
1990	case TCP_TW_RST:
1991		tcp_v6_send_reset(sk, skb);
1992		inet_twsk_deschedule_put(inet_twsk(sk));
1993		goto discard_it;
1994	case TCP_TW_SUCCESS:
1995		;
1996	}
1997	goto discard_it;
1998}
1999
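/* Early demux: invoked from the IPv6 input path before routing. On a hit
 * in the established hash table, attach the socket and, when the cached
 * input route is still valid for this interface, reuse it and skip the
 * full route lookup.
 */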
2000void tcp_v6_early_demux(struct sk_buff *skb)
2001{
2002	struct net *net = dev_net(skb->dev);
2003	const struct ipv6hdr *hdr;
2004	const struct tcphdr *th;
2005	struct sock *sk;
2006
2007	if (skb->pkt_type != PACKET_HOST)
2008		return;
2009
2010	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
2011		return;
2012
2013	hdr = ipv6_hdr(skb);
2014	th = tcp_hdr(skb);
2015
2016	if (th->doff < sizeof(struct tcphdr) / 4)
2017		return;
2018
2019	/* Note: We use inet6_iif() here, not tcp_v6_iif(). */
2020	sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
2021					&hdr->saddr, th->source,
2022					&hdr->daddr, ntohs(th->dest),
2023					inet6_iif(skb), inet6_sdif(skb));
2024	if (sk) {
2025		skb->sk = sk;
2026		skb->destructor = sock_edemux;
2027		if (sk_fullsock(sk)) {
2028			struct dst_entry *dst = rcu_dereference(sk->sk_rx_dst);
2029
2030			if (dst)
2031				dst = dst_check(dst, sk->sk_rx_dst_cookie);
2032			if (dst &&
2033			    sk->sk_rx_dst_ifindex == skb->skb_iif)
2034				skb_dst_set_noref(skb, dst);
2035		}
2036	}
2037}
2038
2039static struct timewait_sock_ops tcp6_timewait_sock_ops = {
2040	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
2041	.twsk_unique	= tcp_twsk_unique,
2042	.twsk_destructor = tcp_twsk_destructor,
2043};
2044
2045INDIRECT_CALLABLE_SCOPE void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
2046{
2047	__tcp_v6_send_check(skb, &sk->sk_v6_rcv_saddr, &sk->sk_v6_daddr);
2048}
2049
2050const struct inet_connection_sock_af_ops ipv6_specific = {
2051	.queue_xmit	   = inet6_csk_xmit,
2052	.send_check	   = tcp_v6_send_check,
2053	.rebuild_header	   = inet6_sk_rebuild_header,
2054	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
2055	.conn_request	   = tcp_v6_conn_request,
2056	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
2057	.net_header_len	   = sizeof(struct ipv6hdr),
2058	.setsockopt	   = ipv6_setsockopt,
2059	.getsockopt	   = ipv6_getsockopt,
2060	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
2061	.sockaddr_len	   = sizeof(struct sockaddr_in6),
2062	.mtu_reduced	   = tcp_v6_mtu_reduced,
2063};
2064
2065#if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
2066static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
2067#ifdef CONFIG_TCP_MD5SIG
2068	.md5_lookup	=	tcp_v6_md5_lookup,
2069	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
2070	.md5_parse	=	tcp_v6_parse_md5_keys,
2071#endif
2072#ifdef CONFIG_TCP_AO
2073	.ao_lookup	=	tcp_v6_ao_lookup,
2074	.calc_ao_hash	=	tcp_v6_ao_hash_skb,
2075	.ao_parse	=	tcp_v6_parse_ao,
2076	.ao_calc_key_sk	=	tcp_v6_ao_calc_key_sk,
2077#endif
2078};
2079#endif
2080
2081/*
2082 *	TCP over IPv4 via INET6 API
2083 */
2084static const struct inet_connection_sock_af_ops ipv6_mapped = {
2085	.queue_xmit	   = ip_queue_xmit,
2086	.send_check	   = tcp_v4_send_check,
2087	.rebuild_header	   = inet_sk_rebuild_header,
2088	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
2089	.conn_request	   = tcp_v6_conn_request,
2090	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
2091	.net_header_len	   = sizeof(struct iphdr),
2092	.setsockopt	   = ipv6_setsockopt,
2093	.getsockopt	   = ipv6_getsockopt,
2094	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
2095	.sockaddr_len	   = sizeof(struct sockaddr_in6),
2096	.mtu_reduced	   = tcp_v4_mtu_reduced,
2097};
2098
2099#if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
2100static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
2101#ifdef CONFIG_TCP_MD5SIG
2102	.md5_lookup	=	tcp_v4_md5_lookup,
2103	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
2104	.md5_parse	=	tcp_v6_parse_md5_keys,
2105#endif
2106#ifdef CONFIG_TCP_AO
2107	.ao_lookup	=	tcp_v6_ao_lookup,
2108	.calc_ao_hash	=	tcp_v4_ao_hash_skb,
2109	.ao_parse	=	tcp_v6_parse_ao,
2110	.ao_calc_key_sk	=	tcp_v4_ao_calc_key_sk,
2111#endif
2112};
2113#endif
2114
2115/* NOTE: Many fields have already been zeroed by sk_alloc(),
2116 *       so they need not be initialized here.
2117 */
2118static int tcp_v6_init_sock(struct sock *sk)
2119{
2120	struct inet_connection_sock *icsk = inet_csk(sk);
2121
2122	tcp_init_sock(sk);
2123
2124	icsk->icsk_af_ops = &ipv6_specific;
2125
2126#if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
2127	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
2128#endif
2129
2130	return 0;
2131}
2132
2133#ifdef CONFIG_PROC_FS
2134/* Proc filesystem TCPv6 sock list dumping. */
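/* The output format mirrors IPv4's /proc/net/tcp, with each 128-bit
 * IPv6 address printed as four 32-bit hexadecimal words.
 */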
2135static void get_openreq6(struct seq_file *seq,
2136			 const struct request_sock *req, int i)
2137{
2138	long ttd = req->rsk_timer.expires - jiffies;
2139	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
2140	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;
2141
2142	if (ttd < 0)
2143		ttd = 0;
2144
2145	seq_printf(seq,
2146		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2147		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
2148		   i,
2149		   src->s6_addr32[0], src->s6_addr32[1],
2150		   src->s6_addr32[2], src->s6_addr32[3],
2151		   inet_rsk(req)->ir_num,
2152		   dest->s6_addr32[0], dest->s6_addr32[1],
2153		   dest->s6_addr32[2], dest->s6_addr32[3],
2154		   ntohs(inet_rsk(req)->ir_rmt_port),
2155		   TCP_SYN_RECV,
2156		   0, 0, /* could print option size, but that is af dependent. */
2157		   1,   /* timers active (only the expire timer) */
2158		   jiffies_to_clock_t(ttd),
2159		   req->num_timeout,
2160		   from_kuid_munged(seq_user_ns(seq),
2161				    sock_i_uid(req->rsk_listener)),
2162		   0,  /* non standard timer */
2163		   0, /* open_requests have no inode */
2164		   0, req);
2165}
2166
2167static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
2168{
2169	const struct in6_addr *dest, *src;
2170	__u16 destp, srcp;
2171	int timer_active;
2172	unsigned long timer_expires;
2173	const struct inet_sock *inet = inet_sk(sp);
2174	const struct tcp_sock *tp = tcp_sk(sp);
2175	const struct inet_connection_sock *icsk = inet_csk(sp);
2176	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
2177	int rx_queue;
2178	int state;
2179
2180	dest  = &sp->sk_v6_daddr;
2181	src   = &sp->sk_v6_rcv_saddr;
2182	destp = ntohs(inet->inet_dport);
2183	srcp  = ntohs(inet->inet_sport);
2184
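	/* Timer code reported in /proc: 1 = retransmit/REO/loss-probe
	 * timer, 4 = zero-window probe timer, 2 = keepalive (sk_timer),
	 * 0 = no timer pending.
	 */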
2185	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
2186	    icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
2187	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
2188		timer_active	= 1;
2189		timer_expires	= icsk->icsk_timeout;
2190	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2191		timer_active	= 4;
2192		timer_expires	= icsk->icsk_timeout;
2193	} else if (timer_pending(&sp->sk_timer)) {
2194		timer_active	= 2;
2195		timer_expires	= sp->sk_timer.expires;
2196	} else {
2197		timer_active	= 0;
2198		timer_expires = jiffies;
2199	}
2200
2201	state = inet_sk_state_load(sp);
2202	if (state == TCP_LISTEN)
2203		rx_queue = READ_ONCE(sp->sk_ack_backlog);
2204	else
2205		/* Because we don't lock the socket,
2206		 * we might find a transient negative value.
2207		 */
2208		rx_queue = max_t(int, READ_ONCE(tp->rcv_nxt) -
2209				      READ_ONCE(tp->copied_seq), 0);
2210
2211	seq_printf(seq,
2212		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2213		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
2214		   i,
2215		   src->s6_addr32[0], src->s6_addr32[1],
2216		   src->s6_addr32[2], src->s6_addr32[3], srcp,
2217		   dest->s6_addr32[0], dest->s6_addr32[1],
2218		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
2219		   state,
2220		   READ_ONCE(tp->write_seq) - tp->snd_una,
2221		   rx_queue,
2222		   timer_active,
2223		   jiffies_delta_to_clock_t(timer_expires - jiffies),
2224		   icsk->icsk_retransmits,
2225		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
2226		   icsk->icsk_probes_out,
2227		   sock_i_ino(sp),
2228		   refcount_read(&sp->sk_refcnt), sp,
2229		   jiffies_to_clock_t(icsk->icsk_rto),
2230		   jiffies_to_clock_t(icsk->icsk_ack.ato),
2231		   (icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(sp),
2232		   tcp_snd_cwnd(tp),
2233		   state == TCP_LISTEN ?
2234			fastopenq->max_qlen :
2235			(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
2236		   );
2237}
2238
2239static void get_timewait6_sock(struct seq_file *seq,
2240			       struct inet_timewait_sock *tw, int i)
2241{
2242	long delta = tw->tw_timer.expires - jiffies;
2243	const struct in6_addr *dest, *src;
2244	__u16 destp, srcp;
2245
2246	dest = &tw->tw_v6_daddr;
2247	src  = &tw->tw_v6_rcv_saddr;
2248	destp = ntohs(tw->tw_dport);
2249	srcp  = ntohs(tw->tw_sport);
2250
2251	seq_printf(seq,
2252		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2253		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
2254		   i,
2255		   src->s6_addr32[0], src->s6_addr32[1],
2256		   src->s6_addr32[2], src->s6_addr32[3], srcp,
2257		   dest->s6_addr32[0], dest->s6_addr32[1],
2258		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
2259		   tw->tw_substate, 0, 0,
2260		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
2261		   refcount_read(&tw->tw_refcnt), tw);
2262}
2263
2264static int tcp6_seq_show(struct seq_file *seq, void *v)
2265{
2266	struct tcp_iter_state *st;
2267	struct sock *sk = v;
2268
2269	if (v == SEQ_START_TOKEN) {
2270		seq_puts(seq,
2271			 "  sl  "
2272			 "local_address                         "
2273			 "remote_address                        "
2274			 "st tx_queue rx_queue tr tm->when retrnsmt"
2275			 "   uid  timeout inode\n");
2276		goto out;
2277	}
2278	st = seq->private;
2279
2280	if (sk->sk_state == TCP_TIME_WAIT)
2281		get_timewait6_sock(seq, v, st->num);
2282	else if (sk->sk_state == TCP_NEW_SYN_RECV)
2283		get_openreq6(seq, v, st->num);
2284	else
2285		get_tcp6_sock(seq, v, st->num);
2286out:
2287	return 0;
2288}
2289
2290static const struct seq_operations tcp6_seq_ops = {
2291	.show		= tcp6_seq_show,
2292	.start		= tcp_seq_start,
2293	.next		= tcp_seq_next,
2294	.stop		= tcp_seq_stop,
2295};
2296
2297static struct tcp_seq_afinfo tcp6_seq_afinfo = {
2298	.family		= AF_INET6,
2299};
2300
2301int __net_init tcp6_proc_init(struct net *net)
2302{
2303	if (!proc_create_net_data("tcp6", 0444, net->proc_net, &tcp6_seq_ops,
2304			sizeof(struct tcp_iter_state), &tcp6_seq_afinfo))
2305		return -ENOMEM;
2306	return 0;
2307}
2308
2309void tcp6_proc_exit(struct net *net)
2310{
2311	remove_proc_entry("tcp6", net->proc_net);
2312}
2313#endif
2314
2315struct proto tcpv6_prot = {
2316	.name			= "TCPv6",
2317	.owner			= THIS_MODULE,
2318	.close			= tcp_close,
2319	.pre_connect		= tcp_v6_pre_connect,
2320	.connect		= tcp_v6_connect,
2321	.disconnect		= tcp_disconnect,
2322	.accept			= inet_csk_accept,
2323	.ioctl			= tcp_ioctl,
2324	.init			= tcp_v6_init_sock,
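	/* Despite its name, tcp_v4_destroy_sock() is also used for IPv6
	 * sockets; its teardown work is address-family independent.
	 */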
2325	.destroy		= tcp_v4_destroy_sock,
2326	.shutdown		= tcp_shutdown,
2327	.setsockopt		= tcp_setsockopt,
2328	.getsockopt		= tcp_getsockopt,
2329	.bpf_bypass_getsockopt	= tcp_bpf_bypass_getsockopt,
2330	.keepalive		= tcp_set_keepalive,
2331	.recvmsg		= tcp_recvmsg,
2332	.sendmsg		= tcp_sendmsg,
2333	.splice_eof		= tcp_splice_eof,
2334	.backlog_rcv		= tcp_v6_do_rcv,
2335	.release_cb		= tcp_release_cb,
2336	.hash			= inet6_hash,
2337	.unhash			= inet_unhash,
2338	.get_port		= inet_csk_get_port,
2339	.put_port		= inet_put_port,
2340#ifdef CONFIG_BPF_SYSCALL
2341	.psock_update_sk_prot	= tcp_bpf_update_proto,
2342#endif
2343	.enter_memory_pressure	= tcp_enter_memory_pressure,
2344	.leave_memory_pressure	= tcp_leave_memory_pressure,
2345	.stream_memory_free	= tcp_stream_memory_free,
2346	.sockets_allocated	= &tcp_sockets_allocated,
2347
2348	.memory_allocated	= &tcp_memory_allocated,
2349	.per_cpu_fw_alloc	= &tcp_memory_per_cpu_fw_alloc,
2350
2351	.memory_pressure	= &tcp_memory_pressure,
2352	.orphan_count		= &tcp_orphan_count,
2353	.sysctl_mem		= sysctl_tcp_mem,
2354	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_wmem),
2355	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_rmem),
2356	.max_header		= MAX_TCP_HEADER,
2357	.obj_size		= sizeof(struct tcp6_sock),
2358	.ipv6_pinfo_offset = offsetof(struct tcp6_sock, inet6),
2359	.slab_flags		= SLAB_TYPESAFE_BY_RCU,
2360	.twsk_prot		= &tcp6_timewait_sock_ops,
2361	.rsk_prot		= &tcp6_request_sock_ops,
2362	.h.hashinfo		= NULL,
2363	.no_autobind		= true,
2364	.diag_destroy		= tcp_abort,
2365};
2366EXPORT_SYMBOL_GPL(tcpv6_prot);
2367
2368static const struct inet6_protocol tcpv6_protocol = {
2369	.handler	=	tcp_v6_rcv,
2370	.err_handler	=	tcp_v6_err,
2371	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
2372};
2373
2374static struct inet_protosw tcpv6_protosw = {
2375	.type		=	SOCK_STREAM,
2376	.protocol	=	IPPROTO_TCP,
2377	.prot		=	&tcpv6_prot,
2378	.ops		=	&inet6_stream_ops,
2379	.flags		=	INET_PROTOSW_PERMANENT |
2380				INET_PROTOSW_ICSK,
2381};
2382
2383static int __net_init tcpv6_net_init(struct net *net)
2384{
2385	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
2386				    SOCK_RAW, IPPROTO_TCP, net);
2387}
2388
2389static void __net_exit tcpv6_net_exit(struct net *net)
2390{
2391	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
2392}
2393
2394static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
2395{
2396	tcp_twsk_purge(net_exit_list, AF_INET6);
2397}
2398
2399static struct pernet_operations tcpv6_net_ops = {
2400	.init	    = tcpv6_net_init,
2401	.exit	    = tcpv6_net_exit,
2402	.exit_batch = tcpv6_net_exit_batch,
2403};
2404
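/* Register the TCP/IPv6 protocol handler, the protocol switch entry, the
 * per-netns control sockets, and MPTCP support, unwinding in reverse
 * order on failure.
 */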
2405int __init tcpv6_init(void)
2406{
2407	int ret;
2408
2409	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
2410	if (ret)
2411		goto out;
2412
2413	/* register inet6 protocol */
2414	ret = inet6_register_protosw(&tcpv6_protosw);
2415	if (ret)
2416		goto out_tcpv6_protocol;
2417
2418	ret = register_pernet_subsys(&tcpv6_net_ops);
2419	if (ret)
2420		goto out_tcpv6_protosw;
2421
2422	ret = mptcpv6_init();
2423	if (ret)
2424		goto out_tcpv6_pernet_subsys;
2425
2426out:
2427	return ret;
2428
2429out_tcpv6_pernet_subsys:
2430	unregister_pernet_subsys(&tcpv6_net_ops);
2431out_tcpv6_protosw:
2432	inet6_unregister_protosw(&tcpv6_protosw);
2433out_tcpv6_protocol:
2434	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2435	goto out;
2436}
2437
2438void tcpv6_exit(void)
2439{
2440	unregister_pernet_subsys(&tcpv6_net_ops);
2441	inet6_unregister_protosw(&tcpv6_protosw);
2442	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2443}