Linux v4.6: net/ipv6/tcp_ipv6.c
   1/*
   2 *	TCP over IPv6
   3 *	Linux INET6 implementation
   4 *
   5 *	Authors:
   6 *	Pedro Roque		<roque@di.fc.ul.pt>
   7 *
   8 *	Based on:
   9 *	linux/net/ipv4/tcp.c
  10 *	linux/net/ipv4/tcp_input.c
  11 *	linux/net/ipv4/tcp_output.c
  12 *
  13 *	Fixes:
  14 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
   15 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
   16 *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
   17 *					to a single port at the same time.
  18 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
  19 *
  20 *	This program is free software; you can redistribute it and/or
  21 *      modify it under the terms of the GNU General Public License
  22 *      as published by the Free Software Foundation; either version
  23 *      2 of the License, or (at your option) any later version.
  24 */
  25
  26#include <linux/bottom_half.h>
  27#include <linux/module.h>
  28#include <linux/errno.h>
  29#include <linux/types.h>
  30#include <linux/socket.h>
  31#include <linux/sockios.h>
  32#include <linux/net.h>
  33#include <linux/jiffies.h>
  34#include <linux/in.h>
  35#include <linux/in6.h>
  36#include <linux/netdevice.h>
  37#include <linux/init.h>
  38#include <linux/jhash.h>
  39#include <linux/ipsec.h>
  40#include <linux/times.h>
  41#include <linux/slab.h>
  42#include <linux/uaccess.h>
  43#include <linux/ipv6.h>
  44#include <linux/icmpv6.h>
  45#include <linux/random.h>
  46
  47#include <net/tcp.h>
  48#include <net/ndisc.h>
  49#include <net/inet6_hashtables.h>
  50#include <net/inet6_connection_sock.h>
  51#include <net/ipv6.h>
  52#include <net/transp_v6.h>
  53#include <net/addrconf.h>
  54#include <net/ip6_route.h>
  55#include <net/ip6_checksum.h>
  56#include <net/inet_ecn.h>
  57#include <net/protocol.h>
  58#include <net/xfrm.h>
  59#include <net/snmp.h>
  60#include <net/dsfield.h>
  61#include <net/timewait_sock.h>
  62#include <net/inet_common.h>
  63#include <net/secure_seq.h>
  64#include <net/busy_poll.h>
  65
  66#include <linux/proc_fs.h>
  67#include <linux/seq_file.h>
  68
  69#include <crypto/hash.h>
  70#include <linux/scatterlist.h>
  71
  72static void	tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
  73static void	tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
  74				      struct request_sock *req);
  75
  76static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
  77
  78static const struct inet_connection_sock_af_ops ipv6_mapped;
  79static const struct inet_connection_sock_af_ops ipv6_specific;
  80#ifdef CONFIG_TCP_MD5SIG
  81static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
  82static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
  83#else
  84static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
  85						   const struct in6_addr *addr)
  86{
  87	return NULL;
  88}
  89#endif
  90
  91static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
  92{
  93	struct dst_entry *dst = skb_dst(skb);
  94
  95	if (dst && dst_hold_safe(dst)) {
  96		const struct rt6_info *rt = (const struct rt6_info *)dst;
  97
  98		sk->sk_rx_dst = dst;
  99		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
 100		inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
 101	}
 102}
 103
 104static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
 105{
 106	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
 107					    ipv6_hdr(skb)->saddr.s6_addr32,
 108					    tcp_hdr(skb)->dest,
 109					    tcp_hdr(skb)->source);
 110}
 111
 112static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 113			  int addr_len)
 114{
 115	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
 116	struct inet_sock *inet = inet_sk(sk);
 117	struct inet_connection_sock *icsk = inet_csk(sk);
 118	struct ipv6_pinfo *np = inet6_sk(sk);
 119	struct tcp_sock *tp = tcp_sk(sk);
 120	struct in6_addr *saddr = NULL, *final_p, final;
 121	struct ipv6_txoptions *opt;
 122	struct flowi6 fl6;
 123	struct dst_entry *dst;
 124	int addr_type;
 125	int err;
 126
 127	if (addr_len < SIN6_LEN_RFC2133)
 128		return -EINVAL;
 129
 130	if (usin->sin6_family != AF_INET6)
 131		return -EAFNOSUPPORT;
 132
 133	memset(&fl6, 0, sizeof(fl6));
 134
 135	if (np->sndflow) {
 136		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
 137		IP6_ECN_flow_init(fl6.flowlabel);
 138		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
 139			struct ip6_flowlabel *flowlabel;
 140			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
 141			if (!flowlabel)
 142				return -EINVAL;
 143			fl6_sock_release(flowlabel);
 144		}
 145	}
 146
 147	/*
 148	 *	connect() to INADDR_ANY means loopback (BSD'ism).
 149	 */
 150
 151	if (ipv6_addr_any(&usin->sin6_addr))
 152		usin->sin6_addr.s6_addr[15] = 0x1;
 153
 154	addr_type = ipv6_addr_type(&usin->sin6_addr);
 155
 156	if (addr_type & IPV6_ADDR_MULTICAST)
 157		return -ENETUNREACH;
 158
 159	if (addr_type&IPV6_ADDR_LINKLOCAL) {
 160		if (addr_len >= sizeof(struct sockaddr_in6) &&
 161		    usin->sin6_scope_id) {
 162			/* If interface is set while binding, indices
 163			 * must coincide.
 164			 */
 165			if (sk->sk_bound_dev_if &&
 166			    sk->sk_bound_dev_if != usin->sin6_scope_id)
 167				return -EINVAL;
 168
 169			sk->sk_bound_dev_if = usin->sin6_scope_id;
 170		}
 171
 172		/* Connect to link-local address requires an interface */
 173		if (!sk->sk_bound_dev_if)
 174			return -EINVAL;
 175	}
 176
 177	if (tp->rx_opt.ts_recent_stamp &&
 178	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
 179		tp->rx_opt.ts_recent = 0;
 180		tp->rx_opt.ts_recent_stamp = 0;
 181		tp->write_seq = 0;
 182	}
 183
 184	sk->sk_v6_daddr = usin->sin6_addr;
 185	np->flow_label = fl6.flowlabel;
 186
 187	/*
 188	 *	TCP over IPv4
 189	 */
 190
 191	if (addr_type == IPV6_ADDR_MAPPED) {
 192		u32 exthdrlen = icsk->icsk_ext_hdr_len;
 193		struct sockaddr_in sin;
 194
 195		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
 196
 197		if (__ipv6_only_sock(sk))
 198			return -ENETUNREACH;
 199
 200		sin.sin_family = AF_INET;
 201		sin.sin_port = usin->sin6_port;
 202		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
 203
 204		icsk->icsk_af_ops = &ipv6_mapped;
 205		sk->sk_backlog_rcv = tcp_v4_do_rcv;
 206#ifdef CONFIG_TCP_MD5SIG
 207		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
 208#endif
 209
 210		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
 211
 212		if (err) {
 213			icsk->icsk_ext_hdr_len = exthdrlen;
 214			icsk->icsk_af_ops = &ipv6_specific;
 215			sk->sk_backlog_rcv = tcp_v6_do_rcv;
 216#ifdef CONFIG_TCP_MD5SIG
 217			tp->af_specific = &tcp_sock_ipv6_specific;
 218#endif
 219			goto failure;
 220		}
 221		np->saddr = sk->sk_v6_rcv_saddr;
 222
 223		return err;
 224	}
 225
 226	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
 227		saddr = &sk->sk_v6_rcv_saddr;
 228
 229	fl6.flowi6_proto = IPPROTO_TCP;
 230	fl6.daddr = sk->sk_v6_daddr;
 231	fl6.saddr = saddr ? *saddr : np->saddr;
 232	fl6.flowi6_oif = sk->sk_bound_dev_if;
 233	fl6.flowi6_mark = sk->sk_mark;
 234	fl6.fl6_dport = usin->sin6_port;
 235	fl6.fl6_sport = inet->inet_sport;
 236
 237	opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
 238	final_p = fl6_update_dst(&fl6, opt, &final);
 239
 240	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
 241
 242	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
 243	if (IS_ERR(dst)) {
 244		err = PTR_ERR(dst);
 245		goto failure;
 246	}
 247
 248	if (!saddr) {
 249		saddr = &fl6.saddr;
 250		sk->sk_v6_rcv_saddr = *saddr;
 251	}
 252
 253	/* set the source address */
 254	np->saddr = *saddr;
 255	inet->inet_rcv_saddr = LOOPBACK4_IPV6;
 256
 257	sk->sk_gso_type = SKB_GSO_TCPV6;
 258	ip6_dst_store(sk, dst, NULL, NULL);
 259
 260	if (tcp_death_row.sysctl_tw_recycle &&
 261	    !tp->rx_opt.ts_recent_stamp &&
 262	    ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr))
 263		tcp_fetch_timewait_stamp(sk, dst);
 264
 265	icsk->icsk_ext_hdr_len = 0;
 266	if (opt)
 267		icsk->icsk_ext_hdr_len = opt->opt_flen +
 268					 opt->opt_nflen;
 269
 270	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
 271
 272	inet->inet_dport = usin->sin6_port;
 273
 274	tcp_set_state(sk, TCP_SYN_SENT);
 275	err = inet6_hash_connect(&tcp_death_row, sk);
 276	if (err)
 277		goto late_failure;
 278
 279	sk_set_txhash(sk);
 280
 281	if (!tp->write_seq && likely(!tp->repair))
 282		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
 283							     sk->sk_v6_daddr.s6_addr32,
 284							     inet->inet_sport,
 285							     inet->inet_dport);
 286
 287	err = tcp_connect(sk);
 288	if (err)
 289		goto late_failure;
 290
 291	return 0;
 292
 293late_failure:
 294	tcp_set_state(sk, TCP_CLOSE);
 295	__sk_dst_reset(sk);
 296failure:
 297	inet->inet_dport = 0;
 298	sk->sk_route_caps = 0;
 299	return err;
 300}
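
The v4-mapped branch above is what lets a single AF_INET6 socket reach IPv4 peers. A minimal userspace sketch of driving that path (the function name, address, and port are illustrative, not from the source):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int connect_v4_mapped(void)
{
	struct sockaddr_in6 sa;
	int fd = socket(AF_INET6, SOCK_STREAM, 0);

	if (fd < 0)
		return -1;
	memset(&sa, 0, sizeof(sa));
	sa.sin6_family = AF_INET6;
	sa.sin6_port = htons(80);
	/* ::ffff:192.0.2.1 is a v4-mapped address, so tcp_v6_connect()
	 * above hands the call off to tcp_v4_connect(); a link-local
	 * destination would instead require sin6_scope_id to be set. */
	inet_pton(AF_INET6, "::ffff:192.0.2.1", &sa.sin6_addr);
	if (connect(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}

Note that IPV6_V6ONLY must be off (the default), or the __ipv6_only_sock() check above fails the connect with -ENETUNREACH.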
 301
 302static void tcp_v6_mtu_reduced(struct sock *sk)
 303{
 304	struct dst_entry *dst;
 305
 306	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
 307		return;
 308
 309	dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
 310	if (!dst)
 311		return;
 312
 313	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
 314		tcp_sync_mss(sk, dst_mtu(dst));
 315		tcp_simple_retransmit(sk);
 316	}
 317}
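
A worked example of the check above: if icsk_pmtu_cookie is 1500 and an ICMPv6 Packet Too Big reported 1280 (the IPv6 minimum MTU), dst_mtu() now falls below the cached cookie, tcp_sync_mss() shrinks the MSS (1280 minus the 40-byte IPv6 header and 20-byte TCP header leaves at most 1220 bytes of payload before TCP options), and tcp_simple_retransmit() resends the segments that no longer fit the path.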
 318
 319static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 320		u8 type, u8 code, int offset, __be32 info)
 321{
 322	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
 323	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
 324	struct net *net = dev_net(skb->dev);
 325	struct request_sock *fastopen;
 326	struct ipv6_pinfo *np;
 327	struct tcp_sock *tp;
 328	__u32 seq, snd_una;
 329	struct sock *sk;
 330	bool fatal;
 331	int err;
 332
 333	sk = __inet6_lookup_established(net, &tcp_hashinfo,
 334					&hdr->daddr, th->dest,
 335					&hdr->saddr, ntohs(th->source),
 336					skb->dev->ifindex);
 337
 338	if (!sk) {
 339		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
 340				   ICMP6_MIB_INERRORS);
 341		return;
 342	}
 343
 344	if (sk->sk_state == TCP_TIME_WAIT) {
 345		inet_twsk_put(inet_twsk(sk));
 346		return;
 347	}
 348	seq = ntohl(th->seq);
 349	fatal = icmpv6_err_convert(type, code, &err);
 350	if (sk->sk_state == TCP_NEW_SYN_RECV)
 351		return tcp_req_err(sk, seq, fatal);
 352
 353	bh_lock_sock(sk);
 354	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
 355		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
 356
 357	if (sk->sk_state == TCP_CLOSE)
 358		goto out;
 359
 360	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
 361		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
 362		goto out;
 363	}
 364
 365	tp = tcp_sk(sk);
  366	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child()) */
 367	fastopen = tp->fastopen_rsk;
 368	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
 369	if (sk->sk_state != TCP_LISTEN &&
 370	    !between(seq, snd_una, tp->snd_nxt)) {
 371		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
 372		goto out;
 373	}
 374
 375	np = inet6_sk(sk);
 376
 377	if (type == NDISC_REDIRECT) {
 378		struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
 379
 380		if (dst)
 381			dst->ops->redirect(dst, sk, skb);
 382		goto out;
 383	}
 384
 385	if (type == ICMPV6_PKT_TOOBIG) {
  386		/* We are not interested in TCP_LISTEN and open_requests
  387		 * (SYN-ACKs sent out by Linux are always <576 bytes, so
  388		 * they should go through unfragmented).
  389		 */
 390		if (sk->sk_state == TCP_LISTEN)
 391			goto out;
 392
 393		if (!ip6_sk_accept_pmtu(sk))
 394			goto out;
 395
 396		tp->mtu_info = ntohl(info);
 397		if (!sock_owned_by_user(sk))
 398			tcp_v6_mtu_reduced(sk);
 399		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
 400					   &tp->tsq_flags))
 401			sock_hold(sk);
 402		goto out;
 403	}
 404
 405
  406	/* Might be for a request_sock */
 407	switch (sk->sk_state) {
 408	case TCP_SYN_SENT:
 409	case TCP_SYN_RECV:
  410		/* Only in fast or simultaneous open. If a fast open socket
  411		 * is already accepted it is treated as a connected one below.
  412		 */
 413		if (fastopen && !fastopen->sk)
 414			break;
 415
 416		if (!sock_owned_by_user(sk)) {
 417			sk->sk_err = err;
 418			sk->sk_error_report(sk);		/* Wake people up to see the error (see connect in sock.c) */
 419
 420			tcp_done(sk);
 421		} else
 422			sk->sk_err_soft = err;
 423		goto out;
 424	}
 425
 426	if (!sock_owned_by_user(sk) && np->recverr) {
 427		sk->sk_err = err;
 428		sk->sk_error_report(sk);
 429	} else
 430		sk->sk_err_soft = err;
 431
 432out:
 433	bh_unlock_sock(sk);
 434	sock_put(sk);
 435}
 436
 437
 438static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
 439			      struct flowi *fl,
 440			      struct request_sock *req,
 441			      struct tcp_fastopen_cookie *foc,
 442			      bool attach_req)
 443{
 444	struct inet_request_sock *ireq = inet_rsk(req);
 445	struct ipv6_pinfo *np = inet6_sk(sk);
 446	struct flowi6 *fl6 = &fl->u.ip6;
 447	struct sk_buff *skb;
 448	int err = -ENOMEM;
 449
 450	/* First, grab a route. */
 451	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
 452					       IPPROTO_TCP)) == NULL)
 453		goto done;
 454
 455	skb = tcp_make_synack(sk, dst, req, foc, attach_req);
 456
 457	if (skb) {
 458		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
 459				    &ireq->ir_v6_rmt_addr);
 460
 461		fl6->daddr = ireq->ir_v6_rmt_addr;
 462		if (np->repflow && ireq->pktopts)
 463			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
 464
 465		rcu_read_lock();
 466		err = ip6_xmit(sk, skb, fl6, rcu_dereference(np->opt),
 467			       np->tclass);
 468		rcu_read_unlock();
 469		err = net_xmit_eval(err);
 470	}
 471
 472done:
 473	return err;
 474}
 475
 476
 477static void tcp_v6_reqsk_destructor(struct request_sock *req)
 478{
 479	kfree_skb(inet_rsk(req)->pktopts);
 480}
 481
 482#ifdef CONFIG_TCP_MD5SIG
 483static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
 484						   const struct in6_addr *addr)
 485{
 486	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
 487}
 488
 489static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
 490						const struct sock *addr_sk)
 491{
 492	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
 493}
 494
 495static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
 496				 int optlen)
 497{
 498	struct tcp_md5sig cmd;
 499	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
 500
 501	if (optlen < sizeof(cmd))
 502		return -EINVAL;
 503
 504	if (copy_from_user(&cmd, optval, sizeof(cmd)))
 505		return -EFAULT;
 506
 507	if (sin6->sin6_family != AF_INET6)
 508		return -EINVAL;
 509
 510	if (!cmd.tcpm_keylen) {
 511		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
 512			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
 513					      AF_INET);
 514		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
 515				      AF_INET6);
 516	}
 517
 518	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
 519		return -EINVAL;
 520
 521	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
 522		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
 523				      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
 524
 525	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
 526			      AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
 527}
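
Userspace reaches this parser through the TCP_MD5SIG socket option; glibc exposes struct tcp_md5sig in <netinet/tcp.h>. A hedged sketch (the function name, peer argument, and key bytes are placeholders):

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <string.h>
#include <sys/socket.h>

/* Associate an MD5 key with a single IPv6 peer on socket fd. */
int set_tcp_md5(int fd, const struct in6_addr *peer)
{
	struct tcp_md5sig md5;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&md5.tcpm_addr;

	memset(&md5, 0, sizeof(md5));
	sin6->sin6_family = AF_INET6;	/* anything else is rejected above */
	sin6->sin6_addr = *peer;
	md5.tcpm_keylen = 6;		/* must not exceed TCP_MD5SIG_MAXKEYLEN */
	memcpy(md5.tcpm_key, "secret", 6);

	return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
}

Calling it with tcpm_keylen set to 0 takes the !cmd.tcpm_keylen branch above and deletes the key; a v4-mapped sin6_addr manages the key under AF_INET instead.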
 528
 529static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
 530					const struct in6_addr *daddr,
 531					const struct in6_addr *saddr, int nbytes)
 532{
 533	struct tcp6_pseudohdr *bp;
 534	struct scatterlist sg;
 535
 536	bp = &hp->md5_blk.ip6;
 537	/* 1. TCP pseudo-header (RFC2460) */
 538	bp->saddr = *saddr;
 539	bp->daddr = *daddr;
 540	bp->protocol = cpu_to_be32(IPPROTO_TCP);
 541	bp->len = cpu_to_be32(nbytes);
 542
 543	sg_init_one(&sg, bp, sizeof(*bp));
 544	ahash_request_set_crypt(hp->md5_req, &sg, NULL, sizeof(*bp));
 545	return crypto_ahash_update(hp->md5_req);
 546}
 547
 548static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
 549			       const struct in6_addr *daddr, struct in6_addr *saddr,
 550			       const struct tcphdr *th)
 551{
 552	struct tcp_md5sig_pool *hp;
 553	struct ahash_request *req;
 554
 555	hp = tcp_get_md5sig_pool();
 556	if (!hp)
 557		goto clear_hash_noput;
 558	req = hp->md5_req;
 559
 560	if (crypto_ahash_init(req))
 561		goto clear_hash;
 562	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
 563		goto clear_hash;
 564	if (tcp_md5_hash_header(hp, th))
 565		goto clear_hash;
 566	if (tcp_md5_hash_key(hp, key))
 567		goto clear_hash;
 568	ahash_request_set_crypt(req, NULL, md5_hash, 0);
 569	if (crypto_ahash_final(req))
 570		goto clear_hash;
 571
 572	tcp_put_md5sig_pool();
 573	return 0;
 574
 575clear_hash:
 576	tcp_put_md5sig_pool();
 577clear_hash_noput:
 578	memset(md5_hash, 0, 16);
 579	return 1;
 580}
 581
 582static int tcp_v6_md5_hash_skb(char *md5_hash,
 583			       const struct tcp_md5sig_key *key,
 584			       const struct sock *sk,
 585			       const struct sk_buff *skb)
 586{
 587	const struct in6_addr *saddr, *daddr;
 588	struct tcp_md5sig_pool *hp;
 589	struct ahash_request *req;
 590	const struct tcphdr *th = tcp_hdr(skb);
 591
  592	if (sk) { /* valid for established/request sockets */
 593		saddr = &sk->sk_v6_rcv_saddr;
 594		daddr = &sk->sk_v6_daddr;
 595	} else {
 596		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
 597		saddr = &ip6h->saddr;
 598		daddr = &ip6h->daddr;
 599	}
 600
 601	hp = tcp_get_md5sig_pool();
 602	if (!hp)
 603		goto clear_hash_noput;
 604	req = hp->md5_req;
 605
 606	if (crypto_ahash_init(req))
 607		goto clear_hash;
 608
 609	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
 610		goto clear_hash;
 611	if (tcp_md5_hash_header(hp, th))
 612		goto clear_hash;
 613	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
 614		goto clear_hash;
 615	if (tcp_md5_hash_key(hp, key))
 616		goto clear_hash;
 617	ahash_request_set_crypt(req, NULL, md5_hash, 0);
 618	if (crypto_ahash_final(req))
 619		goto clear_hash;
 620
 621	tcp_put_md5sig_pool();
 622	return 0;
 623
 624clear_hash:
 625	tcp_put_md5sig_pool();
 626clear_hash_noput:
 627	memset(md5_hash, 0, 16);
 628	return 1;
 629}
 630
 631#endif
 632
 633static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
 634				    const struct sk_buff *skb)
 635{
 636#ifdef CONFIG_TCP_MD5SIG
 637	const __u8 *hash_location = NULL;
 638	struct tcp_md5sig_key *hash_expected;
 639	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
 640	const struct tcphdr *th = tcp_hdr(skb);
 641	int genhash;
 642	u8 newhash[16];
 643
 644	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
 645	hash_location = tcp_parse_md5sig_option(th);
 646
 647	/* We've parsed the options - do we have a hash? */
 648	if (!hash_expected && !hash_location)
 649		return false;
 650
 651	if (hash_expected && !hash_location) {
 652		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
 653		return true;
 654	}
 655
 656	if (!hash_expected && hash_location) {
 657		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
 658		return true;
 659	}
 660
 661	/* check the signature */
 662	genhash = tcp_v6_md5_hash_skb(newhash,
 663				      hash_expected,
 664				      NULL, skb);
 665
 666	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
 667		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
 668				     genhash ? "failed" : "mismatch",
 669				     &ip6h->saddr, ntohs(th->source),
 670				     &ip6h->daddr, ntohs(th->dest));
 671		return true;
 672	}
 673#endif
 674	return false;
 675}
 676
 677static void tcp_v6_init_req(struct request_sock *req,
 678			    const struct sock *sk_listener,
 679			    struct sk_buff *skb)
 680{
 681	struct inet_request_sock *ireq = inet_rsk(req);
 682	const struct ipv6_pinfo *np = inet6_sk(sk_listener);
 683
 684	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
 685	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
 686
 687	/* So that link locals have meaning */
 688	if (!sk_listener->sk_bound_dev_if &&
 689	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
 690		ireq->ir_iif = tcp_v6_iif(skb);
 691
 692	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
 693	    (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
 694	     np->rxopt.bits.rxinfo ||
 695	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
 696	     np->rxopt.bits.rxohlim || np->repflow)) {
 697		atomic_inc(&skb->users);
 698		ireq->pktopts = skb;
 699	}
 700}
 701
 702static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
 703					  struct flowi *fl,
 704					  const struct request_sock *req,
 705					  bool *strict)
 706{
 707	if (strict)
 708		*strict = true;
 709	return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
 710}
 711
 712struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
 713	.family		=	AF_INET6,
 714	.obj_size	=	sizeof(struct tcp6_request_sock),
 715	.rtx_syn_ack	=	tcp_rtx_synack,
 716	.send_ack	=	tcp_v6_reqsk_send_ack,
 717	.destructor	=	tcp_v6_reqsk_destructor,
 718	.send_reset	=	tcp_v6_send_reset,
 719	.syn_ack_timeout =	tcp_syn_ack_timeout,
 720};
 721
 722static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
 723	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
 724				sizeof(struct ipv6hdr),
 725#ifdef CONFIG_TCP_MD5SIG
 726	.req_md5_lookup	=	tcp_v6_md5_lookup,
 727	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
 728#endif
 729	.init_req	=	tcp_v6_init_req,
 730#ifdef CONFIG_SYN_COOKIES
 731	.cookie_init_seq =	cookie_v6_init_sequence,
 732#endif
 733	.route_req	=	tcp_v6_route_req,
 734	.init_seq	=	tcp_v6_init_sequence,
 735	.send_synack	=	tcp_v6_send_synack,
 736};
 737
 738static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
 739				 u32 ack, u32 win, u32 tsval, u32 tsecr,
 740				 int oif, struct tcp_md5sig_key *key, int rst,
 741				 u8 tclass, u32 label)
 742{
 743	const struct tcphdr *th = tcp_hdr(skb);
 744	struct tcphdr *t1;
 745	struct sk_buff *buff;
 746	struct flowi6 fl6;
 747	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
 748	struct sock *ctl_sk = net->ipv6.tcp_sk;
 749	unsigned int tot_len = sizeof(struct tcphdr);
 750	struct dst_entry *dst;
 751	__be32 *topt;
 752
 753	if (tsecr)
 754		tot_len += TCPOLEN_TSTAMP_ALIGNED;
 755#ifdef CONFIG_TCP_MD5SIG
 756	if (key)
 757		tot_len += TCPOLEN_MD5SIG_ALIGNED;
 758#endif
 759
 760	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
 761			 GFP_ATOMIC);
 762	if (!buff)
 763		return;
 764
 765	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
 766
 767	t1 = (struct tcphdr *) skb_push(buff, tot_len);
 768	skb_reset_transport_header(buff);
 769
 770	/* Swap the send and the receive. */
 771	memset(t1, 0, sizeof(*t1));
 772	t1->dest = th->source;
 773	t1->source = th->dest;
 774	t1->doff = tot_len / 4;
 775	t1->seq = htonl(seq);
 776	t1->ack_seq = htonl(ack);
 777	t1->ack = !rst || !th->ack;
 778	t1->rst = rst;
 779	t1->window = htons(win);
 780
 781	topt = (__be32 *)(t1 + 1);
 782
 783	if (tsecr) {
 784		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
 785				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
 786		*topt++ = htonl(tsval);
 787		*topt++ = htonl(tsecr);
 788	}
 789
 790#ifdef CONFIG_TCP_MD5SIG
 791	if (key) {
 792		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
 793				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
 794		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
 795				    &ipv6_hdr(skb)->saddr,
 796				    &ipv6_hdr(skb)->daddr, t1);
 797	}
 798#endif
 799
 800	memset(&fl6, 0, sizeof(fl6));
 801	fl6.daddr = ipv6_hdr(skb)->saddr;
 802	fl6.saddr = ipv6_hdr(skb)->daddr;
 803	fl6.flowlabel = label;
 804
 805	buff->ip_summed = CHECKSUM_PARTIAL;
 806	buff->csum = 0;
 807
 808	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
 809
 810	fl6.flowi6_proto = IPPROTO_TCP;
 811	if (rt6_need_strict(&fl6.daddr) && !oif)
 812		fl6.flowi6_oif = tcp_v6_iif(skb);
 813	else {
 814		if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
 815			oif = skb->skb_iif;
 816
 817		fl6.flowi6_oif = oif;
 818	}
 819
 820	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
 821	fl6.fl6_dport = t1->dest;
 822	fl6.fl6_sport = t1->source;
 823	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
 824
  825	/* Pass a socket to ip6_dst_lookup, even if it is only for a RST;
  826	 * the underlying function will use it to retrieve the network
  827	 * namespace.
  828	 */
 829	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
 830	if (!IS_ERR(dst)) {
 831		skb_dst_set(buff, dst);
 832		ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
 833		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
 834		if (rst)
 835			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
 836		return;
 837	}
 838
 839	kfree_skb(buff);
 840}
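
Sizing check for the reply built above: with both options present, tot_len = 20 (sizeof(struct tcphdr)) + 12 (TCPOLEN_TSTAMP_ALIGNED) + 20 (TCPOLEN_MD5SIG_ALIGNED) = 52 bytes, so t1->doff = 52 / 4 = 13, safely within the 15 32-bit words the 4-bit doff field can express.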
 841
 842static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
 843{
 844	const struct tcphdr *th = tcp_hdr(skb);
 845	u32 seq = 0, ack_seq = 0;
 846	struct tcp_md5sig_key *key = NULL;
 847#ifdef CONFIG_TCP_MD5SIG
 848	const __u8 *hash_location = NULL;
 849	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
 850	unsigned char newhash[16];
 851	int genhash;
 852	struct sock *sk1 = NULL;
 853#endif
 854	int oif;
 855
 856	if (th->rst)
 857		return;
 858
  859	/* If sk is not NULL, it means we did a successful lookup and the
  860	 * incoming route had to be correct. prequeue might have dropped our dst.
  861	 */
 862	if (!sk && !ipv6_unicast_destination(skb))
 863		return;
 864
 865#ifdef CONFIG_TCP_MD5SIG
 866	hash_location = tcp_parse_md5sig_option(th);
 867	if (sk && sk_fullsock(sk)) {
 868		key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr);
 869	} else if (hash_location) {
  870		/*
  871		 * The active side is gone. Try to find the listening socket
  872		 * through the source port, and then find the md5 key through
  873		 * the listening socket. We do not lose security here:
  874		 * the incoming packet is checked against the md5 hash of the
  875		 * found key, and no RST is generated if the hash doesn't match.
  876		 */
 877		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
 878					   &tcp_hashinfo, NULL, 0,
 879					   &ipv6h->saddr,
 880					   th->source, &ipv6h->daddr,
 881					   ntohs(th->source), tcp_v6_iif(skb));
 882		if (!sk1)
 883			return;
 884
 885		rcu_read_lock();
 886		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
 887		if (!key)
 888			goto release_sk1;
 889
 890		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
 891		if (genhash || memcmp(hash_location, newhash, 16) != 0)
 892			goto release_sk1;
 893	}
 894#endif
 895
 896	if (th->ack)
 897		seq = ntohl(th->ack_seq);
 898	else
 899		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
 900			  (th->doff << 2);
 901
 902	oif = sk ? sk->sk_bound_dev_if : 0;
 903	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);
 904
 905#ifdef CONFIG_TCP_MD5SIG
 906release_sk1:
 907	if (sk1) {
 908		rcu_read_unlock();
 909		sock_put(sk1);
 910	}
 911#endif
 912}
 913
 914static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
 915			    u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
 916			    struct tcp_md5sig_key *key, u8 tclass,
 917			    u32 label)
 918{
 919	tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
 920			     tclass, label);
 921}
 922
 923static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
 924{
 925	struct inet_timewait_sock *tw = inet_twsk(sk);
 926	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
 927
 928	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
 929			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
 930			tcp_time_stamp + tcptw->tw_ts_offset,
 931			tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
 932			tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));
 933
 934	inet_twsk_put(tw);
 935}
 936
 937static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
 938				  struct request_sock *req)
 939{
 940	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
 941	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
 942	 */
 943	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
 944			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
 945			tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd,
 946			tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
 947			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
 948			0, 0);
 949}
 950
 951
 952static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
 953{
 954#ifdef CONFIG_SYN_COOKIES
 955	const struct tcphdr *th = tcp_hdr(skb);
 956
 957	if (!th->syn)
 958		sk = cookie_v6_check(sk, skb);
 959#endif
 960	return sk;
 961}
 962
 963static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 964{
 965	if (skb->protocol == htons(ETH_P_IP))
 966		return tcp_v4_conn_request(sk, skb);
 967
 968	if (!ipv6_unicast_destination(skb))
 969		goto drop;
 970
 971	return tcp_conn_request(&tcp6_request_sock_ops,
 972				&tcp_request_sock_ipv6_ops, sk, skb);
 973
 974drop:
 975	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
 976	return 0; /* don't send reset */
 977}
 978
 978
 979static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
 980					 struct request_sock *req,
 981					 struct dst_entry *dst,
 982					 struct request_sock *req_unhash,
 983					 bool *own_req)
 984{
 985	struct inet_request_sock *ireq;
 986	struct ipv6_pinfo *newnp;
 987	const struct ipv6_pinfo *np = inet6_sk(sk);
 988	struct ipv6_txoptions *opt;
 989	struct tcp6_sock *newtcp6sk;
 990	struct inet_sock *newinet;
 991	struct tcp_sock *newtp;
 992	struct sock *newsk;
 993#ifdef CONFIG_TCP_MD5SIG
 994	struct tcp_md5sig_key *key;
 995#endif
 996	struct flowi6 fl6;
 997
 998	if (skb->protocol == htons(ETH_P_IP)) {
 999		/*
1000		 *	v6 mapped
1001		 */
1002
1003		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
1004					     req_unhash, own_req);
1005
1006		if (!newsk)
1007			return NULL;
1008
1009		newtcp6sk = (struct tcp6_sock *)newsk;
1010		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1011
1012		newinet = inet_sk(newsk);
1013		newnp = inet6_sk(newsk);
1014		newtp = tcp_sk(newsk);
1015
1016		memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1017
1018		newnp->saddr = newsk->sk_v6_rcv_saddr;
1019
1020		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1021		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
1022#ifdef CONFIG_TCP_MD5SIG
1023		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1024#endif
1025
1025
1026		newnp->ipv6_ac_list = NULL;
1027		newnp->ipv6_fl_list = NULL;
1028		newnp->pktoptions  = NULL;
1029		newnp->opt	   = NULL;
1030		newnp->mcast_oif   = tcp_v6_iif(skb);
1031		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
1032		newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
1033		if (np->repflow)
1034			newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
1035
1036		/*
1037		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1038		 * here, tcp_create_openreq_child now does this for us, see the comment in
1039		 * that function for the gory details. -acme
1040		 */
1041
 1042		/* This is a tricky place. Until this moment the IPv4 tcp
 1043		   code worked with the IPv6 icsk.icsk_af_ops.
 1044		   Sync it now.
 1045		 */
1046		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
1047
1048		return newsk;
1049	}
1050
1051	ireq = inet_rsk(req);
1052
1053	if (sk_acceptq_is_full(sk))
1054		goto out_overflow;
1055
1056	if (!dst) {
1057		dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
1058		if (!dst)
1059			goto out;
1060	}
1061
1062	newsk = tcp_create_openreq_child(sk, req, skb);
1063	if (!newsk)
1064		goto out_nonewsk;
1065
1066	/*
1067	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1068	 * count here, tcp_create_openreq_child now does this for us, see the
1069	 * comment in that function for the gory details. -acme
1070	 */
1071
1072	newsk->sk_gso_type = SKB_GSO_TCPV6;
1073	ip6_dst_store(newsk, dst, NULL, NULL);
1074	inet6_sk_rx_dst_set(newsk, skb);
1075
1076	newtcp6sk = (struct tcp6_sock *)newsk;
1077	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1078
1079	newtp = tcp_sk(newsk);
1080	newinet = inet_sk(newsk);
1081	newnp = inet6_sk(newsk);
1082
1083	memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1084
1085	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
1086	newnp->saddr = ireq->ir_v6_loc_addr;
1087	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
1088	newsk->sk_bound_dev_if = ireq->ir_iif;
1089
1090	/* Now IPv6 options...
1091
1092	   First: no IPv4 options.
1093	 */
1094	newinet->inet_opt = NULL;
1095	newnp->ipv6_ac_list = NULL;
1096	newnp->ipv6_fl_list = NULL;
1097
1098	/* Clone RX bits */
1099	newnp->rxopt.all = np->rxopt.all;
1100
1101	newnp->pktoptions = NULL;
1102	newnp->opt	  = NULL;
1103	newnp->mcast_oif  = tcp_v6_iif(skb);
1104	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1105	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
1106	if (np->repflow)
1107		newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
1108
1109	/* Clone native IPv6 options from listening socket (if any)
1110
1111	   Yes, keeping reference count would be much more clever,
 1112	   but we do one more thing here: reattach optmem
1113	   to newsk.
1114	 */
1115	opt = rcu_dereference(np->opt);
1116	if (opt) {
1117		opt = ipv6_dup_options(newsk, opt);
1118		RCU_INIT_POINTER(newnp->opt, opt);
1119	}
1120	inet_csk(newsk)->icsk_ext_hdr_len = 0;
1121	if (opt)
1122		inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
1123						    opt->opt_flen;
1124
1125	tcp_ca_openreq_child(newsk, dst);
1126
1127	tcp_sync_mss(newsk, dst_mtu(dst));
1128	newtp->advmss = dst_metric_advmss(dst);
1129	if (tcp_sk(sk)->rx_opt.user_mss &&
1130	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1131		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1132
1133	tcp_initialize_rcv_mss(newsk);
1134
1135	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
1136	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
1137
1138#ifdef CONFIG_TCP_MD5SIG
1139	/* Copy over the MD5 key from the original socket */
1140	key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
1141	if (key) {
1142		/* We're using one, so create a matching key
1143		 * on the newsk structure. If we fail to get
1144		 * memory, then we end up not copying the key
1145		 * across. Shucks.
1146		 */
1147		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
1148			       AF_INET6, key->key, key->keylen,
1149			       sk_gfp_mask(sk, GFP_ATOMIC));
1150	}
1151#endif
1152
1153	if (__inet_inherit_port(sk, newsk) < 0) {
1154		inet_csk_prepare_forced_close(newsk);
1155		tcp_done(newsk);
1156		goto out;
1157	}
1158	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
1159	if (*own_req) {
1160		tcp_move_syn(newtp, req);
1161
1162		/* Clone pktoptions received with SYN, if we own the req */
1163		if (ireq->pktopts) {
1164			newnp->pktoptions = skb_clone(ireq->pktopts,
1165						      sk_gfp_mask(sk, GFP_ATOMIC));
1166			consume_skb(ireq->pktopts);
1167			ireq->pktopts = NULL;
1168			if (newnp->pktoptions)
1169				skb_set_owner_r(newnp->pktoptions, newsk);
1170		}
1171	}
1172
1173	return newsk;
1174
1175out_overflow:
1176	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1177out_nonewsk:
1178	dst_release(dst);
1179out:
1180	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1181	return NULL;
1182}
1183
 1184/* The socket must have its spinlock held when we get
1185 * here, unless it is a TCP_LISTEN socket.
1186 *
1187 * We have a potential double-lock case here, so even when
1188 * doing backlog processing we use the BH locking scheme.
1189 * This is because we cannot sleep with the original spinlock
1190 * held.
1191 */
1192static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1193{
1194	struct ipv6_pinfo *np = inet6_sk(sk);
1195	struct tcp_sock *tp;
1196	struct sk_buff *opt_skb = NULL;
1197
 1198	/* Imagine: the socket is IPv6. An IPv4 packet arrives,
 1199	   goes to the IPv4 receive handler and is backlogged.
 1200	   From the backlog it always goes here. Kerboom...
 1201	   Fortunately, tcp_rcv_established and rcv_established
 1202	   handle them correctly, but that is not the case with
 1203	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
 1204	 */
1205
1206	if (skb->protocol == htons(ETH_P_IP))
1207		return tcp_v4_do_rcv(sk, skb);
1208
1209	if (sk_filter(sk, skb))
1210		goto discard;
1211
1212	/*
1213	 *	socket locking is here for SMP purposes as backlog rcv
1214	 *	is currently called with bh processing disabled.
1215	 */
1216
 1217	/* Do Stevens' IPV6_PKTOPTIONS.
 1218
 1219	   Yes, guys, this is the only place in our code where we
 1220	   can do this without affecting IPv4.
 1221	   The rest of the code is protocol independent,
 1222	   and I do not like the idea of uglifying IPv4.
 1223
 1224	   Actually, the whole idea behind IPV6_PKTOPTIONS
 1225	   looks not very well thought out. For now we latch the
 1226	   options received in the last packet enqueued
 1227	   by tcp. Feel free to propose a better solution.
 1228					       --ANK (980728)
 1229	 */
1230	if (np->rxopt.all)
1231		opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));
1232
1233	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1234		struct dst_entry *dst = sk->sk_rx_dst;
1235
1236		sock_rps_save_rxhash(sk, skb);
1237		sk_mark_napi_id(sk, skb);
1238		if (dst) {
1239			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1240			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
1241				dst_release(dst);
1242				sk->sk_rx_dst = NULL;
1243			}
1244		}
1245
1246		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
1247		if (opt_skb)
1248			goto ipv6_pktoptions;
1249		return 0;
1250	}
1251
1252	if (tcp_checksum_complete(skb))
1253		goto csum_err;
1254
1255	if (sk->sk_state == TCP_LISTEN) {
1256		struct sock *nsk = tcp_v6_cookie_check(sk, skb);
1257
1258		if (!nsk)
1259			goto discard;
1260
1261		if (nsk != sk) {
1262			sock_rps_save_rxhash(nsk, skb);
1263			sk_mark_napi_id(nsk, skb);
1264			if (tcp_child_process(sk, nsk, skb))
1265				goto reset;
1266			if (opt_skb)
1267				__kfree_skb(opt_skb);
1268			return 0;
1269		}
1270	} else
1271		sock_rps_save_rxhash(sk, skb);
1272
1273	if (tcp_rcv_state_process(sk, skb))
1274		goto reset;
1275	if (opt_skb)
1276		goto ipv6_pktoptions;
1277	return 0;
1278
1279reset:
1280	tcp_v6_send_reset(sk, skb);
1281discard:
1282	if (opt_skb)
1283		__kfree_skb(opt_skb);
1284	kfree_skb(skb);
1285	return 0;
1286csum_err:
1287	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
1288	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1289	goto discard;
1290
1291
1292ipv6_pktoptions:
 1293	/* What is going on here?
 1294
 1295	   1. The skb was enqueued by tcp.
 1296	   2. The skb was added to the tail of the read queue, not out of order.
 1297	   3. The socket is not in a passive state.
 1298	   4. Finally, it really contains options the user wants to receive.
 1299	 */
1300	tp = tcp_sk(sk);
1301	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1302	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
1303		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
1304			np->mcast_oif = tcp_v6_iif(opt_skb);
1305		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1306			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
1307		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
1308			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
1309		if (np->repflow)
1310			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
1311		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
1312			skb_set_owner_r(opt_skb, sk);
1313			opt_skb = xchg(&np->pktoptions, opt_skb);
1314		} else {
1315			__kfree_skb(opt_skb);
1316			opt_skb = xchg(&np->pktoptions, NULL);
1317		}
1318	}
1319
1320	kfree_skb(opt_skb);
1321	return 0;
1322}
1323
1324static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
1325			   const struct tcphdr *th)
1326{
 1327	/* This is tricky: we move IP6CB to its correct location inside
1328	 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
1329	 * _decode_session6() uses IP6CB().
1330	 * barrier() makes sure compiler won't play aliasing games.
1331	 */
1332	memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
1333		sizeof(struct inet6_skb_parm));
1334	barrier();
1335
1336	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1337	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1338				    skb->len - th->doff*4);
1339	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1340	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1341	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1342	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1343	TCP_SKB_CB(skb)->sacked = 0;
1344}
1345
1346static void tcp_v6_restore_cb(struct sk_buff *skb)
1347{
1348	/* We need to move header back to the beginning if xfrm6_policy_check()
1349	 * and tcp_v6_fill_cb() are going to be called again.
1350	 */
1351	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
1352		sizeof(struct inet6_skb_parm));
1353}
1354
1355static int tcp_v6_rcv(struct sk_buff *skb)
1356{
1357	const struct tcphdr *th;
1358	const struct ipv6hdr *hdr;
1359	struct sock *sk;
1360	int ret;
1361	struct net *net = dev_net(skb->dev);
1362
1363	if (skb->pkt_type != PACKET_HOST)
1364		goto discard_it;
1365
1366	/*
1367	 *	Count it even if it's bad.
1368	 */
1369	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1370
1371	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1372		goto discard_it;
1373
1374	th = tcp_hdr(skb);
1375
1376	if (th->doff < sizeof(struct tcphdr)/4)
1377		goto bad_packet;
1378	if (!pskb_may_pull(skb, th->doff*4))
1379		goto discard_it;
1380
1381	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
1382		goto csum_error;
1383
1384	th = tcp_hdr(skb);
1385	hdr = ipv6_hdr(skb);
1386
1387lookup:
1388	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
1389				th->source, th->dest, inet6_iif(skb));
1390	if (!sk)
1391		goto no_tcp_socket;
1392
1393process:
1394	if (sk->sk_state == TCP_TIME_WAIT)
1395		goto do_time_wait;
1396
1397	if (sk->sk_state == TCP_NEW_SYN_RECV) {
1398		struct request_sock *req = inet_reqsk(sk);
1399		struct sock *nsk;
1400
1401		sk = req->rsk_listener;
1402		tcp_v6_fill_cb(skb, hdr, th);
1403		if (tcp_v6_inbound_md5_hash(sk, skb)) {
1404			reqsk_put(req);
1405			goto discard_it;
1406		}
1407		if (unlikely(sk->sk_state != TCP_LISTEN)) {
1408			inet_csk_reqsk_queue_drop_and_put(sk, req);
1409			goto lookup;
1410		}
1411		sock_hold(sk);
1412		nsk = tcp_check_req(sk, skb, req, false);
1413		if (!nsk) {
1414			reqsk_put(req);
1415			goto discard_and_relse;
1416		}
1417		if (nsk == sk) {
1418			reqsk_put(req);
1419			tcp_v6_restore_cb(skb);
1420		} else if (tcp_child_process(sk, nsk, skb)) {
1421			tcp_v6_send_reset(nsk, skb);
1422			goto discard_and_relse;
1423		} else {
1424			sock_put(sk);
1425			return 0;
1426		}
1427	}
1428	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
1429		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1430		goto discard_and_relse;
1431	}
1432
1433	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1434		goto discard_and_relse;
1435
1436	tcp_v6_fill_cb(skb, hdr, th);
1437
1438	if (tcp_v6_inbound_md5_hash(sk, skb))
1439		goto discard_and_relse;
1440
1441	if (sk_filter(sk, skb))
1442		goto discard_and_relse;
1443
1444	skb->dev = NULL;
1445
1446	if (sk->sk_state == TCP_LISTEN) {
1447		ret = tcp_v6_do_rcv(sk, skb);
1448		goto put_and_return;
1449	}
1450
1451	sk_incoming_cpu_update(sk);
1452
1453	bh_lock_sock_nested(sk);
1454	tcp_segs_in(tcp_sk(sk), skb);
1455	ret = 0;
1456	if (!sock_owned_by_user(sk)) {
1457		if (!tcp_prequeue(sk, skb))
1458			ret = tcp_v6_do_rcv(sk, skb);
1459	} else if (unlikely(sk_add_backlog(sk, skb,
1460					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
1461		bh_unlock_sock(sk);
1462		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1463		goto discard_and_relse;
1464	}
1465	bh_unlock_sock(sk);
1466
1467put_and_return:
1468	sock_put(sk);
1469	return ret ? -1 : 0;
1470
1471no_tcp_socket:
1472	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1473		goto discard_it;
1474
1475	tcp_v6_fill_cb(skb, hdr, th);
1476
1477	if (tcp_checksum_complete(skb)) {
1478csum_error:
1479		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
1480bad_packet:
1481		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1482	} else {
1483		tcp_v6_send_reset(NULL, skb);
1484	}
1485
1486discard_it:
1487	kfree_skb(skb);
1488	return 0;
1489
1490discard_and_relse:
1491	sock_put(sk);
1492	goto discard_it;
1493
1494do_time_wait:
1495	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1496		inet_twsk_put(inet_twsk(sk));
1497		goto discard_it;
1498	}
1499
1500	tcp_v6_fill_cb(skb, hdr, th);
1501
1502	if (tcp_checksum_complete(skb)) {
1503		inet_twsk_put(inet_twsk(sk));
1504		goto csum_error;
1505	}
1506
1507	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1508	case TCP_TW_SYN:
1509	{
1510		struct sock *sk2;
1511
1512		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
1513					    skb, __tcp_hdrlen(th),
1514					    &ipv6_hdr(skb)->saddr, th->source,
1515					    &ipv6_hdr(skb)->daddr,
1516					    ntohs(th->dest), tcp_v6_iif(skb));
1517		if (sk2) {
1518			struct inet_timewait_sock *tw = inet_twsk(sk);
1519			inet_twsk_deschedule_put(tw);
1520			sk = sk2;
1521			tcp_v6_restore_cb(skb);
1522			goto process;
1523		}
1524		/* Fall through to ACK */
1525	}
1526	case TCP_TW_ACK:
1527		tcp_v6_timewait_ack(sk, skb);
1528		break;
1529	case TCP_TW_RST:
1530		tcp_v6_restore_cb(skb);
1531		tcp_v6_send_reset(sk, skb);
1532		inet_twsk_deschedule_put(inet_twsk(sk));
1533		goto discard_it;
1534	case TCP_TW_SUCCESS:
1535		;
1536	}
1537	goto discard_it;
1538}
1539
1540static void tcp_v6_early_demux(struct sk_buff *skb)
1541{
1542	const struct ipv6hdr *hdr;
1543	const struct tcphdr *th;
1544	struct sock *sk;
1545
1546	if (skb->pkt_type != PACKET_HOST)
1547		return;
1548
1549	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1550		return;
1551
1552	hdr = ipv6_hdr(skb);
1553	th = tcp_hdr(skb);
1554
1555	if (th->doff < sizeof(struct tcphdr) / 4)
1556		return;
1557
1558	/* Note : We use inet6_iif() here, not tcp_v6_iif() */
1559	sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1560					&hdr->saddr, th->source,
1561					&hdr->daddr, ntohs(th->dest),
1562					inet6_iif(skb));
1563	if (sk) {
1564		skb->sk = sk;
1565		skb->destructor = sock_edemux;
1566		if (sk_fullsock(sk)) {
1567			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
1568
1569			if (dst)
1570				dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
1571			if (dst &&
1572			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1573				skb_dst_set_noref(skb, dst);
1574		}
1575	}
1576}
1577
1578static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1579	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
1580	.twsk_unique	= tcp_twsk_unique,
1581	.twsk_destructor = tcp_twsk_destructor,
1582};
1583
1584static const struct inet_connection_sock_af_ops ipv6_specific = {
1585	.queue_xmit	   = inet6_csk_xmit,
1586	.send_check	   = tcp_v6_send_check,
1587	.rebuild_header	   = inet6_sk_rebuild_header,
1588	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
1589	.conn_request	   = tcp_v6_conn_request,
1590	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
1591	.net_header_len	   = sizeof(struct ipv6hdr),
1592	.net_frag_header_len = sizeof(struct frag_hdr),
1593	.setsockopt	   = ipv6_setsockopt,
1594	.getsockopt	   = ipv6_getsockopt,
1595	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
1596	.sockaddr_len	   = sizeof(struct sockaddr_in6),
1597	.bind_conflict	   = inet6_csk_bind_conflict,
1598#ifdef CONFIG_COMPAT
1599	.compat_setsockopt = compat_ipv6_setsockopt,
1600	.compat_getsockopt = compat_ipv6_getsockopt,
1601#endif
1602	.mtu_reduced	   = tcp_v6_mtu_reduced,
1603};
1604
1605#ifdef CONFIG_TCP_MD5SIG
1606static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
1607	.md5_lookup	=	tcp_v6_md5_lookup,
1608	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
1609	.md5_parse	=	tcp_v6_parse_md5_keys,
1610};
1611#endif
1612
1613/*
1614 *	TCP over IPv4 via INET6 API
1615 */
1616static const struct inet_connection_sock_af_ops ipv6_mapped = {
1617	.queue_xmit	   = ip_queue_xmit,
1618	.send_check	   = tcp_v4_send_check,
1619	.rebuild_header	   = inet_sk_rebuild_header,
1620	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
1621	.conn_request	   = tcp_v6_conn_request,
1622	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
1623	.net_header_len	   = sizeof(struct iphdr),
1624	.setsockopt	   = ipv6_setsockopt,
1625	.getsockopt	   = ipv6_getsockopt,
1626	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
1627	.sockaddr_len	   = sizeof(struct sockaddr_in6),
1628	.bind_conflict	   = inet6_csk_bind_conflict,
1629#ifdef CONFIG_COMPAT
1630	.compat_setsockopt = compat_ipv6_setsockopt,
1631	.compat_getsockopt = compat_ipv6_getsockopt,
1632#endif
1633	.mtu_reduced	   = tcp_v4_mtu_reduced,
1634};
1635
1636#ifdef CONFIG_TCP_MD5SIG
1637static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
1638	.md5_lookup	=	tcp_v4_md5_lookup,
1639	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
1640	.md5_parse	=	tcp_v6_parse_md5_keys,
1641};
1642#endif
1643
 1644/* NOTE: A lot of things are set to zero explicitly by the call to
 1645 *       sk_alloc(), so they need not be done here.
 1646 */
1647static int tcp_v6_init_sock(struct sock *sk)
1648{
1649	struct inet_connection_sock *icsk = inet_csk(sk);
1650
1651	tcp_init_sock(sk);
1652
1653	icsk->icsk_af_ops = &ipv6_specific;
1654
1655#ifdef CONFIG_TCP_MD5SIG
1656	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
1657#endif
1658
1659	return 0;
1660}
1661
1662static void tcp_v6_destroy_sock(struct sock *sk)
1663{
1664	tcp_v4_destroy_sock(sk);
1665	inet6_destroy_sock(sk);
1666}
1667
1668#ifdef CONFIG_PROC_FS
1669/* Proc filesystem TCPv6 sock list dumping. */
1670static void get_openreq6(struct seq_file *seq,
1671			 const struct request_sock *req, int i)
1672{
1673	long ttd = req->rsk_timer.expires - jiffies;
1674	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
1675	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;
1676
1677	if (ttd < 0)
1678		ttd = 0;
1679
1680	seq_printf(seq,
1681		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1682		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
1683		   i,
1684		   src->s6_addr32[0], src->s6_addr32[1],
1685		   src->s6_addr32[2], src->s6_addr32[3],
1686		   inet_rsk(req)->ir_num,
1687		   dest->s6_addr32[0], dest->s6_addr32[1],
1688		   dest->s6_addr32[2], dest->s6_addr32[3],
1689		   ntohs(inet_rsk(req)->ir_rmt_port),
1690		   TCP_SYN_RECV,
1691		   0, 0, /* could print option size, but that is af dependent. */
1692		   1,   /* timers active (only the expire timer) */
1693		   jiffies_to_clock_t(ttd),
1694		   req->num_timeout,
1695		   from_kuid_munged(seq_user_ns(seq),
1696				    sock_i_uid(req->rsk_listener)),
1697		   0,  /* non standard timer */
1698		   0, /* open_requests have no inode */
1699		   0, req);
1700}
1701
1702static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1703{
1704	const struct in6_addr *dest, *src;
1705	__u16 destp, srcp;
1706	int timer_active;
1707	unsigned long timer_expires;
1708	const struct inet_sock *inet = inet_sk(sp);
1709	const struct tcp_sock *tp = tcp_sk(sp);
1710	const struct inet_connection_sock *icsk = inet_csk(sp);
1711	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
1712	int rx_queue;
1713	int state;
1714
1715	dest  = &sp->sk_v6_daddr;
1716	src   = &sp->sk_v6_rcv_saddr;
1717	destp = ntohs(inet->inet_dport);
1718	srcp  = ntohs(inet->inet_sport);
1719
1720	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
1721		timer_active	= 1;
1722		timer_expires	= icsk->icsk_timeout;
1723	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1724		timer_active	= 4;
1725		timer_expires	= icsk->icsk_timeout;
1726	} else if (timer_pending(&sp->sk_timer)) {
1727		timer_active	= 2;
1728		timer_expires	= sp->sk_timer.expires;
1729	} else {
1730		timer_active	= 0;
1731		timer_expires = jiffies;
1732	}
1733
1734	state = sk_state_load(sp);
1735	if (state == TCP_LISTEN)
1736		rx_queue = sp->sk_ack_backlog;
1737	else
1738		/* Because we don't lock the socket,
1739		 * we might find a transient negative value.
1740		 */
1741		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
1742
1743	seq_printf(seq,
1744		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1745		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
1746		   i,
1747		   src->s6_addr32[0], src->s6_addr32[1],
1748		   src->s6_addr32[2], src->s6_addr32[3], srcp,
1749		   dest->s6_addr32[0], dest->s6_addr32[1],
1750		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
1751		   state,
1752		   tp->write_seq - tp->snd_una,
1753		   rx_queue,
1754		   timer_active,
1755		   jiffies_delta_to_clock_t(timer_expires - jiffies),
1756		   icsk->icsk_retransmits,
1757		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
1758		   icsk->icsk_probes_out,
1759		   sock_i_ino(sp),
1760		   atomic_read(&sp->sk_refcnt), sp,
1761		   jiffies_to_clock_t(icsk->icsk_rto),
1762		   jiffies_to_clock_t(icsk->icsk_ack.ato),
1763		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
1764		   tp->snd_cwnd,
1765		   state == TCP_LISTEN ?
1766			fastopenq->max_qlen :
1767			(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
1768		   );
1769}
1770
1771static void get_timewait6_sock(struct seq_file *seq,
1772			       struct inet_timewait_sock *tw, int i)
1773{
1774	long delta = tw->tw_timer.expires - jiffies;
1775	const struct in6_addr *dest, *src;
1776	__u16 destp, srcp;
1777
1778	dest = &tw->tw_v6_daddr;
1779	src  = &tw->tw_v6_rcv_saddr;
1780	destp = ntohs(tw->tw_dport);
1781	srcp  = ntohs(tw->tw_sport);
1782
1783	seq_printf(seq,
1784		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1785		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
1786		   i,
1787		   src->s6_addr32[0], src->s6_addr32[1],
1788		   src->s6_addr32[2], src->s6_addr32[3], srcp,
1789		   dest->s6_addr32[0], dest->s6_addr32[1],
1790		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
1791		   tw->tw_substate, 0, 0,
1792		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
1793		   atomic_read(&tw->tw_refcnt), tw);
1794}
1795
1796static int tcp6_seq_show(struct seq_file *seq, void *v)
1797{
1798	struct tcp_iter_state *st;
1799	struct sock *sk = v;
1800
1801	if (v == SEQ_START_TOKEN) {
1802		seq_puts(seq,
1803			 "  sl  "
1804			 "local_address                         "
1805			 "remote_address                        "
1806			 "st tx_queue rx_queue tr tm->when retrnsmt"
1807			 "   uid  timeout inode\n");
1808		goto out;
1809	}
1810	st = seq->private;
1811
1812	if (sk->sk_state == TCP_TIME_WAIT)
1813		get_timewait6_sock(seq, v, st->num);
1814	else if (sk->sk_state == TCP_NEW_SYN_RECV)
1815		get_openreq6(seq, v, st->num);
1816	else
1817		get_tcp6_sock(seq, v, st->num);
1818out:
1819	return 0;
1820}
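
The table that tcp6_seq_show() emits is plain text under /proc/net/tcp6. A minimal reader, just to illustrate the format (the function name is illustrative; assumes procfs and IPv6 are enabled):

#include <stdio.h>

/* Dump /proc/net/tcp6 verbatim. Each address is four %08X groups
 * taken from s6_addr32 in host byte order, so on a little-endian
 * machine the bytes within each group appear reversed; ports and
 * the socket state are printed in hex as well. */
int dump_tcp6(void)
{
	char line[512];
	FILE *f = fopen("/proc/net/tcp6", "r");

	if (!f)
		return -1;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}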
1821
1822static const struct file_operations tcp6_afinfo_seq_fops = {
1823	.owner   = THIS_MODULE,
1824	.open    = tcp_seq_open,
1825	.read    = seq_read,
1826	.llseek  = seq_lseek,
1827	.release = seq_release_net
1828};
1829
1830static struct tcp_seq_afinfo tcp6_seq_afinfo = {
1831	.name		= "tcp6",
1832	.family		= AF_INET6,
1833	.seq_fops	= &tcp6_afinfo_seq_fops,
1834	.seq_ops	= {
1835		.show		= tcp6_seq_show,
1836	},
1837};
1838
1839int __net_init tcp6_proc_init(struct net *net)
1840{
1841	return tcp_proc_register(net, &tcp6_seq_afinfo);
1842}
1843
1844void tcp6_proc_exit(struct net *net)
1845{
1846	tcp_proc_unregister(net, &tcp6_seq_afinfo);
1847}
1848#endif
1849
1850static void tcp_v6_clear_sk(struct sock *sk, int size)
1851{
1852	struct inet_sock *inet = inet_sk(sk);
1853
 1854	/* we do not want to clear the pinet6 field, because of RCU lookups */
1855	sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6));
1856
1857	size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
1858	memset(&inet->pinet6 + 1, 0, size);
1859}
1860
1861struct proto tcpv6_prot = {
1862	.name			= "TCPv6",
1863	.owner			= THIS_MODULE,
1864	.close			= tcp_close,
1865	.connect		= tcp_v6_connect,
1866	.disconnect		= tcp_disconnect,
1867	.accept			= inet_csk_accept,
1868	.ioctl			= tcp_ioctl,
1869	.init			= tcp_v6_init_sock,
1870	.destroy		= tcp_v6_destroy_sock,
1871	.shutdown		= tcp_shutdown,
1872	.setsockopt		= tcp_setsockopt,
1873	.getsockopt		= tcp_getsockopt,
1874	.recvmsg		= tcp_recvmsg,
1875	.sendmsg		= tcp_sendmsg,
1876	.sendpage		= tcp_sendpage,
1877	.backlog_rcv		= tcp_v6_do_rcv,
1878	.release_cb		= tcp_release_cb,
1879	.hash			= inet6_hash,
1880	.unhash			= inet_unhash,
1881	.get_port		= inet_csk_get_port,
1882	.enter_memory_pressure	= tcp_enter_memory_pressure,
1883	.stream_memory_free	= tcp_stream_memory_free,
1884	.sockets_allocated	= &tcp_sockets_allocated,
1885	.memory_allocated	= &tcp_memory_allocated,
1886	.memory_pressure	= &tcp_memory_pressure,
1887	.orphan_count		= &tcp_orphan_count,
1888	.sysctl_mem		= sysctl_tcp_mem,
1889	.sysctl_wmem		= sysctl_tcp_wmem,
1890	.sysctl_rmem		= sysctl_tcp_rmem,
1891	.max_header		= MAX_TCP_HEADER,
1892	.obj_size		= sizeof(struct tcp6_sock),
1893	.slab_flags		= SLAB_DESTROY_BY_RCU,
1894	.twsk_prot		= &tcp6_timewait_sock_ops,
1895	.rsk_prot		= &tcp6_request_sock_ops,
1896	.h.hashinfo		= &tcp_hashinfo,
1897	.no_autobind		= true,
1898#ifdef CONFIG_COMPAT
1899	.compat_setsockopt	= compat_tcp_setsockopt,
1900	.compat_getsockopt	= compat_tcp_getsockopt,
1901#endif
1902	.clear_sk		= tcp_v6_clear_sk,
1903	.diag_destroy		= tcp_abort,
1904};
1905
1906static const struct inet6_protocol tcpv6_protocol = {
1907	.early_demux	=	tcp_v6_early_demux,
1908	.handler	=	tcp_v6_rcv,
1909	.err_handler	=	tcp_v6_err,
1910	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
1911};
1912
1913static struct inet_protosw tcpv6_protosw = {
1914	.type		=	SOCK_STREAM,
1915	.protocol	=	IPPROTO_TCP,
1916	.prot		=	&tcpv6_prot,
1917	.ops		=	&inet6_stream_ops,
1918	.flags		=	INET_PROTOSW_PERMANENT |
1919				INET_PROTOSW_ICSK,
1920};
1921
1922static int __net_init tcpv6_net_init(struct net *net)
1923{
1924	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
1925				    SOCK_RAW, IPPROTO_TCP, net);
1926}
1927
1928static void __net_exit tcpv6_net_exit(struct net *net)
1929{
1930	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
1931}
1932
1933static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
1934{
1935	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
1936}
1937
1938static struct pernet_operations tcpv6_net_ops = {
1939	.init	    = tcpv6_net_init,
1940	.exit	    = tcpv6_net_exit,
1941	.exit_batch = tcpv6_net_exit_batch,
1942};
1943
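    /* Module init: register the IPv6 protocol handler, the SOCK_STREAM
     * protosw entry and the per-namespace control sockets; on failure,
     * unwind the registrations already made, in reverse order.
     */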
1944int __init tcpv6_init(void)
1945{
1946	int ret;
1947
1948	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
1949	if (ret)
1950		goto out;
1951
1952	/* register inet6 protocol */
1953	ret = inet6_register_protosw(&tcpv6_protosw);
1954	if (ret)
1955		goto out_tcpv6_protocol;
1956
1957	ret = register_pernet_subsys(&tcpv6_net_ops);
1958	if (ret)
1959		goto out_tcpv6_protosw;
1960out:
1961	return ret;
1962
1963out_tcpv6_protosw:
1964	inet6_unregister_protosw(&tcpv6_protosw);
1965out_tcpv6_protocol:
1966	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
1967	goto out;
1968}
1969
1970void tcpv6_exit(void)
1971{
1972	unregister_pernet_subsys(&tcpv6_net_ops);
1973	inet6_unregister_protosw(&tcpv6_protosw);
1974	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
1975}
v4.17
   1/*
   2 *	TCP over IPv6
   3 *	Linux INET6 implementation
   4 *
   5 *	Authors:
   6 *	Pedro Roque		<roque@di.fc.ul.pt>
   7 *
   8 *	Based on:
   9 *	linux/net/ipv4/tcp.c
  10 *	linux/net/ipv4/tcp_input.c
  11 *	linux/net/ipv4/tcp_output.c
  12 *
  13 *	Fixes:
  14 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
  15 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
  16 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
  17 *					a single port at the same time.
  18 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
  19 *
  20 *	This program is free software; you can redistribute it and/or
  21 *      modify it under the terms of the GNU General Public License
  22 *      as published by the Free Software Foundation; either version
  23 *      2 of the License, or (at your option) any later version.
  24 */
  25
  26#include <linux/bottom_half.h>
  27#include <linux/module.h>
  28#include <linux/errno.h>
  29#include <linux/types.h>
  30#include <linux/socket.h>
  31#include <linux/sockios.h>
  32#include <linux/net.h>
  33#include <linux/jiffies.h>
  34#include <linux/in.h>
  35#include <linux/in6.h>
  36#include <linux/netdevice.h>
  37#include <linux/init.h>
  38#include <linux/jhash.h>
  39#include <linux/ipsec.h>
  40#include <linux/times.h>
  41#include <linux/slab.h>
  42#include <linux/uaccess.h>
  43#include <linux/ipv6.h>
  44#include <linux/icmpv6.h>
  45#include <linux/random.h>
  46
  47#include <net/tcp.h>
  48#include <net/ndisc.h>
  49#include <net/inet6_hashtables.h>
  50#include <net/inet6_connection_sock.h>
  51#include <net/ipv6.h>
  52#include <net/transp_v6.h>
  53#include <net/addrconf.h>
  54#include <net/ip6_route.h>
  55#include <net/ip6_checksum.h>
  56#include <net/inet_ecn.h>
  57#include <net/protocol.h>
  58#include <net/xfrm.h>
  59#include <net/snmp.h>
  60#include <net/dsfield.h>
  61#include <net/timewait_sock.h>
  62#include <net/inet_common.h>
  63#include <net/secure_seq.h>
  64#include <net/busy_poll.h>
  65
  66#include <linux/proc_fs.h>
  67#include <linux/seq_file.h>
  68
  69#include <crypto/hash.h>
  70#include <linux/scatterlist.h>
  71
  72#include <trace/events/tcp.h>
  73
  74static void	tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
  75static void	tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
  76				      struct request_sock *req);
  77
  78static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
  79
  80static const struct inet_connection_sock_af_ops ipv6_mapped;
  81static const struct inet_connection_sock_af_ops ipv6_specific;
  82#ifdef CONFIG_TCP_MD5SIG
  83static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
  84static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
  85#else
  86static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
  87						   const struct in6_addr *addr)
  88{
  89	return NULL;
  90}
  91#endif
  92
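   /* Cache the inbound dst on the socket for use by the early demux
    * path; the saved ifindex and rt6 cookie let later lookups detect
    * a stale route.
    */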
  93static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
  94{
  95	struct dst_entry *dst = skb_dst(skb);
  96
  97	if (dst && dst_hold_safe(dst)) {
  98		const struct rt6_info *rt = (const struct rt6_info *)dst;
  99
 100		sk->sk_rx_dst = dst;
 101		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
 102		inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
 103	}
 104}
 105
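   /* Derive the initial sequence number and the per-connection
    * timestamp offset for an incoming SYN from keyed hashes of the
    * addresses (and, for the ISN, the ports), so both are stable per
    * flow yet hard to predict.
    */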
 106static u32 tcp_v6_init_seq(const struct sk_buff *skb)
 107{
 108	return secure_tcpv6_seq(ipv6_hdr(skb)->daddr.s6_addr32,
 109				ipv6_hdr(skb)->saddr.s6_addr32,
 110				tcp_hdr(skb)->dest,
 111				tcp_hdr(skb)->source);
 112}
 113
 114static u32 tcp_v6_init_ts_off(const struct net *net, const struct sk_buff *skb)
 115{
 116	return secure_tcpv6_ts_off(net, ipv6_hdr(skb)->daddr.s6_addr32,
 117				   ipv6_hdr(skb)->saddr.s6_addr32);
 118}
 119
 120static int tcp_v6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
 121			      int addr_len)
 122{
 123	/* This check is replicated from tcp_v6_connect() and intended to
 124	 * prevent the BPF program called below from accessing bytes that
 125	 * are outside the bound specified by the user in addr_len.
 126	 */
 127	if (addr_len < SIN6_LEN_RFC2133)
 128		return -EINVAL;
 129
 130	sock_owned_by_me(sk);
 131
 132	return BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr);
 133}
 134
 135static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 136			  int addr_len)
 137{
 138	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
 139	struct inet_sock *inet = inet_sk(sk);
 140	struct inet_connection_sock *icsk = inet_csk(sk);
 141	struct ipv6_pinfo *np = inet6_sk(sk);
 142	struct tcp_sock *tp = tcp_sk(sk);
 143	struct in6_addr *saddr = NULL, *final_p, final;
 144	struct ipv6_txoptions *opt;
 145	struct flowi6 fl6;
 146	struct dst_entry *dst;
 147	int addr_type;
 148	int err;
 149	struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;
 150
 151	if (addr_len < SIN6_LEN_RFC2133)
 152		return -EINVAL;
 153
 154	if (usin->sin6_family != AF_INET6)
 155		return -EAFNOSUPPORT;
 156
 157	memset(&fl6, 0, sizeof(fl6));
 158
 159	if (np->sndflow) {
 160		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
 161		IP6_ECN_flow_init(fl6.flowlabel);
 162		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
 163			struct ip6_flowlabel *flowlabel;
 164			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
 165			if (!flowlabel)
 166				return -EINVAL;
 167			fl6_sock_release(flowlabel);
 168		}
 169	}
 170
 171	/*
 172	 *	connect() to INADDR_ANY means loopback (BSD'ism).
 173	 */
 174
 175	if (ipv6_addr_any(&usin->sin6_addr)) {
 176		if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
 177			ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
 178					       &usin->sin6_addr);
 179		else
 180			usin->sin6_addr = in6addr_loopback;
 181	}
 182
 183	addr_type = ipv6_addr_type(&usin->sin6_addr);
 184
 185	if (addr_type & IPV6_ADDR_MULTICAST)
 186		return -ENETUNREACH;
 187
 188	if (addr_type&IPV6_ADDR_LINKLOCAL) {
 189		if (addr_len >= sizeof(struct sockaddr_in6) &&
 190		    usin->sin6_scope_id) {
 191			/* If an interface was set while binding, the
 192			 * indices must coincide.
 193			 */
 194			if (!sk_dev_equal_l3scope(sk, usin->sin6_scope_id))
 195				return -EINVAL;
 196
 197			sk->sk_bound_dev_if = usin->sin6_scope_id;
 198		}
 199
 200		/* Connect to link-local address requires an interface */
 201		if (!sk->sk_bound_dev_if)
 202			return -EINVAL;
 203	}
 204
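   	/* When reconnecting to a different peer, forget the cached
   	 * timestamp state and let write_seq be picked afresh below.
   	 */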
 205	if (tp->rx_opt.ts_recent_stamp &&
 206	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
 207		tp->rx_opt.ts_recent = 0;
 208		tp->rx_opt.ts_recent_stamp = 0;
 209		tp->write_seq = 0;
 210	}
 211
 212	sk->sk_v6_daddr = usin->sin6_addr;
 213	np->flow_label = fl6.flowlabel;
 214
 215	/*
 216	 *	TCP over IPv4
 217	 */
 218
 219	if (addr_type & IPV6_ADDR_MAPPED) {
 220		u32 exthdrlen = icsk->icsk_ext_hdr_len;
 221		struct sockaddr_in sin;
 222
 223		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
 224
 225		if (__ipv6_only_sock(sk))
 226			return -ENETUNREACH;
 227
 228		sin.sin_family = AF_INET;
 229		sin.sin_port = usin->sin6_port;
 230		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
 231
 232		icsk->icsk_af_ops = &ipv6_mapped;
 233		sk->sk_backlog_rcv = tcp_v4_do_rcv;
 234#ifdef CONFIG_TCP_MD5SIG
 235		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
 236#endif
 237
 238		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
 239
 240		if (err) {
 241			icsk->icsk_ext_hdr_len = exthdrlen;
 242			icsk->icsk_af_ops = &ipv6_specific;
 243			sk->sk_backlog_rcv = tcp_v6_do_rcv;
 244#ifdef CONFIG_TCP_MD5SIG
 245			tp->af_specific = &tcp_sock_ipv6_specific;
 246#endif
 247			goto failure;
 248		}
 249		np->saddr = sk->sk_v6_rcv_saddr;
 250
 251		return err;
 252	}
 253
 254	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
 255		saddr = &sk->sk_v6_rcv_saddr;
 256
 257	fl6.flowi6_proto = IPPROTO_TCP;
 258	fl6.daddr = sk->sk_v6_daddr;
 259	fl6.saddr = saddr ? *saddr : np->saddr;
 260	fl6.flowi6_oif = sk->sk_bound_dev_if;
 261	fl6.flowi6_mark = sk->sk_mark;
 262	fl6.fl6_dport = usin->sin6_port;
 263	fl6.fl6_sport = inet->inet_sport;
 264	fl6.flowi6_uid = sk->sk_uid;
 265
 266	opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
 267	final_p = fl6_update_dst(&fl6, opt, &final);
 268
 269	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
 270
 271	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
 272	if (IS_ERR(dst)) {
 273		err = PTR_ERR(dst);
 274		goto failure;
 275	}
 276
 277	if (!saddr) {
 278		saddr = &fl6.saddr;
 279		sk->sk_v6_rcv_saddr = *saddr;
 280	}
 281
 282	/* set the source address */
 283	np->saddr = *saddr;
 284	inet->inet_rcv_saddr = LOOPBACK4_IPV6;
 285
 286	sk->sk_gso_type = SKB_GSO_TCPV6;
 287	ip6_dst_store(sk, dst, NULL, NULL);
 288
 289	icsk->icsk_ext_hdr_len = 0;
 290	if (opt)
 291		icsk->icsk_ext_hdr_len = opt->opt_flen +
 292					 opt->opt_nflen;
 293
 294	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
 295
 296	inet->inet_dport = usin->sin6_port;
 297
 298	tcp_set_state(sk, TCP_SYN_SENT);
 299	err = inet6_hash_connect(tcp_death_row, sk);
 300	if (err)
 301		goto late_failure;
 302
 303	sk_set_txhash(sk);
 304
 305	if (likely(!tp->repair)) {
 306		if (!tp->write_seq)
 307			tp->write_seq = secure_tcpv6_seq(np->saddr.s6_addr32,
 308							 sk->sk_v6_daddr.s6_addr32,
 309							 inet->inet_sport,
 310							 inet->inet_dport);
 311		tp->tsoffset = secure_tcpv6_ts_off(sock_net(sk),
 312						   np->saddr.s6_addr32,
 313						   sk->sk_v6_daddr.s6_addr32);
 314	}
 315
 316	if (tcp_fastopen_defer_connect(sk, &err))
 317		return err;
 318	if (err)
 319		goto late_failure;
 320
 321	err = tcp_connect(sk);
 322	if (err)
 323		goto late_failure;
 324
 325	return 0;
 326
 327late_failure:
 328	tcp_set_state(sk, TCP_CLOSE);
 329failure:
 330	inet->inet_dport = 0;
 331	sk->sk_route_caps = 0;
 332	return err;
 333}
 334
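   /* React to a Packet Too Big notification: refresh the cached route
    * and, if the new path MTU is below the cached pmtu cookie, lower
    * the MSS and retransmit what no longer fits.
    */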
 335static void tcp_v6_mtu_reduced(struct sock *sk)
 336{
 337	struct dst_entry *dst;
 338
 339	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
 340		return;
 341
 342	dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
 343	if (!dst)
 344		return;
 345
 346	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
 347		tcp_sync_mss(sk, dst_mtu(dst));
 348		tcp_simple_retransmit(sk);
 349	}
 350}
 351
 352static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 353		u8 type, u8 code, int offset, __be32 info)
 354{
 355	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
 356	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
 357	struct net *net = dev_net(skb->dev);
 358	struct request_sock *fastopen;
 359	struct ipv6_pinfo *np;
 360	struct tcp_sock *tp;
 361	__u32 seq, snd_una;
 362	struct sock *sk;
 363	bool fatal;
 364	int err;
 365
 366	sk = __inet6_lookup_established(net, &tcp_hashinfo,
 367					&hdr->daddr, th->dest,
 368					&hdr->saddr, ntohs(th->source),
 369					skb->dev->ifindex, inet6_sdif(skb));
 370
 371	if (!sk) {
 372		__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
 373				  ICMP6_MIB_INERRORS);
 374		return;
 375	}
 376
 377	if (sk->sk_state == TCP_TIME_WAIT) {
 378		inet_twsk_put(inet_twsk(sk));
 379		return;
 380	}
 381	seq = ntohl(th->seq);
 382	fatal = icmpv6_err_convert(type, code, &err);
 383	if (sk->sk_state == TCP_NEW_SYN_RECV)
 384		return tcp_req_err(sk, seq, fatal);
 385
 386	bh_lock_sock(sk);
 387	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
 388		__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
 389
 390	if (sk->sk_state == TCP_CLOSE)
 391		goto out;
 392
 393	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
 394		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
 395		goto out;
 396	}
 397
 398	tp = tcp_sk(sk);
 399	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child()) */
 400	fastopen = tp->fastopen_rsk;
 401	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
 402	if (sk->sk_state != TCP_LISTEN &&
 403	    !between(seq, snd_una, tp->snd_nxt)) {
 404		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
 405		goto out;
 406	}
 407
 408	np = inet6_sk(sk);
 409
 410	if (type == NDISC_REDIRECT) {
 411		if (!sock_owned_by_user(sk)) {
 412			struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
 413
 414			if (dst)
 415				dst->ops->redirect(dst, sk, skb);
 416		}
 417		goto out;
 418	}
 419
 420	if (type == ICMPV6_PKT_TOOBIG) {
 421		/* We are not interested in TCP_LISTEN and open_requests
 422		 * (SYN-ACKs sent out by Linux are always < 576 bytes so
 423		 * they should go through unfragmented).
 424		 */
 425		if (sk->sk_state == TCP_LISTEN)
 426			goto out;
 427
 428		if (!ip6_sk_accept_pmtu(sk))
 429			goto out;
 430
 431		tp->mtu_info = ntohl(info);
 432		if (!sock_owned_by_user(sk))
 433			tcp_v6_mtu_reduced(sk);
 434		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
 435					   &sk->sk_tsq_flags))
 436			sock_hold(sk);
 437		goto out;
 438	}
 439
 440
 441	/* Might be for a request_sock */
 442	switch (sk->sk_state) {
 443	case TCP_SYN_SENT:
 444	case TCP_SYN_RECV:
 445		/* Only in fast or simultaneous open. If a fast open socket
 446		 * is already accepted it is treated as a connected one below.
 447		 */
 448		if (fastopen && !fastopen->sk)
 449			break;
 450
 451		if (!sock_owned_by_user(sk)) {
 452			sk->sk_err = err;
 453			sk->sk_error_report(sk);		/* Wake people up to see the error (see connect in sock.c) */
 454
 455			tcp_done(sk);
 456		} else
 457			sk->sk_err_soft = err;
 458		goto out;
 459	}
 460
 461	if (!sock_owned_by_user(sk) && np->recverr) {
 462		sk->sk_err = err;
 463		sk->sk_error_report(sk);
 464	} else
 465		sk->sk_err_soft = err;
 466
 467out:
 468	bh_unlock_sock(sk);
 469	sock_put(sk);
 470}
 471
 472
 473static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
 474			      struct flowi *fl,
 475			      struct request_sock *req,
 476			      struct tcp_fastopen_cookie *foc,
 477			      enum tcp_synack_type synack_type)
 478{
 479	struct inet_request_sock *ireq = inet_rsk(req);
 480	struct ipv6_pinfo *np = inet6_sk(sk);
 481	struct ipv6_txoptions *opt;
 482	struct flowi6 *fl6 = &fl->u.ip6;
 483	struct sk_buff *skb;
 484	int err = -ENOMEM;
 485
 486	/* First, grab a route. */
 487	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
 488					       IPPROTO_TCP)) == NULL)
 489		goto done;
 490
 491	skb = tcp_make_synack(sk, dst, req, foc, synack_type);
 492
 493	if (skb) {
 494		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
 495				    &ireq->ir_v6_rmt_addr);
 496
 497		fl6->daddr = ireq->ir_v6_rmt_addr;
 498		if (np->repflow && ireq->pktopts)
 499			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
 500
 501		rcu_read_lock();
 502		opt = ireq->ipv6_opt;
 503		if (!opt)
 504			opt = rcu_dereference(np->opt);
 505		err = ip6_xmit(sk, skb, fl6, sk->sk_mark, opt, np->tclass);
 506		rcu_read_unlock();
 507		err = net_xmit_eval(err);
 508	}
 509
 510done:
 511	return err;
 512}
 513
 514
 515static void tcp_v6_reqsk_destructor(struct request_sock *req)
 516{
 517	kfree(inet_rsk(req)->ipv6_opt);
 518	kfree_skb(inet_rsk(req)->pktopts);
 519}
 520
 521#ifdef CONFIG_TCP_MD5SIG
 522static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
 523						   const struct in6_addr *addr)
 524{
 525	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
 526}
 527
 528static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
 529						const struct sock *addr_sk)
 530{
 531	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
 532}
 533
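   /* setsockopt(TCP_MD5SIG/TCP_MD5SIG_EXT) handler: add or delete an
    * MD5 key for a peer; v4-mapped peers are stored as AF_INET keys so
    * they match segments arriving over IPv4.
    */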
 534static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
 535				 char __user *optval, int optlen)
 536{
 537	struct tcp_md5sig cmd;
 538	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
 539	u8 prefixlen;
 540
 541	if (optlen < sizeof(cmd))
 542		return -EINVAL;
 543
 544	if (copy_from_user(&cmd, optval, sizeof(cmd)))
 545		return -EFAULT;
 546
 547	if (sin6->sin6_family != AF_INET6)
 548		return -EINVAL;
 549
 550	if (optname == TCP_MD5SIG_EXT &&
 551	    cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
 552		prefixlen = cmd.tcpm_prefixlen;
 553		if (prefixlen > 128 || (ipv6_addr_v4mapped(&sin6->sin6_addr) &&
 554					prefixlen > 32))
 555			return -EINVAL;
 556	} else {
 557		prefixlen = ipv6_addr_v4mapped(&sin6->sin6_addr) ? 32 : 128;
 558	}
 559
 560	if (!cmd.tcpm_keylen) {
 561		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
 562			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
 563					      AF_INET, prefixlen);
 564		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
 565				      AF_INET6, prefixlen);
 566	}
 567
 568	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
 569		return -EINVAL;
 570
 571	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
 572		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
 573				      AF_INET, prefixlen, cmd.tcpm_key,
 574				      cmd.tcpm_keylen, GFP_KERNEL);
 575
 576	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
 577			      AF_INET6, prefixlen, cmd.tcpm_key,
 578			      cmd.tcpm_keylen, GFP_KERNEL);
 579}
 580
 581static int tcp_v6_md5_hash_headers(struct tcp_md5sig_pool *hp,
 582				   const struct in6_addr *daddr,
 583				   const struct in6_addr *saddr,
 584				   const struct tcphdr *th, int nbytes)
 585{
 586	struct tcp6_pseudohdr *bp;
 587	struct scatterlist sg;
 588	struct tcphdr *_th;
 589
 590	bp = hp->scratch;
 591	/* 1. TCP pseudo-header (RFC2460) */
 592	bp->saddr = *saddr;
 593	bp->daddr = *daddr;
 594	bp->protocol = cpu_to_be32(IPPROTO_TCP);
 595	bp->len = cpu_to_be32(nbytes);
 596
 597	_th = (struct tcphdr *)(bp + 1);
 598	memcpy(_th, th, sizeof(*th));
 599	_th->check = 0;
 600
 601	sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
 602	ahash_request_set_crypt(hp->md5_req, &sg, NULL,
 603				sizeof(*bp) + sizeof(*th));
 604	return crypto_ahash_update(hp->md5_req);
 605}
 606
 607static int tcp_v6_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
 608			       const struct in6_addr *daddr, struct in6_addr *saddr,
 609			       const struct tcphdr *th)
 610{
 611	struct tcp_md5sig_pool *hp;
 612	struct ahash_request *req;
 613
 614	hp = tcp_get_md5sig_pool();
 615	if (!hp)
 616		goto clear_hash_noput;
 617	req = hp->md5_req;
 618
 619	if (crypto_ahash_init(req))
 620		goto clear_hash;
 621	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
 622		goto clear_hash;
 623	if (tcp_md5_hash_key(hp, key))
 624		goto clear_hash;
 625	ahash_request_set_crypt(req, NULL, md5_hash, 0);
 626	if (crypto_ahash_final(req))
 627		goto clear_hash;
 628
 629	tcp_put_md5sig_pool();
 630	return 0;
 631
 632clear_hash:
 633	tcp_put_md5sig_pool();
 634clear_hash_noput:
 635	memset(md5_hash, 0, 16);
 636	return 1;
 637}
 638
 639static int tcp_v6_md5_hash_skb(char *md5_hash,
 640			       const struct tcp_md5sig_key *key,
 641			       const struct sock *sk,
 642			       const struct sk_buff *skb)
 643{
 644	const struct in6_addr *saddr, *daddr;
 645	struct tcp_md5sig_pool *hp;
 646	struct ahash_request *req;
 647	const struct tcphdr *th = tcp_hdr(skb);
 648
 649	if (sk) { /* valid for establish/request sockets */
 650		saddr = &sk->sk_v6_rcv_saddr;
 651		daddr = &sk->sk_v6_daddr;
 652	} else {
 653		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
 654		saddr = &ip6h->saddr;
 655		daddr = &ip6h->daddr;
 656	}
 657
 658	hp = tcp_get_md5sig_pool();
 659	if (!hp)
 660		goto clear_hash_noput;
 661	req = hp->md5_req;
 662
 663	if (crypto_ahash_init(req))
 664		goto clear_hash;
 665
 666	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, skb->len))
 667		goto clear_hash;
 668	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
 669		goto clear_hash;
 670	if (tcp_md5_hash_key(hp, key))
 671		goto clear_hash;
 672	ahash_request_set_crypt(req, NULL, md5_hash, 0);
 673	if (crypto_ahash_final(req))
 674		goto clear_hash;
 675
 676	tcp_put_md5sig_pool();
 677	return 0;
 678
 679clear_hash:
 680	tcp_put_md5sig_pool();
 681clear_hash_noput:
 682	memset(md5_hash, 0, 16);
 683	return 1;
 684}
 685
 686#endif
 687
 688static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
 689				    const struct sk_buff *skb)
 690{
 691#ifdef CONFIG_TCP_MD5SIG
 692	const __u8 *hash_location = NULL;
 693	struct tcp_md5sig_key *hash_expected;
 694	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
 695	const struct tcphdr *th = tcp_hdr(skb);
 696	int genhash;
 697	u8 newhash[16];
 698
 699	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
 700	hash_location = tcp_parse_md5sig_option(th);
 701
 702	/* We've parsed the options - do we have a hash? */
 703	if (!hash_expected && !hash_location)
 704		return false;
 705
 706	if (hash_expected && !hash_location) {
 707		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
 708		return true;
 709	}
 710
 711	if (!hash_expected && hash_location) {
 712		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
 713		return true;
 714	}
 715
 716	/* check the signature */
 717	genhash = tcp_v6_md5_hash_skb(newhash,
 718				      hash_expected,
 719				      NULL, skb);
 720
 721	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
 722		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
 723		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
 724				     genhash ? "failed" : "mismatch",
 725				     &ip6h->saddr, ntohs(th->source),
 726				     &ip6h->daddr, ntohs(th->dest));
 727		return true;
 728	}
 729#endif
 730	return false;
 731}
 732
 733static void tcp_v6_init_req(struct request_sock *req,
 734			    const struct sock *sk_listener,
 735			    struct sk_buff *skb)
 736{
 737	struct inet_request_sock *ireq = inet_rsk(req);
 738	const struct ipv6_pinfo *np = inet6_sk(sk_listener);
 739
 740	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
 741	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
 742
 743	/* So that link locals have meaning */
 744	if (!sk_listener->sk_bound_dev_if &&
 745	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
 746		ireq->ir_iif = tcp_v6_iif(skb);
 747
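   	/* Hold on to the SYN itself when the listener wants any IPv6
   	 * packet info, so the options can be replayed to the child
   	 * socket once it is created.
   	 */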
 748	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
 749	    (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
 750	     np->rxopt.bits.rxinfo ||
 751	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
 752	     np->rxopt.bits.rxohlim || np->repflow)) {
 753		refcount_inc(&skb->users);
 754		ireq->pktopts = skb;
 755	}
 756}
 757
 758static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
 759					  struct flowi *fl,
 760					  const struct request_sock *req)
 761{
 762	return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
 763}
 764
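   /* Operation tables that wire the generic request-sock and SYN
    * handling in tcp_conn_request() to the IPv6-specific helpers
    * defined above.
    */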
 765struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
 766	.family		=	AF_INET6,
 767	.obj_size	=	sizeof(struct tcp6_request_sock),
 768	.rtx_syn_ack	=	tcp_rtx_synack,
 769	.send_ack	=	tcp_v6_reqsk_send_ack,
 770	.destructor	=	tcp_v6_reqsk_destructor,
 771	.send_reset	=	tcp_v6_send_reset,
 772	.syn_ack_timeout =	tcp_syn_ack_timeout,
 773};
 774
 775static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
 776	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
 777				sizeof(struct ipv6hdr),
 778#ifdef CONFIG_TCP_MD5SIG
 779	.req_md5_lookup	=	tcp_v6_md5_lookup,
 780	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
 781#endif
 782	.init_req	=	tcp_v6_init_req,
 783#ifdef CONFIG_SYN_COOKIES
 784	.cookie_init_seq =	cookie_v6_init_sequence,
 785#endif
 786	.route_req	=	tcp_v6_route_req,
 787	.init_seq	=	tcp_v6_init_seq,
 788	.init_ts_off	=	tcp_v6_init_ts_off,
 789	.send_synack	=	tcp_v6_send_synack,
 790};
 791
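   /* Build and send a bare control segment (RST, or an ACK when @rst
    * is 0) that mirrors the addresses and ports of @skb, transmitted
    * on the per-namespace control socket.
    */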
 792static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
 793				 u32 ack, u32 win, u32 tsval, u32 tsecr,
 794				 int oif, struct tcp_md5sig_key *key, int rst,
 795				 u8 tclass, __be32 label)
 796{
 797	const struct tcphdr *th = tcp_hdr(skb);
 798	struct tcphdr *t1;
 799	struct sk_buff *buff;
 800	struct flowi6 fl6;
 801	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
 802	struct sock *ctl_sk = net->ipv6.tcp_sk;
 803	unsigned int tot_len = sizeof(struct tcphdr);
 804	struct dst_entry *dst;
 805	__be32 *topt;
 806
 807	if (tsecr)
 808		tot_len += TCPOLEN_TSTAMP_ALIGNED;
 809#ifdef CONFIG_TCP_MD5SIG
 810	if (key)
 811		tot_len += TCPOLEN_MD5SIG_ALIGNED;
 812#endif
 813
 814	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
 815			 GFP_ATOMIC);
 816	if (!buff)
 817		return;
 818
 819	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
 820
 821	t1 = skb_push(buff, tot_len);
 822	skb_reset_transport_header(buff);
 823
 824	/* Swap the send and the receive. */
 825	memset(t1, 0, sizeof(*t1));
 826	t1->dest = th->source;
 827	t1->source = th->dest;
 828	t1->doff = tot_len / 4;
 829	t1->seq = htonl(seq);
 830	t1->ack_seq = htonl(ack);
 831	t1->ack = !rst || !th->ack;
 832	t1->rst = rst;
 833	t1->window = htons(win);
 834
 835	topt = (__be32 *)(t1 + 1);
 836
 837	if (tsecr) {
 838		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
 839				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
 840		*topt++ = htonl(tsval);
 841		*topt++ = htonl(tsecr);
 842	}
 843
 844#ifdef CONFIG_TCP_MD5SIG
 845	if (key) {
 846		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
 847				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
 848		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
 849				    &ipv6_hdr(skb)->saddr,
 850				    &ipv6_hdr(skb)->daddr, t1);
 851	}
 852#endif
 853
 854	memset(&fl6, 0, sizeof(fl6));
 855	fl6.daddr = ipv6_hdr(skb)->saddr;
 856	fl6.saddr = ipv6_hdr(skb)->daddr;
 857	fl6.flowlabel = label;
 858
 859	buff->ip_summed = CHECKSUM_PARTIAL;
 860	buff->csum = 0;
 861
 862	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
 863
 864	fl6.flowi6_proto = IPPROTO_TCP;
 865	if (rt6_need_strict(&fl6.daddr) && !oif)
 866		fl6.flowi6_oif = tcp_v6_iif(skb);
 867	else {
 868		if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
 869			oif = skb->skb_iif;
 870
 871		fl6.flowi6_oif = oif;
 872	}
 873
 874	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
 875	fl6.fl6_dport = t1->dest;
 876	fl6.fl6_sport = t1->source;
 877	fl6.flowi6_uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
 878	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
 879
 880	/* Pass a socket to ip6_dst_lookup even when it is for a RST;
 881	 * the underlying function will use it to retrieve the network
 882	 * namespace.
 883	 */
 884	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
 885	if (!IS_ERR(dst)) {
 886		skb_dst_set(buff, dst);
 887		ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass);
 888		TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
 889		if (rst)
 890			TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
 891		return;
 892	}
 893
 894	kfree_skb(buff);
 895}
 896
 897static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
 898{
 899	const struct tcphdr *th = tcp_hdr(skb);
 900	u32 seq = 0, ack_seq = 0;
 901	struct tcp_md5sig_key *key = NULL;
 902#ifdef CONFIG_TCP_MD5SIG
 903	const __u8 *hash_location = NULL;
 904	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
 905	unsigned char newhash[16];
 906	int genhash;
 907	struct sock *sk1 = NULL;
 908#endif
 909	int oif = 0;
 910
 911	if (th->rst)
 912		return;
 913
 914	/* If sk is not NULL, it means we did a successful lookup and the
 915	 * incoming route had to be correct. prequeue might have dropped our dst.
 916	 */
 917	if (!sk && !ipv6_unicast_destination(skb))
 918		return;
 919
 920#ifdef CONFIG_TCP_MD5SIG
 921	rcu_read_lock();
 922	hash_location = tcp_parse_md5sig_option(th);
 923	if (sk && sk_fullsock(sk)) {
 924		key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr);
 925	} else if (hash_location) {
 926		/*
 927		 * The active side is gone. Try to find the listening socket
 928		 * through the source port, then find the md5 key through it.
 929		 * We do not lose security here:
 930		 * the incoming packet is checked against the found md5 key, and
 931		 * no RST is generated if the hash doesn't match.
 932		 */
 933		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
 934					   &tcp_hashinfo, NULL, 0,
 935					   &ipv6h->saddr,
 936					   th->source, &ipv6h->daddr,
 937					   ntohs(th->source), tcp_v6_iif(skb),
 938					   tcp_v6_sdif(skb));
 939		if (!sk1)
 940			goto out;
 941
 942		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
 943		if (!key)
 944			goto out;
 945
 946		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
 947		if (genhash || memcmp(hash_location, newhash, 16) != 0)
 948			goto out;
 949	}
 950#endif
 951
 952	if (th->ack)
 953		seq = ntohl(th->ack_seq);
 954	else
 955		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
 956			  (th->doff << 2);
 957
 958	if (sk) {
 959		oif = sk->sk_bound_dev_if;
 960		if (sk_fullsock(sk))
 961			trace_tcp_send_reset(sk, skb);
 962	}
 963
 964	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);
 965
 966#ifdef CONFIG_TCP_MD5SIG
 967out:
 968	rcu_read_unlock();
 969#endif
 970}
 971
 972static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
 973			    u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
 974			    struct tcp_md5sig_key *key, u8 tclass,
 975			    __be32 label)
 976{
 977	tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
 978			     tclass, label);
 979}
 980
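   /* Answer a segment that hits a TIME_WAIT socket with an ACK built
    * from the remembered snd_nxt/rcv_nxt, window and timestamps.
    */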
 981static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
 982{
 983	struct inet_timewait_sock *tw = inet_twsk(sk);
 984	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
 985
 986	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
 987			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
 988			tcp_time_stamp_raw() + tcptw->tw_ts_offset,
 989			tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
 990			tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));
 991
 992	inet_twsk_put(tw);
 993}
 994
 995static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
 996				  struct request_sock *req)
 997{
 998	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
 999	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
1000	 */
1001	/* RFC 7323 2.3
1002	 * The window field (SEG.WND) of every outgoing segment, with the
1003	 * exception of <SYN> segments, MUST be right-shifted by
1004	 * Rcv.Wind.Shift bits:
1005	 */
1006	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
1007			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
1008			tcp_rsk(req)->rcv_nxt,
1009			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
1010			tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
1011			req->ts_recent, sk->sk_bound_dev_if,
1012			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr),
1013			0, 0);
1014}
1015
1016
1017static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
1018{
1019#ifdef CONFIG_SYN_COOKIES
1020	const struct tcphdr *th = tcp_hdr(skb);
1021
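    	/* In LISTEN state a bare ACK may complete a SYN-cookie
    	 * handshake; genuine SYNs are handled by tcp_conn_request().
    	 */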
1022	if (!th->syn)
1023		sk = cookie_v6_check(sk, skb);
1024#endif
1025	return sk;
1026}
1027
1028static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1029{
1030	if (skb->protocol == htons(ETH_P_IP))
1031		return tcp_v4_conn_request(sk, skb);
1032
1033	if (!ipv6_unicast_destination(skb))
1034		goto drop;
1035
1036	return tcp_conn_request(&tcp6_request_sock_ops,
1037				&tcp_request_sock_ipv6_ops, sk, skb);
1038
1039drop:
1040	tcp_listendrop(sk);
1041	return 0; /* don't send reset */
1042}
1043
1044static void tcp_v6_restore_cb(struct sk_buff *skb)
1045{
1046	/* We need to move header back to the beginning if xfrm6_policy_check()
1047	 * and tcp_v6_fill_cb() are going to be called again.
1048	 * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
1049	 */
1050	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
1051		sizeof(struct inet6_skb_parm));
1052}
1053
1054static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
1055					 struct request_sock *req,
1056					 struct dst_entry *dst,
1057					 struct request_sock *req_unhash,
1058					 bool *own_req)
1059{
1060	struct inet_request_sock *ireq;
1061	struct ipv6_pinfo *newnp;
1062	const struct ipv6_pinfo *np = inet6_sk(sk);
1063	struct ipv6_txoptions *opt;
1064	struct tcp6_sock *newtcp6sk;
1065	struct inet_sock *newinet;
1066	struct tcp_sock *newtp;
1067	struct sock *newsk;
1068#ifdef CONFIG_TCP_MD5SIG
1069	struct tcp_md5sig_key *key;
1070#endif
1071	struct flowi6 fl6;
1072
1073	if (skb->protocol == htons(ETH_P_IP)) {
1074		/*
1075		 *	v6 mapped
1076		 */
1077
1078		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
1079					     req_unhash, own_req);
1080
1081		if (!newsk)
1082			return NULL;
1083
1084		newtcp6sk = (struct tcp6_sock *)newsk;
1085		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1086
1087		newinet = inet_sk(newsk);
1088		newnp = inet6_sk(newsk);
1089		newtp = tcp_sk(newsk);
1090
1091		memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1092
1093		newnp->saddr = newsk->sk_v6_rcv_saddr;
1094
1095		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1096		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
1097#ifdef CONFIG_TCP_MD5SIG
1098		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1099#endif
1100
1101		newnp->ipv6_mc_list = NULL;
1102		newnp->ipv6_ac_list = NULL;
1103		newnp->ipv6_fl_list = NULL;
1104		newnp->pktoptions  = NULL;
1105		newnp->opt	   = NULL;
1106		newnp->mcast_oif   = tcp_v6_iif(skb);
1107		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
1108		newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
1109		if (np->repflow)
1110			newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
1111
1112		/*
1113		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1114		 * here, tcp_create_openreq_child now does this for us, see the comment in
1115		 * that function for the gory details. -acme
1116		 */
1117
1118		/* This is a tricky place. Until this moment the IPv4 tcp code
1119		   worked with the IPv6 icsk.icsk_af_ops.
1120		   Sync it now.
1121		 */
1122		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
1123
1124		return newsk;
1125	}
1126
1127	ireq = inet_rsk(req);
1128
1129	if (sk_acceptq_is_full(sk))
1130		goto out_overflow;
1131
1132	if (!dst) {
1133		dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
1134		if (!dst)
1135			goto out;
1136	}
1137
1138	newsk = tcp_create_openreq_child(sk, req, skb);
1139	if (!newsk)
1140		goto out_nonewsk;
1141
1142	/*
1143	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1144	 * count here, tcp_create_openreq_child now does this for us, see the
1145	 * comment in that function for the gory details. -acme
1146	 */
1147
1148	newsk->sk_gso_type = SKB_GSO_TCPV6;
1149	ip6_dst_store(newsk, dst, NULL, NULL);
1150	inet6_sk_rx_dst_set(newsk, skb);
1151
1152	newtcp6sk = (struct tcp6_sock *)newsk;
1153	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1154
1155	newtp = tcp_sk(newsk);
1156	newinet = inet_sk(newsk);
1157	newnp = inet6_sk(newsk);
1158
1159	memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1160
1161	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
1162	newnp->saddr = ireq->ir_v6_loc_addr;
1163	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
1164	newsk->sk_bound_dev_if = ireq->ir_iif;
1165
1166	/* Now IPv6 options...
1167
1168	   First: no IPv4 options.
1169	 */
1170	newinet->inet_opt = NULL;
1171	newnp->ipv6_mc_list = NULL;
1172	newnp->ipv6_ac_list = NULL;
1173	newnp->ipv6_fl_list = NULL;
1174
1175	/* Clone RX bits */
1176	newnp->rxopt.all = np->rxopt.all;
1177
1178	newnp->pktoptions = NULL;
1179	newnp->opt	  = NULL;
1180	newnp->mcast_oif  = tcp_v6_iif(skb);
1181	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1182	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
1183	if (np->repflow)
1184		newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
1185
1186	/* Clone native IPv6 options from listening socket (if any)
1187
1188	   Yes, keeping a reference count would be much more clever, but
1189	   we do one more thing here: reattach optmem
1190	   to newsk.
1191	 */
1192	opt = ireq->ipv6_opt;
1193	if (!opt)
1194		opt = rcu_dereference(np->opt);
1195	if (opt) {
1196		opt = ipv6_dup_options(newsk, opt);
1197		RCU_INIT_POINTER(newnp->opt, opt);
1198	}
1199	inet_csk(newsk)->icsk_ext_hdr_len = 0;
1200	if (opt)
1201		inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
1202						    opt->opt_flen;
1203
1204	tcp_ca_openreq_child(newsk, dst);
1205
1206	tcp_sync_mss(newsk, dst_mtu(dst));
1207	newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));
1208
1209	tcp_initialize_rcv_mss(newsk);
1210
1211	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
1212	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
1213
1214#ifdef CONFIG_TCP_MD5SIG
1215	/* Copy over the MD5 key from the original socket */
1216	key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
1217	if (key) {
1218		/* We're using one, so create a matching key
1219		 * on the newsk structure. If we fail to get
1220		 * memory, then we end up not copying the key
1221		 * across. Shucks.
1222		 */
1223		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
1224			       AF_INET6, 128, key->key, key->keylen,
1225			       sk_gfp_mask(sk, GFP_ATOMIC));
1226	}
1227#endif
1228
1229	if (__inet_inherit_port(sk, newsk) < 0) {
1230		inet_csk_prepare_forced_close(newsk);
1231		tcp_done(newsk);
1232		goto out;
1233	}
1234	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
1235	if (*own_req) {
1236		tcp_move_syn(newtp, req);
1237
1238		/* Clone pktoptions received with SYN, if we own the req */
1239		if (ireq->pktopts) {
1240			newnp->pktoptions = skb_clone(ireq->pktopts,
1241						      sk_gfp_mask(sk, GFP_ATOMIC));
1242			consume_skb(ireq->pktopts);
1243			ireq->pktopts = NULL;
1244			if (newnp->pktoptions) {
1245				tcp_v6_restore_cb(newnp->pktoptions);
1246				skb_set_owner_r(newnp->pktoptions, newsk);
1247			}
1248		}
1249	}
1250
1251	return newsk;
1252
1253out_overflow:
1254	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1255out_nonewsk:
1256	dst_release(dst);
1257out:
1258	tcp_listendrop(sk);
1259	return NULL;
1260}
1261
1262/* The socket must have its spinlock held when we get
1263 * here, unless it is a TCP_LISTEN socket.
1264 *
1265 * We have a potential double-lock case here, so even when
1266 * doing backlog processing we use the BH locking scheme.
1267 * This is because we cannot sleep with the original spinlock
1268 * held.
1269 */
1270static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1271{
1272	struct ipv6_pinfo *np = inet6_sk(sk);
1273	struct tcp_sock *tp;
1274	struct sk_buff *opt_skb = NULL;
1275
1276	/* Imagine: socket is IPv6. IPv4 packet arrives,
1277	   goes to the IPv4 receive handler and is backlogged.
1278	   From the backlog it always goes here. Kerboom...
1279	   Fortunately, tcp_rcv_established and rcv_established
1280	   handle them correctly, but it is not the case with
1281	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
1282	 */
1283
1284	if (skb->protocol == htons(ETH_P_IP))
1285		return tcp_v4_do_rcv(sk, skb);
1286
1287	/*
1288	 *	socket locking is here for SMP purposes as backlog rcv
1289	 *	is currently called with bh processing disabled.
1290	 */
1291
1292	/* Do Stevens' IPV6_PKTOPTIONS.
1293
1294	   Yes, guys, it is the only place in our code where we
1295	   can do it without affecting IPv4.
1296	   The rest of the code is protocol independent,
1297	   and I do not like the idea of uglifying IPv4.
1298
1299	   Actually, the whole idea behind IPV6_PKTOPTIONS
1300	   does not look very well thought out. For now we latch
1301	   the options received in the last packet enqueued
1302	   by tcp. Feel free to propose a better solution.
1303					       --ANK (980728)
1304	 */
1305	if (np->rxopt.all)
1306		opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));
1307
1308	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1309		struct dst_entry *dst = sk->sk_rx_dst;
1310
1311		sock_rps_save_rxhash(sk, skb);
1312		sk_mark_napi_id(sk, skb);
1313		if (dst) {
1314			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1315			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
1316				dst_release(dst);
1317				sk->sk_rx_dst = NULL;
1318			}
1319		}
1320
1321		tcp_rcv_established(sk, skb, tcp_hdr(skb));
1322		if (opt_skb)
1323			goto ipv6_pktoptions;
1324		return 0;
1325	}
1326
1327	if (tcp_checksum_complete(skb))
1328		goto csum_err;
1329
1330	if (sk->sk_state == TCP_LISTEN) {
1331		struct sock *nsk = tcp_v6_cookie_check(sk, skb);
1332
1333		if (!nsk)
1334			goto discard;
1335
1336		if (nsk != sk) {
1337			if (tcp_child_process(sk, nsk, skb))
1338				goto reset;
1339			if (opt_skb)
1340				__kfree_skb(opt_skb);
1341			return 0;
1342		}
1343	} else
1344		sock_rps_save_rxhash(sk, skb);
1345
1346	if (tcp_rcv_state_process(sk, skb))
1347		goto reset;
1348	if (opt_skb)
1349		goto ipv6_pktoptions;
1350	return 0;
1351
1352reset:
1353	tcp_v6_send_reset(sk, skb);
1354discard:
1355	if (opt_skb)
1356		__kfree_skb(opt_skb);
1357	kfree_skb(skb);
1358	return 0;
1359csum_err:
1360	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1361	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
1362	goto discard;
1363
1364
1365ipv6_pktoptions:
1366	/* You may ask: what is this?
1367
1368	   1. skb was enqueued by tcp.
1369	   2. skb is added to tail of read queue, rather than out of order.
1370	   3. socket is not in passive state.
1371	   4. Finally, it really contains options which the user wants to receive.
1372	 */
1373	tp = tcp_sk(sk);
1374	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1375	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
1376		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
1377			np->mcast_oif = tcp_v6_iif(opt_skb);
1378		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1379			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
1380		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
1381			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
1382		if (np->repflow)
1383			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
1384		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
1385			skb_set_owner_r(opt_skb, sk);
1386			tcp_v6_restore_cb(opt_skb);
1387			opt_skb = xchg(&np->pktoptions, opt_skb);
1388		} else {
1389			__kfree_skb(opt_skb);
1390			opt_skb = xchg(&np->pktoptions, NULL);
1391		}
1392	}
1393
1394	kfree_skb(opt_skb);
1395	return 0;
1396}
1397
1398static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
1399			   const struct tcphdr *th)
1400{
1401	/* This is tricky: we move IP6CB at its correct location into
1402	 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
1403	 * _decode_session6() uses IP6CB().
1404	 * barrier() makes sure compiler won't play aliasing games.
1405	 */
1406	memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
1407		sizeof(struct inet6_skb_parm));
1408	barrier();
1409
1410	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1411	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1412				    skb->len - th->doff*4);
1413	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1414	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1415	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1416	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1417	TCP_SKB_CB(skb)->sacked = 0;
1418	TCP_SKB_CB(skb)->has_rxtstamp =
1419			skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
1420}
1421
1422static int tcp_v6_rcv(struct sk_buff *skb)
1423{
1424	int sdif = inet6_sdif(skb);
1425	const struct tcphdr *th;
1426	const struct ipv6hdr *hdr;
1427	bool refcounted;
1428	struct sock *sk;
1429	int ret;
1430	struct net *net = dev_net(skb->dev);
1431
1432	if (skb->pkt_type != PACKET_HOST)
1433		goto discard_it;
1434
1435	/*
1436	 *	Count it even if it's bad.
1437	 */
1438	__TCP_INC_STATS(net, TCP_MIB_INSEGS);
1439
1440	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1441		goto discard_it;
1442
1443	th = (const struct tcphdr *)skb->data;
1444
1445	if (unlikely(th->doff < sizeof(struct tcphdr)/4))
1446		goto bad_packet;
1447	if (!pskb_may_pull(skb, th->doff*4))
1448		goto discard_it;
1449
1450	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
1451		goto csum_error;
1452
1453	th = (const struct tcphdr *)skb->data;
1454	hdr = ipv6_hdr(skb);
1455
1456lookup:
1457	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
1458				th->source, th->dest, inet6_iif(skb), sdif,
1459				&refcounted);
1460	if (!sk)
1461		goto no_tcp_socket;
1462
1463process:
1464	if (sk->sk_state == TCP_TIME_WAIT)
1465		goto do_time_wait;
1466
1467	if (sk->sk_state == TCP_NEW_SYN_RECV) {
1468		struct request_sock *req = inet_reqsk(sk);
1469		bool req_stolen = false;
1470		struct sock *nsk;
1471
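    		/* The segment matched a request socket; validate it
    		 * against the pending request on behalf of the listener.
    		 */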
1472		sk = req->rsk_listener;
1473		if (tcp_v6_inbound_md5_hash(sk, skb)) {
1474			sk_drops_add(sk, skb);
1475			reqsk_put(req);
1476			goto discard_it;
1477		}
1478		if (unlikely(sk->sk_state != TCP_LISTEN)) {
1479			inet_csk_reqsk_queue_drop_and_put(sk, req);
1480			goto lookup;
1481		}
1482		sock_hold(sk);
1483		refcounted = true;
1484		nsk = NULL;
1485		if (!tcp_filter(sk, skb)) {
1486			th = (const struct tcphdr *)skb->data;
1487			hdr = ipv6_hdr(skb);
1488			tcp_v6_fill_cb(skb, hdr, th);
1489			nsk = tcp_check_req(sk, skb, req, false, &req_stolen);
1490		}
1491		if (!nsk) {
1492			reqsk_put(req);
1493			if (req_stolen) {
1494				/* Another cpu got exclusive access to req
1495				 * and created a full blown socket.
1496				 * Try to feed this packet to this socket
1497				 * instead of discarding it.
1498				 */
1499				tcp_v6_restore_cb(skb);
1500				sock_put(sk);
1501				goto lookup;
1502			}
1503			goto discard_and_relse;
1504		}
1505		if (nsk == sk) {
1506			reqsk_put(req);
1507			tcp_v6_restore_cb(skb);
1508		} else if (tcp_child_process(sk, nsk, skb)) {
1509			tcp_v6_send_reset(nsk, skb);
1510			goto discard_and_relse;
1511		} else {
1512			sock_put(sk);
1513			return 0;
1514		}
1515	}
1516	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
1517		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
1518		goto discard_and_relse;
1519	}
1520
1521	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1522		goto discard_and_relse;
1523
1524	if (tcp_v6_inbound_md5_hash(sk, skb))
1525		goto discard_and_relse;
1526
1527	if (tcp_filter(sk, skb))
1528		goto discard_and_relse;
1529	th = (const struct tcphdr *)skb->data;
1530	hdr = ipv6_hdr(skb);
1531	tcp_v6_fill_cb(skb, hdr, th);
1532
1533	skb->dev = NULL;
1534
1535	if (sk->sk_state == TCP_LISTEN) {
1536		ret = tcp_v6_do_rcv(sk, skb);
1537		goto put_and_return;
1538	}
1539
1540	sk_incoming_cpu_update(sk);
1541
1542	bh_lock_sock_nested(sk);
1543	tcp_segs_in(tcp_sk(sk), skb);
1544	ret = 0;
1545	if (!sock_owned_by_user(sk)) {
1546		ret = tcp_v6_do_rcv(sk, skb);
1547	} else if (tcp_add_backlog(sk, skb)) {
1548		goto discard_and_relse;
1549	}
1550	bh_unlock_sock(sk);
1551
1552put_and_return:
1553	if (refcounted)
1554		sock_put(sk);
1555	return ret ? -1 : 0;
1556
1557no_tcp_socket:
1558	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1559		goto discard_it;
1560
1561	tcp_v6_fill_cb(skb, hdr, th);
1562
1563	if (tcp_checksum_complete(skb)) {
1564csum_error:
1565		__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
1566bad_packet:
1567		__TCP_INC_STATS(net, TCP_MIB_INERRS);
1568	} else {
1569		tcp_v6_send_reset(NULL, skb);
1570	}
1571
1572discard_it:
1573	kfree_skb(skb);
1574	return 0;
1575
1576discard_and_relse:
1577	sk_drops_add(sk, skb);
1578	if (refcounted)
1579		sock_put(sk);
1580	goto discard_it;
1581
1582do_time_wait:
1583	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1584		inet_twsk_put(inet_twsk(sk));
1585		goto discard_it;
1586	}
1587
1588	tcp_v6_fill_cb(skb, hdr, th);
1589
1590	if (tcp_checksum_complete(skb)) {
1591		inet_twsk_put(inet_twsk(sk));
1592		goto csum_error;
1593	}
1594
1595	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1596	case TCP_TW_SYN:
1597	{
1598		struct sock *sk2;
1599
1600		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
1601					    skb, __tcp_hdrlen(th),
1602					    &ipv6_hdr(skb)->saddr, th->source,
1603					    &ipv6_hdr(skb)->daddr,
1604					    ntohs(th->dest), tcp_v6_iif(skb),
1605					    sdif);
1606		if (sk2) {
1607			struct inet_timewait_sock *tw = inet_twsk(sk);
1608			inet_twsk_deschedule_put(tw);
1609			sk = sk2;
1610			tcp_v6_restore_cb(skb);
1611			refcounted = false;
1612			goto process;
1613		}
1614	}
1615		/* to ACK */
1616		/* fall through */
1617	case TCP_TW_ACK:
1618		tcp_v6_timewait_ack(sk, skb);
1619		break;
1620	case TCP_TW_RST:
1621		tcp_v6_send_reset(sk, skb);
1622		inet_twsk_deschedule_put(inet_twsk(sk));
1623		goto discard_it;
1624	case TCP_TW_SUCCESS:
1625		;
1626	}
1627	goto discard_it;
1628}
1629
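    /* Before the full receive path runs, try to find an established
     * socket for this segment and attach it (and its cached rx dst)
     * to the skb, saving a second hash lookup in tcp_v6_rcv().
     */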
1630static void tcp_v6_early_demux(struct sk_buff *skb)
1631{
1632	const struct ipv6hdr *hdr;
1633	const struct tcphdr *th;
1634	struct sock *sk;
1635
1636	if (skb->pkt_type != PACKET_HOST)
1637		return;
1638
1639	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1640		return;
1641
1642	hdr = ipv6_hdr(skb);
1643	th = tcp_hdr(skb);
1644
1645	if (th->doff < sizeof(struct tcphdr) / 4)
1646		return;
1647
1648	/* Note: We use inet6_iif() here, not tcp_v6_iif() */
1649	sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1650					&hdr->saddr, th->source,
1651					&hdr->daddr, ntohs(th->dest),
1652					inet6_iif(skb), inet6_sdif(skb));
1653	if (sk) {
1654		skb->sk = sk;
1655		skb->destructor = sock_edemux;
1656		if (sk_fullsock(sk)) {
1657			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
1658
1659			if (dst)
1660				dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
1661			if (dst &&
1662			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1663				skb_dst_set_noref(skb, dst);
1664		}
1665	}
1666}
1667
1668static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1669	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
1670	.twsk_unique	= tcp_twsk_unique,
1671	.twsk_destructor = tcp_twsk_destructor,
1672};
1673
1674static const struct inet_connection_sock_af_ops ipv6_specific = {
1675	.queue_xmit	   = inet6_csk_xmit,
1676	.send_check	   = tcp_v6_send_check,
1677	.rebuild_header	   = inet6_sk_rebuild_header,
1678	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
1679	.conn_request	   = tcp_v6_conn_request,
1680	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
1681	.net_header_len	   = sizeof(struct ipv6hdr),
1682	.net_frag_header_len = sizeof(struct frag_hdr),
1683	.setsockopt	   = ipv6_setsockopt,
1684	.getsockopt	   = ipv6_getsockopt,
1685	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
1686	.sockaddr_len	   = sizeof(struct sockaddr_in6),
1687#ifdef CONFIG_COMPAT
1688	.compat_setsockopt = compat_ipv6_setsockopt,
1689	.compat_getsockopt = compat_ipv6_getsockopt,
1690#endif
1691	.mtu_reduced	   = tcp_v6_mtu_reduced,
1692};
1693
1694#ifdef CONFIG_TCP_MD5SIG
1695static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
1696	.md5_lookup	=	tcp_v6_md5_lookup,
1697	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
1698	.md5_parse	=	tcp_v6_parse_md5_keys,
1699};
1700#endif
1701
1702/*
1703 *	TCP over IPv4 via INET6 API
1704 */
1705static const struct inet_connection_sock_af_ops ipv6_mapped = {
1706	.queue_xmit	   = ip_queue_xmit,
1707	.send_check	   = tcp_v4_send_check,
1708	.rebuild_header	   = inet_sk_rebuild_header,
1709	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
1710	.conn_request	   = tcp_v6_conn_request,
1711	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
1712	.net_header_len	   = sizeof(struct iphdr),
1713	.setsockopt	   = ipv6_setsockopt,
1714	.getsockopt	   = ipv6_getsockopt,
1715	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
1716	.sockaddr_len	   = sizeof(struct sockaddr_in6),
1717#ifdef CONFIG_COMPAT
1718	.compat_setsockopt = compat_ipv6_setsockopt,
1719	.compat_getsockopt = compat_ipv6_getsockopt,
1720#endif
1721	.mtu_reduced	   = tcp_v4_mtu_reduced,
1722};
1723
1724#ifdef CONFIG_TCP_MD5SIG
1725static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
1726	.md5_lookup	=	tcp_v4_md5_lookup,
1727	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
1728	.md5_parse	=	tcp_v6_parse_md5_keys,
1729};
1730#endif
1731
1732/* NOTE: A lot of things are set to zero explicitly by the call to
1733 *       sk_alloc(), so they need not be done here.
1734 */
1735static int tcp_v6_init_sock(struct sock *sk)
1736{
1737	struct inet_connection_sock *icsk = inet_csk(sk);
1738
1739	tcp_init_sock(sk);
1740
1741	icsk->icsk_af_ops = &ipv6_specific;
1742
1743#ifdef CONFIG_TCP_MD5SIG
1744	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
1745#endif
1746
1747	return 0;
1748}
1749
1750static void tcp_v6_destroy_sock(struct sock *sk)
1751{
1752	tcp_v4_destroy_sock(sk);
1753	inet6_destroy_sock(sk);
1754}
1755
1756#ifdef CONFIG_PROC_FS
1757/* Proc filesystem TCPv6 sock list dumping. */
1758static void get_openreq6(struct seq_file *seq,
1759			 const struct request_sock *req, int i)
1760{
1761	long ttd = req->rsk_timer.expires - jiffies;
1762	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
1763	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;
1764
1765	if (ttd < 0)
1766		ttd = 0;
1767
1768	seq_printf(seq,
1769		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1770		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
1771		   i,
1772		   src->s6_addr32[0], src->s6_addr32[1],
1773		   src->s6_addr32[2], src->s6_addr32[3],
1774		   inet_rsk(req)->ir_num,
1775		   dest->s6_addr32[0], dest->s6_addr32[1],
1776		   dest->s6_addr32[2], dest->s6_addr32[3],
1777		   ntohs(inet_rsk(req)->ir_rmt_port),
1778		   TCP_SYN_RECV,
1779		   0, 0, /* could print option size, but that is af dependent. */
1780		   1,   /* timers active (only the expire timer) */
1781		   jiffies_to_clock_t(ttd),
1782		   req->num_timeout,
1783		   from_kuid_munged(seq_user_ns(seq),
1784				    sock_i_uid(req->rsk_listener)),
1785		   0,  /* non standard timer */
1786		   0, /* open_requests have no inode */
1787		   0, req);
1788}
1789
1790static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1791{
1792	const struct in6_addr *dest, *src;
1793	__u16 destp, srcp;
1794	int timer_active;
1795	unsigned long timer_expires;
1796	const struct inet_sock *inet = inet_sk(sp);
1797	const struct tcp_sock *tp = tcp_sk(sp);
1798	const struct inet_connection_sock *icsk = inet_csk(sp);
1799	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
1800	int rx_queue;
1801	int state;
1802
1803	dest  = &sp->sk_v6_daddr;
1804	src   = &sp->sk_v6_rcv_saddr;
1805	destp = ntohs(inet->inet_dport);
1806	srcp  = ntohs(inet->inet_sport);
1807
1808	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
1809	    icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
1810	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
1811		timer_active	= 1;
1812		timer_expires	= icsk->icsk_timeout;
1813	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1814		timer_active	= 4;
1815		timer_expires	= icsk->icsk_timeout;
1816	} else if (timer_pending(&sp->sk_timer)) {
1817		timer_active	= 2;
1818		timer_expires	= sp->sk_timer.expires;
1819	} else {
1820		timer_active	= 0;
1821		timer_expires = jiffies;
1822	}
1823
1824	state = inet_sk_state_load(sp);
1825	if (state == TCP_LISTEN)
1826		rx_queue = sp->sk_ack_backlog;
1827	else
1828		/* Because we don't lock the socket,
1829		 * we might find a transient negative value.
1830		 */
1831		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
1832
1833	seq_printf(seq,
1834		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1835		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
1836		   i,
1837		   src->s6_addr32[0], src->s6_addr32[1],
1838		   src->s6_addr32[2], src->s6_addr32[3], srcp,
1839		   dest->s6_addr32[0], dest->s6_addr32[1],
1840		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
1841		   state,
1842		   tp->write_seq - tp->snd_una,
1843		   rx_queue,
1844		   timer_active,
1845		   jiffies_delta_to_clock_t(timer_expires - jiffies),
1846		   icsk->icsk_retransmits,
1847		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
1848		   icsk->icsk_probes_out,
1849		   sock_i_ino(sp),
1850		   refcount_read(&sp->sk_refcnt), sp,
1851		   jiffies_to_clock_t(icsk->icsk_rto),
1852		   jiffies_to_clock_t(icsk->icsk_ack.ato),
1853		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
1854		   tp->snd_cwnd,
1855		   state == TCP_LISTEN ?
1856			fastopenq->max_qlen :
1857			(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
1858		   );
1859}
1860
1861static void get_timewait6_sock(struct seq_file *seq,
1862			       struct inet_timewait_sock *tw, int i)
1863{
1864	long delta = tw->tw_timer.expires - jiffies;
1865	const struct in6_addr *dest, *src;
1866	__u16 destp, srcp;
1867
1868	dest = &tw->tw_v6_daddr;
1869	src  = &tw->tw_v6_rcv_saddr;
1870	destp = ntohs(tw->tw_dport);
1871	srcp  = ntohs(tw->tw_sport);
1872
1873	seq_printf(seq,
1874		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1875		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
1876		   i,
1877		   src->s6_addr32[0], src->s6_addr32[1],
1878		   src->s6_addr32[2], src->s6_addr32[3], srcp,
1879		   dest->s6_addr32[0], dest->s6_addr32[1],
1880		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
1881		   tw->tw_substate, 0, 0,
1882		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
1883		   refcount_read(&tw->tw_refcnt), tw);
1884}
1885
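/* Request and timewait sockets share the established hash with full
 * sockets, so one iterator yields all three flavours; sk_state is
 * enough to tell them apart, since TCP_NEW_SYN_RECV and
 * TCP_TIME_WAIT never appear on a full socket here.
 */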
1886static int tcp6_seq_show(struct seq_file *seq, void *v)
1887{
1888	struct tcp_iter_state *st;
1889	struct sock *sk = v;
1890
1891	if (v == SEQ_START_TOKEN) {
1892		seq_puts(seq,
1893			 "  sl  "
1894			 "local_address                         "
1895			 "remote_address                        "
1896			 "st tx_queue rx_queue tr tm->when retrnsmt"
1897			 "   uid  timeout inode\n");
1898		goto out;
1899	}
1900	st = seq->private;
1901
1902	if (sk->sk_state == TCP_TIME_WAIT)
1903		get_timewait6_sock(seq, v, st->num);
1904	else if (sk->sk_state == TCP_NEW_SYN_RECV)
1905		get_openreq6(seq, v, st->num);
1906	else
1907		get_tcp6_sock(seq, v, st->num);
1908out:
1909	return 0;
1910}
1911
1912static const struct file_operations tcp6_afinfo_seq_fops = {
1913	.open    = tcp_seq_open,
1914	.read    = seq_read,
1915	.llseek  = seq_lseek,
1916	.release = seq_release_net
1917};
1918
1919static struct tcp_seq_afinfo tcp6_seq_afinfo = {
1920	.name		= "tcp6",
1921	.family		= AF_INET6,
1922	.seq_fops	= &tcp6_afinfo_seq_fops,
1923	.seq_ops	= {
1924		.show		= tcp6_seq_show,
1925	},
1926};
1927
1928int __net_init tcp6_proc_init(struct net *net)
1929{
1930	return tcp_proc_register(net, &tcp6_seq_afinfo);
1931}
1932
1933void tcp6_proc_exit(struct net *net)
1934{
1935	tcp_proc_unregister(net, &tcp6_seq_afinfo);
1936}
1937#endif
1938
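/*
 * The protocol vtable behind socket(AF_INET6, SOCK_STREAM, 0).  Most
 * entries are shared with IPv4 TCP; only the connection setup and
 * hashing hooks are IPv6-specific.  SLAB_TYPESAFE_BY_RCU means a
 * lockless lookup can briefly observe a recycled socket, so readers
 * must revalidate the lookup keys after taking a reference.
 */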
1939struct proto tcpv6_prot = {
1940	.name			= "TCPv6",
1941	.owner			= THIS_MODULE,
1942	.close			= tcp_close,
1943	.pre_connect		= tcp_v6_pre_connect,
1944	.connect		= tcp_v6_connect,
1945	.disconnect		= tcp_disconnect,
1946	.accept			= inet_csk_accept,
1947	.ioctl			= tcp_ioctl,
1948	.init			= tcp_v6_init_sock,
1949	.destroy		= tcp_v6_destroy_sock,
1950	.shutdown		= tcp_shutdown,
1951	.setsockopt		= tcp_setsockopt,
1952	.getsockopt		= tcp_getsockopt,
1953	.keepalive		= tcp_set_keepalive,
1954	.recvmsg		= tcp_recvmsg,
1955	.sendmsg		= tcp_sendmsg,
1956	.sendpage		= tcp_sendpage,
1957	.backlog_rcv		= tcp_v6_do_rcv,
1958	.release_cb		= tcp_release_cb,
1959	.hash			= inet6_hash,
1960	.unhash			= inet_unhash,
1961	.get_port		= inet_csk_get_port,
1962	.enter_memory_pressure	= tcp_enter_memory_pressure,
1963	.leave_memory_pressure	= tcp_leave_memory_pressure,
1964	.stream_memory_free	= tcp_stream_memory_free,
1965	.sockets_allocated	= &tcp_sockets_allocated,
1966	.memory_allocated	= &tcp_memory_allocated,
1967	.memory_pressure	= &tcp_memory_pressure,
1968	.orphan_count		= &tcp_orphan_count,
1969	.sysctl_mem		= sysctl_tcp_mem,
1970	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_wmem),
1971	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_rmem),
1972	.max_header		= MAX_TCP_HEADER,
1973	.obj_size		= sizeof(struct tcp6_sock),
1974	.slab_flags		= SLAB_TYPESAFE_BY_RCU,
1975	.twsk_prot		= &tcp6_timewait_sock_ops,
1976	.rsk_prot		= &tcp6_request_sock_ops,
1977	.h.hashinfo		= &tcp_hashinfo,
1978	.no_autobind		= true,
1979#ifdef CONFIG_COMPAT
1980	.compat_setsockopt	= compat_tcp_setsockopt,
1981	.compat_getsockopt	= compat_tcp_getsockopt,
1982#endif
1983	.diag_destroy		= tcp_abort,
1984};
1985
1986/* thinking of making this const? Don't.
1987 * early_demux can change based on sysctl.
1988 */
1989static struct inet6_protocol tcpv6_protocol = {
1990	.early_demux	=	tcp_v6_early_demux,
1991	.early_demux_handler =  tcp_v6_early_demux,
1992	.handler	=	tcp_v6_rcv,
1993	.err_handler	=	tcp_v6_err,
1994	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
1995};
1996
1997static struct inet_protosw tcpv6_protosw = {
1998	.type		=	SOCK_STREAM,
1999	.protocol	=	IPPROTO_TCP,
2000	.prot		=	&tcpv6_prot,
2001	.ops		=	&inet6_stream_ops,
2002	.flags		=	INET_PROTOSW_PERMANENT |
2003				INET_PROTOSW_ICSK,
2004};
2005
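/* Every network namespace gets a kernel-internal control socket;
 * tcp_v6_send_response() borrows it to emit RSTs and ACKs that are
 * not sent from a full socket.
 */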
2006static int __net_init tcpv6_net_init(struct net *net)
2007{
2008	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
2009				    SOCK_RAW, IPPROTO_TCP, net);
2010}
2011
2012static void __net_exit tcpv6_net_exit(struct net *net)
2013{
2014	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
2015}
2016
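/* Timewait timers would otherwise keep running into a dead netns:
 * sweep the established hash once per exiting batch and kill off the
 * IPv6 timewait sockets belonging to the dying namespaces.
 */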
2017static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
2018{
2019	inet_twsk_purge(&tcp_hashinfo, AF_INET6);
2020}
2021
2022static struct pernet_operations tcpv6_net_ops = {
2023	.init	    = tcpv6_net_init,
2024	.exit	    = tcpv6_net_exit,
2025	.exit_batch = tcpv6_net_exit_batch,
2026};
2027
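/* Called from inet6_init().  Registration order matters: protocol
 * handler first, then the socket-layer protosw, then the per-netns
 * control sockets; the gotos unwind in reverse on failure, mirroring
 * tcpv6_exit() below.
 */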
2028int __init tcpv6_init(void)
2029{
2030	int ret;
2031
2032	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
2033	if (ret)
2034		goto out;
2035
2036	/* register inet6 protocol */
2037	ret = inet6_register_protosw(&tcpv6_protosw);
2038	if (ret)
2039		goto out_tcpv6_protocol;
2040
2041	ret = register_pernet_subsys(&tcpv6_net_ops);
2042	if (ret)
2043		goto out_tcpv6_protosw;
2044out:
2045	return ret;
2046
2047out_tcpv6_protosw:
2048	inet6_unregister_protosw(&tcpv6_protosw);
2049out_tcpv6_protocol:
2050	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2051	goto out;
2052}
2053
2054void tcpv6_exit(void)
2055{
2056	unregister_pernet_subsys(&tcpv6_net_ops);
2057	inet6_unregister_protosw(&tcpv6_protosw);
2058	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2059}