   1/*
   2 *	TCP over IPv6
   3 *	Linux INET6 implementation
   4 *
   5 *	Authors:
   6 *	Pedro Roque		<roque@di.fc.ul.pt>
   7 *
   8 *	Based on:
   9 *	linux/net/ipv4/tcp.c
  10 *	linux/net/ipv4/tcp_input.c
  11 *	linux/net/ipv4/tcp_output.c
  12 *
  13 *	Fixes:
  14 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
  15 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
  16 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
  17 *					a single port at the same time.
  18 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
  19 *
  20 *	This program is free software; you can redistribute it and/or
  21 *      modify it under the terms of the GNU General Public License
  22 *      as published by the Free Software Foundation; either version
  23 *      2 of the License, or (at your option) any later version.
  24 */
  25
  26#include <linux/bottom_half.h>
  27#include <linux/module.h>
  28#include <linux/errno.h>
  29#include <linux/types.h>
  30#include <linux/socket.h>
  31#include <linux/sockios.h>
  32#include <linux/net.h>
  33#include <linux/jiffies.h>
  34#include <linux/in.h>
  35#include <linux/in6.h>
  36#include <linux/netdevice.h>
  37#include <linux/init.h>
  38#include <linux/jhash.h>
  39#include <linux/ipsec.h>
  40#include <linux/times.h>
  41#include <linux/slab.h>
  42
  43#include <linux/ipv6.h>
  44#include <linux/icmpv6.h>
  45#include <linux/random.h>
  46
  47#include <net/tcp.h>
  48#include <net/ndisc.h>
  49#include <net/inet6_hashtables.h>
  50#include <net/inet6_connection_sock.h>
  51#include <net/ipv6.h>
  52#include <net/transp_v6.h>
  53#include <net/addrconf.h>
  54#include <net/ip6_route.h>
  55#include <net/ip6_checksum.h>
  56#include <net/inet_ecn.h>
  57#include <net/protocol.h>
  58#include <net/xfrm.h>
  59#include <net/snmp.h>
  60#include <net/dsfield.h>
  61#include <net/timewait_sock.h>
  62#include <net/netdma.h>
  63#include <net/inet_common.h>
  64#include <net/secure_seq.h>
  65
  66#include <asm/uaccess.h>
  67
  68#include <linux/proc_fs.h>
  69#include <linux/seq_file.h>
  70
  71#include <linux/crypto.h>
  72#include <linux/scatterlist.h>
  73
  74static void	tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
  75static void	tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
  76				      struct request_sock *req);
  77
  78static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
  79static void	__tcp_v6_send_check(struct sk_buff *skb,
  80				    const struct in6_addr *saddr,
  81				    const struct in6_addr *daddr);
  82
  83static const struct inet_connection_sock_af_ops ipv6_mapped;
  84static const struct inet_connection_sock_af_ops ipv6_specific;
  85#ifdef CONFIG_TCP_MD5SIG
  86static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
  87static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
  88#else
  89static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
  90						   const struct in6_addr *addr)
  91{
  92	return NULL;
  93}
  94#endif
  95
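/*
 *	Hash the socket into the established table.  Sockets using the
 *	v4-mapped operations are handed to the IPv4 hash routine; native
 *	IPv6 sockets go through __inet6_hash() with bottom halves disabled.
 */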
  96static void tcp_v6_hash(struct sock *sk)
  97{
  98	if (sk->sk_state != TCP_CLOSE) {
  99		if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
 100			tcp_prot.hash(sk);
 101			return;
 102		}
 103		local_bh_disable();
 104		__inet6_hash(sk, NULL);
 105		local_bh_enable();
 106	}
 107}
 108
 109static __inline__ __sum16 tcp_v6_check(int len,
 110				   const struct in6_addr *saddr,
 111				   const struct in6_addr *daddr,
 112				   __wsum base)
 113{
 114	return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
 115}
 116
 117static __u32 tcp_v6_init_sequence(struct sk_buff *skb)
 118{
 119	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
 120					    ipv6_hdr(skb)->saddr.s6_addr32,
 121					    tcp_hdr(skb)->dest,
 122					    tcp_hdr(skb)->source);
 123}
 124
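/*
 *	Active open for an IPv6 TCP socket.  Validates the destination
 *	address, resolves any flow label, treats in6addr_any as loopback,
 *	and falls back to tcp_v4_connect() for v4-mapped destinations.
 *	Otherwise it routes the flow, hashes the socket onto a local port
 *	via inet6_hash_connect(), picks an initial sequence number and
 *	sends the SYN through tcp_connect().
 */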
 125static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 126			  int addr_len)
 127{
 128	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
 129	struct inet_sock *inet = inet_sk(sk);
 130	struct inet_connection_sock *icsk = inet_csk(sk);
 131	struct ipv6_pinfo *np = inet6_sk(sk);
 132	struct tcp_sock *tp = tcp_sk(sk);
 133	struct in6_addr *saddr = NULL, *final_p, final;
 134	struct rt6_info *rt;
 135	struct flowi6 fl6;
 136	struct dst_entry *dst;
 137	int addr_type;
 138	int err;
 139
 140	if (addr_len < SIN6_LEN_RFC2133)
 141		return -EINVAL;
 142
 143	if (usin->sin6_family != AF_INET6)
 144		return -EAFNOSUPPORT;
 145
 146	memset(&fl6, 0, sizeof(fl6));
 147
 148	if (np->sndflow) {
 149		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
 150		IP6_ECN_flow_init(fl6.flowlabel);
 151		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
 152			struct ip6_flowlabel *flowlabel;
 153			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
 154			if (flowlabel == NULL)
 155				return -EINVAL;
 156			ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
 157			fl6_sock_release(flowlabel);
 158		}
 159	}
 160
 161	/*
 162	 *	connect() to INADDR_ANY means loopback (BSD'ism).
 163	 */
 164
 165	if(ipv6_addr_any(&usin->sin6_addr))
 166		usin->sin6_addr.s6_addr[15] = 0x1;
 167
 168	addr_type = ipv6_addr_type(&usin->sin6_addr);
 169
 170	if(addr_type & IPV6_ADDR_MULTICAST)
 171		return -ENETUNREACH;
 172
 173	if (addr_type&IPV6_ADDR_LINKLOCAL) {
 174		if (addr_len >= sizeof(struct sockaddr_in6) &&
 175		    usin->sin6_scope_id) {
 176			/* If interface is set while binding, indices
 177			 * must coincide.
 178			 */
 179			if (sk->sk_bound_dev_if &&
 180			    sk->sk_bound_dev_if != usin->sin6_scope_id)
 181				return -EINVAL;
 182
 183			sk->sk_bound_dev_if = usin->sin6_scope_id;
 184		}
 185
 186		/* Connect to link-local address requires an interface */
 187		if (!sk->sk_bound_dev_if)
 188			return -EINVAL;
 189	}
 190
 191	if (tp->rx_opt.ts_recent_stamp &&
 192	    !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
 193		tp->rx_opt.ts_recent = 0;
 194		tp->rx_opt.ts_recent_stamp = 0;
 195		tp->write_seq = 0;
 196	}
 197
 198	ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
 199	np->flow_label = fl6.flowlabel;
 200
 201	/*
 202	 *	TCP over IPv4
 203	 */
 204
 205	if (addr_type == IPV6_ADDR_MAPPED) {
 206		u32 exthdrlen = icsk->icsk_ext_hdr_len;
 207		struct sockaddr_in sin;
 208
 209		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
 210
 211		if (__ipv6_only_sock(sk))
 212			return -ENETUNREACH;
 213
 214		sin.sin_family = AF_INET;
 215		sin.sin_port = usin->sin6_port;
 216		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
 217
 218		icsk->icsk_af_ops = &ipv6_mapped;
 219		sk->sk_backlog_rcv = tcp_v4_do_rcv;
 220#ifdef CONFIG_TCP_MD5SIG
 221		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
 222#endif
 223
 224		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
 225
 226		if (err) {
 227			icsk->icsk_ext_hdr_len = exthdrlen;
 228			icsk->icsk_af_ops = &ipv6_specific;
 229			sk->sk_backlog_rcv = tcp_v6_do_rcv;
 230#ifdef CONFIG_TCP_MD5SIG
 231			tp->af_specific = &tcp_sock_ipv6_specific;
 232#endif
 233			goto failure;
 234		} else {
 235			ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
 236			ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
 237					       &np->rcv_saddr);
 238		}
 239
 240		return err;
 241	}
 242
 243	if (!ipv6_addr_any(&np->rcv_saddr))
 244		saddr = &np->rcv_saddr;
 245
 246	fl6.flowi6_proto = IPPROTO_TCP;
 247	ipv6_addr_copy(&fl6.daddr, &np->daddr);
 248	ipv6_addr_copy(&fl6.saddr,
 249		       (saddr ? saddr : &np->saddr));
 250	fl6.flowi6_oif = sk->sk_bound_dev_if;
 251	fl6.flowi6_mark = sk->sk_mark;
 252	fl6.fl6_dport = usin->sin6_port;
 253	fl6.fl6_sport = inet->inet_sport;
 254
 255	final_p = fl6_update_dst(&fl6, np->opt, &final);
 256
 257	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
 258
 259	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true);
 260	if (IS_ERR(dst)) {
 261		err = PTR_ERR(dst);
 262		goto failure;
 263	}
 264
 265	if (saddr == NULL) {
 266		saddr = &fl6.saddr;
 267		ipv6_addr_copy(&np->rcv_saddr, saddr);
 268	}
 269
 270	/* set the source address */
 271	ipv6_addr_copy(&np->saddr, saddr);
 272	inet->inet_rcv_saddr = LOOPBACK4_IPV6;
 273
 274	sk->sk_gso_type = SKB_GSO_TCPV6;
 275	__ip6_dst_store(sk, dst, NULL, NULL);
 276
 277	rt = (struct rt6_info *) dst;
 278	if (tcp_death_row.sysctl_tw_recycle &&
 279	    !tp->rx_opt.ts_recent_stamp &&
 280	    ipv6_addr_equal(&rt->rt6i_dst.addr, &np->daddr)) {
 281		struct inet_peer *peer = rt6_get_peer(rt);
 282		/*
 283		 * VJ's idea. We save the last timestamp seen from
 284		 * the destination in the peer table when entering state
 285		 * TIME-WAIT, and initialize rx_opt.ts_recent from it
 286		 * when trying a new connection.
 287		 */
 288		if (peer) {
 289			inet_peer_refcheck(peer);
 290			if ((u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
 291				tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
 292				tp->rx_opt.ts_recent = peer->tcp_ts;
 293			}
 294		}
 295	}
 296
 297	icsk->icsk_ext_hdr_len = 0;
 298	if (np->opt)
 299		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
 300					  np->opt->opt_nflen);
 301
 302	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
 303
 304	inet->inet_dport = usin->sin6_port;
 305
 306	tcp_set_state(sk, TCP_SYN_SENT);
 307	err = inet6_hash_connect(&tcp_death_row, sk);
 308	if (err)
 309		goto late_failure;
 310
 311	if (!tp->write_seq)
 312		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
 313							     np->daddr.s6_addr32,
 314							     inet->inet_sport,
 315							     inet->inet_dport);
 316
 317	err = tcp_connect(sk);
 318	if (err)
 319		goto late_failure;
 320
 321	return 0;
 322
 323late_failure:
 324	tcp_set_state(sk, TCP_CLOSE);
 325	__sk_dst_reset(sk);
 326failure:
 327	inet->inet_dport = 0;
 328	sk->sk_route_caps = 0;
 329	return err;
 330}
 331
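/*
 *	ICMPv6 error handler.  Looks up the socket the offending segment
 *	belonged to, handles ICMPV6_PKT_TOOBIG by refreshing the route and
 *	shrinking the path MTU, drops pending request_socks on fatal errors,
 *	and otherwise reports the error to the socket owner.
 */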
 332static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 333		u8 type, u8 code, int offset, __be32 info)
 334{
 335	const struct ipv6hdr *hdr = (const struct ipv6hdr*)skb->data;
 336	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
 337	struct ipv6_pinfo *np;
 338	struct sock *sk;
 339	int err;
 340	struct tcp_sock *tp;
 341	__u32 seq;
 342	struct net *net = dev_net(skb->dev);
 343
 344	sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
 345			th->dest, &hdr->saddr, th->source, skb->dev->ifindex);
 346
 347	if (sk == NULL) {
 348		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
 349				   ICMP6_MIB_INERRORS);
 350		return;
 351	}
 352
 353	if (sk->sk_state == TCP_TIME_WAIT) {
 354		inet_twsk_put(inet_twsk(sk));
 355		return;
 356	}
 357
 358	bh_lock_sock(sk);
 359	if (sock_owned_by_user(sk))
 360		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
 361
 362	if (sk->sk_state == TCP_CLOSE)
 363		goto out;
 364
 365	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
 366		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
 367		goto out;
 368	}
 369
 370	tp = tcp_sk(sk);
 371	seq = ntohl(th->seq);
 372	if (sk->sk_state != TCP_LISTEN &&
 373	    !between(seq, tp->snd_una, tp->snd_nxt)) {
 374		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
 375		goto out;
 376	}
 377
 378	np = inet6_sk(sk);
 379
 380	if (type == ICMPV6_PKT_TOOBIG) {
 381		struct dst_entry *dst;
 382
 383		if (sock_owned_by_user(sk))
 384			goto out;
 385		if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
 386			goto out;
 387
 388		/* icmp should have updated the destination cache entry */
 389		dst = __sk_dst_check(sk, np->dst_cookie);
 390
 391		if (dst == NULL) {
 392			struct inet_sock *inet = inet_sk(sk);
 393			struct flowi6 fl6;
 394
 395			/* BUGGG_FUTURE: Again, it is not clear how
 396			   to handle the rthdr case. Ignore this complexity
 397			   for now.
 398			 */
 399			memset(&fl6, 0, sizeof(fl6));
 400			fl6.flowi6_proto = IPPROTO_TCP;
 401			ipv6_addr_copy(&fl6.daddr, &np->daddr);
 402			ipv6_addr_copy(&fl6.saddr, &np->saddr);
 403			fl6.flowi6_oif = sk->sk_bound_dev_if;
 404			fl6.flowi6_mark = sk->sk_mark;
 405			fl6.fl6_dport = inet->inet_dport;
 406			fl6.fl6_sport = inet->inet_sport;
 407			security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
 408
 409			dst = ip6_dst_lookup_flow(sk, &fl6, NULL, false);
 410			if (IS_ERR(dst)) {
 411				sk->sk_err_soft = -PTR_ERR(dst);
 412				goto out;
 413			}
 414
 415		} else
 416			dst_hold(dst);
 417
 418		if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
 419			tcp_sync_mss(sk, dst_mtu(dst));
 420			tcp_simple_retransmit(sk);
 421		} /* else let the usual retransmit timer handle it */
 422		dst_release(dst);
 423		goto out;
 424	}
 425
 426	icmpv6_err_convert(type, code, &err);
 427
 428	/* Might be for a request_sock */
 429	switch (sk->sk_state) {
 430		struct request_sock *req, **prev;
 431	case TCP_LISTEN:
 432		if (sock_owned_by_user(sk))
 433			goto out;
 434
 435		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
 436					   &hdr->saddr, inet6_iif(skb));
 437		if (!req)
 438			goto out;
 439
 440		/* ICMPs are not backlogged, hence we cannot get
 441		 * an established socket here.
 442		 */
 443		WARN_ON(req->sk != NULL);
 444
 445		if (seq != tcp_rsk(req)->snt_isn) {
 446			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
 447			goto out;
 448		}
 449
 450		inet_csk_reqsk_queue_drop(sk, req, prev);
 451		goto out;
 452
 453	case TCP_SYN_SENT:
 454	case TCP_SYN_RECV:  /* Cannot happen.
 455			       It can, if SYNs are crossed. --ANK */
 456		if (!sock_owned_by_user(sk)) {
 457			sk->sk_err = err;
 458			sk->sk_error_report(sk);		/* Wake people up to see the error (see connect in sock.c) */
 459
 460			tcp_done(sk);
 461		} else
 462			sk->sk_err_soft = err;
 463		goto out;
 464	}
 465
 466	if (!sock_owned_by_user(sk) && np->recverr) {
 467		sk->sk_err = err;
 468		sk->sk_error_report(sk);
 469	} else
 470		sk->sk_err_soft = err;
 471
 472out:
 473	bh_unlock_sock(sk);
 474	sock_put(sk);
 475}
 476
 477
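/*
 *	Build, checksum and transmit a SYN+ACK for the given request_sock,
 *	routing it with the listener's flow parameters and IPv6 options.
 */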
 478static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
 479			      struct request_values *rvp)
 480{
 481	struct inet6_request_sock *treq = inet6_rsk(req);
 482	struct ipv6_pinfo *np = inet6_sk(sk);
 483	struct sk_buff * skb;
 484	struct ipv6_txoptions *opt = NULL;
 485	struct in6_addr * final_p, final;
 486	struct flowi6 fl6;
 487	struct dst_entry *dst;
 488	int err;
 489
 490	memset(&fl6, 0, sizeof(fl6));
 491	fl6.flowi6_proto = IPPROTO_TCP;
 492	ipv6_addr_copy(&fl6.daddr, &treq->rmt_addr);
 493	ipv6_addr_copy(&fl6.saddr, &treq->loc_addr);
 494	fl6.flowlabel = 0;
 495	fl6.flowi6_oif = treq->iif;
 496	fl6.flowi6_mark = sk->sk_mark;
 497	fl6.fl6_dport = inet_rsk(req)->rmt_port;
 498	fl6.fl6_sport = inet_rsk(req)->loc_port;
 499	security_req_classify_flow(req, flowi6_to_flowi(&fl6));
 500
 501	opt = np->opt;
 502	final_p = fl6_update_dst(&fl6, opt, &final);
 503
 504	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
 505	if (IS_ERR(dst)) {
 506		err = PTR_ERR(dst);
 507		dst = NULL;
 508		goto done;
 509	}
 510	skb = tcp_make_synack(sk, dst, req, rvp);
 511	err = -ENOMEM;
 512	if (skb) {
 513		__tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);
 514
 515		ipv6_addr_copy(&fl6.daddr, &treq->rmt_addr);
 516		err = ip6_xmit(sk, skb, &fl6, opt);
 517		err = net_xmit_eval(err);
 518	}
 519
 520done:
 521	if (opt && opt != np->opt)
 522		sock_kfree_s(sk, opt, opt->tot_len);
 523	dst_release(dst);
 524	return err;
 525}
 526
 527static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
 528			     struct request_values *rvp)
 529{
 530	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
 531	return tcp_v6_send_synack(sk, req, rvp);
 532}
 533
 534static void tcp_v6_reqsk_destructor(struct request_sock *req)
 535{
 536	kfree_skb(inet6_rsk(req)->pktopts);
 537}
 538
 539#ifdef CONFIG_TCP_MD5SIG
 540static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
 541						   const struct in6_addr *addr)
 542{
 543	struct tcp_sock *tp = tcp_sk(sk);
 544	int i;
 545
 546	BUG_ON(tp == NULL);
 547
 548	if (!tp->md5sig_info || !tp->md5sig_info->entries6)
 549		return NULL;
 550
 551	for (i = 0; i < tp->md5sig_info->entries6; i++) {
 552		if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, addr))
 553			return &tp->md5sig_info->keys6[i].base;
 554	}
 555	return NULL;
 556}
 557
 558static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
 559						struct sock *addr_sk)
 560{
 561	return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
 562}
 563
 564static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
 565						      struct request_sock *req)
 566{
 567	return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
 568}
 569
 570static int tcp_v6_md5_do_add(struct sock *sk, const struct in6_addr *peer,
 571			     char *newkey, u8 newkeylen)
 572{
 573	/* Add key to the list */
 574	struct tcp_md5sig_key *key;
 575	struct tcp_sock *tp = tcp_sk(sk);
 576	struct tcp6_md5sig_key *keys;
 577
 578	key = tcp_v6_md5_do_lookup(sk, peer);
 579	if (key) {
 580		/* modify existing entry - just update that one */
 581		kfree(key->key);
 582		key->key = newkey;
 583		key->keylen = newkeylen;
 584	} else {
 585		/* reallocate new list if current one is full. */
 586		if (!tp->md5sig_info) {
 587			tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info), GFP_ATOMIC);
 588			if (!tp->md5sig_info) {
 589				kfree(newkey);
 590				return -ENOMEM;
 591			}
 592			sk_nocaps_add(sk, NETIF_F_GSO_MASK);
 593		}
 594		if (tp->md5sig_info->entries6 == 0 &&
 595			tcp_alloc_md5sig_pool(sk) == NULL) {
 596			kfree(newkey);
 597			return -ENOMEM;
 598		}
 599		if (tp->md5sig_info->alloced6 == tp->md5sig_info->entries6) {
 600			keys = kmalloc((sizeof (tp->md5sig_info->keys6[0]) *
 601				       (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC);
 602
 603			if (!keys) {
 604				kfree(newkey);
 605				if (tp->md5sig_info->entries6 == 0)
 606					tcp_free_md5sig_pool();
 607				return -ENOMEM;
 608			}
 609
 610			if (tp->md5sig_info->entries6)
 611				memmove(keys, tp->md5sig_info->keys6,
 612					(sizeof (tp->md5sig_info->keys6[0]) *
 613					 tp->md5sig_info->entries6));
 614
 615			kfree(tp->md5sig_info->keys6);
 616			tp->md5sig_info->keys6 = keys;
 617			tp->md5sig_info->alloced6++;
 618		}
 619
 620		ipv6_addr_copy(&tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr,
 621			       peer);
 622		tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.key = newkey;
 623		tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.keylen = newkeylen;
 624
 625		tp->md5sig_info->entries6++;
 626	}
 627	return 0;
 628}
 629
 630static int tcp_v6_md5_add_func(struct sock *sk, struct sock *addr_sk,
 631			       u8 *newkey, __u8 newkeylen)
 632{
 633	return tcp_v6_md5_do_add(sk, &inet6_sk(addr_sk)->daddr,
 634				 newkey, newkeylen);
 635}
 636
 637static int tcp_v6_md5_do_del(struct sock *sk, const struct in6_addr *peer)
 638{
 639	struct tcp_sock *tp = tcp_sk(sk);
 640	int i;
 641
 642	for (i = 0; i < tp->md5sig_info->entries6; i++) {
 643		if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, peer)) {
 644			/* Free the key */
 645			kfree(tp->md5sig_info->keys6[i].base.key);
 646			tp->md5sig_info->entries6--;
 647
 648			if (tp->md5sig_info->entries6 == 0) {
 649				kfree(tp->md5sig_info->keys6);
 650				tp->md5sig_info->keys6 = NULL;
 651				tp->md5sig_info->alloced6 = 0;
 652				tcp_free_md5sig_pool();
 653			} else {
 654				/* shrink the database */
 655				if (tp->md5sig_info->entries6 != i)
 656					memmove(&tp->md5sig_info->keys6[i],
 657						&tp->md5sig_info->keys6[i+1],
 658						(tp->md5sig_info->entries6 - i)
 659						* sizeof (tp->md5sig_info->keys6[0]));
 660			}
 661			return 0;
 662		}
 663	}
 664	return -ENOENT;
 665}
 666
 667static void tcp_v6_clear_md5_list (struct sock *sk)
 668{
 669	struct tcp_sock *tp = tcp_sk(sk);
 670	int i;
 671
 672	if (tp->md5sig_info->entries6) {
 673		for (i = 0; i < tp->md5sig_info->entries6; i++)
 674			kfree(tp->md5sig_info->keys6[i].base.key);
 675		tp->md5sig_info->entries6 = 0;
 676		tcp_free_md5sig_pool();
 677	}
 678
 679	kfree(tp->md5sig_info->keys6);
 680	tp->md5sig_info->keys6 = NULL;
 681	tp->md5sig_info->alloced6 = 0;
 682
 683	if (tp->md5sig_info->entries4) {
 684		for (i = 0; i < tp->md5sig_info->entries4; i++)
 685			kfree(tp->md5sig_info->keys4[i].base.key);
 686		tp->md5sig_info->entries4 = 0;
 687		tcp_free_md5sig_pool();
 688	}
 689
 690	kfree(tp->md5sig_info->keys4);
 691	tp->md5sig_info->keys4 = NULL;
 692	tp->md5sig_info->alloced4 = 0;
 693}
 694
 695static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval,
 696				  int optlen)
 697{
 698	struct tcp_md5sig cmd;
 699	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
 700	u8 *newkey;
 701
 702	if (optlen < sizeof(cmd))
 703		return -EINVAL;
 704
 705	if (copy_from_user(&cmd, optval, sizeof(cmd)))
 706		return -EFAULT;
 707
 708	if (sin6->sin6_family != AF_INET6)
 709		return -EINVAL;
 710
 711	if (!cmd.tcpm_keylen) {
 712		if (!tcp_sk(sk)->md5sig_info)
 713			return -ENOENT;
 714		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
 715			return tcp_v4_md5_do_del(sk, sin6->sin6_addr.s6_addr32[3]);
 716		return tcp_v6_md5_do_del(sk, &sin6->sin6_addr);
 717	}
 718
 719	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
 720		return -EINVAL;
 721
 722	if (!tcp_sk(sk)->md5sig_info) {
 723		struct tcp_sock *tp = tcp_sk(sk);
 724		struct tcp_md5sig_info *p;
 725
 726		p = kzalloc(sizeof(struct tcp_md5sig_info), GFP_KERNEL);
 727		if (!p)
 728			return -ENOMEM;
 729
 730		tp->md5sig_info = p;
 731		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
 732	}
 733
 734	newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
 735	if (!newkey)
 736		return -ENOMEM;
 737	if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
 738		return tcp_v4_md5_do_add(sk, sin6->sin6_addr.s6_addr32[3],
 739					 newkey, cmd.tcpm_keylen);
 740	}
 741	return tcp_v6_md5_do_add(sk, &sin6->sin6_addr, newkey, cmd.tcpm_keylen);
 742}
 743
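/*
 *	Feed the IPv6 pseudo-header (saddr, daddr, length, next header) into
 *	the MD5 hash state, as required for the TCP-MD5 signature option.
 */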
 744static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
 745					const struct in6_addr *daddr,
 746					const struct in6_addr *saddr, int nbytes)
 747{
 748	struct tcp6_pseudohdr *bp;
 749	struct scatterlist sg;
 750
 751	bp = &hp->md5_blk.ip6;
 752	/* 1. TCP pseudo-header (RFC2460) */
 753	ipv6_addr_copy(&bp->saddr, saddr);
 754	ipv6_addr_copy(&bp->daddr, daddr);
 755	bp->protocol = cpu_to_be32(IPPROTO_TCP);
 756	bp->len = cpu_to_be32(nbytes);
 757
 758	sg_init_one(&sg, bp, sizeof(*bp));
 759	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
 760}
 761
 762static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
 763			       const struct in6_addr *daddr, struct in6_addr *saddr,
 764			       struct tcphdr *th)
 765{
 766	struct tcp_md5sig_pool *hp;
 767	struct hash_desc *desc;
 768
 769	hp = tcp_get_md5sig_pool();
 770	if (!hp)
 771		goto clear_hash_noput;
 772	desc = &hp->md5_desc;
 773
 774	if (crypto_hash_init(desc))
 775		goto clear_hash;
 776	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
 777		goto clear_hash;
 778	if (tcp_md5_hash_header(hp, th))
 779		goto clear_hash;
 780	if (tcp_md5_hash_key(hp, key))
 781		goto clear_hash;
 782	if (crypto_hash_final(desc, md5_hash))
 783		goto clear_hash;
 784
 785	tcp_put_md5sig_pool();
 786	return 0;
 787
 788clear_hash:
 789	tcp_put_md5sig_pool();
 790clear_hash_noput:
 791	memset(md5_hash, 0, 16);
 792	return 1;
 793}
 794
 795static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
 796			       struct sock *sk, struct request_sock *req,
 797			       struct sk_buff *skb)
 798{
 799	const struct in6_addr *saddr, *daddr;
 800	struct tcp_md5sig_pool *hp;
 801	struct hash_desc *desc;
 802	struct tcphdr *th = tcp_hdr(skb);
 803
 804	if (sk) {
 805		saddr = &inet6_sk(sk)->saddr;
 806		daddr = &inet6_sk(sk)->daddr;
 807	} else if (req) {
 808		saddr = &inet6_rsk(req)->loc_addr;
 809		daddr = &inet6_rsk(req)->rmt_addr;
 810	} else {
 811		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
 812		saddr = &ip6h->saddr;
 813		daddr = &ip6h->daddr;
 814	}
 815
 816	hp = tcp_get_md5sig_pool();
 817	if (!hp)
 818		goto clear_hash_noput;
 819	desc = &hp->md5_desc;
 820
 821	if (crypto_hash_init(desc))
 822		goto clear_hash;
 823
 824	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
 825		goto clear_hash;
 826	if (tcp_md5_hash_header(hp, th))
 827		goto clear_hash;
 828	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
 829		goto clear_hash;
 830	if (tcp_md5_hash_key(hp, key))
 831		goto clear_hash;
 832	if (crypto_hash_final(desc, md5_hash))
 833		goto clear_hash;
 834
 835	tcp_put_md5sig_pool();
 836	return 0;
 837
 838clear_hash:
 839	tcp_put_md5sig_pool();
 840clear_hash_noput:
 841	memset(md5_hash, 0, 16);
 842	return 1;
 843}
 844
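/*
 *	Verify the TCP-MD5 signature on an incoming segment.  Returns
 *	non-zero (drop) when a signature is missing, unexpected, or does
 *	not match the key configured for the peer address.
 */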
 845static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb)
 846{
 847	__u8 *hash_location = NULL;
 848	struct tcp_md5sig_key *hash_expected;
 849	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
 850	struct tcphdr *th = tcp_hdr(skb);
 851	int genhash;
 852	u8 newhash[16];
 853
 854	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
 855	hash_location = tcp_parse_md5sig_option(th);
 856
 857	/* We've parsed the options - do we have a hash? */
 858	if (!hash_expected && !hash_location)
 859		return 0;
 860
 861	if (hash_expected && !hash_location) {
 862		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
 863		return 1;
 864	}
 865
 866	if (!hash_expected && hash_location) {
 867		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
 868		return 1;
 869	}
 870
 871	/* check the signature */
 872	genhash = tcp_v6_md5_hash_skb(newhash,
 873				      hash_expected,
 874				      NULL, NULL, skb);
 875
 876	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
 877		if (net_ratelimit()) {
 878			printk(KERN_INFO "MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
 879			       genhash ? "failed" : "mismatch",
 880			       &ip6h->saddr, ntohs(th->source),
 881			       &ip6h->daddr, ntohs(th->dest));
 882		}
 883		return 1;
 884	}
 885	return 0;
 886}
 887#endif
 888
 889struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
 890	.family		=	AF_INET6,
 891	.obj_size	=	sizeof(struct tcp6_request_sock),
 892	.rtx_syn_ack	=	tcp_v6_rtx_synack,
 893	.send_ack	=	tcp_v6_reqsk_send_ack,
 894	.destructor	=	tcp_v6_reqsk_destructor,
 895	.send_reset	=	tcp_v6_send_reset,
 896	.syn_ack_timeout = 	tcp_syn_ack_timeout,
 897};
 898
 899#ifdef CONFIG_TCP_MD5SIG
 900static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
 901	.md5_lookup	=	tcp_v6_reqsk_md5_lookup,
 902	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
 903};
 904#endif
 905
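/*
 *	Fill in the TCP checksum of an outgoing segment: leave a partial
 *	checksum for hardware offload, or compute the full checksum over
 *	the IPv6 pseudo-header in software.
 */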
 906static void __tcp_v6_send_check(struct sk_buff *skb,
 907				const struct in6_addr *saddr, const struct in6_addr *daddr)
 908{
 909	struct tcphdr *th = tcp_hdr(skb);
 910
 911	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 912		th->check = ~tcp_v6_check(skb->len, saddr, daddr, 0);
 913		skb->csum_start = skb_transport_header(skb) - skb->head;
 914		skb->csum_offset = offsetof(struct tcphdr, check);
 915	} else {
 916		th->check = tcp_v6_check(skb->len, saddr, daddr,
 917					 csum_partial(th, th->doff << 2,
 918						      skb->csum));
 919	}
 920}
 921
 922static void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
 923{
 924	struct ipv6_pinfo *np = inet6_sk(sk);
 925
 926	__tcp_v6_send_check(skb, &np->saddr, &np->daddr);
 927}
 928
 929static int tcp_v6_gso_send_check(struct sk_buff *skb)
 930{
 931	const struct ipv6hdr *ipv6h;
 932	struct tcphdr *th;
 933
 934	if (!pskb_may_pull(skb, sizeof(*th)))
 935		return -EINVAL;
 936
 937	ipv6h = ipv6_hdr(skb);
 938	th = tcp_hdr(skb);
 939
 940	th->check = 0;
 941	skb->ip_summed = CHECKSUM_PARTIAL;
 942	__tcp_v6_send_check(skb, &ipv6h->saddr, &ipv6h->daddr);
 943	return 0;
 944}
 945
 946static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
 947					 struct sk_buff *skb)
 948{
 949	const struct ipv6hdr *iph = skb_gro_network_header(skb);
 950
 951	switch (skb->ip_summed) {
 952	case CHECKSUM_COMPLETE:
 953		if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr,
 954				  skb->csum)) {
 955			skb->ip_summed = CHECKSUM_UNNECESSARY;
 956			break;
 957		}
 958
 959		/* fall through */
 960	case CHECKSUM_NONE:
 961		NAPI_GRO_CB(skb)->flush = 1;
 962		return NULL;
 963	}
 964
 965	return tcp_gro_receive(head, skb);
 966}
 967
 968static int tcp6_gro_complete(struct sk_buff *skb)
 969{
 970	const struct ipv6hdr *iph = ipv6_hdr(skb);
 971	struct tcphdr *th = tcp_hdr(skb);
 972
 973	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
 974				  &iph->saddr, &iph->daddr, 0);
 975	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
 976
 977	return tcp_gro_complete(skb);
 978}
 979
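/*
 *	Build and send a stand-alone ACK or RST in reply to the given skb,
 *	swapping its addresses and ports, optionally adding timestamp and
 *	MD5 options, and transmitting on the per-namespace control socket.
 */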
 980static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
 981				 u32 ts, struct tcp_md5sig_key *key, int rst)
 982{
 983	struct tcphdr *th = tcp_hdr(skb), *t1;
 984	struct sk_buff *buff;
 985	struct flowi6 fl6;
 986	struct net *net = dev_net(skb_dst(skb)->dev);
 987	struct sock *ctl_sk = net->ipv6.tcp_sk;
 988	unsigned int tot_len = sizeof(struct tcphdr);
 989	struct dst_entry *dst;
 990	__be32 *topt;
 991
 992	if (ts)
 993		tot_len += TCPOLEN_TSTAMP_ALIGNED;
 994#ifdef CONFIG_TCP_MD5SIG
 995	if (key)
 996		tot_len += TCPOLEN_MD5SIG_ALIGNED;
 997#endif
 998
 999	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
1000			 GFP_ATOMIC);
1001	if (buff == NULL)
1002		return;
1003
1004	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
1005
1006	t1 = (struct tcphdr *) skb_push(buff, tot_len);
1007	skb_reset_transport_header(buff);
1008
1009	/* Swap the send and the receive. */
1010	memset(t1, 0, sizeof(*t1));
1011	t1->dest = th->source;
1012	t1->source = th->dest;
1013	t1->doff = tot_len / 4;
1014	t1->seq = htonl(seq);
1015	t1->ack_seq = htonl(ack);
1016	t1->ack = !rst || !th->ack;
1017	t1->rst = rst;
1018	t1->window = htons(win);
1019
1020	topt = (__be32 *)(t1 + 1);
1021
1022	if (ts) {
1023		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
1024				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
1025		*topt++ = htonl(tcp_time_stamp);
1026		*topt++ = htonl(ts);
1027	}
1028
1029#ifdef CONFIG_TCP_MD5SIG
1030	if (key) {
1031		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
1032				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
1033		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
1034				    &ipv6_hdr(skb)->saddr,
1035				    &ipv6_hdr(skb)->daddr, t1);
1036	}
1037#endif
1038
1039	memset(&fl6, 0, sizeof(fl6));
1040	ipv6_addr_copy(&fl6.daddr, &ipv6_hdr(skb)->saddr);
1041	ipv6_addr_copy(&fl6.saddr, &ipv6_hdr(skb)->daddr);
1042
1043	buff->ip_summed = CHECKSUM_PARTIAL;
1044	buff->csum = 0;
1045
1046	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
1047
1048	fl6.flowi6_proto = IPPROTO_TCP;
1049	fl6.flowi6_oif = inet6_iif(skb);
1050	fl6.fl6_dport = t1->dest;
1051	fl6.fl6_sport = t1->source;
1052	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
1053
1054	/* Pass a socket to ip6_dst_lookup even when it is for a RST;
1055	 * the underlying function will use it to retrieve the network
1056	 * namespace.
1057	 */
1058	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL, false);
1059	if (!IS_ERR(dst)) {
1060		skb_dst_set(buff, dst);
1061		ip6_xmit(ctl_sk, buff, &fl6, NULL);
1062		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
1063		if (rst)
1064			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
1065		return;
1066	}
1067
1068	kfree_skb(buff);
1069}
1070
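/*
 *	Send a RST in response to a bad segment, mirroring its sequence
 *	numbers.  Nothing is sent for RSTs or non-unicast destinations.
 */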
1071static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
1072{
1073	struct tcphdr *th = tcp_hdr(skb);
1074	u32 seq = 0, ack_seq = 0;
1075	struct tcp_md5sig_key *key = NULL;
1076
1077	if (th->rst)
1078		return;
1079
1080	if (!ipv6_unicast_destination(skb))
1081		return;
1082
1083#ifdef CONFIG_TCP_MD5SIG
1084	if (sk)
1085		key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr);
1086#endif
1087
1088	if (th->ack)
1089		seq = ntohl(th->ack_seq);
1090	else
1091		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
1092			  (th->doff << 2);
1093
1094	tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1);
1095}
1096
1097static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts,
1098			    struct tcp_md5sig_key *key)
1099{
1100	tcp_v6_send_response(skb, seq, ack, win, ts, key, 0);
1101}
1102
1103static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
1104{
1105	struct inet_timewait_sock *tw = inet_twsk(sk);
1106	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
1107
1108	tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
1109			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
1110			tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw));
1111
1112	inet_twsk_put(tw);
1113}
1114
1115static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
1116				  struct request_sock *req)
1117{
1118	tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent,
1119			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr));
1120}
1121
1122
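/*
 *	For a listening socket, map an incoming segment to a pending
 *	request_sock or an already established child; fall back to SYN
 *	cookie validation when neither is found.
 */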
1123static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
1124{
1125	struct request_sock *req, **prev;
1126	const struct tcphdr *th = tcp_hdr(skb);
1127	struct sock *nsk;
1128
1129	/* Find possible connection requests. */
1130	req = inet6_csk_search_req(sk, &prev, th->source,
1131				   &ipv6_hdr(skb)->saddr,
1132				   &ipv6_hdr(skb)->daddr, inet6_iif(skb));
1133	if (req)
1134		return tcp_check_req(sk, skb, req, prev);
1135
1136	nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
1137			&ipv6_hdr(skb)->saddr, th->source,
1138			&ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));
1139
1140	if (nsk) {
1141		if (nsk->sk_state != TCP_TIME_WAIT) {
1142			bh_lock_sock(nsk);
1143			return nsk;
1144		}
1145		inet_twsk_put(inet_twsk(nsk));
1146		return NULL;
1147	}
1148
1149#ifdef CONFIG_SYN_COOKIES
1150	if (!th->syn)
1151		sk = cookie_v6_check(sk, skb);
1152#endif
1153	return sk;
1154}
1155
1156/* FIXME: this is substantially similar to the ipv4 code.
1157 * Can some kind of merge be done? -- erics
1158 */
1159static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1160{
1161	struct tcp_extend_values tmp_ext;
1162	struct tcp_options_received tmp_opt;
1163	u8 *hash_location;
1164	struct request_sock *req;
1165	struct inet6_request_sock *treq;
1166	struct ipv6_pinfo *np = inet6_sk(sk);
1167	struct tcp_sock *tp = tcp_sk(sk);
1168	__u32 isn = TCP_SKB_CB(skb)->when;
1169	struct dst_entry *dst = NULL;
1170	int want_cookie = 0;
1171
1172	if (skb->protocol == htons(ETH_P_IP))
1173		return tcp_v4_conn_request(sk, skb);
1174
1175	if (!ipv6_unicast_destination(skb))
1176		goto drop;
1177
1178	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
1179		want_cookie = tcp_syn_flood_action(sk, skb, "TCPv6");
1180		if (!want_cookie)
1181			goto drop;
1182	}
1183
1184	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
1185		goto drop;
1186
1187	req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
1188	if (req == NULL)
1189		goto drop;
1190
1191#ifdef CONFIG_TCP_MD5SIG
1192	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
1193#endif
1194
1195	tcp_clear_options(&tmp_opt);
1196	tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
1197	tmp_opt.user_mss = tp->rx_opt.user_mss;
1198	tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
1199
1200	if (tmp_opt.cookie_plus > 0 &&
1201	    tmp_opt.saw_tstamp &&
1202	    !tp->rx_opt.cookie_out_never &&
1203	    (sysctl_tcp_cookie_size > 0 ||
1204	     (tp->cookie_values != NULL &&
1205	      tp->cookie_values->cookie_desired > 0))) {
1206		u8 *c;
1207		u32 *d;
1208		u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
1209		int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;
1210
1211		if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
1212			goto drop_and_free;
1213
1214		/* Secret recipe starts with IP addresses */
1215		d = (__force u32 *)&ipv6_hdr(skb)->daddr.s6_addr32[0];
1216		*mess++ ^= *d++;
1217		*mess++ ^= *d++;
1218		*mess++ ^= *d++;
1219		*mess++ ^= *d++;
1220		d = (__force u32 *)&ipv6_hdr(skb)->saddr.s6_addr32[0];
1221		*mess++ ^= *d++;
1222		*mess++ ^= *d++;
1223		*mess++ ^= *d++;
1224		*mess++ ^= *d++;
1225
1226		/* plus variable length Initiator Cookie */
1227		c = (u8 *)mess;
1228		while (l-- > 0)
1229			*c++ ^= *hash_location++;
1230
1231		want_cookie = 0;	/* not our kind of cookie */
1232		tmp_ext.cookie_out_never = 0; /* false */
1233		tmp_ext.cookie_plus = tmp_opt.cookie_plus;
1234	} else if (!tp->rx_opt.cookie_in_always) {
1235		/* redundant indications, but ensure initialization. */
1236		tmp_ext.cookie_out_never = 1; /* true */
1237		tmp_ext.cookie_plus = 0;
1238	} else {
1239		goto drop_and_free;
1240	}
1241	tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;
1242
1243	if (want_cookie && !tmp_opt.saw_tstamp)
1244		tcp_clear_options(&tmp_opt);
1245
1246	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
1247	tcp_openreq_init(req, &tmp_opt, skb);
1248
1249	treq = inet6_rsk(req);
1250	ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr);
1251	ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr);
1252	if (!want_cookie || tmp_opt.tstamp_ok)
1253		TCP_ECN_create_request(req, tcp_hdr(skb));
1254
1255	if (!isn) {
1256		struct inet_peer *peer = NULL;
1257
1258		if (ipv6_opt_accepted(sk, skb) ||
1259		    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
1260		    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
1261			atomic_inc(&skb->users);
1262			treq->pktopts = skb;
1263		}
1264		treq->iif = sk->sk_bound_dev_if;
1265
1266		/* So that link locals have meaning */
1267		if (!sk->sk_bound_dev_if &&
1268		    ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
1269			treq->iif = inet6_iif(skb);
1270
1271		if (want_cookie) {
1272			isn = cookie_v6_init_sequence(sk, skb, &req->mss);
1273			req->cookie_ts = tmp_opt.tstamp_ok;
1274			goto have_isn;
1275		}
1276
1277		/* VJ's idea. We save last timestamp seen
1278		 * from the destination in peer table, when entering
1279		 * state TIME-WAIT, and check against it before
1280		 * accepting new connection request.
1281		 *
1282		 * If "isn" is not zero, this request hit alive
1283		 * timewait bucket, so that all the necessary checks
1284		 * are made in the function processing timewait state.
1285		 */
1286		if (tmp_opt.saw_tstamp &&
1287		    tcp_death_row.sysctl_tw_recycle &&
1288		    (dst = inet6_csk_route_req(sk, req)) != NULL &&
1289		    (peer = rt6_get_peer((struct rt6_info *)dst)) != NULL &&
1290		    ipv6_addr_equal((struct in6_addr *)peer->daddr.addr.a6,
1291				    &treq->rmt_addr)) {
1292			inet_peer_refcheck(peer);
1293			if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
1294			    (s32)(peer->tcp_ts - req->ts_recent) >
1295							TCP_PAWS_WINDOW) {
1296				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
1297				goto drop_and_release;
1298			}
1299		}
1300		/* Kill the following clause, if you dislike this way. */
1301		else if (!sysctl_tcp_syncookies &&
1302			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
1303			  (sysctl_max_syn_backlog >> 2)) &&
1304			 (!peer || !peer->tcp_ts_stamp) &&
1305			 (!dst || !dst_metric(dst, RTAX_RTT))) {
1306			/* Without syncookies last quarter of
1307			 * backlog is filled with destinations,
1308			 * proven to be alive.
1309			 * It means that we continue to communicate
1310			 * to destinations, already remembered
1311			 * to the moment of synflood.
1312			 */
1313			LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI6/%u\n",
1314				       &treq->rmt_addr, ntohs(tcp_hdr(skb)->source));
1315			goto drop_and_release;
1316		}
1317
1318		isn = tcp_v6_init_sequence(skb);
1319	}
1320have_isn:
1321	tcp_rsk(req)->snt_isn = isn;
1322	tcp_rsk(req)->snt_synack = tcp_time_stamp;
1323
1324	security_inet_conn_request(sk, skb, req);
1325
1326	if (tcp_v6_send_synack(sk, req,
1327			       (struct request_values *)&tmp_ext) ||
1328	    want_cookie)
1329		goto drop_and_free;
1330
1331	inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1332	return 0;
1333
1334drop_and_release:
1335	dst_release(dst);
1336drop_and_free:
1337	reqsk_free(req);
1338drop:
1339	return 0; /* don't send reset */
1340}
1341
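/*
 *	Create the child socket once the three-way handshake completes.
 *	The v4-mapped case is delegated to tcp_v4_syn_recv_sock(); otherwise
 *	the addresses, IPv6 options and (optionally) the MD5 key are copied
 *	to the new socket before it is hashed.
 */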
1342static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1343					  struct request_sock *req,
1344					  struct dst_entry *dst)
1345{
1346	struct inet6_request_sock *treq;
1347	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
1348	struct tcp6_sock *newtcp6sk;
1349	struct inet_sock *newinet;
1350	struct tcp_sock *newtp;
1351	struct sock *newsk;
1352	struct ipv6_txoptions *opt;
1353#ifdef CONFIG_TCP_MD5SIG
1354	struct tcp_md5sig_key *key;
1355#endif
1356
1357	if (skb->protocol == htons(ETH_P_IP)) {
1358		/*
1359		 *	v6 mapped
1360		 */
1361
1362		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);
1363
1364		if (newsk == NULL)
1365			return NULL;
1366
1367		newtcp6sk = (struct tcp6_sock *)newsk;
1368		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1369
1370		newinet = inet_sk(newsk);
1371		newnp = inet6_sk(newsk);
1372		newtp = tcp_sk(newsk);
1373
1374		memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1375
1376		ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr);
1377
1378		ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);
1379
1380		ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);
1381
1382		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1383		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
1384#ifdef CONFIG_TCP_MD5SIG
1385		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1386#endif
1387
1388		newnp->ipv6_ac_list = NULL;
1389		newnp->ipv6_fl_list = NULL;
1390		newnp->pktoptions  = NULL;
1391		newnp->opt	   = NULL;
1392		newnp->mcast_oif   = inet6_iif(skb);
1393		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
1394
1395		/*
1396		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1397		 * here, tcp_create_openreq_child now does this for us, see the comment in
1398		 * that function for the gory details. -acme
1399		 */
1400
1401	/* This is a tricky place. Until this moment IPv4 tcp
1402		   worked with IPv6 icsk.icsk_af_ops.
1403		   Sync it now.
1404		 */
1405		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
1406
1407		return newsk;
1408	}
1409
1410	treq = inet6_rsk(req);
1411	opt = np->opt;
1412
1413	if (sk_acceptq_is_full(sk))
1414		goto out_overflow;
1415
1416	if (!dst) {
1417		dst = inet6_csk_route_req(sk, req);
1418		if (!dst)
1419			goto out;
1420	}
1421
1422	newsk = tcp_create_openreq_child(sk, req, skb);
1423	if (newsk == NULL)
1424		goto out_nonewsk;
1425
1426	/*
1427	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1428	 * count here, tcp_create_openreq_child now does this for us, see the
1429	 * comment in that function for the gory details. -acme
1430	 */
1431
1432	newsk->sk_gso_type = SKB_GSO_TCPV6;
1433	__ip6_dst_store(newsk, dst, NULL, NULL);
1434
1435	newtcp6sk = (struct tcp6_sock *)newsk;
1436	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1437
1438	newtp = tcp_sk(newsk);
1439	newinet = inet_sk(newsk);
1440	newnp = inet6_sk(newsk);
1441
1442	memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1443
1444	ipv6_addr_copy(&newnp->daddr, &treq->rmt_addr);
1445	ipv6_addr_copy(&newnp->saddr, &treq->loc_addr);
1446	ipv6_addr_copy(&newnp->rcv_saddr, &treq->loc_addr);
1447	newsk->sk_bound_dev_if = treq->iif;
1448
1449	/* Now IPv6 options...
1450
1451	   First: no IPv4 options.
1452	 */
1453	newinet->inet_opt = NULL;
1454	newnp->ipv6_ac_list = NULL;
1455	newnp->ipv6_fl_list = NULL;
1456
1457	/* Clone RX bits */
1458	newnp->rxopt.all = np->rxopt.all;
1459
1460	/* Clone pktoptions received with SYN */
1461	newnp->pktoptions = NULL;
1462	if (treq->pktopts != NULL) {
1463		newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
1464		kfree_skb(treq->pktopts);
1465		treq->pktopts = NULL;
1466		if (newnp->pktoptions)
1467			skb_set_owner_r(newnp->pktoptions, newsk);
1468	}
1469	newnp->opt	  = NULL;
1470	newnp->mcast_oif  = inet6_iif(skb);
1471	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1472
1473	/* Clone native IPv6 options from listening socket (if any)
1474
1475	   Yes, keeping reference count would be much more clever,
1476	   but we do one more thing here: reattach optmem
1477	   to newsk.
1478	 */
1479	if (opt) {
1480		newnp->opt = ipv6_dup_options(newsk, opt);
1481		if (opt != np->opt)
1482			sock_kfree_s(sk, opt, opt->tot_len);
1483	}
1484
1485	inet_csk(newsk)->icsk_ext_hdr_len = 0;
1486	if (newnp->opt)
1487		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
1488						     newnp->opt->opt_flen);
1489
1490	tcp_mtup_init(newsk);
1491	tcp_sync_mss(newsk, dst_mtu(dst));
1492	newtp->advmss = dst_metric_advmss(dst);
1493	tcp_initialize_rcv_mss(newsk);
1494	if (tcp_rsk(req)->snt_synack)
1495		tcp_valid_rtt_meas(newsk,
1496		    tcp_time_stamp - tcp_rsk(req)->snt_synack);
1497	newtp->total_retrans = req->retrans;
1498
1499	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
1500	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
1501
1502#ifdef CONFIG_TCP_MD5SIG
1503	/* Copy over the MD5 key from the original socket */
1504	if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
1505		/* We're using one, so create a matching key
1506		 * on the newsk structure. If we fail to get
1507		 * memory, then we end up not copying the key
1508		 * across. Shucks.
1509		 */
1510		char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
1511		if (newkey != NULL)
1512			tcp_v6_md5_do_add(newsk, &newnp->daddr,
1513					  newkey, key->keylen);
1514	}
1515#endif
1516
1517	if (__inet_inherit_port(sk, newsk) < 0) {
1518		sock_put(newsk);
1519		goto out;
1520	}
1521	__inet6_hash(newsk, NULL);
1522
1523	return newsk;
1524
1525out_overflow:
1526	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1527out_nonewsk:
1528	if (opt && opt != np->opt)
1529		sock_kfree_s(sk, opt, opt->tot_len);
1530	dst_release(dst);
1531out:
1532	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1533	return NULL;
1534}
1535
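/*
 *	Validate the checksum of a received segment: accept a verified
 *	hardware checksum, otherwise seed skb->csum with the pseudo-header
 *	sum, and fully verify short packets immediately.
 */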
1536static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
1537{
1538	if (skb->ip_summed == CHECKSUM_COMPLETE) {
1539		if (!tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr,
1540				  &ipv6_hdr(skb)->daddr, skb->csum)) {
1541			skb->ip_summed = CHECKSUM_UNNECESSARY;
1542			return 0;
1543		}
1544	}
1545
1546	skb->csum = ~csum_unfold(tcp_v6_check(skb->len,
1547					      &ipv6_hdr(skb)->saddr,
1548					      &ipv6_hdr(skb)->daddr, 0));
1549
1550	if (skb->len <= 76) {
1551		return __skb_checksum_complete(skb);
1552	}
1553	return 0;
1554}
1555
1556/* The socket must have its spinlock held when we get
1557 * here.
1558 *
1559 * We have a potential double-lock case here, so even when
1560 * doing backlog processing we use the BH locking scheme.
1561 * This is because we cannot sleep with the original spinlock
1562 * held.
1563 */
1564static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1565{
1566	struct ipv6_pinfo *np = inet6_sk(sk);
1567	struct tcp_sock *tp;
1568	struct sk_buff *opt_skb = NULL;
1569
1570	/* Imagine: socket is IPv6. IPv4 packet arrives,
1571	   goes to IPv4 receive handler and backlogged.
1572	   From backlog it always goes here. Kerboom...
1573	   Fortunately, tcp_rcv_established and rcv_established
1574	   handle them correctly, but that is not the case with
1575	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
1576	 */
1577
1578	if (skb->protocol == htons(ETH_P_IP))
1579		return tcp_v4_do_rcv(sk, skb);
1580
1581#ifdef CONFIG_TCP_MD5SIG
1582	if (tcp_v6_inbound_md5_hash (sk, skb))
1583		goto discard;
1584#endif
1585
1586	if (sk_filter(sk, skb))
1587		goto discard;
1588
1589	/*
1590	 *	socket locking is here for SMP purposes as backlog rcv
1591	 *	is currently called with bh processing disabled.
1592	 */
1593
1594	/* Do Stevens' IPV6_PKTOPTIONS.
1595
1596	   Yes, guys, it is the only place in our code, where we
1597	   may make it not affecting IPv4.
1598	   The rest of code is protocol independent,
1599	   and I do not like idea to uglify IPv4.
1600
1601	   Actually, the whole idea behind IPV6_PKTOPTIONS
1602	   does not look very well thought out. For now we latch
1603	   options, received in the last packet, enqueued
1604	   by tcp. Feel free to propose better solution.
1605					       --ANK (980728)
1606	 */
1607	if (np->rxopt.all)
1608		opt_skb = skb_clone(skb, GFP_ATOMIC);
1609
1610	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1611		sock_rps_save_rxhash(sk, skb->rxhash);
1612		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
1613			goto reset;
1614		if (opt_skb)
1615			goto ipv6_pktoptions;
1616		return 0;
1617	}
1618
1619	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1620		goto csum_err;
1621
1622	if (sk->sk_state == TCP_LISTEN) {
1623		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
1624		if (!nsk)
1625			goto discard;
1626
1627		/*
1628		 * Queue it on the new socket if the new socket is active,
1629		 * otherwise we just shortcircuit this and continue with
1630		 * the new socket..
1631		 */
1632		if(nsk != sk) {
1633			sock_rps_save_rxhash(nsk, skb->rxhash);
1634			if (tcp_child_process(sk, nsk, skb))
1635				goto reset;
1636			if (opt_skb)
1637				__kfree_skb(opt_skb);
1638			return 0;
1639		}
1640	} else
1641		sock_rps_save_rxhash(sk, skb->rxhash);
1642
1643	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
1644		goto reset;
1645	if (opt_skb)
1646		goto ipv6_pktoptions;
1647	return 0;
1648
1649reset:
1650	tcp_v6_send_reset(sk, skb);
1651discard:
1652	if (opt_skb)
1653		__kfree_skb(opt_skb);
1654	kfree_skb(skb);
1655	return 0;
1656csum_err:
1657	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1658	goto discard;
1659
1660
1661ipv6_pktoptions:
1662	/* Do you ask, what is it?
1663
1664	   1. skb was enqueued by tcp.
1665	   2. skb is added to tail of read queue, rather than out of order.
1666	   3. socket is not in passive state.
1667	   4. Finally, it really contains options, which user wants to receive.
1668	 */
1669	tp = tcp_sk(sk);
1670	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1671	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
1672		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
1673			np->mcast_oif = inet6_iif(opt_skb);
1674		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1675			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
1676		if (ipv6_opt_accepted(sk, opt_skb)) {
1677			skb_set_owner_r(opt_skb, sk);
1678			opt_skb = xchg(&np->pktoptions, opt_skb);
1679		} else {
1680			__kfree_skb(opt_skb);
1681			opt_skb = xchg(&np->pktoptions, NULL);
1682		}
1683	}
1684
1685	kfree_skb(opt_skb);
1686	return 0;
1687}
1688
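/*
 *	Main receive entry point for IPv6 TCP.  Validates the header and
 *	checksum, looks up the owning socket, applies XFRM policy and socket
 *	filters, and then delivers the segment directly, via the prequeue,
 *	or onto the socket backlog.  TIME_WAIT sockets are handled last.
 */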
1689static int tcp_v6_rcv(struct sk_buff *skb)
1690{
1691	struct tcphdr *th;
1692	const struct ipv6hdr *hdr;
1693	struct sock *sk;
1694	int ret;
1695	struct net *net = dev_net(skb->dev);
1696
1697	if (skb->pkt_type != PACKET_HOST)
1698		goto discard_it;
1699
1700	/*
1701	 *	Count it even if it's bad.
1702	 */
1703	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1704
1705	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1706		goto discard_it;
1707
1708	th = tcp_hdr(skb);
1709
1710	if (th->doff < sizeof(struct tcphdr)/4)
1711		goto bad_packet;
1712	if (!pskb_may_pull(skb, th->doff*4))
1713		goto discard_it;
1714
1715	if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
1716		goto bad_packet;
1717
1718	th = tcp_hdr(skb);
1719	hdr = ipv6_hdr(skb);
1720	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1721	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1722				    skb->len - th->doff*4);
1723	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1724	TCP_SKB_CB(skb)->when = 0;
1725	TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(hdr);
1726	TCP_SKB_CB(skb)->sacked = 0;
1727
1728	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
1729	if (!sk)
1730		goto no_tcp_socket;
1731
1732process:
1733	if (sk->sk_state == TCP_TIME_WAIT)
1734		goto do_time_wait;
1735
1736	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
1737		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1738		goto discard_and_relse;
1739	}
1740
1741	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1742		goto discard_and_relse;
1743
1744	if (sk_filter(sk, skb))
1745		goto discard_and_relse;
1746
1747	skb->dev = NULL;
1748
1749	bh_lock_sock_nested(sk);
1750	ret = 0;
1751	if (!sock_owned_by_user(sk)) {
1752#ifdef CONFIG_NET_DMA
1753		struct tcp_sock *tp = tcp_sk(sk);
1754		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1755			tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
1756		if (tp->ucopy.dma_chan)
1757			ret = tcp_v6_do_rcv(sk, skb);
1758		else
1759#endif
1760		{
1761			if (!tcp_prequeue(sk, skb))
1762				ret = tcp_v6_do_rcv(sk, skb);
1763		}
1764	} else if (unlikely(sk_add_backlog(sk, skb))) {
1765		bh_unlock_sock(sk);
1766		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1767		goto discard_and_relse;
1768	}
1769	bh_unlock_sock(sk);
1770
1771	sock_put(sk);
1772	return ret ? -1 : 0;
1773
1774no_tcp_socket:
1775	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1776		goto discard_it;
1777
1778	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1779bad_packet:
1780		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1781	} else {
1782		tcp_v6_send_reset(NULL, skb);
1783	}
1784
1785discard_it:
1786
1787	/*
1788	 *	Discard frame
1789	 */
1790
1791	kfree_skb(skb);
1792	return 0;
1793
1794discard_and_relse:
1795	sock_put(sk);
1796	goto discard_it;
1797
1798do_time_wait:
1799	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1800		inet_twsk_put(inet_twsk(sk));
1801		goto discard_it;
1802	}
1803
1804	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1805		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1806		inet_twsk_put(inet_twsk(sk));
1807		goto discard_it;
1808	}
1809
1810	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1811	case TCP_TW_SYN:
1812	{
1813		struct sock *sk2;
1814
1815		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
1816					    &ipv6_hdr(skb)->daddr,
1817					    ntohs(th->dest), inet6_iif(skb));
1818		if (sk2 != NULL) {
1819			struct inet_timewait_sock *tw = inet_twsk(sk);
1820			inet_twsk_deschedule(tw, &tcp_death_row);
1821			inet_twsk_put(tw);
1822			sk = sk2;
1823			goto process;
1824		}
1825		/* Fall through to ACK */
1826	}
1827	case TCP_TW_ACK:
1828		tcp_v6_timewait_ack(sk, skb);
1829		break;
1830	case TCP_TW_RST:
1831		goto no_tcp_socket;
1832	case TCP_TW_SUCCESS:;
1833	}
1834	goto discard_it;
1835}
1836
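/*
 *	Return the inet_peer entry for the connection's destination, using
 *	the one cached on the route when possible and a fresh lookup (to be
 *	released by the caller) otherwise.
 */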
1837static struct inet_peer *tcp_v6_get_peer(struct sock *sk, bool *release_it)
1838{
1839	struct rt6_info *rt = (struct rt6_info *) __sk_dst_get(sk);
1840	struct ipv6_pinfo *np = inet6_sk(sk);
1841	struct inet_peer *peer;
1842
1843	if (!rt ||
1844	    !ipv6_addr_equal(&np->daddr, &rt->rt6i_dst.addr)) {
1845		peer = inet_getpeer_v6(&np->daddr, 1);
1846		*release_it = true;
1847	} else {
1848		if (!rt->rt6i_peer)
1849			rt6_bind_peer(rt, 1);
1850		peer = rt->rt6i_peer;
1851		*release_it = false;
1852	}
1853
1854	return peer;
1855}
1856
1857static void *tcp_v6_tw_get_peer(struct sock *sk)
1858{
1859	struct inet6_timewait_sock *tw6 = inet6_twsk(sk);
1860	struct inet_timewait_sock *tw = inet_twsk(sk);
1861
1862	if (tw->tw_family == AF_INET)
1863		return tcp_v4_tw_get_peer(sk);
1864
1865	return inet_getpeer_v6(&tw6->tw_v6_daddr, 1);
1866}
1867
1868static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1869	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
1870	.twsk_unique	= tcp_twsk_unique,
1871	.twsk_destructor= tcp_twsk_destructor,
1872	.twsk_getpeer	= tcp_v6_tw_get_peer,
1873};
1874
1875static const struct inet_connection_sock_af_ops ipv6_specific = {
1876	.queue_xmit	   = inet6_csk_xmit,
1877	.send_check	   = tcp_v6_send_check,
1878	.rebuild_header	   = inet6_sk_rebuild_header,
1879	.conn_request	   = tcp_v6_conn_request,
1880	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
1881	.get_peer	   = tcp_v6_get_peer,
1882	.net_header_len	   = sizeof(struct ipv6hdr),
1883	.setsockopt	   = ipv6_setsockopt,
1884	.getsockopt	   = ipv6_getsockopt,
1885	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
1886	.sockaddr_len	   = sizeof(struct sockaddr_in6),
1887	.bind_conflict	   = inet6_csk_bind_conflict,
1888#ifdef CONFIG_COMPAT
1889	.compat_setsockopt = compat_ipv6_setsockopt,
1890	.compat_getsockopt = compat_ipv6_getsockopt,
1891#endif
 
1892};
1893
1894#ifdef CONFIG_TCP_MD5SIG
1895static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
1896	.md5_lookup	=	tcp_v6_md5_lookup,
1897	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
1898	.md5_add	=	tcp_v6_md5_add_func,
1899	.md5_parse	=	tcp_v6_parse_md5_keys,
1900};
1901#endif
1902
1903/*
1904 *	TCP over IPv4 via INET6 API
1905 */
1906
1907static const struct inet_connection_sock_af_ops ipv6_mapped = {
1908	.queue_xmit	   = ip_queue_xmit,
1909	.send_check	   = tcp_v4_send_check,
1910	.rebuild_header	   = inet_sk_rebuild_header,
 
1911	.conn_request	   = tcp_v6_conn_request,
1912	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
1913	.get_peer	   = tcp_v4_get_peer,
1914	.net_header_len	   = sizeof(struct iphdr),
1915	.setsockopt	   = ipv6_setsockopt,
1916	.getsockopt	   = ipv6_getsockopt,
1917	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
1918	.sockaddr_len	   = sizeof(struct sockaddr_in6),
1919	.bind_conflict	   = inet6_csk_bind_conflict,
1920#ifdef CONFIG_COMPAT
1921	.compat_setsockopt = compat_ipv6_setsockopt,
1922	.compat_getsockopt = compat_ipv6_getsockopt,
1923#endif
 
1924};
1925
1926#ifdef CONFIG_TCP_MD5SIG
1927static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
1928	.md5_lookup	=	tcp_v4_md5_lookup,
1929	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
1930	.md5_add	=	tcp_v6_md5_add_func,
1931	.md5_parse	=	tcp_v6_parse_md5_keys,
1932};
1933#endif
1934
1935/* NOTE: A lot of things are set to zero explicitly by the call to
1936 *       sk_alloc(), so they need not be done here.
1937 */
1938static int tcp_v6_init_sock(struct sock *sk)
1939{
1940	struct inet_connection_sock *icsk = inet_csk(sk);
1941	struct tcp_sock *tp = tcp_sk(sk);
1942
1943	skb_queue_head_init(&tp->out_of_order_queue);
1944	tcp_init_xmit_timers(sk);
1945	tcp_prequeue_init(tp);
1946
1947	icsk->icsk_rto = TCP_TIMEOUT_INIT;
1948	tp->mdev = TCP_TIMEOUT_INIT;
1949
1950	/* So many TCP implementations out there (incorrectly) count the
1951	 * initial SYN frame in their delayed-ACK and congestion control
1952	 * algorithms that we must have the following bandaid to talk
1953	 * efficiently to them.  -DaveM
1954	 */
1955	tp->snd_cwnd = 2;
1956
1957	/* See draft-stevens-tcpca-spec-01 for discussion of the
1958	 * initialization of these values.
1959	 */
1960	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
1961	tp->snd_cwnd_clamp = ~0;
1962	tp->mss_cache = TCP_MSS_DEFAULT;
1963
1964	tp->reordering = sysctl_tcp_reordering;
1965
1966	sk->sk_state = TCP_CLOSE;
1967
1968	icsk->icsk_af_ops = &ipv6_specific;
1969	icsk->icsk_ca_ops = &tcp_init_congestion_ops;
1970	icsk->icsk_sync_mss = tcp_sync_mss;
1971	sk->sk_write_space = sk_stream_write_space;
1972	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
1973
1974#ifdef CONFIG_TCP_MD5SIG
1975	tp->af_specific = &tcp_sock_ipv6_specific;
1976#endif
1977
1978	/* TCP Cookie Transactions */
1979	if (sysctl_tcp_cookie_size > 0) {
1980		/* Default, cookies without s_data_payload. */
1981		tp->cookie_values =
1982			kzalloc(sizeof(*tp->cookie_values),
1983				sk->sk_allocation);
1984		if (tp->cookie_values != NULL)
1985			kref_init(&tp->cookie_values->kref);
1986	}
1987	/* Presumed zeroed, in order of appearance:
1988	 *	cookie_in_always, cookie_out_never,
1989	 *	s_data_constant, s_data_in, s_data_out
1990	 */
1991	sk->sk_sndbuf = sysctl_tcp_wmem[1];
1992	sk->sk_rcvbuf = sysctl_tcp_rmem[1];
1993
1994	local_bh_disable();
1995	percpu_counter_inc(&tcp_sockets_allocated);
1996	local_bh_enable();
1997
1998	return 0;
1999}
2000
2001static void tcp_v6_destroy_sock(struct sock *sk)
2002{
2003#ifdef CONFIG_TCP_MD5SIG
2004	/* Clean up the MD5 key list */
2005	if (tcp_sk(sk)->md5sig_info)
2006		tcp_v6_clear_md5_list(sk);
2007#endif
2008	tcp_v4_destroy_sock(sk);
2009	inet6_destroy_sock(sk);
2010}
2011
2012#ifdef CONFIG_PROC_FS
2013/* Proc filesystem TCPv6 sock list dumping. */
2014static void get_openreq6(struct seq_file *seq,
2015			 struct sock *sk, struct request_sock *req, int i, int uid)
2016{
2017	int ttd = req->expires - jiffies;
2018	const struct in6_addr *src = &inet6_rsk(req)->loc_addr;
2019	const struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;
2020
2021	if (ttd < 0)
2022		ttd = 0;
2023
2024	seq_printf(seq,
2025		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2026		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
2027		   i,
2028		   src->s6_addr32[0], src->s6_addr32[1],
2029		   src->s6_addr32[2], src->s6_addr32[3],
2030		   ntohs(inet_rsk(req)->loc_port),
2031		   dest->s6_addr32[0], dest->s6_addr32[1],
2032		   dest->s6_addr32[2], dest->s6_addr32[3],
2033		   ntohs(inet_rsk(req)->rmt_port),
2034		   TCP_SYN_RECV,
2035		   0,0, /* could print option size, but that is af dependent. */
2036		   1,   /* timers active (only the expire timer) */
2037		   jiffies_to_clock_t(ttd),
2038		   req->retrans,
2039		   uid,
 
2040		   0,  /* non standard timer */
2041		   0, /* open_requests have no inode */
2042		   0, req);
2043}
2044
2045static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
2046{
2047	const struct in6_addr *dest, *src;
2048	__u16 destp, srcp;
2049	int timer_active;
2050	unsigned long timer_expires;
2051	struct inet_sock *inet = inet_sk(sp);
2052	struct tcp_sock *tp = tcp_sk(sp);
2053	const struct inet_connection_sock *icsk = inet_csk(sp);
2054	struct ipv6_pinfo *np = inet6_sk(sp);
 
 
2055
2056	dest  = &np->daddr;
2057	src   = &np->rcv_saddr;
2058	destp = ntohs(inet->inet_dport);
2059	srcp  = ntohs(inet->inet_sport);
2060
2061	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
 
 
2062		timer_active	= 1;
2063		timer_expires	= icsk->icsk_timeout;
2064	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2065		timer_active	= 4;
2066		timer_expires	= icsk->icsk_timeout;
2067	} else if (timer_pending(&sp->sk_timer)) {
2068		timer_active	= 2;
2069		timer_expires	= sp->sk_timer.expires;
2070	} else {
2071		timer_active	= 0;
2072		timer_expires = jiffies;
2073	}
2074
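	/* One /proc/net/tcp6 line: slot, local and remote address:port in hex,
	 * state, tx/rx queue sizes, timer type and expiry, retransmits, uid,
	 * probes, inode, refcount, socket pointer, rto, ato, quick-ack and
	 * pingpong flags, cwnd, and ssthresh (-1 while in initial slow start).
	 */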
2075	seq_printf(seq,
2076		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2077		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %lu %lu %u %u %d\n",
2078		   i,
2079		   src->s6_addr32[0], src->s6_addr32[1],
2080		   src->s6_addr32[2], src->s6_addr32[3], srcp,
2081		   dest->s6_addr32[0], dest->s6_addr32[1],
2082		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
2083		   sp->sk_state,
2084		   tp->write_seq-tp->snd_una,
2085		   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
2086		   timer_active,
2087		   jiffies_to_clock_t(timer_expires - jiffies),
2088		   icsk->icsk_retransmits,
2089		   sock_i_uid(sp),
2090		   icsk->icsk_probes_out,
2091		   sock_i_ino(sp),
2092		   atomic_read(&sp->sk_refcnt), sp,
2093		   jiffies_to_clock_t(icsk->icsk_rto),
2094		   jiffies_to_clock_t(icsk->icsk_ack.ato),
2095		   (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
2096		   tp->snd_cwnd,
2097		   tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh
 
 
2098		   );
2099}
2100
2101static void get_timewait6_sock(struct seq_file *seq,
2102			       struct inet_timewait_sock *tw, int i)
2103{
 
2104	const struct in6_addr *dest, *src;
2105	__u16 destp, srcp;
2106	struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
2107	int ttd = tw->tw_ttd - jiffies;
2108
2109	if (ttd < 0)
2110		ttd = 0;
2111
2112	dest = &tw6->tw_v6_daddr;
2113	src  = &tw6->tw_v6_rcv_saddr;
2114	destp = ntohs(tw->tw_dport);
2115	srcp  = ntohs(tw->tw_sport);
2116
2117	seq_printf(seq,
2118		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2119		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
2120		   i,
2121		   src->s6_addr32[0], src->s6_addr32[1],
2122		   src->s6_addr32[2], src->s6_addr32[3], srcp,
2123		   dest->s6_addr32[0], dest->s6_addr32[1],
2124		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
2125		   tw->tw_substate, 0, 0,
2126		   3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
2127		   atomic_read(&tw->tw_refcnt), tw);
2128}
2129
2130static int tcp6_seq_show(struct seq_file *seq, void *v)
2131{
2132	struct tcp_iter_state *st;
 
2133
2134	if (v == SEQ_START_TOKEN) {
2135		seq_puts(seq,
2136			 "  sl  "
2137			 "local_address                         "
2138			 "remote_address                        "
2139			 "st tx_queue rx_queue tr tm->when retrnsmt"
2140			 "   uid  timeout inode\n");
2141		goto out;
2142	}
2143	st = seq->private;
2144
2145	switch (st->state) {
2146	case TCP_SEQ_STATE_LISTENING:
2147	case TCP_SEQ_STATE_ESTABLISHED:
2148		get_tcp6_sock(seq, v, st->num);
2149		break;
2150	case TCP_SEQ_STATE_OPENREQ:
2151		get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
2152		break;
2153	case TCP_SEQ_STATE_TIME_WAIT:
2154		get_timewait6_sock(seq, v, st->num);
2155		break;
2156	}
 
 
2157out:
2158	return 0;
2159}
2160
2161static struct tcp_seq_afinfo tcp6_seq_afinfo = {
2162	.name		= "tcp6",
2163	.family		= AF_INET6,
2164	.seq_fops	= {
2165		.owner		= THIS_MODULE,
2166	},
2167	.seq_ops	= {
2168		.show		= tcp6_seq_show,
2169	},
2170};
2171
2172int __net_init tcp6_proc_init(struct net *net)
2173{
2174	return tcp_proc_register(net, &tcp6_seq_afinfo);
 
 
 
2175}
2176
2177void tcp6_proc_exit(struct net *net)
2178{
2179	tcp_proc_unregister(net, &tcp6_seq_afinfo);
2180}
2181#endif
2182
2183struct proto tcpv6_prot = {
2184	.name			= "TCPv6",
2185	.owner			= THIS_MODULE,
2186	.close			= tcp_close,
 
2187	.connect		= tcp_v6_connect,
2188	.disconnect		= tcp_disconnect,
2189	.accept			= inet_csk_accept,
2190	.ioctl			= tcp_ioctl,
2191	.init			= tcp_v6_init_sock,
2192	.destroy		= tcp_v6_destroy_sock,
2193	.shutdown		= tcp_shutdown,
2194	.setsockopt		= tcp_setsockopt,
2195	.getsockopt		= tcp_getsockopt,
 
2196	.recvmsg		= tcp_recvmsg,
2197	.sendmsg		= tcp_sendmsg,
2198	.sendpage		= tcp_sendpage,
2199	.backlog_rcv		= tcp_v6_do_rcv,
2200	.hash			= tcp_v6_hash,
 
2201	.unhash			= inet_unhash,
2202	.get_port		= inet_csk_get_port,
2203	.enter_memory_pressure	= tcp_enter_memory_pressure,
 
 
2204	.sockets_allocated	= &tcp_sockets_allocated,
2205	.memory_allocated	= &tcp_memory_allocated,
2206	.memory_pressure	= &tcp_memory_pressure,
2207	.orphan_count		= &tcp_orphan_count,
2208	.sysctl_mem		= sysctl_tcp_mem,
2209	.sysctl_wmem		= sysctl_tcp_wmem,
2210	.sysctl_rmem		= sysctl_tcp_rmem,
2211	.max_header		= MAX_TCP_HEADER,
2212	.obj_size		= sizeof(struct tcp6_sock),
2213	.slab_flags		= SLAB_DESTROY_BY_RCU,
2214	.twsk_prot		= &tcp6_timewait_sock_ops,
2215	.rsk_prot		= &tcp6_request_sock_ops,
2216	.h.hashinfo		= &tcp_hashinfo,
2217	.no_autobind		= true,
2218#ifdef CONFIG_COMPAT
2219	.compat_setsockopt	= compat_tcp_setsockopt,
2220	.compat_getsockopt	= compat_tcp_getsockopt,
2221#endif
 
2222};
2223
2224static const struct inet6_protocol tcpv6_protocol = {
2225	.handler	=	tcp_v6_rcv,
2226	.err_handler	=	tcp_v6_err,
2227	.gso_send_check	=	tcp_v6_gso_send_check,
2228	.gso_segment	=	tcp_tso_segment,
2229	.gro_receive	=	tcp6_gro_receive,
2230	.gro_complete	=	tcp6_gro_complete,
2231	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
2232};
2233
2234static struct inet_protosw tcpv6_protosw = {
2235	.type		=	SOCK_STREAM,
2236	.protocol	=	IPPROTO_TCP,
2237	.prot		=	&tcpv6_prot,
2238	.ops		=	&inet6_stream_ops,
2239	.no_check	=	0,
2240	.flags		=	INET_PROTOSW_PERMANENT |
2241				INET_PROTOSW_ICSK,
2242};
2243
2244static int __net_init tcpv6_net_init(struct net *net)
2245{
2246	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
2247				    SOCK_RAW, IPPROTO_TCP, net);
2248}
2249
2250static void __net_exit tcpv6_net_exit(struct net *net)
2251{
2252	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
2253}
2254
2255static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
2256{
2257	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
2258}
2259
2260static struct pernet_operations tcpv6_net_ops = {
2261	.init	    = tcpv6_net_init,
2262	.exit	    = tcpv6_net_exit,
2263	.exit_batch = tcpv6_net_exit_batch,
2264};
2265
2266int __init tcpv6_init(void)
2267{
2268	int ret;
2269
2270	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
2271	if (ret)
2272		goto out;
2273
2274	/* register inet6 protocol */
2275	ret = inet6_register_protosw(&tcpv6_protosw);
2276	if (ret)
2277		goto out_tcpv6_protocol;
2278
2279	ret = register_pernet_subsys(&tcpv6_net_ops);
2280	if (ret)
2281		goto out_tcpv6_protosw;
2282out:
2283	return ret;
2284
2285out_tcpv6_protocol:
2286	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2287out_tcpv6_protosw:
2288	inet6_unregister_protosw(&tcpv6_protosw);
 
 
2289	goto out;
2290}
2291
2292void tcpv6_exit(void)
2293{
2294	unregister_pernet_subsys(&tcpv6_net_ops);
2295	inet6_unregister_protosw(&tcpv6_protosw);
2296	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2297}
v5.4
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 *	TCP over IPv6
   4 *	Linux INET6 implementation
   5 *
   6 *	Authors:
   7 *	Pedro Roque		<roque@di.fc.ul.pt>
   8 *
   9 *	Based on:
  10 *	linux/net/ipv4/tcp.c
  11 *	linux/net/ipv4/tcp_input.c
  12 *	linux/net/ipv4/tcp_output.c
  13 *
  14 *	Fixes:
  15 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
  16 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
  17 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
  18 *					a single port at the same time.
  19 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
  20 */
  21
  22#include <linux/bottom_half.h>
  23#include <linux/module.h>
  24#include <linux/errno.h>
  25#include <linux/types.h>
  26#include <linux/socket.h>
  27#include <linux/sockios.h>
  28#include <linux/net.h>
  29#include <linux/jiffies.h>
  30#include <linux/in.h>
  31#include <linux/in6.h>
  32#include <linux/netdevice.h>
  33#include <linux/init.h>
  34#include <linux/jhash.h>
  35#include <linux/ipsec.h>
  36#include <linux/times.h>
  37#include <linux/slab.h>
  38#include <linux/uaccess.h>
  39#include <linux/ipv6.h>
  40#include <linux/icmpv6.h>
  41#include <linux/random.h>
  42#include <linux/indirect_call_wrapper.h>
  43
  44#include <net/tcp.h>
  45#include <net/ndisc.h>
  46#include <net/inet6_hashtables.h>
  47#include <net/inet6_connection_sock.h>
  48#include <net/ipv6.h>
  49#include <net/transp_v6.h>
  50#include <net/addrconf.h>
  51#include <net/ip6_route.h>
  52#include <net/ip6_checksum.h>
  53#include <net/inet_ecn.h>
  54#include <net/protocol.h>
  55#include <net/xfrm.h>
  56#include <net/snmp.h>
  57#include <net/dsfield.h>
  58#include <net/timewait_sock.h>
 
  59#include <net/inet_common.h>
  60#include <net/secure_seq.h>
  61#include <net/busy_poll.h>
 
  62
  63#include <linux/proc_fs.h>
  64#include <linux/seq_file.h>
  65
  66#include <crypto/hash.h>
  67#include <linux/scatterlist.h>
  68
  69#include <trace/events/tcp.h>
  70
  71static void	tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
  72static void	tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
  73				      struct request_sock *req);
  74
  75static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
 
 
 
  76
  77static const struct inet_connection_sock_af_ops ipv6_mapped;
  78static const struct inet_connection_sock_af_ops ipv6_specific;
  79#ifdef CONFIG_TCP_MD5SIG
  80static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
  81static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
  82#else
  83static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
  84						   const struct in6_addr *addr)
  85{
  86	return NULL;
  87}
  88#endif
  89
  90/* Helper returning the inet6 address from a given tcp socket.
  91 * It can be used in TCP stack instead of inet6_sk(sk).
  92 * This avoids a dereference and allow compiler optimizations.
  93 * It is a specialized version of inet6_sk_generic().
  94 */
  95static struct ipv6_pinfo *tcp_inet6_sk(const struct sock *sk)
  96{
  97	unsigned int offset = sizeof(struct tcp6_sock) - sizeof(struct ipv6_pinfo);
  98
  99	return (struct ipv6_pinfo *)(((u8 *)sk) + offset);
 100}
 101
 102static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
 103{
 104	struct dst_entry *dst = skb_dst(skb);
 105
 106	if (dst && dst_hold_safe(dst)) {
 107		const struct rt6_info *rt = (const struct rt6_info *)dst;
 108
 109		sk->sk_rx_dst = dst;
 110		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
 111		tcp_inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
 112	}
 113}
 114
 115static u32 tcp_v6_init_seq(const struct sk_buff *skb)
 
 
 
 116{
 117	return secure_tcpv6_seq(ipv6_hdr(skb)->daddr.s6_addr32,
 118				ipv6_hdr(skb)->saddr.s6_addr32,
 119				tcp_hdr(skb)->dest,
 120				tcp_hdr(skb)->source);
 121}
 122
 123static u32 tcp_v6_init_ts_off(const struct net *net, const struct sk_buff *skb)
 124{
 125	return secure_tcpv6_ts_off(net, ipv6_hdr(skb)->daddr.s6_addr32,
 126				   ipv6_hdr(skb)->saddr.s6_addr32);
 127}
 128
 129static int tcp_v6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
 130			      int addr_len)
 131{
 132	/* This check is replicated from tcp_v6_connect() and intended to
 133	 * prevent BPF program called below from accessing bytes that are out
 134	 * of the bound specified by user in addr_len.
 135	 */
 136	if (addr_len < SIN6_LEN_RFC2133)
 137		return -EINVAL;
 138
 139	sock_owned_by_me(sk);
 140
 141	return BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr);
 142}
 143
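/* Active open: validate the destination sockaddr, handle flow labels and
 * link-local scope ids, fall back to tcp_v4_connect() for v4-mapped
 * addresses, route the flow, pick a source address and start the
 * handshake via tcp_connect().
 */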
 144static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 145			  int addr_len)
 146{
 147	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
 148	struct inet_sock *inet = inet_sk(sk);
 149	struct inet_connection_sock *icsk = inet_csk(sk);
 150	struct ipv6_pinfo *np = tcp_inet6_sk(sk);
 151	struct tcp_sock *tp = tcp_sk(sk);
 152	struct in6_addr *saddr = NULL, *final_p, final;
 153	struct ipv6_txoptions *opt;
 154	struct flowi6 fl6;
 155	struct dst_entry *dst;
 156	int addr_type;
 157	int err;
 158	struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;
 159
 160	if (addr_len < SIN6_LEN_RFC2133)
 161		return -EINVAL;
 162
 163	if (usin->sin6_family != AF_INET6)
 164		return -EAFNOSUPPORT;
 165
 166	memset(&fl6, 0, sizeof(fl6));
 167
 168	if (np->sndflow) {
 169		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
 170		IP6_ECN_flow_init(fl6.flowlabel);
 171		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
 172			struct ip6_flowlabel *flowlabel;
 173			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
 174			if (IS_ERR(flowlabel))
 175				return -EINVAL;
 
 176			fl6_sock_release(flowlabel);
 177		}
 178	}
 179
 180	/*
 181	 *	connect() to INADDR_ANY means loopback (BSD'ism).
 182	 */
 183
 184	if (ipv6_addr_any(&usin->sin6_addr)) {
 185		if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
 186			ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
 187					       &usin->sin6_addr);
 188		else
 189			usin->sin6_addr = in6addr_loopback;
 190	}
 191
 192	addr_type = ipv6_addr_type(&usin->sin6_addr);
 193
 194	if (addr_type & IPV6_ADDR_MULTICAST)
 195		return -ENETUNREACH;
 196
 197	if (addr_type&IPV6_ADDR_LINKLOCAL) {
 198		if (addr_len >= sizeof(struct sockaddr_in6) &&
 199		    usin->sin6_scope_id) {
 200			/* If interface is set while binding, indices
 201			 * must coincide.
 202			 */
 203			if (!sk_dev_equal_l3scope(sk, usin->sin6_scope_id))
 
 204				return -EINVAL;
 205
 206			sk->sk_bound_dev_if = usin->sin6_scope_id;
 207		}
 208
 209		/* Connect to link-local address requires an interface */
 210		if (!sk->sk_bound_dev_if)
 211			return -EINVAL;
 212	}
 213
 214	if (tp->rx_opt.ts_recent_stamp &&
 215	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
 216		tp->rx_opt.ts_recent = 0;
 217		tp->rx_opt.ts_recent_stamp = 0;
 218		WRITE_ONCE(tp->write_seq, 0);
 219	}
 220
 221	sk->sk_v6_daddr = usin->sin6_addr;
 222	np->flow_label = fl6.flowlabel;
 223
 224	/*
 225	 *	TCP over IPv4
 226	 */
 227
 228	if (addr_type & IPV6_ADDR_MAPPED) {
 229		u32 exthdrlen = icsk->icsk_ext_hdr_len;
 230		struct sockaddr_in sin;
 231
 
 
 232		if (__ipv6_only_sock(sk))
 233			return -ENETUNREACH;
 234
 235		sin.sin_family = AF_INET;
 236		sin.sin_port = usin->sin6_port;
 237		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
 238
 239		icsk->icsk_af_ops = &ipv6_mapped;
 240		sk->sk_backlog_rcv = tcp_v4_do_rcv;
 241#ifdef CONFIG_TCP_MD5SIG
 242		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
 243#endif
 244
 245		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
 246
 247		if (err) {
 248			icsk->icsk_ext_hdr_len = exthdrlen;
 249			icsk->icsk_af_ops = &ipv6_specific;
 250			sk->sk_backlog_rcv = tcp_v6_do_rcv;
 251#ifdef CONFIG_TCP_MD5SIG
 252			tp->af_specific = &tcp_sock_ipv6_specific;
 253#endif
 254			goto failure;
 
 
 
 
 255		}
 256		np->saddr = sk->sk_v6_rcv_saddr;
 257
 258		return err;
 259	}
 260
 261	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
 262		saddr = &sk->sk_v6_rcv_saddr;
 263
 264	fl6.flowi6_proto = IPPROTO_TCP;
 265	fl6.daddr = sk->sk_v6_daddr;
 266	fl6.saddr = saddr ? *saddr : np->saddr;
 
 267	fl6.flowi6_oif = sk->sk_bound_dev_if;
 268	fl6.flowi6_mark = sk->sk_mark;
 269	fl6.fl6_dport = usin->sin6_port;
 270	fl6.fl6_sport = inet->inet_sport;
 271	fl6.flowi6_uid = sk->sk_uid;
 272
 273	opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
 274	final_p = fl6_update_dst(&fl6, opt, &final);
 275
 276	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
 277
 278	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
 279	if (IS_ERR(dst)) {
 280		err = PTR_ERR(dst);
 281		goto failure;
 282	}
 283
 284	if (!saddr) {
 285		saddr = &fl6.saddr;
 286		sk->sk_v6_rcv_saddr = *saddr;
 287	}
 288
 289	/* set the source address */
 290	np->saddr = *saddr;
 291	inet->inet_rcv_saddr = LOOPBACK4_IPV6;
 292
 293	sk->sk_gso_type = SKB_GSO_TCPV6;
 294	ip6_dst_store(sk, dst, NULL, NULL);
 295
 296	icsk->icsk_ext_hdr_len = 0;
 297	if (opt)
 298		icsk->icsk_ext_hdr_len = opt->opt_flen +
 299					 opt->opt_nflen;
 300
 301	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
 302
 303	inet->inet_dport = usin->sin6_port;
 304
 305	tcp_set_state(sk, TCP_SYN_SENT);
 306	err = inet6_hash_connect(tcp_death_row, sk);
 307	if (err)
 308		goto late_failure;
 309
 310	sk_set_txhash(sk);
 311
 312	if (likely(!tp->repair)) {
 313		if (!tp->write_seq)
 314			WRITE_ONCE(tp->write_seq,
 315				   secure_tcpv6_seq(np->saddr.s6_addr32,
 316						    sk->sk_v6_daddr.s6_addr32,
 317						    inet->inet_sport,
 318						    inet->inet_dport));
 319		tp->tsoffset = secure_tcpv6_ts_off(sock_net(sk),
 320						   np->saddr.s6_addr32,
 321						   sk->sk_v6_daddr.s6_addr32);
 322	}
 323
 324	if (tcp_fastopen_defer_connect(sk, &err))
 325		return err;
 326	if (err)
 327		goto late_failure;
 328
 329	err = tcp_connect(sk);
 330	if (err)
 331		goto late_failure;
 332
 333	return 0;
 334
 335late_failure:
 336	tcp_set_state(sk, TCP_CLOSE);
 
 337failure:
 338	inet->inet_dport = 0;
 339	sk->sk_route_caps = 0;
 340	return err;
 341}
 342
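/* The path MTU shrank (tp->mtu_info was filled in by tcp_v6_err()):
 * refresh the cached route, lower the cached MSS if needed and
 * retransmit segments that no longer fit.
 */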
 343static void tcp_v6_mtu_reduced(struct sock *sk)
 344{
 345	struct dst_entry *dst;
 346
 347	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
 348		return;
 349
 350	dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
 351	if (!dst)
 352		return;
 353
 354	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
 355		tcp_sync_mss(sk, dst_mtu(dst));
 356		tcp_simple_retransmit(sk);
 357	}
 358}
 359
 360static int tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 361		u8 type, u8 code, int offset, __be32 info)
 362{
 363	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
 364	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
 365	struct net *net = dev_net(skb->dev);
 366	struct request_sock *fastopen;
 367	struct ipv6_pinfo *np;
 368	struct tcp_sock *tp;
 369	__u32 seq, snd_una;
 370	struct sock *sk;
 371	bool fatal;
 372	int err;
 
 
 
 373
 374	sk = __inet6_lookup_established(net, &tcp_hashinfo,
 375					&hdr->daddr, th->dest,
 376					&hdr->saddr, ntohs(th->source),
 377					skb->dev->ifindex, inet6_sdif(skb));
 378
 379	if (!sk) {
 380		__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
 381				  ICMP6_MIB_INERRORS);
 382		return -ENOENT;
 383	}
 384
 385	if (sk->sk_state == TCP_TIME_WAIT) {
 386		inet_twsk_put(inet_twsk(sk));
 387		return 0;
 388	}
 389	seq = ntohl(th->seq);
 390	fatal = icmpv6_err_convert(type, code, &err);
 391	if (sk->sk_state == TCP_NEW_SYN_RECV) {
 392		tcp_req_err(sk, seq, fatal);
 393		return 0;
 394	}
 395
 396	bh_lock_sock(sk);
 397	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
 398		__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
 399
 400	if (sk->sk_state == TCP_CLOSE)
 401		goto out;
 402
 403	if (ipv6_hdr(skb)->hop_limit < tcp_inet6_sk(sk)->min_hopcount) {
 404		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
 405		goto out;
 406	}
 407
 408	tp = tcp_sk(sk);
  409	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child()) */
 410	fastopen = rcu_dereference(tp->fastopen_rsk);
 411	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
 412	if (sk->sk_state != TCP_LISTEN &&
 413	    !between(seq, snd_una, tp->snd_nxt)) {
 414		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
 415		goto out;
 416	}
 417
 418	np = tcp_inet6_sk(sk);
 
 
 
 419
 420	if (type == NDISC_REDIRECT) {
 421		if (!sock_owned_by_user(sk)) {
 422			struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
 
 423
 424			if (dst)
 425				dst->ops->redirect(dst, sk, skb);
 426		}
 427		goto out;
 428	}
 429
 430	if (type == ICMPV6_PKT_TOOBIG) {
 431		/* We are not interested in TCP_LISTEN and open_requests
  432		 * (SYN-ACKs sent out by Linux are always < 576 bytes so
 433		 * they should go through unfragmented).
 434		 */
 435		if (sk->sk_state == TCP_LISTEN)
 436			goto out;
 437
 438		if (!ip6_sk_accept_pmtu(sk))
 439			goto out;
 440
 441		tp->mtu_info = ntohl(info);
 442		if (!sock_owned_by_user(sk))
 443			tcp_v6_mtu_reduced(sk);
 444		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
 445					   &sk->sk_tsq_flags))
 446			sock_hold(sk);
 447		goto out;
 448	}
 449
 
 450
  451	/* Might be for a request_sock */
 452	switch (sk->sk_state) {
 453	case TCP_SYN_SENT:
 454	case TCP_SYN_RECV:
  455		/* Only in fast or simultaneous open. If a fast open socket is
  456		 * already accepted it is treated as a connected one below.
 457		 */
 458		if (fastopen && !fastopen->sk)
 459			break;
 460
 
 
 
 461		if (!sock_owned_by_user(sk)) {
 462			sk->sk_err = err;
 463			sk->sk_error_report(sk);		/* Wake people up to see the error (see connect in sock.c) */
 464
 465			tcp_done(sk);
 466		} else
 467			sk->sk_err_soft = err;
 468		goto out;
 469	}
 470
 471	if (!sock_owned_by_user(sk) && np->recverr) {
 472		sk->sk_err = err;
 473		sk->sk_error_report(sk);
 474	} else
 475		sk->sk_err_soft = err;
 476
 477out:
 478	bh_unlock_sock(sk);
 479	sock_put(sk);
 480	return 0;
 481}
 482
 483
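/* Send a SYN-ACK for a pending connection request: route it if no dst was
 * supplied, build the segment with tcp_make_synack(), fill in the IPv6
 * checksum and transmit it with ip6_xmit().
 */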
 484static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
 485			      struct flowi *fl,
 486			      struct request_sock *req,
 487			      struct tcp_fastopen_cookie *foc,
 488			      enum tcp_synack_type synack_type)
 489{
 490	struct inet_request_sock *ireq = inet_rsk(req);
 491	struct ipv6_pinfo *np = tcp_inet6_sk(sk);
 492	struct ipv6_txoptions *opt;
 493	struct flowi6 *fl6 = &fl->u.ip6;
 494	struct sk_buff *skb;
 495	int err = -ENOMEM;
 496
 497	/* First, grab a route. */
 498	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
 499					       IPPROTO_TCP)) == NULL)
 500		goto done;
 501
 502	skb = tcp_make_synack(sk, dst, req, foc, synack_type);
 
 503
 504	if (skb) {
 505		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
 506				    &ireq->ir_v6_rmt_addr);
 507
 508		fl6->daddr = ireq->ir_v6_rmt_addr;
 509		if (np->repflow && ireq->pktopts)
 510			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
 511
 512		rcu_read_lock();
 513		opt = ireq->ipv6_opt;
 514		if (!opt)
 515			opt = rcu_dereference(np->opt);
 516		err = ip6_xmit(sk, skb, fl6, sk->sk_mark, opt, np->tclass,
 517			       sk->sk_priority);
 518		rcu_read_unlock();
 519		err = net_xmit_eval(err);
 520	}
 521
 522done:
 
 
 
 523	return err;
 524}
 525
 526
 527static void tcp_v6_reqsk_destructor(struct request_sock *req)
 528{
 529	kfree(inet_rsk(req)->ipv6_opt);
 530	kfree_skb(inet_rsk(req)->pktopts);
 531}
 532
 533#ifdef CONFIG_TCP_MD5SIG
 534static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
 535						   const struct in6_addr *addr)
 536{
 537	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
 538}
 539
 540static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
 541						const struct sock *addr_sk)
 542{
 543	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
 544}
 545
 546static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
 547				 char __user *optval, int optlen)
 548{
 549	struct tcp_md5sig cmd;
 550	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
 551	u8 prefixlen;
 552
 553	if (optlen < sizeof(cmd))
 554		return -EINVAL;
 555
 556	if (copy_from_user(&cmd, optval, sizeof(cmd)))
 557		return -EFAULT;
 558
 559	if (sin6->sin6_family != AF_INET6)
 560		return -EINVAL;
 561
 562	if (optname == TCP_MD5SIG_EXT &&
 563	    cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
 564		prefixlen = cmd.tcpm_prefixlen;
 565		if (prefixlen > 128 || (ipv6_addr_v4mapped(&sin6->sin6_addr) &&
 566					prefixlen > 32))
 567			return -EINVAL;
 568	} else {
 569		prefixlen = ipv6_addr_v4mapped(&sin6->sin6_addr) ? 32 : 128;
 570	}
 571
 572	if (!cmd.tcpm_keylen) {
 
 
 573		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
 574			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
 575					      AF_INET, prefixlen);
 576		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
 577				      AF_INET6, prefixlen);
 578	}
 579
 580	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
 581		return -EINVAL;
 582
 583	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
 584		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
 585				      AF_INET, prefixlen, cmd.tcpm_key,
 586				      cmd.tcpm_keylen, GFP_KERNEL);
 587
 588	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
 589			      AF_INET6, prefixlen, cmd.tcpm_key,
 590			      cmd.tcpm_keylen, GFP_KERNEL);
 591}
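
/* A minimal userspace sketch of feeding a key to the parser above via
 * setsockopt(); "fd" and "peer_sin6" are illustrative names and error
 * handling is omitted:
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *
 *	memcpy(&md5.tcpm_addr, &peer_sin6, sizeof(peer_sin6));
 *	memcpy(md5.tcpm_key, "secret", md5.tcpm_keylen);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 */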
 592
 593static int tcp_v6_md5_hash_headers(struct tcp_md5sig_pool *hp,
 594				   const struct in6_addr *daddr,
 595				   const struct in6_addr *saddr,
 596				   const struct tcphdr *th, int nbytes)
 597{
 598	struct tcp6_pseudohdr *bp;
 599	struct scatterlist sg;
 600	struct tcphdr *_th;
 601
 602	bp = hp->scratch;
 603	/* 1. TCP pseudo-header (RFC2460) */
 604	bp->saddr = *saddr;
 605	bp->daddr = *daddr;
 606	bp->protocol = cpu_to_be32(IPPROTO_TCP);
 607	bp->len = cpu_to_be32(nbytes);
 608
 609	_th = (struct tcphdr *)(bp + 1);
 610	memcpy(_th, th, sizeof(*th));
 611	_th->check = 0;
 612
 613	sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
 614	ahash_request_set_crypt(hp->md5_req, &sg, NULL,
 615				sizeof(*bp) + sizeof(*th));
 616	return crypto_ahash_update(hp->md5_req);
 617}
 618
 619static int tcp_v6_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
 620			       const struct in6_addr *daddr, struct in6_addr *saddr,
 621			       const struct tcphdr *th)
 622{
 623	struct tcp_md5sig_pool *hp;
 624	struct ahash_request *req;
 625
 626	hp = tcp_get_md5sig_pool();
 627	if (!hp)
 628		goto clear_hash_noput;
 629	req = hp->md5_req;
 630
 631	if (crypto_ahash_init(req))
 
 
 632		goto clear_hash;
 633	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
 634		goto clear_hash;
 635	if (tcp_md5_hash_key(hp, key))
 636		goto clear_hash;
 637	ahash_request_set_crypt(req, NULL, md5_hash, 0);
 638	if (crypto_ahash_final(req))
 639		goto clear_hash;
 640
 641	tcp_put_md5sig_pool();
 642	return 0;
 643
 644clear_hash:
 645	tcp_put_md5sig_pool();
 646clear_hash_noput:
 647	memset(md5_hash, 0, 16);
 648	return 1;
 649}
 650
 651static int tcp_v6_md5_hash_skb(char *md5_hash,
 652			       const struct tcp_md5sig_key *key,
 653			       const struct sock *sk,
 654			       const struct sk_buff *skb)
 655{
 656	const struct in6_addr *saddr, *daddr;
 657	struct tcp_md5sig_pool *hp;
 658	struct ahash_request *req;
 659	const struct tcphdr *th = tcp_hdr(skb);
 660
 661	if (sk) { /* valid for establish/request sockets */
 662		saddr = &sk->sk_v6_rcv_saddr;
 663		daddr = &sk->sk_v6_daddr;
 
 
 
 664	} else {
 665		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
 666		saddr = &ip6h->saddr;
 667		daddr = &ip6h->daddr;
 668	}
 669
 670	hp = tcp_get_md5sig_pool();
 671	if (!hp)
 672		goto clear_hash_noput;
 673	req = hp->md5_req;
 674
 675	if (crypto_ahash_init(req))
 676		goto clear_hash;
 677
 678	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, skb->len))
 
 
 679		goto clear_hash;
 680	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
 681		goto clear_hash;
 682	if (tcp_md5_hash_key(hp, key))
 683		goto clear_hash;
 684	ahash_request_set_crypt(req, NULL, md5_hash, 0);
 685	if (crypto_ahash_final(req))
 686		goto clear_hash;
 687
 688	tcp_put_md5sig_pool();
 689	return 0;
 690
 691clear_hash:
 692	tcp_put_md5sig_pool();
 693clear_hash_noput:
 694	memset(md5_hash, 0, 16);
 695	return 1;
 696}
 697
 698#endif
 699
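/* Check the TCP MD5 signature option of an incoming segment against the
 * key (if any) configured for the peer address; returns true when the
 * segment must be dropped (missing, unexpected or mismatching signature).
 */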
 700static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
 701				    const struct sk_buff *skb)
 702{
 703#ifdef CONFIG_TCP_MD5SIG
 704	const __u8 *hash_location = NULL;
 705	struct tcp_md5sig_key *hash_expected;
 706	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
 707	const struct tcphdr *th = tcp_hdr(skb);
 708	int genhash;
 709	u8 newhash[16];
 710
 711	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
 712	hash_location = tcp_parse_md5sig_option(th);
 713
 714	/* We've parsed the options - do we have a hash? */
 715	if (!hash_expected && !hash_location)
 716		return false;
 717
 718	if (hash_expected && !hash_location) {
 719		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
 720		return true;
 721	}
 722
 723	if (!hash_expected && hash_location) {
 724		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
 725		return true;
 726	}
 727
 728	/* check the signature */
 729	genhash = tcp_v6_md5_hash_skb(newhash,
 730				      hash_expected,
 731				      NULL, skb);
 732
 733	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
 734		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
 735		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
 736				     genhash ? "failed" : "mismatch",
 737				     &ip6h->saddr, ntohs(th->source),
 738				     &ip6h->daddr, ntohs(th->dest));
 739		return true;
 
 740	}
 741#endif
 742	return false;
 743}
 744
 745static void tcp_v6_init_req(struct request_sock *req,
 746			    const struct sock *sk_listener,
 747			    struct sk_buff *skb)
 748{
 749	bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);
 750	struct inet_request_sock *ireq = inet_rsk(req);
 751	const struct ipv6_pinfo *np = tcp_inet6_sk(sk_listener);
 752
 753	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
 754	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
 755
 756	/* So that link locals have meaning */
 757	if ((!sk_listener->sk_bound_dev_if || l3_slave) &&
 758	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
 759		ireq->ir_iif = tcp_v6_iif(skb);
 
 
 
 760
 761	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
 762	    (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
 763	     np->rxopt.bits.rxinfo ||
 764	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
 765	     np->rxopt.bits.rxohlim || np->repflow)) {
 766		refcount_inc(&skb->users);
 767		ireq->pktopts = skb;
 768	}
 
 
 769}
 770
 771static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
 772					  struct flowi *fl,
 773					  const struct request_sock *req)
 774{
 775	return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
 776}
 777
 778struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
 779	.family		=	AF_INET6,
 780	.obj_size	=	sizeof(struct tcp6_request_sock),
 781	.rtx_syn_ack	=	tcp_rtx_synack,
 782	.send_ack	=	tcp_v6_reqsk_send_ack,
 783	.destructor	=	tcp_v6_reqsk_destructor,
 784	.send_reset	=	tcp_v6_send_reset,
 785	.syn_ack_timeout =	tcp_syn_ack_timeout,
 786};
 787
 788static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
 789	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
 790				sizeof(struct ipv6hdr),
 791#ifdef CONFIG_TCP_MD5SIG
 792	.req_md5_lookup	=	tcp_v6_md5_lookup,
 793	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
 794#endif
 795	.init_req	=	tcp_v6_init_req,
 796#ifdef CONFIG_SYN_COOKIES
 797	.cookie_init_seq =	cookie_v6_init_sequence,
 798#endif
 799	.route_req	=	tcp_v6_route_req,
 800	.init_seq	=	tcp_v6_init_seq,
 801	.init_ts_off	=	tcp_v6_init_ts_off,
 802	.send_synack	=	tcp_v6_send_synack,
 803};
 804
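/* Build and transmit a bare reply segment (RST or ACK) for @skb: swap the
 * addresses and ports, optionally append timestamp and MD5 options, and
 * send it through the per-netns control socket, so no full socket for the
 * flow is required.
 */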
 805static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
 806				 u32 ack, u32 win, u32 tsval, u32 tsecr,
 807				 int oif, struct tcp_md5sig_key *key, int rst,
 808				 u8 tclass, __be32 label, u32 priority)
 809{
 810	const struct tcphdr *th = tcp_hdr(skb);
 811	struct tcphdr *t1;
 812	struct sk_buff *buff;
 813	struct flowi6 fl6;
 814	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
 815	struct sock *ctl_sk = net->ipv6.tcp_sk;
 816	unsigned int tot_len = sizeof(struct tcphdr);
 817	struct dst_entry *dst;
 818	__be32 *topt;
 819	__u32 mark = 0;
 820
 821	if (tsecr)
 822		tot_len += TCPOLEN_TSTAMP_ALIGNED;
 823#ifdef CONFIG_TCP_MD5SIG
 824	if (key)
 825		tot_len += TCPOLEN_MD5SIG_ALIGNED;
 826#endif
 827
 828	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
 829			 GFP_ATOMIC);
 830	if (!buff)
 831		return;
 832
 833	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
 834
 835	t1 = skb_push(buff, tot_len);
 836	skb_reset_transport_header(buff);
 837
 838	/* Swap the send and the receive. */
 839	memset(t1, 0, sizeof(*t1));
 840	t1->dest = th->source;
 841	t1->source = th->dest;
 842	t1->doff = tot_len / 4;
 843	t1->seq = htonl(seq);
 844	t1->ack_seq = htonl(ack);
 845	t1->ack = !rst || !th->ack;
 846	t1->rst = rst;
 847	t1->window = htons(win);
 848
 849	topt = (__be32 *)(t1 + 1);
 850
 851	if (tsecr) {
 852		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
 853				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
 854		*topt++ = htonl(tsval);
 855		*topt++ = htonl(tsecr);
 856	}
 857
 858#ifdef CONFIG_TCP_MD5SIG
 859	if (key) {
 860		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
 861				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
 862		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
 863				    &ipv6_hdr(skb)->saddr,
 864				    &ipv6_hdr(skb)->daddr, t1);
 865	}
 866#endif
 867
 868	memset(&fl6, 0, sizeof(fl6));
 869	fl6.daddr = ipv6_hdr(skb)->saddr;
 870	fl6.saddr = ipv6_hdr(skb)->daddr;
 871	fl6.flowlabel = label;
 872
 873	buff->ip_summed = CHECKSUM_PARTIAL;
 874	buff->csum = 0;
 875
 876	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
 877
 878	fl6.flowi6_proto = IPPROTO_TCP;
 879	if (rt6_need_strict(&fl6.daddr) && !oif)
 880		fl6.flowi6_oif = tcp_v6_iif(skb);
 881	else {
 882		if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
 883			oif = skb->skb_iif;
 884
 885		fl6.flowi6_oif = oif;
 886	}
 887
 888	if (sk) {
 889		if (sk->sk_state == TCP_TIME_WAIT) {
 890			mark = inet_twsk(sk)->tw_mark;
 891			/* autoflowlabel relies on buff->hash */
 892			skb_set_hash(buff, inet_twsk(sk)->tw_txhash,
 893				     PKT_HASH_TYPE_L4);
 894		} else {
 895			mark = sk->sk_mark;
 896		}
 897		buff->tstamp = tcp_transmit_time(sk);
 898	}
 899	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark) ?: mark;
 900	fl6.fl6_dport = t1->dest;
 901	fl6.fl6_sport = t1->source;
 902	fl6.flowi6_uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
 903	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
 904
  905	/* Pass a socket to ip6_dst_lookup whether or not it is for a RST;
  906	 * the underlying function will use it to retrieve the network
  907	 * namespace.
 908	 */
 909	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
 910	if (!IS_ERR(dst)) {
 911		skb_dst_set(buff, dst);
 912		ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass,
 913			 priority);
 914		TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
 915		if (rst)
 916			TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
 917		return;
 918	}
 919
 920	kfree_skb(buff);
 921}
 922
 923static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
 924{
 925	const struct tcphdr *th = tcp_hdr(skb);
 926	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
 927	u32 seq = 0, ack_seq = 0;
 928	struct tcp_md5sig_key *key = NULL;
 929#ifdef CONFIG_TCP_MD5SIG
 930	const __u8 *hash_location = NULL;
 931	unsigned char newhash[16];
 932	int genhash;
 933	struct sock *sk1 = NULL;
 934#endif
 935	__be32 label = 0;
 936	u32 priority = 0;
 937	struct net *net;
 938	int oif = 0;
 939
 940	if (th->rst)
 941		return;
 942
  943	/* If sk is not NULL, it means we did a successful lookup and the incoming
 944	 * route had to be correct. prequeue might have dropped our dst.
 945	 */
 946	if (!sk && !ipv6_unicast_destination(skb))
 947		return;
 948
 949	net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
 950#ifdef CONFIG_TCP_MD5SIG
 951	rcu_read_lock();
 952	hash_location = tcp_parse_md5sig_option(th);
 953	if (sk && sk_fullsock(sk)) {
 954		key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr);
 955	} else if (hash_location) {
 956		/*
  957		 * The active side is lost. Try to find the listening socket through
  958		 * the source port, and then find the md5 key through the listening socket.
  959		 * We are not losing security here:
  960		 * the incoming packet is checked against the md5 hash of the found key;
  961		 * no RST is generated if the md5 hash doesn't match.
 962		 */
 963		sk1 = inet6_lookup_listener(net,
 964					   &tcp_hashinfo, NULL, 0,
 965					   &ipv6h->saddr,
 966					   th->source, &ipv6h->daddr,
 967					   ntohs(th->source),
 968					   tcp_v6_iif_l3_slave(skb),
 969					   tcp_v6_sdif(skb));
 970		if (!sk1)
 971			goto out;
 972
 973		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
 974		if (!key)
 975			goto out;
 976
 977		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
 978		if (genhash || memcmp(hash_location, newhash, 16) != 0)
 979			goto out;
 980	}
 981#endif
 982
 983	if (th->ack)
 984		seq = ntohl(th->ack_seq);
 985	else
 986		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
 987			  (th->doff << 2);
 988
 989	if (sk) {
 990		oif = sk->sk_bound_dev_if;
 991		if (sk_fullsock(sk)) {
 992			const struct ipv6_pinfo *np = tcp_inet6_sk(sk);
 993
 994			trace_tcp_send_reset(sk, skb);
 995			if (np->repflow)
 996				label = ip6_flowlabel(ipv6h);
 997			priority = sk->sk_priority;
 998		}
 999		if (sk->sk_state == TCP_TIME_WAIT) {
1000			label = cpu_to_be32(inet_twsk(sk)->tw_flowlabel);
1001			priority = inet_twsk(sk)->tw_priority;
1002		}
1003	} else {
1004		if (net->ipv6.sysctl.flowlabel_reflect & FLOWLABEL_REFLECT_TCP_RESET)
1005			label = ip6_flowlabel(ipv6h);
1006	}
1007
1008	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0,
1009			     label, priority);
1010
1011#ifdef CONFIG_TCP_MD5SIG
1012out:
1013	rcu_read_unlock();
1014#endif
1015}
1016
1017static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
1018			    u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
1019			    struct tcp_md5sig_key *key, u8 tclass,
1020			    __be32 label, u32 priority)
1021{
1022	tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
1023			     tclass, label, priority);
1024}
1025
1026static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
1027{
1028	struct inet_timewait_sock *tw = inet_twsk(sk);
1029	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
1030
1031	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
1032			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
1033			tcp_time_stamp_raw() + tcptw->tw_ts_offset,
1034			tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
1035			tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel), tw->tw_priority);
1036
1037	inet_twsk_put(tw);
1038}
1039
1040static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
1041				  struct request_sock *req)
1042{
1043	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
1044	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
1045	 */
1046	/* RFC 7323 2.3
1047	 * The window field (SEG.WND) of every outgoing segment, with the
1048	 * exception of <SYN> segments, MUST be right-shifted by
1049	 * Rcv.Wind.Shift bits:
1050	 */
1051	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
1052			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
1053			tcp_rsk(req)->rcv_nxt,
1054			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
1055			tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
1056			req->ts_recent, sk->sk_bound_dev_if,
1057			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr),
1058			0, 0, sk->sk_priority);
1059}
1060
1061
1062static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
1063{
1064#ifdef CONFIG_SYN_COOKIES
1065	const struct tcphdr *th = tcp_hdr(skb);
1066
 
1067	if (!th->syn)
1068		sk = cookie_v6_check(sk, skb);
1069#endif
1070	return sk;
1071}
1072
1073u16 tcp_v6_get_syncookie(struct sock *sk, struct ipv6hdr *iph,
1074			 struct tcphdr *th, u32 *cookie)
 
 
1075{
1076	u16 mss = 0;
1077#ifdef CONFIG_SYN_COOKIES
1078	mss = tcp_get_syncookie_mss(&tcp6_request_sock_ops,
1079				    &tcp_request_sock_ipv6_ops, sk, th);
1080	if (mss) {
1081		*cookie = __cookie_v6_init_sequence(iph, th, &mss);
1082		tcp_synq_overflow(sk);
1083	}
1084#endif
1085	return mss;
1086}
1087
1088static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1089{
1090	if (skb->protocol == htons(ETH_P_IP))
1091		return tcp_v4_conn_request(sk, skb);
1092
1093	if (!ipv6_unicast_destination(skb))
1094		goto drop;
1095
1096	return tcp_conn_request(&tcp6_request_sock_ops,
1097				&tcp_request_sock_ipv6_ops, sk, skb);
1098
1099drop:
1100	tcp_listendrop(sk);
1101	return 0; /* don't send reset */
1102}
1103
1104static void tcp_v6_restore_cb(struct sk_buff *skb)
1105{
1106	/* We need to move header back to the beginning if xfrm6_policy_check()
1107	 * and tcp_v6_fill_cb() are going to be called again.
1108	 * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
1109	 */
1110	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
1111		sizeof(struct inet6_skb_parm));
1112}
1113
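/* Create the child socket once the handshake completes.  A v4-mapped
 * request is handed to tcp_v4_syn_recv_sock() and then retrofitted with
 * the ipv6_mapped ops; a native IPv6 request gets its own route, copied
 * IPv6 options and (optionally) the listener's MD5 key.
 */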
1114static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
1115					 struct request_sock *req,
1116					 struct dst_entry *dst,
1117					 struct request_sock *req_unhash,
1118					 bool *own_req)
1119{
1120	struct inet_request_sock *ireq;
1121	struct ipv6_pinfo *newnp;
1122	const struct ipv6_pinfo *np = tcp_inet6_sk(sk);
1123	struct ipv6_txoptions *opt;
1124	struct inet_sock *newinet;
1125	struct tcp_sock *newtp;
1126	struct sock *newsk;
 
1127#ifdef CONFIG_TCP_MD5SIG
1128	struct tcp_md5sig_key *key;
1129#endif
1130	struct flowi6 fl6;
1131
1132	if (skb->protocol == htons(ETH_P_IP)) {
1133		/*
1134		 *	v6 mapped
1135		 */
1136
1137		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
1138					     req_unhash, own_req);
1139
1140		if (!newsk)
1141			return NULL;
1142
1143		inet_sk(newsk)->pinet6 = tcp_inet6_sk(newsk);
 
1144
1145		newinet = inet_sk(newsk);
1146		newnp = tcp_inet6_sk(newsk);
1147		newtp = tcp_sk(newsk);
1148
1149		memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1150
1151		newnp->saddr = newsk->sk_v6_rcv_saddr;
 
 
 
 
1152
1153		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1154		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
1155#ifdef CONFIG_TCP_MD5SIG
1156		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1157#endif
1158
1159		newnp->ipv6_mc_list = NULL;
1160		newnp->ipv6_ac_list = NULL;
1161		newnp->ipv6_fl_list = NULL;
1162		newnp->pktoptions  = NULL;
1163		newnp->opt	   = NULL;
1164		newnp->mcast_oif   = inet_iif(skb);
1165		newnp->mcast_hops  = ip_hdr(skb)->ttl;
1166		newnp->rcv_flowinfo = 0;
1167		if (np->repflow)
1168			newnp->flow_label = 0;
1169
1170		/*
1171		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1172		 * here, tcp_create_openreq_child now does this for us, see the comment in
1173		 * that function for the gory details. -acme
1174		 */
1175
 1176		/* This is a tricky place. Until this moment IPv4 tcp
 1177		   has worked with the IPv6 icsk.icsk_af_ops.
1178		   Sync it now.
1179		 */
1180		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
1181
1182		return newsk;
1183	}
1184
1185	ireq = inet_rsk(req);
 
1186
1187	if (sk_acceptq_is_full(sk))
1188		goto out_overflow;
1189
1190	if (!dst) {
1191		dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
1192		if (!dst)
1193			goto out;
1194	}
1195
1196	newsk = tcp_create_openreq_child(sk, req, skb);
1197	if (!newsk)
1198		goto out_nonewsk;
1199
1200	/*
1201	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1202	 * count here, tcp_create_openreq_child now does this for us, see the
1203	 * comment in that function for the gory details. -acme
1204	 */
1205
1206	newsk->sk_gso_type = SKB_GSO_TCPV6;
1207	ip6_dst_store(newsk, dst, NULL, NULL);
1208	inet6_sk_rx_dst_set(newsk, skb);
1209
1210	inet_sk(newsk)->pinet6 = tcp_inet6_sk(newsk);
 
1211
1212	newtp = tcp_sk(newsk);
1213	newinet = inet_sk(newsk);
1214	newnp = tcp_inet6_sk(newsk);
1215
1216	memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1217
1218	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
1219	newnp->saddr = ireq->ir_v6_loc_addr;
1220	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
1221	newsk->sk_bound_dev_if = ireq->ir_iif;
1222
1223	/* Now IPv6 options...
1224
1225	   First: no IPv4 options.
1226	 */
1227	newinet->inet_opt = NULL;
1228	newnp->ipv6_mc_list = NULL;
1229	newnp->ipv6_ac_list = NULL;
1230	newnp->ipv6_fl_list = NULL;
1231
1232	/* Clone RX bits */
1233	newnp->rxopt.all = np->rxopt.all;
1234
 
1235	newnp->pktoptions = NULL;
1236	newnp->opt	  = NULL;
1237	newnp->mcast_oif  = tcp_v6_iif(skb);
1238	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1239	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
1240	if (np->repflow)
1241		newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
1242
1243	/* Clone native IPv6 options from listening socket (if any)
1244
 1245	   Yes, keeping a reference count would be much cleverer,
 1246	   but we do one more thing here: reattach optmem
 1247	   to newsk.
1248	 */
1249	opt = ireq->ipv6_opt;
1250	if (!opt)
1251		opt = rcu_dereference(np->opt);
1252	if (opt) {
1253		opt = ipv6_dup_options(newsk, opt);
1254		RCU_INIT_POINTER(newnp->opt, opt);
 
1255	}
 
1256	inet_csk(newsk)->icsk_ext_hdr_len = 0;
1257	if (opt)
1258		inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
1259						    opt->opt_flen;
1260
1261	tcp_ca_openreq_child(newsk, dst);
1262
 
1263	tcp_sync_mss(newsk, dst_mtu(dst));
1264	newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));
1265
1266	tcp_initialize_rcv_mss(newsk);
 
 
 
 
1267
1268	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
1269	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
1270
1271#ifdef CONFIG_TCP_MD5SIG
1272	/* Copy over the MD5 key from the original socket */
1273	key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
1274	if (key) {
1275		/* We're using one, so create a matching key
1276		 * on the newsk structure. If we fail to get
1277		 * memory, then we end up not copying the key
1278		 * across. Shucks.
1279		 */
1280		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
1281			       AF_INET6, 128, key->key, key->keylen,
1282			       sk_gfp_mask(sk, GFP_ATOMIC));
 
1283	}
1284#endif
1285
1286	if (__inet_inherit_port(sk, newsk) < 0) {
1287		inet_csk_prepare_forced_close(newsk);
1288		tcp_done(newsk);
1289		goto out;
1290	}
1291	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
1292	if (*own_req) {
1293		tcp_move_syn(newtp, req);
1294
1295		/* Clone pktoptions received with SYN, if we own the req */
1296		if (ireq->pktopts) {
1297			newnp->pktoptions = skb_clone(ireq->pktopts,
1298						      sk_gfp_mask(sk, GFP_ATOMIC));
1299			consume_skb(ireq->pktopts);
1300			ireq->pktopts = NULL;
1301			if (newnp->pktoptions) {
1302				tcp_v6_restore_cb(newnp->pktoptions);
1303				skb_set_owner_r(newnp->pktoptions, newsk);
1304			}
1305		}
1306	}
1307
1308	return newsk;
1309
1310out_overflow:
1311	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1312out_nonewsk:
 
 
1313	dst_release(dst);
1314out:
1315	tcp_listendrop(sk);
1316	return NULL;
1317}
1318
 1319/* The socket must have its spinlock held when we get
1320 * here, unless it is a TCP_LISTEN socket.
1321 *
1322 * We have a potential double-lock case here, so even when
1323 * doing backlog processing we use the BH locking scheme.
1324 * This is because we cannot sleep with the original spinlock
1325 * held.
1326 */
1327static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1328{
1329	struct ipv6_pinfo *np = tcp_inet6_sk(sk);
 
1330	struct sk_buff *opt_skb = NULL;
1331	struct tcp_sock *tp;
1332
1333	/* Imagine: socket is IPv6. IPv4 packet arrives,
 1334	   goes to the IPv4 receive handler and is backlogged.
 1335	   From the backlog it always goes here. Kerboom...
 1336	   Fortunately, tcp_rcv_established and rcv_established
 1337	   handle them correctly, but that is not the case with
1338	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
1339	 */
1340
1341	if (skb->protocol == htons(ETH_P_IP))
1342		return tcp_v4_do_rcv(sk, skb);
1343
1344	/*
1345	 *	socket locking is here for SMP purposes as backlog rcv
1346	 *	is currently called with bh processing disabled.
1347	 */
1348
1349	/* Do Stevens' IPV6_PKTOPTIONS.
1350
 1351	   Yes, guys, it is the only place in our code where we
 1352	   can do this without affecting IPv4.
 1353	   The rest of the code is protocol independent,
 1354	   and I do not like the idea of uglifying IPv4.
 1355
 1356	   Actually, the whole idea behind IPV6_PKTOPTIONS
 1357	   does not look very well thought out. For now we latch
 1358	   the options received in the last packet enqueued
 1359	   by tcp. Feel free to propose a better solution.
1360					       --ANK (980728)
1361	 */
1362	if (np->rxopt.all)
1363		opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));
1364
1365	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1366		struct dst_entry *dst = sk->sk_rx_dst;
1367
1368		sock_rps_save_rxhash(sk, skb);
1369		sk_mark_napi_id(sk, skb);
1370		if (dst) {
1371			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1372			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
1373				dst_release(dst);
1374				sk->sk_rx_dst = NULL;
1375			}
1376		}
1377
1378		tcp_rcv_established(sk, skb);
1379		if (opt_skb)
1380			goto ipv6_pktoptions;
1381		return 0;
1382	}
1383
1384	if (tcp_checksum_complete(skb))
1385		goto csum_err;
1386
1387	if (sk->sk_state == TCP_LISTEN) {
1388		struct sock *nsk = tcp_v6_cookie_check(sk, skb);
1389
1390		if (!nsk)
1391			goto discard;
1392
1393		if (nsk != sk) {
1394			if (tcp_child_process(sk, nsk, skb))
1395				goto reset;
1396			if (opt_skb)
1397				__kfree_skb(opt_skb);
1398			return 0;
1399		}
1400	} else
1401		sock_rps_save_rxhash(sk, skb);
1402
1403	if (tcp_rcv_state_process(sk, skb))
1404		goto reset;
1405	if (opt_skb)
1406		goto ipv6_pktoptions;
1407	return 0;
1408
1409reset:
1410	tcp_v6_send_reset(sk, skb);
1411discard:
1412	if (opt_skb)
1413		__kfree_skb(opt_skb);
1414	kfree_skb(skb);
1415	return 0;
1416csum_err:
1417	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1418	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
1419	goto discard;
1420
1421
1422ipv6_pktoptions:
1423	/* What is this? We only get here when:
1424
1425	   1. the skb was enqueued by TCP;
1426	   2. the skb was added to the tail of the read queue, not out of order;
1427	   3. the socket is not in a passive state;
1428	   4. finally, it really contains options that the user wants to receive.
1429	 */
1430	tp = tcp_sk(sk);
1431	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1432	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
1433		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
1434			np->mcast_oif = tcp_v6_iif(opt_skb);
1435		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1436			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
1437		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
1438			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
1439		if (np->repflow)
1440			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
1441		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
1442			skb_set_owner_r(opt_skb, sk);
1443			tcp_v6_restore_cb(opt_skb);
1444			opt_skb = xchg(&np->pktoptions, opt_skb);
1445		} else {
1446			__kfree_skb(opt_skb);
1447			opt_skb = xchg(&np->pktoptions, NULL);
1448		}
1449	}
1450
1451	kfree_skb(opt_skb);
1452	return 0;
1453}
1454
1455static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
1456			   const struct tcphdr *th)
1457{
1458	/* This is tricky: we move IP6CB to its correct location inside
1459	 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
1460	 * _decode_session6() uses IP6CB().
1461	 * barrier() makes sure the compiler won't play aliasing games.
1462	 */
1463	memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
1464		sizeof(struct inet6_skb_parm));
1465	barrier();
1466
1467	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1468	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1469				    skb->len - th->doff*4);
1470	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1471	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1472	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1473	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1474	TCP_SKB_CB(skb)->sacked = 0;
1475	TCP_SKB_CB(skb)->has_rxtstamp =
1476			skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
1477}
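/* Note on the end_seq arithmetic above: SYN and FIN each consume one unit of
 * sequence space, so end_seq = seq + syn + fin + payload length, where the
 * payload length is skb->len minus the TCP header (th->doff 32-bit words).
 */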
1478
1479INDIRECT_CALLABLE_SCOPE int tcp_v6_rcv(struct sk_buff *skb)
1480{
1481	struct sk_buff *skb_to_free;
1482	int sdif = inet6_sdif(skb);
1483	const struct tcphdr *th;
1484	const struct ipv6hdr *hdr;
1485	bool refcounted;
1486	struct sock *sk;
1487	int ret;
1488	struct net *net = dev_net(skb->dev);
1489
1490	if (skb->pkt_type != PACKET_HOST)
1491		goto discard_it;
1492
1493	/*
1494	 *	Count it even if it's bad.
1495	 */
1496	__TCP_INC_STATS(net, TCP_MIB_INSEGS);
1497
1498	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1499		goto discard_it;
1500
1501	th = (const struct tcphdr *)skb->data;
1502
1503	if (unlikely(th->doff < sizeof(struct tcphdr)/4))
1504		goto bad_packet;
1505	if (!pskb_may_pull(skb, th->doff*4))
1506		goto discard_it;
1507
1508	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
1509		goto csum_error;
1510
1511	th = (const struct tcphdr *)skb->data;
1512	hdr = ipv6_hdr(skb);
1513
1514lookup:
1515	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
1516				th->source, th->dest, inet6_iif(skb), sdif,
1517				&refcounted);
1518	if (!sk)
1519		goto no_tcp_socket;
1520
1521process:
1522	if (sk->sk_state == TCP_TIME_WAIT)
1523		goto do_time_wait;
1524
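	/* TCP_NEW_SYN_RECV means the lookup returned a request socket (the mini
	 * socket created when the SYN arrived). The packet is validated against
	 * its listener, and tcp_check_req() promotes the request to a full
	 * child socket if this is the ACK completing the handshake.
	 */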
1525	if (sk->sk_state == TCP_NEW_SYN_RECV) {
1526		struct request_sock *req = inet_reqsk(sk);
1527		bool req_stolen = false;
1528		struct sock *nsk;
1529
1530		sk = req->rsk_listener;
1531		if (tcp_v6_inbound_md5_hash(sk, skb)) {
1532			sk_drops_add(sk, skb);
1533			reqsk_put(req);
1534			goto discard_it;
1535		}
1536		if (tcp_checksum_complete(skb)) {
1537			reqsk_put(req);
1538			goto csum_error;
1539		}
1540		if (unlikely(sk->sk_state != TCP_LISTEN)) {
1541			inet_csk_reqsk_queue_drop_and_put(sk, req);
1542			goto lookup;
1543		}
1544		sock_hold(sk);
1545		refcounted = true;
1546		nsk = NULL;
1547		if (!tcp_filter(sk, skb)) {
1548			th = (const struct tcphdr *)skb->data;
1549			hdr = ipv6_hdr(skb);
1550			tcp_v6_fill_cb(skb, hdr, th);
1551			nsk = tcp_check_req(sk, skb, req, false, &req_stolen);
1552		}
1553		if (!nsk) {
1554			reqsk_put(req);
1555			if (req_stolen) {
1556			/* Another CPU got exclusive access to req
1557			 * and created a full-blown socket.
1558			 * Try to feed this packet to that socket
1559			 * instead of discarding it.
1560			 */
1561				tcp_v6_restore_cb(skb);
1562				sock_put(sk);
1563				goto lookup;
1564			}
1565			goto discard_and_relse;
1566		}
1567		if (nsk == sk) {
1568			reqsk_put(req);
1569			tcp_v6_restore_cb(skb);
1570		} else if (tcp_child_process(sk, nsk, skb)) {
1571			tcp_v6_send_reset(nsk, skb);
1572			goto discard_and_relse;
1573		} else {
1574			sock_put(sk);
1575			return 0;
1576		}
1577	}
1578	if (hdr->hop_limit < tcp_inet6_sk(sk)->min_hopcount) {
1579		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
1580		goto discard_and_relse;
1581	}
1582
1583	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1584		goto discard_and_relse;
1585
1586	if (tcp_v6_inbound_md5_hash(sk, skb))
1587		goto discard_and_relse;
1588
1589	if (tcp_filter(sk, skb))
1590		goto discard_and_relse;
1591	th = (const struct tcphdr *)skb->data;
1592	hdr = ipv6_hdr(skb);
1593	tcp_v6_fill_cb(skb, hdr, th);
1594
1595	skb->dev = NULL;
1596
1597	if (sk->sk_state == TCP_LISTEN) {
1598		ret = tcp_v6_do_rcv(sk, skb);
1599		goto put_and_return;
1600	}
1601
1602	sk_incoming_cpu_update(sk);
1603
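	/* Outside the LISTEN state the packet is handled under the socket
	 * spinlock: if a process currently owns the socket we queue the skb
	 * to the backlog (replayed later by release_sock()), otherwise we
	 * process it directly in BH context via tcp_v6_do_rcv().
	 */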
1604	bh_lock_sock_nested(sk);
1605	tcp_segs_in(tcp_sk(sk), skb);
1606	ret = 0;
1607	if (!sock_owned_by_user(sk)) {
1608		skb_to_free = sk->sk_rx_skb_cache;
1609		sk->sk_rx_skb_cache = NULL;
1610		ret = tcp_v6_do_rcv(sk, skb);
1611	} else {
1612		if (tcp_add_backlog(sk, skb))
1613			goto discard_and_relse;
1614		skb_to_free = NULL;
1615	}
1616	bh_unlock_sock(sk);
1617	if (skb_to_free)
1618		__kfree_skb(skb_to_free);
1619put_and_return:
1620	if (refcounted)
1621		sock_put(sk);
1622	return ret ? -1 : 0;
1623
1624no_tcp_socket:
1625	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1626		goto discard_it;
1627
1628	tcp_v6_fill_cb(skb, hdr, th);
1629
1630	if (tcp_checksum_complete(skb)) {
1631csum_error:
1632		__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
1633bad_packet:
1634		__TCP_INC_STATS(net, TCP_MIB_INERRS);
1635	} else {
1636		tcp_v6_send_reset(NULL, skb);
1637	}
1638
1639discard_it:
1640	kfree_skb(skb);
1641	return 0;
1642
1643discard_and_relse:
1644	sk_drops_add(sk, skb);
1645	if (refcounted)
1646		sock_put(sk);
1647	goto discard_it;
1648
1649do_time_wait:
1650	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1651		inet_twsk_put(inet_twsk(sk));
1652		goto discard_it;
1653	}
1654
1655	tcp_v6_fill_cb(skb, hdr, th);
1656
1657	if (tcp_checksum_complete(skb)) {
1658		inet_twsk_put(inet_twsk(sk));
1659		goto csum_error;
1660	}
1661
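	/* tcp_timewait_state_process() decides the reaction: TCP_TW_SYN means
	 * a valid new SYN hit the TIME_WAIT socket, so it is recycled and the
	 * packet re-dispatched to a listener; TCP_TW_ACK answers with an ACK,
	 * TCP_TW_RST answers with a reset, TCP_TW_SUCCESS just drops.
	 */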
1662	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1663	case TCP_TW_SYN:
1664	{
1665		struct sock *sk2;
1666
1667		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
1668					    skb, __tcp_hdrlen(th),
1669					    &ipv6_hdr(skb)->saddr, th->source,
1670					    &ipv6_hdr(skb)->daddr,
1671					    ntohs(th->dest),
1672					    tcp_v6_iif_l3_slave(skb),
1673					    sdif);
1674		if (sk2) {
1675			struct inet_timewait_sock *tw = inet_twsk(sk);
1676			inet_twsk_deschedule_put(tw);
1677			sk = sk2;
1678			tcp_v6_restore_cb(skb);
1679			refcounted = false;
1680			goto process;
1681		}
1682	}
1683		/* to ACK */
1684		/* fall through */
1685	case TCP_TW_ACK:
1686		tcp_v6_timewait_ack(sk, skb);
1687		break;
1688	case TCP_TW_RST:
1689		tcp_v6_send_reset(sk, skb);
1690		inet_twsk_deschedule_put(inet_twsk(sk));
1691		goto discard_it;
1692	case TCP_TW_SUCCESS:
1693		;
1694	}
1695	goto discard_it;
1696}
1697
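/* Early demux: before routing, look up an established socket for the packet
 * so its cached rx dst can be attached to the skb and the per-packet route
 * lookup skipped. Runs without the socket lock, hence READ_ONCE()/dst_check().
 */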
1698INDIRECT_CALLABLE_SCOPE void tcp_v6_early_demux(struct sk_buff *skb)
1699{
1700	const struct ipv6hdr *hdr;
1701	const struct tcphdr *th;
1702	struct sock *sk;
1703
1704	if (skb->pkt_type != PACKET_HOST)
1705		return;
1706
1707	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1708		return;
1709
1710	hdr = ipv6_hdr(skb);
1711	th = tcp_hdr(skb);
1712
1713	if (th->doff < sizeof(struct tcphdr) / 4)
1714		return;
1715
1716	/* Note : We use inet6_iif() here, not tcp_v6_iif() */
1717	sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1718					&hdr->saddr, th->source,
1719					&hdr->daddr, ntohs(th->dest),
1720					inet6_iif(skb), inet6_sdif(skb));
1721	if (sk) {
1722		skb->sk = sk;
1723		skb->destructor = sock_edemux;
1724		if (sk_fullsock(sk)) {
1725			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
1726
1727			if (dst)
1728				dst = dst_check(dst, tcp_inet6_sk(sk)->rx_dst_cookie);
1729			if (dst &&
1730			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1731				skb_dst_set_noref(skb, dst);
1732		}
1733	}
1734}
1735
1736static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1737	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
1738	.twsk_unique	= tcp_twsk_unique,
1739	.twsk_destructor = tcp_twsk_destructor,
1740};
1741
1742static const struct inet_connection_sock_af_ops ipv6_specific = {
1743	.queue_xmit	   = inet6_csk_xmit,
1744	.send_check	   = tcp_v6_send_check,
1745	.rebuild_header	   = inet6_sk_rebuild_header,
1746	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
1747	.conn_request	   = tcp_v6_conn_request,
1748	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
1749	.net_header_len	   = sizeof(struct ipv6hdr),
1750	.net_frag_header_len = sizeof(struct frag_hdr),
1751	.setsockopt	   = ipv6_setsockopt,
1752	.getsockopt	   = ipv6_getsockopt,
1753	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
1754	.sockaddr_len	   = sizeof(struct sockaddr_in6),
1755#ifdef CONFIG_COMPAT
1756	.compat_setsockopt = compat_ipv6_setsockopt,
1757	.compat_getsockopt = compat_ipv6_getsockopt,
1758#endif
1759	.mtu_reduced	   = tcp_v6_mtu_reduced,
1760};
1761
1762#ifdef CONFIG_TCP_MD5SIG
1763static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
1764	.md5_lookup	=	tcp_v6_md5_lookup,
1765	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
1766	.md5_parse	=	tcp_v6_parse_md5_keys,
1767};
1768#endif
1769
1770/*
1771 *	TCP over IPv4 via INET6 API
1772 */
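/* When an AF_INET6 socket connects to a v4-mapped address (::ffff:a.b.c.d),
 * its icsk_af_ops is switched from ipv6_specific to this table so that
 * transmit, checksum and header handling use the IPv4 helpers while the
 * socket keeps its IPv6 API.
 */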
1773static const struct inet_connection_sock_af_ops ipv6_mapped = {
1774	.queue_xmit	   = ip_queue_xmit,
1775	.send_check	   = tcp_v4_send_check,
1776	.rebuild_header	   = inet_sk_rebuild_header,
1777	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
1778	.conn_request	   = tcp_v6_conn_request,
1779	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
1780	.net_header_len	   = sizeof(struct iphdr),
1781	.setsockopt	   = ipv6_setsockopt,
1782	.getsockopt	   = ipv6_getsockopt,
1783	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
1784	.sockaddr_len	   = sizeof(struct sockaddr_in6),
1785#ifdef CONFIG_COMPAT
1786	.compat_setsockopt = compat_ipv6_setsockopt,
1787	.compat_getsockopt = compat_ipv6_getsockopt,
1788#endif
1789	.mtu_reduced	   = tcp_v4_mtu_reduced,
1790};
1791
1792#ifdef CONFIG_TCP_MD5SIG
1793static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
1794	.md5_lookup	=	tcp_v4_md5_lookup,
1795	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
1796	.md5_parse	=	tcp_v6_parse_md5_keys,
1797};
1798#endif
1799
1800/* NOTE: A lot of things are set to zero explicitly by the call to
1801 *       sk_alloc(), so they need not be done here.
1802 */
1803static int tcp_v6_init_sock(struct sock *sk)
1804{
1805	struct inet_connection_sock *icsk = inet_csk(sk);
1806
1807	tcp_init_sock(sk);
1808
1809	icsk->icsk_af_ops = &ipv6_specific;
1810
1811#ifdef CONFIG_TCP_MD5SIG
1812	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
1813#endif
1814
1815	return 0;
1816}
1817
1818static void tcp_v6_destroy_sock(struct sock *sk)
1819{
1820	tcp_v4_destroy_sock(sk);
1821	inet6_destroy_sock(sk);
1822}
1823
1824#ifdef CONFIG_PROC_FS
1825/* Proc filesystem TCPv6 sock list dumping. */
1826static void get_openreq6(struct seq_file *seq,
1827			 const struct request_sock *req, int i)
1828{
1829	long ttd = req->rsk_timer.expires - jiffies;
1830	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
1831	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;
1832
1833	if (ttd < 0)
1834		ttd = 0;
1835
1836	seq_printf(seq,
1837		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1838		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
1839		   i,
1840		   src->s6_addr32[0], src->s6_addr32[1],
1841		   src->s6_addr32[2], src->s6_addr32[3],
1842		   inet_rsk(req)->ir_num,
1843		   dest->s6_addr32[0], dest->s6_addr32[1],
1844		   dest->s6_addr32[2], dest->s6_addr32[3],
1845		   ntohs(inet_rsk(req)->ir_rmt_port),
1846		   TCP_SYN_RECV,
1847		   0, 0, /* could print option size, but that is af dependent. */
1848		   1,   /* timers active (only the expire timer) */
1849		   jiffies_to_clock_t(ttd),
1850		   req->num_timeout,
1851		   from_kuid_munged(seq_user_ns(seq),
1852				    sock_i_uid(req->rsk_listener)),
1853		   0,  /* non standard timer */
1854		   0, /* open_requests have no inode */
1855		   0, req);
1856}
1857
1858static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1859{
1860	const struct in6_addr *dest, *src;
1861	__u16 destp, srcp;
1862	int timer_active;
1863	unsigned long timer_expires;
1864	const struct inet_sock *inet = inet_sk(sp);
1865	const struct tcp_sock *tp = tcp_sk(sp);
1866	const struct inet_connection_sock *icsk = inet_csk(sp);
1867	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
1868	int rx_queue;
1869	int state;
1870
1871	dest  = &sp->sk_v6_daddr;
1872	src   = &sp->sk_v6_rcv_saddr;
1873	destp = ntohs(inet->inet_dport);
1874	srcp  = ntohs(inet->inet_sport);
1875
1876	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
1877	    icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
1878	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
1879		timer_active	= 1;
1880		timer_expires	= icsk->icsk_timeout;
1881	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1882		timer_active	= 4;
1883		timer_expires	= icsk->icsk_timeout;
1884	} else if (timer_pending(&sp->sk_timer)) {
1885		timer_active	= 2;
1886		timer_expires	= sp->sk_timer.expires;
1887	} else {
1888		timer_active	= 0;
1889		timer_expires = jiffies;
1890	}
1891
1892	state = inet_sk_state_load(sp);
1893	if (state == TCP_LISTEN)
1894		rx_queue = sp->sk_ack_backlog;
1895	else
1896		/* Because we don't lock the socket,
1897		 * we might find a transient negative value.
1898		 */
1899		rx_queue = max_t(int, READ_ONCE(tp->rcv_nxt) -
1900				      READ_ONCE(tp->copied_seq), 0);
1901
1902	seq_printf(seq,
1903		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1904		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
1905		   i,
1906		   src->s6_addr32[0], src->s6_addr32[1],
1907		   src->s6_addr32[2], src->s6_addr32[3], srcp,
1908		   dest->s6_addr32[0], dest->s6_addr32[1],
1909		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
1910		   state,
1911		   READ_ONCE(tp->write_seq) - tp->snd_una,
1912		   rx_queue,
1913		   timer_active,
1914		   jiffies_delta_to_clock_t(timer_expires - jiffies),
1915		   icsk->icsk_retransmits,
1916		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
1917		   icsk->icsk_probes_out,
1918		   sock_i_ino(sp),
1919		   refcount_read(&sp->sk_refcnt), sp,
1920		   jiffies_to_clock_t(icsk->icsk_rto),
1921		   jiffies_to_clock_t(icsk->icsk_ack.ato),
1922		   (icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(sp),
1923		   tp->snd_cwnd,
1924		   state == TCP_LISTEN ?
1925			fastopenq->max_qlen :
1926			(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
1927		   );
1928}
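/* Output format note: each IPv6 address above is printed as four %08X words
 * taken directly from s6_addr32[], with ports in host byte order, mirroring
 * /proc/net/tcp with the address columns widened to 128 bits.
 */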
1929
1930static void get_timewait6_sock(struct seq_file *seq,
1931			       struct inet_timewait_sock *tw, int i)
1932{
1933	long delta = tw->tw_timer.expires - jiffies;
1934	const struct in6_addr *dest, *src;
1935	__u16 destp, srcp;
1936
1937	dest = &tw->tw_v6_daddr;
1938	src  = &tw->tw_v6_rcv_saddr;
1939	destp = ntohs(tw->tw_dport);
1940	srcp  = ntohs(tw->tw_sport);
1941
1942	seq_printf(seq,
1943		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1944		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
1945		   i,
1946		   src->s6_addr32[0], src->s6_addr32[1],
1947		   src->s6_addr32[2], src->s6_addr32[3], srcp,
1948		   dest->s6_addr32[0], dest->s6_addr32[1],
1949		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
1950		   tw->tw_substate, 0, 0,
1951		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
1952		   refcount_read(&tw->tw_refcnt), tw);
1953}
1954
1955static int tcp6_seq_show(struct seq_file *seq, void *v)
1956{
1957	struct tcp_iter_state *st;
1958	struct sock *sk = v;
1959
1960	if (v == SEQ_START_TOKEN) {
1961		seq_puts(seq,
1962			 "  sl  "
1963			 "local_address                         "
1964			 "remote_address                        "
1965			 "st tx_queue rx_queue tr tm->when retrnsmt"
1966			 "   uid  timeout inode\n");
1967		goto out;
1968	}
1969	st = seq->private;
1970
1971	if (sk->sk_state == TCP_TIME_WAIT)
1972		get_timewait6_sock(seq, v, st->num);
1973	else if (sk->sk_state == TCP_NEW_SYN_RECV)
1974		get_openreq6(seq, v, st->num);
1975	else
1976		get_tcp6_sock(seq, v, st->num);
1977out:
1978	return 0;
1979}
1980
1981static const struct seq_operations tcp6_seq_ops = {
1982	.show		= tcp6_seq_show,
1983	.start		= tcp_seq_start,
1984	.next		= tcp_seq_next,
1985	.stop		= tcp_seq_stop,
1986};
1987
1988static struct tcp_seq_afinfo tcp6_seq_afinfo = {
1989	.family		= AF_INET6,
1990};
1991
1992int __net_init tcp6_proc_init(struct net *net)
1993{
1994	if (!proc_create_net_data("tcp6", 0444, net->proc_net, &tcp6_seq_ops,
1995			sizeof(struct tcp_iter_state), &tcp6_seq_afinfo))
1996		return -ENOMEM;
1997	return 0;
1998}
1999
2000void tcp6_proc_exit(struct net *net)
2001{
2002	remove_proc_entry("tcp6", net->proc_net);
2003}
2004#endif
2005
2006struct proto tcpv6_prot = {
2007	.name			= "TCPv6",
2008	.owner			= THIS_MODULE,
2009	.close			= tcp_close,
2010	.pre_connect		= tcp_v6_pre_connect,
2011	.connect		= tcp_v6_connect,
2012	.disconnect		= tcp_disconnect,
2013	.accept			= inet_csk_accept,
2014	.ioctl			= tcp_ioctl,
2015	.init			= tcp_v6_init_sock,
2016	.destroy		= tcp_v6_destroy_sock,
2017	.shutdown		= tcp_shutdown,
2018	.setsockopt		= tcp_setsockopt,
2019	.getsockopt		= tcp_getsockopt,
2020	.keepalive		= tcp_set_keepalive,
2021	.recvmsg		= tcp_recvmsg,
2022	.sendmsg		= tcp_sendmsg,
2023	.sendpage		= tcp_sendpage,
2024	.backlog_rcv		= tcp_v6_do_rcv,
2025	.release_cb		= tcp_release_cb,
2026	.hash			= inet6_hash,
2027	.unhash			= inet_unhash,
2028	.get_port		= inet_csk_get_port,
2029	.enter_memory_pressure	= tcp_enter_memory_pressure,
2030	.leave_memory_pressure	= tcp_leave_memory_pressure,
2031	.stream_memory_free	= tcp_stream_memory_free,
2032	.sockets_allocated	= &tcp_sockets_allocated,
2033	.memory_allocated	= &tcp_memory_allocated,
2034	.memory_pressure	= &tcp_memory_pressure,
2035	.orphan_count		= &tcp_orphan_count,
2036	.sysctl_mem		= sysctl_tcp_mem,
2037	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_wmem),
2038	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_rmem),
2039	.max_header		= MAX_TCP_HEADER,
2040	.obj_size		= sizeof(struct tcp6_sock),
2041	.slab_flags		= SLAB_TYPESAFE_BY_RCU,
2042	.twsk_prot		= &tcp6_timewait_sock_ops,
2043	.rsk_prot		= &tcp6_request_sock_ops,
2044	.h.hashinfo		= &tcp_hashinfo,
2045	.no_autobind		= true,
2046#ifdef CONFIG_COMPAT
2047	.compat_setsockopt	= compat_tcp_setsockopt,
2048	.compat_getsockopt	= compat_tcp_getsockopt,
2049#endif
2050	.diag_destroy		= tcp_abort,
2051};
2052
2053/* Thinking of making this const? Don't.
2054 * early_demux can change based on a sysctl.
2055 */
2056static struct inet6_protocol tcpv6_protocol = {
2057	.early_demux	=	tcp_v6_early_demux,
2058	.early_demux_handler =  tcp_v6_early_demux,
2059	.handler	=	tcp_v6_rcv,
2060	.err_handler	=	tcp_v6_err,
2061	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
2062};
2063
2064static struct inet_protosw tcpv6_protosw = {
2065	.type		=	SOCK_STREAM,
2066	.protocol	=	IPPROTO_TCP,
2067	.prot		=	&tcpv6_prot,
2068	.ops		=	&inet6_stream_ops,
2069	.flags		=	INET_PROTOSW_PERMANENT |
2070				INET_PROTOSW_ICSK,
2071};
2072
2073static int __net_init tcpv6_net_init(struct net *net)
2074{
2075	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
2076				    SOCK_RAW, IPPROTO_TCP, net);
2077}
2078
2079static void __net_exit tcpv6_net_exit(struct net *net)
2080{
2081	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
2082}
2083
2084static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
2085{
2086	inet_twsk_purge(&tcp_hashinfo, AF_INET6);
2087}
2088
2089static struct pernet_operations tcpv6_net_ops = {
2090	.init	    = tcpv6_net_init,
2091	.exit	    = tcpv6_net_exit,
2092	.exit_batch = tcpv6_net_exit_batch,
2093};
2094
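/* Registration order matters: the IPPROTO_TCP protocol handler first, then
 * the protosw entry that exposes SOCK_STREAM sockets over it, then the
 * per-netns control socket. On failure the labels below unwind in reverse.
 */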
2095int __init tcpv6_init(void)
2096{
2097	int ret;
2098
2099	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
2100	if (ret)
2101		goto out;
2102
2103	/* register inet6 protocol */
2104	ret = inet6_register_protosw(&tcpv6_protosw);
2105	if (ret)
2106		goto out_tcpv6_protocol;
2107
2108	ret = register_pernet_subsys(&tcpv6_net_ops);
2109	if (ret)
2110		goto out_tcpv6_protosw;
2111out:
2112	return ret;
2113
2114out_tcpv6_protosw:
2115	inet6_unregister_protosw(&tcpv6_protosw);
2116out_tcpv6_protocol:
2117	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2118	goto out;
2119}
2120
2121void tcpv6_exit(void)
2122{
2123	unregister_pernet_subsys(&tcpv6_net_ops);
2124	inet6_unregister_protosw(&tcpv6_protosw);
2125	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2126}