v3.1
   1/*
   2 *	TCP over IPv6
   3 *	Linux INET6 implementation
   4 *
   5 *	Authors:
   6 *	Pedro Roque		<roque@di.fc.ul.pt>
   7 *
   8 *	Based on:
   9 *	linux/net/ipv4/tcp.c
  10 *	linux/net/ipv4/tcp_input.c
  11 *	linux/net/ipv4/tcp_output.c
  12 *
  13 *	Fixes:
  14 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
  15 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
  16 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
  17 *					a single port at the same time.
  18 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
  19 *
  20 *	This program is free software; you can redistribute it and/or
  21 *      modify it under the terms of the GNU General Public License
  22 *      as published by the Free Software Foundation; either version
  23 *      2 of the License, or (at your option) any later version.
  24 */
  25
  26#include <linux/bottom_half.h>
  27#include <linux/module.h>
  28#include <linux/errno.h>
  29#include <linux/types.h>
  30#include <linux/socket.h>
  31#include <linux/sockios.h>
  32#include <linux/net.h>
  33#include <linux/jiffies.h>
  34#include <linux/in.h>
  35#include <linux/in6.h>
  36#include <linux/netdevice.h>
  37#include <linux/init.h>
  38#include <linux/jhash.h>
  39#include <linux/ipsec.h>
  40#include <linux/times.h>
  41#include <linux/slab.h>
  42
  43#include <linux/ipv6.h>
  44#include <linux/icmpv6.h>
  45#include <linux/random.h>
  46
  47#include <net/tcp.h>
  48#include <net/ndisc.h>
  49#include <net/inet6_hashtables.h>
  50#include <net/inet6_connection_sock.h>
  51#include <net/ipv6.h>
  52#include <net/transp_v6.h>
  53#include <net/addrconf.h>
  54#include <net/ip6_route.h>
  55#include <net/ip6_checksum.h>
  56#include <net/inet_ecn.h>
  57#include <net/protocol.h>
  58#include <net/xfrm.h>
  59#include <net/snmp.h>
  60#include <net/dsfield.h>
  61#include <net/timewait_sock.h>
  62#include <net/netdma.h>
  63#include <net/inet_common.h>
  64#include <net/secure_seq.h>
  65
  66#include <asm/uaccess.h>
  67
  68#include <linux/proc_fs.h>
  69#include <linux/seq_file.h>
  70
  71#include <linux/crypto.h>
  72#include <linux/scatterlist.h>
  73
  74static void	tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
  75static void	tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
  76				      struct request_sock *req);
  77
  78static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
  79static void	__tcp_v6_send_check(struct sk_buff *skb,
  80				    const struct in6_addr *saddr,
  81				    const struct in6_addr *daddr);
  82
  83static const struct inet_connection_sock_af_ops ipv6_mapped;
  84static const struct inet_connection_sock_af_ops ipv6_specific;
  85#ifdef CONFIG_TCP_MD5SIG
  86static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
  87static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
  88#else
  89static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
  90						   const struct in6_addr *addr)
  91{
  92	return NULL;
  93}
  94#endif
  95
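/* Add the socket to the TCP hash tables.  A socket whose af_ops were
 * switched to ipv6_mapped (an IPv4-mapped connection) is hashed through
 * the IPv4 tcp_prot.hash(); native IPv6 sockets go through
 * __inet6_hash() with bottom halves disabled.
 */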
  96static void tcp_v6_hash(struct sock *sk)
  97{
  98	if (sk->sk_state != TCP_CLOSE) {
  99		if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
 100			tcp_prot.hash(sk);
 101			return;
 102		}
 103		local_bh_disable();
 104		__inet6_hash(sk, NULL);
 105		local_bh_enable();
 106	}
 107}
 108
 109static __inline__ __sum16 tcp_v6_check(int len,
 110				   const struct in6_addr *saddr,
 111				   const struct in6_addr *daddr,
 112				   __wsum base)
 113{
 114	return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
 115}
 116
 117static __u32 tcp_v6_init_sequence(struct sk_buff *skb)
 118{
 119	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
 120					    ipv6_hdr(skb)->saddr.s6_addr32,
 121					    tcp_hdr(skb)->dest,
 122					    tcp_hdr(skb)->source);
 123}
 124
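/* Active open.  Validates the destination (flow label, address type,
 * scope id for link-local addresses), falls back to tcp_v4_connect()
 * for IPv4-mapped destinations, performs the route and source-address
 * lookup, optionally seeds PAWS timestamps from the inet_peer cache
 * when tw_recycle is enabled, then picks a secure ISN and sends the
 * SYN via tcp_connect().
 */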
 125static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 126			  int addr_len)
 127{
 128	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
 129	struct inet_sock *inet = inet_sk(sk);
 130	struct inet_connection_sock *icsk = inet_csk(sk);
 131	struct ipv6_pinfo *np = inet6_sk(sk);
 132	struct tcp_sock *tp = tcp_sk(sk);
 133	struct in6_addr *saddr = NULL, *final_p, final;
 134	struct rt6_info *rt;
 135	struct flowi6 fl6;
 136	struct dst_entry *dst;
 137	int addr_type;
 138	int err;
 139
 140	if (addr_len < SIN6_LEN_RFC2133)
 141		return -EINVAL;
 142
 143	if (usin->sin6_family != AF_INET6)
 144		return -EAFNOSUPPORT;
 145
 146	memset(&fl6, 0, sizeof(fl6));
 147
 148	if (np->sndflow) {
 149		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
 150		IP6_ECN_flow_init(fl6.flowlabel);
 151		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
 152			struct ip6_flowlabel *flowlabel;
 153			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
 154			if (flowlabel == NULL)
 155				return -EINVAL;
 156			ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
 157			fl6_sock_release(flowlabel);
 158		}
 159	}
 160
 161	/*
 162	 *	connect() to INADDR_ANY means loopback (BSD'ism).
 163	 */
 164
 165	if(ipv6_addr_any(&usin->sin6_addr))
 166		usin->sin6_addr.s6_addr[15] = 0x1;
 167
 168	addr_type = ipv6_addr_type(&usin->sin6_addr);
 169
 170	if(addr_type & IPV6_ADDR_MULTICAST)
 171		return -ENETUNREACH;
 172
 173	if (addr_type&IPV6_ADDR_LINKLOCAL) {
 174		if (addr_len >= sizeof(struct sockaddr_in6) &&
 175		    usin->sin6_scope_id) {
 176			/* If interface is set while binding, indices
 177			 * must coincide.
 178			 */
 179			if (sk->sk_bound_dev_if &&
 180			    sk->sk_bound_dev_if != usin->sin6_scope_id)
 181				return -EINVAL;
 182
 183			sk->sk_bound_dev_if = usin->sin6_scope_id;
 184		}
 185
  186		/* Connecting to a link-local address requires an interface */
 187		if (!sk->sk_bound_dev_if)
 188			return -EINVAL;
 189	}
 190
 191	if (tp->rx_opt.ts_recent_stamp &&
 192	    !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
 193		tp->rx_opt.ts_recent = 0;
 194		tp->rx_opt.ts_recent_stamp = 0;
 195		tp->write_seq = 0;
 196	}
 197
 198	ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
 199	np->flow_label = fl6.flowlabel;
 200
 201	/*
 202	 *	TCP over IPv4
 203	 */
 204
 205	if (addr_type == IPV6_ADDR_MAPPED) {
 206		u32 exthdrlen = icsk->icsk_ext_hdr_len;
 207		struct sockaddr_in sin;
 208
 209		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
 210
 211		if (__ipv6_only_sock(sk))
 212			return -ENETUNREACH;
 213
 214		sin.sin_family = AF_INET;
 215		sin.sin_port = usin->sin6_port;
 216		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
 217
 218		icsk->icsk_af_ops = &ipv6_mapped;
 219		sk->sk_backlog_rcv = tcp_v4_do_rcv;
 220#ifdef CONFIG_TCP_MD5SIG
 221		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
 222#endif
 223
 224		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
 225
 226		if (err) {
 227			icsk->icsk_ext_hdr_len = exthdrlen;
 228			icsk->icsk_af_ops = &ipv6_specific;
 229			sk->sk_backlog_rcv = tcp_v6_do_rcv;
 230#ifdef CONFIG_TCP_MD5SIG
 231			tp->af_specific = &tcp_sock_ipv6_specific;
 232#endif
 233			goto failure;
 234		} else {
 235			ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
 236			ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
 237					       &np->rcv_saddr);
 238		}
 239
 240		return err;
 241	}
 242
 243	if (!ipv6_addr_any(&np->rcv_saddr))
 244		saddr = &np->rcv_saddr;
 245
 246	fl6.flowi6_proto = IPPROTO_TCP;
 247	ipv6_addr_copy(&fl6.daddr, &np->daddr);
 248	ipv6_addr_copy(&fl6.saddr,
 249		       (saddr ? saddr : &np->saddr));
 250	fl6.flowi6_oif = sk->sk_bound_dev_if;
 251	fl6.flowi6_mark = sk->sk_mark;
 252	fl6.fl6_dport = usin->sin6_port;
 253	fl6.fl6_sport = inet->inet_sport;
 254
 255	final_p = fl6_update_dst(&fl6, np->opt, &final);
 256
 257	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
 258
 259	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true);
 260	if (IS_ERR(dst)) {
 261		err = PTR_ERR(dst);
 262		goto failure;
 263	}
 264
 265	if (saddr == NULL) {
 266		saddr = &fl6.saddr;
 267		ipv6_addr_copy(&np->rcv_saddr, saddr);
 268	}
 269
 270	/* set the source address */
 271	ipv6_addr_copy(&np->saddr, saddr);
 272	inet->inet_rcv_saddr = LOOPBACK4_IPV6;
 273
 274	sk->sk_gso_type = SKB_GSO_TCPV6;
 275	__ip6_dst_store(sk, dst, NULL, NULL);
 276
 277	rt = (struct rt6_info *) dst;
 278	if (tcp_death_row.sysctl_tw_recycle &&
 279	    !tp->rx_opt.ts_recent_stamp &&
 280	    ipv6_addr_equal(&rt->rt6i_dst.addr, &np->daddr)) {
 281		struct inet_peer *peer = rt6_get_peer(rt);
 282		/*
 283		 * VJ's idea. We save last timestamp seen from
 284		 * the destination in peer table, when entering state
  285		 * TIME-WAIT, and initialize rx_opt.ts_recent from it,
 286		 * when trying new connection.
 287		 */
 288		if (peer) {
 289			inet_peer_refcheck(peer);
 290			if ((u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
 291				tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
 292				tp->rx_opt.ts_recent = peer->tcp_ts;
 293			}
 294		}
 295	}
 296
 297	icsk->icsk_ext_hdr_len = 0;
 298	if (np->opt)
 299		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
 300					  np->opt->opt_nflen);
 301
 302	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
 303
 304	inet->inet_dport = usin->sin6_port;
 305
 306	tcp_set_state(sk, TCP_SYN_SENT);
 307	err = inet6_hash_connect(&tcp_death_row, sk);
 308	if (err)
 309		goto late_failure;
 310
 311	if (!tp->write_seq)
 312		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
 313							     np->daddr.s6_addr32,
 314							     inet->inet_sport,
 315							     inet->inet_dport);
 316
 317	err = tcp_connect(sk);
 318	if (err)
 319		goto late_failure;
 320
 321	return 0;
 322
 323late_failure:
 324	tcp_set_state(sk, TCP_CLOSE);
 325	__sk_dst_reset(sk);
 326failure:
 327	inet->inet_dport = 0;
 328	sk->sk_route_caps = 0;
 329	return err;
 330}
 331
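/* ICMPv6 error handler for TCP.  Looks up the socket addressed by the
 * offending segment, then: for PKT_TOOBIG it re-validates the cached
 * route and shrinks the MSS via tcp_sync_mss()/tcp_simple_retransmit();
 * for a listener it drops the matching request_sock; for
 * SYN_SENT/SYN_RECV it aborts the connection; otherwise the error is
 * reported through sk_err or sk_err_soft depending on np->recverr and
 * on whether the socket is currently owned by a user context.
 */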
 332static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 333		u8 type, u8 code, int offset, __be32 info)
 334{
 335	const struct ipv6hdr *hdr = (const struct ipv6hdr*)skb->data;
 336	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
 337	struct ipv6_pinfo *np;
 338	struct sock *sk;
 339	int err;
 340	struct tcp_sock *tp;
 341	__u32 seq;
 342	struct net *net = dev_net(skb->dev);
 343
 344	sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
 345			th->dest, &hdr->saddr, th->source, skb->dev->ifindex);
 346
 347	if (sk == NULL) {
 348		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
 349				   ICMP6_MIB_INERRORS);
 350		return;
 351	}
 352
 353	if (sk->sk_state == TCP_TIME_WAIT) {
 354		inet_twsk_put(inet_twsk(sk));
 355		return;
 356	}
 357
 358	bh_lock_sock(sk);
 359	if (sock_owned_by_user(sk))
 360		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
 361
 362	if (sk->sk_state == TCP_CLOSE)
 363		goto out;
 364
 365	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
 366		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
 367		goto out;
 368	}
 369
 370	tp = tcp_sk(sk);
 371	seq = ntohl(th->seq);
 372	if (sk->sk_state != TCP_LISTEN &&
 373	    !between(seq, tp->snd_una, tp->snd_nxt)) {
 374		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
 375		goto out;
 376	}
 377
 378	np = inet6_sk(sk);
 379
 380	if (type == ICMPV6_PKT_TOOBIG) {
 381		struct dst_entry *dst;
 382
 383		if (sock_owned_by_user(sk))
 384			goto out;
 385		if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
 386			goto out;
 387
 388		/* icmp should have updated the destination cache entry */
 389		dst = __sk_dst_check(sk, np->dst_cookie);
 390
 391		if (dst == NULL) {
 392			struct inet_sock *inet = inet_sk(sk);
 393			struct flowi6 fl6;
 394
 395			/* BUGGG_FUTURE: Again, it is not clear how
 396			   to handle rthdr case. Ignore this complexity
 397			   for now.
 398			 */
 399			memset(&fl6, 0, sizeof(fl6));
 400			fl6.flowi6_proto = IPPROTO_TCP;
 401			ipv6_addr_copy(&fl6.daddr, &np->daddr);
 402			ipv6_addr_copy(&fl6.saddr, &np->saddr);
 403			fl6.flowi6_oif = sk->sk_bound_dev_if;
 404			fl6.flowi6_mark = sk->sk_mark;
 405			fl6.fl6_dport = inet->inet_dport;
 406			fl6.fl6_sport = inet->inet_sport;
 407			security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
 408
 409			dst = ip6_dst_lookup_flow(sk, &fl6, NULL, false);
 410			if (IS_ERR(dst)) {
 411				sk->sk_err_soft = -PTR_ERR(dst);
 412				goto out;
 413			}
 414
 415		} else
 416			dst_hold(dst);
 417
 418		if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
 419			tcp_sync_mss(sk, dst_mtu(dst));
 420			tcp_simple_retransmit(sk);
 421		} /* else let the usual retransmit timer handle it */
 422		dst_release(dst);
 423		goto out;
 424	}
 425
 426	icmpv6_err_convert(type, code, &err);
 427
  428	/* Might be for a request_sock */
 429	switch (sk->sk_state) {
 430		struct request_sock *req, **prev;
 431	case TCP_LISTEN:
 432		if (sock_owned_by_user(sk))
 433			goto out;
 434
 435		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
 436					   &hdr->saddr, inet6_iif(skb));
 437		if (!req)
 438			goto out;
 439
 440		/* ICMPs are not backlogged, hence we cannot get
 441		 * an established socket here.
 442		 */
 443		WARN_ON(req->sk != NULL);
 444
 445		if (seq != tcp_rsk(req)->snt_isn) {
 446			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
 447			goto out;
 448		}
 449
 450		inet_csk_reqsk_queue_drop(sk, req, prev);
 451		goto out;
 452
 453	case TCP_SYN_SENT:
 454	case TCP_SYN_RECV:  /* Cannot happen.
  455			       It can, if SYNs are crossed. --ANK */
 456		if (!sock_owned_by_user(sk)) {
 457			sk->sk_err = err;
 458			sk->sk_error_report(sk);		/* Wake people up to see the error (see connect in sock.c) */
 459
 460			tcp_done(sk);
 461		} else
 462			sk->sk_err_soft = err;
 463		goto out;
 464	}
 465
 466	if (!sock_owned_by_user(sk) && np->recverr) {
 467		sk->sk_err = err;
 468		sk->sk_error_report(sk);
 469	} else
 470		sk->sk_err_soft = err;
 471
 472out:
 473	bh_unlock_sock(sk);
 474	sock_put(sk);
 475}
 476
 477
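/* Build and transmit a SYN|ACK for a pending request_sock: construct
 * the flow from the request, route it, let tcp_make_synack() build the
 * segment, checksum it and push it out with ip6_xmit().
 */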
 478static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
 479			      struct request_values *rvp)
 480{
 481	struct inet6_request_sock *treq = inet6_rsk(req);
 482	struct ipv6_pinfo *np = inet6_sk(sk);
 483	struct sk_buff * skb;
 484	struct ipv6_txoptions *opt = NULL;
 485	struct in6_addr * final_p, final;
 486	struct flowi6 fl6;
 487	struct dst_entry *dst;
 488	int err;
 489
 490	memset(&fl6, 0, sizeof(fl6));
 491	fl6.flowi6_proto = IPPROTO_TCP;
 492	ipv6_addr_copy(&fl6.daddr, &treq->rmt_addr);
 493	ipv6_addr_copy(&fl6.saddr, &treq->loc_addr);
 494	fl6.flowlabel = 0;
 495	fl6.flowi6_oif = treq->iif;
 496	fl6.flowi6_mark = sk->sk_mark;
 497	fl6.fl6_dport = inet_rsk(req)->rmt_port;
 498	fl6.fl6_sport = inet_rsk(req)->loc_port;
 499	security_req_classify_flow(req, flowi6_to_flowi(&fl6));
 500
 501	opt = np->opt;
 502	final_p = fl6_update_dst(&fl6, opt, &final);
 503
 504	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
 505	if (IS_ERR(dst)) {
 506		err = PTR_ERR(dst);
 507		dst = NULL;
 508		goto done;
 509	}
 510	skb = tcp_make_synack(sk, dst, req, rvp);
 511	err = -ENOMEM;
 512	if (skb) {
 513		__tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);
 514
 515		ipv6_addr_copy(&fl6.daddr, &treq->rmt_addr);
 516		err = ip6_xmit(sk, skb, &fl6, opt);
 517		err = net_xmit_eval(err);
 518	}
 519
 520done:
 521	if (opt && opt != np->opt)
 522		sock_kfree_s(sk, opt, opt->tot_len);
 523	dst_release(dst);
 524	return err;
 525}
 526
 527static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
 528			     struct request_values *rvp)
 529{
 530	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
 531	return tcp_v6_send_synack(sk, req, rvp);
 532}
 533
 534static void tcp_v6_reqsk_destructor(struct request_sock *req)
 535{
 536	kfree_skb(inet6_rsk(req)->pktopts);
 537}
 538
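/* TCP MD5 signature (RFC 2385) support.  Each socket keeps a small
 * array of per-peer keys in tp->md5sig_info; the helpers below look up,
 * add, delete and clear IPv6 entries, and tcp_v6_parse_md5_keys()
 * implements the TCP_MD5SIG setsockopt, dispatching IPv4-mapped peers
 * to the corresponding IPv4 helpers.
 */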
 539#ifdef CONFIG_TCP_MD5SIG
 540static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
 541						   const struct in6_addr *addr)
 542{
 543	struct tcp_sock *tp = tcp_sk(sk);
 544	int i;
 545
 546	BUG_ON(tp == NULL);
 547
 548	if (!tp->md5sig_info || !tp->md5sig_info->entries6)
 549		return NULL;
 550
 551	for (i = 0; i < tp->md5sig_info->entries6; i++) {
 552		if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, addr))
 553			return &tp->md5sig_info->keys6[i].base;
 554	}
 555	return NULL;
 556}
 557
 558static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
 559						struct sock *addr_sk)
 560{
 561	return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
 562}
 563
 564static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
 565						      struct request_sock *req)
 566{
 567	return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
 568}
 569
 570static int tcp_v6_md5_do_add(struct sock *sk, const struct in6_addr *peer,
 571			     char *newkey, u8 newkeylen)
 572{
 573	/* Add key to the list */
 574	struct tcp_md5sig_key *key;
 575	struct tcp_sock *tp = tcp_sk(sk);
 576	struct tcp6_md5sig_key *keys;
 577
 578	key = tcp_v6_md5_do_lookup(sk, peer);
 579	if (key) {
 580		/* modify existing entry - just update that one */
 581		kfree(key->key);
 582		key->key = newkey;
 583		key->keylen = newkeylen;
 584	} else {
 585		/* reallocate new list if current one is full. */
 586		if (!tp->md5sig_info) {
 587			tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info), GFP_ATOMIC);
 588			if (!tp->md5sig_info) {
 589				kfree(newkey);
 590				return -ENOMEM;
 591			}
 592			sk_nocaps_add(sk, NETIF_F_GSO_MASK);
 593		}
 594		if (tp->md5sig_info->entries6 == 0 &&
 595			tcp_alloc_md5sig_pool(sk) == NULL) {
 596			kfree(newkey);
 597			return -ENOMEM;
 598		}
 599		if (tp->md5sig_info->alloced6 == tp->md5sig_info->entries6) {
 600			keys = kmalloc((sizeof (tp->md5sig_info->keys6[0]) *
 601				       (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC);
 602
 603			if (!keys) {
 604				kfree(newkey);
 605				if (tp->md5sig_info->entries6 == 0)
 606					tcp_free_md5sig_pool();
 607				return -ENOMEM;
 608			}
 609
 610			if (tp->md5sig_info->entries6)
 611				memmove(keys, tp->md5sig_info->keys6,
 612					(sizeof (tp->md5sig_info->keys6[0]) *
 613					 tp->md5sig_info->entries6));
 614
 615			kfree(tp->md5sig_info->keys6);
 616			tp->md5sig_info->keys6 = keys;
 617			tp->md5sig_info->alloced6++;
 618		}
 619
 620		ipv6_addr_copy(&tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr,
 621			       peer);
 622		tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.key = newkey;
 623		tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.keylen = newkeylen;
 624
 625		tp->md5sig_info->entries6++;
 626	}
 627	return 0;
 628}
 629
 630static int tcp_v6_md5_add_func(struct sock *sk, struct sock *addr_sk,
 631			       u8 *newkey, __u8 newkeylen)
 632{
 633	return tcp_v6_md5_do_add(sk, &inet6_sk(addr_sk)->daddr,
 634				 newkey, newkeylen);
 635}
 636
 637static int tcp_v6_md5_do_del(struct sock *sk, const struct in6_addr *peer)
 638{
 639	struct tcp_sock *tp = tcp_sk(sk);
 640	int i;
 641
 642	for (i = 0; i < tp->md5sig_info->entries6; i++) {
 643		if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, peer)) {
 644			/* Free the key */
 645			kfree(tp->md5sig_info->keys6[i].base.key);
 646			tp->md5sig_info->entries6--;
 647
 648			if (tp->md5sig_info->entries6 == 0) {
 649				kfree(tp->md5sig_info->keys6);
 650				tp->md5sig_info->keys6 = NULL;
 651				tp->md5sig_info->alloced6 = 0;
 652				tcp_free_md5sig_pool();
 653			} else {
 654				/* shrink the database */
 655				if (tp->md5sig_info->entries6 != i)
 656					memmove(&tp->md5sig_info->keys6[i],
 657						&tp->md5sig_info->keys6[i+1],
 658						(tp->md5sig_info->entries6 - i)
 659						* sizeof (tp->md5sig_info->keys6[0]));
 660			}
 661			return 0;
 662		}
 663	}
 664	return -ENOENT;
 665}
 666
 667static void tcp_v6_clear_md5_list (struct sock *sk)
 668{
 669	struct tcp_sock *tp = tcp_sk(sk);
 670	int i;
 671
 672	if (tp->md5sig_info->entries6) {
 673		for (i = 0; i < tp->md5sig_info->entries6; i++)
 674			kfree(tp->md5sig_info->keys6[i].base.key);
 675		tp->md5sig_info->entries6 = 0;
 676		tcp_free_md5sig_pool();
 677	}
 678
 679	kfree(tp->md5sig_info->keys6);
 680	tp->md5sig_info->keys6 = NULL;
 681	tp->md5sig_info->alloced6 = 0;
 682
 683	if (tp->md5sig_info->entries4) {
 684		for (i = 0; i < tp->md5sig_info->entries4; i++)
 685			kfree(tp->md5sig_info->keys4[i].base.key);
 686		tp->md5sig_info->entries4 = 0;
 687		tcp_free_md5sig_pool();
 688	}
 689
 690	kfree(tp->md5sig_info->keys4);
 691	tp->md5sig_info->keys4 = NULL;
 692	tp->md5sig_info->alloced4 = 0;
 693}
 694
 695static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval,
 696				  int optlen)
 697{
 698	struct tcp_md5sig cmd;
 699	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
 700	u8 *newkey;
 701
 702	if (optlen < sizeof(cmd))
 703		return -EINVAL;
 704
 705	if (copy_from_user(&cmd, optval, sizeof(cmd)))
 706		return -EFAULT;
 707
 708	if (sin6->sin6_family != AF_INET6)
 709		return -EINVAL;
 710
 711	if (!cmd.tcpm_keylen) {
 712		if (!tcp_sk(sk)->md5sig_info)
 713			return -ENOENT;
 714		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
 715			return tcp_v4_md5_do_del(sk, sin6->sin6_addr.s6_addr32[3]);
 716		return tcp_v6_md5_do_del(sk, &sin6->sin6_addr);
 717	}
 718
 719	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
 720		return -EINVAL;
 721
 722	if (!tcp_sk(sk)->md5sig_info) {
 723		struct tcp_sock *tp = tcp_sk(sk);
 724		struct tcp_md5sig_info *p;
 725
 726		p = kzalloc(sizeof(struct tcp_md5sig_info), GFP_KERNEL);
 727		if (!p)
 728			return -ENOMEM;
 729
 730		tp->md5sig_info = p;
 731		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
 732	}
 733
 734	newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
 735	if (!newkey)
 736		return -ENOMEM;
 737	if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
 738		return tcp_v4_md5_do_add(sk, sin6->sin6_addr.s6_addr32[3],
 739					 newkey, cmd.tcpm_keylen);
 740	}
 741	return tcp_v6_md5_do_add(sk, &sin6->sin6_addr, newkey, cmd.tcpm_keylen);
 742}
 743
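/* Userspace side, for reference (a minimal sketch, not part of this
 * file): installing a key with the TCP_MD5SIG socket option ends up in
 * tcp_v6_parse_md5_keys() above.  Field names follow struct tcp_md5sig
 * from <linux/tcp.h>.
 *
 *	struct tcp_md5sig md5 = { };
 *	struct sockaddr_in6 *a = (struct sockaddr_in6 *)&md5.tcpm_addr;
 *
 *	a->sin6_family = AF_INET6;
 *	inet_pton(AF_INET6, "2001:db8::1", &a->sin6_addr);
 *	md5.tcpm_keylen = 6;
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	if (setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5)) < 0)
 *		perror("setsockopt(TCP_MD5SIG)");
 */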
 744static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
 745					const struct in6_addr *daddr,
 746					const struct in6_addr *saddr, int nbytes)
 747{
 748	struct tcp6_pseudohdr *bp;
 749	struct scatterlist sg;
 750
 751	bp = &hp->md5_blk.ip6;
 752	/* 1. TCP pseudo-header (RFC2460) */
 753	ipv6_addr_copy(&bp->saddr, saddr);
 754	ipv6_addr_copy(&bp->daddr, daddr);
 755	bp->protocol = cpu_to_be32(IPPROTO_TCP);
 756	bp->len = cpu_to_be32(nbytes);
 757
 758	sg_init_one(&sg, bp, sizeof(*bp));
 759	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
 760}
 761
 762static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
 763			       const struct in6_addr *daddr, struct in6_addr *saddr,
 764			       struct tcphdr *th)
 765{
 766	struct tcp_md5sig_pool *hp;
 767	struct hash_desc *desc;
 768
 769	hp = tcp_get_md5sig_pool();
 770	if (!hp)
 771		goto clear_hash_noput;
 772	desc = &hp->md5_desc;
 773
 774	if (crypto_hash_init(desc))
 775		goto clear_hash;
 776	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
 777		goto clear_hash;
 778	if (tcp_md5_hash_header(hp, th))
 779		goto clear_hash;
 780	if (tcp_md5_hash_key(hp, key))
 781		goto clear_hash;
 782	if (crypto_hash_final(desc, md5_hash))
 783		goto clear_hash;
 784
 785	tcp_put_md5sig_pool();
 786	return 0;
 787
 788clear_hash:
 789	tcp_put_md5sig_pool();
 790clear_hash_noput:
 791	memset(md5_hash, 0, 16);
 792	return 1;
 793}
 794
 795static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
 796			       struct sock *sk, struct request_sock *req,
 797			       struct sk_buff *skb)
 798{
 799	const struct in6_addr *saddr, *daddr;
 800	struct tcp_md5sig_pool *hp;
 801	struct hash_desc *desc;
 802	struct tcphdr *th = tcp_hdr(skb);
 803
 804	if (sk) {
 805		saddr = &inet6_sk(sk)->saddr;
 806		daddr = &inet6_sk(sk)->daddr;
 807	} else if (req) {
 808		saddr = &inet6_rsk(req)->loc_addr;
 809		daddr = &inet6_rsk(req)->rmt_addr;
 810	} else {
 811		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
 812		saddr = &ip6h->saddr;
 813		daddr = &ip6h->daddr;
 814	}
 815
 816	hp = tcp_get_md5sig_pool();
 817	if (!hp)
 818		goto clear_hash_noput;
 819	desc = &hp->md5_desc;
 820
 821	if (crypto_hash_init(desc))
 822		goto clear_hash;
 823
 824	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
 825		goto clear_hash;
 826	if (tcp_md5_hash_header(hp, th))
 827		goto clear_hash;
 828	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
 829		goto clear_hash;
 830	if (tcp_md5_hash_key(hp, key))
 831		goto clear_hash;
 832	if (crypto_hash_final(desc, md5_hash))
 833		goto clear_hash;
 834
 835	tcp_put_md5sig_pool();
 836	return 0;
 837
 838clear_hash:
 839	tcp_put_md5sig_pool();
 840clear_hash_noput:
 841	memset(md5_hash, 0, 16);
 842	return 1;
 843}
 844
 845static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb)
 846{
 847	__u8 *hash_location = NULL;
 848	struct tcp_md5sig_key *hash_expected;
 849	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
 850	struct tcphdr *th = tcp_hdr(skb);
 851	int genhash;
 852	u8 newhash[16];
 853
 854	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
 855	hash_location = tcp_parse_md5sig_option(th);
 856
 857	/* We've parsed the options - do we have a hash? */
 858	if (!hash_expected && !hash_location)
 859		return 0;
 860
 861	if (hash_expected && !hash_location) {
 862		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
 863		return 1;
 864	}
 865
 866	if (!hash_expected && hash_location) {
 867		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
 868		return 1;
 869	}
 870
 871	/* check the signature */
 872	genhash = tcp_v6_md5_hash_skb(newhash,
 873				      hash_expected,
 874				      NULL, NULL, skb);
 875
 876	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
 877		if (net_ratelimit()) {
 878			printk(KERN_INFO "MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
 879			       genhash ? "failed" : "mismatch",
 880			       &ip6h->saddr, ntohs(th->source),
 881			       &ip6h->daddr, ntohs(th->dest));
 882		}
 883		return 1;
 884	}
 885	return 0;
 886}
 887#endif
 888
 889struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
 890	.family		=	AF_INET6,
 891	.obj_size	=	sizeof(struct tcp6_request_sock),
 892	.rtx_syn_ack	=	tcp_v6_rtx_synack,
 893	.send_ack	=	tcp_v6_reqsk_send_ack,
 894	.destructor	=	tcp_v6_reqsk_destructor,
 895	.send_reset	=	tcp_v6_send_reset,
 896	.syn_ack_timeout = 	tcp_syn_ack_timeout,
 897};
 898
 899#ifdef CONFIG_TCP_MD5SIG
 900static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
 901	.md5_lookup	=	tcp_v6_reqsk_md5_lookup,
 902	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
 903};
 904#endif
 905
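/* Fill in the TCP checksum.  With CHECKSUM_PARTIAL the pseudo-header
 * complement is stored in th->check and csum_start/csum_offset tell the
 * device (or the software fallback) where to finish the sum; otherwise
 * the full checksum is computed here.
 */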
 906static void __tcp_v6_send_check(struct sk_buff *skb,
 907				const struct in6_addr *saddr, const struct in6_addr *daddr)
 908{
 909	struct tcphdr *th = tcp_hdr(skb);
 910
 911	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 912		th->check = ~tcp_v6_check(skb->len, saddr, daddr, 0);
 913		skb->csum_start = skb_transport_header(skb) - skb->head;
 914		skb->csum_offset = offsetof(struct tcphdr, check);
 915	} else {
 916		th->check = tcp_v6_check(skb->len, saddr, daddr,
 917					 csum_partial(th, th->doff << 2,
 918						      skb->csum));
 919	}
 920}
 921
 922static void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
 923{
 924	struct ipv6_pinfo *np = inet6_sk(sk);
 925
 926	__tcp_v6_send_check(skb, &np->saddr, &np->daddr);
 927}
 928
 929static int tcp_v6_gso_send_check(struct sk_buff *skb)
 930{
 931	const struct ipv6hdr *ipv6h;
 932	struct tcphdr *th;
 933
 934	if (!pskb_may_pull(skb, sizeof(*th)))
 935		return -EINVAL;
 936
 937	ipv6h = ipv6_hdr(skb);
 938	th = tcp_hdr(skb);
 939
 940	th->check = 0;
 941	skb->ip_summed = CHECKSUM_PARTIAL;
 942	__tcp_v6_send_check(skb, &ipv6h->saddr, &ipv6h->daddr);
 943	return 0;
 944}
 945
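/* GRO hooks for TCPv6.  tcp6_gro_receive() verifies a CHECKSUM_COMPLETE
 * checksum against the pseudo-header and flushes packets whose checksum
 * cannot be validated; tcp6_gro_complete() rewrites th->check as the
 * pseudo-header complement and tags the skb SKB_GSO_TCPV6 before the
 * generic tcp_gro_complete().
 */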
 946static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
 947					 struct sk_buff *skb)
 948{
 949	const struct ipv6hdr *iph = skb_gro_network_header(skb);
 950
 951	switch (skb->ip_summed) {
 952	case CHECKSUM_COMPLETE:
 953		if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr,
 954				  skb->csum)) {
 955			skb->ip_summed = CHECKSUM_UNNECESSARY;
 956			break;
 957		}
 958
 959		/* fall through */
 960	case CHECKSUM_NONE:
 961		NAPI_GRO_CB(skb)->flush = 1;
 962		return NULL;
 963	}
 964
 965	return tcp_gro_receive(head, skb);
 966}
 967
 968static int tcp6_gro_complete(struct sk_buff *skb)
 969{
 970	const struct ipv6hdr *iph = ipv6_hdr(skb);
 971	struct tcphdr *th = tcp_hdr(skb);
 972
 973	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
 974				  &iph->saddr, &iph->daddr, 0);
 975	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
 976
 977	return tcp_gro_complete(skb);
 978}
 979
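/* Send a stand-alone RST or ACK on the per-namespace TCP control socket:
 * addresses and ports of the incoming segment are swapped, optional
 * timestamp and MD5 options are appended, and the segment is routed and
 * transmitted without any connection state.
 */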
 980static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
 981				 u32 ts, struct tcp_md5sig_key *key, int rst)
 982{
 983	struct tcphdr *th = tcp_hdr(skb), *t1;
 984	struct sk_buff *buff;
 985	struct flowi6 fl6;
 986	struct net *net = dev_net(skb_dst(skb)->dev);
 987	struct sock *ctl_sk = net->ipv6.tcp_sk;
 988	unsigned int tot_len = sizeof(struct tcphdr);
 989	struct dst_entry *dst;
 990	__be32 *topt;
 991
 992	if (ts)
 993		tot_len += TCPOLEN_TSTAMP_ALIGNED;
 994#ifdef CONFIG_TCP_MD5SIG
 995	if (key)
 996		tot_len += TCPOLEN_MD5SIG_ALIGNED;
 997#endif
 998
 999	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
1000			 GFP_ATOMIC);
1001	if (buff == NULL)
1002		return;
1003
1004	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
1005
1006	t1 = (struct tcphdr *) skb_push(buff, tot_len);
1007	skb_reset_transport_header(buff);
1008
1009	/* Swap the send and the receive. */
1010	memset(t1, 0, sizeof(*t1));
1011	t1->dest = th->source;
1012	t1->source = th->dest;
1013	t1->doff = tot_len / 4;
1014	t1->seq = htonl(seq);
1015	t1->ack_seq = htonl(ack);
1016	t1->ack = !rst || !th->ack;
1017	t1->rst = rst;
1018	t1->window = htons(win);
1019
1020	topt = (__be32 *)(t1 + 1);
1021
1022	if (ts) {
1023		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
1024				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
1025		*topt++ = htonl(tcp_time_stamp);
1026		*topt++ = htonl(ts);
1027	}
1028
1029#ifdef CONFIG_TCP_MD5SIG
1030	if (key) {
1031		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
1032				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
1033		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
1034				    &ipv6_hdr(skb)->saddr,
1035				    &ipv6_hdr(skb)->daddr, t1);
1036	}
1037#endif
1038
1039	memset(&fl6, 0, sizeof(fl6));
1040	ipv6_addr_copy(&fl6.daddr, &ipv6_hdr(skb)->saddr);
1041	ipv6_addr_copy(&fl6.saddr, &ipv6_hdr(skb)->daddr);
1042
1043	buff->ip_summed = CHECKSUM_PARTIAL;
1044	buff->csum = 0;
1045
1046	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
1047
1048	fl6.flowi6_proto = IPPROTO_TCP;
1049	fl6.flowi6_oif = inet6_iif(skb);
1050	fl6.fl6_dport = t1->dest;
1051	fl6.fl6_sport = t1->source;
1052	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
1053
 1054	/* Pass a socket to ip6_dst_lookup even when it is for an RST;
 1055	 * the underlying function will use it to retrieve the network
 1056	 * namespace.
 1057	 */
1058	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL, false);
1059	if (!IS_ERR(dst)) {
1060		skb_dst_set(buff, dst);
1061		ip6_xmit(ctl_sk, buff, &fl6, NULL);
1062		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
1063		if (rst)
1064			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
1065		return;
1066	}
1067
1068	kfree_skb(buff);
1069}
1070
1071static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
1072{
1073	struct tcphdr *th = tcp_hdr(skb);
1074	u32 seq = 0, ack_seq = 0;
1075	struct tcp_md5sig_key *key = NULL;
1076
1077	if (th->rst)
1078		return;
1079
1080	if (!ipv6_unicast_destination(skb))
1081		return;
1082
1083#ifdef CONFIG_TCP_MD5SIG
1084	if (sk)
1085		key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr);
1086#endif
1087
1088	if (th->ack)
1089		seq = ntohl(th->ack_seq);
1090	else
1091		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
1092			  (th->doff << 2);
1093
1094	tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1);
1095}
1096
1097static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts,
1098			    struct tcp_md5sig_key *key)
1099{
1100	tcp_v6_send_response(skb, seq, ack, win, ts, key, 0);
1101}
1102
1103static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
1104{
1105	struct inet_timewait_sock *tw = inet_twsk(sk);
1106	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
1107
1108	tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
1109			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
1110			tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw));
1111
1112	inet_twsk_put(tw);
1113}
1114
1115static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
1116				  struct request_sock *req)
1117{
1118	tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent,
1119			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr));
1120}
1121
1122
1123static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
1124{
1125	struct request_sock *req, **prev;
1126	const struct tcphdr *th = tcp_hdr(skb);
1127	struct sock *nsk;
1128
1129	/* Find possible connection requests. */
1130	req = inet6_csk_search_req(sk, &prev, th->source,
1131				   &ipv6_hdr(skb)->saddr,
1132				   &ipv6_hdr(skb)->daddr, inet6_iif(skb));
1133	if (req)
1134		return tcp_check_req(sk, skb, req, prev);
1135
1136	nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
1137			&ipv6_hdr(skb)->saddr, th->source,
1138			&ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));
1139
1140	if (nsk) {
1141		if (nsk->sk_state != TCP_TIME_WAIT) {
1142			bh_lock_sock(nsk);
1143			return nsk;
1144		}
1145		inet_twsk_put(inet_twsk(nsk));
1146		return NULL;
1147	}
1148
1149#ifdef CONFIG_SYN_COOKIES
1150	if (!th->syn)
1151		sk = cookie_v6_check(sk, skb);
1152#endif
1153	return sk;
1154}
1155
1156/* FIXME: this is substantially similar to the ipv4 code.
1157 * Can some kind of merge be done? -- erics
1158 */
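/* Passive open: handle an incoming SYN.  Allocates an inet6
 * request_sock, parses TCP options (including the cookie-transaction
 * extension), optionally answers with a syncookie under SYN flood,
 * applies the tw_recycle PAWS check against the inet_peer cache,
 * chooses an ISN and sends the SYN|ACK before queueing the request on
 * the listener.
 */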
1159static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1160{
1161	struct tcp_extend_values tmp_ext;
1162	struct tcp_options_received tmp_opt;
1163	u8 *hash_location;
1164	struct request_sock *req;
1165	struct inet6_request_sock *treq;
1166	struct ipv6_pinfo *np = inet6_sk(sk);
1167	struct tcp_sock *tp = tcp_sk(sk);
1168	__u32 isn = TCP_SKB_CB(skb)->when;
1169	struct dst_entry *dst = NULL;
1170	int want_cookie = 0;
1171
1172	if (skb->protocol == htons(ETH_P_IP))
1173		return tcp_v4_conn_request(sk, skb);
1174
1175	if (!ipv6_unicast_destination(skb))
1176		goto drop;
1177
1178	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
1179		want_cookie = tcp_syn_flood_action(sk, skb, "TCPv6");
1180		if (!want_cookie)
1181			goto drop;
1182	}
1183
1184	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
1185		goto drop;
1186
1187	req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
1188	if (req == NULL)
1189		goto drop;
1190
1191#ifdef CONFIG_TCP_MD5SIG
1192	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
1193#endif
1194
1195	tcp_clear_options(&tmp_opt);
1196	tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
1197	tmp_opt.user_mss = tp->rx_opt.user_mss;
1198	tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
1199
1200	if (tmp_opt.cookie_plus > 0 &&
1201	    tmp_opt.saw_tstamp &&
1202	    !tp->rx_opt.cookie_out_never &&
1203	    (sysctl_tcp_cookie_size > 0 ||
1204	     (tp->cookie_values != NULL &&
1205	      tp->cookie_values->cookie_desired > 0))) {
1206		u8 *c;
1207		u32 *d;
1208		u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
1209		int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;
1210
1211		if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
1212			goto drop_and_free;
1213
1214		/* Secret recipe starts with IP addresses */
1215		d = (__force u32 *)&ipv6_hdr(skb)->daddr.s6_addr32[0];
1216		*mess++ ^= *d++;
1217		*mess++ ^= *d++;
1218		*mess++ ^= *d++;
1219		*mess++ ^= *d++;
1220		d = (__force u32 *)&ipv6_hdr(skb)->saddr.s6_addr32[0];
1221		*mess++ ^= *d++;
1222		*mess++ ^= *d++;
1223		*mess++ ^= *d++;
1224		*mess++ ^= *d++;
1225
1226		/* plus variable length Initiator Cookie */
1227		c = (u8 *)mess;
1228		while (l-- > 0)
1229			*c++ ^= *hash_location++;
1230
1231		want_cookie = 0;	/* not our kind of cookie */
1232		tmp_ext.cookie_out_never = 0; /* false */
1233		tmp_ext.cookie_plus = tmp_opt.cookie_plus;
1234	} else if (!tp->rx_opt.cookie_in_always) {
1235		/* redundant indications, but ensure initialization. */
1236		tmp_ext.cookie_out_never = 1; /* true */
1237		tmp_ext.cookie_plus = 0;
1238	} else {
1239		goto drop_and_free;
1240	}
1241	tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;
1242
1243	if (want_cookie && !tmp_opt.saw_tstamp)
1244		tcp_clear_options(&tmp_opt);
1245
1246	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
1247	tcp_openreq_init(req, &tmp_opt, skb);
1248
1249	treq = inet6_rsk(req);
1250	ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr);
1251	ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr);
1252	if (!want_cookie || tmp_opt.tstamp_ok)
1253		TCP_ECN_create_request(req, tcp_hdr(skb));
1254
1255	if (!isn) {
1256		struct inet_peer *peer = NULL;
1257
1258		if (ipv6_opt_accepted(sk, skb) ||
1259		    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
1260		    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
1261			atomic_inc(&skb->users);
1262			treq->pktopts = skb;
1263		}
1264		treq->iif = sk->sk_bound_dev_if;
1265
1266		/* So that link locals have meaning */
1267		if (!sk->sk_bound_dev_if &&
1268		    ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
1269			treq->iif = inet6_iif(skb);
1270
1271		if (want_cookie) {
1272			isn = cookie_v6_init_sequence(sk, skb, &req->mss);
1273			req->cookie_ts = tmp_opt.tstamp_ok;
1274			goto have_isn;
1275		}
1276
1277		/* VJ's idea. We save last timestamp seen
1278		 * from the destination in peer table, when entering
1279		 * state TIME-WAIT, and check against it before
1280		 * accepting new connection request.
1281		 *
1282		 * If "isn" is not zero, this request hit alive
1283		 * timewait bucket, so that all the necessary checks
1284		 * are made in the function processing timewait state.
1285		 */
1286		if (tmp_opt.saw_tstamp &&
1287		    tcp_death_row.sysctl_tw_recycle &&
1288		    (dst = inet6_csk_route_req(sk, req)) != NULL &&
1289		    (peer = rt6_get_peer((struct rt6_info *)dst)) != NULL &&
1290		    ipv6_addr_equal((struct in6_addr *)peer->daddr.addr.a6,
1291				    &treq->rmt_addr)) {
1292			inet_peer_refcheck(peer);
1293			if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
1294			    (s32)(peer->tcp_ts - req->ts_recent) >
1295							TCP_PAWS_WINDOW) {
1296				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
1297				goto drop_and_release;
1298			}
1299		}
1300		/* Kill the following clause, if you dislike this way. */
1301		else if (!sysctl_tcp_syncookies &&
1302			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
1303			  (sysctl_max_syn_backlog >> 2)) &&
1304			 (!peer || !peer->tcp_ts_stamp) &&
1305			 (!dst || !dst_metric(dst, RTAX_RTT))) {
1306			/* Without syncookies last quarter of
1307			 * backlog is filled with destinations,
1308			 * proven to be alive.
1309			 * It means that we continue to communicate
1310			 * to destinations, already remembered
1311			 * to the moment of synflood.
1312			 */
1313			LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI6/%u\n",
1314				       &treq->rmt_addr, ntohs(tcp_hdr(skb)->source));
1315			goto drop_and_release;
1316		}
1317
1318		isn = tcp_v6_init_sequence(skb);
1319	}
1320have_isn:
1321	tcp_rsk(req)->snt_isn = isn;
1322	tcp_rsk(req)->snt_synack = tcp_time_stamp;
1323
1324	security_inet_conn_request(sk, skb, req);
1325
1326	if (tcp_v6_send_synack(sk, req,
1327			       (struct request_values *)&tmp_ext) ||
1328	    want_cookie)
1329		goto drop_and_free;
1330
1331	inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1332	return 0;
1333
1334drop_and_release:
1335	dst_release(dst);
1336drop_and_free:
1337	reqsk_free(req);
1338drop:
1339	return 0; /* don't send reset */
1340}
1341
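/* Create the child socket once the handshake completes.  IPv4-mapped
 * requests are handed to tcp_v4_syn_recv_sock() and the result is
 * patched to use the mapped af_ops; native requests get a route, a
 * socket from tcp_create_openreq_child(), cloned IPv6 options and
 * pktoptions, MSS setup from the route, an inherited MD5 key (if any)
 * and finally a slot in the established hash.
 */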
1342static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1343					  struct request_sock *req,
1344					  struct dst_entry *dst)
1345{
1346	struct inet6_request_sock *treq;
1347	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
1348	struct tcp6_sock *newtcp6sk;
1349	struct inet_sock *newinet;
1350	struct tcp_sock *newtp;
1351	struct sock *newsk;
1352	struct ipv6_txoptions *opt;
1353#ifdef CONFIG_TCP_MD5SIG
1354	struct tcp_md5sig_key *key;
1355#endif
1356
1357	if (skb->protocol == htons(ETH_P_IP)) {
1358		/*
1359		 *	v6 mapped
1360		 */
1361
1362		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);
1363
1364		if (newsk == NULL)
1365			return NULL;
1366
1367		newtcp6sk = (struct tcp6_sock *)newsk;
1368		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1369
1370		newinet = inet_sk(newsk);
1371		newnp = inet6_sk(newsk);
1372		newtp = tcp_sk(newsk);
1373
1374		memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1375
1376		ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr);
1377
1378		ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);
1379
1380		ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);
1381
1382		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1383		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
1384#ifdef CONFIG_TCP_MD5SIG
1385		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1386#endif
1387
1388		newnp->ipv6_ac_list = NULL;
1389		newnp->ipv6_fl_list = NULL;
1390		newnp->pktoptions  = NULL;
1391		newnp->opt	   = NULL;
1392		newnp->mcast_oif   = inet6_iif(skb);
1393		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
1394
1395		/*
1396		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1397		 * here, tcp_create_openreq_child now does this for us, see the comment in
1398		 * that function for the gory details. -acme
1399		 */
1400
1401		/* It is tricky place. Until this moment IPv4 tcp
1402		   worked with IPv6 icsk.icsk_af_ops.
1403		   Sync it now.
1404		 */
1405		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
1406
1407		return newsk;
1408	}
1409
1410	treq = inet6_rsk(req);
1411	opt = np->opt;
1412
1413	if (sk_acceptq_is_full(sk))
1414		goto out_overflow;
1415
1416	if (!dst) {
1417		dst = inet6_csk_route_req(sk, req);
1418		if (!dst)
1419			goto out;
1420	}
1421
1422	newsk = tcp_create_openreq_child(sk, req, skb);
1423	if (newsk == NULL)
1424		goto out_nonewsk;
1425
1426	/*
1427	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1428	 * count here, tcp_create_openreq_child now does this for us, see the
1429	 * comment in that function for the gory details. -acme
1430	 */
1431
1432	newsk->sk_gso_type = SKB_GSO_TCPV6;
1433	__ip6_dst_store(newsk, dst, NULL, NULL);
1434
1435	newtcp6sk = (struct tcp6_sock *)newsk;
1436	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1437
1438	newtp = tcp_sk(newsk);
1439	newinet = inet_sk(newsk);
1440	newnp = inet6_sk(newsk);
1441
1442	memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1443
1444	ipv6_addr_copy(&newnp->daddr, &treq->rmt_addr);
1445	ipv6_addr_copy(&newnp->saddr, &treq->loc_addr);
1446	ipv6_addr_copy(&newnp->rcv_saddr, &treq->loc_addr);
1447	newsk->sk_bound_dev_if = treq->iif;
1448
1449	/* Now IPv6 options...
1450
1451	   First: no IPv4 options.
1452	 */
1453	newinet->inet_opt = NULL;
1454	newnp->ipv6_ac_list = NULL;
1455	newnp->ipv6_fl_list = NULL;
1456
1457	/* Clone RX bits */
1458	newnp->rxopt.all = np->rxopt.all;
1459
1460	/* Clone pktoptions received with SYN */
1461	newnp->pktoptions = NULL;
1462	if (treq->pktopts != NULL) {
1463		newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
1464		kfree_skb(treq->pktopts);
1465		treq->pktopts = NULL;
1466		if (newnp->pktoptions)
1467			skb_set_owner_r(newnp->pktoptions, newsk);
1468	}
1469	newnp->opt	  = NULL;
1470	newnp->mcast_oif  = inet6_iif(skb);
1471	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1472
1473	/* Clone native IPv6 options from listening socket (if any)
1474
1475	   Yes, keeping reference count would be much more clever,
 1476	   but we do one more thing here: reattach optmem
1477	   to newsk.
1478	 */
1479	if (opt) {
1480		newnp->opt = ipv6_dup_options(newsk, opt);
1481		if (opt != np->opt)
1482			sock_kfree_s(sk, opt, opt->tot_len);
1483	}
1484
1485	inet_csk(newsk)->icsk_ext_hdr_len = 0;
1486	if (newnp->opt)
1487		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
1488						     newnp->opt->opt_flen);
1489
1490	tcp_mtup_init(newsk);
1491	tcp_sync_mss(newsk, dst_mtu(dst));
1492	newtp->advmss = dst_metric_advmss(dst);
1493	tcp_initialize_rcv_mss(newsk);
1494	if (tcp_rsk(req)->snt_synack)
1495		tcp_valid_rtt_meas(newsk,
1496		    tcp_time_stamp - tcp_rsk(req)->snt_synack);
1497	newtp->total_retrans = req->retrans;
1498
1499	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
1500	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
1501
1502#ifdef CONFIG_TCP_MD5SIG
1503	/* Copy over the MD5 key from the original socket */
1504	if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
1505		/* We're using one, so create a matching key
1506		 * on the newsk structure. If we fail to get
1507		 * memory, then we end up not copying the key
1508		 * across. Shucks.
1509		 */
1510		char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
1511		if (newkey != NULL)
1512			tcp_v6_md5_do_add(newsk, &newnp->daddr,
1513					  newkey, key->keylen);
1514	}
1515#endif
1516
1517	if (__inet_inherit_port(sk, newsk) < 0) {
1518		sock_put(newsk);
1519		goto out;
1520	}
1521	__inet6_hash(newsk, NULL);
1522
1523	return newsk;
1524
1525out_overflow:
1526	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1527out_nonewsk:
1528	if (opt && opt != np->opt)
1529		sock_kfree_s(sk, opt, opt->tot_len);
1530	dst_release(dst);
1531out:
1532	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1533	return NULL;
1534}
1535
1536static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
1537{
1538	if (skb->ip_summed == CHECKSUM_COMPLETE) {
1539		if (!tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr,
1540				  &ipv6_hdr(skb)->daddr, skb->csum)) {
1541			skb->ip_summed = CHECKSUM_UNNECESSARY;
1542			return 0;
1543		}
1544	}
1545
1546	skb->csum = ~csum_unfold(tcp_v6_check(skb->len,
1547					      &ipv6_hdr(skb)->saddr,
1548					      &ipv6_hdr(skb)->daddr, 0));
1549
1550	if (skb->len <= 76) {
1551		return __skb_checksum_complete(skb);
1552	}
1553	return 0;
1554}
1555
 1556/* The socket must have its spinlock held when we get
1557 * here.
1558 *
1559 * We have a potential double-lock case here, so even when
1560 * doing backlog processing we use the BH locking scheme.
1561 * This is because we cannot sleep with the original spinlock
1562 * held.
1563 */
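/* Per-segment receive work: IPv4-mapped traffic is handed to
 * tcp_v4_do_rcv(), ESTABLISHED sockets take the fast path through
 * tcp_rcv_established(), listeners go through tcp_v6_hnd_req(), and any
 * requested IPV6_PKTOPTIONS are latched from the last in-order segment.
 */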
1564static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1565{
1566	struct ipv6_pinfo *np = inet6_sk(sk);
1567	struct tcp_sock *tp;
1568	struct sk_buff *opt_skb = NULL;
1569
1570	/* Imagine: socket is IPv6. IPv4 packet arrives,
1571	   goes to IPv4 receive handler and backlogged.
1572	   From backlog it always goes here. Kerboom...
1573	   Fortunately, tcp_rcv_established and rcv_established
1574	   handle them correctly, but it is not case with
1575	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
1576	 */
1577
1578	if (skb->protocol == htons(ETH_P_IP))
1579		return tcp_v4_do_rcv(sk, skb);
1580
1581#ifdef CONFIG_TCP_MD5SIG
1582	if (tcp_v6_inbound_md5_hash (sk, skb))
1583		goto discard;
1584#endif
1585
1586	if (sk_filter(sk, skb))
1587		goto discard;
1588
1589	/*
1590	 *	socket locking is here for SMP purposes as backlog rcv
1591	 *	is currently called with bh processing disabled.
1592	 */
1593
1594	/* Do Stevens' IPV6_PKTOPTIONS.
1595
1596	   Yes, guys, it is the only place in our code, where we
1597	   may make it not affecting IPv4.
1598	   The rest of code is protocol independent,
1599	   and I do not like idea to uglify IPv4.
1600
1601	   Actually, all the idea behind IPV6_PKTOPTIONS
1602	   looks not very well thought. For now we latch
1603	   options, received in the last packet, enqueued
1604	   by tcp. Feel free to propose better solution.
1605					       --ANK (980728)
1606	 */
1607	if (np->rxopt.all)
1608		opt_skb = skb_clone(skb, GFP_ATOMIC);
1609
1610	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1611		sock_rps_save_rxhash(sk, skb->rxhash);
1612		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
1613			goto reset;
1614		if (opt_skb)
1615			goto ipv6_pktoptions;
1616		return 0;
1617	}
1618
1619	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1620		goto csum_err;
1621
1622	if (sk->sk_state == TCP_LISTEN) {
1623		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
1624		if (!nsk)
1625			goto discard;
1626
1627		/*
1628		 * Queue it on the new socket if the new socket is active,
 1629		 * otherwise we just short-circuit this and continue with
 1630		 * the new socket.
1631		 */
1632		if(nsk != sk) {
1633			sock_rps_save_rxhash(nsk, skb->rxhash);
1634			if (tcp_child_process(sk, nsk, skb))
1635				goto reset;
1636			if (opt_skb)
1637				__kfree_skb(opt_skb);
1638			return 0;
1639		}
1640	} else
1641		sock_rps_save_rxhash(sk, skb->rxhash);
1642
1643	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
1644		goto reset;
1645	if (opt_skb)
1646		goto ipv6_pktoptions;
1647	return 0;
1648
1649reset:
1650	tcp_v6_send_reset(sk, skb);
1651discard:
1652	if (opt_skb)
1653		__kfree_skb(opt_skb);
1654	kfree_skb(skb);
1655	return 0;
1656csum_err:
1657	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1658	goto discard;
1659
1660
1661ipv6_pktoptions:
1662	/* Do you ask, what is it?
1663
1664	   1. skb was enqueued by tcp.
1665	   2. skb is added to tail of read queue, rather than out of order.
1666	   3. socket is not in passive state.
1667	   4. Finally, it really contains options, which user wants to receive.
1668	 */
1669	tp = tcp_sk(sk);
1670	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1671	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
1672		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
1673			np->mcast_oif = inet6_iif(opt_skb);
1674		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1675			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
1676		if (ipv6_opt_accepted(sk, opt_skb)) {
1677			skb_set_owner_r(opt_skb, sk);
1678			opt_skb = xchg(&np->pktoptions, opt_skb);
1679		} else {
1680			__kfree_skb(opt_skb);
1681			opt_skb = xchg(&np->pktoptions, NULL);
1682		}
1683	}
1684
1685	kfree_skb(opt_skb);
1686	return 0;
1687}
1688
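/* Main receive entry point, registered for IPPROTO_TCP.  Validates the
 * header and checksum, fills in TCP_SKB_CB(), looks the socket up in
 * the established/listener hashes, handles TIME_WAIT specially, applies
 * the min-hopcount and XFRM policy checks, and then processes the
 * segment directly, via the prequeue, or on the backlog depending on
 * whether the socket is currently owned by a user context.
 */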
1689static int tcp_v6_rcv(struct sk_buff *skb)
1690{
1691	struct tcphdr *th;
1692	const struct ipv6hdr *hdr;
1693	struct sock *sk;
1694	int ret;
1695	struct net *net = dev_net(skb->dev);
1696
1697	if (skb->pkt_type != PACKET_HOST)
1698		goto discard_it;
1699
1700	/*
1701	 *	Count it even if it's bad.
1702	 */
1703	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1704
1705	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1706		goto discard_it;
1707
1708	th = tcp_hdr(skb);
1709
1710	if (th->doff < sizeof(struct tcphdr)/4)
1711		goto bad_packet;
1712	if (!pskb_may_pull(skb, th->doff*4))
1713		goto discard_it;
1714
1715	if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
1716		goto bad_packet;
1717
1718	th = tcp_hdr(skb);
1719	hdr = ipv6_hdr(skb);
1720	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1721	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1722				    skb->len - th->doff*4);
1723	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1724	TCP_SKB_CB(skb)->when = 0;
1725	TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(hdr);
1726	TCP_SKB_CB(skb)->sacked = 0;
1727
1728	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
1729	if (!sk)
1730		goto no_tcp_socket;
1731
1732process:
1733	if (sk->sk_state == TCP_TIME_WAIT)
1734		goto do_time_wait;
1735
1736	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
1737		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1738		goto discard_and_relse;
1739	}
1740
1741	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1742		goto discard_and_relse;
1743
1744	if (sk_filter(sk, skb))
1745		goto discard_and_relse;
1746
1747	skb->dev = NULL;
1748
1749	bh_lock_sock_nested(sk);
1750	ret = 0;
1751	if (!sock_owned_by_user(sk)) {
1752#ifdef CONFIG_NET_DMA
1753		struct tcp_sock *tp = tcp_sk(sk);
1754		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1755			tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
1756		if (tp->ucopy.dma_chan)
1757			ret = tcp_v6_do_rcv(sk, skb);
1758		else
1759#endif
1760		{
1761			if (!tcp_prequeue(sk, skb))
1762				ret = tcp_v6_do_rcv(sk, skb);
1763		}
1764	} else if (unlikely(sk_add_backlog(sk, skb))) {
1765		bh_unlock_sock(sk);
1766		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1767		goto discard_and_relse;
1768	}
1769	bh_unlock_sock(sk);
1770
1771	sock_put(sk);
1772	return ret ? -1 : 0;
1773
1774no_tcp_socket:
1775	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1776		goto discard_it;
1777
1778	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1779bad_packet:
1780		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1781	} else {
1782		tcp_v6_send_reset(NULL, skb);
1783	}
1784
1785discard_it:
1786
1787	/*
1788	 *	Discard frame
1789	 */
1790
1791	kfree_skb(skb);
1792	return 0;
1793
1794discard_and_relse:
1795	sock_put(sk);
1796	goto discard_it;
1797
1798do_time_wait:
1799	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1800		inet_twsk_put(inet_twsk(sk));
1801		goto discard_it;
1802	}
1803
1804	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1805		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1806		inet_twsk_put(inet_twsk(sk));
1807		goto discard_it;
1808	}
1809
1810	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1811	case TCP_TW_SYN:
1812	{
1813		struct sock *sk2;
1814
1815		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
1816					    &ipv6_hdr(skb)->daddr,
1817					    ntohs(th->dest), inet6_iif(skb));
1818		if (sk2 != NULL) {
1819			struct inet_timewait_sock *tw = inet_twsk(sk);
1820			inet_twsk_deschedule(tw, &tcp_death_row);
1821			inet_twsk_put(tw);
1822			sk = sk2;
1823			goto process;
1824		}
1825		/* Fall through to ACK */
1826	}
1827	case TCP_TW_ACK:
1828		tcp_v6_timewait_ack(sk, skb);
1829		break;
1830	case TCP_TW_RST:
1831		goto no_tcp_socket;
1832	case TCP_TW_SUCCESS:;
1833	}
1834	goto discard_it;
1835}
1836
1837static struct inet_peer *tcp_v6_get_peer(struct sock *sk, bool *release_it)
1838{
1839	struct rt6_info *rt = (struct rt6_info *) __sk_dst_get(sk);
1840	struct ipv6_pinfo *np = inet6_sk(sk);
1841	struct inet_peer *peer;
1842
1843	if (!rt ||
1844	    !ipv6_addr_equal(&np->daddr, &rt->rt6i_dst.addr)) {
1845		peer = inet_getpeer_v6(&np->daddr, 1);
1846		*release_it = true;
1847	} else {
1848		if (!rt->rt6i_peer)
1849			rt6_bind_peer(rt, 1);
1850		peer = rt->rt6i_peer;
1851		*release_it = false;
1852	}
1853
1854	return peer;
1855}
1856
1857static void *tcp_v6_tw_get_peer(struct sock *sk)
1858{
1859	struct inet6_timewait_sock *tw6 = inet6_twsk(sk);
1860	struct inet_timewait_sock *tw = inet_twsk(sk);
1861
1862	if (tw->tw_family == AF_INET)
1863		return tcp_v4_tw_get_peer(sk);
1864
1865	return inet_getpeer_v6(&tw6->tw_v6_daddr, 1);
1866}
1867
1868static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1869	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
1870	.twsk_unique	= tcp_twsk_unique,
1871	.twsk_destructor= tcp_twsk_destructor,
1872	.twsk_getpeer	= tcp_v6_tw_get_peer,
1873};
1874
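/* Address-family operation tables: ipv6_specific serves native IPv6
 * connections, ipv6_mapped serves IPv4-mapped ones.  tcp_v6_connect()
 * and tcp_v6_syn_recv_sock() switch a socket between them at run time.
 */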
1875static const struct inet_connection_sock_af_ops ipv6_specific = {
1876	.queue_xmit	   = inet6_csk_xmit,
1877	.send_check	   = tcp_v6_send_check,
1878	.rebuild_header	   = inet6_sk_rebuild_header,
1879	.conn_request	   = tcp_v6_conn_request,
1880	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
1881	.get_peer	   = tcp_v6_get_peer,
1882	.net_header_len	   = sizeof(struct ipv6hdr),
1883	.setsockopt	   = ipv6_setsockopt,
1884	.getsockopt	   = ipv6_getsockopt,
1885	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
1886	.sockaddr_len	   = sizeof(struct sockaddr_in6),
1887	.bind_conflict	   = inet6_csk_bind_conflict,
1888#ifdef CONFIG_COMPAT
1889	.compat_setsockopt = compat_ipv6_setsockopt,
1890	.compat_getsockopt = compat_ipv6_getsockopt,
1891#endif
1892};
1893
1894#ifdef CONFIG_TCP_MD5SIG
1895static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
1896	.md5_lookup	=	tcp_v6_md5_lookup,
1897	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
1898	.md5_add	=	tcp_v6_md5_add_func,
1899	.md5_parse	=	tcp_v6_parse_md5_keys,
1900};
1901#endif
1902
1903/*
1904 *	TCP over IPv4 via INET6 API
1905 */
1906
1907static const struct inet_connection_sock_af_ops ipv6_mapped = {
1908	.queue_xmit	   = ip_queue_xmit,
1909	.send_check	   = tcp_v4_send_check,
1910	.rebuild_header	   = inet_sk_rebuild_header,
1911	.conn_request	   = tcp_v6_conn_request,
1912	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
1913	.get_peer	   = tcp_v4_get_peer,
1914	.net_header_len	   = sizeof(struct iphdr),
1915	.setsockopt	   = ipv6_setsockopt,
1916	.getsockopt	   = ipv6_getsockopt,
1917	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
1918	.sockaddr_len	   = sizeof(struct sockaddr_in6),
1919	.bind_conflict	   = inet6_csk_bind_conflict,
1920#ifdef CONFIG_COMPAT
1921	.compat_setsockopt = compat_ipv6_setsockopt,
1922	.compat_getsockopt = compat_ipv6_getsockopt,
1923#endif
1924};
1925
1926#ifdef CONFIG_TCP_MD5SIG
1927static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
1928	.md5_lookup	=	tcp_v4_md5_lookup,
1929	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
1930	.md5_add	=	tcp_v6_md5_add_func,
1931	.md5_parse	=	tcp_v6_parse_md5_keys,
1932};
1933#endif
1934
1935/* NOTE: A lot of things are set to zero explicitly by the call to
1936 *       sk_alloc(), so they need not be done here.
1937 */
1938static int tcp_v6_init_sock(struct sock *sk)
1939{
1940	struct inet_connection_sock *icsk = inet_csk(sk);
1941	struct tcp_sock *tp = tcp_sk(sk);
1942
1943	skb_queue_head_init(&tp->out_of_order_queue);
1944	tcp_init_xmit_timers(sk);
1945	tcp_prequeue_init(tp);
1946
1947	icsk->icsk_rto = TCP_TIMEOUT_INIT;
1948	tp->mdev = TCP_TIMEOUT_INIT;
1949
1950	/* So many TCP implementations out there (incorrectly) count the
1951	 * initial SYN frame in their delayed-ACK and congestion control
1952	 * algorithms that we must have the following bandaid to talk
1953	 * efficiently to them.  -DaveM
1954	 */
1955	tp->snd_cwnd = 2;
1956
1957	/* See draft-stevens-tcpca-spec-01 for discussion of the
1958	 * initialization of these values.
1959	 */
1960	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
1961	tp->snd_cwnd_clamp = ~0;
1962	tp->mss_cache = TCP_MSS_DEFAULT;
1963
1964	tp->reordering = sysctl_tcp_reordering;
1965
1966	sk->sk_state = TCP_CLOSE;
1967
1968	icsk->icsk_af_ops = &ipv6_specific;
1969	icsk->icsk_ca_ops = &tcp_init_congestion_ops;
1970	icsk->icsk_sync_mss = tcp_sync_mss;
1971	sk->sk_write_space = sk_stream_write_space;
1972	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
1973
1974#ifdef CONFIG_TCP_MD5SIG
1975	tp->af_specific = &tcp_sock_ipv6_specific;
1976#endif
1977
1978	/* TCP Cookie Transactions */
1979	if (sysctl_tcp_cookie_size > 0) {
1980		/* Default, cookies without s_data_payload. */
1981		tp->cookie_values =
1982			kzalloc(sizeof(*tp->cookie_values),
1983				sk->sk_allocation);
1984		if (tp->cookie_values != NULL)
1985			kref_init(&tp->cookie_values->kref);
1986	}
1987	/* Presumed zeroed, in order of appearance:
1988	 *	cookie_in_always, cookie_out_never,
1989	 *	s_data_constant, s_data_in, s_data_out
1990	 */
1991	sk->sk_sndbuf = sysctl_tcp_wmem[1];
1992	sk->sk_rcvbuf = sysctl_tcp_rmem[1];
1993
1994	local_bh_disable();
1995	percpu_counter_inc(&tcp_sockets_allocated);
1996	local_bh_enable();
1997
1998	return 0;
1999}
2000
2001static void tcp_v6_destroy_sock(struct sock *sk)
2002{
2003#ifdef CONFIG_TCP_MD5SIG
2004	/* Clean up the MD5 key list */
2005	if (tcp_sk(sk)->md5sig_info)
2006		tcp_v6_clear_md5_list(sk);
2007#endif
2008	tcp_v4_destroy_sock(sk);
2009	inet6_destroy_sock(sk);
2010}
2011
2012#ifdef CONFIG_PROC_FS
2013/* Proc filesystem TCPv6 sock list dumping. */
2014static void get_openreq6(struct seq_file *seq,
2015			 struct sock *sk, struct request_sock *req, int i, int uid)
2016{
2017	int ttd = req->expires - jiffies;
2018	const struct in6_addr *src = &inet6_rsk(req)->loc_addr;
2019	const struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;
2020
2021	if (ttd < 0)
2022		ttd = 0;
2023
2024	seq_printf(seq,
2025		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2026		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
2027		   i,
2028		   src->s6_addr32[0], src->s6_addr32[1],
2029		   src->s6_addr32[2], src->s6_addr32[3],
2030		   ntohs(inet_rsk(req)->loc_port),
2031		   dest->s6_addr32[0], dest->s6_addr32[1],
2032		   dest->s6_addr32[2], dest->s6_addr32[3],
2033		   ntohs(inet_rsk(req)->rmt_port),
2034		   TCP_SYN_RECV,
2035		   0,0, /* could print option size, but that is af dependent. */
2036		   1,   /* timers active (only the expire timer) */
2037		   jiffies_to_clock_t(ttd),
2038		   req->retrans,
2039		   uid,
2040		   0,  /* non standard timer */
2041		   0, /* open_requests have no inode */
2042		   0, req);
2043}
2044
2045static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
2046{
2047	const struct in6_addr *dest, *src;
2048	__u16 destp, srcp;
2049	int timer_active;
2050	unsigned long timer_expires;
2051	struct inet_sock *inet = inet_sk(sp);
2052	struct tcp_sock *tp = tcp_sk(sp);
2053	const struct inet_connection_sock *icsk = inet_csk(sp);
2054	struct ipv6_pinfo *np = inet6_sk(sp);
2055
2056	dest  = &np->daddr;
2057	src   = &np->rcv_saddr;
2058	destp = ntohs(inet->inet_dport);
2059	srcp  = ntohs(inet->inet_sport);
2060
2061	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
2062		timer_active	= 1;
2063		timer_expires	= icsk->icsk_timeout;
2064	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2065		timer_active	= 4;
2066		timer_expires	= icsk->icsk_timeout;
2067	} else if (timer_pending(&sp->sk_timer)) {
2068		timer_active	= 2;
2069		timer_expires	= sp->sk_timer.expires;
2070	} else {
2071		timer_active	= 0;
2072		timer_expires = jiffies;
2073	}
2074
2075	seq_printf(seq,
2076		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2077		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %lu %lu %u %u %d\n",
2078		   i,
2079		   src->s6_addr32[0], src->s6_addr32[1],
2080		   src->s6_addr32[2], src->s6_addr32[3], srcp,
2081		   dest->s6_addr32[0], dest->s6_addr32[1],
2082		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
2083		   sp->sk_state,
2084		   tp->write_seq-tp->snd_una,
2085		   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
2086		   timer_active,
2087		   jiffies_to_clock_t(timer_expires - jiffies),
2088		   icsk->icsk_retransmits,
2089		   sock_i_uid(sp),
2090		   icsk->icsk_probes_out,
2091		   sock_i_ino(sp),
2092		   atomic_read(&sp->sk_refcnt), sp,
2093		   jiffies_to_clock_t(icsk->icsk_rto),
2094		   jiffies_to_clock_t(icsk->icsk_ack.ato),
2095		   (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
2096		   tp->snd_cwnd,
2097		   tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh
2098		   );
2099}
2100
2101static void get_timewait6_sock(struct seq_file *seq,
2102			       struct inet_timewait_sock *tw, int i)
2103{
2104	const struct in6_addr *dest, *src;
2105	__u16 destp, srcp;
2106	struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
2107	int ttd = tw->tw_ttd - jiffies;
2108
2109	if (ttd < 0)
2110		ttd = 0;
2111
2112	dest = &tw6->tw_v6_daddr;
2113	src  = &tw6->tw_v6_rcv_saddr;
2114	destp = ntohs(tw->tw_dport);
2115	srcp  = ntohs(tw->tw_sport);
2116
2117	seq_printf(seq,
2118		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2119		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
2120		   i,
2121		   src->s6_addr32[0], src->s6_addr32[1],
2122		   src->s6_addr32[2], src->s6_addr32[3], srcp,
2123		   dest->s6_addr32[0], dest->s6_addr32[1],
2124		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
2125		   tw->tw_substate, 0, 0,
2126		   3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
2127		   atomic_read(&tw->tw_refcnt), tw);
2128}
2129
2130static int tcp6_seq_show(struct seq_file *seq, void *v)
2131{
2132	struct tcp_iter_state *st;
2133
2134	if (v == SEQ_START_TOKEN) {
2135		seq_puts(seq,
2136			 "  sl  "
2137			 "local_address                         "
2138			 "remote_address                        "
2139			 "st tx_queue rx_queue tr tm->when retrnsmt"
2140			 "   uid  timeout inode\n");
2141		goto out;
2142	}
2143	st = seq->private;
2144
2145	switch (st->state) {
2146	case TCP_SEQ_STATE_LISTENING:
2147	case TCP_SEQ_STATE_ESTABLISHED:
2148		get_tcp6_sock(seq, v, st->num);
2149		break;
2150	case TCP_SEQ_STATE_OPENREQ:
2151		get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
2152		break;
2153	case TCP_SEQ_STATE_TIME_WAIT:
2154		get_timewait6_sock(seq, v, st->num);
2155		break;
2156	}
2157out:
2158	return 0;
2159}
2160
2161static struct tcp_seq_afinfo tcp6_seq_afinfo = {
2162	.name		= "tcp6",
2163	.family		= AF_INET6,
2164	.seq_fops	= {
2165		.owner		= THIS_MODULE,
2166	},
2167	.seq_ops	= {
2168		.show		= tcp6_seq_show,
2169	},
2170};
2171
2172int __net_init tcp6_proc_init(struct net *net)
2173{
2174	return tcp_proc_register(net, &tcp6_seq_afinfo);
2175}
2176
2177void tcp6_proc_exit(struct net *net)
2178{
2179	tcp_proc_unregister(net, &tcp6_seq_afinfo);
2180}
2181#endif
2182
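/* Protocol descriptor for AF_INET6 TCP sockets. Most handlers are the
 * generic TCP ones shared with IPv4; only the IPv6-specific receive,
 * hashing and init/destroy paths differ (tcp_v6_do_rcv, tcp_v6_hash,
 * tcp_v6_init_sock, tcp_v6_destroy_sock).
 */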
2183struct proto tcpv6_prot = {
2184	.name			= "TCPv6",
2185	.owner			= THIS_MODULE,
2186	.close			= tcp_close,
2187	.connect		= tcp_v6_connect,
2188	.disconnect		= tcp_disconnect,
2189	.accept			= inet_csk_accept,
2190	.ioctl			= tcp_ioctl,
2191	.init			= tcp_v6_init_sock,
2192	.destroy		= tcp_v6_destroy_sock,
2193	.shutdown		= tcp_shutdown,
2194	.setsockopt		= tcp_setsockopt,
2195	.getsockopt		= tcp_getsockopt,
2196	.recvmsg		= tcp_recvmsg,
2197	.sendmsg		= tcp_sendmsg,
2198	.sendpage		= tcp_sendpage,
2199	.backlog_rcv		= tcp_v6_do_rcv,
2200	.hash			= tcp_v6_hash,
2201	.unhash			= inet_unhash,
2202	.get_port		= inet_csk_get_port,
2203	.enter_memory_pressure	= tcp_enter_memory_pressure,
2204	.sockets_allocated	= &tcp_sockets_allocated,
2205	.memory_allocated	= &tcp_memory_allocated,
2206	.memory_pressure	= &tcp_memory_pressure,
2207	.orphan_count		= &tcp_orphan_count,
2208	.sysctl_mem		= sysctl_tcp_mem,
2209	.sysctl_wmem		= sysctl_tcp_wmem,
2210	.sysctl_rmem		= sysctl_tcp_rmem,
2211	.max_header		= MAX_TCP_HEADER,
2212	.obj_size		= sizeof(struct tcp6_sock),
2213	.slab_flags		= SLAB_DESTROY_BY_RCU,
2214	.twsk_prot		= &tcp6_timewait_sock_ops,
2215	.rsk_prot		= &tcp6_request_sock_ops,
2216	.h.hashinfo		= &tcp_hashinfo,
2217	.no_autobind		= true,
2218#ifdef CONFIG_COMPAT
2219	.compat_setsockopt	= compat_tcp_setsockopt,
2220	.compat_getsockopt	= compat_tcp_getsockopt,
2221#endif
2222};
2223
2224static const struct inet6_protocol tcpv6_protocol = {
2225	.handler	=	tcp_v6_rcv,
2226	.err_handler	=	tcp_v6_err,
2227	.gso_send_check	=	tcp_v6_gso_send_check,
2228	.gso_segment	=	tcp_tso_segment,
2229	.gro_receive	=	tcp6_gro_receive,
2230	.gro_complete	=	tcp6_gro_complete,
2231	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
2232};
2233
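/* inet6 socket switch entry: maps SOCK_STREAM/IPPROTO_TCP sockets created
 * through the AF_INET6 API to tcpv6_prot and the generic inet6 stream ops.
 */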
2234static struct inet_protosw tcpv6_protosw = {
2235	.type		=	SOCK_STREAM,
2236	.protocol	=	IPPROTO_TCP,
2237	.prot		=	&tcpv6_prot,
2238	.ops		=	&inet6_stream_ops,
2239	.no_check	=	0,
2240	.flags		=	INET_PROTOSW_PERMANENT |
2241				INET_PROTOSW_ICSK,
2242};
2243
2244static int __net_init tcpv6_net_init(struct net *net)
2245{
2246	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
2247				    SOCK_RAW, IPPROTO_TCP, net);
2248}
2249
2250static void __net_exit tcpv6_net_exit(struct net *net)
2251{
2252	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
2253}
2254
2255static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
2256{
2257	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
2258}
2259
2260static struct pernet_operations tcpv6_net_ops = {
2261	.init	    = tcpv6_net_init,
2262	.exit	    = tcpv6_net_exit,
2263	.exit_batch = tcpv6_net_exit_batch,
2264};
2265
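/* Module init: register the IPv6 protocol handler, then the protosw entry,
 * then the per-namespace operations that create the control socket used
 * for sending resets and ACKs (net->ipv6.tcp_sk).
 */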
2266int __init tcpv6_init(void)
2267{
2268	int ret;
2269
2270	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
2271	if (ret)
2272		goto out;
2273
2274	/* register inet6 protocol */
2275	ret = inet6_register_protosw(&tcpv6_protosw);
2276	if (ret)
2277		goto out_tcpv6_protocol;
2278
2279	ret = register_pernet_subsys(&tcpv6_net_ops);
2280	if (ret)
2281		goto out_tcpv6_protosw;
2282out:
2283	return ret;
2284
2285out_tcpv6_protocol:
2286	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2287out_tcpv6_protosw:
2288	inet6_unregister_protosw(&tcpv6_protosw);
2289	goto out;
2290}
2291
2292void tcpv6_exit(void)
2293{
2294	unregister_pernet_subsys(&tcpv6_net_ops);
2295	inet6_unregister_protosw(&tcpv6_protosw);
2296	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2297}
v4.6
   1/*
   2 *	TCP over IPv6
   3 *	Linux INET6 implementation
   4 *
   5 *	Authors:
   6 *	Pedro Roque		<roque@di.fc.ul.pt>
   7 *
   8 *	Based on:
   9 *	linux/net/ipv4/tcp.c
  10 *	linux/net/ipv4/tcp_input.c
  11 *	linux/net/ipv4/tcp_output.c
  12 *
  13 *	Fixes:
  14 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
  15 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
  16 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
  17 *					a single port at the same time.
  18 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
  19 *
  20 *	This program is free software; you can redistribute it and/or
  21 *      modify it under the terms of the GNU General Public License
  22 *      as published by the Free Software Foundation; either version
  23 *      2 of the License, or (at your option) any later version.
  24 */
  25
  26#include <linux/bottom_half.h>
  27#include <linux/module.h>
  28#include <linux/errno.h>
  29#include <linux/types.h>
  30#include <linux/socket.h>
  31#include <linux/sockios.h>
  32#include <linux/net.h>
  33#include <linux/jiffies.h>
  34#include <linux/in.h>
  35#include <linux/in6.h>
  36#include <linux/netdevice.h>
  37#include <linux/init.h>
  38#include <linux/jhash.h>
  39#include <linux/ipsec.h>
  40#include <linux/times.h>
  41#include <linux/slab.h>
  42#include <linux/uaccess.h>
  43#include <linux/ipv6.h>
  44#include <linux/icmpv6.h>
  45#include <linux/random.h>
  46
  47#include <net/tcp.h>
  48#include <net/ndisc.h>
  49#include <net/inet6_hashtables.h>
  50#include <net/inet6_connection_sock.h>
  51#include <net/ipv6.h>
  52#include <net/transp_v6.h>
  53#include <net/addrconf.h>
  54#include <net/ip6_route.h>
  55#include <net/ip6_checksum.h>
  56#include <net/inet_ecn.h>
  57#include <net/protocol.h>
  58#include <net/xfrm.h>
  59#include <net/snmp.h>
  60#include <net/dsfield.h>
  61#include <net/timewait_sock.h>
  62#include <net/inet_common.h>
  63#include <net/secure_seq.h>
  64#include <net/busy_poll.h>
  65
  66#include <linux/proc_fs.h>
  67#include <linux/seq_file.h>
  68
  69#include <crypto/hash.h>
  70#include <linux/scatterlist.h>
  71
  72static void	tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
  73static void	tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
  74				      struct request_sock *req);
  75
  76static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
  77
  78static const struct inet_connection_sock_af_ops ipv6_mapped;
  79static const struct inet_connection_sock_af_ops ipv6_specific;
  80#ifdef CONFIG_TCP_MD5SIG
  81static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
  82static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
  83#else
  84static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
  85						   const struct in6_addr *addr)
  86{
  87	return NULL;
  88}
  89#endif
  90
  91static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
  92{
  93	struct dst_entry *dst = skb_dst(skb);
  94
  95	if (dst && dst_hold_safe(dst)) {
  96		const struct rt6_info *rt = (const struct rt6_info *)dst;
  97
  98		sk->sk_rx_dst = dst;
  99		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
 100		inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
 101	}
 102}
 103
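/* Initial sequence numbers are derived from the connection 4-tuple
 * (addresses and ports) via secure_tcpv6_sequence_number().
 */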
 104static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
 105{
 106	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
 107					    ipv6_hdr(skb)->saddr.s6_addr32,
 108					    tcp_hdr(skb)->dest,
 109					    tcp_hdr(skb)->source);
 110}
 111
 112static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 113			  int addr_len)
 114{
 115	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
 116	struct inet_sock *inet = inet_sk(sk);
 117	struct inet_connection_sock *icsk = inet_csk(sk);
 118	struct ipv6_pinfo *np = inet6_sk(sk);
 119	struct tcp_sock *tp = tcp_sk(sk);
 120	struct in6_addr *saddr = NULL, *final_p, final;
 121	struct ipv6_txoptions *opt;
 122	struct flowi6 fl6;
 123	struct dst_entry *dst;
 124	int addr_type;
 125	int err;
 126
 127	if (addr_len < SIN6_LEN_RFC2133)
 128		return -EINVAL;
 129
 130	if (usin->sin6_family != AF_INET6)
 131		return -EAFNOSUPPORT;
 132
 133	memset(&fl6, 0, sizeof(fl6));
 134
 135	if (np->sndflow) {
 136		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
 137		IP6_ECN_flow_init(fl6.flowlabel);
 138		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
 139			struct ip6_flowlabel *flowlabel;
 140			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
 141			if (!flowlabel)
 142				return -EINVAL;
 143			fl6_sock_release(flowlabel);
 144		}
 145	}
 146
 147	/*
 148	 *	connect() to INADDR_ANY means loopback (BSD'ism).
 149	 */
 150
 151	if (ipv6_addr_any(&usin->sin6_addr))
 152		usin->sin6_addr.s6_addr[15] = 0x1;
 153
 154	addr_type = ipv6_addr_type(&usin->sin6_addr);
 155
 156	if (addr_type & IPV6_ADDR_MULTICAST)
 157		return -ENETUNREACH;
 158
 159	if (addr_type&IPV6_ADDR_LINKLOCAL) {
 160		if (addr_len >= sizeof(struct sockaddr_in6) &&
 161		    usin->sin6_scope_id) {
 162			/* If interface is set while binding, indices
 163			 * must coincide.
 164			 */
 165			if (sk->sk_bound_dev_if &&
 166			    sk->sk_bound_dev_if != usin->sin6_scope_id)
 167				return -EINVAL;
 168
 169			sk->sk_bound_dev_if = usin->sin6_scope_id;
 170		}
 171
 172		/* Connect to link-local address requires an interface */
 173		if (!sk->sk_bound_dev_if)
 174			return -EINVAL;
 175	}
 176
 177	if (tp->rx_opt.ts_recent_stamp &&
 178	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
 179		tp->rx_opt.ts_recent = 0;
 180		tp->rx_opt.ts_recent_stamp = 0;
 181		tp->write_seq = 0;
 182	}
 183
 184	sk->sk_v6_daddr = usin->sin6_addr;
 185	np->flow_label = fl6.flowlabel;
 186
 187	/*
 188	 *	TCP over IPv4
 189	 */
 190
 191	if (addr_type == IPV6_ADDR_MAPPED) {
 192		u32 exthdrlen = icsk->icsk_ext_hdr_len;
 193		struct sockaddr_in sin;
 194
 195		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
 196
 197		if (__ipv6_only_sock(sk))
 198			return -ENETUNREACH;
 199
 200		sin.sin_family = AF_INET;
 201		sin.sin_port = usin->sin6_port;
 202		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
 203
 204		icsk->icsk_af_ops = &ipv6_mapped;
 205		sk->sk_backlog_rcv = tcp_v4_do_rcv;
 206#ifdef CONFIG_TCP_MD5SIG
 207		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
 208#endif
 209
 210		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
 211
 212		if (err) {
 213			icsk->icsk_ext_hdr_len = exthdrlen;
 214			icsk->icsk_af_ops = &ipv6_specific;
 215			sk->sk_backlog_rcv = tcp_v6_do_rcv;
 216#ifdef CONFIG_TCP_MD5SIG
 217			tp->af_specific = &tcp_sock_ipv6_specific;
 218#endif
 219			goto failure;
 220		}
 221		np->saddr = sk->sk_v6_rcv_saddr;
 222
 223		return err;
 224	}
 225
 226	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
 227		saddr = &sk->sk_v6_rcv_saddr;
 228
 229	fl6.flowi6_proto = IPPROTO_TCP;
 230	fl6.daddr = sk->sk_v6_daddr;
 231	fl6.saddr = saddr ? *saddr : np->saddr;
 232	fl6.flowi6_oif = sk->sk_bound_dev_if;
 233	fl6.flowi6_mark = sk->sk_mark;
 234	fl6.fl6_dport = usin->sin6_port;
 235	fl6.fl6_sport = inet->inet_sport;
 236
 237	opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
 238	final_p = fl6_update_dst(&fl6, opt, &final);
 239
 240	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
 241
 242	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
 243	if (IS_ERR(dst)) {
 244		err = PTR_ERR(dst);
 245		goto failure;
 246	}
 247
 248	if (!saddr) {
 249		saddr = &fl6.saddr;
 250		sk->sk_v6_rcv_saddr = *saddr;
 251	}
 252
 253	/* set the source address */
 254	np->saddr = *saddr;
 255	inet->inet_rcv_saddr = LOOPBACK4_IPV6;
 256
 257	sk->sk_gso_type = SKB_GSO_TCPV6;
 258	ip6_dst_store(sk, dst, NULL, NULL);
 259
 260	if (tcp_death_row.sysctl_tw_recycle &&
 261	    !tp->rx_opt.ts_recent_stamp &&
 262	    ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr))
 263		tcp_fetch_timewait_stamp(sk, dst);
 264
 265	icsk->icsk_ext_hdr_len = 0;
 266	if (opt)
 267		icsk->icsk_ext_hdr_len = opt->opt_flen +
 268					 opt->opt_nflen;
 269
 270	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
 271
 272	inet->inet_dport = usin->sin6_port;
 273
 274	tcp_set_state(sk, TCP_SYN_SENT);
 275	err = inet6_hash_connect(&tcp_death_row, sk);
 276	if (err)
 277		goto late_failure;
 278
 279	sk_set_txhash(sk);
 280
 281	if (!tp->write_seq && likely(!tp->repair))
 282		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
 283							     sk->sk_v6_daddr.s6_addr32,
 284							     inet->inet_sport,
 285							     inet->inet_dport);
 286
 287	err = tcp_connect(sk);
 288	if (err)
 289		goto late_failure;
 290
 291	return 0;
 292
 293late_failure:
 294	tcp_set_state(sk, TCP_CLOSE);
 295	__sk_dst_reset(sk);
 296failure:
 297	inet->inet_dport = 0;
 298	sk->sk_route_caps = 0;
 299	return err;
 300}
 301
 302static void tcp_v6_mtu_reduced(struct sock *sk)
 303{
 304	struct dst_entry *dst;
 305
 306	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
 307		return;
 308
 309	dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
 310	if (!dst)
 311		return;
 312
 313	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
 314		tcp_sync_mss(sk, dst_mtu(dst));
 315		tcp_simple_retransmit(sk);
 316	}
 317}
 318
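/* ICMPv6 error handler: find the socket the offending segment belonged to
 * and act on the error (PMTU updates, redirects, or hard connection errors).
 */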
 319static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 320		u8 type, u8 code, int offset, __be32 info)
 321{
 322	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
 323	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
 324	struct net *net = dev_net(skb->dev);
 325	struct request_sock *fastopen;
 326	struct ipv6_pinfo *np;
 327	struct tcp_sock *tp;
 328	__u32 seq, snd_una;
 329	struct sock *sk;
 330	bool fatal;
 331	int err;
 332
 333	sk = __inet6_lookup_established(net, &tcp_hashinfo,
 334					&hdr->daddr, th->dest,
 335					&hdr->saddr, ntohs(th->source),
 336					skb->dev->ifindex);
 337
 338	if (!sk) {
 339		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
 340				   ICMP6_MIB_INERRORS);
 341		return;
 342	}
 343
 344	if (sk->sk_state == TCP_TIME_WAIT) {
 345		inet_twsk_put(inet_twsk(sk));
 346		return;
 347	}
 348	seq = ntohl(th->seq);
 349	fatal = icmpv6_err_convert(type, code, &err);
 350	if (sk->sk_state == TCP_NEW_SYN_RECV)
 351		return tcp_req_err(sk, seq, fatal);
 352
 353	bh_lock_sock(sk);
 354	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
 355		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
 356
 357	if (sk->sk_state == TCP_CLOSE)
 358		goto out;
 359
 360	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
 361		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
 362		goto out;
 363	}
 364
 365	tp = tcp_sk(sk);
 366	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
 367	fastopen = tp->fastopen_rsk;
 368	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
 369	if (sk->sk_state != TCP_LISTEN &&
 370	    !between(seq, snd_una, tp->snd_nxt)) {
 371		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
 372		goto out;
 373	}
 374
 375	np = inet6_sk(sk);
 376
 377	if (type == NDISC_REDIRECT) {
 378		struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
 379
 380		if (dst)
 381			dst->ops->redirect(dst, sk, skb);
 382		goto out;
 383	}
 384
 385	if (type == ICMPV6_PKT_TOOBIG) {
 386		/* We are not interested in TCP_LISTEN and open_requests
  387		 * (SYN-ACKs sent out by Linux are always <576 bytes so
 388		 * they should go through unfragmented).
 389		 */
 390		if (sk->sk_state == TCP_LISTEN)
 391			goto out;
 392
 393		if (!ip6_sk_accept_pmtu(sk))
 394			goto out;
 395
 396		tp->mtu_info = ntohl(info);
 397		if (!sock_owned_by_user(sk))
 398			tcp_v6_mtu_reduced(sk);
 399		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
 400					   &tp->tsq_flags))
 401			sock_hold(sk);
 402		goto out;
 403	}
 404
 405
  406	/* Might be for a request_sock */
 407	switch (sk->sk_state) {
 408	case TCP_SYN_SENT:
 409	case TCP_SYN_RECV:
  410		/* Only in fast or simultaneous open. If a fast open socket is
  411		 * already accepted it is treated as a connected one below.
  412		 */
 413		if (fastopen && !fastopen->sk)
 414			break;
  415
 416		if (!sock_owned_by_user(sk)) {
 417			sk->sk_err = err;
 418			sk->sk_error_report(sk);		/* Wake people up to see the error (see connect in sock.c) */
 419
 420			tcp_done(sk);
 421		} else
 422			sk->sk_err_soft = err;
 423		goto out;
 424	}
 425
 426	if (!sock_owned_by_user(sk) && np->recverr) {
 427		sk->sk_err = err;
 428		sk->sk_error_report(sk);
 429	} else
 430		sk->sk_err_soft = err;
 431
 432out:
 433	bh_unlock_sock(sk);
 434	sock_put(sk);
 435}
 436
 437
 438static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
 439			      struct flowi *fl,
 440			      struct request_sock *req,
 441			      struct tcp_fastopen_cookie *foc,
 442			      bool attach_req)
 443{
 444	struct inet_request_sock *ireq = inet_rsk(req);
 445	struct ipv6_pinfo *np = inet6_sk(sk);
 446	struct flowi6 *fl6 = &fl->u.ip6;
 447	struct sk_buff *skb;
 448	int err = -ENOMEM;
 449
 450	/* First, grab a route. */
 451	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
 452					       IPPROTO_TCP)) == NULL)
 453		goto done;
 454
 455	skb = tcp_make_synack(sk, dst, req, foc, attach_req);
 456
 457	if (skb) {
 458		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
 459				    &ireq->ir_v6_rmt_addr);
 460
 461		fl6->daddr = ireq->ir_v6_rmt_addr;
 462		if (np->repflow && ireq->pktopts)
 463			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
 464
 465		rcu_read_lock();
 466		err = ip6_xmit(sk, skb, fl6, rcu_dereference(np->opt),
 467			       np->tclass);
 468		rcu_read_unlock();
 469		err = net_xmit_eval(err);
 470	}
 471
 472done:
 473	return err;
 474}
 475
 476
 477static void tcp_v6_reqsk_destructor(struct request_sock *req)
 478{
 479	kfree_skb(inet_rsk(req)->pktopts);
 480}
 481
 482#ifdef CONFIG_TCP_MD5SIG
 483static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
 484						   const struct in6_addr *addr)
 485{
 486	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
 487}
 488
 489static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
 490						const struct sock *addr_sk)
 491{
 492	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
 493}
 494
 495static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
 496				 int optlen)
 497{
 498	struct tcp_md5sig cmd;
 499	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
 500
 501	if (optlen < sizeof(cmd))
 502		return -EINVAL;
 503
 504	if (copy_from_user(&cmd, optval, sizeof(cmd)))
 505		return -EFAULT;
 506
 507	if (sin6->sin6_family != AF_INET6)
 508		return -EINVAL;
 509
 510	if (!cmd.tcpm_keylen) {
 511		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
 512			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
 513					      AF_INET);
 514		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
 515				      AF_INET6);
 516	}
 517
 518	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
 519		return -EINVAL;
 520
 521	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
 522		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
 523				      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
 524
 525	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
 526			      AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
 527}
 528
 529static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
 530					const struct in6_addr *daddr,
 531					const struct in6_addr *saddr, int nbytes)
 532{
 533	struct tcp6_pseudohdr *bp;
 534	struct scatterlist sg;
 535
 536	bp = &hp->md5_blk.ip6;
 537	/* 1. TCP pseudo-header (RFC2460) */
 538	bp->saddr = *saddr;
 539	bp->daddr = *daddr;
 540	bp->protocol = cpu_to_be32(IPPROTO_TCP);
 541	bp->len = cpu_to_be32(nbytes);
 542
 543	sg_init_one(&sg, bp, sizeof(*bp));
 544	ahash_request_set_crypt(hp->md5_req, &sg, NULL, sizeof(*bp));
 545	return crypto_ahash_update(hp->md5_req);
 546}
 547
 548static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
 549			       const struct in6_addr *daddr, struct in6_addr *saddr,
 550			       const struct tcphdr *th)
 551{
 552	struct tcp_md5sig_pool *hp;
 553	struct ahash_request *req;
 554
 555	hp = tcp_get_md5sig_pool();
 556	if (!hp)
 557		goto clear_hash_noput;
 558	req = hp->md5_req;
 559
 560	if (crypto_ahash_init(req))
 561		goto clear_hash;
 562	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
 563		goto clear_hash;
 564	if (tcp_md5_hash_header(hp, th))
 565		goto clear_hash;
 566	if (tcp_md5_hash_key(hp, key))
 567		goto clear_hash;
 568	ahash_request_set_crypt(req, NULL, md5_hash, 0);
 569	if (crypto_ahash_final(req))
 570		goto clear_hash;
 571
 572	tcp_put_md5sig_pool();
 573	return 0;
 574
 575clear_hash:
 576	tcp_put_md5sig_pool();
 577clear_hash_noput:
 578	memset(md5_hash, 0, 16);
 579	return 1;
 580}
 581
 582static int tcp_v6_md5_hash_skb(char *md5_hash,
 583			       const struct tcp_md5sig_key *key,
 584			       const struct sock *sk,
 585			       const struct sk_buff *skb)
 586{
 587	const struct in6_addr *saddr, *daddr;
 588	struct tcp_md5sig_pool *hp;
 589	struct ahash_request *req;
 590	const struct tcphdr *th = tcp_hdr(skb);
 591
 592	if (sk) { /* valid for establish/request sockets */
 593		saddr = &sk->sk_v6_rcv_saddr;
 594		daddr = &sk->sk_v6_daddr;
 595	} else {
 596		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
 597		saddr = &ip6h->saddr;
 598		daddr = &ip6h->daddr;
 599	}
 600
 601	hp = tcp_get_md5sig_pool();
 602	if (!hp)
 603		goto clear_hash_noput;
 604	req = hp->md5_req;
 605
 606	if (crypto_ahash_init(req))
 607		goto clear_hash;
 608
 609	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
 610		goto clear_hash;
 611	if (tcp_md5_hash_header(hp, th))
 612		goto clear_hash;
 613	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
 614		goto clear_hash;
 615	if (tcp_md5_hash_key(hp, key))
 616		goto clear_hash;
 617	ahash_request_set_crypt(req, NULL, md5_hash, 0);
 618	if (crypto_ahash_final(req))
 619		goto clear_hash;
 620
 621	tcp_put_md5sig_pool();
 622	return 0;
 623
 624clear_hash:
 625	tcp_put_md5sig_pool();
 626clear_hash_noput:
 627	memset(md5_hash, 0, 16);
 628	return 1;
 629}
 630
 631#endif
 632
 633static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
 634				    const struct sk_buff *skb)
 635{
 636#ifdef CONFIG_TCP_MD5SIG
 637	const __u8 *hash_location = NULL;
 638	struct tcp_md5sig_key *hash_expected;
 639	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
 640	const struct tcphdr *th = tcp_hdr(skb);
 641	int genhash;
 642	u8 newhash[16];
 643
 644	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
 645	hash_location = tcp_parse_md5sig_option(th);
 646
 647	/* We've parsed the options - do we have a hash? */
 648	if (!hash_expected && !hash_location)
 649		return false;
 650
 651	if (hash_expected && !hash_location) {
 652		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
 653		return true;
 654	}
 655
 656	if (!hash_expected && hash_location) {
 657		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
 658		return true;
 659	}
 660
 661	/* check the signature */
 662	genhash = tcp_v6_md5_hash_skb(newhash,
 663				      hash_expected,
 664				      NULL, skb);
 665
 666	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
 667		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
 668				     genhash ? "failed" : "mismatch",
 669				     &ip6h->saddr, ntohs(th->source),
 670				     &ip6h->daddr, ntohs(th->dest));
 671		return true;
 672	}
 673#endif
 674	return false;
 675}
 676
 677static void tcp_v6_init_req(struct request_sock *req,
 678			    const struct sock *sk_listener,
 679			    struct sk_buff *skb)
 680{
 681	struct inet_request_sock *ireq = inet_rsk(req);
 682	const struct ipv6_pinfo *np = inet6_sk(sk_listener);
 683
 684	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
 685	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
 686
 687	/* So that link locals have meaning */
 688	if (!sk_listener->sk_bound_dev_if &&
 689	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
 690		ireq->ir_iif = tcp_v6_iif(skb);
 691
 692	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
 693	    (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
 694	     np->rxopt.bits.rxinfo ||
 695	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
 696	     np->rxopt.bits.rxohlim || np->repflow)) {
 697		atomic_inc(&skb->users);
 698		ireq->pktopts = skb;
 699	}
 700}
 701
 702static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
 703					  struct flowi *fl,
 704					  const struct request_sock *req,
 705					  bool *strict)
 706{
 707	if (strict)
 708		*strict = true;
 709	return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
 710}
 711
 712struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
 713	.family		=	AF_INET6,
 714	.obj_size	=	sizeof(struct tcp6_request_sock),
 715	.rtx_syn_ack	=	tcp_rtx_synack,
 716	.send_ack	=	tcp_v6_reqsk_send_ack,
 717	.destructor	=	tcp_v6_reqsk_destructor,
 718	.send_reset	=	tcp_v6_send_reset,
 719	.syn_ack_timeout =	tcp_syn_ack_timeout,
 720};
 721
 722static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
 723	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
 724				sizeof(struct ipv6hdr),
 725#ifdef CONFIG_TCP_MD5SIG
 726	.req_md5_lookup	=	tcp_v6_md5_lookup,
 727	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
 728#endif
 729	.init_req	=	tcp_v6_init_req,
 730#ifdef CONFIG_SYN_COOKIES
 731	.cookie_init_seq =	cookie_v6_init_sequence,
 732#endif
 733	.route_req	=	tcp_v6_route_req,
 734	.init_seq	=	tcp_v6_init_sequence,
 735	.send_synack	=	tcp_v6_send_synack,
 736};
 737
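/* Build a bare TCP segment (RST or ACK, optionally carrying timestamps and
 * an MD5 option) and transmit it through the per-netns control socket.
 */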
 738static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
 739				 u32 ack, u32 win, u32 tsval, u32 tsecr,
 740				 int oif, struct tcp_md5sig_key *key, int rst,
 741				 u8 tclass, u32 label)
 742{
 743	const struct tcphdr *th = tcp_hdr(skb);
 744	struct tcphdr *t1;
 745	struct sk_buff *buff;
 746	struct flowi6 fl6;
 747	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
 748	struct sock *ctl_sk = net->ipv6.tcp_sk;
 749	unsigned int tot_len = sizeof(struct tcphdr);
 750	struct dst_entry *dst;
 751	__be32 *topt;
 752
 753	if (tsecr)
 754		tot_len += TCPOLEN_TSTAMP_ALIGNED;
 755#ifdef CONFIG_TCP_MD5SIG
 756	if (key)
 757		tot_len += TCPOLEN_MD5SIG_ALIGNED;
 758#endif
 759
 760	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
 761			 GFP_ATOMIC);
 762	if (!buff)
 763		return;
 764
 765	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
 766
 767	t1 = (struct tcphdr *) skb_push(buff, tot_len);
 768	skb_reset_transport_header(buff);
 769
 770	/* Swap the send and the receive. */
 771	memset(t1, 0, sizeof(*t1));
 772	t1->dest = th->source;
 773	t1->source = th->dest;
 774	t1->doff = tot_len / 4;
 775	t1->seq = htonl(seq);
 776	t1->ack_seq = htonl(ack);
 777	t1->ack = !rst || !th->ack;
 778	t1->rst = rst;
 779	t1->window = htons(win);
 780
 781	topt = (__be32 *)(t1 + 1);
 782
 783	if (tsecr) {
 784		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
 785				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
 786		*topt++ = htonl(tsval);
 787		*topt++ = htonl(tsecr);
 788	}
 789
 790#ifdef CONFIG_TCP_MD5SIG
 791	if (key) {
 792		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
 793				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
 794		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
 795				    &ipv6_hdr(skb)->saddr,
 796				    &ipv6_hdr(skb)->daddr, t1);
 797	}
 798#endif
 799
 800	memset(&fl6, 0, sizeof(fl6));
 801	fl6.daddr = ipv6_hdr(skb)->saddr;
 802	fl6.saddr = ipv6_hdr(skb)->daddr;
 803	fl6.flowlabel = label;
 804
 805	buff->ip_summed = CHECKSUM_PARTIAL;
 806	buff->csum = 0;
 807
 808	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
 809
 810	fl6.flowi6_proto = IPPROTO_TCP;
 811	if (rt6_need_strict(&fl6.daddr) && !oif)
 812		fl6.flowi6_oif = tcp_v6_iif(skb);
 813	else {
 814		if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
 815			oif = skb->skb_iif;
 816
 817		fl6.flowi6_oif = oif;
 818	}
 819
 820	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
 821	fl6.fl6_dport = t1->dest;
 822	fl6.fl6_sport = t1->source;
 823	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
 824
  825	/* Pass the control socket to ip6_dst_lookup even when sending a RST;
  826	 * the underlying function uses it to retrieve the network
  827	 * namespace.
  828	 */
 829	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
 830	if (!IS_ERR(dst)) {
 831		skb_dst_set(buff, dst);
 832		ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
 833		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
 834		if (rst)
 835			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
 836		return;
 837	}
 838
 839	kfree_skb(buff);
 840}
 841
 842static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
 843{
 844	const struct tcphdr *th = tcp_hdr(skb);
 845	u32 seq = 0, ack_seq = 0;
 846	struct tcp_md5sig_key *key = NULL;
 847#ifdef CONFIG_TCP_MD5SIG
 848	const __u8 *hash_location = NULL;
 849	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
 850	unsigned char newhash[16];
 851	int genhash;
 852	struct sock *sk1 = NULL;
 853#endif
 854	int oif;
 855
 856	if (th->rst)
 857		return;
 858
  859	/* If sk is not NULL, it means we did a successful lookup and the incoming
  860	 * route had to be correct. prequeue might have dropped our dst.
 861	 */
 862	if (!sk && !ipv6_unicast_destination(skb))
 863		return;
 864
 865#ifdef CONFIG_TCP_MD5SIG
 866	hash_location = tcp_parse_md5sig_option(th);
 867	if (sk && sk_fullsock(sk)) {
 868		key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr);
 869	} else if (hash_location) {
 870		/*
  871		 * The active side is lost. Try to find the listening socket through
  872		 * the source port, and then find the md5 key through that socket.
  873		 * We are not losing security here:
  874		 * the incoming packet is checked against the md5 hash of the found key;
  875		 * no RST is generated if the md5 hash doesn't match.
 876		 */
 877		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
 878					   &tcp_hashinfo, NULL, 0,
 879					   &ipv6h->saddr,
 880					   th->source, &ipv6h->daddr,
 881					   ntohs(th->source), tcp_v6_iif(skb));
 882		if (!sk1)
 883			return;
 884
 885		rcu_read_lock();
 886		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
 887		if (!key)
 888			goto release_sk1;
 889
 890		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
 891		if (genhash || memcmp(hash_location, newhash, 16) != 0)
 892			goto release_sk1;
 893	}
 894#endif
 895
 896	if (th->ack)
 897		seq = ntohl(th->ack_seq);
 898	else
 899		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
 900			  (th->doff << 2);
 901
 902	oif = sk ? sk->sk_bound_dev_if : 0;
 903	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);
 904
 905#ifdef CONFIG_TCP_MD5SIG
 906release_sk1:
 907	if (sk1) {
 908		rcu_read_unlock();
 909		sock_put(sk1);
 910	}
 911#endif
 912}
 913
 914static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
 915			    u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
 916			    struct tcp_md5sig_key *key, u8 tclass,
 917			    u32 label)
 918{
 919	tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
 920			     tclass, label);
 921}
 922
 923static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
 924{
 925	struct inet_timewait_sock *tw = inet_twsk(sk);
 926	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
 927
 928	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
 929			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
 930			tcp_time_stamp + tcptw->tw_ts_offset,
 931			tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
 932			tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));
 933
 934	inet_twsk_put(tw);
 935}
 936
 937static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
 938				  struct request_sock *req)
 939{
 940	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
 941	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
 942	 */
 943	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
 944			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
 945			tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd,
 946			tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
 947			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
 948			0, 0);
 949}
 950
 951
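/* When SYN cookies are enabled, a bare ACK arriving on a listener may carry
 * a valid cookie; cookie_v6_check() then reconstructs the request socket.
 */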
 952static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
 953{
 954#ifdef CONFIG_SYN_COOKIES
 955	const struct tcphdr *th = tcp_hdr(skb);
  956
 957	if (!th->syn)
 958		sk = cookie_v6_check(sk, skb);
 959#endif
 960	return sk;
 961}
 962
 963static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 964{
 965	if (skb->protocol == htons(ETH_P_IP))
 966		return tcp_v4_conn_request(sk, skb);
 967
 968	if (!ipv6_unicast_destination(skb))
 969		goto drop;
 970
 971	return tcp_conn_request(&tcp6_request_sock_ops,
 972				&tcp_request_sock_ipv6_ops, sk, skb);
  973
 974drop:
 975	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
 976	return 0; /* don't send reset */
 977}
 978
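/* Create the child socket once the handshake completes; handles both native
 * IPv6 connections and IPv4 connections arriving on a v4-mapped IPv6 listener.
 */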
 979static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
 980					 struct request_sock *req,
 981					 struct dst_entry *dst,
 982					 struct request_sock *req_unhash,
 983					 bool *own_req)
 984{
 985	struct inet_request_sock *ireq;
 986	struct ipv6_pinfo *newnp;
 987	const struct ipv6_pinfo *np = inet6_sk(sk);
 988	struct ipv6_txoptions *opt;
 989	struct tcp6_sock *newtcp6sk;
 990	struct inet_sock *newinet;
 991	struct tcp_sock *newtp;
 992	struct sock *newsk;
 993#ifdef CONFIG_TCP_MD5SIG
 994	struct tcp_md5sig_key *key;
 995#endif
 996	struct flowi6 fl6;
 997
 998	if (skb->protocol == htons(ETH_P_IP)) {
 999		/*
1000		 *	v6 mapped
1001		 */
1002
1003		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
1004					     req_unhash, own_req);
1005
1006		if (!newsk)
1007			return NULL;
1008
1009		newtcp6sk = (struct tcp6_sock *)newsk;
1010		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1011
1012		newinet = inet_sk(newsk);
1013		newnp = inet6_sk(newsk);
1014		newtp = tcp_sk(newsk);
1015
1016		memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1017
1018		newnp->saddr = newsk->sk_v6_rcv_saddr;
1019
1020		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1021		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
1022#ifdef CONFIG_TCP_MD5SIG
1023		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1024#endif
1025
1026		newnp->ipv6_ac_list = NULL;
1027		newnp->ipv6_fl_list = NULL;
1028		newnp->pktoptions  = NULL;
1029		newnp->opt	   = NULL;
1030		newnp->mcast_oif   = tcp_v6_iif(skb);
1031		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
1032		newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
1033		if (np->repflow)
1034			newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
1035
1036		/*
1037		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1038		 * here, tcp_create_openreq_child now does this for us, see the comment in
1039		 * that function for the gory details. -acme
1040		 */
1041
1042		/* This is a tricky place. Until this moment the IPv4 tcp code
1043		   worked with the IPv6 icsk.icsk_af_ops.
1044		   Sync it now.
1045		 */
1046		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
1047
1048		return newsk;
1049	}
1050
1051	ireq = inet_rsk(req);
1052
1053	if (sk_acceptq_is_full(sk))
1054		goto out_overflow;
1055
1056	if (!dst) {
1057		dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
1058		if (!dst)
1059			goto out;
1060	}
1061
1062	newsk = tcp_create_openreq_child(sk, req, skb);
1063	if (!newsk)
1064		goto out_nonewsk;
1065
1066	/*
1067	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1068	 * count here, tcp_create_openreq_child now does this for us, see the
1069	 * comment in that function for the gory details. -acme
1070	 */
1071
1072	newsk->sk_gso_type = SKB_GSO_TCPV6;
1073	ip6_dst_store(newsk, dst, NULL, NULL);
1074	inet6_sk_rx_dst_set(newsk, skb);
1075
1076	newtcp6sk = (struct tcp6_sock *)newsk;
1077	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1078
1079	newtp = tcp_sk(newsk);
1080	newinet = inet_sk(newsk);
1081	newnp = inet6_sk(newsk);
1082
1083	memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1084
1085	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
1086	newnp->saddr = ireq->ir_v6_loc_addr;
1087	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
1088	newsk->sk_bound_dev_if = ireq->ir_iif;
1089
1090	/* Now IPv6 options...
1091
1092	   First: no IPv4 options.
1093	 */
1094	newinet->inet_opt = NULL;
1095	newnp->ipv6_ac_list = NULL;
1096	newnp->ipv6_fl_list = NULL;
1097
1098	/* Clone RX bits */
1099	newnp->rxopt.all = np->rxopt.all;
1100
1101	newnp->pktoptions = NULL;
1102	newnp->opt	  = NULL;
1103	newnp->mcast_oif  = tcp_v6_iif(skb);
1104	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1105	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
1106	if (np->repflow)
1107		newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
1108
1109	/* Clone native IPv6 options from listening socket (if any)
1110
1111	   Yes, keeping a reference count would be much more clever,
1112	   but we do one more thing here: we reattach optmem
1113	   to newsk.
1114	 */
1115	opt = rcu_dereference(np->opt);
1116	if (opt) {
1117		opt = ipv6_dup_options(newsk, opt);
1118		RCU_INIT_POINTER(newnp->opt, opt);
1119	}
1120	inet_csk(newsk)->icsk_ext_hdr_len = 0;
1121	if (opt)
1122		inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
1123						    opt->opt_flen;
1124
1125	tcp_ca_openreq_child(newsk, dst);
1126
1127	tcp_sync_mss(newsk, dst_mtu(dst));
1128	newtp->advmss = dst_metric_advmss(dst);
1129	if (tcp_sk(sk)->rx_opt.user_mss &&
1130	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1131		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1132
1133	tcp_initialize_rcv_mss(newsk);
1134
1135	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
1136	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
1137
1138#ifdef CONFIG_TCP_MD5SIG
1139	/* Copy over the MD5 key from the original socket */
1140	key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
1141	if (key) {
1142		/* We're using one, so create a matching key
1143		 * on the newsk structure. If we fail to get
1144		 * memory, then we end up not copying the key
1145		 * across. Shucks.
1146		 */
1147		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
1148			       AF_INET6, key->key, key->keylen,
1149			       sk_gfp_mask(sk, GFP_ATOMIC));
1150	}
1151#endif
1152
1153	if (__inet_inherit_port(sk, newsk) < 0) {
1154		inet_csk_prepare_forced_close(newsk);
1155		tcp_done(newsk);
1156		goto out;
1157	}
1158	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
1159	if (*own_req) {
1160		tcp_move_syn(newtp, req);
1161
1162		/* Clone pktoptions received with SYN, if we own the req */
1163		if (ireq->pktopts) {
1164			newnp->pktoptions = skb_clone(ireq->pktopts,
1165						      sk_gfp_mask(sk, GFP_ATOMIC));
1166			consume_skb(ireq->pktopts);
1167			ireq->pktopts = NULL;
1168			if (newnp->pktoptions)
1169				skb_set_owner_r(newnp->pktoptions, newsk);
1170		}
1171	}
1172
1173	return newsk;
1174
1175out_overflow:
1176	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1177out_nonewsk:
1178	dst_release(dst);
1179out:
1180	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1181	return NULL;
1182}
1183
1184/* The socket must have its spinlock held when we get
1185 * here, unless it is a TCP_LISTEN socket.
1186 *
1187 * We have a potential double-lock case here, so even when
1188 * doing backlog processing we use the BH locking scheme.
1189 * This is because we cannot sleep with the original spinlock
1190 * held.
1191 */
1192static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1193{
1194	struct ipv6_pinfo *np = inet6_sk(sk);
1195	struct tcp_sock *tp;
1196	struct sk_buff *opt_skb = NULL;
1197
1198	/* Imagine: the socket is IPv6. An IPv4 packet arrives,
1199	   goes to the IPv4 receive handler and gets backlogged.
1200	   From the backlog it always comes here. Kerboom...
1201	   Fortunately, tcp_rcv_established and rcv_established
1202	   handle them correctly, but that is not the case with
1203	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
1204	 */
1205
1206	if (skb->protocol == htons(ETH_P_IP))
1207		return tcp_v4_do_rcv(sk, skb);
1208
1209	if (sk_filter(sk, skb))
1210		goto discard;
1211
1212	/*
1213	 *	socket locking is here for SMP purposes as backlog rcv
1214	 *	is currently called with bh processing disabled.
1215	 */
1216
1217	/* Do Stevens' IPV6_PKTOPTIONS.
1218
1219	   Yes, guys, it is the only place in our code where we
1220	   can make it without affecting IPv4.
1221	   The rest of the code is protocol independent,
1222	   and I do not like the idea of uglifying IPv4.
1223
1224	   Actually, the whole idea behind IPV6_PKTOPTIONS
1225	   does not look very well thought out. For now we latch
1226	   the options received in the last packet enqueued
1227	   by tcp. Feel free to propose a better solution.
1228					       --ANK (980728)
1229	 */
1230	if (np->rxopt.all)
1231		opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));
1232
1233	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1234		struct dst_entry *dst = sk->sk_rx_dst;
1235
1236		sock_rps_save_rxhash(sk, skb);
1237		sk_mark_napi_id(sk, skb);
1238		if (dst) {
1239			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1240			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
1241				dst_release(dst);
1242				sk->sk_rx_dst = NULL;
1243			}
1244		}
1245
1246		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
1247		if (opt_skb)
1248			goto ipv6_pktoptions;
1249		return 0;
1250	}
1251
1252	if (tcp_checksum_complete(skb))
1253		goto csum_err;
1254
1255	if (sk->sk_state == TCP_LISTEN) {
1256		struct sock *nsk = tcp_v6_cookie_check(sk, skb);
1257
1258		if (!nsk)
1259			goto discard;
1260
1261		if (nsk != sk) {
1262			sock_rps_save_rxhash(nsk, skb);
1263			sk_mark_napi_id(nsk, skb);
1264			if (tcp_child_process(sk, nsk, skb))
1265				goto reset;
1266			if (opt_skb)
1267				__kfree_skb(opt_skb);
1268			return 0;
1269		}
1270	} else
1271		sock_rps_save_rxhash(sk, skb);
1272
1273	if (tcp_rcv_state_process(sk, skb))
1274		goto reset;
1275	if (opt_skb)
1276		goto ipv6_pktoptions;
1277	return 0;
1278
1279reset:
1280	tcp_v6_send_reset(sk, skb);
1281discard:
1282	if (opt_skb)
1283		__kfree_skb(opt_skb);
1284	kfree_skb(skb);
1285	return 0;
1286csum_err:
1287	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
1288	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1289	goto discard;
1290
1291
1292ipv6_pktoptions:
1293	/* You may ask, what is this?
1294
1295	   1. skb was enqueued by tcp.
1296	   2. skb is added to the tail of the read queue, rather than out of order.
1297	   3. socket is not in passive state.
1298	   4. Finally, it really contains options, which the user wants to receive.
1299	 */
1300	tp = tcp_sk(sk);
1301	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1302	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
1303		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
1304			np->mcast_oif = tcp_v6_iif(opt_skb);
1305		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1306			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
1307		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
1308			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
1309		if (np->repflow)
1310			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
1311		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
1312			skb_set_owner_r(opt_skb, sk);
1313			opt_skb = xchg(&np->pktoptions, opt_skb);
1314		} else {
1315			__kfree_skb(opt_skb);
1316			opt_skb = xchg(&np->pktoptions, NULL);
1317		}
1318	}
1319
1320	kfree_skb(opt_skb);
1321	return 0;
1322}
1323
1324static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
1325			   const struct tcphdr *th)
1326{
1327	/* This is tricky: we move IP6CB to its correct location inside
1328	 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
1329	 * _decode_session6() uses IP6CB().
1330	 * barrier() makes sure compiler won't play aliasing games.
1331	 */
1332	memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
1333		sizeof(struct inet6_skb_parm));
1334	barrier();
1335
1336	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1337	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1338				    skb->len - th->doff*4);
1339	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1340	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1341	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1342	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1343	TCP_SKB_CB(skb)->sacked = 0;
1344}
1345
1346static void tcp_v6_restore_cb(struct sk_buff *skb)
1347{
1348	/* We need to move header back to the beginning if xfrm6_policy_check()
1349	 * and tcp_v6_fill_cb() are going to be called again.
1350	 */
1351	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
1352		sizeof(struct inet6_skb_parm));
1353}
1354
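/* Main receive path, called from the IPv6 protocol handler in softirq
 * context: validate the header, look up the owning socket and either process
 * the segment directly or defer it to the prequeue/backlog.
 */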
1355static int tcp_v6_rcv(struct sk_buff *skb)
1356{
1357	const struct tcphdr *th;
1358	const struct ipv6hdr *hdr;
1359	struct sock *sk;
1360	int ret;
1361	struct net *net = dev_net(skb->dev);
1362
1363	if (skb->pkt_type != PACKET_HOST)
1364		goto discard_it;
1365
1366	/*
1367	 *	Count it even if it's bad.
1368	 */
1369	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1370
1371	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1372		goto discard_it;
1373
1374	th = tcp_hdr(skb);
1375
1376	if (th->doff < sizeof(struct tcphdr)/4)
1377		goto bad_packet;
1378	if (!pskb_may_pull(skb, th->doff*4))
1379		goto discard_it;
1380
1381	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
1382		goto csum_error;
1383
1384	th = tcp_hdr(skb);
1385	hdr = ipv6_hdr(skb);
1386
1387lookup:
1388	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
1389				th->source, th->dest, inet6_iif(skb));
1390	if (!sk)
1391		goto no_tcp_socket;
1392
1393process:
1394	if (sk->sk_state == TCP_TIME_WAIT)
1395		goto do_time_wait;
1396
1397	if (sk->sk_state == TCP_NEW_SYN_RECV) {
1398		struct request_sock *req = inet_reqsk(sk);
1399		struct sock *nsk;
1400
1401		sk = req->rsk_listener;
1402		tcp_v6_fill_cb(skb, hdr, th);
1403		if (tcp_v6_inbound_md5_hash(sk, skb)) {
1404			reqsk_put(req);
1405			goto discard_it;
1406		}
1407		if (unlikely(sk->sk_state != TCP_LISTEN)) {
1408			inet_csk_reqsk_queue_drop_and_put(sk, req);
1409			goto lookup;
1410		}
1411		sock_hold(sk);
1412		nsk = tcp_check_req(sk, skb, req, false);
1413		if (!nsk) {
1414			reqsk_put(req);
1415			goto discard_and_relse;
1416		}
1417		if (nsk == sk) {
1418			reqsk_put(req);
1419			tcp_v6_restore_cb(skb);
1420		} else if (tcp_child_process(sk, nsk, skb)) {
1421			tcp_v6_send_reset(nsk, skb);
1422			goto discard_and_relse;
1423		} else {
1424			sock_put(sk);
1425			return 0;
1426		}
1427	}
1428	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
1429		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1430		goto discard_and_relse;
1431	}
1432
1433	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1434		goto discard_and_relse;
1435
1436	tcp_v6_fill_cb(skb, hdr, th);
1437
1438	if (tcp_v6_inbound_md5_hash(sk, skb))
1439		goto discard_and_relse;
1440
1441	if (sk_filter(sk, skb))
1442		goto discard_and_relse;
1443
1444	skb->dev = NULL;
1445
1446	if (sk->sk_state == TCP_LISTEN) {
1447		ret = tcp_v6_do_rcv(sk, skb);
1448		goto put_and_return;
1449	}
1450
1451	sk_incoming_cpu_update(sk);
1452
1453	bh_lock_sock_nested(sk);
1454	tcp_segs_in(tcp_sk(sk), skb);
1455	ret = 0;
1456	if (!sock_owned_by_user(sk)) {
1457		if (!tcp_prequeue(sk, skb))
1458			ret = tcp_v6_do_rcv(sk, skb);
1459	} else if (unlikely(sk_add_backlog(sk, skb,
1460					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
1461		bh_unlock_sock(sk);
1462		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1463		goto discard_and_relse;
1464	}
1465	bh_unlock_sock(sk);
1466
1467put_and_return:
1468	sock_put(sk);
1469	return ret ? -1 : 0;
1470
1471no_tcp_socket:
1472	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1473		goto discard_it;
1474
1475	tcp_v6_fill_cb(skb, hdr, th);
1476
1477	if (tcp_checksum_complete(skb)) {
1478csum_error:
1479		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
1480bad_packet:
1481		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1482	} else {
1483		tcp_v6_send_reset(NULL, skb);
1484	}
1485
1486discard_it:
1487	kfree_skb(skb);
1488	return 0;
1489
1490discard_and_relse:
1491	sock_put(sk);
1492	goto discard_it;
1493
1494do_time_wait:
1495	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1496		inet_twsk_put(inet_twsk(sk));
1497		goto discard_it;
1498	}
1499
1500	tcp_v6_fill_cb(skb, hdr, th);
1501
1502	if (tcp_checksum_complete(skb)) {
1503		inet_twsk_put(inet_twsk(sk));
1504		goto csum_error;
1505	}
1506
1507	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1508	case TCP_TW_SYN:
1509	{
1510		struct sock *sk2;
1511
1512		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
1513					    skb, __tcp_hdrlen(th),
1514					    &ipv6_hdr(skb)->saddr, th->source,
1515					    &ipv6_hdr(skb)->daddr,
1516					    ntohs(th->dest), tcp_v6_iif(skb));
1517		if (sk2) {
1518			struct inet_timewait_sock *tw = inet_twsk(sk);
1519			inet_twsk_deschedule_put(tw);
1520			sk = sk2;
1521			tcp_v6_restore_cb(skb);
1522			goto process;
1523		}
1524		/* Fall through to ACK */
1525	}
1526	case TCP_TW_ACK:
1527		tcp_v6_timewait_ack(sk, skb);
1528		break;
1529	case TCP_TW_RST:
1530		tcp_v6_restore_cb(skb);
1531		tcp_v6_send_reset(sk, skb);
1532		inet_twsk_deschedule_put(inet_twsk(sk));
1533		goto discard_it;
1534	case TCP_TW_SUCCESS:
1535		;
1536	}
1537	goto discard_it;
1538}
1539
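/* Early demux, run from the IPv6 input path before routing: if an
 * established socket matches the segment, attach it (and its cached
 * receive dst, when still valid) to the skb so the normal receive path
 * can reuse them.
 */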
1540static void tcp_v6_early_demux(struct sk_buff *skb)
1541{
1542	const struct ipv6hdr *hdr;
1543	const struct tcphdr *th;
1544	struct sock *sk;
1545
1546	if (skb->pkt_type != PACKET_HOST)
1547		return;
1548
1549	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1550		return;
1551
1552	hdr = ipv6_hdr(skb);
1553	th = tcp_hdr(skb);
1554
1555	if (th->doff < sizeof(struct tcphdr) / 4)
1556		return;
1557
1558	/* Note: we use inet6_iif() here, not tcp_v6_iif() */
1559	sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1560					&hdr->saddr, th->source,
1561					&hdr->daddr, ntohs(th->dest),
1562					inet6_iif(skb));
1563	if (sk) {
1564		skb->sk = sk;
1565		skb->destructor = sock_edemux;
1566		if (sk_fullsock(sk)) {
1567			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
1568
1569			if (dst)
1570				dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
1571			if (dst &&
1572			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1573				skb_dst_set_noref(skb, dst);
1574		}
1575	}
1576}
1577
1578static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1579	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
1580	.twsk_unique	= tcp_twsk_unique,
1581	.twsk_destructor = tcp_twsk_destructor,
1582};
1583
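/* Connection-level AF operations for native IPv6 sockets.  Sockets
 * talking to v4-mapped peers use ipv6_mapped below instead, which
 * reuses the IPv4 transmit and header-building helpers.
 */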
1584static const struct inet_connection_sock_af_ops ipv6_specific = {
1585	.queue_xmit	   = inet6_csk_xmit,
1586	.send_check	   = tcp_v6_send_check,
1587	.rebuild_header	   = inet6_sk_rebuild_header,
1588	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
1589	.conn_request	   = tcp_v6_conn_request,
1590	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
1591	.net_header_len	   = sizeof(struct ipv6hdr),
1592	.net_frag_header_len = sizeof(struct frag_hdr),
1593	.setsockopt	   = ipv6_setsockopt,
1594	.getsockopt	   = ipv6_getsockopt,
1595	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
1596	.sockaddr_len	   = sizeof(struct sockaddr_in6),
1597	.bind_conflict	   = inet6_csk_bind_conflict,
1598#ifdef CONFIG_COMPAT
1599	.compat_setsockopt = compat_ipv6_setsockopt,
1600	.compat_getsockopt = compat_ipv6_getsockopt,
1601#endif
1602	.mtu_reduced	   = tcp_v6_mtu_reduced,
1603};
1604
1605#ifdef CONFIG_TCP_MD5SIG
1606static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
1607	.md5_lookup	=	tcp_v6_md5_lookup,
1608	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
1609	.md5_parse	=	tcp_v6_parse_md5_keys,
1610};
1611#endif
1612
1613/*
1614 *	TCP over IPv4 via INET6 API
1615 */
1616static const struct inet_connection_sock_af_ops ipv6_mapped = {
1617	.queue_xmit	   = ip_queue_xmit,
1618	.send_check	   = tcp_v4_send_check,
1619	.rebuild_header	   = inet_sk_rebuild_header,
1620	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
1621	.conn_request	   = tcp_v6_conn_request,
1622	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
1623	.net_header_len	   = sizeof(struct iphdr),
1624	.setsockopt	   = ipv6_setsockopt,
1625	.getsockopt	   = ipv6_getsockopt,
1626	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
1627	.sockaddr_len	   = sizeof(struct sockaddr_in6),
1628	.bind_conflict	   = inet6_csk_bind_conflict,
1629#ifdef CONFIG_COMPAT
1630	.compat_setsockopt = compat_ipv6_setsockopt,
1631	.compat_getsockopt = compat_ipv6_getsockopt,
1632#endif
1633	.mtu_reduced	   = tcp_v4_mtu_reduced,
1634};
1635
1636#ifdef CONFIG_TCP_MD5SIG
1637static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
1638	.md5_lookup	=	tcp_v4_md5_lookup,
1639	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
1640	.md5_parse	=	tcp_v6_parse_md5_keys,
1641};
1642#endif
1643
1644/* NOTE: A lot of fields are set to zero explicitly by the call to
1645 *       sk_alloc(), so they need not be initialized here.
1646 */
1647static int tcp_v6_init_sock(struct sock *sk)
1648{
1649	struct inet_connection_sock *icsk = inet_csk(sk);
1650
1651	tcp_init_sock(sk);
1652
1653	icsk->icsk_af_ops = &ipv6_specific;
1654
1655#ifdef CONFIG_TCP_MD5SIG
1656	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
1657#endif
1658
1659	return 0;
1660}
1661
1662static void tcp_v6_destroy_sock(struct sock *sk)
1663{
1664	tcp_v4_destroy_sock(sk);
1665	inet6_destroy_sock(sk);
1666}
1667
1668#ifdef CONFIG_PROC_FS
1669/* Proc filesystem TCPv6 sock list dumping. */
1670static void get_openreq6(struct seq_file *seq,
1671			 const struct request_sock *req, int i)
1672{
1673	long ttd = req->rsk_timer.expires - jiffies;
1674	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
1675	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;
1676
1677	if (ttd < 0)
1678		ttd = 0;
1679
1680	seq_printf(seq,
1681		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1682		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
1683		   i,
1684		   src->s6_addr32[0], src->s6_addr32[1],
1685		   src->s6_addr32[2], src->s6_addr32[3],
1686		   inet_rsk(req)->ir_num,
1687		   dest->s6_addr32[0], dest->s6_addr32[1],
1688		   dest->s6_addr32[2], dest->s6_addr32[3],
1689		   ntohs(inet_rsk(req)->ir_rmt_port),
1690		   TCP_SYN_RECV,
1691		   0, 0, /* could print option size, but that is af dependent. */
1692		   1,   /* timers active (only the expire timer) */
1693		   jiffies_to_clock_t(ttd),
1694		   req->num_timeout,
1695		   from_kuid_munged(seq_user_ns(seq),
1696				    sock_i_uid(req->rsk_listener)),
1697		   0,  /* non standard timer */
1698		   0, /* open_requests have no inode */
1699		   0, req);
1700}
1701
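/* One line per established socket in /proc/net/tcp6: addresses are
 * printed as the four raw 32-bit words of the in6_addr in hex, followed
 * by the port in hex, matching the header emitted by tcp6_seq_show().
 */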
1702static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1703{
1704	const struct in6_addr *dest, *src;
1705	__u16 destp, srcp;
1706	int timer_active;
1707	unsigned long timer_expires;
1708	const struct inet_sock *inet = inet_sk(sp);
1709	const struct tcp_sock *tp = tcp_sk(sp);
1710	const struct inet_connection_sock *icsk = inet_csk(sp);
1711	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
1712	int rx_queue;
1713	int state;
1714
1715	dest  = &sp->sk_v6_daddr;
1716	src   = &sp->sk_v6_rcv_saddr;
1717	destp = ntohs(inet->inet_dport);
1718	srcp  = ntohs(inet->inet_sport);
1719
1720	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
1721		timer_active	= 1;
1722		timer_expires	= icsk->icsk_timeout;
1723	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1724		timer_active	= 4;
1725		timer_expires	= icsk->icsk_timeout;
1726	} else if (timer_pending(&sp->sk_timer)) {
1727		timer_active	= 2;
1728		timer_expires	= sp->sk_timer.expires;
1729	} else {
1730		timer_active	= 0;
1731		timer_expires = jiffies;
1732	}
1733
1734	state = sk_state_load(sp);
1735	if (state == TCP_LISTEN)
1736		rx_queue = sp->sk_ack_backlog;
1737	else
1738		/* Because we don't lock the socket,
1739		 * we might find a transient negative value.
1740		 */
1741		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
1742
1743	seq_printf(seq,
1744		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1745		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
1746		   i,
1747		   src->s6_addr32[0], src->s6_addr32[1],
1748		   src->s6_addr32[2], src->s6_addr32[3], srcp,
1749		   dest->s6_addr32[0], dest->s6_addr32[1],
1750		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
1751		   state,
1752		   tp->write_seq - tp->snd_una,
1753		   rx_queue,
1754		   timer_active,
1755		   jiffies_delta_to_clock_t(timer_expires - jiffies),
1756		   icsk->icsk_retransmits,
1757		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
1758		   icsk->icsk_probes_out,
1759		   sock_i_ino(sp),
1760		   atomic_read(&sp->sk_refcnt), sp,
1761		   jiffies_to_clock_t(icsk->icsk_rto),
1762		   jiffies_to_clock_t(icsk->icsk_ack.ato),
1763		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
1764		   tp->snd_cwnd,
1765		   state == TCP_LISTEN ?
1766			fastopenq->max_qlen :
1767			(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
1768		   );
1769}
1770
1771static void get_timewait6_sock(struct seq_file *seq,
1772			       struct inet_timewait_sock *tw, int i)
1773{
1774	long delta = tw->tw_timer.expires - jiffies;
1775	const struct in6_addr *dest, *src;
1776	__u16 destp, srcp;
1777
1778	dest = &tw->tw_v6_daddr;
1779	src  = &tw->tw_v6_rcv_saddr;
1780	destp = ntohs(tw->tw_dport);
1781	srcp  = ntohs(tw->tw_sport);
1782
1783	seq_printf(seq,
1784		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1785		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
1786		   i,
1787		   src->s6_addr32[0], src->s6_addr32[1],
1788		   src->s6_addr32[2], src->s6_addr32[3], srcp,
1789		   dest->s6_addr32[0], dest->s6_addr32[1],
1790		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
1791		   tw->tw_substate, 0, 0,
1792		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
1793		   atomic_read(&tw->tw_refcnt), tw);
1794}
1795
1796static int tcp6_seq_show(struct seq_file *seq, void *v)
1797{
1798	struct tcp_iter_state *st;
1799	struct sock *sk = v;
1800
1801	if (v == SEQ_START_TOKEN) {
1802		seq_puts(seq,
1803			 "  sl  "
1804			 "local_address                         "
1805			 "remote_address                        "
1806			 "st tx_queue rx_queue tr tm->when retrnsmt"
1807			 "   uid  timeout inode\n");
1808		goto out;
1809	}
1810	st = seq->private;
1811
1812	if (sk->sk_state == TCP_TIME_WAIT)
1813		get_timewait6_sock(seq, v, st->num);
1814	else if (sk->sk_state == TCP_NEW_SYN_RECV)
1815		get_openreq6(seq, v, st->num);
1816	else
1817		get_tcp6_sock(seq, v, st->num);
1818out:
1819	return 0;
1820}
1821
1822static const struct file_operations tcp6_afinfo_seq_fops = {
1823	.owner   = THIS_MODULE,
1824	.open    = tcp_seq_open,
1825	.read    = seq_read,
1826	.llseek  = seq_lseek,
1827	.release = seq_release_net
1828};
1829
1830static struct tcp_seq_afinfo tcp6_seq_afinfo = {
1831	.name		= "tcp6",
1832	.family		= AF_INET6,
1833	.seq_fops	= &tcp6_afinfo_seq_fops,
1834	.seq_ops	= {
1835		.show		= tcp6_seq_show,
1836	},
1837};
1838
1839int __net_init tcp6_proc_init(struct net *net)
1840{
1841	return tcp_proc_register(net, &tcp6_seq_afinfo);
1842}
1843
1844void tcp6_proc_exit(struct net *net)
1845{
1846	tcp_proc_unregister(net, &tcp6_seq_afinfo);
1847}
1848#endif
1849
1850static void tcp_v6_clear_sk(struct sock *sk, int size)
1851{
1852	struct inet_sock *inet = inet_sk(sk);
1853
1854	/* we do not want to clear the pinet6 field, because of RCU lookups */
1855	sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6));
1856
1857	size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
1858	memset(&inet->pinet6 + 1, 0, size);
1859}
1860
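/* Protocol operations for SOCK_STREAM/IPPROTO_TCP sockets in the
 * AF_INET6 family, hooked up through tcpv6_protosw below.  Most
 * handlers are shared with IPv4 TCP; the IPv6-specific entry points
 * (connect, init/destroy, backlog_rcv, hashing) are plugged in here.
 */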
1861struct proto tcpv6_prot = {
1862	.name			= "TCPv6",
1863	.owner			= THIS_MODULE,
1864	.close			= tcp_close,
1865	.connect		= tcp_v6_connect,
1866	.disconnect		= tcp_disconnect,
1867	.accept			= inet_csk_accept,
1868	.ioctl			= tcp_ioctl,
1869	.init			= tcp_v6_init_sock,
1870	.destroy		= tcp_v6_destroy_sock,
1871	.shutdown		= tcp_shutdown,
1872	.setsockopt		= tcp_setsockopt,
1873	.getsockopt		= tcp_getsockopt,
1874	.recvmsg		= tcp_recvmsg,
1875	.sendmsg		= tcp_sendmsg,
1876	.sendpage		= tcp_sendpage,
1877	.backlog_rcv		= tcp_v6_do_rcv,
1878	.release_cb		= tcp_release_cb,
1879	.hash			= inet6_hash,
1880	.unhash			= inet_unhash,
1881	.get_port		= inet_csk_get_port,
1882	.enter_memory_pressure	= tcp_enter_memory_pressure,
1883	.stream_memory_free	= tcp_stream_memory_free,
1884	.sockets_allocated	= &tcp_sockets_allocated,
1885	.memory_allocated	= &tcp_memory_allocated,
1886	.memory_pressure	= &tcp_memory_pressure,
1887	.orphan_count		= &tcp_orphan_count,
1888	.sysctl_mem		= sysctl_tcp_mem,
1889	.sysctl_wmem		= sysctl_tcp_wmem,
1890	.sysctl_rmem		= sysctl_tcp_rmem,
1891	.max_header		= MAX_TCP_HEADER,
1892	.obj_size		= sizeof(struct tcp6_sock),
1893	.slab_flags		= SLAB_DESTROY_BY_RCU,
1894	.twsk_prot		= &tcp6_timewait_sock_ops,
1895	.rsk_prot		= &tcp6_request_sock_ops,
1896	.h.hashinfo		= &tcp_hashinfo,
1897	.no_autobind		= true,
1898#ifdef CONFIG_COMPAT
1899	.compat_setsockopt	= compat_tcp_setsockopt,
1900	.compat_getsockopt	= compat_tcp_getsockopt,
1901#endif
1902	.clear_sk		= tcp_v6_clear_sk,
1903	.diag_destroy		= tcp_abort,
1904};
1905
1906static const struct inet6_protocol tcpv6_protocol = {
1907	.early_demux	=	tcp_v6_early_demux,
1908	.handler	=	tcp_v6_rcv,
1909	.err_handler	=	tcp_v6_err,
1910	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
1911};
1912
1913static struct inet_protosw tcpv6_protosw = {
1914	.type		=	SOCK_STREAM,
1915	.protocol	=	IPPROTO_TCP,
1916	.prot		=	&tcpv6_prot,
1917	.ops		=	&inet6_stream_ops,
1918	.flags		=	INET_PROTOSW_PERMANENT |
1919				INET_PROTOSW_ICSK,
1920};
1921
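/* Per-namespace init/exit: create and tear down the kernel control
 * socket kept in net->ipv6.tcp_sk, which the stack uses for segments it
 * generates itself (such as resets) rather than on behalf of a user
 * socket.
 */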
1922static int __net_init tcpv6_net_init(struct net *net)
1923{
1924	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
1925				    SOCK_RAW, IPPROTO_TCP, net);
1926}
1927
1928static void __net_exit tcpv6_net_exit(struct net *net)
1929{
1930	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
1931}
1932
1933static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
1934{
1935	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
1936}
1937
1938static struct pernet_operations tcpv6_net_ops = {
1939	.init	    = tcpv6_net_init,
1940	.exit	    = tcpv6_net_exit,
1941	.exit_batch = tcpv6_net_exit_batch,
1942};
1943
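/* Module init: register the IPPROTO_TCP handler, the SOCK_STREAM
 * protosw entry and the per-namespace operations, in that order,
 * unwinding the earlier registrations if a later step fails.
 */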
1944int __init tcpv6_init(void)
1945{
1946	int ret;
1947
1948	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
1949	if (ret)
1950		goto out;
1951
1952	/* register inet6 protocol */
1953	ret = inet6_register_protosw(&tcpv6_protosw);
1954	if (ret)
1955		goto out_tcpv6_protocol;
1956
1957	ret = register_pernet_subsys(&tcpv6_net_ops);
1958	if (ret)
1959		goto out_tcpv6_protosw;
1960out:
1961	return ret;
1962
1963out_tcpv6_protosw:
1964	inet6_unregister_protosw(&tcpv6_protosw);
1965out_tcpv6_protocol:
1966	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
1967	goto out;
1968}
1969
1970void tcpv6_exit(void)
1971{
1972	unregister_pernet_subsys(&tcpv6_net_ops);
1973	inet6_unregister_protosw(&tcpv6_protosw);
1974	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
1975}