   1/*
   2 *	TCP over IPv6
   3 *	Linux INET6 implementation
   4 *
   5 *	Authors:
   6 *	Pedro Roque		<roque@di.fc.ul.pt>
   7 *
   8 *	Based on:
   9 *	linux/net/ipv4/tcp.c
  10 *	linux/net/ipv4/tcp_input.c
  11 *	linux/net/ipv4/tcp_output.c
  12 *
  13 *	Fixes:
  14 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
  15 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
  16 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
  17 *					a single port at the same time.
  18 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
  19 *
  20 *	This program is free software; you can redistribute it and/or
  21 *      modify it under the terms of the GNU General Public License
  22 *      as published by the Free Software Foundation; either version
  23 *      2 of the License, or (at your option) any later version.
  24 */
  25
  26#include <linux/bottom_half.h>
  27#include <linux/module.h>
  28#include <linux/errno.h>
  29#include <linux/types.h>
  30#include <linux/socket.h>
  31#include <linux/sockios.h>
  32#include <linux/net.h>
  33#include <linux/jiffies.h>
  34#include <linux/in.h>
  35#include <linux/in6.h>
  36#include <linux/netdevice.h>
  37#include <linux/init.h>
  38#include <linux/jhash.h>
  39#include <linux/ipsec.h>
  40#include <linux/times.h>
  41#include <linux/slab.h>
  42
  43#include <linux/ipv6.h>
  44#include <linux/icmpv6.h>
  45#include <linux/random.h>
  46
  47#include <net/tcp.h>
  48#include <net/ndisc.h>
  49#include <net/inet6_hashtables.h>
  50#include <net/inet6_connection_sock.h>
  51#include <net/ipv6.h>
  52#include <net/transp_v6.h>
  53#include <net/addrconf.h>
  54#include <net/ip6_route.h>
  55#include <net/ip6_checksum.h>
  56#include <net/inet_ecn.h>
  57#include <net/protocol.h>
  58#include <net/xfrm.h>
  59#include <net/snmp.h>
  60#include <net/dsfield.h>
  61#include <net/timewait_sock.h>
  62#include <net/netdma.h>
  63#include <net/inet_common.h>
  64#include <net/secure_seq.h>
  65
  66#include <asm/uaccess.h>
  67
  68#include <linux/proc_fs.h>
  69#include <linux/seq_file.h>
  70
  71#include <linux/crypto.h>
  72#include <linux/scatterlist.h>
  73
  74static void	tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
  75static void	tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
  76				      struct request_sock *req);
  77
  78static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
  79static void	__tcp_v6_send_check(struct sk_buff *skb,
  80				    const struct in6_addr *saddr,
  81				    const struct in6_addr *daddr);
  82
  83static const struct inet_connection_sock_af_ops ipv6_mapped;
  84static const struct inet_connection_sock_af_ops ipv6_specific;
  85#ifdef CONFIG_TCP_MD5SIG
  86static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
  87static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
  88#else
  89static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
  90						   const struct in6_addr *addr)
  91{
  92	return NULL;
  93}
  94#endif
  95
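/* Hash the socket into the TCP lookup tables.  A socket whose af_ops
 * were switched to ipv6_mapped (a v4-mapped connection) is handed to
 * tcp_prot.hash() so it lands in the same tables an AF_INET socket
 * would use; native IPv6 sockets go through __inet6_hash() with
 * bottom halves disabled.
 */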
  96static void tcp_v6_hash(struct sock *sk)
  97{
  98	if (sk->sk_state != TCP_CLOSE) {
  99		if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
 100			tcp_prot.hash(sk);
 101			return;
 102		}
 103		local_bh_disable();
 104		__inet6_hash(sk, NULL);
 105		local_bh_enable();
 106	}
 107}
 108
 109static __inline__ __sum16 tcp_v6_check(int len,
 110				   const struct in6_addr *saddr,
 111				   const struct in6_addr *daddr,
 112				   __wsum base)
 113{
 114	return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
 115}
 116
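/* A minimal usage sketch for the helper above (illustrative, not part
 * of the original file): validating a segment whose full sum has
 * already been folded into skb->csum, i.e. CHECKSUM_COMPLETE.  The
 * name tcp_v6_csum_ok is made up for this example.
 */
static inline bool tcp_v6_csum_ok(const struct sk_buff *skb)
{
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);

	/* csum_ipv6_magic() folds in the pseudo-header and yields
	 * zero when the complete checksum is valid.
	 */
	return !tcp_v6_check(skb->len, &ip6h->saddr, &ip6h->daddr,
			     skb->csum);
}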
 117static __u32 tcp_v6_init_sequence(struct sk_buff *skb)
 118{
 119	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
 120					    ipv6_hdr(skb)->saddr.s6_addr32,
 121					    tcp_hdr(skb)->dest,
 122					    tcp_hdr(skb)->source);
 123}
 124
 125static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 126			  int addr_len)
 127{
 128	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
 129	struct inet_sock *inet = inet_sk(sk);
 130	struct inet_connection_sock *icsk = inet_csk(sk);
 131	struct ipv6_pinfo *np = inet6_sk(sk);
 132	struct tcp_sock *tp = tcp_sk(sk);
 133	struct in6_addr *saddr = NULL, *final_p, final;
 134	struct rt6_info *rt;
 135	struct flowi6 fl6;
 136	struct dst_entry *dst;
 137	int addr_type;
 138	int err;
 139
 140	if (addr_len < SIN6_LEN_RFC2133)
 141		return -EINVAL;
 142
 143	if (usin->sin6_family != AF_INET6)
 144		return -EAFNOSUPPORT;
 145
 146	memset(&fl6, 0, sizeof(fl6));
 147
 148	if (np->sndflow) {
 149		fl6.flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
 150		IP6_ECN_flow_init(fl6.flowlabel);
 151		if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
 152			struct ip6_flowlabel *flowlabel;
 153			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
 154			if (flowlabel == NULL)
 155				return -EINVAL;
 156			ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
 157			fl6_sock_release(flowlabel);
 158		}
 159	}
 160
 161	/*
 162	 *	connect() to INADDR_ANY means loopback (BSD'ism).
 163	 */
 164
 165	if (ipv6_addr_any(&usin->sin6_addr))
 166		usin->sin6_addr.s6_addr[15] = 0x1;
 167
 168	addr_type = ipv6_addr_type(&usin->sin6_addr);
 169
 170	if (addr_type & IPV6_ADDR_MULTICAST)
 171		return -ENETUNREACH;
 172
 173	if (addr_type & IPV6_ADDR_LINKLOCAL) {
 174		if (addr_len >= sizeof(struct sockaddr_in6) &&
 175		    usin->sin6_scope_id) {
 176			/* If interface is set while binding, indices
 177			 * must coincide.
 178			 */
 179			if (sk->sk_bound_dev_if &&
 180			    sk->sk_bound_dev_if != usin->sin6_scope_id)
 181				return -EINVAL;
 182
 183			sk->sk_bound_dev_if = usin->sin6_scope_id;
 184		}
 185
 186		/* Connect to link-local address requires an interface */
 187		if (!sk->sk_bound_dev_if)
 188			return -EINVAL;
 189	}
 190
 191	if (tp->rx_opt.ts_recent_stamp &&
 192	    !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
 193		tp->rx_opt.ts_recent = 0;
 194		tp->rx_opt.ts_recent_stamp = 0;
 195		tp->write_seq = 0;
 196	}
 197
 198	ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
 199	np->flow_label = fl6.flowlabel;
 200
 201	/*
 202	 *	TCP over IPv4
 203	 */
 204
 205	if (addr_type == IPV6_ADDR_MAPPED) {
 206		u32 exthdrlen = icsk->icsk_ext_hdr_len;
 207		struct sockaddr_in sin;
 208
 209		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
 210
 211		if (__ipv6_only_sock(sk))
 212			return -ENETUNREACH;
 213
 214		sin.sin_family = AF_INET;
 215		sin.sin_port = usin->sin6_port;
 216		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
 217
 218		icsk->icsk_af_ops = &ipv6_mapped;
 219		sk->sk_backlog_rcv = tcp_v4_do_rcv;
 220#ifdef CONFIG_TCP_MD5SIG
 221		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
 222#endif
 223
 224		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
 225
 226		if (err) {
 227			icsk->icsk_ext_hdr_len = exthdrlen;
 228			icsk->icsk_af_ops = &ipv6_specific;
 229			sk->sk_backlog_rcv = tcp_v6_do_rcv;
 230#ifdef CONFIG_TCP_MD5SIG
 231			tp->af_specific = &tcp_sock_ipv6_specific;
 232#endif
 233			goto failure;
 234		} else {
 235			ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
 236			ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
 237					       &np->rcv_saddr);
 238		}
 239
 240		return err;
 241	}
 242
 243	if (!ipv6_addr_any(&np->rcv_saddr))
 244		saddr = &np->rcv_saddr;
 245
 246	fl6.flowi6_proto = IPPROTO_TCP;
 247	ipv6_addr_copy(&fl6.daddr, &np->daddr);
 248	ipv6_addr_copy(&fl6.saddr,
 249		       (saddr ? saddr : &np->saddr));
 250	fl6.flowi6_oif = sk->sk_bound_dev_if;
 251	fl6.flowi6_mark = sk->sk_mark;
 252	fl6.fl6_dport = usin->sin6_port;
 253	fl6.fl6_sport = inet->inet_sport;
 254
 255	final_p = fl6_update_dst(&fl6, np->opt, &final);
 256
 257	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
 258
 259	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true);
 260	if (IS_ERR(dst)) {
 261		err = PTR_ERR(dst);
 262		goto failure;
 263	}
 264
 265	if (saddr == NULL) {
 266		saddr = &fl6.saddr;
 267		ipv6_addr_copy(&np->rcv_saddr, saddr);
 268	}
 269
 270	/* set the source address */
 271	ipv6_addr_copy(&np->saddr, saddr);
 272	inet->inet_rcv_saddr = LOOPBACK4_IPV6;
 273
 274	sk->sk_gso_type = SKB_GSO_TCPV6;
 275	__ip6_dst_store(sk, dst, NULL, NULL);
 276
 277	rt = (struct rt6_info *) dst;
 278	if (tcp_death_row.sysctl_tw_recycle &&
 279	    !tp->rx_opt.ts_recent_stamp &&
 280	    ipv6_addr_equal(&rt->rt6i_dst.addr, &np->daddr)) {
 281		struct inet_peer *peer = rt6_get_peer(rt);
 282		/*
 283		 * VJ's idea. We save last timestamp seen from
 284		 * the destination in peer table, when entering state
 285		 * TIME-WAIT, and initialize rx_opt.ts_recent from it,
 286		 * when trying new connection.
 287		 */
 288		if (peer) {
 289			inet_peer_refcheck(peer);
 290			if ((u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
 291				tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
 292				tp->rx_opt.ts_recent = peer->tcp_ts;
 293			}
 294		}
 295	}
 296
 297	icsk->icsk_ext_hdr_len = 0;
 298	if (np->opt)
 299		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
 300					  np->opt->opt_nflen);
 301
 302	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
 303
 304	inet->inet_dport = usin->sin6_port;
 305
 306	tcp_set_state(sk, TCP_SYN_SENT);
 307	err = inet6_hash_connect(&tcp_death_row, sk);
 308	if (err)
 309		goto late_failure;
 310
 311	if (!tp->write_seq)
 312		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
 313							     np->daddr.s6_addr32,
 314							     inet->inet_sport,
 315							     inet->inet_dport);
 316
 317	err = tcp_connect(sk);
 318	if (err)
 319		goto late_failure;
 320
 321	return 0;
 322
 323late_failure:
 324	tcp_set_state(sk, TCP_CLOSE);
 325	__sk_dst_reset(sk);
 326failure:
 327	inet->inet_dport = 0;
 328	sk->sk_route_caps = 0;
 329	return err;
 330}
 331
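/* The caller's side of this path, sketched from user space (purely
 * illustrative, not part of this file): a connect(2) on an AF_INET6
 * stream socket is what ultimately reaches tcp_v6_connect() above.
 *
 *	struct sockaddr_in6 dst = {
 *		.sin6_family = AF_INET6,
 *		.sin6_port   = htons(80),
 *	};
 *
 *	inet_pton(AF_INET6, "2001:db8::1", &dst.sin6_addr);
 *	connect(fd, (struct sockaddr *)&dst, sizeof(dst));
 *
 * Supplying a v4-mapped address (::ffff:192.0.2.1) instead takes the
 * IPV6_ADDR_MAPPED branch above and delegates to tcp_v4_connect().
 */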
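/* ICMPv6 error handler for TCP: invoked when an ICMPv6 error quotes
 * one of our segments.  ICMPV6_PKT_TOOBIG triggers an MSS update and
 * a simple retransmit; other errors are converted with
 * icmpv6_err_convert() and reported to the socket, or stashed in
 * sk_err_soft when the socket is owned by user context.
 */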
 332static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 333		u8 type, u8 code, int offset, __be32 info)
 334{
 335	const struct ipv6hdr *hdr = (const struct ipv6hdr*)skb->data;
 336	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
 337	struct ipv6_pinfo *np;
 338	struct sock *sk;
 339	int err;
 340	struct tcp_sock *tp;
 341	__u32 seq;
 342	struct net *net = dev_net(skb->dev);
 343
 344	sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
 345			th->dest, &hdr->saddr, th->source, skb->dev->ifindex);
 346
 347	if (sk == NULL) {
 348		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
 349				   ICMP6_MIB_INERRORS);
 350		return;
 351	}
 352
 353	if (sk->sk_state == TCP_TIME_WAIT) {
 354		inet_twsk_put(inet_twsk(sk));
 355		return;
 356	}
 357
 358	bh_lock_sock(sk);
 359	if (sock_owned_by_user(sk))
 360		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
 361
 362	if (sk->sk_state == TCP_CLOSE)
 363		goto out;
 364
 365	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
 366		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
 367		goto out;
 368	}
 369
 370	tp = tcp_sk(sk);
 371	seq = ntohl(th->seq);
 372	if (sk->sk_state != TCP_LISTEN &&
 373	    !between(seq, tp->snd_una, tp->snd_nxt)) {
 374		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
 375		goto out;
 376	}
 377
 378	np = inet6_sk(sk);
 379
 380	if (type == ICMPV6_PKT_TOOBIG) {
 381		struct dst_entry *dst;
 382
 383		if (sock_owned_by_user(sk))
 384			goto out;
 385		if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
 386			goto out;
 387
 388		/* icmp should have updated the destination cache entry */
 389		dst = __sk_dst_check(sk, np->dst_cookie);
 390
 391		if (dst == NULL) {
 392			struct inet_sock *inet = inet_sk(sk);
 393			struct flowi6 fl6;
 394
 395			/* BUGGG_FUTURE: Again, it is not clear how
 396			   to handle rthdr case. Ignore this complexity
 397			   for now.
 398			 */
 399			memset(&fl6, 0, sizeof(fl6));
 400			fl6.flowi6_proto = IPPROTO_TCP;
 401			ipv6_addr_copy(&fl6.daddr, &np->daddr);
 402			ipv6_addr_copy(&fl6.saddr, &np->saddr);
 403			fl6.flowi6_oif = sk->sk_bound_dev_if;
 404			fl6.flowi6_mark = sk->sk_mark;
 405			fl6.fl6_dport = inet->inet_dport;
 406			fl6.fl6_sport = inet->inet_sport;
 407			security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
 408
 409			dst = ip6_dst_lookup_flow(sk, &fl6, NULL, false);
 410			if (IS_ERR(dst)) {
 411				sk->sk_err_soft = -PTR_ERR(dst);
 412				goto out;
 413			}
 414
 415		} else
 416			dst_hold(dst);
 417
 418		if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
 419			tcp_sync_mss(sk, dst_mtu(dst));
 420			tcp_simple_retransmit(sk);
 421		} /* else let the usual retransmit timer handle it */
 422		dst_release(dst);
 423		goto out;
 424	}
 425
 426	icmpv6_err_convert(type, code, &err);
 427
 428	/* Might be for a request_sock */
 429	switch (sk->sk_state) {
 430		struct request_sock *req, **prev;
 431	case TCP_LISTEN:
 432		if (sock_owned_by_user(sk))
 433			goto out;
 434
 435		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
 436					   &hdr->saddr, inet6_iif(skb));
 437		if (!req)
 438			goto out;
 439
 440		/* ICMPs are not backlogged, hence we cannot get
 441		 * an established socket here.
 442		 */
 443		WARN_ON(req->sk != NULL);
 444
 445		if (seq != tcp_rsk(req)->snt_isn) {
 446			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
 447			goto out;
 448		}
 449
 450		inet_csk_reqsk_queue_drop(sk, req, prev);
 451		goto out;
 452
 453	case TCP_SYN_SENT:
 454	case TCP_SYN_RECV:  /* Cannot happen.
 455			       It can, if SYNs are crossed. --ANK */
 456		if (!sock_owned_by_user(sk)) {
 457			sk->sk_err = err;
 458			sk->sk_error_report(sk);		/* Wake people up to see the error (see connect in sock.c) */
 459
 460			tcp_done(sk);
 461		} else
 462			sk->sk_err_soft = err;
 463		goto out;
 464	}
 465
 466	if (!sock_owned_by_user(sk) && np->recverr) {
 467		sk->sk_err = err;
 468		sk->sk_error_report(sk);
 469	} else
 470		sk->sk_err_soft = err;
 471
 472out:
 473	bh_unlock_sock(sk);
 474	sock_put(sk);
 475}
 476
 477
 478static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
 479			      struct request_values *rvp)
 480{
 481	struct inet6_request_sock *treq = inet6_rsk(req);
 482	struct ipv6_pinfo *np = inet6_sk(sk);
 483	struct sk_buff *skb;
 484	struct ipv6_txoptions *opt = NULL;
 485	struct in6_addr *final_p, final;
 486	struct flowi6 fl6;
 487	struct dst_entry *dst;
 488	int err;
 489
 490	memset(&fl6, 0, sizeof(fl6));
 491	fl6.flowi6_proto = IPPROTO_TCP;
 492	ipv6_addr_copy(&fl6.daddr, &treq->rmt_addr);
 493	ipv6_addr_copy(&fl6.saddr, &treq->loc_addr);
 494	fl6.flowlabel = 0;
 495	fl6.flowi6_oif = treq->iif;
 496	fl6.flowi6_mark = sk->sk_mark;
 497	fl6.fl6_dport = inet_rsk(req)->rmt_port;
 498	fl6.fl6_sport = inet_rsk(req)->loc_port;
 499	security_req_classify_flow(req, flowi6_to_flowi(&fl6));
 500
 501	opt = np->opt;
 502	final_p = fl6_update_dst(&fl6, opt, &final);
 503
 504	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
 505	if (IS_ERR(dst)) {
 506		err = PTR_ERR(dst);
 507		dst = NULL;
 508		goto done;
 509	}
 510	skb = tcp_make_synack(sk, dst, req, rvp);
 511	err = -ENOMEM;
 512	if (skb) {
 513		__tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);
 514
 515		ipv6_addr_copy(&fl6.daddr, &treq->rmt_addr);
 516		err = ip6_xmit(sk, skb, &fl6, opt);
 517		err = net_xmit_eval(err);
 518	}
 519
 520done:
 521	if (opt && opt != np->opt)
 522		sock_kfree_s(sk, opt, opt->tot_len);
 523	dst_release(dst);
 524	return err;
 525}
 526
 527static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
 528			     struct request_values *rvp)
 529{
 530	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
 531	return tcp_v6_send_synack(sk, req, rvp);
 532}
 533
 534static void tcp_v6_reqsk_destructor(struct request_sock *req)
 535{
 536	kfree_skb(inet6_rsk(req)->pktopts);
 537}
 538
 539#ifdef CONFIG_TCP_MD5SIG
 540static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
 541						   const struct in6_addr *addr)
 542{
 543	struct tcp_sock *tp = tcp_sk(sk);
 544	int i;
 545
 546	BUG_ON(tp == NULL);
 547
 548	if (!tp->md5sig_info || !tp->md5sig_info->entries6)
 549		return NULL;
 550
 551	for (i = 0; i < tp->md5sig_info->entries6; i++) {
 552		if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, addr))
 553			return &tp->md5sig_info->keys6[i].base;
 554	}
 555	return NULL;
 556}
 557
 558static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
 559						struct sock *addr_sk)
 560{
 561	return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
 562}
 563
 564static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
 565						      struct request_sock *req)
 566{
 567	return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
 568}
 569
 570static int tcp_v6_md5_do_add(struct sock *sk, const struct in6_addr *peer,
 571			     char *newkey, u8 newkeylen)
 572{
 573	/* Add key to the list */
 574	struct tcp_md5sig_key *key;
 575	struct tcp_sock *tp = tcp_sk(sk);
 576	struct tcp6_md5sig_key *keys;
 577
 578	key = tcp_v6_md5_do_lookup(sk, peer);
 579	if (key) {
 580		/* modify existing entry - just update that one */
 581		kfree(key->key);
 582		key->key = newkey;
 583		key->keylen = newkeylen;
 584	} else {
 585		/* reallocate new list if current one is full. */
 586		if (!tp->md5sig_info) {
 587			tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info), GFP_ATOMIC);
 588			if (!tp->md5sig_info) {
 589				kfree(newkey);
 590				return -ENOMEM;
 591			}
 592			sk_nocaps_add(sk, NETIF_F_GSO_MASK);
 593		}
 594		if (tp->md5sig_info->entries6 == 0 &&
 595			tcp_alloc_md5sig_pool(sk) == NULL) {
 596			kfree(newkey);
 597			return -ENOMEM;
 598		}
 599		if (tp->md5sig_info->alloced6 == tp->md5sig_info->entries6) {
 600			keys = kmalloc((sizeof (tp->md5sig_info->keys6[0]) *
 601				       (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC);
 602
 603			if (!keys) {
 604				kfree(newkey);
 605				if (tp->md5sig_info->entries6 == 0)
 606					tcp_free_md5sig_pool();
 607				return -ENOMEM;
 608			}
 609
 610			if (tp->md5sig_info->entries6)
 611				memmove(keys, tp->md5sig_info->keys6,
 612					(sizeof (tp->md5sig_info->keys6[0]) *
 613					 tp->md5sig_info->entries6));
 614
 615			kfree(tp->md5sig_info->keys6);
 616			tp->md5sig_info->keys6 = keys;
 617			tp->md5sig_info->alloced6++;
 618		}
 619
 620		ipv6_addr_copy(&tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr,
 621			       peer);
 622		tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.key = newkey;
 623		tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.keylen = newkeylen;
 624
 625		tp->md5sig_info->entries6++;
 626	}
 627	return 0;
 628}
 629
 630static int tcp_v6_md5_add_func(struct sock *sk, struct sock *addr_sk,
 631			       u8 *newkey, __u8 newkeylen)
 632{
 633	return tcp_v6_md5_do_add(sk, &inet6_sk(addr_sk)->daddr,
 634				 newkey, newkeylen);
 635}
 636
 637static int tcp_v6_md5_do_del(struct sock *sk, const struct in6_addr *peer)
 638{
 639	struct tcp_sock *tp = tcp_sk(sk);
 640	int i;
 641
 642	for (i = 0; i < tp->md5sig_info->entries6; i++) {
 643		if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, peer)) {
 644			/* Free the key */
 645			kfree(tp->md5sig_info->keys6[i].base.key);
 646			tp->md5sig_info->entries6--;
 647
 648			if (tp->md5sig_info->entries6 == 0) {
 649				kfree(tp->md5sig_info->keys6);
 650				tp->md5sig_info->keys6 = NULL;
 651				tp->md5sig_info->alloced6 = 0;
 652				tcp_free_md5sig_pool();
 653			} else {
 654				/* shrink the database */
 655				if (tp->md5sig_info->entries6 != i)
 656					memmove(&tp->md5sig_info->keys6[i],
 657						&tp->md5sig_info->keys6[i+1],
 658						(tp->md5sig_info->entries6 - i)
 659						* sizeof (tp->md5sig_info->keys6[0]));
 660			}
 661			return 0;
 662		}
 663	}
 664	return -ENOENT;
 665}
 666
 667static void tcp_v6_clear_md5_list(struct sock *sk)
 668{
 669	struct tcp_sock *tp = tcp_sk(sk);
 670	int i;
 671
 672	if (tp->md5sig_info->entries6) {
 673		for (i = 0; i < tp->md5sig_info->entries6; i++)
 674			kfree(tp->md5sig_info->keys6[i].base.key);
 675		tp->md5sig_info->entries6 = 0;
 676		tcp_free_md5sig_pool();
 677	}
 678
 679	kfree(tp->md5sig_info->keys6);
 680	tp->md5sig_info->keys6 = NULL;
 681	tp->md5sig_info->alloced6 = 0;
 682
 683	if (tp->md5sig_info->entries4) {
 684		for (i = 0; i < tp->md5sig_info->entries4; i++)
 685			kfree(tp->md5sig_info->keys4[i].base.key);
 686		tp->md5sig_info->entries4 = 0;
 687		tcp_free_md5sig_pool();
 688	}
 689
 690	kfree(tp->md5sig_info->keys4);
 691	tp->md5sig_info->keys4 = NULL;
 692	tp->md5sig_info->alloced4 = 0;
 693}
 694
 695static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
 696				  int optlen)
 697{
 698	struct tcp_md5sig cmd;
 699	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
 700	u8 *newkey;
 701
 702	if (optlen < sizeof(cmd))
 703		return -EINVAL;
 704
 705	if (copy_from_user(&cmd, optval, sizeof(cmd)))
 706		return -EFAULT;
 707
 708	if (sin6->sin6_family != AF_INET6)
 709		return -EINVAL;
 710
 711	if (!cmd.tcpm_keylen) {
 712		if (!tcp_sk(sk)->md5sig_info)
 713			return -ENOENT;
 714		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
 715			return tcp_v4_md5_do_del(sk, sin6->sin6_addr.s6_addr32[3]);
 716		return tcp_v6_md5_do_del(sk, &sin6->sin6_addr);
 717	}
 718
 719	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
 720		return -EINVAL;
 721
 722	if (!tcp_sk(sk)->md5sig_info) {
 723		struct tcp_sock *tp = tcp_sk(sk);
 724		struct tcp_md5sig_info *p;
 725
 726		p = kzalloc(sizeof(struct tcp_md5sig_info), GFP_KERNEL);
 727		if (!p)
 728			return -ENOMEM;
 729
 730		tp->md5sig_info = p;
 731		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
 732	}
 733
 734	newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
 735	if (!newkey)
 736		return -ENOMEM;
 737	if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
 738		return tcp_v4_md5_do_add(sk, sin6->sin6_addr.s6_addr32[3],
 739					 newkey, cmd.tcpm_keylen);
 740	}
 741	return tcp_v6_md5_do_add(sk, &sin6->sin6_addr, newkey, cmd.tcpm_keylen);
 742}
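/* User-space view of the parser above (illustrative only): TCP-MD5
 * (RFC 2385) keys arrive through the TCP_MD5SIG socket option, which
 * is what delivers the struct tcp_md5sig consumed here.
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *	struct sockaddr_in6 *sin6 =
 *		(struct sockaddr_in6 *)&md5.tcpm_addr;
 *
 *	sin6->sin6_family = AF_INET6;
 *	inet_pton(AF_INET6, "2001:db8::2", &sin6->sin6_addr);
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 */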
 743
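/* The MD5 signature is computed over a pseudo-header analogous to the
 * one used for the TCP checksum (RFC 2460, section 8.1): source and
 * destination addresses, upper-layer length and next-header value,
 * followed (in the callers below) by the TCP header, the payload and
 * the key.
 */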
 744static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
 745					const struct in6_addr *daddr,
 746					const struct in6_addr *saddr, int nbytes)
 747{
 748	struct tcp6_pseudohdr *bp;
 749	struct scatterlist sg;
 750
 751	bp = &hp->md5_blk.ip6;
 752	/* 1. TCP pseudo-header (RFC2460) */
 753	ipv6_addr_copy(&bp->saddr, saddr);
 754	ipv6_addr_copy(&bp->daddr, daddr);
 755	bp->protocol = cpu_to_be32(IPPROTO_TCP);
 756	bp->len = cpu_to_be32(nbytes);
 757
 758	sg_init_one(&sg, bp, sizeof(*bp));
 759	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
 760}
 761
 762static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
 763			       const struct in6_addr *daddr, struct in6_addr *saddr,
 764			       struct tcphdr *th)
 765{
 766	struct tcp_md5sig_pool *hp;
 767	struct hash_desc *desc;
 768
 769	hp = tcp_get_md5sig_pool();
 770	if (!hp)
 771		goto clear_hash_noput;
 772	desc = &hp->md5_desc;
 773
 774	if (crypto_hash_init(desc))
 775		goto clear_hash;
 776	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
 777		goto clear_hash;
 778	if (tcp_md5_hash_header(hp, th))
 779		goto clear_hash;
 780	if (tcp_md5_hash_key(hp, key))
 781		goto clear_hash;
 782	if (crypto_hash_final(desc, md5_hash))
 783		goto clear_hash;
 784
 785	tcp_put_md5sig_pool();
 786	return 0;
 787
 788clear_hash:
 789	tcp_put_md5sig_pool();
 790clear_hash_noput:
 791	memset(md5_hash, 0, 16);
 792	return 1;
 793}
 794
 795static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
 796			       struct sock *sk, struct request_sock *req,
 797			       struct sk_buff *skb)
 798{
 799	const struct in6_addr *saddr, *daddr;
 800	struct tcp_md5sig_pool *hp;
 801	struct hash_desc *desc;
 802	struct tcphdr *th = tcp_hdr(skb);
 803
 804	if (sk) {
 805		saddr = &inet6_sk(sk)->saddr;
 806		daddr = &inet6_sk(sk)->daddr;
 807	} else if (req) {
 808		saddr = &inet6_rsk(req)->loc_addr;
 809		daddr = &inet6_rsk(req)->rmt_addr;
 810	} else {
 811		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
 812		saddr = &ip6h->saddr;
 813		daddr = &ip6h->daddr;
 814	}
 815
 816	hp = tcp_get_md5sig_pool();
 817	if (!hp)
 818		goto clear_hash_noput;
 819	desc = &hp->md5_desc;
 820
 821	if (crypto_hash_init(desc))
 822		goto clear_hash;
 823
 824	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
 825		goto clear_hash;
 826	if (tcp_md5_hash_header(hp, th))
 827		goto clear_hash;
 828	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
 829		goto clear_hash;
 830	if (tcp_md5_hash_key(hp, key))
 831		goto clear_hash;
 832	if (crypto_hash_final(desc, md5_hash))
 833		goto clear_hash;
 834
 835	tcp_put_md5sig_pool();
 836	return 0;
 837
 838clear_hash:
 839	tcp_put_md5sig_pool();
 840clear_hash_noput:
 841	memset(md5_hash, 0, 16);
 842	return 1;
 843}
 844
 845static int tcp_v6_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
 846{
 847	__u8 *hash_location = NULL;
 848	struct tcp_md5sig_key *hash_expected;
 849	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
 850	struct tcphdr *th = tcp_hdr(skb);
 851	int genhash;
 852	u8 newhash[16];
 853
 854	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
 855	hash_location = tcp_parse_md5sig_option(th);
 856
 857	/* We've parsed the options - do we have a hash? */
 858	if (!hash_expected && !hash_location)
 859		return 0;
 860
 861	if (hash_expected && !hash_location) {
 862		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
 863		return 1;
 864	}
 865
 866	if (!hash_expected && hash_location) {
 867		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
 868		return 1;
 869	}
 870
 871	/* check the signature */
 872	genhash = tcp_v6_md5_hash_skb(newhash,
 873				      hash_expected,
 874				      NULL, NULL, skb);
 875
 876	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
 877		if (net_ratelimit()) {
 878			printk(KERN_INFO "MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
 879			       genhash ? "failed" : "mismatch",
 880			       &ip6h->saddr, ntohs(th->source),
 881			       &ip6h->daddr, ntohs(th->dest));
 882		}
 883		return 1;
 884	}
 885	return 0;
 886}
 887#endif
 888
 889struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
 890	.family		=	AF_INET6,
 891	.obj_size	=	sizeof(struct tcp6_request_sock),
 892	.rtx_syn_ack	=	tcp_v6_rtx_synack,
 893	.send_ack	=	tcp_v6_reqsk_send_ack,
 894	.destructor	=	tcp_v6_reqsk_destructor,
 895	.send_reset	=	tcp_v6_send_reset,
 896	.syn_ack_timeout = 	tcp_syn_ack_timeout,
 897};
 898
 899#ifdef CONFIG_TCP_MD5SIG
 900static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
 901	.md5_lookup	=	tcp_v6_reqsk_md5_lookup,
 902	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
 903};
 904#endif
 905
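/* Fill in the TCP checksum of an outgoing skb.  With CHECKSUM_PARTIAL
 * only the pseudo-header sum is stored and the device (or the GSO
 * path) completes the checksum from csum_start/csum_offset later;
 * otherwise the full checksum over header and payload is computed
 * here.
 */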
 906static void __tcp_v6_send_check(struct sk_buff *skb,
 907				const struct in6_addr *saddr, const struct in6_addr *daddr)
 908{
 909	struct tcphdr *th = tcp_hdr(skb);
 910
 911	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 912		th->check = ~tcp_v6_check(skb->len, saddr, daddr, 0);
 913		skb->csum_start = skb_transport_header(skb) - skb->head;
 914		skb->csum_offset = offsetof(struct tcphdr, check);
 915	} else {
 916		th->check = tcp_v6_check(skb->len, saddr, daddr,
 917					 csum_partial(th, th->doff << 2,
 918						      skb->csum));
 919	}
 920}
 921
 922static void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
 923{
 924	struct ipv6_pinfo *np = inet6_sk(sk);
 925
 926	__tcp_v6_send_check(skb, &np->saddr, &np->daddr);
 927}
 928
 929static int tcp_v6_gso_send_check(struct sk_buff *skb)
 930{
 931	const struct ipv6hdr *ipv6h;
 932	struct tcphdr *th;
 933
 934	if (!pskb_may_pull(skb, sizeof(*th)))
 935		return -EINVAL;
 936
 937	ipv6h = ipv6_hdr(skb);
 938	th = tcp_hdr(skb);
 939
 940	th->check = 0;
 941	skb->ip_summed = CHECKSUM_PARTIAL;
 942	__tcp_v6_send_check(skb, &ipv6h->saddr, &ipv6h->daddr);
 943	return 0;
 944}
 945
 946static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
 947					 struct sk_buff *skb)
 948{
 949	const struct ipv6hdr *iph = skb_gro_network_header(skb);
 950
 951	switch (skb->ip_summed) {
 952	case CHECKSUM_COMPLETE:
 953		if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr,
 954				  skb->csum)) {
 955			skb->ip_summed = CHECKSUM_UNNECESSARY;
 956			break;
 957		}
 958
 959		/* fall through */
 960	case CHECKSUM_NONE:
 961		NAPI_GRO_CB(skb)->flush = 1;
 962		return NULL;
 963	}
 964
 965	return tcp_gro_receive(head, skb);
 966}
 967
 968static int tcp6_gro_complete(struct sk_buff *skb)
 969{
 970	const struct ipv6hdr *iph = ipv6_hdr(skb);
 971	struct tcphdr *th = tcp_hdr(skb);
 972
 973	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
 974				  &iph->saddr, &iph->daddr, 0);
 975	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
 976
 977	return tcp_gro_complete(skb);
 978}
 979
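/* Build and send a bare ACK or RST in reply to @skb without requiring
 * an established socket: addresses and ports are swapped from the
 * incoming segment, optional timestamp and MD5 options are appended,
 * and the result goes out through the per-namespace control socket.
 */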
 980static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
 981				 u32 ts, struct tcp_md5sig_key *key, int rst)
 982{
 983	struct tcphdr *th = tcp_hdr(skb), *t1;
 984	struct sk_buff *buff;
 985	struct flowi6 fl6;
 986	struct net *net = dev_net(skb_dst(skb)->dev);
 987	struct sock *ctl_sk = net->ipv6.tcp_sk;
 988	unsigned int tot_len = sizeof(struct tcphdr);
 989	struct dst_entry *dst;
 990	__be32 *topt;
 991
 992	if (ts)
 993		tot_len += TCPOLEN_TSTAMP_ALIGNED;
 994#ifdef CONFIG_TCP_MD5SIG
 995	if (key)
 996		tot_len += TCPOLEN_MD5SIG_ALIGNED;
 997#endif
 998
 999	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
1000			 GFP_ATOMIC);
1001	if (buff == NULL)
1002		return;
1003
1004	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
1005
1006	t1 = (struct tcphdr *) skb_push(buff, tot_len);
1007	skb_reset_transport_header(buff);
1008
1009	/* Swap the send and the receive. */
1010	memset(t1, 0, sizeof(*t1));
1011	t1->dest = th->source;
1012	t1->source = th->dest;
1013	t1->doff = tot_len / 4;
1014	t1->seq = htonl(seq);
1015	t1->ack_seq = htonl(ack);
1016	t1->ack = !rst || !th->ack;
1017	t1->rst = rst;
1018	t1->window = htons(win);
1019
1020	topt = (__be32 *)(t1 + 1);
1021
1022	if (ts) {
1023		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
1024				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
1025		*topt++ = htonl(tcp_time_stamp);
1026		*topt++ = htonl(ts);
1027	}
1028
1029#ifdef CONFIG_TCP_MD5SIG
1030	if (key) {
1031		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
1032				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
1033		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
1034				    &ipv6_hdr(skb)->saddr,
1035				    &ipv6_hdr(skb)->daddr, t1);
1036	}
1037#endif
1038
1039	memset(&fl6, 0, sizeof(fl6));
1040	ipv6_addr_copy(&fl6.daddr, &ipv6_hdr(skb)->saddr);
1041	ipv6_addr_copy(&fl6.saddr, &ipv6_hdr(skb)->daddr);
1042
1043	buff->ip_summed = CHECKSUM_PARTIAL;
1044	buff->csum = 0;
1045
1046	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
1047
1048	fl6.flowi6_proto = IPPROTO_TCP;
1049	fl6.flowi6_oif = inet6_iif(skb);
1050	fl6.fl6_dport = t1->dest;
1051	fl6.fl6_sport = t1->source;
1052	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
1053
1054	/* Pass a socket to ip6_dst_lookup even if it is for an RST;
1055	 * the underlying function will use it to retrieve the network
1056	 * namespace.
1057	 */
1058	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL, false);
1059	if (!IS_ERR(dst)) {
1060		skb_dst_set(buff, dst);
1061		ip6_xmit(ctl_sk, buff, &fl6, NULL);
1062		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
1063		if (rst)
1064			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
1065		return;
1066	}
1067
1068	kfree_skb(buff);
1069}
1070
1071static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
1072{
1073	struct tcphdr *th = tcp_hdr(skb);
1074	u32 seq = 0, ack_seq = 0;
1075	struct tcp_md5sig_key *key = NULL;
1076
1077	if (th->rst)
1078		return;
1079
1080	if (!ipv6_unicast_destination(skb))
1081		return;
1082
1083#ifdef CONFIG_TCP_MD5SIG
1084	if (sk)
1085		key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr);
1086#endif
1087
1088	if (th->ack)
1089		seq = ntohl(th->ack_seq);
1090	else
1091		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
1092			  (th->doff << 2);
1093
1094	tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1);
1095}
1096
1097static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts,
1098			    struct tcp_md5sig_key *key)
1099{
1100	tcp_v6_send_response(skb, seq, ack, win, ts, key, 0);
1101}
1102
1103static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
1104{
1105	struct inet_timewait_sock *tw = inet_twsk(sk);
1106	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
1107
1108	tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
1109			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
1110			tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw));
1111
1112	inet_twsk_put(tw);
1113}
1114
1115static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
1116				  struct request_sock *req)
1117{
1118	tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent,
1119			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr));
1120}
1121
1122
1123static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
1124{
1125	struct request_sock *req, **prev;
1126	const struct tcphdr *th = tcp_hdr(skb);
1127	struct sock *nsk;
1128
1129	/* Find possible connection requests. */
1130	req = inet6_csk_search_req(sk, &prev, th->source,
1131				   &ipv6_hdr(skb)->saddr,
1132				   &ipv6_hdr(skb)->daddr, inet6_iif(skb));
1133	if (req)
1134		return tcp_check_req(sk, skb, req, prev);
1135
1136	nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
1137			&ipv6_hdr(skb)->saddr, th->source,
1138			&ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));
1139
1140	if (nsk) {
1141		if (nsk->sk_state != TCP_TIME_WAIT) {
1142			bh_lock_sock(nsk);
1143			return nsk;
1144		}
1145		inet_twsk_put(inet_twsk(nsk));
1146		return NULL;
1147	}
1148
1149#ifdef CONFIG_SYN_COOKIES
1150	if (!th->syn)
1151		sk = cookie_v6_check(sk, skb);
1152#endif
1153	return sk;
1154}
1155
1156/* FIXME: this is substantially similar to the ipv4 code.
1157 * Can some kind of merge be done? -- erics
1158 */
1159static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1160{
1161	struct tcp_extend_values tmp_ext;
1162	struct tcp_options_received tmp_opt;
1163	u8 *hash_location;
1164	struct request_sock *req;
1165	struct inet6_request_sock *treq;
1166	struct ipv6_pinfo *np = inet6_sk(sk);
1167	struct tcp_sock *tp = tcp_sk(sk);
1168	__u32 isn = TCP_SKB_CB(skb)->when;
1169	struct dst_entry *dst = NULL;
1170	int want_cookie = 0;
1171
1172	if (skb->protocol == htons(ETH_P_IP))
1173		return tcp_v4_conn_request(sk, skb);
1174
1175	if (!ipv6_unicast_destination(skb))
1176		goto drop;
1177
1178	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
1179		want_cookie = tcp_syn_flood_action(sk, skb, "TCPv6");
1180		if (!want_cookie)
1181			goto drop;
1182	}
1183
1184	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
1185		goto drop;
1186
1187	req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
1188	if (req == NULL)
1189		goto drop;
1190
1191#ifdef CONFIG_TCP_MD5SIG
1192	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
1193#endif
1194
1195	tcp_clear_options(&tmp_opt);
1196	tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
1197	tmp_opt.user_mss = tp->rx_opt.user_mss;
1198	tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
1199
1200	if (tmp_opt.cookie_plus > 0 &&
1201	    tmp_opt.saw_tstamp &&
1202	    !tp->rx_opt.cookie_out_never &&
1203	    (sysctl_tcp_cookie_size > 0 ||
1204	     (tp->cookie_values != NULL &&
1205	      tp->cookie_values->cookie_desired > 0))) {
1206		u8 *c;
1207		u32 *d;
1208		u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
1209		int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;
1210
1211		if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
1212			goto drop_and_free;
1213
1214		/* Secret recipe starts with IP addresses */
1215		d = (__force u32 *)&ipv6_hdr(skb)->daddr.s6_addr32[0];
1216		*mess++ ^= *d++;
1217		*mess++ ^= *d++;
1218		*mess++ ^= *d++;
1219		*mess++ ^= *d++;
1220		d = (__force u32 *)&ipv6_hdr(skb)->saddr.s6_addr32[0];
1221		*mess++ ^= *d++;
1222		*mess++ ^= *d++;
1223		*mess++ ^= *d++;
1224		*mess++ ^= *d++;
1225
1226		/* plus variable length Initiator Cookie */
1227		c = (u8 *)mess;
1228		while (l-- > 0)
1229			*c++ ^= *hash_location++;
1230
1231		want_cookie = 0;	/* not our kind of cookie */
1232		tmp_ext.cookie_out_never = 0; /* false */
1233		tmp_ext.cookie_plus = tmp_opt.cookie_plus;
1234	} else if (!tp->rx_opt.cookie_in_always) {
1235		/* redundant indications, but ensure initialization. */
1236		tmp_ext.cookie_out_never = 1; /* true */
1237		tmp_ext.cookie_plus = 0;
1238	} else {
1239		goto drop_and_free;
1240	}
1241	tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;
1242
1243	if (want_cookie && !tmp_opt.saw_tstamp)
1244		tcp_clear_options(&tmp_opt);
1245
1246	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
1247	tcp_openreq_init(req, &tmp_opt, skb);
1248
1249	treq = inet6_rsk(req);
1250	ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr);
1251	ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr);
1252	if (!want_cookie || tmp_opt.tstamp_ok)
1253		TCP_ECN_create_request(req, tcp_hdr(skb));
1254
1255	if (!isn) {
1256		struct inet_peer *peer = NULL;
1257
1258		if (ipv6_opt_accepted(sk, skb) ||
1259		    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
1260		    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
1261			atomic_inc(&skb->users);
1262			treq->pktopts = skb;
1263		}
1264		treq->iif = sk->sk_bound_dev_if;
1265
1266		/* So that link locals have meaning */
1267		if (!sk->sk_bound_dev_if &&
1268		    ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
1269			treq->iif = inet6_iif(skb);
1270
1271		if (want_cookie) {
1272			isn = cookie_v6_init_sequence(sk, skb, &req->mss);
1273			req->cookie_ts = tmp_opt.tstamp_ok;
1274			goto have_isn;
1275		}
1276
1277		/* VJ's idea. We save last timestamp seen
1278		 * from the destination in peer table, when entering
1279		 * state TIME-WAIT, and check against it before
1280		 * accepting new connection request.
1281		 *
1282		 * If "isn" is not zero, this request hit alive
1283		 * timewait bucket, so that all the necessary checks
1284		 * are made in the function processing timewait state.
1285		 */
1286		if (tmp_opt.saw_tstamp &&
1287		    tcp_death_row.sysctl_tw_recycle &&
1288		    (dst = inet6_csk_route_req(sk, req)) != NULL &&
1289		    (peer = rt6_get_peer((struct rt6_info *)dst)) != NULL &&
1290		    ipv6_addr_equal((struct in6_addr *)peer->daddr.addr.a6,
1291				    &treq->rmt_addr)) {
1292			inet_peer_refcheck(peer);
1293			if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
1294			    (s32)(peer->tcp_ts - req->ts_recent) >
1295							TCP_PAWS_WINDOW) {
1296				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
1297				goto drop_and_release;
1298			}
1299		}
1300		/* Kill the following clause, if you dislike this way. */
1301		else if (!sysctl_tcp_syncookies &&
1302			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
1303			  (sysctl_max_syn_backlog >> 2)) &&
1304			 (!peer || !peer->tcp_ts_stamp) &&
1305			 (!dst || !dst_metric(dst, RTAX_RTT))) {
1306			/* Without syncookies last quarter of
1307			 * backlog is filled with destinations,
1308			 * proven to be alive.
1309			 * It means that we continue to communicate
1310			 * to destinations, already remembered
1311			 * to the moment of synflood.
1312			 */
1313			LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI6/%u\n",
1314				       &treq->rmt_addr, ntohs(tcp_hdr(skb)->source));
1315			goto drop_and_release;
1316		}
1317
1318		isn = tcp_v6_init_sequence(skb);
1319	}
1320have_isn:
1321	tcp_rsk(req)->snt_isn = isn;
1322	tcp_rsk(req)->snt_synack = tcp_time_stamp;
1323
1324	security_inet_conn_request(sk, skb, req);
1325
1326	if (tcp_v6_send_synack(sk, req,
1327			       (struct request_values *)&tmp_ext) ||
1328	    want_cookie)
1329		goto drop_and_free;
1330
1331	inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1332	return 0;
1333
1334drop_and_release:
1335	dst_release(dst);
1336drop_and_free:
1337	reqsk_free(req);
1338drop:
1339	return 0; /* don't send reset */
1340}
1341
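/* Create the child socket once the handshake completes.  For a
 * v4-mapped request this wraps tcp_v4_syn_recv_sock() and repoints
 * icsk_af_ops at ipv6_mapped; for native IPv6 it clones the listener,
 * copies addresses and options from the request, and inherits the
 * bound port.
 */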
1342static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1343					  struct request_sock *req,
1344					  struct dst_entry *dst)
1345{
1346	struct inet6_request_sock *treq;
1347	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
1348	struct tcp6_sock *newtcp6sk;
1349	struct inet_sock *newinet;
1350	struct tcp_sock *newtp;
1351	struct sock *newsk;
1352	struct ipv6_txoptions *opt;
1353#ifdef CONFIG_TCP_MD5SIG
1354	struct tcp_md5sig_key *key;
1355#endif
1356
1357	if (skb->protocol == htons(ETH_P_IP)) {
1358		/*
1359		 *	v6 mapped
1360		 */
1361
1362		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);
1363
1364		if (newsk == NULL)
1365			return NULL;
1366
1367		newtcp6sk = (struct tcp6_sock *)newsk;
1368		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1369
1370		newinet = inet_sk(newsk);
1371		newnp = inet6_sk(newsk);
1372		newtp = tcp_sk(newsk);
1373
1374		memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1375
1376		ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr);
1377
1378		ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);
1379
1380		ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);
1381
1382		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1383		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
1384#ifdef CONFIG_TCP_MD5SIG
1385		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1386#endif
1387
1388		newnp->ipv6_ac_list = NULL;
1389		newnp->ipv6_fl_list = NULL;
1390		newnp->pktoptions  = NULL;
1391		newnp->opt	   = NULL;
1392		newnp->mcast_oif   = inet6_iif(skb);
1393		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
1394
1395		/*
1396		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1397		 * here, tcp_create_openreq_child now does this for us, see the comment in
1398		 * that function for the gory details. -acme
1399		 */
1400
1401		/* It is tricky place. Until this moment IPv4 tcp
1402		   worked with IPv6 icsk.icsk_af_ops.
1403		   Sync it now.
1404		 */
1405		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
1406
1407		return newsk;
1408	}
1409
1410	treq = inet6_rsk(req);
1411	opt = np->opt;
1412
1413	if (sk_acceptq_is_full(sk))
1414		goto out_overflow;
1415
1416	if (!dst) {
1417		dst = inet6_csk_route_req(sk, req);
1418		if (!dst)
1419			goto out;
1420	}
1421
1422	newsk = tcp_create_openreq_child(sk, req, skb);
1423	if (newsk == NULL)
1424		goto out_nonewsk;
1425
1426	/*
1427	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1428	 * count here, tcp_create_openreq_child now does this for us, see the
1429	 * comment in that function for the gory details. -acme
1430	 */
1431
1432	newsk->sk_gso_type = SKB_GSO_TCPV6;
1433	__ip6_dst_store(newsk, dst, NULL, NULL);
1434
1435	newtcp6sk = (struct tcp6_sock *)newsk;
1436	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1437
1438	newtp = tcp_sk(newsk);
1439	newinet = inet_sk(newsk);
1440	newnp = inet6_sk(newsk);
1441
1442	memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1443
1444	ipv6_addr_copy(&newnp->daddr, &treq->rmt_addr);
1445	ipv6_addr_copy(&newnp->saddr, &treq->loc_addr);
1446	ipv6_addr_copy(&newnp->rcv_saddr, &treq->loc_addr);
1447	newsk->sk_bound_dev_if = treq->iif;
1448
1449	/* Now IPv6 options...
1450
1451	   First: no IPv4 options.
1452	 */
1453	newinet->inet_opt = NULL;
1454	newnp->ipv6_ac_list = NULL;
1455	newnp->ipv6_fl_list = NULL;
1456
1457	/* Clone RX bits */
1458	newnp->rxopt.all = np->rxopt.all;
1459
1460	/* Clone pktoptions received with SYN */
1461	newnp->pktoptions = NULL;
1462	if (treq->pktopts != NULL) {
1463		newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
1464		kfree_skb(treq->pktopts);
1465		treq->pktopts = NULL;
1466		if (newnp->pktoptions)
1467			skb_set_owner_r(newnp->pktoptions, newsk);
1468	}
1469	newnp->opt	  = NULL;
1470	newnp->mcast_oif  = inet6_iif(skb);
1471	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1472
1473	/* Clone native IPv6 options from listening socket (if any)
1474
1475	   Yes, keeping reference count would be much more clever,
1476	   but we do one more thing here: reattach optmem
1477	   to newsk.
1478	 */
1479	if (opt) {
1480		newnp->opt = ipv6_dup_options(newsk, opt);
1481		if (opt != np->opt)
1482			sock_kfree_s(sk, opt, opt->tot_len);
1483	}
1484
1485	inet_csk(newsk)->icsk_ext_hdr_len = 0;
1486	if (newnp->opt)
1487		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
1488						     newnp->opt->opt_flen);
1489
1490	tcp_mtup_init(newsk);
1491	tcp_sync_mss(newsk, dst_mtu(dst));
1492	newtp->advmss = dst_metric_advmss(dst);
1493	tcp_initialize_rcv_mss(newsk);
1494	if (tcp_rsk(req)->snt_synack)
1495		tcp_valid_rtt_meas(newsk,
1496		    tcp_time_stamp - tcp_rsk(req)->snt_synack);
1497	newtp->total_retrans = req->retrans;
1498
1499	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
1500	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
1501
1502#ifdef CONFIG_TCP_MD5SIG
1503	/* Copy over the MD5 key from the original socket */
1504	if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
1505		/* We're using one, so create a matching key
1506		 * on the newsk structure. If we fail to get
1507		 * memory, then we end up not copying the key
1508		 * across. Shucks.
1509		 */
1510		char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
1511		if (newkey != NULL)
1512			tcp_v6_md5_do_add(newsk, &newnp->daddr,
1513					  newkey, key->keylen);
1514	}
1515#endif
1516
1517	if (__inet_inherit_port(sk, newsk) < 0) {
1518		sock_put(newsk);
1519		goto out;
1520	}
1521	__inet6_hash(newsk, NULL);
1522
1523	return newsk;
1524
1525out_overflow:
1526	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1527out_nonewsk:
1528	if (opt && opt != np->opt)
1529		sock_kfree_s(sk, opt, opt->tot_len);
1530	dst_release(dst);
1531out:
1532	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1533	return NULL;
1534}
1535
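/* Validate, or defer validation of, the checksum on an incoming
 * segment.  Packets of at most 76 bytes are verified in full right
 * away (presumably cheap enough at that size); for larger packets
 * only the pseudo-header sum is seeded and completion is left to a
 * later checksum-and-copy pass.
 */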
1536static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
1537{
1538	if (skb->ip_summed == CHECKSUM_COMPLETE) {
1539		if (!tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr,
1540				  &ipv6_hdr(skb)->daddr, skb->csum)) {
1541			skb->ip_summed = CHECKSUM_UNNECESSARY;
1542			return 0;
1543		}
1544	}
1545
1546	skb->csum = ~csum_unfold(tcp_v6_check(skb->len,
1547					      &ipv6_hdr(skb)->saddr,
1548					      &ipv6_hdr(skb)->daddr, 0));
1549
1550	if (skb->len <= 76) {
1551		return __skb_checksum_complete(skb);
1552	}
1553	return 0;
1554}
1555
1556/* The socket must have its spinlock held when we get
1557 * here.
1558 *
1559 * We have a potential double-lock case here, so even when
1560 * doing backlog processing we use the BH locking scheme.
1561 * This is because we cannot sleep with the original spinlock
1562 * held.
1563 */
1564static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1565{
1566	struct ipv6_pinfo *np = inet6_sk(sk);
1567	struct tcp_sock *tp;
1568	struct sk_buff *opt_skb = NULL;
1569
1570	/* Imagine: socket is IPv6. IPv4 packet arrives,
1571	   goes to IPv4 receive handler and backlogged.
1572	   From backlog it always goes here. Kerboom...
1573	   Fortunately, tcp_rcv_established and rcv_established
1574	   handle them correctly, but it is not case with
1575	   handle them correctly, but that is not the case with
1576	 */
1577
1578	if (skb->protocol == htons(ETH_P_IP))
1579		return tcp_v4_do_rcv(sk, skb);
1580
1581#ifdef CONFIG_TCP_MD5SIG
1582	if (tcp_v6_inbound_md5_hash(sk, skb))
1583		goto discard;
1584#endif
1585
1586	if (sk_filter(sk, skb))
1587		goto discard;
1588
1589	/*
1590	 *	socket locking is here for SMP purposes as backlog rcv
1591	 *	is currently called with bh processing disabled.
1592	 */
1593
1594	/* Do Stevens' IPV6_PKTOPTIONS.
1595
1596	   Yes, guys, it is the only place in our code, where we
1597	   may make it not affecting IPv4.
1598	   The rest of code is protocol independent,
1599	   and I do not like idea to uglify IPv4.
1600
1601	   Actually, all the idea behind IPV6_PKTOPTIONS
1602	   looks not very well thought out. For now we latch
1603	   options, received in the last packet, enqueued
1604	   by tcp. Feel free to propose better solution.
1605					       --ANK (980728)
1606	 */
1607	if (np->rxopt.all)
1608		opt_skb = skb_clone(skb, GFP_ATOMIC);
1609
1610	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1611		sock_rps_save_rxhash(sk, skb->rxhash);
1612		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
1613			goto reset;
1614		if (opt_skb)
1615			goto ipv6_pktoptions;
1616		return 0;
1617	}
1618
1619	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1620		goto csum_err;
1621
1622	if (sk->sk_state == TCP_LISTEN) {
1623		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
1624		if (!nsk)
1625			goto discard;
1626
1627		/*
1628		 * Queue it on the new socket if the new socket is active,
1629		 * otherwise we just shortcircuit this and continue with
1630		 * the new socket.
1631		 */
1632		if (nsk != sk) {
1633			sock_rps_save_rxhash(nsk, skb->rxhash);
1634			if (tcp_child_process(sk, nsk, skb))
1635				goto reset;
1636			if (opt_skb)
1637				__kfree_skb(opt_skb);
1638			return 0;
1639		}
1640	} else
1641		sock_rps_save_rxhash(sk, skb->rxhash);
1642
1643	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
1644		goto reset;
1645	if (opt_skb)
1646		goto ipv6_pktoptions;
1647	return 0;
1648
1649reset:
1650	tcp_v6_send_reset(sk, skb);
1651discard:
1652	if (opt_skb)
1653		__kfree_skb(opt_skb);
1654	kfree_skb(skb);
1655	return 0;
1656csum_err:
1657	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1658	goto discard;
1659
1660
1661ipv6_pktoptions:
1662	/* Do you ask, what is it?
1663
1664	   1. skb was enqueued by tcp.
1665	   2. skb is added to tail of read queue, rather than out of order.
1666	   3. socket is not in passive state.
1667	   4. Finally, it really contains options, which user wants to receive.
1668	 */
1669	tp = tcp_sk(sk);
1670	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1671	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
1672		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
1673			np->mcast_oif = inet6_iif(opt_skb);
1674		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1675			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
1676		if (ipv6_opt_accepted(sk, opt_skb)) {
1677			skb_set_owner_r(opt_skb, sk);
1678			opt_skb = xchg(&np->pktoptions, opt_skb);
1679		} else {
1680			__kfree_skb(opt_skb);
1681			opt_skb = xchg(&np->pktoptions, NULL);
1682		}
1683	}
1684
1685	kfree_skb(opt_skb);
1686	return 0;
1687}
1688
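/* Main IPv6 receive entry point for TCP: validate the header and
 * checksum, look the socket up in the established/listener tables,
 * then either process the segment directly, prequeue it, or push it
 * onto the backlog when the socket is owned by user context.
 */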
1689static int tcp_v6_rcv(struct sk_buff *skb)
1690{
1691	struct tcphdr *th;
1692	const struct ipv6hdr *hdr;
1693	struct sock *sk;
1694	int ret;
1695	struct net *net = dev_net(skb->dev);
1696
1697	if (skb->pkt_type != PACKET_HOST)
1698		goto discard_it;
1699
1700	/*
1701	 *	Count it even if it's bad.
1702	 */
1703	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1704
1705	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1706		goto discard_it;
1707
1708	th = tcp_hdr(skb);
1709
1710	if (th->doff < sizeof(struct tcphdr)/4)
1711		goto bad_packet;
1712	if (!pskb_may_pull(skb, th->doff*4))
1713		goto discard_it;
1714
1715	if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
1716		goto bad_packet;
1717
1718	th = tcp_hdr(skb);
1719	hdr = ipv6_hdr(skb);
1720	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1721	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1722				    skb->len - th->doff*4);
1723	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1724	TCP_SKB_CB(skb)->when = 0;
1725	TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(hdr);
1726	TCP_SKB_CB(skb)->sacked = 0;
1727
1728	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
1729	if (!sk)
1730		goto no_tcp_socket;
1731
1732process:
1733	if (sk->sk_state == TCP_TIME_WAIT)
1734		goto do_time_wait;
1735
1736	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
1737		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1738		goto discard_and_relse;
1739	}
1740
1741	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1742		goto discard_and_relse;
1743
1744	if (sk_filter(sk, skb))
1745		goto discard_and_relse;
1746
1747	skb->dev = NULL;
1748
1749	bh_lock_sock_nested(sk);
1750	ret = 0;
1751	if (!sock_owned_by_user(sk)) {
1752#ifdef CONFIG_NET_DMA
1753		struct tcp_sock *tp = tcp_sk(sk);
1754		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1755			tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
1756		if (tp->ucopy.dma_chan)
1757			ret = tcp_v6_do_rcv(sk, skb);
1758		else
1759#endif
1760		{
1761			if (!tcp_prequeue(sk, skb))
1762				ret = tcp_v6_do_rcv(sk, skb);
1763		}
1764	} else if (unlikely(sk_add_backlog(sk, skb))) {
1765		bh_unlock_sock(sk);
1766		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1767		goto discard_and_relse;
1768	}
1769	bh_unlock_sock(sk);
1770
1771	sock_put(sk);
1772	return ret ? -1 : 0;
1773
1774no_tcp_socket:
1775	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1776		goto discard_it;
1777
1778	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1779bad_packet:
1780		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1781	} else {
1782		tcp_v6_send_reset(NULL, skb);
1783	}
1784
1785discard_it:
1786
1787	/*
1788	 *	Discard frame
1789	 */
1790
1791	kfree_skb(skb);
1792	return 0;
1793
1794discard_and_relse:
1795	sock_put(sk);
1796	goto discard_it;
1797
1798do_time_wait:
1799	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1800		inet_twsk_put(inet_twsk(sk));
1801		goto discard_it;
1802	}
1803
1804	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1805		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1806		inet_twsk_put(inet_twsk(sk));
1807		goto discard_it;
1808	}
1809
1810	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1811	case TCP_TW_SYN:
1812	{
1813		struct sock *sk2;
1814
1815		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
1816					    &ipv6_hdr(skb)->daddr,
1817					    ntohs(th->dest), inet6_iif(skb));
1818		if (sk2 != NULL) {
1819			struct inet_timewait_sock *tw = inet_twsk(sk);
1820			inet_twsk_deschedule(tw, &tcp_death_row);
1821			inet_twsk_put(tw);
1822			sk = sk2;
1823			goto process;
1824		}
1825		/* Fall through to ACK */
1826	}
1827	case TCP_TW_ACK:
1828		tcp_v6_timewait_ack(sk, skb);
1829		break;
1830	case TCP_TW_RST:
1831		goto no_tcp_socket;
1832	case TCP_TW_SUCCESS:;
1833	}
1834	goto discard_it;
1835}
1836
1837static struct inet_peer *tcp_v6_get_peer(struct sock *sk, bool *release_it)
1838{
1839	struct rt6_info *rt = (struct rt6_info *) __sk_dst_get(sk);
1840	struct ipv6_pinfo *np = inet6_sk(sk);
1841	struct inet_peer *peer;
1842
1843	if (!rt ||
1844	    !ipv6_addr_equal(&np->daddr, &rt->rt6i_dst.addr)) {
1845		peer = inet_getpeer_v6(&np->daddr, 1);
1846		*release_it = true;
1847	} else {
1848		if (!rt->rt6i_peer)
1849			rt6_bind_peer(rt, 1);
1850		peer = rt->rt6i_peer;
1851		*release_it = false;
1852	}
1853
1854	return peer;
1855}
1856
1857static void *tcp_v6_tw_get_peer(struct sock *sk)
1858{
1859	struct inet6_timewait_sock *tw6 = inet6_twsk(sk);
1860	struct inet_timewait_sock *tw = inet_twsk(sk);
1861
1862	if (tw->tw_family == AF_INET)
1863		return tcp_v4_tw_get_peer(sk);
1864
1865	return inet_getpeer_v6(&tw6->tw_v6_daddr, 1);
1866}
1867
1868static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1869	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
1870	.twsk_unique	= tcp_twsk_unique,
1871	.twsk_destructor= tcp_twsk_destructor,
1872	.twsk_getpeer	= tcp_v6_tw_get_peer,
1873};
1874
1875static const struct inet_connection_sock_af_ops ipv6_specific = {
1876	.queue_xmit	   = inet6_csk_xmit,
1877	.send_check	   = tcp_v6_send_check,
1878	.rebuild_header	   = inet6_sk_rebuild_header,
1879	.conn_request	   = tcp_v6_conn_request,
1880	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
1881	.get_peer	   = tcp_v6_get_peer,
1882	.net_header_len	   = sizeof(struct ipv6hdr),
1883	.setsockopt	   = ipv6_setsockopt,
1884	.getsockopt	   = ipv6_getsockopt,
1885	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
1886	.sockaddr_len	   = sizeof(struct sockaddr_in6),
1887	.bind_conflict	   = inet6_csk_bind_conflict,
1888#ifdef CONFIG_COMPAT
1889	.compat_setsockopt = compat_ipv6_setsockopt,
1890	.compat_getsockopt = compat_ipv6_getsockopt,
1891#endif
1892};
1893
1894#ifdef CONFIG_TCP_MD5SIG
1895static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
1896	.md5_lookup	=	tcp_v6_md5_lookup,
1897	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
1898	.md5_add	=	tcp_v6_md5_add_func,
1899	.md5_parse	=	tcp_v6_parse_md5_keys,
1900};
1901#endif
1902
1903/*
1904 *	TCP over IPv4 via INET6 API
1905 */
1906
1907static const struct inet_connection_sock_af_ops ipv6_mapped = {
1908	.queue_xmit	   = ip_queue_xmit,
1909	.send_check	   = tcp_v4_send_check,
1910	.rebuild_header	   = inet_sk_rebuild_header,
1911	.conn_request	   = tcp_v6_conn_request,
1912	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
1913	.get_peer	   = tcp_v4_get_peer,
1914	.net_header_len	   = sizeof(struct iphdr),
1915	.setsockopt	   = ipv6_setsockopt,
1916	.getsockopt	   = ipv6_getsockopt,
1917	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
1918	.sockaddr_len	   = sizeof(struct sockaddr_in6),
1919	.bind_conflict	   = inet6_csk_bind_conflict,
1920#ifdef CONFIG_COMPAT
1921	.compat_setsockopt = compat_ipv6_setsockopt,
1922	.compat_getsockopt = compat_ipv6_getsockopt,
1923#endif
1924};
1925
1926#ifdef CONFIG_TCP_MD5SIG
1927static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
1928	.md5_lookup	=	tcp_v4_md5_lookup,
1929	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
1930	.md5_add	=	tcp_v6_md5_add_func,
1931	.md5_parse	=	tcp_v6_parse_md5_keys,
1932};
1933#endif
1934
1935/* NOTE: A lot of things are set to zero explicitly by the call to
1936 *       sk_alloc(), so they need not be done here.
1937 */
1938static int tcp_v6_init_sock(struct sock *sk)
1939{
1940	struct inet_connection_sock *icsk = inet_csk(sk);
1941	struct tcp_sock *tp = tcp_sk(sk);
1942
1943	skb_queue_head_init(&tp->out_of_order_queue);
1944	tcp_init_xmit_timers(sk);
1945	tcp_prequeue_init(tp);
1946
1947	icsk->icsk_rto = TCP_TIMEOUT_INIT;
1948	tp->mdev = TCP_TIMEOUT_INIT;
1949
1950	/* So many TCP implementations out there (incorrectly) count the
1951	 * initial SYN frame in their delayed-ACK and congestion control
1952	 * algorithms that we must have the following bandaid to talk
1953	 * efficiently to them.  -DaveM
1954	 */
1955	tp->snd_cwnd = 2;
1956
1957	/* See draft-stevens-tcpca-spec-01 for discussion of the
1958	 * initialization of these values.
1959	 */
1960	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
1961	tp->snd_cwnd_clamp = ~0;
1962	tp->mss_cache = TCP_MSS_DEFAULT;
1963
1964	tp->reordering = sysctl_tcp_reordering;
1965
1966	sk->sk_state = TCP_CLOSE;
1967
1968	icsk->icsk_af_ops = &ipv6_specific;
1969	icsk->icsk_ca_ops = &tcp_init_congestion_ops;
1970	icsk->icsk_sync_mss = tcp_sync_mss;
1971	sk->sk_write_space = sk_stream_write_space;
1972	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
1973
1974#ifdef CONFIG_TCP_MD5SIG
1975	tp->af_specific = &tcp_sock_ipv6_specific;
1976#endif
1977
1978	/* TCP Cookie Transactions */
1979	if (sysctl_tcp_cookie_size > 0) {
1980		/* Default, cookies without s_data_payload. */
1981		tp->cookie_values =
1982			kzalloc(sizeof(*tp->cookie_values),
1983				sk->sk_allocation);
1984		if (tp->cookie_values != NULL)
1985			kref_init(&tp->cookie_values->kref);
1986	}
1987	/* Presumed zeroed, in order of appearance:
1988	 *	cookie_in_always, cookie_out_never,
1989	 *	s_data_constant, s_data_in, s_data_out
1990	 */
1991	sk->sk_sndbuf = sysctl_tcp_wmem[1];
1992	sk->sk_rcvbuf = sysctl_tcp_rmem[1];
1993
1994	local_bh_disable();
1995	percpu_counter_inc(&tcp_sockets_allocated);
1996	local_bh_enable();
1997
1998	return 0;
1999}
2000
2001static void tcp_v6_destroy_sock(struct sock *sk)
2002{
2003#ifdef CONFIG_TCP_MD5SIG
2004	/* Clean up the MD5 key list */
2005	if (tcp_sk(sk)->md5sig_info)
2006		tcp_v6_clear_md5_list(sk);
2007#endif
2008	tcp_v4_destroy_sock(sk);
2009	inet6_destroy_sock(sk);
2010}
2011
2012#ifdef CONFIG_PROC_FS
2013/* Proc filesystem TCPv6 sock list dumping. */
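/* Each entry below follows the layout of IPv4's /proc/net/tcp: addresses
 * are printed as four 32-bit hex words in network byte order, followed by
 * the port in hex (see the seq_printf() format strings).
 */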
2014static void get_openreq6(struct seq_file *seq,
2015			 struct sock *sk, struct request_sock *req, int i, int uid)
2016{
2017	int ttd = req->expires - jiffies;
2018	const struct in6_addr *src = &inet6_rsk(req)->loc_addr;
2019	const struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;
2020
2021	if (ttd < 0)
2022		ttd = 0;
2023
2024	seq_printf(seq,
2025		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2026		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
2027		   i,
2028		   src->s6_addr32[0], src->s6_addr32[1],
2029		   src->s6_addr32[2], src->s6_addr32[3],
2030		   ntohs(inet_rsk(req)->loc_port),
2031		   dest->s6_addr32[0], dest->s6_addr32[1],
2032		   dest->s6_addr32[2], dest->s6_addr32[3],
2033		   ntohs(inet_rsk(req)->rmt_port),
2034		   TCP_SYN_RECV,
2035		   0,0, /* could print option size, but that is af dependent. */
2036		   1,   /* timers active (only the expire timer) */
2037		   jiffies_to_clock_t(ttd),
2038		   req->retrans,
2039		   uid,
2040		   0,  /* non standard timer */
2041		   0, /* open_requests have no inode */
2042		   0, req);
2043}
2044
2045static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
2046{
2047	const struct in6_addr *dest, *src;
2048	__u16 destp, srcp;
2049	int timer_active;
2050	unsigned long timer_expires;
2051	struct inet_sock *inet = inet_sk(sp);
2052	struct tcp_sock *tp = tcp_sk(sp);
2053	const struct inet_connection_sock *icsk = inet_csk(sp);
2054	struct ipv6_pinfo *np = inet6_sk(sp);
2055
2056	dest  = &np->daddr;
2057	src   = &np->rcv_saddr;
2058	destp = ntohs(inet->inet_dport);
2059	srcp  = ntohs(inet->inet_sport);
2060
2061	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
2062		timer_active	= 1;
2063		timer_expires	= icsk->icsk_timeout;
2064	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2065		timer_active	= 4;
2066		timer_expires	= icsk->icsk_timeout;
2067	} else if (timer_pending(&sp->sk_timer)) {
2068		timer_active	= 2;
2069		timer_expires	= sp->sk_timer.expires;
2070	} else {
2071		timer_active	= 0;
2072		timer_expires = jiffies;
2073	}
2074
2075	seq_printf(seq,
2076		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2077		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %lu %lu %u %u %d\n",
2078		   i,
2079		   src->s6_addr32[0], src->s6_addr32[1],
2080		   src->s6_addr32[2], src->s6_addr32[3], srcp,
2081		   dest->s6_addr32[0], dest->s6_addr32[1],
2082		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
2083		   sp->sk_state,
2084		   tp->write_seq-tp->snd_una,
2085		   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
2086		   timer_active,
2087		   jiffies_to_clock_t(timer_expires - jiffies),
2088		   icsk->icsk_retransmits,
2089		   sock_i_uid(sp),
2090		   icsk->icsk_probes_out,
2091		   sock_i_ino(sp),
2092		   atomic_read(&sp->sk_refcnt), sp,
2093		   jiffies_to_clock_t(icsk->icsk_rto),
2094		   jiffies_to_clock_t(icsk->icsk_ack.ato),
2095		   (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
2096		   tp->snd_cwnd,
2097		   tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh
2098		   );
2099}
2100
2101static void get_timewait6_sock(struct seq_file *seq,
2102			       struct inet_timewait_sock *tw, int i)
2103{
2104	const struct in6_addr *dest, *src;
2105	__u16 destp, srcp;
2106	struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
2107	int ttd = tw->tw_ttd - jiffies;
2108
2109	if (ttd < 0)
2110		ttd = 0;
2111
2112	dest = &tw6->tw_v6_daddr;
2113	src  = &tw6->tw_v6_rcv_saddr;
2114	destp = ntohs(tw->tw_dport);
2115	srcp  = ntohs(tw->tw_sport);
2116
2117	seq_printf(seq,
2118		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2119		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
2120		   i,
2121		   src->s6_addr32[0], src->s6_addr32[1],
2122		   src->s6_addr32[2], src->s6_addr32[3], srcp,
2123		   dest->s6_addr32[0], dest->s6_addr32[1],
2124		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
2125		   tw->tw_substate, 0, 0,
2126		   3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
2127		   atomic_read(&tw->tw_refcnt), tw);
2128}
2129
2130static int tcp6_seq_show(struct seq_file *seq, void *v)
2131{
2132	struct tcp_iter_state *st;
2133
2134	if (v == SEQ_START_TOKEN) {
2135		seq_puts(seq,
2136			 "  sl  "
2137			 "local_address                         "
2138			 "remote_address                        "
2139			 "st tx_queue rx_queue tr tm->when retrnsmt"
2140			 "   uid  timeout inode\n");
2141		goto out;
2142	}
2143	st = seq->private;
2144
2145	switch (st->state) {
2146	case TCP_SEQ_STATE_LISTENING:
2147	case TCP_SEQ_STATE_ESTABLISHED:
2148		get_tcp6_sock(seq, v, st->num);
2149		break;
2150	case TCP_SEQ_STATE_OPENREQ:
2151		get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
2152		break;
2153	case TCP_SEQ_STATE_TIME_WAIT:
2154		get_timewait6_sock(seq, v, st->num);
2155		break;
2156	}
2157out:
2158	return 0;
2159}
2160
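/* Usage sketch (illustrative): the seq_file registered below backs
 * /proc/net/tcp6, so the table can be inspected with e.g.:
 *
 *	$ cat /proc/net/tcp6
 *	  sl  local_address ... remote_address ... st tx_queue rx_queue ...
 *
 * Tools such as netstat traditionally parse this output.
 */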
2161static struct tcp_seq_afinfo tcp6_seq_afinfo = {
2162	.name		= "tcp6",
2163	.family		= AF_INET6,
2164	.seq_fops	= {
2165		.owner		= THIS_MODULE,
2166	},
2167	.seq_ops	= {
2168		.show		= tcp6_seq_show,
2169	},
2170};
2171
2172int __net_init tcp6_proc_init(struct net *net)
2173{
2174	return tcp_proc_register(net, &tcp6_seq_afinfo);
2175}
2176
2177void tcp6_proc_exit(struct net *net)
2178{
2179	tcp_proc_unregister(net, &tcp6_seq_afinfo);
2180}
2181#endif
2182
2183struct proto tcpv6_prot = {
2184	.name			= "TCPv6",
2185	.owner			= THIS_MODULE,
2186	.close			= tcp_close,
2187	.connect		= tcp_v6_connect,
2188	.disconnect		= tcp_disconnect,
2189	.accept			= inet_csk_accept,
2190	.ioctl			= tcp_ioctl,
2191	.init			= tcp_v6_init_sock,
2192	.destroy		= tcp_v6_destroy_sock,
2193	.shutdown		= tcp_shutdown,
2194	.setsockopt		= tcp_setsockopt,
2195	.getsockopt		= tcp_getsockopt,
2196	.recvmsg		= tcp_recvmsg,
2197	.sendmsg		= tcp_sendmsg,
2198	.sendpage		= tcp_sendpage,
2199	.backlog_rcv		= tcp_v6_do_rcv,
2200	.hash			= tcp_v6_hash,
2201	.unhash			= inet_unhash,
2202	.get_port		= inet_csk_get_port,
2203	.enter_memory_pressure	= tcp_enter_memory_pressure,
2204	.sockets_allocated	= &tcp_sockets_allocated,
2205	.memory_allocated	= &tcp_memory_allocated,
2206	.memory_pressure	= &tcp_memory_pressure,
2207	.orphan_count		= &tcp_orphan_count,
2208	.sysctl_mem		= sysctl_tcp_mem,
2209	.sysctl_wmem		= sysctl_tcp_wmem,
2210	.sysctl_rmem		= sysctl_tcp_rmem,
2211	.max_header		= MAX_TCP_HEADER,
2212	.obj_size		= sizeof(struct tcp6_sock),
2213	.slab_flags		= SLAB_DESTROY_BY_RCU,
2214	.twsk_prot		= &tcp6_timewait_sock_ops,
2215	.rsk_prot		= &tcp6_request_sock_ops,
2216	.h.hashinfo		= &tcp_hashinfo,
2217	.no_autobind		= true,
2218#ifdef CONFIG_COMPAT
2219	.compat_setsockopt	= compat_tcp_setsockopt,
2220	.compat_getsockopt	= compat_tcp_getsockopt,
2221#endif
2222};
2223
2224static const struct inet6_protocol tcpv6_protocol = {
2225	.handler	=	tcp_v6_rcv,
2226	.err_handler	=	tcp_v6_err,
2227	.gso_send_check	=	tcp_v6_gso_send_check,
2228	.gso_segment	=	tcp_tso_segment,
2229	.gro_receive	=	tcp6_gro_receive,
2230	.gro_complete	=	tcp6_gro_complete,
2231	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
2232};
2233
2234static struct inet_protosw tcpv6_protosw = {
2235	.type		=	SOCK_STREAM,
2236	.protocol	=	IPPROTO_TCP,
2237	.prot		=	&tcpv6_prot,
2238	.ops		=	&inet6_stream_ops,
2239	.no_check	=	0,
2240	.flags		=	INET_PROTOSW_PERMANENT |
2241				INET_PROTOSW_ICSK,
2242};
2243
2244static int __net_init tcpv6_net_init(struct net *net)
2245{
2246	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
2247				    SOCK_RAW, IPPROTO_TCP, net);
2248}
2249
2250static void __net_exit tcpv6_net_exit(struct net *net)
2251{
2252	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
2253}
2254
2255static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
2256{
2257	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
2258}
2259
2260static struct pernet_operations tcpv6_net_ops = {
2261	.init	    = tcpv6_net_init,
2262	.exit	    = tcpv6_net_exit,
2263	.exit_batch = tcpv6_net_exit_batch,
2264};
2265
2266int __init tcpv6_init(void)
2267{
2268	int ret;
2269
2270	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
2271	if (ret)
2272		goto out;
2273
2274	/* register inet6 protocol */
2275	ret = inet6_register_protosw(&tcpv6_protosw);
2276	if (ret)
2277		goto out_tcpv6_protocol;
2278
2279	ret = register_pernet_subsys(&tcpv6_net_ops);
2280	if (ret)
2281		goto out_tcpv6_protosw;
2282out:
2283	return ret;
2284
2285out_tcpv6_protosw:
2286	inet6_unregister_protosw(&tcpv6_protosw);
2287out_tcpv6_protocol:
2288	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2289	goto out;
2290}
2291
2292void tcpv6_exit(void)
2293{
2294	unregister_pernet_subsys(&tcpv6_net_ops);
2295	inet6_unregister_protosw(&tcpv6_protosw);
2296	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2297}
v4.10.11
   1/*
   2 *	TCP over IPv6
   3 *	Linux INET6 implementation
   4 *
   5 *	Authors:
   6 *	Pedro Roque		<roque@di.fc.ul.pt>
   7 *
   8 *	Based on:
   9 *	linux/net/ipv4/tcp.c
  10 *	linux/net/ipv4/tcp_input.c
  11 *	linux/net/ipv4/tcp_output.c
  12 *
  13 *	Fixes:
  14 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
  15 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
  16 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
  17 *					a single port at the same time.
  18 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
  19 *
  20 *	This program is free software; you can redistribute it and/or
  21 *      modify it under the terms of the GNU General Public License
  22 *      as published by the Free Software Foundation; either version
  23 *      2 of the License, or (at your option) any later version.
  24 */
  25
  26#include <linux/bottom_half.h>
  27#include <linux/module.h>
  28#include <linux/errno.h>
  29#include <linux/types.h>
  30#include <linux/socket.h>
  31#include <linux/sockios.h>
  32#include <linux/net.h>
  33#include <linux/jiffies.h>
  34#include <linux/in.h>
  35#include <linux/in6.h>
  36#include <linux/netdevice.h>
  37#include <linux/init.h>
  38#include <linux/jhash.h>
  39#include <linux/ipsec.h>
  40#include <linux/times.h>
  41#include <linux/slab.h>
  42#include <linux/uaccess.h>
  43#include <linux/ipv6.h>
  44#include <linux/icmpv6.h>
  45#include <linux/random.h>
  46
  47#include <net/tcp.h>
  48#include <net/ndisc.h>
  49#include <net/inet6_hashtables.h>
  50#include <net/inet6_connection_sock.h>
  51#include <net/ipv6.h>
  52#include <net/transp_v6.h>
  53#include <net/addrconf.h>
  54#include <net/ip6_route.h>
  55#include <net/ip6_checksum.h>
  56#include <net/inet_ecn.h>
  57#include <net/protocol.h>
  58#include <net/xfrm.h>
  59#include <net/snmp.h>
  60#include <net/dsfield.h>
  61#include <net/timewait_sock.h>
  62#include <net/inet_common.h>
  63#include <net/secure_seq.h>
  64#include <net/busy_poll.h>
  65
  66#include <linux/proc_fs.h>
  67#include <linux/seq_file.h>
  68
  69#include <crypto/hash.h>
  70#include <linux/scatterlist.h>
  71
  72static void	tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
  73static void	tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
  74				      struct request_sock *req);
  75
  76static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
  77
  78static const struct inet_connection_sock_af_ops ipv6_mapped;
  79static const struct inet_connection_sock_af_ops ipv6_specific;
  80#ifdef CONFIG_TCP_MD5SIG
  81static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
  82static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
  83#else
  84static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
  85						   const struct in6_addr *addr)
  86{
  87	return NULL;
  88}
  89#endif
  90
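/* Cache the (already validated) input route on the socket for the receive
 * fast path; rx_dst_cookie lets a later dst->ops->check() in tcp_v6_do_rcv()
 * detect whether the cached dst has gone stale.
 */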
  91static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
  92{
  93	struct dst_entry *dst = skb_dst(skb);
  94
  95	if (dst && dst_hold_safe(dst)) {
  96		const struct rt6_info *rt = (const struct rt6_info *)dst;
  97
  98		sk->sk_rx_dst = dst;
  99		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
 100		inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
 101	}
 102}
 103
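/* Initial sequence number selection. secure_tcpv6_sequence_number() derives
 * the ISN from a keyed hash of the address/port 4-tuple plus a clock
 * component (the RFC 6528 approach) and also reports the per-flow timestamp
 * offset through *tsoff.
 */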
 104static u32 tcp_v6_init_sequence(const struct sk_buff *skb, u32 *tsoff)
 105{
 106	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
 107					    ipv6_hdr(skb)->saddr.s6_addr32,
 108					    tcp_hdr(skb)->dest,
 109					    tcp_hdr(skb)->source, tsoff);
 110}
 111
 112static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 113			  int addr_len)
 114{
 115	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
 116	struct inet_sock *inet = inet_sk(sk);
 117	struct inet_connection_sock *icsk = inet_csk(sk);
 118	struct ipv6_pinfo *np = inet6_sk(sk);
 119	struct tcp_sock *tp = tcp_sk(sk);
 120	struct in6_addr *saddr = NULL, *final_p, final;
 121	struct ipv6_txoptions *opt;
 122	struct flowi6 fl6;
 123	struct dst_entry *dst;
 124	int addr_type;
 125	int err;
 126
 127	if (addr_len < SIN6_LEN_RFC2133)
 128		return -EINVAL;
 129
 130	if (usin->sin6_family != AF_INET6)
 131		return -EAFNOSUPPORT;
 132
 133	memset(&fl6, 0, sizeof(fl6));
 134
 135	if (np->sndflow) {
 136		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
 137		IP6_ECN_flow_init(fl6.flowlabel);
 138		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
 139			struct ip6_flowlabel *flowlabel;
 140			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
 141			if (!flowlabel)
 142				return -EINVAL;
 143			fl6_sock_release(flowlabel);
 144		}
 145	}
 146
 147	/*
 148	 *	connect() to INADDR_ANY means loopback (BSD'ism).
 149	 */
 150
 151	if (ipv6_addr_any(&usin->sin6_addr)) {
 152		if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
 153			ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
 154					       &usin->sin6_addr);
 155		else
 156			usin->sin6_addr = in6addr_loopback;
 157	}
 158
 159	addr_type = ipv6_addr_type(&usin->sin6_addr);
 160
 161	if (addr_type & IPV6_ADDR_MULTICAST)
 162		return -ENETUNREACH;
 163
 164	if (addr_type&IPV6_ADDR_LINKLOCAL) {
 165		if (addr_len >= sizeof(struct sockaddr_in6) &&
 166		    usin->sin6_scope_id) {
 167			/* If interface is set while binding, indices
 168			 * must coincide.
 169			 */
 170			if (sk->sk_bound_dev_if &&
 171			    sk->sk_bound_dev_if != usin->sin6_scope_id)
 172				return -EINVAL;
 173
 174			sk->sk_bound_dev_if = usin->sin6_scope_id;
 175		}
 176
 177		/* Connect to link-local address requires an interface */
 178		if (!sk->sk_bound_dev_if)
 179			return -EINVAL;
 180	}
 181
 182	if (tp->rx_opt.ts_recent_stamp &&
 183	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
 184		tp->rx_opt.ts_recent = 0;
 185		tp->rx_opt.ts_recent_stamp = 0;
 186		tp->write_seq = 0;
 187	}
 188
 189	sk->sk_v6_daddr = usin->sin6_addr;
 190	np->flow_label = fl6.flowlabel;
 191
 192	/*
 193	 *	TCP over IPv4
 194	 */
 195
 196	if (addr_type & IPV6_ADDR_MAPPED) {
 197		u32 exthdrlen = icsk->icsk_ext_hdr_len;
 198		struct sockaddr_in sin;
 199
 200		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
 201
 202		if (__ipv6_only_sock(sk))
 203			return -ENETUNREACH;
 204
 205		sin.sin_family = AF_INET;
 206		sin.sin_port = usin->sin6_port;
 207		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
 208
 209		icsk->icsk_af_ops = &ipv6_mapped;
 210		sk->sk_backlog_rcv = tcp_v4_do_rcv;
 211#ifdef CONFIG_TCP_MD5SIG
 212		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
 213#endif
 214
 215		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
 216
 217		if (err) {
 218			icsk->icsk_ext_hdr_len = exthdrlen;
 219			icsk->icsk_af_ops = &ipv6_specific;
 220			sk->sk_backlog_rcv = tcp_v6_do_rcv;
 221#ifdef CONFIG_TCP_MD5SIG
 222			tp->af_specific = &tcp_sock_ipv6_specific;
 223#endif
 224			goto failure;
 225		}
 226		np->saddr = sk->sk_v6_rcv_saddr;
 227
 228		return err;
 229	}
 230
 231	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
 232		saddr = &sk->sk_v6_rcv_saddr;
 233
 234	fl6.flowi6_proto = IPPROTO_TCP;
 235	fl6.daddr = sk->sk_v6_daddr;
 236	fl6.saddr = saddr ? *saddr : np->saddr;
 237	fl6.flowi6_oif = sk->sk_bound_dev_if;
 238	fl6.flowi6_mark = sk->sk_mark;
 239	fl6.fl6_dport = usin->sin6_port;
 240	fl6.fl6_sport = inet->inet_sport;
 241	fl6.flowi6_uid = sk->sk_uid;
 242
 243	opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
 244	final_p = fl6_update_dst(&fl6, opt, &final);
 245
 246	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
 247
 248	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
 249	if (IS_ERR(dst)) {
 250		err = PTR_ERR(dst);
 251		goto failure;
 252	}
 253
 254	if (!saddr) {
 255		saddr = &fl6.saddr;
 256		sk->sk_v6_rcv_saddr = *saddr;
 257	}
 258
 259	/* set the source address */
 260	np->saddr = *saddr;
 261	inet->inet_rcv_saddr = LOOPBACK4_IPV6;
 262
 263	sk->sk_gso_type = SKB_GSO_TCPV6;
 264	ip6_dst_store(sk, dst, NULL, NULL);
 265
 266	if (tcp_death_row.sysctl_tw_recycle &&
 267	    !tp->rx_opt.ts_recent_stamp &&
 268	    ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr))
 269		tcp_fetch_timewait_stamp(sk, dst);
 270
 271	icsk->icsk_ext_hdr_len = 0;
 272	if (opt)
 273		icsk->icsk_ext_hdr_len = opt->opt_flen +
 274					 opt->opt_nflen;
 275
 276	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
 277
 278	inet->inet_dport = usin->sin6_port;
 279
 280	tcp_set_state(sk, TCP_SYN_SENT);
 281	err = inet6_hash_connect(&tcp_death_row, sk);
 282	if (err)
 283		goto late_failure;
 284
 285	sk_set_txhash(sk);
 286
 287	if (!tp->write_seq && likely(!tp->repair))
 288		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
 289							     sk->sk_v6_daddr.s6_addr32,
 290							     inet->inet_sport,
 291							     inet->inet_dport,
 292							     &tp->tsoffset);
 293
 294	err = tcp_connect(sk);
 295	if (err)
 296		goto late_failure;
 297
 298	return 0;
 299
 300late_failure:
 301	tcp_set_state(sk, TCP_CLOSE);
 302	__sk_dst_reset(sk);
 303failure:
 304	inet->inet_dport = 0;
 305	sk->sk_route_caps = 0;
 306	return err;
 307}
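/* Usage sketch (userspace, illustrative): tcp_v6_connect() above is the
 * kernel side of an application's connect() on an AF_INET6 stream socket:
 *
 *	int fd = socket(AF_INET6, SOCK_STREAM, 0);
 *	struct sockaddr_in6 sa = { .sin6_family = AF_INET6,
 *				   .sin6_port   = htons(443) };
 *	inet_pton(AF_INET6, "2001:db8::1", &sa.sin6_addr);
 *	connect(fd, (struct sockaddr *)&sa, sizeof(sa));
 *
 * A v4-mapped destination (::ffff:a.b.c.d) is handed off to tcp_v4_connect()
 * in the IPV6_ADDR_MAPPED branch above.
 */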
 308
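/* Path MTU shrink handler: runs after an ICMPV6_PKT_TOOBIG (directly, or
 * deferred via TCP_MTU_REDUCED_DEFERRED when the socket is owned by user
 * context), refreshes the route and, if the cached path MTU really shrank,
 * lowers the MSS and retransmits what no longer fits.
 */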
 309static void tcp_v6_mtu_reduced(struct sock *sk)
 310{
 311	struct dst_entry *dst;
 312
 313	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
 314		return;
 315
 316	dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
 317	if (!dst)
 318		return;
 319
 320	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
 321		tcp_sync_mss(sk, dst_mtu(dst));
 322		tcp_simple_retransmit(sk);
 323	}
 324}
 325
 326static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 327		u8 type, u8 code, int offset, __be32 info)
 328{
 329	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
 330	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
 331	struct net *net = dev_net(skb->dev);
 332	struct request_sock *fastopen;
 333	struct ipv6_pinfo *np;
 334	struct tcp_sock *tp;
 335	__u32 seq, snd_una;
 336	struct sock *sk;
 337	bool fatal;
 338	int err;
 339
 340	sk = __inet6_lookup_established(net, &tcp_hashinfo,
 341					&hdr->daddr, th->dest,
 342					&hdr->saddr, ntohs(th->source),
 343					skb->dev->ifindex);
 344
 345	if (!sk) {
 346		__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
 347				  ICMP6_MIB_INERRORS);
 348		return;
 349	}
 350
 351	if (sk->sk_state == TCP_TIME_WAIT) {
 352		inet_twsk_put(inet_twsk(sk));
 353		return;
 354	}
 355	seq = ntohl(th->seq);
 356	fatal = icmpv6_err_convert(type, code, &err);
 357	if (sk->sk_state == TCP_NEW_SYN_RECV)
 358		return tcp_req_err(sk, seq, fatal);
 359
 360	bh_lock_sock(sk);
 361	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
 362		__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
 363
 364	if (sk->sk_state == TCP_CLOSE)
 365		goto out;
 366
 367	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
 368		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
 369		goto out;
 370	}
 371
 372	tp = tcp_sk(sk);
  373	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child()). */
 374	fastopen = tp->fastopen_rsk;
 375	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
 376	if (sk->sk_state != TCP_LISTEN &&
 377	    !between(seq, snd_una, tp->snd_nxt)) {
 378		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
 379		goto out;
 380	}
 381
 382	np = inet6_sk(sk);
 383
 384	if (type == NDISC_REDIRECT) {
 385		if (!sock_owned_by_user(sk)) {
 386			struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
 387
 388			if (dst)
 389				dst->ops->redirect(dst, sk, skb);
 390		}
 391		goto out;
 392	}
 393
 394	if (type == ICMPV6_PKT_TOOBIG) {
 395		/* We are not interested in TCP_LISTEN and open_requests
  396		 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
 397		 * they should go through unfragmented).
 398		 */
 399		if (sk->sk_state == TCP_LISTEN)
 400			goto out;
 401
 402		if (!ip6_sk_accept_pmtu(sk))
 403			goto out;
 404
 405		tp->mtu_info = ntohl(info);
 406		if (!sock_owned_by_user(sk))
 407			tcp_v6_mtu_reduced(sk);
 408		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
 409					   &sk->sk_tsq_flags))
 410			sock_hold(sk);
 411		goto out;
 412	}
 413
 414
  415	/* Might be for a request_sock */
 416	switch (sk->sk_state) {
 417	case TCP_SYN_SENT:
 418	case TCP_SYN_RECV:
  419		/* Only in fast or simultaneous open. If a fast open socket
  420		 * is already accepted it is treated as a connected one below.
  421		 */
 422		if (fastopen && !fastopen->sk)
 423			break;
 424
 425		if (!sock_owned_by_user(sk)) {
 426			sk->sk_err = err;
 427			sk->sk_error_report(sk);		/* Wake people up to see the error (see connect in sock.c) */
 428
 429			tcp_done(sk);
 430		} else
 431			sk->sk_err_soft = err;
 432		goto out;
 433	}
 434
 435	if (!sock_owned_by_user(sk) && np->recverr) {
 436		sk->sk_err = err;
 437		sk->sk_error_report(sk);
 438	} else
 439		sk->sk_err_soft = err;
 440
 441out:
 442	bh_unlock_sock(sk);
 443	sock_put(sk);
 444}
 445
 446
 447static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
 448			      struct flowi *fl,
 449			      struct request_sock *req,
 450			      struct tcp_fastopen_cookie *foc,
 451			      enum tcp_synack_type synack_type)
 452{
 453	struct inet_request_sock *ireq = inet_rsk(req);
 454	struct ipv6_pinfo *np = inet6_sk(sk);
 455	struct ipv6_txoptions *opt;
 456	struct flowi6 *fl6 = &fl->u.ip6;
 457	struct sk_buff *skb;
 458	int err = -ENOMEM;
 459
 460	/* First, grab a route. */
 461	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
 462					       IPPROTO_TCP)) == NULL)
 463		goto done;
 464
 465	skb = tcp_make_synack(sk, dst, req, foc, synack_type);
 466
 467	if (skb) {
 468		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
 469				    &ireq->ir_v6_rmt_addr);
 470
 471		fl6->daddr = ireq->ir_v6_rmt_addr;
 472		if (np->repflow && ireq->pktopts)
 473			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
 474
 475		rcu_read_lock();
 476		opt = ireq->ipv6_opt;
 477		if (!opt)
 478			opt = rcu_dereference(np->opt);
 479		err = ip6_xmit(sk, skb, fl6, sk->sk_mark, opt, np->tclass);
 480		rcu_read_unlock();
 481		err = net_xmit_eval(err);
 482	}
 483
 484done:
 485	return err;
 486}
 487
 488
 489static void tcp_v6_reqsk_destructor(struct request_sock *req)
 490{
 491	kfree(inet_rsk(req)->ipv6_opt);
 492	kfree_skb(inet_rsk(req)->pktopts);
 493}
 494
 495#ifdef CONFIG_TCP_MD5SIG
 496static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
 497						   const struct in6_addr *addr)
 498{
 499	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
 500}
 501
 502static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
 503						const struct sock *addr_sk)
 504{
 505	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
 506}
 507
 508static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
 509				 int optlen)
 510{
 511	struct tcp_md5sig cmd;
 512	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
 513
 514	if (optlen < sizeof(cmd))
 515		return -EINVAL;
 516
 517	if (copy_from_user(&cmd, optval, sizeof(cmd)))
 518		return -EFAULT;
 519
 520	if (sin6->sin6_family != AF_INET6)
 521		return -EINVAL;
 522
 523	if (!cmd.tcpm_keylen) {
 524		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
 525			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
 526					      AF_INET);
 527		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
 528				      AF_INET6);
 529	}
 530
 531	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
 532		return -EINVAL;
 533
 534	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
 535		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
 536				      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
 537
 538	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
 539			      AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
 540}
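/* Usage sketch (userspace, illustrative): the command decoded above arrives
 * via the TCP_MD5SIG socket option (RFC 2385 segment signing), e.g.:
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&md5.tcpm_addr;
 *
 *	sin6->sin6_family = AF_INET6;
 *	inet_pton(AF_INET6, "2001:db8::1", &sin6->sin6_addr);
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 *
 * A zero tcpm_keylen deletes the key for that peer, matching the branch
 * above.
 */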
 541
 542static int tcp_v6_md5_hash_headers(struct tcp_md5sig_pool *hp,
 543				   const struct in6_addr *daddr,
 544				   const struct in6_addr *saddr,
 545				   const struct tcphdr *th, int nbytes)
 546{
 547	struct tcp6_pseudohdr *bp;
 548	struct scatterlist sg;
 549	struct tcphdr *_th;
 550
 551	bp = hp->scratch;
 552	/* 1. TCP pseudo-header (RFC2460) */
 553	bp->saddr = *saddr;
 554	bp->daddr = *daddr;
 555	bp->protocol = cpu_to_be32(IPPROTO_TCP);
 556	bp->len = cpu_to_be32(nbytes);
 557
 558	_th = (struct tcphdr *)(bp + 1);
 559	memcpy(_th, th, sizeof(*th));
 560	_th->check = 0;
 561
 562	sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
 563	ahash_request_set_crypt(hp->md5_req, &sg, NULL,
 564				sizeof(*bp) + sizeof(*th));
 565	return crypto_ahash_update(hp->md5_req);
 566}
 567
 568static int tcp_v6_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
 569			       const struct in6_addr *daddr, struct in6_addr *saddr,
 570			       const struct tcphdr *th)
 571{
 572	struct tcp_md5sig_pool *hp;
 573	struct ahash_request *req;
 574
 575	hp = tcp_get_md5sig_pool();
 576	if (!hp)
 577		goto clear_hash_noput;
 578	req = hp->md5_req;
 579
 580	if (crypto_ahash_init(req))
 581		goto clear_hash;
 582	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
 583		goto clear_hash;
 584	if (tcp_md5_hash_key(hp, key))
 585		goto clear_hash;
 586	ahash_request_set_crypt(req, NULL, md5_hash, 0);
 587	if (crypto_ahash_final(req))
 588		goto clear_hash;
 589
 590	tcp_put_md5sig_pool();
 591	return 0;
 592
 593clear_hash:
 594	tcp_put_md5sig_pool();
 595clear_hash_noput:
 596	memset(md5_hash, 0, 16);
 597	return 1;
 598}
 599
 600static int tcp_v6_md5_hash_skb(char *md5_hash,
 601			       const struct tcp_md5sig_key *key,
 602			       const struct sock *sk,
 603			       const struct sk_buff *skb)
 604{
 605	const struct in6_addr *saddr, *daddr;
 606	struct tcp_md5sig_pool *hp;
 607	struct ahash_request *req;
 608	const struct tcphdr *th = tcp_hdr(skb);
 609
 610	if (sk) { /* valid for establish/request sockets */
 611		saddr = &sk->sk_v6_rcv_saddr;
 612		daddr = &sk->sk_v6_daddr;
 613	} else {
 614		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
 615		saddr = &ip6h->saddr;
 616		daddr = &ip6h->daddr;
 617	}
 618
 619	hp = tcp_get_md5sig_pool();
 620	if (!hp)
 621		goto clear_hash_noput;
 622	req = hp->md5_req;
 623
 624	if (crypto_ahash_init(req))
 625		goto clear_hash;
 626
 627	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, skb->len))
 628		goto clear_hash;
 629	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
 630		goto clear_hash;
 631	if (tcp_md5_hash_key(hp, key))
 632		goto clear_hash;
 633	ahash_request_set_crypt(req, NULL, md5_hash, 0);
 634	if (crypto_ahash_final(req))
 635		goto clear_hash;
 636
 637	tcp_put_md5sig_pool();
 638	return 0;
 639
 640clear_hash:
 641	tcp_put_md5sig_pool();
 642clear_hash_noput:
 643	memset(md5_hash, 0, 16);
 644	return 1;
 645}
 646
 647#endif
 648
 649static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
 650				    const struct sk_buff *skb)
 651{
 652#ifdef CONFIG_TCP_MD5SIG
 653	const __u8 *hash_location = NULL;
 654	struct tcp_md5sig_key *hash_expected;
 655	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
 656	const struct tcphdr *th = tcp_hdr(skb);
 657	int genhash;
 658	u8 newhash[16];
 659
 660	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
 661	hash_location = tcp_parse_md5sig_option(th);
 662
 663	/* We've parsed the options - do we have a hash? */
 664	if (!hash_expected && !hash_location)
 665		return false;
 666
 667	if (hash_expected && !hash_location) {
 668		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
 669		return true;
 670	}
 671
 672	if (!hash_expected && hash_location) {
 673		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
 674		return true;
 675	}
 676
 677	/* check the signature */
 678	genhash = tcp_v6_md5_hash_skb(newhash,
 679				      hash_expected,
 680				      NULL, skb);
 681
 682	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
 683		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
 684		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
 685				     genhash ? "failed" : "mismatch",
 686				     &ip6h->saddr, ntohs(th->source),
 687				     &ip6h->daddr, ntohs(th->dest));
 688		return true;
 689	}
 690#endif
 691	return false;
 692}
 693
 694static void tcp_v6_init_req(struct request_sock *req,
 695			    const struct sock *sk_listener,
 696			    struct sk_buff *skb)
 697{
 698	struct inet_request_sock *ireq = inet_rsk(req);
 699	const struct ipv6_pinfo *np = inet6_sk(sk_listener);
 700
 701	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
 702	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
 703
 704	/* So that link locals have meaning */
 705	if (!sk_listener->sk_bound_dev_if &&
 706	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
 707		ireq->ir_iif = tcp_v6_iif(skb);
 708
 709	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
 710	    (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
 711	     np->rxopt.bits.rxinfo ||
 712	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
 713	     np->rxopt.bits.rxohlim || np->repflow)) {
 714		atomic_inc(&skb->users);
 715		ireq->pktopts = skb;
 716	}
 717}
 718
 719static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
 720					  struct flowi *fl,
 721					  const struct request_sock *req,
 722					  bool *strict)
 723{
 724	if (strict)
 725		*strict = true;
 726	return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
 727}
 728
 729struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
 730	.family		=	AF_INET6,
 731	.obj_size	=	sizeof(struct tcp6_request_sock),
 732	.rtx_syn_ack	=	tcp_rtx_synack,
 733	.send_ack	=	tcp_v6_reqsk_send_ack,
 734	.destructor	=	tcp_v6_reqsk_destructor,
 735	.send_reset	=	tcp_v6_send_reset,
 736	.syn_ack_timeout =	tcp_syn_ack_timeout,
 737};
 738
 739static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
 740	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
 741				sizeof(struct ipv6hdr),
 742#ifdef CONFIG_TCP_MD5SIG
 743	.req_md5_lookup	=	tcp_v6_md5_lookup,
 744	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
 745#endif
 746	.init_req	=	tcp_v6_init_req,
 747#ifdef CONFIG_SYN_COOKIES
 748	.cookie_init_seq =	cookie_v6_init_sequence,
 749#endif
 750	.route_req	=	tcp_v6_route_req,
 751	.init_seq	=	tcp_v6_init_sequence,
 752	.send_synack	=	tcp_v6_send_synack,
 753};
 754
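/* Build and send a bare TCP segment (a RST when rst != 0, otherwise a pure
 * ACK) in reply to @skb without any established socket state: addresses and
 * ports are swapped from the incoming header, optional timestamp and MD5
 * options are appended, and the result goes out through the per-namespace
 * control socket net->ipv6.tcp_sk.
 */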
 755static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
 756				 u32 ack, u32 win, u32 tsval, u32 tsecr,
 757				 int oif, struct tcp_md5sig_key *key, int rst,
 758				 u8 tclass, __be32 label)
 759{
 760	const struct tcphdr *th = tcp_hdr(skb);
 761	struct tcphdr *t1;
 762	struct sk_buff *buff;
 763	struct flowi6 fl6;
 764	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
 765	struct sock *ctl_sk = net->ipv6.tcp_sk;
 766	unsigned int tot_len = sizeof(struct tcphdr);
 767	struct dst_entry *dst;
 768	__be32 *topt;
 769
 770	if (tsecr)
 771		tot_len += TCPOLEN_TSTAMP_ALIGNED;
 772#ifdef CONFIG_TCP_MD5SIG
 773	if (key)
 774		tot_len += TCPOLEN_MD5SIG_ALIGNED;
 775#endif
 776
 777	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
 778			 GFP_ATOMIC);
 779	if (!buff)
 780		return;
 781
 782	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
 783
 784	t1 = (struct tcphdr *) skb_push(buff, tot_len);
 785	skb_reset_transport_header(buff);
 786
 787	/* Swap the send and the receive. */
 788	memset(t1, 0, sizeof(*t1));
 789	t1->dest = th->source;
 790	t1->source = th->dest;
 791	t1->doff = tot_len / 4;
 792	t1->seq = htonl(seq);
 793	t1->ack_seq = htonl(ack);
 794	t1->ack = !rst || !th->ack;
 795	t1->rst = rst;
 796	t1->window = htons(win);
 797
 798	topt = (__be32 *)(t1 + 1);
 799
 800	if (tsecr) {
 801		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
 802				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
 803		*topt++ = htonl(tsval);
 804		*topt++ = htonl(tsecr);
 805	}
 806
 807#ifdef CONFIG_TCP_MD5SIG
 808	if (key) {
 809		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
 810				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
 811		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
 812				    &ipv6_hdr(skb)->saddr,
 813				    &ipv6_hdr(skb)->daddr, t1);
 814	}
 815#endif
 816
 817	memset(&fl6, 0, sizeof(fl6));
 818	fl6.daddr = ipv6_hdr(skb)->saddr;
 819	fl6.saddr = ipv6_hdr(skb)->daddr;
 820	fl6.flowlabel = label;
 821
 822	buff->ip_summed = CHECKSUM_PARTIAL;
 823	buff->csum = 0;
 824
 825	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
 826
 827	fl6.flowi6_proto = IPPROTO_TCP;
 828	if (rt6_need_strict(&fl6.daddr) && !oif)
 829		fl6.flowi6_oif = tcp_v6_iif(skb);
 830	else {
 831		if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
 832			oif = skb->skb_iif;
 833
 834		fl6.flowi6_oif = oif;
 835	}
 836
 837	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
 838	fl6.fl6_dport = t1->dest;
 839	fl6.fl6_sport = t1->source;
 840	fl6.flowi6_uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
 841	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
 842
  843	/* Pass a socket to ip6_dst_lookup even when it is for a RST;
  844	 * the underlying function will use it to retrieve the network
  845	 * namespace.
 846	 */
 847	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
 848	if (!IS_ERR(dst)) {
 849		skb_dst_set(buff, dst);
 850		ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass);
 851		TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
 852		if (rst)
 853			TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
 854		return;
 855	}
 856
 857	kfree_skb(buff);
 858}
 859
 860static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
 861{
 862	const struct tcphdr *th = tcp_hdr(skb);
 863	u32 seq = 0, ack_seq = 0;
 864	struct tcp_md5sig_key *key = NULL;
 865#ifdef CONFIG_TCP_MD5SIG
 866	const __u8 *hash_location = NULL;
 867	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
 868	unsigned char newhash[16];
 869	int genhash;
 870	struct sock *sk1 = NULL;
 871#endif
 872	int oif;
 873
 874	if (th->rst)
 875		return;
 876
  877	/* If sk is not NULL, it means we did a successful lookup and the incoming
 878	 * route had to be correct. prequeue might have dropped our dst.
 879	 */
 880	if (!sk && !ipv6_unicast_destination(skb))
 881		return;
 882
 883#ifdef CONFIG_TCP_MD5SIG
 884	rcu_read_lock();
 885	hash_location = tcp_parse_md5sig_option(th);
 886	if (sk && sk_fullsock(sk)) {
 887		key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr);
 888	} else if (hash_location) {
 889		/*
  890		 * The active side is gone. Try to find the listening socket
  891		 * through the source port, and then find the md5 key through
  892		 * the listening socket. We do not lose any security here:
  893		 * the incoming packet is checked against the md5 hash of the
  894		 * key we find; no RST is generated if the hash doesn't match.
 895		 */
 896		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
 897					   &tcp_hashinfo, NULL, 0,
 898					   &ipv6h->saddr,
 899					   th->source, &ipv6h->daddr,
 900					   ntohs(th->source), tcp_v6_iif(skb));
 901		if (!sk1)
 902			goto out;
 903
 904		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
 905		if (!key)
 906			goto out;
 907
 908		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
 909		if (genhash || memcmp(hash_location, newhash, 16) != 0)
 910			goto out;
 911	}
 912#endif
 913
 914	if (th->ack)
 915		seq = ntohl(th->ack_seq);
 916	else
 917		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
 918			  (th->doff << 2);
 919
 920	oif = sk ? sk->sk_bound_dev_if : 0;
 921	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);
 922
 923#ifdef CONFIG_TCP_MD5SIG
 924out:
 925	rcu_read_unlock();
 926#endif
 927}
 928
 929static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
 930			    u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
 931			    struct tcp_md5sig_key *key, u8 tclass,
 932			    __be32 label)
 933{
 934	tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
 935			     tclass, label);
 936}
 937
 938static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
 939{
 940	struct inet_timewait_sock *tw = inet_twsk(sk);
 941	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
 942
 943	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
 944			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
 945			tcp_time_stamp + tcptw->tw_ts_offset,
 946			tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
 947			tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));
 948
 949	inet_twsk_put(tw);
 950}
 951
 952static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
 953				  struct request_sock *req)
 954{
 955	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
 956	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
 957	 */
 958	/* RFC 7323 2.3
 959	 * The window field (SEG.WND) of every outgoing segment, with the
 960	 * exception of <SYN> segments, MUST be right-shifted by
 961	 * Rcv.Wind.Shift bits:
 962	 */
 963	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
 964			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
 965			tcp_rsk(req)->rcv_nxt,
 966			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
 967			tcp_time_stamp + tcp_rsk(req)->ts_off,
 968			req->ts_recent, sk->sk_bound_dev_if,
 969			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
 970			0, 0);
 971}
 972
 973
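/* SYN-cookie fallback on the LISTEN path: if the segment is an ACK rather
 * than a SYN, cookie_v6_check() tries to validate its sequence number as a
 * previously issued cookie and, on success, returns the reconstructed child
 * socket; otherwise the listener is returned unchanged.
 */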
 974static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
 975{
 976#ifdef CONFIG_SYN_COOKIES
 977	const struct tcphdr *th = tcp_hdr(skb);
 978
 979	if (!th->syn)
 980		sk = cookie_v6_check(sk, skb);
 981#endif
 982	return sk;
 983}
 984
 985static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 986{
 987	if (skb->protocol == htons(ETH_P_IP))
 988		return tcp_v4_conn_request(sk, skb);
 989
 990	if (!ipv6_unicast_destination(skb))
 991		goto drop;
 992
 993	return tcp_conn_request(&tcp6_request_sock_ops,
 994				&tcp_request_sock_ipv6_ops, sk, skb);
 995
 996drop:
 997	tcp_listendrop(sk);
 998	return 0; /* don't send reset */
 999}
1000
1001static void tcp_v6_restore_cb(struct sk_buff *skb)
1002{
1003	/* We need to move header back to the beginning if xfrm6_policy_check()
1004	 * and tcp_v6_fill_cb() are going to be called again.
1005	 * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
1006	 */
1007	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
1008		sizeof(struct inet6_skb_parm));
1009}
1010
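/* Create the child socket once the handshake completes. Two paths below:
 * a v4-mapped connection (skb->protocol == ETH_P_IP) is delegated to
 * tcp_v4_syn_recv_sock() and then patched up with the ipv6_mapped ops,
 * while a native IPv6 connection clones a fresh socket from the request,
 * copies addresses, IPv6 options and (optionally) the MD5 key, and hashes
 * the socket into the established table.
 */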
1011static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
1012					 struct request_sock *req,
1013					 struct dst_entry *dst,
1014					 struct request_sock *req_unhash,
1015					 bool *own_req)
1016{
1017	struct inet_request_sock *ireq;
1018	struct ipv6_pinfo *newnp;
1019	const struct ipv6_pinfo *np = inet6_sk(sk);
1020	struct ipv6_txoptions *opt;
1021	struct tcp6_sock *newtcp6sk;
1022	struct inet_sock *newinet;
1023	struct tcp_sock *newtp;
1024	struct sock *newsk;
1025#ifdef CONFIG_TCP_MD5SIG
1026	struct tcp_md5sig_key *key;
1027#endif
1028	struct flowi6 fl6;
1029
1030	if (skb->protocol == htons(ETH_P_IP)) {
1031		/*
1032		 *	v6 mapped
1033		 */
1034
1035		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
1036					     req_unhash, own_req);
1037
1038		if (!newsk)
1039			return NULL;
1040
1041		newtcp6sk = (struct tcp6_sock *)newsk;
1042		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1043
1044		newinet = inet_sk(newsk);
1045		newnp = inet6_sk(newsk);
1046		newtp = tcp_sk(newsk);
1047
1048		memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1049
1050		newnp->saddr = newsk->sk_v6_rcv_saddr;
1051
1052		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1053		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
1054#ifdef CONFIG_TCP_MD5SIG
1055		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1056#endif
1057
1058		newnp->ipv6_ac_list = NULL;
1059		newnp->ipv6_fl_list = NULL;
1060		newnp->pktoptions  = NULL;
1061		newnp->opt	   = NULL;
1062		newnp->mcast_oif   = tcp_v6_iif(skb);
1063		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
1064		newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
1065		if (np->repflow)
1066			newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
1067
1068		/*
1069		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1070		 * here, tcp_create_openreq_child now does this for us, see the comment in
1071		 * that function for the gory details. -acme
1072		 */
1073
 1074		/* This is a tricky place. Until this moment the IPv4 tcp code
 1075		   worked with the IPv6 icsk.icsk_af_ops.
1076		   Sync it now.
1077		 */
1078		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
1079
1080		return newsk;
1081	}
1082
1083	ireq = inet_rsk(req);
1084
1085	if (sk_acceptq_is_full(sk))
1086		goto out_overflow;
1087
1088	if (!dst) {
1089		dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
1090		if (!dst)
1091			goto out;
1092	}
1093
1094	newsk = tcp_create_openreq_child(sk, req, skb);
1095	if (!newsk)
1096		goto out_nonewsk;
1097
1098	/*
1099	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1100	 * count here, tcp_create_openreq_child now does this for us, see the
1101	 * comment in that function for the gory details. -acme
1102	 */
1103
1104	newsk->sk_gso_type = SKB_GSO_TCPV6;
1105	ip6_dst_store(newsk, dst, NULL, NULL);
1106	inet6_sk_rx_dst_set(newsk, skb);
1107
1108	newtcp6sk = (struct tcp6_sock *)newsk;
1109	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1110
1111	newtp = tcp_sk(newsk);
1112	newinet = inet_sk(newsk);
1113	newnp = inet6_sk(newsk);
1114
1115	memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1116
1117	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
1118	newnp->saddr = ireq->ir_v6_loc_addr;
1119	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
1120	newsk->sk_bound_dev_if = ireq->ir_iif;
1121
1122	/* Now IPv6 options...
1123
1124	   First: no IPv4 options.
1125	 */
1126	newinet->inet_opt = NULL;
1127	newnp->ipv6_ac_list = NULL;
1128	newnp->ipv6_fl_list = NULL;
1129
1130	/* Clone RX bits */
1131	newnp->rxopt.all = np->rxopt.all;
1132
1133	newnp->pktoptions = NULL;
1134	newnp->opt	  = NULL;
1135	newnp->mcast_oif  = tcp_v6_iif(skb);
1136	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1137	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
1138	if (np->repflow)
1139		newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
1140
1141	/* Clone native IPv6 options from listening socket (if any)
1142
1143	   Yes, keeping reference count would be much more clever,
 1144	   but we do one more thing here: reattach optmem
1145	   to newsk.
1146	 */
1147	opt = ireq->ipv6_opt;
1148	if (!opt)
1149		opt = rcu_dereference(np->opt);
1150	if (opt) {
1151		opt = ipv6_dup_options(newsk, opt);
1152		RCU_INIT_POINTER(newnp->opt, opt);
1153	}
1154	inet_csk(newsk)->icsk_ext_hdr_len = 0;
1155	if (opt)
1156		inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
1157						    opt->opt_flen;
1158
1159	tcp_ca_openreq_child(newsk, dst);
1160
1161	tcp_sync_mss(newsk, dst_mtu(dst));
1162	newtp->advmss = dst_metric_advmss(dst);
1163	if (tcp_sk(sk)->rx_opt.user_mss &&
1164	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1165		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1166
1167	tcp_initialize_rcv_mss(newsk);
1168
1169	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
1170	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
1171
1172#ifdef CONFIG_TCP_MD5SIG
1173	/* Copy over the MD5 key from the original socket */
1174	key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
1175	if (key) {
1176		/* We're using one, so create a matching key
1177		 * on the newsk structure. If we fail to get
1178		 * memory, then we end up not copying the key
1179		 * across. Shucks.
1180		 */
1181		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
1182			       AF_INET6, key->key, key->keylen,
1183			       sk_gfp_mask(sk, GFP_ATOMIC));
1184	}
1185#endif
1186
1187	if (__inet_inherit_port(sk, newsk) < 0) {
1188		inet_csk_prepare_forced_close(newsk);
1189		tcp_done(newsk);
1190		goto out;
1191	}
1192	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
1193	if (*own_req) {
1194		tcp_move_syn(newtp, req);
1195
1196		/* Clone pktoptions received with SYN, if we own the req */
1197		if (ireq->pktopts) {
1198			newnp->pktoptions = skb_clone(ireq->pktopts,
1199						      sk_gfp_mask(sk, GFP_ATOMIC));
1200			consume_skb(ireq->pktopts);
1201			ireq->pktopts = NULL;
1202			if (newnp->pktoptions) {
1203				tcp_v6_restore_cb(newnp->pktoptions);
1204				skb_set_owner_r(newnp->pktoptions, newsk);
1205			}
1206		}
1207	}
1208
1209	return newsk;
1210
1211out_overflow:
1212	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1213out_nonewsk:
1214	dst_release(dst);
1215out:
1216	tcp_listendrop(sk);
1217	return NULL;
1218}
1219
 1220/* The socket must have its spinlock held when we get
1221 * here, unless it is a TCP_LISTEN socket.
1222 *
1223 * We have a potential double-lock case here, so even when
1224 * doing backlog processing we use the BH locking scheme.
1225 * This is because we cannot sleep with the original spinlock
1226 * held.
1227 */
1228static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1229{
1230	struct ipv6_pinfo *np = inet6_sk(sk);
1231	struct tcp_sock *tp;
1232	struct sk_buff *opt_skb = NULL;
1233
1234	/* Imagine: socket is IPv6. IPv4 packet arrives,
 1235	   goes to the IPv4 receive handler and is backlogged.
 1236	   From backlog it always goes here. Kerboom...
 1237	   Fortunately, tcp_rcv_established and rcv_established
 1238	   handle them correctly, but that is not the case with
1239	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
1240	 */
1241
1242	if (skb->protocol == htons(ETH_P_IP))
1243		return tcp_v4_do_rcv(sk, skb);
1244
1245	if (tcp_filter(sk, skb))
1246		goto discard;
1247
1248	/*
1249	 *	socket locking is here for SMP purposes as backlog rcv
1250	 *	is currently called with bh processing disabled.
1251	 */
1252
1253	/* Do Stevens' IPV6_PKTOPTIONS.
1254
 1255	   Yes, guys, it is the only place in our code where we
 1256	   can do this without affecting IPv4.
 1257	   The rest of the code is protocol independent,
 1258	   and I do not like the idea of uglifying IPv4.
 1259
 1260	   Actually, the whole idea behind IPV6_PKTOPTIONS
 1261	   looks not very well thought out. For now we latch
1262	   options, received in the last packet, enqueued
1263	   by tcp. Feel free to propose better solution.
1264					       --ANK (980728)
1265	 */
1266	if (np->rxopt.all)
1267		opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));
1268
1269	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1270		struct dst_entry *dst = sk->sk_rx_dst;
1271
1272		sock_rps_save_rxhash(sk, skb);
1273		sk_mark_napi_id(sk, skb);
1274		if (dst) {
1275			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1276			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
1277				dst_release(dst);
1278				sk->sk_rx_dst = NULL;
1279			}
1280		}
1281
1282		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
1283		if (opt_skb)
1284			goto ipv6_pktoptions;
1285		return 0;
1286	}
1287
1288	if (tcp_checksum_complete(skb))
1289		goto csum_err;
1290
1291	if (sk->sk_state == TCP_LISTEN) {
1292		struct sock *nsk = tcp_v6_cookie_check(sk, skb);
1293
1294		if (!nsk)
1295			goto discard;
1296
1297		if (nsk != sk) {
1298			sock_rps_save_rxhash(nsk, skb);
1299			sk_mark_napi_id(nsk, skb);
1300			if (tcp_child_process(sk, nsk, skb))
1301				goto reset;
1302			if (opt_skb)
1303				__kfree_skb(opt_skb);
1304			return 0;
1305		}
1306	} else
1307		sock_rps_save_rxhash(sk, skb);
1308
1309	if (tcp_rcv_state_process(sk, skb))
1310		goto reset;
1311	if (opt_skb)
1312		goto ipv6_pktoptions;
1313	return 0;
1314
1315reset:
1316	tcp_v6_send_reset(sk, skb);
1317discard:
1318	if (opt_skb)
1319		__kfree_skb(opt_skb);
1320	kfree_skb(skb);
1321	return 0;
1322csum_err:
1323	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1324	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
1325	goto discard;
1326
1327
1328ipv6_pktoptions:
1329	/* Do you ask, what is it?
1330
1331	   1. skb was enqueued by tcp.
1332	   2. skb is added to tail of read queue, rather than out of order.
1333	   3. socket is not in passive state.
1334	   4. Finally, it really contains options, which user wants to receive.
1335	 */
1336	tp = tcp_sk(sk);
1337	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1338	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
1339		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
1340			np->mcast_oif = tcp_v6_iif(opt_skb);
1341		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1342			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
1343		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
1344			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
1345		if (np->repflow)
1346			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
1347		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
1348			skb_set_owner_r(opt_skb, sk);
1349			tcp_v6_restore_cb(opt_skb);
1350			opt_skb = xchg(&np->pktoptions, opt_skb);
1351		} else {
1352			__kfree_skb(opt_skb);
1353			opt_skb = xchg(&np->pktoptions, NULL);
1354		}
1355	}
1356
1357	kfree_skb(opt_skb);
1358	return 0;
1359}
1360
1361static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
1362			   const struct tcphdr *th)
1363{
1364	/* This is tricky: we move IP6CB at its correct location into
1365	 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
1366	 * _decode_session6() uses IP6CB().
1367	 * barrier() makes sure compiler won't play aliasing games.
1368	 */
1369	memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
1370		sizeof(struct inet6_skb_parm));
1371	barrier();
1372
1373	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1374	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1375				    skb->len - th->doff*4);
1376	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1377	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1378	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1379	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1380	TCP_SKB_CB(skb)->sacked = 0;
1381}
1382
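/* Main IPv6 TCP receive routine, invoked for every segment delivered by the
 * IP layer: validate the header and checksum, look the segment up in the
 * established/listening hashes, then either process it immediately, queue
 * it to the prequeue/backlog while the socket is owned by user context, or
 * fall through to the no_tcp_socket / do_time_wait handling below.
 */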
1383static int tcp_v6_rcv(struct sk_buff *skb)
1384{
1385	const struct tcphdr *th;
1386	const struct ipv6hdr *hdr;
1387	bool refcounted;
1388	struct sock *sk;
1389	int ret;
1390	struct net *net = dev_net(skb->dev);
1391
1392	if (skb->pkt_type != PACKET_HOST)
1393		goto discard_it;
1394
1395	/*
1396	 *	Count it even if it's bad.
1397	 */
1398	__TCP_INC_STATS(net, TCP_MIB_INSEGS);
1399
1400	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1401		goto discard_it;
1402
1403	th = (const struct tcphdr *)skb->data;
1404
1405	if (unlikely(th->doff < sizeof(struct tcphdr)/4))
1406		goto bad_packet;
1407	if (!pskb_may_pull(skb, th->doff*4))
1408		goto discard_it;
1409
1410	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
1411		goto csum_error;
1412
1413	th = (const struct tcphdr *)skb->data;
1414	hdr = ipv6_hdr(skb);
1415
1416lookup:
1417	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
1418				th->source, th->dest, inet6_iif(skb),
1419				&refcounted);
1420	if (!sk)
1421		goto no_tcp_socket;
1422
1423process:
1424	if (sk->sk_state == TCP_TIME_WAIT)
1425		goto do_time_wait;
1426
1427	if (sk->sk_state == TCP_NEW_SYN_RECV) {
1428		struct request_sock *req = inet_reqsk(sk);
1429		struct sock *nsk;
1430
1431		sk = req->rsk_listener;
1432		tcp_v6_fill_cb(skb, hdr, th);
1433		if (tcp_v6_inbound_md5_hash(sk, skb)) {
1434			sk_drops_add(sk, skb);
1435			reqsk_put(req);
1436			goto discard_it;
1437		}
1438		if (unlikely(sk->sk_state != TCP_LISTEN)) {
1439			inet_csk_reqsk_queue_drop_and_put(sk, req);
1440			goto lookup;
1441		}
1442		sock_hold(sk);
1443		refcounted = true;
1444		nsk = tcp_check_req(sk, skb, req, false);
1445		if (!nsk) {
1446			reqsk_put(req);
1447			goto discard_and_relse;
1448		}
1449		if (nsk == sk) {
1450			reqsk_put(req);
1451			tcp_v6_restore_cb(skb);
1452		} else if (tcp_child_process(sk, nsk, skb)) {
1453			tcp_v6_send_reset(nsk, skb);
1454			goto discard_and_relse;
1455		} else {
1456			sock_put(sk);
1457			return 0;
1458		}
1459	}
1460	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
1461		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
1462		goto discard_and_relse;
1463	}
1464
1465	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1466		goto discard_and_relse;
1467
1468	tcp_v6_fill_cb(skb, hdr, th);
1469
1470	if (tcp_v6_inbound_md5_hash(sk, skb))
1471		goto discard_and_relse;
1472
1473	if (tcp_filter(sk, skb))
1474		goto discard_and_relse;
1475	th = (const struct tcphdr *)skb->data;
1476	hdr = ipv6_hdr(skb);
1477
1478	skb->dev = NULL;
1479
1480	if (sk->sk_state == TCP_LISTEN) {
1481		ret = tcp_v6_do_rcv(sk, skb);
1482		goto put_and_return;
1483	}
1484
1485	sk_incoming_cpu_update(sk);
1486
1487	bh_lock_sock_nested(sk);
1488	tcp_segs_in(tcp_sk(sk), skb);
1489	ret = 0;
1490	if (!sock_owned_by_user(sk)) {
1491		if (!tcp_prequeue(sk, skb))
1492			ret = tcp_v6_do_rcv(sk, skb);
1493	} else if (tcp_add_backlog(sk, skb)) {
1494		goto discard_and_relse;
1495	}
1496	bh_unlock_sock(sk);
1497
1498put_and_return:
1499	if (refcounted)
1500		sock_put(sk);
1501	return ret ? -1 : 0;
1502
1503no_tcp_socket:
1504	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1505		goto discard_it;
1506
1507	tcp_v6_fill_cb(skb, hdr, th);
1508
1509	if (tcp_checksum_complete(skb)) {
1510csum_error:
1511		__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
1512bad_packet:
1513		__TCP_INC_STATS(net, TCP_MIB_INERRS);
1514	} else {
1515		tcp_v6_send_reset(NULL, skb);
1516	}
1517
1518discard_it:
1519	kfree_skb(skb);
1520	return 0;
1521
1522discard_and_relse:
1523	sk_drops_add(sk, skb);
1524	if (refcounted)
1525		sock_put(sk);
1526	goto discard_it;
1527
1528do_time_wait:
1529	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1530		inet_twsk_put(inet_twsk(sk));
1531		goto discard_it;
1532	}
1533
1534	tcp_v6_fill_cb(skb, hdr, th);
1535
1536	if (tcp_checksum_complete(skb)) {
1537		inet_twsk_put(inet_twsk(sk));
1538		goto csum_error;
1539	}
1540
1541	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1542	case TCP_TW_SYN:
1543	{
1544		struct sock *sk2;
1545
1546		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
1547					    skb, __tcp_hdrlen(th),
1548					    &ipv6_hdr(skb)->saddr, th->source,
1549					    &ipv6_hdr(skb)->daddr,
1550					    ntohs(th->dest), tcp_v6_iif(skb));
1551		if (sk2) {
1552			struct inet_timewait_sock *tw = inet_twsk(sk);
1553			inet_twsk_deschedule_put(tw);
1554			sk = sk2;
1555			tcp_v6_restore_cb(skb);
1556			refcounted = false;
1557			goto process;
1558		}
1559		/* Fall through to ACK */
1560	}
1561	case TCP_TW_ACK:
1562		tcp_v6_timewait_ack(sk, skb);
1563		break;
1564	case TCP_TW_RST:
1565		tcp_v6_restore_cb(skb);
1566		tcp_v6_send_reset(sk, skb);
1567		inet_twsk_deschedule_put(inet_twsk(sk));
1568		goto discard_it;
1569	case TCP_TW_SUCCESS:
1570		;
1571	}
1572	goto discard_it;
1573}
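
/*
 * Editor's illustrative sketch (standalone, not part of this file): the
 * checksum that skb_checksum_init()/tcp_checksum_complete() verify in the
 * receive path above is the RFC 2460 pseudo-header sum: source and
 * destination addresses, upper-layer length and next-header value, folded
 * together with the TCP segment itself in ones'-complement arithmetic.
 * The addresses and ports below are documentation placeholders.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>
#include <arpa/inet.h>

static uint32_t sum16(const uint8_t *p, size_t len, uint32_t acc)
{
	for (; len > 1; p += 2, len -= 2)
		acc += (uint32_t)p[0] << 8 | p[1];
	if (len)
		acc += (uint32_t)p[0] << 8;
	return acc;
}

static uint16_t tcp6_csum(const struct in6_addr *s, const struct in6_addr *d,
			  const uint8_t *seg, uint32_t len)
{
	uint32_t acc = 0;

	acc = sum16((const uint8_t *)s, 16, acc);	/* pseudo header... */
	acc = sum16((const uint8_t *)d, 16, acc);
	acc += len;					/* upper-layer length */
	acc += 6;					/* next header = TCP */
	acc = sum16(seg, len, acc);			/* ...plus the segment */
	while (acc >> 16)				/* fold the carries */
		acc = (acc & 0xFFFF) + (acc >> 16);
	return (uint16_t)~acc;
}

int main(void)
{
	struct in6_addr s, d;
	uint8_t th[20] = { 0x30, 0x39, 0x00, 0x50 };	/* sport 12345, dport 80 */
	uint16_t c;

	inet_pton(AF_INET6, "2001:db8::1", &s);
	inet_pton(AF_INET6, "2001:db8::2", &d);
	th[12] = 5 << 4;				/* doff = 5 words */
	c = tcp6_csum(&s, &d, th, sizeof(th));
	th[16] = c >> 8;				/* install checksum */
	th[17] = c & 0xFF;
	/* Recomputing over the completed header must now yield zero. */
	printf("verify: %u\n", tcp6_csum(&s, &d, th, sizeof(th)));
	return 0;
}
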
1574
1575static void tcp_v6_early_demux(struct sk_buff *skb)
1576{
1577	const struct ipv6hdr *hdr;
1578	const struct tcphdr *th;
1579	struct sock *sk;
1580
1581	if (skb->pkt_type != PACKET_HOST)
1582		return;
1583
1584	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1585		return;
1586
1587	hdr = ipv6_hdr(skb);
1588	th = tcp_hdr(skb);
1589
1590	if (th->doff < sizeof(struct tcphdr) / 4)
1591		return;
1592
1593	/* Note: we use inet6_iif() here, not tcp_v6_iif(); the IP6CB has not been moved into TCP_SKB_CB() yet */
1594	sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1595					&hdr->saddr, th->source,
1596					&hdr->daddr, ntohs(th->dest),
1597					inet6_iif(skb));
1598	if (sk) {
1599		skb->sk = sk;
1600		skb->destructor = sock_edemux;
1601		if (sk_fullsock(sk)) {
1602			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
1603
1604			if (dst)
1605				dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
1606			if (dst &&
1607			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1608				skb_dst_set_noref(skb, dst);
1609		}
1610	}
1611}
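
/*
 * Editor's illustrative sketch (standalone, not part of this file): a toy
 * model of the exact 4-tuple match that __inet6_lookup_established() does
 * for early demux above.  The real code hashes with jhash under RCU; this
 * hypothetical model only shows the keying idea.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct demo_tuple {
	uint8_t  saddr[16], daddr[16];
	uint16_t sport, dport;		/* network order in the real code */
};

struct demo_sock {
	struct demo_tuple key;
	struct demo_sock *next;
	int id;
};

#define DEMO_BUCKETS 64
static struct demo_sock *demo_hash[DEMO_BUCKETS];

static unsigned demo_hashfn(const struct demo_tuple *t)
{
	const uint8_t *p = (const uint8_t *)t;
	unsigned h = 5381;
	size_t i;

	for (i = 0; i < sizeof(*t); i++)
		h = h * 33 + p[i];	/* djb2 as a stand-in for jhash */
	return h % DEMO_BUCKETS;
}

static struct demo_sock *demo_lookup(const struct demo_tuple *t)
{
	struct demo_sock *sk;

	for (sk = demo_hash[demo_hashfn(t)]; sk; sk = sk->next)
		if (!memcmp(&sk->key, t, sizeof(*t)))
			return sk;	/* exact 4-tuple match only */
	return NULL;
}

int main(void)
{
	static struct demo_sock est = { .id = 42 };
	unsigned b;

	est.key.saddr[15] = est.key.daddr[15] = 1;	/* ::1 <-> ::1 */
	est.key.sport = 12345;
	est.key.dport = 80;
	b = demo_hashfn(&est.key);
	est.next = demo_hash[b];
	demo_hash[b] = &est;			/* "establish" one socket */

	printf("hit id: %d\n", demo_lookup(&est.key)->id);	/* 42 */
	return 0;
}
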
1612
1613static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1614	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
1615	.twsk_unique	= tcp_twsk_unique,
1616	.twsk_destructor = tcp_twsk_destructor,
1617};
1618
1619static const struct inet_connection_sock_af_ops ipv6_specific = {
1620	.queue_xmit	   = inet6_csk_xmit,
1621	.send_check	   = tcp_v6_send_check,
1622	.rebuild_header	   = inet6_sk_rebuild_header,
1623	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
1624	.conn_request	   = tcp_v6_conn_request,
1625	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
1626	.net_header_len	   = sizeof(struct ipv6hdr),
1627	.net_frag_header_len = sizeof(struct frag_hdr),
1628	.setsockopt	   = ipv6_setsockopt,
1629	.getsockopt	   = ipv6_getsockopt,
1630	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
1631	.sockaddr_len	   = sizeof(struct sockaddr_in6),
1632	.bind_conflict	   = inet6_csk_bind_conflict,
1633#ifdef CONFIG_COMPAT
1634	.compat_setsockopt = compat_ipv6_setsockopt,
1635	.compat_getsockopt = compat_ipv6_getsockopt,
1636#endif
1637	.mtu_reduced	   = tcp_v6_mtu_reduced,
1638};
1639
1640#ifdef CONFIG_TCP_MD5SIG
1641static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
1642	.md5_lookup	=	tcp_v6_md5_lookup,
1643	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
1644	.md5_parse	=	tcp_v6_parse_md5_keys,
1645};
1646#endif
1647
1648/*
1649 *	TCP over IPv4 via INET6 API
1650 */
1651static const struct inet_connection_sock_af_ops ipv6_mapped = {
1652	.queue_xmit	   = ip_queue_xmit,
1653	.send_check	   = tcp_v4_send_check,
1654	.rebuild_header	   = inet_sk_rebuild_header,
1655	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
1656	.conn_request	   = tcp_v6_conn_request,
1657	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
1658	.net_header_len	   = sizeof(struct iphdr),
1659	.setsockopt	   = ipv6_setsockopt,
1660	.getsockopt	   = ipv6_getsockopt,
1661	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
1662	.sockaddr_len	   = sizeof(struct sockaddr_in6),
1663	.bind_conflict	   = inet6_csk_bind_conflict,
1664#ifdef CONFIG_COMPAT
1665	.compat_setsockopt = compat_ipv6_setsockopt,
1666	.compat_getsockopt = compat_ipv6_getsockopt,
1667#endif
1668	.mtu_reduced	   = tcp_v4_mtu_reduced,
1669};
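
/*
 * Editor's illustrative sketch (standalone userspace program, not part of
 * this file): what selects the ipv6_mapped ops table above.  Connecting an
 * AF_INET6 socket to a v4-mapped destination (::ffff:a.b.c.d) makes
 * tcp_v6_connect() switch the socket's icsk_af_ops to ipv6_mapped, so the
 * flow runs over IPv4 behind the IPv6 socket API.  The loopback target and
 * port 80 below are placeholder assumptions.
 */
#include <stdio.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>

int main(void)
{
	struct sockaddr_in6 dst = { .sin6_family = AF_INET6,
				    .sin6_port = htons(80) };
	int fd = socket(AF_INET6, SOCK_STREAM, 0);

	if (fd < 0)
		return 1;
	/* v4-mapped form of 127.0.0.1; this is the case the kernel routes
	 * through the ipv6_mapped ops rather than ipv6_specific. */
	inet_pton(AF_INET6, "::ffff:127.0.0.1", &dst.sin6_addr);
	if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) < 0)
		perror("connect");
	close(fd);
	return 0;
}
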
1670
1671#ifdef CONFIG_TCP_MD5SIG
1672static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
1673	.md5_lookup	=	tcp_v4_md5_lookup,
1674	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
1675	.md5_parse	=	tcp_v6_parse_md5_keys,
1676};
1677#endif
1678
1679/* NOTE: Many fields are already zeroed explicitly by the call to
1680 *       sk_alloc(), so they need not be initialized here.
1681 */
1682static int tcp_v6_init_sock(struct sock *sk)
1683{
1684	struct inet_connection_sock *icsk = inet_csk(sk);
1685
1686	tcp_init_sock(sk);
1687
1688	icsk->icsk_af_ops = &ipv6_specific;
1689
1690#ifdef CONFIG_TCP_MD5SIG
1691	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
1692#endif
1693
1694	return 0;
1695}
1696
1697static void tcp_v6_destroy_sock(struct sock *sk)
1698{
1699	tcp_v4_destroy_sock(sk);
1700	inet6_destroy_sock(sk);
1701}
1702
1703#ifdef CONFIG_PROC_FS
1704/* Proc filesystem TCPv6 sock list dumping. */
1705static void get_openreq6(struct seq_file *seq,
1706			 const struct request_sock *req, int i)
1707{
1708	long ttd = req->rsk_timer.expires - jiffies;
1709	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
1710	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;
1711
1712	if (ttd < 0)
1713		ttd = 0;
1714
1715	seq_printf(seq,
1716		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1717		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
1718		   i,
1719		   src->s6_addr32[0], src->s6_addr32[1],
1720		   src->s6_addr32[2], src->s6_addr32[3],
1721		   inet_rsk(req)->ir_num,
1722		   dest->s6_addr32[0], dest->s6_addr32[1],
1723		   dest->s6_addr32[2], dest->s6_addr32[3],
1724		   ntohs(inet_rsk(req)->ir_rmt_port),
1725		   TCP_SYN_RECV,
1726		   0, 0, /* could print option size, but that is AF-dependent. */
1727		   1,   /* timers active (only the expire timer) */
1728		   jiffies_to_clock_t(ttd),
1729		   req->num_timeout,
1730		   from_kuid_munged(seq_user_ns(seq),
1731				    sock_i_uid(req->rsk_listener)),
1732		   0,  /* non standard timer */
1733		   0, /* open_requests have no inode */
1734		   0, req);
1735}
1736
1737static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1738{
1739	const struct in6_addr *dest, *src;
1740	__u16 destp, srcp;
1741	int timer_active;
1742	unsigned long timer_expires;
1743	const struct inet_sock *inet = inet_sk(sp);
1744	const struct tcp_sock *tp = tcp_sk(sp);
1745	const struct inet_connection_sock *icsk = inet_csk(sp);
1746	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
1747	int rx_queue;
1748	int state;
1749
1750	dest  = &sp->sk_v6_daddr;
1751	src   = &sp->sk_v6_rcv_saddr;
1752	destp = ntohs(inet->inet_dport);
1753	srcp  = ntohs(inet->inet_sport);
1754
1755	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
1756	    icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
1757	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
1758		timer_active	= 1;
1759		timer_expires	= icsk->icsk_timeout;
1760	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1761		timer_active	= 4;
1762		timer_expires	= icsk->icsk_timeout;
1763	} else if (timer_pending(&sp->sk_timer)) {
1764		timer_active	= 2;
1765		timer_expires	= sp->sk_timer.expires;
1766	} else {
1767		timer_active	= 0;
1768		timer_expires = jiffies;
1769	}
1770
1771	state = sk_state_load(sp);
1772	if (state == TCP_LISTEN)
1773		rx_queue = sp->sk_ack_backlog;
1774	else
1775		/* Because we don't lock the socket,
1776		 * we might find a transient negative value.
1777		 */
1778		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
1779
1780	seq_printf(seq,
1781		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1782		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
1783		   i,
1784		   src->s6_addr32[0], src->s6_addr32[1],
1785		   src->s6_addr32[2], src->s6_addr32[3], srcp,
1786		   dest->s6_addr32[0], dest->s6_addr32[1],
1787		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
1788		   state,
1789		   tp->write_seq - tp->snd_una,
1790		   rx_queue,
1791		   timer_active,
1792		   jiffies_delta_to_clock_t(timer_expires - jiffies),
1793		   icsk->icsk_retransmits,
1794		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
1795		   icsk->icsk_probes_out,
1796		   sock_i_ino(sp),
1797		   atomic_read(&sp->sk_refcnt), sp,
1798		   jiffies_to_clock_t(icsk->icsk_rto),
1799		   jiffies_to_clock_t(icsk->icsk_ack.ato),
1800		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
1801		   tp->snd_cwnd,
1802		   state == TCP_LISTEN ?
1803			fastopenq->max_qlen :
1804			(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
1805		   );
1806}
1807
1808static void get_timewait6_sock(struct seq_file *seq,
1809			       struct inet_timewait_sock *tw, int i)
1810{
1811	long delta = tw->tw_timer.expires - jiffies;
1812	const struct in6_addr *dest, *src;
1813	__u16 destp, srcp;
1814
1815	dest = &tw->tw_v6_daddr;
1816	src  = &tw->tw_v6_rcv_saddr;
1817	destp = ntohs(tw->tw_dport);
1818	srcp  = ntohs(tw->tw_sport);
1819
1820	seq_printf(seq,
1821		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1822		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
1823		   i,
1824		   src->s6_addr32[0], src->s6_addr32[1],
1825		   src->s6_addr32[2], src->s6_addr32[3], srcp,
1826		   dest->s6_addr32[0], dest->s6_addr32[1],
1827		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
1828		   tw->tw_substate, 0, 0,
1829		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
1830		   atomic_read(&tw->tw_refcnt), tw);
1831}
1832
1833static int tcp6_seq_show(struct seq_file *seq, void *v)
1834{
1835	struct tcp_iter_state *st;
1836	struct sock *sk = v;
1837
1838	if (v == SEQ_START_TOKEN) {
1839		seq_puts(seq,
1840			 "  sl  "
1841			 "local_address                         "
1842			 "remote_address                        "
1843			 "st tx_queue rx_queue tr tm->when retrnsmt"
1844			 "   uid  timeout inode\n");
1845		goto out;
1846	}
1847	st = seq->private;
1848
1849	if (sk->sk_state == TCP_TIME_WAIT)
1850		get_timewait6_sock(seq, v, st->num);
1851	else if (sk->sk_state == TCP_NEW_SYN_RECV)
1852		get_openreq6(seq, v, st->num);
1853	else
1854		get_tcp6_sock(seq, v, st->num);
1855out:
1856	return 0;
1857}
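
/*
 * Editor's illustrative sketch (standalone, not part of this file):
 * consuming the seq_printf() format emitted by get_tcp6_sock() above.
 * Each /proc/net/tcp6 row prints the address as four 32-bit hex words,
 * one per s6_addr32[] entry, and the port and state in hex.  Caveat:
 * each word is printed in host byte order, so on little-endian machines
 * the bytes within a word appear swapped relative to wire order.
 */
#include <stdio.h>

int main(void)
{
	char line[512];
	FILE *f = fopen("/proc/net/tcp6", "r");

	if (!f)
		return 1;
	fgets(line, sizeof(line), f);	/* skip the header row */
	while (fgets(line, sizeof(line), f)) {
		unsigned a0, a1, a2, a3, port, state;

		/* Parse the local endpoint and state, skip the remote one. */
		if (sscanf(line, " %*d: %8x%8x%8x%8x:%x %*32[0-9A-Fa-f]:%*x %x",
			   &a0, &a1, &a2, &a3, &port, &state) == 6)
			printf("local %08x%08x%08x%08x port %u state %02x\n",
			       a0, a1, a2, a3, port, state);
	}
	fclose(f);
	return 0;
}
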
1858
1859static const struct file_operations tcp6_afinfo_seq_fops = {
1860	.owner   = THIS_MODULE,
1861	.open    = tcp_seq_open,
1862	.read    = seq_read,
1863	.llseek  = seq_lseek,
1864	.release = seq_release_net
1865};
1866
1867static struct tcp_seq_afinfo tcp6_seq_afinfo = {
1868	.name		= "tcp6",
1869	.family		= AF_INET6,
1870	.seq_fops	= &tcp6_afinfo_seq_fops,
1871	.seq_ops	= {
1872		.show		= tcp6_seq_show,
1873	},
1874};
1875
1876int __net_init tcp6_proc_init(struct net *net)
1877{
1878	return tcp_proc_register(net, &tcp6_seq_afinfo);
1879}
1880
1881void tcp6_proc_exit(struct net *net)
1882{
1883	tcp_proc_unregister(net, &tcp6_seq_afinfo);
1884}
1885#endif
1886
1887struct proto tcpv6_prot = {
1888	.name			= "TCPv6",
1889	.owner			= THIS_MODULE,
1890	.close			= tcp_close,
1891	.connect		= tcp_v6_connect,
1892	.disconnect		= tcp_disconnect,
1893	.accept			= inet_csk_accept,
1894	.ioctl			= tcp_ioctl,
1895	.init			= tcp_v6_init_sock,
1896	.destroy		= tcp_v6_destroy_sock,
1897	.shutdown		= tcp_shutdown,
1898	.setsockopt		= tcp_setsockopt,
1899	.getsockopt		= tcp_getsockopt,
1900	.recvmsg		= tcp_recvmsg,
1901	.sendmsg		= tcp_sendmsg,
1902	.sendpage		= tcp_sendpage,
1903	.backlog_rcv		= tcp_v6_do_rcv,
1904	.release_cb		= tcp_release_cb,
1905	.hash			= inet6_hash,
1906	.unhash			= inet_unhash,
1907	.get_port		= inet_csk_get_port,
1908	.enter_memory_pressure	= tcp_enter_memory_pressure,
1909	.stream_memory_free	= tcp_stream_memory_free,
1910	.sockets_allocated	= &tcp_sockets_allocated,
1911	.memory_allocated	= &tcp_memory_allocated,
1912	.memory_pressure	= &tcp_memory_pressure,
1913	.orphan_count		= &tcp_orphan_count,
1914	.sysctl_mem		= sysctl_tcp_mem,
1915	.sysctl_wmem		= sysctl_tcp_wmem,
1916	.sysctl_rmem		= sysctl_tcp_rmem,
1917	.max_header		= MAX_TCP_HEADER,
1918	.obj_size		= sizeof(struct tcp6_sock),
1919	.slab_flags		= SLAB_DESTROY_BY_RCU,
1920	.twsk_prot		= &tcp6_timewait_sock_ops,
1921	.rsk_prot		= &tcp6_request_sock_ops,
1922	.h.hashinfo		= &tcp_hashinfo,
1923	.no_autobind		= true,
1924#ifdef CONFIG_COMPAT
1925	.compat_setsockopt	= compat_tcp_setsockopt,
1926	.compat_getsockopt	= compat_tcp_getsockopt,
1927#endif
1928	.diag_destroy		= tcp_abort,
1929};
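
/*
 * Editor's illustrative sketch (standalone, not part of this file): the
 * ops-table pattern used by tcpv6_prot and the af_ops structures above.
 * Behaviour is selected by installing a const table of function pointers,
 * not by branching on the address family at every call site.  All names
 * here are hypothetical.
 */
#include <stdio.h>

struct demo_ops {
	const char *name;
	int (*header_len)(void);
};

static int v4_hlen(void) { return 20; }	/* sizeof(struct iphdr) */
static int v6_hlen(void) { return 40; }	/* sizeof(struct ipv6hdr) */

static const struct demo_ops demo_v4 = { "v4", v4_hlen };
static const struct demo_ops demo_v6 = { "v6", v6_hlen };

struct demo_sock {
	const struct demo_ops *ops;	/* like icsk->icsk_af_ops */
};

int main(void)
{
	struct demo_sock s = { .ops = &demo_v6 };

	printf("%s header: %d bytes\n", s.ops->name, s.ops->header_len());
	s.ops = &demo_v4;		/* like the switch to ipv6_mapped */
	printf("%s header: %d bytes\n", s.ops->name, s.ops->header_len());
	return 0;
}
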
1930
1931static const struct inet6_protocol tcpv6_protocol = {
1932	.early_demux	=	tcp_v6_early_demux,
1933	.handler	=	tcp_v6_rcv,
1934	.err_handler	=	tcp_v6_err,
1935	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
1936};
1937
1938static struct inet_protosw tcpv6_protosw = {
1939	.type		=	SOCK_STREAM,
1940	.protocol	=	IPPROTO_TCP,
1941	.prot		=	&tcpv6_prot,
1942	.ops		=	&inet6_stream_ops,
1943	.flags		=	INET_PROTOSW_PERMANENT |
1944				INET_PROTOSW_ICSK,
1945};
1946
1947static int __net_init tcpv6_net_init(struct net *net)
1948{
1949	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
1950				    SOCK_RAW, IPPROTO_TCP, net);
1951}
1952
1953static void __net_exit tcpv6_net_exit(struct net *net)
1954{
1955	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
1956}
1957
1958static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
1959{
1960	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
1961}
1962
1963static struct pernet_operations tcpv6_net_ops = {
1964	.init	    = tcpv6_net_init,
1965	.exit	    = tcpv6_net_exit,
1966	.exit_batch = tcpv6_net_exit_batch,
1967};
1968
1969int __init tcpv6_init(void)
1970{
1971	int ret;
1972
1973	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
1974	if (ret)
1975		goto out;
1976
1977	/* register the TCPv6 protocol switch (socket interface) */
1978	ret = inet6_register_protosw(&tcpv6_protosw);
1979	if (ret)
1980		goto out_tcpv6_protocol;
1981
1982	ret = register_pernet_subsys(&tcpv6_net_ops);
1983	if (ret)
1984		goto out_tcpv6_protosw;
1985out:
1986	return ret;
1987
1988out_tcpv6_protosw:
1989	inet6_unregister_protosw(&tcpv6_protosw);
1990out_tcpv6_protocol:
1991	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
1992	goto out;
1993}
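
/*
 * Editor's illustrative sketch (standalone, not part of this file): the
 * register/unwind idiom tcpv6_init() uses above.  Each failure jumps to a
 * label that undoes, in reverse order, only the steps that have already
 * succeeded.  The setup_a()/setup_b() helpers are hypothetical.
 */
#include <stdio.h>

static int setup_a(void)    { puts("a up");   return 0; }
static void teardown_a(void) { puts("a down"); }
static int setup_b(void)    { puts("b up");   return -1; /* simulated failure */ }

static int demo_init(void)
{
	int ret;

	ret = setup_a();
	if (ret)
		goto out;
	ret = setup_b();
	if (ret)
		goto out_a;	/* unwind only what succeeded */
	return 0;
out_a:
	teardown_a();
out:
	return ret;
}

int main(void)
{
	printf("demo_init: %d\n", demo_init());
	return 0;
}
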
1994
1995void tcpv6_exit(void)
1996{
1997	unregister_pernet_subsys(&tcpv6_net_ops);
1998	inet6_unregister_protosw(&tcpv6_protosw);
1999	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2000}