v3.15
   1/*
   2 *	TCP over IPv6
   3 *	Linux INET6 implementation
   4 *
   5 *	Authors:
   6 *	Pedro Roque		<roque@di.fc.ul.pt>
   7 *
   8 *	Based on:
   9 *	linux/net/ipv4/tcp.c
  10 *	linux/net/ipv4/tcp_input.c
  11 *	linux/net/ipv4/tcp_output.c
  12 *
  13 *	Fixes:
  14 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
  15 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
  16 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
  17 *					a single port at the same time.
  18 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
  19 *
  20 *	This program is free software; you can redistribute it and/or
  21 *      modify it under the terms of the GNU General Public License
  22 *      as published by the Free Software Foundation; either version
  23 *      2 of the License, or (at your option) any later version.
  24 */
  25
  26#include <linux/bottom_half.h>
  27#include <linux/module.h>
  28#include <linux/errno.h>
  29#include <linux/types.h>
  30#include <linux/socket.h>
  31#include <linux/sockios.h>
  32#include <linux/net.h>
  33#include <linux/jiffies.h>
  34#include <linux/in.h>
  35#include <linux/in6.h>
  36#include <linux/netdevice.h>
  37#include <linux/init.h>
  38#include <linux/jhash.h>
  39#include <linux/ipsec.h>
  40#include <linux/times.h>
  41#include <linux/slab.h>
  42#include <linux/uaccess.h>
  43#include <linux/ipv6.h>
  44#include <linux/icmpv6.h>
  45#include <linux/random.h>
  46
  47#include <net/tcp.h>
  48#include <net/ndisc.h>
  49#include <net/inet6_hashtables.h>
  50#include <net/inet6_connection_sock.h>
  51#include <net/ipv6.h>
  52#include <net/transp_v6.h>
  53#include <net/addrconf.h>
  54#include <net/ip6_route.h>
  55#include <net/ip6_checksum.h>
  56#include <net/inet_ecn.h>
  57#include <net/protocol.h>
  58#include <net/xfrm.h>
  59#include <net/snmp.h>
  60#include <net/dsfield.h>
  61#include <net/timewait_sock.h>
  62#include <net/netdma.h>
  63#include <net/inet_common.h>
  64#include <net/secure_seq.h>
  65#include <net/tcp_memcontrol.h>
  66#include <net/busy_poll.h>
  67
  68#include <linux/proc_fs.h>
  69#include <linux/seq_file.h>
  70
  71#include <linux/crypto.h>
  72#include <linux/scatterlist.h>
  73
  74static void	tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
  75static void	tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
  76				      struct request_sock *req);
  77
  78static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
  79
  80static const struct inet_connection_sock_af_ops ipv6_mapped;
  81static const struct inet_connection_sock_af_ops ipv6_specific;
  82#ifdef CONFIG_TCP_MD5SIG
  83static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
  84static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
  85#else
  86static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
  87						   const struct in6_addr *addr)
  88{
  89	return NULL;
  90}
  91#endif
  92
  93static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
  94{
  95	struct dst_entry *dst = skb_dst(skb);
  96	const struct rt6_info *rt = (const struct rt6_info *)dst;
  97
  98	dst_hold(dst);
  99	sk->sk_rx_dst = dst;
 100	inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
 101	if (rt->rt6i_node)
 102		inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
 103}
 104
 105static void tcp_v6_hash(struct sock *sk)
 106{
 107	if (sk->sk_state != TCP_CLOSE) {
 108		if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
 109			tcp_prot.hash(sk);
 110			return;
 111		}
 112		local_bh_disable();
 113		__inet6_hash(sk, NULL);
 114		local_bh_enable();
 115	}
 116}
 117
 118static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
 119{
 120	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
 121					    ipv6_hdr(skb)->saddr.s6_addr32,
 122					    tcp_hdr(skb)->dest,
 123					    tcp_hdr(skb)->source);
 124}
 125
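/* Active open (connect()) for TCP over IPv6: validate the destination,
 * hand v4-mapped addresses off to tcp_v4_connect(), otherwise route the
 * flow, pick a source address, hash the socket and send the SYN.
 */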
 126static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 127			  int addr_len)
 128{
 129	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
 130	struct inet_sock *inet = inet_sk(sk);
 131	struct inet_connection_sock *icsk = inet_csk(sk);
 132	struct ipv6_pinfo *np = inet6_sk(sk);
 133	struct tcp_sock *tp = tcp_sk(sk);
 134	struct in6_addr *saddr = NULL, *final_p, final;
 135	struct rt6_info *rt;
 136	struct flowi6 fl6;
 137	struct dst_entry *dst;
 138	int addr_type;
 139	int err;
 140
 141	if (addr_len < SIN6_LEN_RFC2133)
 142		return -EINVAL;
 143
 144	if (usin->sin6_family != AF_INET6)
 145		return -EAFNOSUPPORT;
 146
 147	memset(&fl6, 0, sizeof(fl6));
 148
 149	if (np->sndflow) {
 150		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
 151		IP6_ECN_flow_init(fl6.flowlabel);
 152		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
 153			struct ip6_flowlabel *flowlabel;
 154			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
 155			if (flowlabel == NULL)
 156				return -EINVAL;
 157			fl6_sock_release(flowlabel);
 158		}
 159	}
 160
 161	/*
 162	 *	connect() to INADDR_ANY means loopback (BSD'ism).
 163	 */
 164
 165	if (ipv6_addr_any(&usin->sin6_addr))
 166		usin->sin6_addr.s6_addr[15] = 0x1;
 167
 168	addr_type = ipv6_addr_type(&usin->sin6_addr);
 169
 170	if (addr_type & IPV6_ADDR_MULTICAST)
 171		return -ENETUNREACH;
 172
 173	if (addr_type&IPV6_ADDR_LINKLOCAL) {
 174		if (addr_len >= sizeof(struct sockaddr_in6) &&
 175		    usin->sin6_scope_id) {
 176			/* If interface is set while binding, indices
 177			 * must coincide.
 178			 */
 179			if (sk->sk_bound_dev_if &&
 180			    sk->sk_bound_dev_if != usin->sin6_scope_id)
 181				return -EINVAL;
 182
 183			sk->sk_bound_dev_if = usin->sin6_scope_id;
 184		}
 185
 186		/* Connect to link-local address requires an interface */
 187		if (!sk->sk_bound_dev_if)
 188			return -EINVAL;
 189	}
 190
 191	if (tp->rx_opt.ts_recent_stamp &&
 192	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
 193		tp->rx_opt.ts_recent = 0;
 194		tp->rx_opt.ts_recent_stamp = 0;
 195		tp->write_seq = 0;
 196	}
 197
 198	sk->sk_v6_daddr = usin->sin6_addr;
 199	np->flow_label = fl6.flowlabel;
 200
 201	/*
 202	 *	TCP over IPv4
 203	 */
 204
 205	if (addr_type == IPV6_ADDR_MAPPED) {
 206		u32 exthdrlen = icsk->icsk_ext_hdr_len;
 207		struct sockaddr_in sin;
 208
 209		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
 210
 211		if (__ipv6_only_sock(sk))
 212			return -ENETUNREACH;
 213
 214		sin.sin_family = AF_INET;
 215		sin.sin_port = usin->sin6_port;
 216		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
 217
 218		icsk->icsk_af_ops = &ipv6_mapped;
 219		sk->sk_backlog_rcv = tcp_v4_do_rcv;
 220#ifdef CONFIG_TCP_MD5SIG
 221		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
 222#endif
 223
 224		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
 225
 226		if (err) {
 227			icsk->icsk_ext_hdr_len = exthdrlen;
 228			icsk->icsk_af_ops = &ipv6_specific;
 229			sk->sk_backlog_rcv = tcp_v6_do_rcv;
 230#ifdef CONFIG_TCP_MD5SIG
 231			tp->af_specific = &tcp_sock_ipv6_specific;
 232#endif
 233			goto failure;
 234		} else {
 235			ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
 236			ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
 237					       &sk->sk_v6_rcv_saddr);
 238		}
 239
 240		return err;
 241	}
 242
 243	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
 244		saddr = &sk->sk_v6_rcv_saddr;
 245
 246	fl6.flowi6_proto = IPPROTO_TCP;
 247	fl6.daddr = sk->sk_v6_daddr;
 248	fl6.saddr = saddr ? *saddr : np->saddr;
 249	fl6.flowi6_oif = sk->sk_bound_dev_if;
 250	fl6.flowi6_mark = sk->sk_mark;
 251	fl6.fl6_dport = usin->sin6_port;
 252	fl6.fl6_sport = inet->inet_sport;
 253
 254	final_p = fl6_update_dst(&fl6, np->opt, &final);
 255
 256	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
 257
 258	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
 259	if (IS_ERR(dst)) {
 260		err = PTR_ERR(dst);
 261		goto failure;
 262	}
 263
 264	if (saddr == NULL) {
 265		saddr = &fl6.saddr;
 266		sk->sk_v6_rcv_saddr = *saddr;
 267	}
 268
 269	/* set the source address */
 270	np->saddr = *saddr;
 271	inet->inet_rcv_saddr = LOOPBACK4_IPV6;
 272
 273	sk->sk_gso_type = SKB_GSO_TCPV6;
 274	__ip6_dst_store(sk, dst, NULL, NULL);
 275
 276	rt = (struct rt6_info *) dst;
 277	if (tcp_death_row.sysctl_tw_recycle &&
 278	    !tp->rx_opt.ts_recent_stamp &&
 279	    ipv6_addr_equal(&rt->rt6i_dst.addr, &sk->sk_v6_daddr))
 280		tcp_fetch_timewait_stamp(sk, dst);
 281
 282	icsk->icsk_ext_hdr_len = 0;
 283	if (np->opt)
 284		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
 285					  np->opt->opt_nflen);
 286
 287	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
 288
 289	inet->inet_dport = usin->sin6_port;
 290
 291	tcp_set_state(sk, TCP_SYN_SENT);
 292	err = inet6_hash_connect(&tcp_death_row, sk);
 293	if (err)
 294		goto late_failure;
 295
 296	if (!tp->write_seq && likely(!tp->repair))
 297		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
 298							     sk->sk_v6_daddr.s6_addr32,
 299							     inet->inet_sport,
 300							     inet->inet_dport);
 301
 302	err = tcp_connect(sk);
 303	if (err)
 304		goto late_failure;
 305
 306	return 0;
 307
 308late_failure:
 309	tcp_set_state(sk, TCP_CLOSE);
 310	__sk_dst_reset(sk);
 311failure:
 312	inet->inet_dport = 0;
 313	sk->sk_route_caps = 0;
 314	return err;
 315}
 316
 317static void tcp_v6_mtu_reduced(struct sock *sk)
 318{
 319	struct dst_entry *dst;
 320
 321	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
 322		return;
 323
 324	dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
 325	if (!dst)
 326		return;
 327
 328	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
 329		tcp_sync_mss(sk, dst_mtu(dst));
 330		tcp_simple_retransmit(sk);
 331	}
 332}
 333
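/* ICMPv6 error handler: find the socket the error refers to, then handle
 * NDISC redirects, path-MTU updates (ICMPV6_PKT_TOOBIG, possibly deferred
 * if the socket is locked by the user) and fatal errors on SYN_SENT /
 * SYN_RECV sockets or pending request socks.
 */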
 334static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 335		u8 type, u8 code, int offset, __be32 info)
 336{
 337	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
 338	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
 339	struct ipv6_pinfo *np;
 340	struct sock *sk;
 341	int err;
 342	struct tcp_sock *tp;
 343	__u32 seq;
 344	struct net *net = dev_net(skb->dev);
 345
 346	sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
 347			th->dest, &hdr->saddr, th->source, skb->dev->ifindex);
 348
 349	if (sk == NULL) {
 350		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
 351				   ICMP6_MIB_INERRORS);
 352		return;
 353	}
 354
 355	if (sk->sk_state == TCP_TIME_WAIT) {
 356		inet_twsk_put(inet_twsk(sk));
 357		return;
 358	}
 359
 360	bh_lock_sock(sk);
 361	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
 362		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
 363
 364	if (sk->sk_state == TCP_CLOSE)
 365		goto out;
 366
 367	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
 368		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
 369		goto out;
 370	}
 371
 372	tp = tcp_sk(sk);
 373	seq = ntohl(th->seq);
 374	if (sk->sk_state != TCP_LISTEN &&
 375	    !between(seq, tp->snd_una, tp->snd_nxt)) {
 376		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
 377		goto out;
 378	}
 379
 380	np = inet6_sk(sk);
 381
 382	if (type == NDISC_REDIRECT) {
 383		struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
 384
 385		if (dst)
 386			dst->ops->redirect(dst, sk, skb);
 387		goto out;
 388	}
 389
 390	if (type == ICMPV6_PKT_TOOBIG) {
 391		/* We are not interested in TCP_LISTEN and open_requests
 392		 * (SYN-ACKs send out by Linux are always <576bytes so
 393		 * they should go through unfragmented).
 394		 */
 395		if (sk->sk_state == TCP_LISTEN)
 396			goto out;
 397
 398		if (!ip6_sk_accept_pmtu(sk))
 399			goto out;
 400
 401		tp->mtu_info = ntohl(info);
 402		if (!sock_owned_by_user(sk))
 403			tcp_v6_mtu_reduced(sk);
 404		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
 405					   &tp->tsq_flags))
 406			sock_hold(sk);
 407		goto out;
 408	}
 409
 410	icmpv6_err_convert(type, code, &err);
 411
 412	/* Might be for an request_sock */
 413	switch (sk->sk_state) {
 414		struct request_sock *req, **prev;
 415	case TCP_LISTEN:
 416		if (sock_owned_by_user(sk))
 417			goto out;
 418
 419		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
 420					   &hdr->saddr, inet6_iif(skb));
 421		if (!req)
 422			goto out;
 423
 424		/* ICMPs are not backlogged, hence we cannot get
 425		 * an established socket here.
 426		 */
 427		WARN_ON(req->sk != NULL);
 428
 429		if (seq != tcp_rsk(req)->snt_isn) {
 430			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
 431			goto out;
 432		}
 433
 434		inet_csk_reqsk_queue_drop(sk, req, prev);
 435		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
 436		goto out;
 437
 438	case TCP_SYN_SENT:
 439	case TCP_SYN_RECV:  /* Cannot happen.
  440			       It can, if SYNs are crossed. --ANK */
 441		if (!sock_owned_by_user(sk)) {
 442			sk->sk_err = err;
 443			sk->sk_error_report(sk);		/* Wake people up to see the error (see connect in sock.c) */
 444
 445			tcp_done(sk);
 446		} else
 447			sk->sk_err_soft = err;
 448		goto out;
 449	}
 450
 451	if (!sock_owned_by_user(sk) && np->recverr) {
 452		sk->sk_err = err;
 453		sk->sk_error_report(sk);
 454	} else
 455		sk->sk_err_soft = err;
 456
 457out:
 458	bh_unlock_sock(sk);
 459	sock_put(sk);
 460}
 461
 462
 463static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
 464			      struct flowi6 *fl6,
 465			      struct request_sock *req,
 466			      u16 queue_mapping)
 467{
 468	struct inet_request_sock *ireq = inet_rsk(req);
 469	struct ipv6_pinfo *np = inet6_sk(sk);
 470	struct sk_buff *skb;
 471	int err = -ENOMEM;
 472
 473	/* First, grab a route. */
 474	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req)) == NULL)
 475		goto done;
 476
 477	skb = tcp_make_synack(sk, dst, req, NULL);
 478
 479	if (skb) {
 480		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
 481				    &ireq->ir_v6_rmt_addr);
 482
 483		fl6->daddr = ireq->ir_v6_rmt_addr;
 484		if (np->repflow && (ireq->pktopts != NULL))
 485			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
 486
 487		skb_set_queue_mapping(skb, queue_mapping);
 488		err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
 489		err = net_xmit_eval(err);
 490	}
 491
 492done:
 493	return err;
 494}
 495
 496static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req)
 497{
 498	struct flowi6 fl6;
 499	int res;
 500
 501	res = tcp_v6_send_synack(sk, NULL, &fl6, req, 0);
 502	if (!res) {
 503		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
 504		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
 505	}
 506	return res;
 507}
 508
 509static void tcp_v6_reqsk_destructor(struct request_sock *req)
 510{
 511	kfree_skb(inet_rsk(req)->pktopts);
 512}
 513
 514#ifdef CONFIG_TCP_MD5SIG
 515static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
 516						   const struct in6_addr *addr)
 517{
 518	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
 519}
 520
 521static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
 522						struct sock *addr_sk)
 523{
 524	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
 525}
 526
 527static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
 528						      struct request_sock *req)
 529{
 530	return tcp_v6_md5_do_lookup(sk, &inet_rsk(req)->ir_v6_rmt_addr);
 531}
 532
 533static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
 534				 int optlen)
 535{
 536	struct tcp_md5sig cmd;
 537	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
 538
 539	if (optlen < sizeof(cmd))
 540		return -EINVAL;
 541
 542	if (copy_from_user(&cmd, optval, sizeof(cmd)))
 543		return -EFAULT;
 544
 545	if (sin6->sin6_family != AF_INET6)
 546		return -EINVAL;
 547
 548	if (!cmd.tcpm_keylen) {
 549		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
 550			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
 551					      AF_INET);
 552		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
 553				      AF_INET6);
 554	}
 555
 556	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
 557		return -EINVAL;
 558
 559	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
 560		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
 561				      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
 562
 563	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
 564			      AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
 565}
 566
 567static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
 568					const struct in6_addr *daddr,
 569					const struct in6_addr *saddr, int nbytes)
 570{
 571	struct tcp6_pseudohdr *bp;
 572	struct scatterlist sg;
 573
 574	bp = &hp->md5_blk.ip6;
 575	/* 1. TCP pseudo-header (RFC2460) */
 576	bp->saddr = *saddr;
 577	bp->daddr = *daddr;
 578	bp->protocol = cpu_to_be32(IPPROTO_TCP);
 579	bp->len = cpu_to_be32(nbytes);
 580
 581	sg_init_one(&sg, bp, sizeof(*bp));
 582	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
 583}
 584
 585static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
 586			       const struct in6_addr *daddr, struct in6_addr *saddr,
 587			       const struct tcphdr *th)
 588{
 589	struct tcp_md5sig_pool *hp;
 590	struct hash_desc *desc;
 591
 592	hp = tcp_get_md5sig_pool();
 593	if (!hp)
 594		goto clear_hash_noput;
 595	desc = &hp->md5_desc;
 596
 597	if (crypto_hash_init(desc))
 598		goto clear_hash;
 599	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
 600		goto clear_hash;
 601	if (tcp_md5_hash_header(hp, th))
 602		goto clear_hash;
 603	if (tcp_md5_hash_key(hp, key))
 604		goto clear_hash;
 605	if (crypto_hash_final(desc, md5_hash))
 606		goto clear_hash;
 607
 608	tcp_put_md5sig_pool();
 609	return 0;
 610
 611clear_hash:
 612	tcp_put_md5sig_pool();
 613clear_hash_noput:
 614	memset(md5_hash, 0, 16);
 615	return 1;
 616}
 617
 618static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
 619			       const struct sock *sk,
 620			       const struct request_sock *req,
 621			       const struct sk_buff *skb)
 622{
 623	const struct in6_addr *saddr, *daddr;
 624	struct tcp_md5sig_pool *hp;
 625	struct hash_desc *desc;
 626	const struct tcphdr *th = tcp_hdr(skb);
 627
 628	if (sk) {
 629		saddr = &inet6_sk(sk)->saddr;
 630		daddr = &sk->sk_v6_daddr;
 631	} else if (req) {
 632		saddr = &inet_rsk(req)->ir_v6_loc_addr;
 633		daddr = &inet_rsk(req)->ir_v6_rmt_addr;
 634	} else {
 635		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
 636		saddr = &ip6h->saddr;
 637		daddr = &ip6h->daddr;
 638	}
 639
 640	hp = tcp_get_md5sig_pool();
 641	if (!hp)
 642		goto clear_hash_noput;
 643	desc = &hp->md5_desc;
 644
 645	if (crypto_hash_init(desc))
 646		goto clear_hash;
 647
 648	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
 649		goto clear_hash;
 650	if (tcp_md5_hash_header(hp, th))
 651		goto clear_hash;
 652	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
 653		goto clear_hash;
 654	if (tcp_md5_hash_key(hp, key))
 655		goto clear_hash;
 656	if (crypto_hash_final(desc, md5_hash))
 657		goto clear_hash;
 658
 659	tcp_put_md5sig_pool();
 660	return 0;
 661
 662clear_hash:
 663	tcp_put_md5sig_pool();
 664clear_hash_noput:
 665	memset(md5_hash, 0, 16);
 666	return 1;
 667}
 668
 669static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
 670{
 671	const __u8 *hash_location = NULL;
 672	struct tcp_md5sig_key *hash_expected;
 673	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
 674	const struct tcphdr *th = tcp_hdr(skb);
 675	int genhash;
 676	u8 newhash[16];
 677
 678	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
 679	hash_location = tcp_parse_md5sig_option(th);
 680
 681	/* We've parsed the options - do we have a hash? */
 682	if (!hash_expected && !hash_location)
 683		return 0;
 684
 685	if (hash_expected && !hash_location) {
 686		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
 687		return 1;
 688	}
 689
 690	if (!hash_expected && hash_location) {
 691		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
 692		return 1;
 693	}
 694
 695	/* check the signature */
 696	genhash = tcp_v6_md5_hash_skb(newhash,
 697				      hash_expected,
 698				      NULL, NULL, skb);
 699
 700	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
 701		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
 702				     genhash ? "failed" : "mismatch",
 703				     &ip6h->saddr, ntohs(th->source),
 704				     &ip6h->daddr, ntohs(th->dest));
 705		return 1;
 706	}
 707	return 0;
 708}
 709#endif
 710
 711struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
 712	.family		=	AF_INET6,
 713	.obj_size	=	sizeof(struct tcp6_request_sock),
 714	.rtx_syn_ack	=	tcp_v6_rtx_synack,
 715	.send_ack	=	tcp_v6_reqsk_send_ack,
 716	.destructor	=	tcp_v6_reqsk_destructor,
 717	.send_reset	=	tcp_v6_send_reset,
 718	.syn_ack_timeout =	tcp_syn_ack_timeout,
 719};
 720
 721#ifdef CONFIG_TCP_MD5SIG
 722static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
 723	.md5_lookup	=	tcp_v6_reqsk_md5_lookup,
 724	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
 725};
 726#endif
 727
 728static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
 729				 u32 tsval, u32 tsecr, int oif,
 730				 struct tcp_md5sig_key *key, int rst, u8 tclass,
 731				 u32 label)
 732{
 733	const struct tcphdr *th = tcp_hdr(skb);
 734	struct tcphdr *t1;
 735	struct sk_buff *buff;
 736	struct flowi6 fl6;
 737	struct net *net = dev_net(skb_dst(skb)->dev);
 738	struct sock *ctl_sk = net->ipv6.tcp_sk;
 739	unsigned int tot_len = sizeof(struct tcphdr);
 740	struct dst_entry *dst;
 741	__be32 *topt;
 742
 743	if (tsecr)
 744		tot_len += TCPOLEN_TSTAMP_ALIGNED;
 745#ifdef CONFIG_TCP_MD5SIG
 746	if (key)
 747		tot_len += TCPOLEN_MD5SIG_ALIGNED;
 748#endif
 749
 750	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
 751			 GFP_ATOMIC);
 752	if (buff == NULL)
 753		return;
 754
 755	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
 756
 757	t1 = (struct tcphdr *) skb_push(buff, tot_len);
 758	skb_reset_transport_header(buff);
 759
 760	/* Swap the send and the receive. */
 761	memset(t1, 0, sizeof(*t1));
 762	t1->dest = th->source;
 763	t1->source = th->dest;
 764	t1->doff = tot_len / 4;
 765	t1->seq = htonl(seq);
 766	t1->ack_seq = htonl(ack);
 767	t1->ack = !rst || !th->ack;
 768	t1->rst = rst;
 769	t1->window = htons(win);
 770
 771	topt = (__be32 *)(t1 + 1);
 772
 773	if (tsecr) {
 774		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
 775				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
 776		*topt++ = htonl(tsval);
 777		*topt++ = htonl(tsecr);
 778	}
 779
 780#ifdef CONFIG_TCP_MD5SIG
 781	if (key) {
 782		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
 783				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
 784		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
 785				    &ipv6_hdr(skb)->saddr,
 786				    &ipv6_hdr(skb)->daddr, t1);
 787	}
 788#endif
 789
 790	memset(&fl6, 0, sizeof(fl6));
 791	fl6.daddr = ipv6_hdr(skb)->saddr;
 792	fl6.saddr = ipv6_hdr(skb)->daddr;
 793	fl6.flowlabel = label;
 794
 795	buff->ip_summed = CHECKSUM_PARTIAL;
 796	buff->csum = 0;
 797
 798	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
 799
 800	fl6.flowi6_proto = IPPROTO_TCP;
 801	if (rt6_need_strict(&fl6.daddr) && !oif)
 802		fl6.flowi6_oif = inet6_iif(skb);
 803	else
 804		fl6.flowi6_oif = oif;
 805	fl6.fl6_dport = t1->dest;
 806	fl6.fl6_sport = t1->source;
 807	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
 808
  809	/* Pass a socket to ip6_dst_lookup even when it is for an RST;
  810	 * the underlying function will use it to retrieve the network
  811	 * namespace.
  812	 */
 813	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
 814	if (!IS_ERR(dst)) {
 815		skb_dst_set(buff, dst);
 816		ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
 817		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
 818		if (rst)
 819			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
 820		return;
 821	}
 822
 823	kfree_skb(buff);
 824}
 825
 826static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
 827{
 828	const struct tcphdr *th = tcp_hdr(skb);
 829	u32 seq = 0, ack_seq = 0;
 830	struct tcp_md5sig_key *key = NULL;
 831#ifdef CONFIG_TCP_MD5SIG
 832	const __u8 *hash_location = NULL;
 833	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
 834	unsigned char newhash[16];
 835	int genhash;
 836	struct sock *sk1 = NULL;
 837#endif
 838	int oif;
 839
 840	if (th->rst)
 841		return;
 842
 843	if (!ipv6_unicast_destination(skb))
 844		return;
 845
 846#ifdef CONFIG_TCP_MD5SIG
 847	hash_location = tcp_parse_md5sig_option(th);
 848	if (!sk && hash_location) {
 849		/*
  850		 * The active side is gone. Try to find the listening socket via
  851		 * the source port, then find the md5 key through that socket.
  852		 * We do not lose any security here:
  853		 * the incoming packet is checked against the md5 hash of the key
  854		 * we find, and no RST is generated if the hash doesn't match.
 855		 */
 856		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
 857					   &tcp_hashinfo, &ipv6h->saddr,
 858					   th->source, &ipv6h->daddr,
 859					   ntohs(th->source), inet6_iif(skb));
 860		if (!sk1)
 861			return;
 862
 863		rcu_read_lock();
 864		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
 865		if (!key)
 866			goto release_sk1;
 867
 868		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, NULL, skb);
 869		if (genhash || memcmp(hash_location, newhash, 16) != 0)
 870			goto release_sk1;
 871	} else {
 872		key = sk ? tcp_v6_md5_do_lookup(sk, &ipv6h->saddr) : NULL;
 873	}
 874#endif
 875
 876	if (th->ack)
 877		seq = ntohl(th->ack_seq);
 878	else
 879		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
 880			  (th->doff << 2);
 881
 882	oif = sk ? sk->sk_bound_dev_if : 0;
 883	tcp_v6_send_response(skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);
 884
 885#ifdef CONFIG_TCP_MD5SIG
 886release_sk1:
 887	if (sk1) {
 888		rcu_read_unlock();
 889		sock_put(sk1);
 890	}
 891#endif
 892}
 893
 894static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
 895			    u32 win, u32 tsval, u32 tsecr, int oif,
 896			    struct tcp_md5sig_key *key, u8 tclass,
 897			    u32 label)
 898{
 899	tcp_v6_send_response(skb, seq, ack, win, tsval, tsecr, oif, key, 0, tclass,
 900			     label);
 901}
 902
 903static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
 904{
 905	struct inet_timewait_sock *tw = inet_twsk(sk);
 906	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
 907
 908	tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
 909			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
 910			tcp_time_stamp + tcptw->tw_ts_offset,
 911			tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
 912			tw->tw_tclass, (tw->tw_flowlabel << 12));
 913
 914	inet_twsk_put(tw);
 915}
 916
 917static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
 918				  struct request_sock *req)
 919{
 920	tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1,
 921			req->rcv_wnd, tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
 922			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
 923			0, 0);
 924}
 925
 926
 927static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
 928{
 929	struct request_sock *req, **prev;
 930	const struct tcphdr *th = tcp_hdr(skb);
 931	struct sock *nsk;
 932
 933	/* Find possible connection requests. */
 934	req = inet6_csk_search_req(sk, &prev, th->source,
 935				   &ipv6_hdr(skb)->saddr,
 936				   &ipv6_hdr(skb)->daddr, inet6_iif(skb));
 937	if (req)
 938		return tcp_check_req(sk, skb, req, prev, false);
 939
 940	nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
 941			&ipv6_hdr(skb)->saddr, th->source,
 942			&ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));
 943
 944	if (nsk) {
 945		if (nsk->sk_state != TCP_TIME_WAIT) {
 946			bh_lock_sock(nsk);
 947			return nsk;
 948		}
 949		inet_twsk_put(inet_twsk(nsk));
 950		return NULL;
 951	}
 952
 953#ifdef CONFIG_SYN_COOKIES
 954	if (!th->syn)
 955		sk = cookie_v6_check(sk, skb);
 956#endif
 957	return sk;
 958}
 959
 960/* FIXME: this is substantially similar to the ipv4 code.
 961 * Can some kind of merge be done? -- erics
 962 */
 963static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 964{
 965	struct tcp_options_received tmp_opt;
 966	struct request_sock *req;
 967	struct inet_request_sock *ireq;
 968	struct ipv6_pinfo *np = inet6_sk(sk);
 969	struct tcp_sock *tp = tcp_sk(sk);
 970	__u32 isn = TCP_SKB_CB(skb)->when;
 971	struct dst_entry *dst = NULL;
 972	struct flowi6 fl6;
 973	bool want_cookie = false;
 974
 975	if (skb->protocol == htons(ETH_P_IP))
 976		return tcp_v4_conn_request(sk, skb);
 977
 978	if (!ipv6_unicast_destination(skb))
 979		goto drop;
 980
 981	if ((sysctl_tcp_syncookies == 2 ||
 982	     inet_csk_reqsk_queue_is_full(sk)) && !isn) {
 983		want_cookie = tcp_syn_flood_action(sk, skb, "TCPv6");
 984		if (!want_cookie)
 985			goto drop;
 986	}
 987
 988	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
 989		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
 990		goto drop;
 991	}
 992
 993	req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
 994	if (req == NULL)
 995		goto drop;
 996
 997#ifdef CONFIG_TCP_MD5SIG
 998	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
 999#endif
1000
1001	tcp_clear_options(&tmp_opt);
1002	tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
1003	tmp_opt.user_mss = tp->rx_opt.user_mss;
1004	tcp_parse_options(skb, &tmp_opt, 0, NULL);
1005
1006	if (want_cookie && !tmp_opt.saw_tstamp)
1007		tcp_clear_options(&tmp_opt);
1008
1009	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
1010	tcp_openreq_init(req, &tmp_opt, skb);
1011
1012	ireq = inet_rsk(req);
1013	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
1014	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
1015	if (!want_cookie || tmp_opt.tstamp_ok)
1016		TCP_ECN_create_request(req, skb, sock_net(sk));
1017
1018	ireq->ir_iif = sk->sk_bound_dev_if;
1019
1020	/* So that link locals have meaning */
1021	if (!sk->sk_bound_dev_if &&
1022	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
1023		ireq->ir_iif = inet6_iif(skb);
1024
1025	if (!isn) {
1026		if (ipv6_opt_accepted(sk, skb) ||
1027		    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
1028		    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim ||
1029		    np->repflow) {
1030			atomic_inc(&skb->users);
1031			ireq->pktopts = skb;
1032		}
1033
1034		if (want_cookie) {
1035			isn = cookie_v6_init_sequence(sk, skb, &req->mss);
1036			req->cookie_ts = tmp_opt.tstamp_ok;
1037			goto have_isn;
1038		}
1039
1040		/* VJ's idea. We save last timestamp seen
1041		 * from the destination in peer table, when entering
1042		 * state TIME-WAIT, and check against it before
1043		 * accepting new connection request.
1044		 *
 1045		 * If "isn" is not zero, this request hit an alive
 1046		 * timewait bucket, so all the necessary checks
1047		 * are made in the function processing timewait state.
1048		 */
1049		if (tmp_opt.saw_tstamp &&
1050		    tcp_death_row.sysctl_tw_recycle &&
1051		    (dst = inet6_csk_route_req(sk, &fl6, req)) != NULL) {
1052			if (!tcp_peer_is_proven(req, dst, true)) {
1053				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
1054				goto drop_and_release;
1055			}
1056		}
1057		/* Kill the following clause, if you dislike this way. */
1058		else if (!sysctl_tcp_syncookies &&
1059			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
1060			  (sysctl_max_syn_backlog >> 2)) &&
1061			 !tcp_peer_is_proven(req, dst, false)) {
1062			/* Without syncookies last quarter of
1063			 * backlog is filled with destinations,
1064			 * proven to be alive.
1065			 * It means that we continue to communicate
 1066			 * with destinations already remembered
 1067			 * at the moment of the synflood.
1068			 */
1069			LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI6/%u\n",
1070				       &ireq->ir_v6_rmt_addr, ntohs(tcp_hdr(skb)->source));
1071			goto drop_and_release;
1072		}
1073
1074		isn = tcp_v6_init_sequence(skb);
1075	}
1076have_isn:
1077	tcp_rsk(req)->snt_isn = isn;
1078
1079	if (security_inet_conn_request(sk, skb, req))
1080		goto drop_and_release;
1081
1082	if (tcp_v6_send_synack(sk, dst, &fl6, req,
1083			       skb_get_queue_mapping(skb)) ||
1084	    want_cookie)
1085		goto drop_and_free;
1086
1087	tcp_rsk(req)->snt_synack = tcp_time_stamp;
1088	tcp_rsk(req)->listener = NULL;
1089	inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1090	return 0;
1091
1092drop_and_release:
1093	dst_release(dst);
1094drop_and_free:
1095	reqsk_free(req);
1096drop:
1097	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1098	return 0; /* don't send reset */
1099}
1100
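/* Create the child socket once the three-way handshake completes.  Handles
 * both native IPv6 and v4-mapped (ETH_P_IP) requests, copies IPv6 options
 * and pktoptions from the listener and the request, and inherits any MD5
 * key that matches the peer.
 */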
1101static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1102					 struct request_sock *req,
1103					 struct dst_entry *dst)
1104{
1105	struct inet_request_sock *ireq;
1106	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
1107	struct tcp6_sock *newtcp6sk;
1108	struct inet_sock *newinet;
1109	struct tcp_sock *newtp;
1110	struct sock *newsk;
1111#ifdef CONFIG_TCP_MD5SIG
1112	struct tcp_md5sig_key *key;
1113#endif
1114	struct flowi6 fl6;
1115
1116	if (skb->protocol == htons(ETH_P_IP)) {
1117		/*
1118		 *	v6 mapped
1119		 */
1120
1121		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);
1122
1123		if (newsk == NULL)
1124			return NULL;
1125
1126		newtcp6sk = (struct tcp6_sock *)newsk;
1127		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1128
1129		newinet = inet_sk(newsk);
1130		newnp = inet6_sk(newsk);
1131		newtp = tcp_sk(newsk);
1132
1133		memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1134
1135		ipv6_addr_set_v4mapped(newinet->inet_daddr, &newsk->sk_v6_daddr);
1136
1137		ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);
1138
1139		newsk->sk_v6_rcv_saddr = newnp->saddr;
1140
1141		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1142		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
1143#ifdef CONFIG_TCP_MD5SIG
1144		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1145#endif
1146
1147		newnp->ipv6_ac_list = NULL;
1148		newnp->ipv6_fl_list = NULL;
1149		newnp->pktoptions  = NULL;
1150		newnp->opt	   = NULL;
1151		newnp->mcast_oif   = inet6_iif(skb);
1152		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
1153		newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
1154		if (np->repflow)
1155			newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
1156
1157		/*
1158		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1159		 * here, tcp_create_openreq_child now does this for us, see the comment in
1160		 * that function for the gory details. -acme
1161		 */
1162
 1163		/* This is a tricky place. Until this moment the IPv4 tcp code
1164		   worked with IPv6 icsk.icsk_af_ops.
1165		   Sync it now.
1166		 */
1167		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
1168
1169		return newsk;
1170	}
1171
1172	ireq = inet_rsk(req);
1173
1174	if (sk_acceptq_is_full(sk))
1175		goto out_overflow;
1176
1177	if (!dst) {
1178		dst = inet6_csk_route_req(sk, &fl6, req);
1179		if (!dst)
1180			goto out;
1181	}
1182
1183	newsk = tcp_create_openreq_child(sk, req, skb);
1184	if (newsk == NULL)
1185		goto out_nonewsk;
1186
1187	/*
1188	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1189	 * count here, tcp_create_openreq_child now does this for us, see the
1190	 * comment in that function for the gory details. -acme
1191	 */
1192
1193	newsk->sk_gso_type = SKB_GSO_TCPV6;
1194	__ip6_dst_store(newsk, dst, NULL, NULL);
1195	inet6_sk_rx_dst_set(newsk, skb);
1196
1197	newtcp6sk = (struct tcp6_sock *)newsk;
1198	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1199
1200	newtp = tcp_sk(newsk);
1201	newinet = inet_sk(newsk);
1202	newnp = inet6_sk(newsk);
1203
1204	memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1205
1206	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
1207	newnp->saddr = ireq->ir_v6_loc_addr;
1208	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
1209	newsk->sk_bound_dev_if = ireq->ir_iif;
1210
1211	/* Now IPv6 options...
1212
1213	   First: no IPv4 options.
1214	 */
1215	newinet->inet_opt = NULL;
1216	newnp->ipv6_ac_list = NULL;
1217	newnp->ipv6_fl_list = NULL;
1218
1219	/* Clone RX bits */
1220	newnp->rxopt.all = np->rxopt.all;
1221
1222	/* Clone pktoptions received with SYN */
1223	newnp->pktoptions = NULL;
1224	if (ireq->pktopts != NULL) {
1225		newnp->pktoptions = skb_clone(ireq->pktopts,
1226					      sk_gfp_atomic(sk, GFP_ATOMIC));
1227		consume_skb(ireq->pktopts);
1228		ireq->pktopts = NULL;
1229		if (newnp->pktoptions)
1230			skb_set_owner_r(newnp->pktoptions, newsk);
1231	}
1232	newnp->opt	  = NULL;
1233	newnp->mcast_oif  = inet6_iif(skb);
1234	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1235	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
1236	if (np->repflow)
1237		newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
1238
1239	/* Clone native IPv6 options from listening socket (if any)
1240
1241	   Yes, keeping reference count would be much more clever,
 1242	   but we do one more thing here: reattach optmem
1243	   to newsk.
1244	 */
1245	if (np->opt)
1246		newnp->opt = ipv6_dup_options(newsk, np->opt);
1247
1248	inet_csk(newsk)->icsk_ext_hdr_len = 0;
1249	if (newnp->opt)
1250		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
1251						     newnp->opt->opt_flen);
1252
1253	tcp_sync_mss(newsk, dst_mtu(dst));
1254	newtp->advmss = dst_metric_advmss(dst);
1255	if (tcp_sk(sk)->rx_opt.user_mss &&
1256	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1257		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1258
1259	tcp_initialize_rcv_mss(newsk);
1260
1261	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
1262	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
1263
1264#ifdef CONFIG_TCP_MD5SIG
1265	/* Copy over the MD5 key from the original socket */
1266	key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
1267	if (key != NULL) {
1268		/* We're using one, so create a matching key
1269		 * on the newsk structure. If we fail to get
1270		 * memory, then we end up not copying the key
1271		 * across. Shucks.
1272		 */
1273		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
1274			       AF_INET6, key->key, key->keylen,
1275			       sk_gfp_atomic(sk, GFP_ATOMIC));
1276	}
1277#endif
1278
1279	if (__inet_inherit_port(sk, newsk) < 0) {
1280		inet_csk_prepare_forced_close(newsk);
1281		tcp_done(newsk);
1282		goto out;
1283	}
1284	__inet6_hash(newsk, NULL);
1285
1286	return newsk;
1287
1288out_overflow:
1289	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1290out_nonewsk:
1291	dst_release(dst);
1292out:
1293	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1294	return NULL;
1295}
1296
1297static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
1298{
1299	if (skb->ip_summed == CHECKSUM_COMPLETE) {
1300		if (!tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr,
1301				  &ipv6_hdr(skb)->daddr, skb->csum)) {
1302			skb->ip_summed = CHECKSUM_UNNECESSARY;
1303			return 0;
1304		}
1305	}
1306
1307	skb->csum = ~csum_unfold(tcp_v6_check(skb->len,
1308					      &ipv6_hdr(skb)->saddr,
1309					      &ipv6_hdr(skb)->daddr, 0));
1310
1311	if (skb->len <= 76)
1312		return __skb_checksum_complete(skb);
1313	return 0;
1314}
1315
 1316/* The socket must have its spinlock held when we get
1317 * here.
1318 *
1319 * We have a potential double-lock case here, so even when
1320 * doing backlog processing we use the BH locking scheme.
1321 * This is because we cannot sleep with the original spinlock
1322 * held.
1323 */
1324static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1325{
1326	struct ipv6_pinfo *np = inet6_sk(sk);
1327	struct tcp_sock *tp;
1328	struct sk_buff *opt_skb = NULL;
1329
1330	/* Imagine: socket is IPv6. IPv4 packet arrives,
 1331	   goes to the IPv4 receive handler and is backlogged.
 1332	   From the backlog it always goes here. Kerboom...
 1333	   Fortunately, tcp_rcv_established and rcv_established
 1334	   handle them correctly, but that is not the case with
1335	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
1336	 */
1337
1338	if (skb->protocol == htons(ETH_P_IP))
1339		return tcp_v4_do_rcv(sk, skb);
1340
1341#ifdef CONFIG_TCP_MD5SIG
1342	if (tcp_v6_inbound_md5_hash(sk, skb))
1343		goto discard;
1344#endif
1345
1346	if (sk_filter(sk, skb))
1347		goto discard;
1348
1349	/*
1350	 *	socket locking is here for SMP purposes as backlog rcv
1351	 *	is currently called with bh processing disabled.
1352	 */
1353
1354	/* Do Stevens' IPV6_PKTOPTIONS.
1355
 1356	   Yes, guys, it is the only place in our code where we
 1357	   can do this without affecting IPv4.
 1358	   The rest of the code is protocol independent,
 1359	   and I do not like the idea of uglifying IPv4.
1360
 1361	   Actually, the whole idea behind IPV6_PKTOPTIONS
 1362	   does not look very well thought out. For now we latch
1363	   options, received in the last packet, enqueued
1364	   by tcp. Feel free to propose better solution.
1365					       --ANK (980728)
1366	 */
1367	if (np->rxopt.all)
1368		opt_skb = skb_clone(skb, sk_gfp_atomic(sk, GFP_ATOMIC));
1369
1370	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1371		struct dst_entry *dst = sk->sk_rx_dst;
1372
1373		sock_rps_save_rxhash(sk, skb);
1374		if (dst) {
1375			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1376			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
1377				dst_release(dst);
1378				sk->sk_rx_dst = NULL;
1379			}
1380		}
1381
1382		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
1383		if (opt_skb)
1384			goto ipv6_pktoptions;
1385		return 0;
1386	}
1387
1388	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1389		goto csum_err;
1390
1391	if (sk->sk_state == TCP_LISTEN) {
1392		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
1393		if (!nsk)
1394			goto discard;
1395
1396		/*
1397		 * Queue it on the new socket if the new socket is active,
1398		 * otherwise we just shortcircuit this and continue with
1399		 * the new socket..
1400		 */
1401		if (nsk != sk) {
1402			sock_rps_save_rxhash(nsk, skb);
1403			if (tcp_child_process(sk, nsk, skb))
1404				goto reset;
1405			if (opt_skb)
1406				__kfree_skb(opt_skb);
1407			return 0;
1408		}
1409	} else
1410		sock_rps_save_rxhash(sk, skb);
1411
1412	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
1413		goto reset;
1414	if (opt_skb)
1415		goto ipv6_pktoptions;
1416	return 0;
1417
1418reset:
1419	tcp_v6_send_reset(sk, skb);
1420discard:
1421	if (opt_skb)
1422		__kfree_skb(opt_skb);
1423	kfree_skb(skb);
1424	return 0;
1425csum_err:
1426	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
1427	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1428	goto discard;
1429
1430
1431ipv6_pktoptions:
1432	/* Do you ask, what is it?
1433
1434	   1. skb was enqueued by tcp.
1435	   2. skb is added to tail of read queue, rather than out of order.
1436	   3. socket is not in passive state.
1437	   4. Finally, it really contains options, which user wants to receive.
1438	 */
1439	tp = tcp_sk(sk);
1440	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1441	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
1442		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
1443			np->mcast_oif = inet6_iif(opt_skb);
1444		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1445			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
1446		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
1447			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
1448		if (np->repflow)
1449			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
1450		if (ipv6_opt_accepted(sk, opt_skb)) {
1451			skb_set_owner_r(opt_skb, sk);
1452			opt_skb = xchg(&np->pktoptions, opt_skb);
1453		} else {
1454			__kfree_skb(opt_skb);
1455			opt_skb = xchg(&np->pktoptions, NULL);
1456		}
1457	}
1458
1459	kfree_skb(opt_skb);
1460	return 0;
1461}
1462
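/* Main receive entry point, registered as the inet6 protocol handler.
 * Validates the TCP header and checksum, fills in the skb control block,
 * looks up the owning socket and either processes the segment immediately,
 * prequeues it, or defers it to the socket backlog; TIME_WAIT sockets are
 * dispatched through tcp_timewait_state_process().
 */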
1463static int tcp_v6_rcv(struct sk_buff *skb)
1464{
1465	const struct tcphdr *th;
1466	const struct ipv6hdr *hdr;
1467	struct sock *sk;
1468	int ret;
1469	struct net *net = dev_net(skb->dev);
1470
1471	if (skb->pkt_type != PACKET_HOST)
1472		goto discard_it;
1473
1474	/*
1475	 *	Count it even if it's bad.
1476	 */
1477	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1478
1479	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1480		goto discard_it;
1481
1482	th = tcp_hdr(skb);
1483
1484	if (th->doff < sizeof(struct tcphdr)/4)
1485		goto bad_packet;
1486	if (!pskb_may_pull(skb, th->doff*4))
1487		goto discard_it;
1488
1489	if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
1490		goto csum_error;
1491
1492	th = tcp_hdr(skb);
1493	hdr = ipv6_hdr(skb);
1494	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1495	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1496				    skb->len - th->doff*4);
1497	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1498	TCP_SKB_CB(skb)->when = 0;
1499	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1500	TCP_SKB_CB(skb)->sacked = 0;
1501
1502	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
1503	if (!sk)
1504		goto no_tcp_socket;
1505
1506process:
1507	if (sk->sk_state == TCP_TIME_WAIT)
1508		goto do_time_wait;
1509
1510	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
1511		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1512		goto discard_and_relse;
1513	}
1514
1515	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1516		goto discard_and_relse;
1517
1518	if (sk_filter(sk, skb))
1519		goto discard_and_relse;
1520
1521	sk_mark_napi_id(sk, skb);
1522	skb->dev = NULL;
1523
1524	bh_lock_sock_nested(sk);
1525	ret = 0;
1526	if (!sock_owned_by_user(sk)) {
1527#ifdef CONFIG_NET_DMA
1528		struct tcp_sock *tp = tcp_sk(sk);
1529		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1530			tp->ucopy.dma_chan = net_dma_find_channel();
1531		if (tp->ucopy.dma_chan)
1532			ret = tcp_v6_do_rcv(sk, skb);
1533		else
1534#endif
1535		{
1536			if (!tcp_prequeue(sk, skb))
1537				ret = tcp_v6_do_rcv(sk, skb);
1538		}
1539	} else if (unlikely(sk_add_backlog(sk, skb,
1540					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
1541		bh_unlock_sock(sk);
1542		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1543		goto discard_and_relse;
1544	}
1545	bh_unlock_sock(sk);
1546
1547	sock_put(sk);
1548	return ret ? -1 : 0;
1549
1550no_tcp_socket:
1551	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1552		goto discard_it;
1553
1554	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1555csum_error:
1556		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
1557bad_packet:
1558		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1559	} else {
1560		tcp_v6_send_reset(NULL, skb);
1561	}
1562
1563discard_it:
1564	kfree_skb(skb);
1565	return 0;
1566
1567discard_and_relse:
1568	sock_put(sk);
1569	goto discard_it;
1570
1571do_time_wait:
1572	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1573		inet_twsk_put(inet_twsk(sk));
1574		goto discard_it;
1575	}
1576
1577	if (skb->len < (th->doff<<2)) {
1578		inet_twsk_put(inet_twsk(sk));
1579		goto bad_packet;
1580	}
1581	if (tcp_checksum_complete(skb)) {
1582		inet_twsk_put(inet_twsk(sk));
1583		goto csum_error;
1584	}
1585
1586	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1587	case TCP_TW_SYN:
1588	{
1589		struct sock *sk2;
1590
1591		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
1592					    &ipv6_hdr(skb)->saddr, th->source,
1593					    &ipv6_hdr(skb)->daddr,
1594					    ntohs(th->dest), inet6_iif(skb));
1595		if (sk2 != NULL) {
1596			struct inet_timewait_sock *tw = inet_twsk(sk);
1597			inet_twsk_deschedule(tw, &tcp_death_row);
1598			inet_twsk_put(tw);
1599			sk = sk2;
1600			goto process;
1601		}
1602		/* Fall through to ACK */
1603	}
1604	case TCP_TW_ACK:
1605		tcp_v6_timewait_ack(sk, skb);
1606		break;
1607	case TCP_TW_RST:
1608		goto no_tcp_socket;
1609	case TCP_TW_SUCCESS:
1610		;
1611	}
1612	goto discard_it;
1613}
1614
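/* Early demux: at IP receive time, look up an established socket by the
 * packet's 4-tuple and attach it to the skb, reusing the socket's cached
 * receive dst (if still valid) so the normal lookup and routing can be
 * skipped later.
 */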
1615static void tcp_v6_early_demux(struct sk_buff *skb)
1616{
1617	const struct ipv6hdr *hdr;
1618	const struct tcphdr *th;
1619	struct sock *sk;
1620
1621	if (skb->pkt_type != PACKET_HOST)
1622		return;
1623
1624	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1625		return;
1626
1627	hdr = ipv6_hdr(skb);
1628	th = tcp_hdr(skb);
1629
1630	if (th->doff < sizeof(struct tcphdr) / 4)
1631		return;
1632
1633	sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1634					&hdr->saddr, th->source,
1635					&hdr->daddr, ntohs(th->dest),
1636					inet6_iif(skb));
1637	if (sk) {
1638		skb->sk = sk;
1639		skb->destructor = sock_edemux;
1640		if (sk->sk_state != TCP_TIME_WAIT) {
1641			struct dst_entry *dst = sk->sk_rx_dst;
1642
1643			if (dst)
1644				dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
1645			if (dst &&
1646			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1647				skb_dst_set_noref(skb, dst);
1648		}
1649	}
1650}
1651
1652static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1653	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
1654	.twsk_unique	= tcp_twsk_unique,
1655	.twsk_destructor = tcp_twsk_destructor,
1656};
1657
1658static const struct inet_connection_sock_af_ops ipv6_specific = {
1659	.queue_xmit	   = inet6_csk_xmit,
1660	.send_check	   = tcp_v6_send_check,
1661	.rebuild_header	   = inet6_sk_rebuild_header,
1662	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
1663	.conn_request	   = tcp_v6_conn_request,
1664	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
1665	.net_header_len	   = sizeof(struct ipv6hdr),
1666	.net_frag_header_len = sizeof(struct frag_hdr),
1667	.setsockopt	   = ipv6_setsockopt,
1668	.getsockopt	   = ipv6_getsockopt,
1669	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
1670	.sockaddr_len	   = sizeof(struct sockaddr_in6),
1671	.bind_conflict	   = inet6_csk_bind_conflict,
1672#ifdef CONFIG_COMPAT
1673	.compat_setsockopt = compat_ipv6_setsockopt,
1674	.compat_getsockopt = compat_ipv6_getsockopt,
1675#endif
1676};
1677
1678#ifdef CONFIG_TCP_MD5SIG
1679static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
1680	.md5_lookup	=	tcp_v6_md5_lookup,
1681	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
1682	.md5_parse	=	tcp_v6_parse_md5_keys,
1683};
1684#endif
1685
1686/*
1687 *	TCP over IPv4 via INET6 API
1688 */
1689static const struct inet_connection_sock_af_ops ipv6_mapped = {
1690	.queue_xmit	   = ip_queue_xmit,
1691	.send_check	   = tcp_v4_send_check,
1692	.rebuild_header	   = inet_sk_rebuild_header,
1693	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
1694	.conn_request	   = tcp_v6_conn_request,
1695	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
1696	.net_header_len	   = sizeof(struct iphdr),
1697	.setsockopt	   = ipv6_setsockopt,
1698	.getsockopt	   = ipv6_getsockopt,
1699	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
1700	.sockaddr_len	   = sizeof(struct sockaddr_in6),
1701	.bind_conflict	   = inet6_csk_bind_conflict,
1702#ifdef CONFIG_COMPAT
1703	.compat_setsockopt = compat_ipv6_setsockopt,
1704	.compat_getsockopt = compat_ipv6_getsockopt,
1705#endif
1706};
1707
1708#ifdef CONFIG_TCP_MD5SIG
1709static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
1710	.md5_lookup	=	tcp_v4_md5_lookup,
1711	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
1712	.md5_parse	=	tcp_v6_parse_md5_keys,
1713};
1714#endif
1715
 1716/* NOTE: A lot of things are set to zero explicitly by the call to
 1717 *       sk_alloc(), so they need not be done here.
1718 */
1719static int tcp_v6_init_sock(struct sock *sk)
1720{
1721	struct inet_connection_sock *icsk = inet_csk(sk);
1722
1723	tcp_init_sock(sk);
1724
1725	icsk->icsk_af_ops = &ipv6_specific;
1726
1727#ifdef CONFIG_TCP_MD5SIG
1728	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
1729#endif
1730
1731	return 0;
1732}
1733
1734static void tcp_v6_destroy_sock(struct sock *sk)
1735{
1736	tcp_v4_destroy_sock(sk);
1737	inet6_destroy_sock(sk);
1738}
1739
1740#ifdef CONFIG_PROC_FS
1741/* Proc filesystem TCPv6 sock list dumping. */
1742static void get_openreq6(struct seq_file *seq,
1743			 const struct sock *sk, struct request_sock *req, int i, kuid_t uid)
1744{
1745	int ttd = req->expires - jiffies;
1746	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
1747	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;
1748
1749	if (ttd < 0)
1750		ttd = 0;
1751
1752	seq_printf(seq,
1753		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1754		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
1755		   i,
1756		   src->s6_addr32[0], src->s6_addr32[1],
1757		   src->s6_addr32[2], src->s6_addr32[3],
1758		   inet_rsk(req)->ir_num,
1759		   dest->s6_addr32[0], dest->s6_addr32[1],
1760		   dest->s6_addr32[2], dest->s6_addr32[3],
1761		   ntohs(inet_rsk(req)->ir_rmt_port),
1762		   TCP_SYN_RECV,
1763		   0, 0, /* could print option size, but that is af dependent. */
1764		   1,   /* timers active (only the expire timer) */
1765		   jiffies_to_clock_t(ttd),
1766		   req->num_timeout,
1767		   from_kuid_munged(seq_user_ns(seq), uid),
1768		   0,  /* non standard timer */
1769		   0, /* open_requests have no inode */
1770		   0, req);
1771}
1772
1773static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1774{
1775	const struct in6_addr *dest, *src;
1776	__u16 destp, srcp;
1777	int timer_active;
1778	unsigned long timer_expires;
1779	const struct inet_sock *inet = inet_sk(sp);
1780	const struct tcp_sock *tp = tcp_sk(sp);
1781	const struct inet_connection_sock *icsk = inet_csk(sp);
1782
1783	dest  = &sp->sk_v6_daddr;
1784	src   = &sp->sk_v6_rcv_saddr;
1785	destp = ntohs(inet->inet_dport);
1786	srcp  = ntohs(inet->inet_sport);
1787
1788	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
1789		timer_active	= 1;
1790		timer_expires	= icsk->icsk_timeout;
1791	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1792		timer_active	= 4;
1793		timer_expires	= icsk->icsk_timeout;
1794	} else if (timer_pending(&sp->sk_timer)) {
1795		timer_active	= 2;
1796		timer_expires	= sp->sk_timer.expires;
1797	} else {
1798		timer_active	= 0;
1799		timer_expires = jiffies;
1800	}
1801
1802	seq_printf(seq,
1803		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1804		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
1805		   i,
1806		   src->s6_addr32[0], src->s6_addr32[1],
1807		   src->s6_addr32[2], src->s6_addr32[3], srcp,
1808		   dest->s6_addr32[0], dest->s6_addr32[1],
1809		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
1810		   sp->sk_state,
1811		   tp->write_seq-tp->snd_una,
1812		   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
1813		   timer_active,
1814		   jiffies_delta_to_clock_t(timer_expires - jiffies),
1815		   icsk->icsk_retransmits,
1816		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
1817		   icsk->icsk_probes_out,
1818		   sock_i_ino(sp),
1819		   atomic_read(&sp->sk_refcnt), sp,
1820		   jiffies_to_clock_t(icsk->icsk_rto),
1821		   jiffies_to_clock_t(icsk->icsk_ack.ato),
1822		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
1823		   tp->snd_cwnd,
1824		   tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh
1825		   );
1826}
1827
1828static void get_timewait6_sock(struct seq_file *seq,
1829			       struct inet_timewait_sock *tw, int i)
1830{
1831	const struct in6_addr *dest, *src;
1832	__u16 destp, srcp;
1833	s32 delta = tw->tw_ttd - inet_tw_time_stamp();
1834
1835	dest = &tw->tw_v6_daddr;
1836	src  = &tw->tw_v6_rcv_saddr;
1837	destp = ntohs(tw->tw_dport);
1838	srcp  = ntohs(tw->tw_sport);
1839
1840	seq_printf(seq,
1841		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1842		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
1843		   i,
1844		   src->s6_addr32[0], src->s6_addr32[1],
1845		   src->s6_addr32[2], src->s6_addr32[3], srcp,
1846		   dest->s6_addr32[0], dest->s6_addr32[1],
1847		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
1848		   tw->tw_substate, 0, 0,
1849		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
1850		   atomic_read(&tw->tw_refcnt), tw);
1851}
1852
1853static int tcp6_seq_show(struct seq_file *seq, void *v)
1854{
1855	struct tcp_iter_state *st;
1856	struct sock *sk = v;
1857
1858	if (v == SEQ_START_TOKEN) {
1859		seq_puts(seq,
1860			 "  sl  "
1861			 "local_address                         "
1862			 "remote_address                        "
1863			 "st tx_queue rx_queue tr tm->when retrnsmt"
1864			 "   uid  timeout inode\n");
1865		goto out;
1866	}
1867	st = seq->private;
1868
1869	switch (st->state) {
1870	case TCP_SEQ_STATE_LISTENING:
1871	case TCP_SEQ_STATE_ESTABLISHED:
1872		if (sk->sk_state == TCP_TIME_WAIT)
1873			get_timewait6_sock(seq, v, st->num);
1874		else
1875			get_tcp6_sock(seq, v, st->num);
1876		break;
1877	case TCP_SEQ_STATE_OPENREQ:
1878		get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
 1879		break;
1880	}
1881out:
1882	return 0;
1883}
1884
1885static const struct file_operations tcp6_afinfo_seq_fops = {
1886	.owner   = THIS_MODULE,
1887	.open    = tcp_seq_open,
1888	.read    = seq_read,
1889	.llseek  = seq_lseek,
1890	.release = seq_release_net
1891};
1892
1893static struct tcp_seq_afinfo tcp6_seq_afinfo = {
1894	.name		= "tcp6",
1895	.family		= AF_INET6,
1896	.seq_fops	= &tcp6_afinfo_seq_fops,
1897	.seq_ops	= {
1898		.show		= tcp6_seq_show,
1899	},
1900};
1901
1902int __net_init tcp6_proc_init(struct net *net)
1903{
1904	return tcp_proc_register(net, &tcp6_seq_afinfo);
1905}
1906
1907void tcp6_proc_exit(struct net *net)
1908{
1909	tcp_proc_unregister(net, &tcp6_seq_afinfo);
1910}
1911#endif
1912
1913static void tcp_v6_clear_sk(struct sock *sk, int size)
1914{
1915	struct inet_sock *inet = inet_sk(sk);
1916
1917	/* we do not want to clear pinet6 field, because of RCU lookups */
1918	sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6));
1919
1920	size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
1921	memset(&inet->pinet6 + 1, 0, size);
1922}
1923
1924struct proto tcpv6_prot = {
1925	.name			= "TCPv6",
1926	.owner			= THIS_MODULE,
1927	.close			= tcp_close,
1928	.connect		= tcp_v6_connect,
1929	.disconnect		= tcp_disconnect,
1930	.accept			= inet_csk_accept,
1931	.ioctl			= tcp_ioctl,
1932	.init			= tcp_v6_init_sock,
1933	.destroy		= tcp_v6_destroy_sock,
1934	.shutdown		= tcp_shutdown,
1935	.setsockopt		= tcp_setsockopt,
1936	.getsockopt		= tcp_getsockopt,
1937	.recvmsg		= tcp_recvmsg,
1938	.sendmsg		= tcp_sendmsg,
1939	.sendpage		= tcp_sendpage,
1940	.backlog_rcv		= tcp_v6_do_rcv,
1941	.release_cb		= tcp_release_cb,
1942	.mtu_reduced		= tcp_v6_mtu_reduced,
1943	.hash			= tcp_v6_hash,
1944	.unhash			= inet_unhash,
1945	.get_port		= inet_csk_get_port,
1946	.enter_memory_pressure	= tcp_enter_memory_pressure,
1947	.stream_memory_free	= tcp_stream_memory_free,
1948	.sockets_allocated	= &tcp_sockets_allocated,
1949	.memory_allocated	= &tcp_memory_allocated,
1950	.memory_pressure	= &tcp_memory_pressure,
1951	.orphan_count		= &tcp_orphan_count,
1952	.sysctl_mem		= sysctl_tcp_mem,
1953	.sysctl_wmem		= sysctl_tcp_wmem,
1954	.sysctl_rmem		= sysctl_tcp_rmem,
1955	.max_header		= MAX_TCP_HEADER,
1956	.obj_size		= sizeof(struct tcp6_sock),
1957	.slab_flags		= SLAB_DESTROY_BY_RCU,
1958	.twsk_prot		= &tcp6_timewait_sock_ops,
1959	.rsk_prot		= &tcp6_request_sock_ops,
1960	.h.hashinfo		= &tcp_hashinfo,
1961	.no_autobind		= true,
1962#ifdef CONFIG_COMPAT
1963	.compat_setsockopt	= compat_tcp_setsockopt,
1964	.compat_getsockopt	= compat_tcp_getsockopt,
1965#endif
1966#ifdef CONFIG_MEMCG_KMEM
1967	.proto_cgroup		= tcp_proto_cgroup,
1968#endif
1969	.clear_sk		= tcp_v6_clear_sk,
1970};
1971
1972static const struct inet6_protocol tcpv6_protocol = {
1973	.early_demux	=	tcp_v6_early_demux,
1974	.handler	=	tcp_v6_rcv,
1975	.err_handler	=	tcp_v6_err,
 
 
 
 
1976	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
1977};
1978
1979static struct inet_protosw tcpv6_protosw = {
1980	.type		=	SOCK_STREAM,
1981	.protocol	=	IPPROTO_TCP,
1982	.prot		=	&tcpv6_prot,
1983	.ops		=	&inet6_stream_ops,
1984	.no_check	=	0,
1985	.flags		=	INET_PROTOSW_PERMANENT |
1986				INET_PROTOSW_ICSK,
1987};
1988
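/* The per-namespace control socket created here (net->ipv6.tcp_sk) is the
 * one tcp_v6_send_response() uses to emit RSTs and stateless ACKs.
 */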
1989static int __net_init tcpv6_net_init(struct net *net)
1990{
1991	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
1992				    SOCK_RAW, IPPROTO_TCP, net);
1993}
1994
1995static void __net_exit tcpv6_net_exit(struct net *net)
1996{
1997	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
1998}
1999
2000static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
2001{
2002	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
2003}
2004
2005static struct pernet_operations tcpv6_net_ops = {
2006	.init	    = tcpv6_net_init,
2007	.exit	    = tcpv6_net_exit,
2008	.exit_batch = tcpv6_net_exit_batch,
2009};
2010
2011int __init tcpv6_init(void)
2012{
2013	int ret;
2014
2015	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
2016	if (ret)
2017		goto out;
2018
2019	/* register inet6 protocol */
2020	ret = inet6_register_protosw(&tcpv6_protosw);
2021	if (ret)
2022		goto out_tcpv6_protocol;
2023
2024	ret = register_pernet_subsys(&tcpv6_net_ops);
2025	if (ret)
2026		goto out_tcpv6_protosw;
2027out:
2028	return ret;
2029
2030out_tcpv6_protosw:
2031	inet6_unregister_protosw(&tcpv6_protosw);
2032out_tcpv6_protocol:
2033	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
 
 
2034	goto out;
2035}
2036
2037void tcpv6_exit(void)
2038{
2039	unregister_pernet_subsys(&tcpv6_net_ops);
2040	inet6_unregister_protosw(&tcpv6_protosw);
2041	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2042}
v3.5.6
   1/*
   2 *	TCP over IPv6
   3 *	Linux INET6 implementation
   4 *
   5 *	Authors:
   6 *	Pedro Roque		<roque@di.fc.ul.pt>
   7 *
   8 *	Based on:
   9 *	linux/net/ipv4/tcp.c
  10 *	linux/net/ipv4/tcp_input.c
  11 *	linux/net/ipv4/tcp_output.c
  12 *
  13 *	Fixes:
  14 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
  15 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
  16 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
  17 *					a single port at the same time.
  18 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
  19 *
  20 *	This program is free software; you can redistribute it and/or
  21 *      modify it under the terms of the GNU General Public License
  22 *      as published by the Free Software Foundation; either version
  23 *      2 of the License, or (at your option) any later version.
  24 */
  25
  26#include <linux/bottom_half.h>
  27#include <linux/module.h>
  28#include <linux/errno.h>
  29#include <linux/types.h>
  30#include <linux/socket.h>
  31#include <linux/sockios.h>
  32#include <linux/net.h>
  33#include <linux/jiffies.h>
  34#include <linux/in.h>
  35#include <linux/in6.h>
  36#include <linux/netdevice.h>
  37#include <linux/init.h>
  38#include <linux/jhash.h>
  39#include <linux/ipsec.h>
  40#include <linux/times.h>
  41#include <linux/slab.h>
  42
  43#include <linux/ipv6.h>
  44#include <linux/icmpv6.h>
  45#include <linux/random.h>
  46
  47#include <net/tcp.h>
  48#include <net/ndisc.h>
  49#include <net/inet6_hashtables.h>
  50#include <net/inet6_connection_sock.h>
  51#include <net/ipv6.h>
  52#include <net/transp_v6.h>
  53#include <net/addrconf.h>
  54#include <net/ip6_route.h>
  55#include <net/ip6_checksum.h>
  56#include <net/inet_ecn.h>
  57#include <net/protocol.h>
  58#include <net/xfrm.h>
  59#include <net/snmp.h>
  60#include <net/dsfield.h>
  61#include <net/timewait_sock.h>
  62#include <net/netdma.h>
  63#include <net/inet_common.h>
  64#include <net/secure_seq.h>
  65#include <net/tcp_memcontrol.h>
  66
  67#include <asm/uaccess.h>
  68
  69#include <linux/proc_fs.h>
  70#include <linux/seq_file.h>
  71
  72#include <linux/crypto.h>
  73#include <linux/scatterlist.h>
  74
  75static void	tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
  76static void	tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
  77				      struct request_sock *req);
  78
  79static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
  80static void	__tcp_v6_send_check(struct sk_buff *skb,
  81				    const struct in6_addr *saddr,
  82				    const struct in6_addr *daddr);
  83
  84static const struct inet_connection_sock_af_ops ipv6_mapped;
  85static const struct inet_connection_sock_af_ops ipv6_specific;
  86#ifdef CONFIG_TCP_MD5SIG
  87static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
  88static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
  89#else
  90static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
  91						   const struct in6_addr *addr)
  92{
  93	return NULL;
  94}
  95#endif
   96
  97static void tcp_v6_hash(struct sock *sk)
  98{
  99	if (sk->sk_state != TCP_CLOSE) {
 100		if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
 101			tcp_prot.hash(sk);
 102			return;
 103		}
 104		local_bh_disable();
 105		__inet6_hash(sk, NULL);
 106		local_bh_enable();
 107	}
 108}
 109
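/* IPv6 pseudo-header checksum (RFC 2460, section 8.1) over the addresses,
 * the upper-layer length and the TCP protocol number.
 */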
 110static __inline__ __sum16 tcp_v6_check(int len,
 111				   const struct in6_addr *saddr,
 112				   const struct in6_addr *daddr,
 113				   __wsum base)
 114{
 115	return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
 116}
 117
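/* Initial sequence number: a keyed hash over the {saddr, daddr, sport, dport}
 * tuple, so ISNs are hard to predict off path (in the spirit of RFC 6528).
 */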
 118static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
 119{
 120	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
 121					    ipv6_hdr(skb)->saddr.s6_addr32,
 122					    tcp_hdr(skb)->dest,
 123					    tcp_hdr(skb)->source);
 124}
 125
 126static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 127			  int addr_len)
 128{
 129	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
 130	struct inet_sock *inet = inet_sk(sk);
 131	struct inet_connection_sock *icsk = inet_csk(sk);
 132	struct ipv6_pinfo *np = inet6_sk(sk);
 133	struct tcp_sock *tp = tcp_sk(sk);
 134	struct in6_addr *saddr = NULL, *final_p, final;
 135	struct rt6_info *rt;
 136	struct flowi6 fl6;
 137	struct dst_entry *dst;
 138	int addr_type;
 139	int err;
 140
 141	if (addr_len < SIN6_LEN_RFC2133)
 142		return -EINVAL;
 143
 144	if (usin->sin6_family != AF_INET6)
 145		return -EAFNOSUPPORT;
 146
 147	memset(&fl6, 0, sizeof(fl6));
 148
 149	if (np->sndflow) {
  150		fl6.flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
  151		IP6_ECN_flow_init(fl6.flowlabel);
  152		if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
 153			struct ip6_flowlabel *flowlabel;
 154			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
 155			if (flowlabel == NULL)
 156				return -EINVAL;
 157			usin->sin6_addr = flowlabel->dst;
 158			fl6_sock_release(flowlabel);
 159		}
 160	}
 161
 162	/*
 163	 *	connect() to INADDR_ANY means loopback (BSD'ism).
 164	 */
 165
  166	if (ipv6_addr_any(&usin->sin6_addr))
  167		usin->sin6_addr.s6_addr[15] = 0x1;
  168
  169	addr_type = ipv6_addr_type(&usin->sin6_addr);
  170
  171	if (addr_type & IPV6_ADDR_MULTICAST)
  172		return -ENETUNREACH;
  173
  174	if (addr_type & IPV6_ADDR_LINKLOCAL) {
 175		if (addr_len >= sizeof(struct sockaddr_in6) &&
 176		    usin->sin6_scope_id) {
  177			/* If an interface is set while binding, the
  178			 * indices must coincide.
  179			 */
 180			if (sk->sk_bound_dev_if &&
 181			    sk->sk_bound_dev_if != usin->sin6_scope_id)
 182				return -EINVAL;
 183
 184			sk->sk_bound_dev_if = usin->sin6_scope_id;
 185		}
 186
 187		/* Connect to link-local address requires an interface */
 188		if (!sk->sk_bound_dev_if)
 189			return -EINVAL;
 190	}
 191
 192	if (tp->rx_opt.ts_recent_stamp &&
 193	    !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
 194		tp->rx_opt.ts_recent = 0;
 195		tp->rx_opt.ts_recent_stamp = 0;
 196		tp->write_seq = 0;
 197	}
 198
 199	np->daddr = usin->sin6_addr;
 200	np->flow_label = fl6.flowlabel;
 201
 202	/*
 203	 *	TCP over IPv4
 204	 */
 205
 206	if (addr_type == IPV6_ADDR_MAPPED) {
 207		u32 exthdrlen = icsk->icsk_ext_hdr_len;
 208		struct sockaddr_in sin;
 209
 210		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
 211
 212		if (__ipv6_only_sock(sk))
 213			return -ENETUNREACH;
 214
 215		sin.sin_family = AF_INET;
 216		sin.sin_port = usin->sin6_port;
 217		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
 218
 219		icsk->icsk_af_ops = &ipv6_mapped;
 220		sk->sk_backlog_rcv = tcp_v4_do_rcv;
 221#ifdef CONFIG_TCP_MD5SIG
 222		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
 223#endif
 224
 225		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
 226
 227		if (err) {
 228			icsk->icsk_ext_hdr_len = exthdrlen;
 229			icsk->icsk_af_ops = &ipv6_specific;
 230			sk->sk_backlog_rcv = tcp_v6_do_rcv;
 231#ifdef CONFIG_TCP_MD5SIG
 232			tp->af_specific = &tcp_sock_ipv6_specific;
 233#endif
 234			goto failure;
 235		} else {
 236			ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
 237			ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
 238					       &np->rcv_saddr);
 239		}
 240
 241		return err;
 242	}
 243
 244	if (!ipv6_addr_any(&np->rcv_saddr))
 245		saddr = &np->rcv_saddr;
 246
 247	fl6.flowi6_proto = IPPROTO_TCP;
 248	fl6.daddr = np->daddr;
 249	fl6.saddr = saddr ? *saddr : np->saddr;
 250	fl6.flowi6_oif = sk->sk_bound_dev_if;
 251	fl6.flowi6_mark = sk->sk_mark;
 252	fl6.fl6_dport = usin->sin6_port;
 253	fl6.fl6_sport = inet->inet_sport;
 254
 255	final_p = fl6_update_dst(&fl6, np->opt, &final);
 256
 257	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
 258
 259	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true);
 260	if (IS_ERR(dst)) {
 261		err = PTR_ERR(dst);
 262		goto failure;
 263	}
 264
 265	if (saddr == NULL) {
 266		saddr = &fl6.saddr;
 267		np->rcv_saddr = *saddr;
 268	}
 269
 270	/* set the source address */
 271	np->saddr = *saddr;
 272	inet->inet_rcv_saddr = LOOPBACK4_IPV6;
 273
 274	sk->sk_gso_type = SKB_GSO_TCPV6;
 275	__ip6_dst_store(sk, dst, NULL, NULL);
 276
 277	rt = (struct rt6_info *) dst;
 278	if (tcp_death_row.sysctl_tw_recycle &&
 279	    !tp->rx_opt.ts_recent_stamp &&
 280	    ipv6_addr_equal(&rt->rt6i_dst.addr, &np->daddr)) {
 281		struct inet_peer *peer = rt6_get_peer(rt);
 282		/*
  283		 * VJ's idea. We save the last timestamp seen from
  284		 * the destination in the peer table when entering the
  285		 * TIME-WAIT state, and initialize rx_opt.ts_recent from it
  286		 * when trying a new connection.
 287		 */
 288		if (peer) {
 289			inet_peer_refcheck(peer);
 290			if ((u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
 291				tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
 292				tp->rx_opt.ts_recent = peer->tcp_ts;
 293			}
 294		}
 295	}
 296
 297	icsk->icsk_ext_hdr_len = 0;
 298	if (np->opt)
 299		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
 300					  np->opt->opt_nflen);
 301
 302	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
 303
 304	inet->inet_dport = usin->sin6_port;
 305
 306	tcp_set_state(sk, TCP_SYN_SENT);
 307	err = inet6_hash_connect(&tcp_death_row, sk);
 308	if (err)
 309		goto late_failure;
 310
 311	if (!tp->write_seq)
 312		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
 313							     np->daddr.s6_addr32,
 314							     inet->inet_sport,
 315							     inet->inet_dport);
 316
 317	err = tcp_connect(sk);
 318	if (err)
 319		goto late_failure;
 320
 321	return 0;
 322
 323late_failure:
 324	tcp_set_state(sk, TCP_CLOSE);
 325	__sk_dst_reset(sk);
 326failure:
 327	inet->inet_dport = 0;
 328	sk->sk_route_caps = 0;
 329	return err;
 330}
  331
 332static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 333		u8 type, u8 code, int offset, __be32 info)
 334{
 335	const struct ipv6hdr *hdr = (const struct ipv6hdr*)skb->data;
 336	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
 337	struct ipv6_pinfo *np;
 338	struct sock *sk;
 339	int err;
 340	struct tcp_sock *tp;
 341	__u32 seq;
 342	struct net *net = dev_net(skb->dev);
 343
 344	sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
 345			th->dest, &hdr->saddr, th->source, skb->dev->ifindex);
 346
 347	if (sk == NULL) {
 348		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
 349				   ICMP6_MIB_INERRORS);
 350		return;
 351	}
 352
 353	if (sk->sk_state == TCP_TIME_WAIT) {
 354		inet_twsk_put(inet_twsk(sk));
 355		return;
 356	}
 357
 358	bh_lock_sock(sk);
 359	if (sock_owned_by_user(sk))
 360		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
 361
 362	if (sk->sk_state == TCP_CLOSE)
 363		goto out;
 364
 365	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
 366		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
 367		goto out;
 368	}
 369
 370	tp = tcp_sk(sk);
 371	seq = ntohl(th->seq);
 372	if (sk->sk_state != TCP_LISTEN &&
 373	    !between(seq, tp->snd_una, tp->snd_nxt)) {
 374		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
 375		goto out;
 376	}
 377
 378	np = inet6_sk(sk);
  379
 380	if (type == ICMPV6_PKT_TOOBIG) {
  381		struct dst_entry *dst;
 382
 383		if (sock_owned_by_user(sk))
 384			goto out;
 385		if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
 386			goto out;
 387
 388		/* icmp should have updated the destination cache entry */
 389		dst = __sk_dst_check(sk, np->dst_cookie);
 390
 391		if (dst == NULL) {
 392			struct inet_sock *inet = inet_sk(sk);
 393			struct flowi6 fl6;
 394
 395			/* BUGGG_FUTURE: Again, it is not clear how
 396			   to handle rthdr case. Ignore this complexity
 397			   for now.
 398			 */
 399			memset(&fl6, 0, sizeof(fl6));
 400			fl6.flowi6_proto = IPPROTO_TCP;
 401			fl6.daddr = np->daddr;
 402			fl6.saddr = np->saddr;
 403			fl6.flowi6_oif = sk->sk_bound_dev_if;
 404			fl6.flowi6_mark = sk->sk_mark;
 405			fl6.fl6_dport = inet->inet_dport;
 406			fl6.fl6_sport = inet->inet_sport;
 407			security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
 408
 409			dst = ip6_dst_lookup_flow(sk, &fl6, NULL, false);
 410			if (IS_ERR(dst)) {
 411				sk->sk_err_soft = -PTR_ERR(dst);
 412				goto out;
 413			}
 414
 415		} else
 416			dst_hold(dst);
 417
 418		if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
 419			tcp_sync_mss(sk, dst_mtu(dst));
 420			tcp_simple_retransmit(sk);
 421		} /* else let the usual retransmit timer handle it */
 422		dst_release(dst);
 423		goto out;
 424	}
 425
 426	icmpv6_err_convert(type, code, &err);
 427
  428	/* Might be for a request_sock */
 429	switch (sk->sk_state) {
 430		struct request_sock *req, **prev;
 431	case TCP_LISTEN:
 432		if (sock_owned_by_user(sk))
 433			goto out;
 434
 435		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
 436					   &hdr->saddr, inet6_iif(skb));
 437		if (!req)
 438			goto out;
 439
 440		/* ICMPs are not backlogged, hence we cannot get
 441		 * an established socket here.
 442		 */
 443		WARN_ON(req->sk != NULL);
 444
 445		if (seq != tcp_rsk(req)->snt_isn) {
 446			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
 447			goto out;
 448		}
 449
 450		inet_csk_reqsk_queue_drop(sk, req, prev);
 
 451		goto out;
 452
 453	case TCP_SYN_SENT:
 454	case TCP_SYN_RECV:  /* Cannot happen.
  455			       It can, if SYNs are crossed. --ANK */
 456		if (!sock_owned_by_user(sk)) {
 457			sk->sk_err = err;
 458			sk->sk_error_report(sk);		/* Wake people up to see the error (see connect in sock.c) */
 459
 460			tcp_done(sk);
 461		} else
 462			sk->sk_err_soft = err;
 463		goto out;
 464	}
 465
 466	if (!sock_owned_by_user(sk) && np->recverr) {
 467		sk->sk_err = err;
 468		sk->sk_error_report(sk);
 469	} else
 470		sk->sk_err_soft = err;
 471
 472out:
 473	bh_unlock_sock(sk);
 474	sock_put(sk);
 475}
 476
 477
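/* Build and transmit a SYN|ACK for a pending open request: route towards the
 * peer recorded in the request_sock, let tcp_make_synack() build the segment,
 * checksum it against the request's addresses and send it with ip6_xmit().
 */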
 478static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
 479			      struct request_values *rvp,
 
 480			      u16 queue_mapping)
 481{
 482	struct inet6_request_sock *treq = inet6_rsk(req);
 483	struct ipv6_pinfo *np = inet6_sk(sk);
  484	struct sk_buff *skb;
  485	struct ipv6_txoptions *opt = NULL;
  486	struct in6_addr *final_p, final;
 487	struct flowi6 fl6;
 488	struct dst_entry *dst;
 489	int err;
 490
 491	memset(&fl6, 0, sizeof(fl6));
 492	fl6.flowi6_proto = IPPROTO_TCP;
 493	fl6.daddr = treq->rmt_addr;
 494	fl6.saddr = treq->loc_addr;
 495	fl6.flowlabel = 0;
 496	fl6.flowi6_oif = treq->iif;
 497	fl6.flowi6_mark = sk->sk_mark;
 498	fl6.fl6_dport = inet_rsk(req)->rmt_port;
 499	fl6.fl6_sport = inet_rsk(req)->loc_port;
 500	security_req_classify_flow(req, flowi6_to_flowi(&fl6));
 501
 502	opt = np->opt;
 503	final_p = fl6_update_dst(&fl6, opt, &final);
 504
 505	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
 506	if (IS_ERR(dst)) {
 507		err = PTR_ERR(dst);
 508		dst = NULL;
 509		goto done;
 510	}
 511	skb = tcp_make_synack(sk, dst, req, rvp);
 512	err = -ENOMEM;
 513	if (skb) {
  514		__tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);
 515
 516		fl6.daddr = treq->rmt_addr;
 517		skb_set_queue_mapping(skb, queue_mapping);
 518		err = ip6_xmit(sk, skb, &fl6, opt, np->tclass);
 519		err = net_xmit_eval(err);
 520	}
 521
 522done:
 523	if (opt && opt != np->opt)
 524		sock_kfree_s(sk, opt, opt->tot_len);
 525	dst_release(dst);
 526	return err;
 527}
 528
 529static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
 530			     struct request_values *rvp)
 531{
 532	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
  533	return tcp_v6_send_synack(sk, req, rvp, 0);
 534}
 535
 536static void tcp_v6_reqsk_destructor(struct request_sock *req)
 537{
 538	kfree_skb(inet6_rsk(req)->pktopts);
 539}
 540
 541#ifdef CONFIG_TCP_MD5SIG
 542static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
 543						   const struct in6_addr *addr)
 544{
 545	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
 546}
 547
 548static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
 549						struct sock *addr_sk)
 550{
 551	return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
 552}
 553
 554static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
 555						      struct request_sock *req)
 556{
 557	return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
 558}
 559
  560static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
 561				  int optlen)
 562{
 563	struct tcp_md5sig cmd;
 564	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
 565
 566	if (optlen < sizeof(cmd))
 567		return -EINVAL;
 568
 569	if (copy_from_user(&cmd, optval, sizeof(cmd)))
 570		return -EFAULT;
 571
 572	if (sin6->sin6_family != AF_INET6)
 573		return -EINVAL;
 574
 575	if (!cmd.tcpm_keylen) {
 576		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
 577			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
 578					      AF_INET);
 579		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
 580				      AF_INET6);
 581	}
 582
 583	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
 584		return -EINVAL;
 585
 586	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
 587		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
 588				      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
 589
 590	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
 591			      AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
 592}
 593
 594static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
 595					const struct in6_addr *daddr,
 596					const struct in6_addr *saddr, int nbytes)
 597{
 598	struct tcp6_pseudohdr *bp;
 599	struct scatterlist sg;
 600
 601	bp = &hp->md5_blk.ip6;
 602	/* 1. TCP pseudo-header (RFC2460) */
 603	bp->saddr = *saddr;
 604	bp->daddr = *daddr;
 605	bp->protocol = cpu_to_be32(IPPROTO_TCP);
 606	bp->len = cpu_to_be32(nbytes);
 607
 608	sg_init_one(&sg, bp, sizeof(*bp));
 609	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
 610}
 611
 612static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
 613			       const struct in6_addr *daddr, struct in6_addr *saddr,
 614			       const struct tcphdr *th)
 615{
 616	struct tcp_md5sig_pool *hp;
 617	struct hash_desc *desc;
 618
 619	hp = tcp_get_md5sig_pool();
 620	if (!hp)
 621		goto clear_hash_noput;
 622	desc = &hp->md5_desc;
 623
 624	if (crypto_hash_init(desc))
 625		goto clear_hash;
 626	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
 627		goto clear_hash;
 628	if (tcp_md5_hash_header(hp, th))
 629		goto clear_hash;
 630	if (tcp_md5_hash_key(hp, key))
 631		goto clear_hash;
 632	if (crypto_hash_final(desc, md5_hash))
 633		goto clear_hash;
 634
 635	tcp_put_md5sig_pool();
 636	return 0;
 637
 638clear_hash:
 639	tcp_put_md5sig_pool();
 640clear_hash_noput:
 641	memset(md5_hash, 0, 16);
 642	return 1;
 643}
 644
 645static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
 646			       const struct sock *sk,
 647			       const struct request_sock *req,
 648			       const struct sk_buff *skb)
 649{
 650	const struct in6_addr *saddr, *daddr;
 651	struct tcp_md5sig_pool *hp;
 652	struct hash_desc *desc;
 653	const struct tcphdr *th = tcp_hdr(skb);
 654
 655	if (sk) {
 656		saddr = &inet6_sk(sk)->saddr;
 657		daddr = &inet6_sk(sk)->daddr;
 658	} else if (req) {
 659		saddr = &inet6_rsk(req)->loc_addr;
 660		daddr = &inet6_rsk(req)->rmt_addr;
 661	} else {
 662		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
 663		saddr = &ip6h->saddr;
 664		daddr = &ip6h->daddr;
 665	}
 666
 667	hp = tcp_get_md5sig_pool();
 668	if (!hp)
 669		goto clear_hash_noput;
 670	desc = &hp->md5_desc;
 671
 672	if (crypto_hash_init(desc))
 673		goto clear_hash;
 674
 675	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
 676		goto clear_hash;
 677	if (tcp_md5_hash_header(hp, th))
 678		goto clear_hash;
 679	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
 680		goto clear_hash;
 681	if (tcp_md5_hash_key(hp, key))
 682		goto clear_hash;
 683	if (crypto_hash_final(desc, md5_hash))
 684		goto clear_hash;
 685
 686	tcp_put_md5sig_pool();
 687	return 0;
 688
 689clear_hash:
 690	tcp_put_md5sig_pool();
 691clear_hash_noput:
 692	memset(md5_hash, 0, 16);
 693	return 1;
 694}
 695
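/* Both ends must agree on the use of TCP MD5: a signature that is expected
 * but missing, present but unexpected, or simply wrong makes the caller drop
 * the segment (non-zero return value).
 */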
 696static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
 697{
 698	const __u8 *hash_location = NULL;
 699	struct tcp_md5sig_key *hash_expected;
 700	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
 701	const struct tcphdr *th = tcp_hdr(skb);
 702	int genhash;
 703	u8 newhash[16];
 704
 705	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
 706	hash_location = tcp_parse_md5sig_option(th);
 707
 708	/* We've parsed the options - do we have a hash? */
 709	if (!hash_expected && !hash_location)
 710		return 0;
 711
 712	if (hash_expected && !hash_location) {
 713		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
 714		return 1;
 715	}
 716
 717	if (!hash_expected && hash_location) {
 718		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
 719		return 1;
 720	}
 721
 722	/* check the signature */
 723	genhash = tcp_v6_md5_hash_skb(newhash,
 724				      hash_expected,
 725				      NULL, NULL, skb);
 726
 727	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
 728		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
 729				     genhash ? "failed" : "mismatch",
 730				     &ip6h->saddr, ntohs(th->source),
 731				     &ip6h->daddr, ntohs(th->dest));
 732		return 1;
 733	}
 734	return 0;
 735}
 736#endif
 737
 738struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
 739	.family		=	AF_INET6,
 740	.obj_size	=	sizeof(struct tcp6_request_sock),
 741	.rtx_syn_ack	=	tcp_v6_rtx_synack,
 742	.send_ack	=	tcp_v6_reqsk_send_ack,
 743	.destructor	=	tcp_v6_reqsk_destructor,
 744	.send_reset	=	tcp_v6_send_reset,
 745	.syn_ack_timeout = 	tcp_syn_ack_timeout,
 746};
 747
 748#ifdef CONFIG_TCP_MD5SIG
 749static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
 750	.md5_lookup	=	tcp_v6_reqsk_md5_lookup,
 751	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
 752};
 753#endif
 754
 755static void __tcp_v6_send_check(struct sk_buff *skb,
 756				const struct in6_addr *saddr, const struct in6_addr *daddr)
 757{
 758	struct tcphdr *th = tcp_hdr(skb);
 759
 760	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 761		th->check = ~tcp_v6_check(skb->len, saddr, daddr, 0);
 762		skb->csum_start = skb_transport_header(skb) - skb->head;
 763		skb->csum_offset = offsetof(struct tcphdr, check);
 764	} else {
 765		th->check = tcp_v6_check(skb->len, saddr, daddr,
 766					 csum_partial(th, th->doff << 2,
 767						      skb->csum));
 768	}
 769}
 770
 771static void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
 772{
 773	struct ipv6_pinfo *np = inet6_sk(sk);
 774
 775	__tcp_v6_send_check(skb, &np->saddr, &np->daddr);
 776}
 777
 778static int tcp_v6_gso_send_check(struct sk_buff *skb)
 779{
 780	const struct ipv6hdr *ipv6h;
 781	struct tcphdr *th;
 782
 783	if (!pskb_may_pull(skb, sizeof(*th)))
 784		return -EINVAL;
 785
 786	ipv6h = ipv6_hdr(skb);
 787	th = tcp_hdr(skb);
 788
 789	th->check = 0;
 790	skb->ip_summed = CHECKSUM_PARTIAL;
 791	__tcp_v6_send_check(skb, &ipv6h->saddr, &ipv6h->daddr);
 792	return 0;
 793}
 794
 795static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
 796					 struct sk_buff *skb)
 797{
 798	const struct ipv6hdr *iph = skb_gro_network_header(skb);
 799
 800	switch (skb->ip_summed) {
 801	case CHECKSUM_COMPLETE:
 802		if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr,
 803				  skb->csum)) {
 804			skb->ip_summed = CHECKSUM_UNNECESSARY;
 805			break;
 806		}
 807
 808		/* fall through */
 809	case CHECKSUM_NONE:
 810		NAPI_GRO_CB(skb)->flush = 1;
 811		return NULL;
 812	}
 813
 814	return tcp_gro_receive(head, skb);
 815}
 816
 817static int tcp6_gro_complete(struct sk_buff *skb)
 818{
 819	const struct ipv6hdr *iph = ipv6_hdr(skb);
 820	struct tcphdr *th = tcp_hdr(skb);
 821
 822	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
 823				  &iph->saddr, &iph->daddr, 0);
 824	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
 825
 826	return tcp_gro_complete(skb);
 827}
 828
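/* Common helper for RSTs and stateless ACKs: allocate a bare skb, swap the
 * addresses and ports of the incoming segment, optionally append timestamp
 * and MD5 options, and transmit through the per-netns control socket.
 */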
 829static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
  830				 u32 ts, struct tcp_md5sig_key *key, int rst, u8 tclass)
 831{
 832	const struct tcphdr *th = tcp_hdr(skb);
 833	struct tcphdr *t1;
 834	struct sk_buff *buff;
 835	struct flowi6 fl6;
 836	struct net *net = dev_net(skb_dst(skb)->dev);
 837	struct sock *ctl_sk = net->ipv6.tcp_sk;
 838	unsigned int tot_len = sizeof(struct tcphdr);
 839	struct dst_entry *dst;
 840	__be32 *topt;
 841
 842	if (ts)
 843		tot_len += TCPOLEN_TSTAMP_ALIGNED;
 844#ifdef CONFIG_TCP_MD5SIG
 845	if (key)
 846		tot_len += TCPOLEN_MD5SIG_ALIGNED;
 847#endif
 848
 849	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
 850			 GFP_ATOMIC);
 851	if (buff == NULL)
 852		return;
 853
 854	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
 855
 856	t1 = (struct tcphdr *) skb_push(buff, tot_len);
 857	skb_reset_transport_header(buff);
 858
 859	/* Swap the send and the receive. */
 860	memset(t1, 0, sizeof(*t1));
 861	t1->dest = th->source;
 862	t1->source = th->dest;
 863	t1->doff = tot_len / 4;
 864	t1->seq = htonl(seq);
 865	t1->ack_seq = htonl(ack);
 866	t1->ack = !rst || !th->ack;
 867	t1->rst = rst;
 868	t1->window = htons(win);
 869
 870	topt = (__be32 *)(t1 + 1);
 871
 872	if (ts) {
 873		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
 874				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
 875		*topt++ = htonl(tcp_time_stamp);
 876		*topt++ = htonl(ts);
 877	}
 878
 879#ifdef CONFIG_TCP_MD5SIG
 880	if (key) {
 881		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
 882				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
 883		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
 884				    &ipv6_hdr(skb)->saddr,
 885				    &ipv6_hdr(skb)->daddr, t1);
 886	}
 887#endif
 888
 889	memset(&fl6, 0, sizeof(fl6));
 890	fl6.daddr = ipv6_hdr(skb)->saddr;
 891	fl6.saddr = ipv6_hdr(skb)->daddr;
 
 892
 893	buff->ip_summed = CHECKSUM_PARTIAL;
 894	buff->csum = 0;
 895
 896	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
 897
 898	fl6.flowi6_proto = IPPROTO_TCP;
  899	fl6.flowi6_oif = inet6_iif(skb);
 900	fl6.fl6_dport = t1->dest;
 901	fl6.fl6_sport = t1->source;
 902	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
 903
  904	/* Pass a socket to ip6_dst_lookup even when it is for an RST;
  905	 * the underlying function will use it to retrieve the network
  906	 * namespace.
  907	 */
 908	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL, false);
 909	if (!IS_ERR(dst)) {
 910		skb_dst_set(buff, dst);
 911		ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
 912		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
 913		if (rst)
 914			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
 915		return;
 916	}
 917
 918	kfree_skb(buff);
 919}
 920
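/* Send an RST in response to an unexpected segment.  Never reply to another
 * RST; when no socket is known, the MD5 key (if any) is looked up through a
 * listener and the segment's signature verified before an RST goes out.
 */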
 921static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
 922{
 923	const struct tcphdr *th = tcp_hdr(skb);
 924	u32 seq = 0, ack_seq = 0;
 925	struct tcp_md5sig_key *key = NULL;
 926#ifdef CONFIG_TCP_MD5SIG
 927	const __u8 *hash_location = NULL;
 928	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
 929	unsigned char newhash[16];
 930	int genhash;
 931	struct sock *sk1 = NULL;
 932#endif
 
 933
 934	if (th->rst)
 935		return;
 936
 937	if (!ipv6_unicast_destination(skb))
 938		return;
 939
 940#ifdef CONFIG_TCP_MD5SIG
 941	hash_location = tcp_parse_md5sig_option(th);
 942	if (!sk && hash_location) {
 943		/*
  944		 * The active side is gone. Try to find a listening socket via
  945		 * the source port, and then find the md5 key through that socket.
  946		 * We do not weaken security here:
  947		 * the incoming packet is checked against the key that is found,
  948		 * and no RST is generated if the md5 hash doesn't match.
 949		 */
 950		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
 951					   &tcp_hashinfo, &ipv6h->daddr,
 
 952					   ntohs(th->source), inet6_iif(skb));
 953		if (!sk1)
 954			return;
 955
 956		rcu_read_lock();
 957		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
 958		if (!key)
 959			goto release_sk1;
 960
 961		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, NULL, skb);
 962		if (genhash || memcmp(hash_location, newhash, 16) != 0)
 963			goto release_sk1;
 964	} else {
 965		key = sk ? tcp_v6_md5_do_lookup(sk, &ipv6h->saddr) : NULL;
 966	}
 967#endif
 968
 969	if (th->ack)
 970		seq = ntohl(th->ack_seq);
 971	else
 972		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
 973			  (th->doff << 2);
 974
 975	tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1, 0);
 
 976
 977#ifdef CONFIG_TCP_MD5SIG
 978release_sk1:
 979	if (sk1) {
 980		rcu_read_unlock();
 981		sock_put(sk1);
 982	}
 983#endif
 984}
 985
 986static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts,
  987			    struct tcp_md5sig_key *key, u8 tclass)
 988{
 989	tcp_v6_send_response(skb, seq, ack, win, ts, key, 0, tclass);
 
 990}
 991
 992static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
 993{
 994	struct inet_timewait_sock *tw = inet_twsk(sk);
 995	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
 996
 997	tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
 998			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
 999			tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw),
1000			tw->tw_tclass);
 
1001
1002	inet_twsk_put(tw);
1003}
1004
1005static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
1006				  struct request_sock *req)
1007{
1008	tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent,
1009			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr), 0);
 
 
1010}
1011
1012
 1013static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
1014{
1015	struct request_sock *req, **prev;
1016	const struct tcphdr *th = tcp_hdr(skb);
1017	struct sock *nsk;
1018
1019	/* Find possible connection requests. */
1020	req = inet6_csk_search_req(sk, &prev, th->source,
1021				   &ipv6_hdr(skb)->saddr,
1022				   &ipv6_hdr(skb)->daddr, inet6_iif(skb));
1023	if (req)
1024		return tcp_check_req(sk, skb, req, prev);
1025
1026	nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
1027			&ipv6_hdr(skb)->saddr, th->source,
1028			&ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));
1029
1030	if (nsk) {
1031		if (nsk->sk_state != TCP_TIME_WAIT) {
1032			bh_lock_sock(nsk);
1033			return nsk;
1034		}
1035		inet_twsk_put(inet_twsk(nsk));
1036		return NULL;
1037	}
1038
1039#ifdef CONFIG_SYN_COOKIES
1040	if (!th->syn)
1041		sk = cookie_v6_check(sk, skb);
1042#endif
1043	return sk;
1044}
1045
1046/* FIXME: this is substantially similar to the ipv4 code.
1047 * Can some kind of merge be done? -- erics
1048 */
1049static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1050{
1051	struct tcp_extend_values tmp_ext;
1052	struct tcp_options_received tmp_opt;
1053	const u8 *hash_location;
1054	struct request_sock *req;
1055	struct inet6_request_sock *treq;
1056	struct ipv6_pinfo *np = inet6_sk(sk);
1057	struct tcp_sock *tp = tcp_sk(sk);
1058	__u32 isn = TCP_SKB_CB(skb)->when;
1059	struct dst_entry *dst = NULL;
 
1060	bool want_cookie = false;
1061
1062	if (skb->protocol == htons(ETH_P_IP))
1063		return tcp_v4_conn_request(sk, skb);
1064
1065	if (!ipv6_unicast_destination(skb))
1066		goto drop;
1067
1068	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
 
1069		want_cookie = tcp_syn_flood_action(sk, skb, "TCPv6");
1070		if (!want_cookie)
1071			goto drop;
1072	}
1073
1074	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
 
1075		goto drop;
 
1076
1077	req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
1078	if (req == NULL)
1079		goto drop;
1080
1081#ifdef CONFIG_TCP_MD5SIG
1082	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
1083#endif
1084
1085	tcp_clear_options(&tmp_opt);
1086	tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
1087	tmp_opt.user_mss = tp->rx_opt.user_mss;
1088	tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
1089
1090	if (tmp_opt.cookie_plus > 0 &&
1091	    tmp_opt.saw_tstamp &&
1092	    !tp->rx_opt.cookie_out_never &&
1093	    (sysctl_tcp_cookie_size > 0 ||
1094	     (tp->cookie_values != NULL &&
1095	      tp->cookie_values->cookie_desired > 0))) {
1096		u8 *c;
1097		u32 *d;
1098		u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
1099		int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;
1100
1101		if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
1102			goto drop_and_free;
1103
1104		/* Secret recipe starts with IP addresses */
1105		d = (__force u32 *)&ipv6_hdr(skb)->daddr.s6_addr32[0];
1106		*mess++ ^= *d++;
1107		*mess++ ^= *d++;
1108		*mess++ ^= *d++;
1109		*mess++ ^= *d++;
1110		d = (__force u32 *)&ipv6_hdr(skb)->saddr.s6_addr32[0];
1111		*mess++ ^= *d++;
1112		*mess++ ^= *d++;
1113		*mess++ ^= *d++;
1114		*mess++ ^= *d++;
1115
1116		/* plus variable length Initiator Cookie */
1117		c = (u8 *)mess;
1118		while (l-- > 0)
1119			*c++ ^= *hash_location++;
1120
1121		want_cookie = false;	/* not our kind of cookie */
1122		tmp_ext.cookie_out_never = 0; /* false */
1123		tmp_ext.cookie_plus = tmp_opt.cookie_plus;
1124	} else if (!tp->rx_opt.cookie_in_always) {
1125		/* redundant indications, but ensure initialization. */
1126		tmp_ext.cookie_out_never = 1; /* true */
1127		tmp_ext.cookie_plus = 0;
1128	} else {
1129		goto drop_and_free;
1130	}
1131	tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;
1132
1133	if (want_cookie && !tmp_opt.saw_tstamp)
1134		tcp_clear_options(&tmp_opt);
1135
1136	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
1137	tcp_openreq_init(req, &tmp_opt, skb);
1138
1139	treq = inet6_rsk(req);
1140	treq->rmt_addr = ipv6_hdr(skb)->saddr;
1141	treq->loc_addr = ipv6_hdr(skb)->daddr;
1142	if (!want_cookie || tmp_opt.tstamp_ok)
1143		TCP_ECN_create_request(req, skb);
1144
1145	treq->iif = sk->sk_bound_dev_if;
1146
1147	/* So that link locals have meaning */
1148	if (!sk->sk_bound_dev_if &&
1149	    ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
1150		treq->iif = inet6_iif(skb);
1151
1152	if (!isn) {
1153		struct inet_peer *peer = NULL;
1154
1155		if (ipv6_opt_accepted(sk, skb) ||
1156		    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
1157		    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
 
1158			atomic_inc(&skb->users);
1159			treq->pktopts = skb;
1160		}
1161
1162		if (want_cookie) {
1163			isn = cookie_v6_init_sequence(sk, skb, &req->mss);
1164			req->cookie_ts = tmp_opt.tstamp_ok;
1165			goto have_isn;
1166		}
1167
1168		/* VJ's idea. We save last timestamp seen
1169		 * from the destination in peer table, when entering
1170		 * state TIME-WAIT, and check against it before
1171		 * accepting new connection request.
1172		 *
 1173		 * If "isn" is not zero, this request hit a live
 1174		 * timewait bucket, so all the necessary checks
 1175		 * were already made while processing the timewait state.
1176		 */
1177		if (tmp_opt.saw_tstamp &&
1178		    tcp_death_row.sysctl_tw_recycle &&
1179		    (dst = inet6_csk_route_req(sk, req)) != NULL &&
1180		    (peer = rt6_get_peer((struct rt6_info *)dst)) != NULL &&
1181		    ipv6_addr_equal((struct in6_addr *)peer->daddr.addr.a6,
1182				    &treq->rmt_addr)) {
1183			inet_peer_refcheck(peer);
1184			if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
1185			    (s32)(peer->tcp_ts - req->ts_recent) >
1186							TCP_PAWS_WINDOW) {
1187				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
1188				goto drop_and_release;
1189			}
1190		}
1191		/* Kill the following clause, if you dislike this way. */
1192		else if (!sysctl_tcp_syncookies &&
1193			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
1194			  (sysctl_max_syn_backlog >> 2)) &&
1195			 (!peer || !peer->tcp_ts_stamp) &&
1196			 (!dst || !dst_metric(dst, RTAX_RTT))) {
 1197			/* Without syncookies the last quarter of the
 1198			 * backlog is reserved for destinations that are
 1199			 * proven to be alive.
 1200			 * It means that we keep communicating with
 1201			 * destinations that were already remembered
 1202			 * at the moment the synflood started.
 1203			 */
1204			LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI6/%u\n",
1205				       &treq->rmt_addr, ntohs(tcp_hdr(skb)->source));
1206			goto drop_and_release;
1207		}
1208
1209		isn = tcp_v6_init_sequence(skb);
1210	}
1211have_isn:
1212	tcp_rsk(req)->snt_isn = isn;
1213	tcp_rsk(req)->snt_synack = tcp_time_stamp;
1214
1215	if (security_inet_conn_request(sk, skb, req))
1216		goto drop_and_release;
1217
1218	if (tcp_v6_send_synack(sk, req,
1219			       (struct request_values *)&tmp_ext,
1220			       skb_get_queue_mapping(skb)) ||
1221	    want_cookie)
1222		goto drop_and_free;
1223
 
 
1224	inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1225	return 0;
1226
1227drop_and_release:
1228	dst_release(dst);
1229drop_and_free:
1230	reqsk_free(req);
1231drop:
 
1232	return 0; /* don't send reset */
1233}
1234
1235static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1236					  struct request_sock *req,
1237					  struct dst_entry *dst)
1238{
1239	struct inet6_request_sock *treq;
1240	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
1241	struct tcp6_sock *newtcp6sk;
1242	struct inet_sock *newinet;
1243	struct tcp_sock *newtp;
1244	struct sock *newsk;
1245	struct ipv6_txoptions *opt;
1246#ifdef CONFIG_TCP_MD5SIG
1247	struct tcp_md5sig_key *key;
1248#endif
 
1249
1250	if (skb->protocol == htons(ETH_P_IP)) {
1251		/*
1252		 *	v6 mapped
1253		 */
1254
1255		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);
1256
1257		if (newsk == NULL)
1258			return NULL;
1259
1260		newtcp6sk = (struct tcp6_sock *)newsk;
1261		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1262
1263		newinet = inet_sk(newsk);
1264		newnp = inet6_sk(newsk);
1265		newtp = tcp_sk(newsk);
1266
1267		memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1268
1269		ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr);
1270
1271		ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);
1272
1273		newnp->rcv_saddr = newnp->saddr;
1274
1275		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1276		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
1277#ifdef CONFIG_TCP_MD5SIG
1278		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1279#endif
1280
1281		newnp->ipv6_ac_list = NULL;
1282		newnp->ipv6_fl_list = NULL;
1283		newnp->pktoptions  = NULL;
1284		newnp->opt	   = NULL;
1285		newnp->mcast_oif   = inet6_iif(skb);
1286		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
1287		newnp->rcv_tclass  = ipv6_tclass(ipv6_hdr(skb));
 
 
1288
1289		/*
1290		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1291		 * here, tcp_create_openreq_child now does this for us, see the comment in
1292		 * that function for the gory details. -acme
1293		 */
1294
 1295		/* This is a tricky place. Until this moment the IPv4 tcp
 1296		   socket worked with the IPv6 icsk.icsk_af_ops.
 1297		   Sync it now.
 1298		 */
1299		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
1300
1301		return newsk;
1302	}
1303
1304	treq = inet6_rsk(req);
1305	opt = np->opt;
1306
1307	if (sk_acceptq_is_full(sk))
1308		goto out_overflow;
1309
1310	if (!dst) {
1311		dst = inet6_csk_route_req(sk, req);
1312		if (!dst)
1313			goto out;
1314	}
1315
1316	newsk = tcp_create_openreq_child(sk, req, skb);
1317	if (newsk == NULL)
1318		goto out_nonewsk;
1319
1320	/*
1321	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1322	 * count here, tcp_create_openreq_child now does this for us, see the
1323	 * comment in that function for the gory details. -acme
1324	 */
1325
1326	newsk->sk_gso_type = SKB_GSO_TCPV6;
1327	__ip6_dst_store(newsk, dst, NULL, NULL);
 
1328
1329	newtcp6sk = (struct tcp6_sock *)newsk;
1330	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1331
1332	newtp = tcp_sk(newsk);
1333	newinet = inet_sk(newsk);
1334	newnp = inet6_sk(newsk);
1335
1336	memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1337
1338	newnp->daddr = treq->rmt_addr;
1339	newnp->saddr = treq->loc_addr;
1340	newnp->rcv_saddr = treq->loc_addr;
1341	newsk->sk_bound_dev_if = treq->iif;
1342
1343	/* Now IPv6 options...
1344
1345	   First: no IPv4 options.
1346	 */
1347	newinet->inet_opt = NULL;
1348	newnp->ipv6_ac_list = NULL;
1349	newnp->ipv6_fl_list = NULL;
1350
1351	/* Clone RX bits */
1352	newnp->rxopt.all = np->rxopt.all;
1353
1354	/* Clone pktoptions received with SYN */
1355	newnp->pktoptions = NULL;
1356	if (treq->pktopts != NULL) {
1357		newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
1358		consume_skb(treq->pktopts);
1359		treq->pktopts = NULL;
 
1360		if (newnp->pktoptions)
1361			skb_set_owner_r(newnp->pktoptions, newsk);
1362	}
1363	newnp->opt	  = NULL;
1364	newnp->mcast_oif  = inet6_iif(skb);
1365	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1366	newnp->rcv_tclass = ipv6_tclass(ipv6_hdr(skb));
 
 
1367
1368	/* Clone native IPv6 options from listening socket (if any)
1369
 1370	   Yes, keeping a reference count would be much cleverer,
 1371	   but we do one more thing here: reattach the optmem
 1372	   to newsk.
1373	 */
1374	if (opt) {
1375		newnp->opt = ipv6_dup_options(newsk, opt);
1376		if (opt != np->opt)
1377			sock_kfree_s(sk, opt, opt->tot_len);
1378	}
1379
1380	inet_csk(newsk)->icsk_ext_hdr_len = 0;
1381	if (newnp->opt)
1382		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
1383						     newnp->opt->opt_flen);
1384
1385	tcp_mtup_init(newsk);
1386	tcp_sync_mss(newsk, dst_mtu(dst));
1387	newtp->advmss = dst_metric_advmss(dst);
1388	if (tcp_sk(sk)->rx_opt.user_mss &&
1389	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1390		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1391
1392	tcp_initialize_rcv_mss(newsk);
1393	if (tcp_rsk(req)->snt_synack)
1394		tcp_valid_rtt_meas(newsk,
1395		    tcp_time_stamp - tcp_rsk(req)->snt_synack);
1396	newtp->total_retrans = req->retrans;
1397
1398	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
1399	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
1400
1401#ifdef CONFIG_TCP_MD5SIG
1402	/* Copy over the MD5 key from the original socket */
1403	if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
 
1404		/* We're using one, so create a matching key
1405		 * on the newsk structure. If we fail to get
1406		 * memory, then we end up not copying the key
1407		 * across. Shucks.
1408		 */
1409		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newnp->daddr,
1410			       AF_INET6, key->key, key->keylen, GFP_ATOMIC);
 
1411	}
1412#endif
1413
1414	if (__inet_inherit_port(sk, newsk) < 0) {
1415		sock_put(newsk);
 
1416		goto out;
1417	}
1418	__inet6_hash(newsk, NULL);
1419
1420	return newsk;
1421
1422out_overflow:
1423	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1424out_nonewsk:
1425	if (opt && opt != np->opt)
1426		sock_kfree_s(sk, opt, opt->tot_len);
1427	dst_release(dst);
1428out:
1429	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1430	return NULL;
1431}
1432
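/* Validate the receive checksum: CHECKSUM_COMPLETE packets are verified
 * against the pseudo-header here; otherwise the pseudo-header sum is seeded
 * into skb->csum, short packets (<= 76 bytes) are checked immediately and
 * longer ones are completed later in the receive path.
 */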
1433static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
1434{
1435	if (skb->ip_summed == CHECKSUM_COMPLETE) {
1436		if (!tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr,
1437				  &ipv6_hdr(skb)->daddr, skb->csum)) {
1438			skb->ip_summed = CHECKSUM_UNNECESSARY;
1439			return 0;
1440		}
1441	}
1442
1443	skb->csum = ~csum_unfold(tcp_v6_check(skb->len,
1444					      &ipv6_hdr(skb)->saddr,
1445					      &ipv6_hdr(skb)->daddr, 0));
1446
1447	if (skb->len <= 76) {
1448		return __skb_checksum_complete(skb);
1449	}
1450	return 0;
1451}
1452
 1453/* The socket must have its spinlock held when we get
1454 * here.
1455 *
1456 * We have a potential double-lock case here, so even when
1457 * doing backlog processing we use the BH locking scheme.
1458 * This is because we cannot sleep with the original spinlock
1459 * held.
1460 */
1461static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1462{
1463	struct ipv6_pinfo *np = inet6_sk(sk);
1464	struct tcp_sock *tp;
1465	struct sk_buff *opt_skb = NULL;
1466
 1467	/* Imagine: the socket is IPv6. An IPv4 packet arrives,
 1468	   goes to the IPv4 receive handler and is backlogged.
 1469	   From the backlog it always ends up here. Kerboom...
 1470	   Fortunately, tcp_rcv_established and rcv_established
 1471	   handle them correctly, but that is not the case with
 1472	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
 1473	 */
1474
1475	if (skb->protocol == htons(ETH_P_IP))
1476		return tcp_v4_do_rcv(sk, skb);
1477
1478#ifdef CONFIG_TCP_MD5SIG
 1479	if (tcp_v6_inbound_md5_hash(sk, skb))
1480		goto discard;
1481#endif
1482
1483	if (sk_filter(sk, skb))
1484		goto discard;
1485
1486	/*
1487	 *	socket locking is here for SMP purposes as backlog rcv
1488	 *	is currently called with bh processing disabled.
1489	 */
1490
 1491	/* Do Stevens' IPV6_PKTOPTIONS.
 1492
 1493	   Yes, guys, this is the only place in our code where we
 1494	   can handle it without affecting IPv4.
 1495	   The rest of the code is protocol independent,
 1496	   and I do not like the idea of uglifying IPv4.
 1497
 1498	   Actually, the whole idea behind IPV6_PKTOPTIONS
 1499	   does not look very well thought out. For now we latch
 1500	   the options received in the last packet enqueued
 1501	   by tcp. Feel free to propose a better solution.
 1502					       --ANK (980728)
 1503	 */
1504	if (np->rxopt.all)
1505		opt_skb = skb_clone(skb, GFP_ATOMIC);
1506
1507	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
 
 
1508		sock_rps_save_rxhash(sk, skb);
1509		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
 1510			goto reset;
1511		if (opt_skb)
1512			goto ipv6_pktoptions;
1513		return 0;
1514	}
1515
1516	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1517		goto csum_err;
1518
1519	if (sk->sk_state == TCP_LISTEN) {
1520		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
1521		if (!nsk)
1522			goto discard;
1523
1524		/*
1525		 * Queue it on the new socket if the new socket is active,
 1526		 * otherwise we just short-circuit this and continue with
 1527		 * the new socket.
1528		 */
 1529		if (nsk != sk) {
1530			sock_rps_save_rxhash(nsk, skb);
1531			if (tcp_child_process(sk, nsk, skb))
1532				goto reset;
1533			if (opt_skb)
1534				__kfree_skb(opt_skb);
1535			return 0;
1536		}
1537	} else
1538		sock_rps_save_rxhash(sk, skb);
1539
1540	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
1541		goto reset;
1542	if (opt_skb)
1543		goto ipv6_pktoptions;
1544	return 0;
1545
1546reset:
1547	tcp_v6_send_reset(sk, skb);
1548discard:
1549	if (opt_skb)
1550		__kfree_skb(opt_skb);
1551	kfree_skb(skb);
1552	return 0;
1553csum_err:
 
1554	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1555	goto discard;
1556
1557
1558ipv6_pktoptions:
 1559	/* You may ask, what is this for?
1560
1561	   1. skb was enqueued by tcp.
1562	   2. skb is added to tail of read queue, rather than out of order.
1563	   3. socket is not in passive state.
1564	   4. Finally, it really contains options, which user wants to receive.
1565	 */
1566	tp = tcp_sk(sk);
1567	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1568	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
1569		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
1570			np->mcast_oif = inet6_iif(opt_skb);
1571		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1572			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
1573		if (np->rxopt.bits.rxtclass)
1574			np->rcv_tclass = ipv6_tclass(ipv6_hdr(skb));
 
 
1575		if (ipv6_opt_accepted(sk, opt_skb)) {
1576			skb_set_owner_r(opt_skb, sk);
1577			opt_skb = xchg(&np->pktoptions, opt_skb);
1578		} else {
1579			__kfree_skb(opt_skb);
1580			opt_skb = xchg(&np->pktoptions, NULL);
1581		}
1582	}
1583
1584	kfree_skb(opt_skb);
1585	return 0;
1586}
1587
1588static int tcp_v6_rcv(struct sk_buff *skb)
1589{
1590	const struct tcphdr *th;
1591	const struct ipv6hdr *hdr;
1592	struct sock *sk;
1593	int ret;
1594	struct net *net = dev_net(skb->dev);
1595
1596	if (skb->pkt_type != PACKET_HOST)
1597		goto discard_it;
1598
1599	/*
1600	 *	Count it even if it's bad.
1601	 */
1602	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1603
1604	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1605		goto discard_it;
1606
1607	th = tcp_hdr(skb);
1608
1609	if (th->doff < sizeof(struct tcphdr)/4)
1610		goto bad_packet;
1611	if (!pskb_may_pull(skb, th->doff*4))
1612		goto discard_it;
1613
1614	if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
1615		goto bad_packet;
1616
1617	th = tcp_hdr(skb);
1618	hdr = ipv6_hdr(skb);
1619	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1620	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1621				    skb->len - th->doff*4);
1622	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1623	TCP_SKB_CB(skb)->when = 0;
1624	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1625	TCP_SKB_CB(skb)->sacked = 0;
1626
1627	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
1628	if (!sk)
1629		goto no_tcp_socket;
1630
1631process:
1632	if (sk->sk_state == TCP_TIME_WAIT)
1633		goto do_time_wait;
1634
1635	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
1636		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1637		goto discard_and_relse;
1638	}
1639
1640	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1641		goto discard_and_relse;
1642
1643	if (sk_filter(sk, skb))
1644		goto discard_and_relse;
1645
 
1646	skb->dev = NULL;
1647
1648	bh_lock_sock_nested(sk);
1649	ret = 0;
1650	if (!sock_owned_by_user(sk)) {
1651#ifdef CONFIG_NET_DMA
1652		struct tcp_sock *tp = tcp_sk(sk);
1653		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1654			tp->ucopy.dma_chan = net_dma_find_channel();
1655		if (tp->ucopy.dma_chan)
1656			ret = tcp_v6_do_rcv(sk, skb);
1657		else
1658#endif
1659		{
1660			if (!tcp_prequeue(sk, skb))
1661				ret = tcp_v6_do_rcv(sk, skb);
1662		}
1663	} else if (unlikely(sk_add_backlog(sk, skb,
1664					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
1665		bh_unlock_sock(sk);
1666		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1667		goto discard_and_relse;
1668	}
1669	bh_unlock_sock(sk);
1670
1671	sock_put(sk);
1672	return ret ? -1 : 0;
1673
1674no_tcp_socket:
1675	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1676		goto discard_it;
1677
1678	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
 
 
1679bad_packet:
1680		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1681	} else {
1682		tcp_v6_send_reset(NULL, skb);
1683	}
1684
1685discard_it:
1686
1687	/*
1688	 *	Discard frame
1689	 */
1690
1691	kfree_skb(skb);
1692	return 0;
1693
1694discard_and_relse:
1695	sock_put(sk);
1696	goto discard_it;
1697
1698do_time_wait:
1699	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1700		inet_twsk_put(inet_twsk(sk));
1701		goto discard_it;
1702	}
1703
1704	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
 1705		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1706		inet_twsk_put(inet_twsk(sk));
1707		goto discard_it;
1708	}
1709
1710	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1711	case TCP_TW_SYN:
1712	{
1713		struct sock *sk2;
1714
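		/* A new SYN hitting a TIME_WAIT socket may recycle it: if a
		 * matching listener exists, kill the timewait entry and let
		 * the listener process the SYN.
		 */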
1715		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
 
1716					    &ipv6_hdr(skb)->daddr,
1717					    ntohs(th->dest), inet6_iif(skb));
1718		if (sk2 != NULL) {
1719			struct inet_timewait_sock *tw = inet_twsk(sk);
1720			inet_twsk_deschedule(tw, &tcp_death_row);
1721			inet_twsk_put(tw);
1722			sk = sk2;
1723			goto process;
1724		}
1725		/* Fall through to ACK */
1726	}
1727	case TCP_TW_ACK:
1728		tcp_v6_timewait_ack(sk, skb);
1729		break;
1730	case TCP_TW_RST:
1731		goto no_tcp_socket;
1732	case TCP_TW_SUCCESS:;
 
1733	}
1734	goto discard_it;
1735}
1736
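/* Return the inet_peer for the connection's destination, preferring the one
 * cached on the route; *release_it tells the caller whether it holds a
 * reference that must be dropped.
 */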
1737static struct inet_peer *tcp_v6_get_peer(struct sock *sk, bool *release_it)
1738{
1739	struct rt6_info *rt = (struct rt6_info *) __sk_dst_get(sk);
1740	struct ipv6_pinfo *np = inet6_sk(sk);
1741	struct inet_peer *peer;
1742
1743	if (!rt ||
1744	    !ipv6_addr_equal(&np->daddr, &rt->rt6i_dst.addr)) {
1745		peer = inet_getpeer_v6(&np->daddr, 1);
1746		*release_it = true;
1747	} else {
1748		if (!rt->rt6i_peer)
1749			rt6_bind_peer(rt, 1);
1750		peer = rt->rt6i_peer;
1751		*release_it = false;
1752	}
1753
1754	return peer;
1755}
1756
1757static void *tcp_v6_tw_get_peer(struct sock *sk)
1758{
1759	const struct inet6_timewait_sock *tw6 = inet6_twsk(sk);
1760	const struct inet_timewait_sock *tw = inet_twsk(sk);
1761
1762	if (tw->tw_family == AF_INET)
1763		return tcp_v4_tw_get_peer(sk);
1764
1765	return inet_getpeer_v6(&tw6->tw_v6_daddr, 1);
1766}
1767
1768static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1769	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
1770	.twsk_unique	= tcp_twsk_unique,
1771	.twsk_destructor= tcp_twsk_destructor,
1772	.twsk_getpeer	= tcp_v6_tw_get_peer,
1773};
1774
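/* AF-specific connection-socket operations for native IPv6 TCP sockets;
 * installed by tcp_v6_init_sock().
 */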
1775static const struct inet_connection_sock_af_ops ipv6_specific = {
1776	.queue_xmit	   = inet6_csk_xmit,
1777	.send_check	   = tcp_v6_send_check,
1778	.rebuild_header	   = inet6_sk_rebuild_header,
1779	.conn_request	   = tcp_v6_conn_request,
1780	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
1781	.get_peer	   = tcp_v6_get_peer,
1782	.net_header_len	   = sizeof(struct ipv6hdr),
1783	.net_frag_header_len = sizeof(struct frag_hdr),
1784	.setsockopt	   = ipv6_setsockopt,
1785	.getsockopt	   = ipv6_getsockopt,
1786	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
1787	.sockaddr_len	   = sizeof(struct sockaddr_in6),
1788	.bind_conflict	   = inet6_csk_bind_conflict,
1789#ifdef CONFIG_COMPAT
1790	.compat_setsockopt = compat_ipv6_setsockopt,
1791	.compat_getsockopt = compat_ipv6_getsockopt,
1792#endif
1793};
1794
1795#ifdef CONFIG_TCP_MD5SIG
1796static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
1797	.md5_lookup	=	tcp_v6_md5_lookup,
1798	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
1799	.md5_parse	=	tcp_v6_parse_md5_keys,
1800};
1801#endif
1802
1803/*
1804 *	TCP over IPv4 via INET6 API
1805 */
1806
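/* Operations used when an IPv6 socket talks to an IPv4-mapped address:
 * transmit, checksumming and header handling fall back to the IPv4
 * routines while the socket keeps its IPv6 API.
 */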
1807static const struct inet_connection_sock_af_ops ipv6_mapped = {
1808	.queue_xmit	   = ip_queue_xmit,
1809	.send_check	   = tcp_v4_send_check,
1810	.rebuild_header	   = inet_sk_rebuild_header,
1811	.conn_request	   = tcp_v6_conn_request,
1812	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
1813	.get_peer	   = tcp_v4_get_peer,
1814	.net_header_len	   = sizeof(struct iphdr),
1815	.setsockopt	   = ipv6_setsockopt,
1816	.getsockopt	   = ipv6_getsockopt,
1817	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
1818	.sockaddr_len	   = sizeof(struct sockaddr_in6),
1819	.bind_conflict	   = inet6_csk_bind_conflict,
1820#ifdef CONFIG_COMPAT
1821	.compat_setsockopt = compat_ipv6_setsockopt,
1822	.compat_getsockopt = compat_ipv6_getsockopt,
1823#endif
1824};
1825
1826#ifdef CONFIG_TCP_MD5SIG
1827static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
1828	.md5_lookup	=	tcp_v4_md5_lookup,
1829	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
1830	.md5_parse	=	tcp_v6_parse_md5_keys,
1831};
1832#endif
1833
1834/* NOTE: Many fields are already zeroed by sk_alloc(), so they need
1835 *       not be initialized again here.
1836 */
1837static int tcp_v6_init_sock(struct sock *sk)
1838{
1839	struct inet_connection_sock *icsk = inet_csk(sk);
1840
1841	tcp_init_sock(sk);
1842
1843	icsk->icsk_af_ops = &ipv6_specific;
1844
1845#ifdef CONFIG_TCP_MD5SIG
1846	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
1847#endif
1848
1849	return 0;
1850}
1851
1852static void tcp_v6_destroy_sock(struct sock *sk)
1853{
1854	tcp_v4_destroy_sock(sk);
1855	inet6_destroy_sock(sk);
1856}
1857
1858#ifdef CONFIG_PROC_FS
1859/* Proc filesystem TCPv6 sock list dumping. */
1860static void get_openreq6(struct seq_file *seq,
1861			 const struct sock *sk, struct request_sock *req, int i, int uid)
1862{
1863	int ttd = req->expires - jiffies;
1864	const struct in6_addr *src = &inet6_rsk(req)->loc_addr;
1865	const struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;
1866
1867	if (ttd < 0)
1868		ttd = 0;
1869
1870	seq_printf(seq,
1871		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1872		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
1873		   i,
1874		   src->s6_addr32[0], src->s6_addr32[1],
1875		   src->s6_addr32[2], src->s6_addr32[3],
1876		   ntohs(inet_rsk(req)->loc_port),
1877		   dest->s6_addr32[0], dest->s6_addr32[1],
1878		   dest->s6_addr32[2], dest->s6_addr32[3],
1879		   ntohs(inet_rsk(req)->rmt_port),
1880		   TCP_SYN_RECV,
1881		   0,0, /* could print option size, but that is af dependent. */
1882		   1,   /* timers active (only the expire timer) */
1883		   jiffies_to_clock_t(ttd),
1884		   req->retrans,
1885		   uid,
1886		   0,  /* non standard timer */
1887		   0, /* open_requests have no inode */
1888		   0, req);
1889}
1890
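/* Format one /proc/net/tcp6 line for a full socket, including queue
 * sizes, the active timer and its expiry, and congestion state.
 */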
1891static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1892{
1893	const struct in6_addr *dest, *src;
1894	__u16 destp, srcp;
1895	int timer_active;
1896	unsigned long timer_expires;
1897	const struct inet_sock *inet = inet_sk(sp);
1898	const struct tcp_sock *tp = tcp_sk(sp);
1899	const struct inet_connection_sock *icsk = inet_csk(sp);
1900	const struct ipv6_pinfo *np = inet6_sk(sp);
1901
1902	dest  = &np->daddr;
1903	src   = &np->rcv_saddr;
1904	destp = ntohs(inet->inet_dport);
1905	srcp  = ntohs(inet->inet_sport);
1906
1907	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
1908		timer_active	= 1;
1909		timer_expires	= icsk->icsk_timeout;
1910	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1911		timer_active	= 4;
1912		timer_expires	= icsk->icsk_timeout;
1913	} else if (timer_pending(&sp->sk_timer)) {
1914		timer_active	= 2;
1915		timer_expires	= sp->sk_timer.expires;
1916	} else {
1917		timer_active	= 0;
1918		timer_expires = jiffies;
1919	}
1920
1921	seq_printf(seq,
1922		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1923		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %lu %lu %u %u %d\n",
1924		   i,
1925		   src->s6_addr32[0], src->s6_addr32[1],
1926		   src->s6_addr32[2], src->s6_addr32[3], srcp,
1927		   dest->s6_addr32[0], dest->s6_addr32[1],
1928		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
1929		   sp->sk_state,
1930		   tp->write_seq-tp->snd_una,
1931		   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
1932		   timer_active,
1933		   jiffies_to_clock_t(timer_expires - jiffies),
1934		   icsk->icsk_retransmits,
1935		   sock_i_uid(sp),
1936		   icsk->icsk_probes_out,
1937		   sock_i_ino(sp),
1938		   atomic_read(&sp->sk_refcnt), sp,
1939		   jiffies_to_clock_t(icsk->icsk_rto),
1940		   jiffies_to_clock_t(icsk->icsk_ack.ato),
1941		   (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
1942		   tp->snd_cwnd,
1943		   tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh
1944		   );
1945}
1946
1947static void get_timewait6_sock(struct seq_file *seq,
1948			       struct inet_timewait_sock *tw, int i)
1949{
1950	const struct in6_addr *dest, *src;
1951	__u16 destp, srcp;
1952	const struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
1953	int ttd = tw->tw_ttd - jiffies;
1954
1955	if (ttd < 0)
1956		ttd = 0;
1957
1958	dest = &tw6->tw_v6_daddr;
1959	src  = &tw6->tw_v6_rcv_saddr;
1960	destp = ntohs(tw->tw_dport);
1961	srcp  = ntohs(tw->tw_sport);
1962
1963	seq_printf(seq,
1964		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1965		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
1966		   i,
1967		   src->s6_addr32[0], src->s6_addr32[1],
1968		   src->s6_addr32[2], src->s6_addr32[3], srcp,
1969		   dest->s6_addr32[0], dest->s6_addr32[1],
1970		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
1971		   tw->tw_substate, 0, 0,
1972		   3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
1973		   atomic_read(&tw->tw_refcnt), tw);
1974}
1975
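/* seq_file show callback: emit the header line for the start token, then
 * one line per entry, picking the formatter that matches the iterator's
 * current state (listening/established, open request, or TIME_WAIT).
 */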
1976static int tcp6_seq_show(struct seq_file *seq, void *v)
1977{
1978	struct tcp_iter_state *st;
1979
1980	if (v == SEQ_START_TOKEN) {
1981		seq_puts(seq,
1982			 "  sl  "
1983			 "local_address                         "
1984			 "remote_address                        "
1985			 "st tx_queue rx_queue tr tm->when retrnsmt"
1986			 "   uid  timeout inode\n");
1987		goto out;
1988	}
1989	st = seq->private;
1990
1991	switch (st->state) {
1992	case TCP_SEQ_STATE_LISTENING:
1993	case TCP_SEQ_STATE_ESTABLISHED:
1994		get_tcp6_sock(seq, v, st->num);
1995		break;
1996	case TCP_SEQ_STATE_OPENREQ:
1997		get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
1998		break;
1999	case TCP_SEQ_STATE_TIME_WAIT:
2000		get_timewait6_sock(seq, v, st->num);
2001		break;
2002	}
2003out:
2004	return 0;
2005}
2006
2007static const struct file_operations tcp6_afinfo_seq_fops = {
2008	.owner   = THIS_MODULE,
2009	.open    = tcp_seq_open,
2010	.read    = seq_read,
2011	.llseek  = seq_lseek,
2012	.release = seq_release_net
2013};
2014
2015static struct tcp_seq_afinfo tcp6_seq_afinfo = {
2016	.name		= "tcp6",
2017	.family		= AF_INET6,
2018	.seq_fops	= &tcp6_afinfo_seq_fops,
2019	.seq_ops	= {
2020		.show		= tcp6_seq_show,
2021	},
2022};
2023
2024int __net_init tcp6_proc_init(struct net *net)
2025{
2026	return tcp_proc_register(net, &tcp6_seq_afinfo);
2027}
2028
2029void tcp6_proc_exit(struct net *net)
2030{
2031	tcp_proc_unregister(net, &tcp6_seq_afinfo);
2032}
2033#endif
2034
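/* The TCPv6 proto operations, hooked up to SOCK_STREAM/IPPROTO_TCP
 * sockets through tcpv6_protosw below.
 */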
2035struct proto tcpv6_prot = {
2036	.name			= "TCPv6",
2037	.owner			= THIS_MODULE,
2038	.close			= tcp_close,
2039	.connect		= tcp_v6_connect,
2040	.disconnect		= tcp_disconnect,
2041	.accept			= inet_csk_accept,
2042	.ioctl			= tcp_ioctl,
2043	.init			= tcp_v6_init_sock,
2044	.destroy		= tcp_v6_destroy_sock,
2045	.shutdown		= tcp_shutdown,
2046	.setsockopt		= tcp_setsockopt,
2047	.getsockopt		= tcp_getsockopt,
2048	.recvmsg		= tcp_recvmsg,
2049	.sendmsg		= tcp_sendmsg,
2050	.sendpage		= tcp_sendpage,
2051	.backlog_rcv		= tcp_v6_do_rcv,
2052	.hash			= tcp_v6_hash,
2053	.unhash			= inet_unhash,
2054	.get_port		= inet_csk_get_port,
2055	.enter_memory_pressure	= tcp_enter_memory_pressure,
2056	.sockets_allocated	= &tcp_sockets_allocated,
2057	.memory_allocated	= &tcp_memory_allocated,
2058	.memory_pressure	= &tcp_memory_pressure,
2059	.orphan_count		= &tcp_orphan_count,
2060	.sysctl_wmem		= sysctl_tcp_wmem,
2061	.sysctl_rmem		= sysctl_tcp_rmem,
2062	.max_header		= MAX_TCP_HEADER,
2063	.obj_size		= sizeof(struct tcp6_sock),
2064	.slab_flags		= SLAB_DESTROY_BY_RCU,
2065	.twsk_prot		= &tcp6_timewait_sock_ops,
2066	.rsk_prot		= &tcp6_request_sock_ops,
2067	.h.hashinfo		= &tcp_hashinfo,
2068	.no_autobind		= true,
2069#ifdef CONFIG_COMPAT
2070	.compat_setsockopt	= compat_tcp_setsockopt,
2071	.compat_getsockopt	= compat_tcp_getsockopt,
2072#endif
2073#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
2074	.proto_cgroup		= tcp_proto_cgroup,
2075#endif
2076};
2077
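/* IPv6 protocol handler: hands TCP segments from the IPv6 input path to
 * tcp_v6_rcv() and wires up the GSO/GRO callbacks.
 */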
2078static const struct inet6_protocol tcpv6_protocol = {
2079	.handler	=	tcp_v6_rcv,
2080	.err_handler	=	tcp_v6_err,
2081	.gso_send_check	=	tcp_v6_gso_send_check,
2082	.gso_segment	=	tcp_tso_segment,
2083	.gro_receive	=	tcp6_gro_receive,
2084	.gro_complete	=	tcp6_gro_complete,
2085	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
2086};
2087
2088static struct inet_protosw tcpv6_protosw = {
2089	.type		=	SOCK_STREAM,
2090	.protocol	=	IPPROTO_TCP,
2091	.prot		=	&tcpv6_prot,
2092	.ops		=	&inet6_stream_ops,
2093	.no_check	=	0,
2094	.flags		=	INET_PROTOSW_PERMANENT |
2095				INET_PROTOSW_ICSK,
2096};
2097
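/* Each network namespace gets a raw control socket (net->ipv6.tcp_sk),
 * used to transmit RSTs and ACKs that are not tied to a full socket.
 */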
2098static int __net_init tcpv6_net_init(struct net *net)
2099{
2100	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
2101				    SOCK_RAW, IPPROTO_TCP, net);
2102}
2103
2104static void __net_exit tcpv6_net_exit(struct net *net)
2105{
2106	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
2107}
2108
2109static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
2110{
2111	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
2112}
2113
2114static struct pernet_operations tcpv6_net_ops = {
2115	.init	    = tcpv6_net_init,
2116	.exit	    = tcpv6_net_exit,
2117	.exit_batch = tcpv6_net_exit_batch,
2118};
2119
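/* Module init: register the IPv6 protocol handler, then the protosw entry
 * that makes TCP available to socket(AF_INET6, SOCK_STREAM), then the
 * per-namespace ops; failures unwind in reverse registration order.
 */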
2120int __init tcpv6_init(void)
2121{
2122	int ret;
2123
2124	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
2125	if (ret)
2126		goto out;
2127
2128	/* register inet6 protocol */
2129	ret = inet6_register_protosw(&tcpv6_protosw);
2130	if (ret)
2131		goto out_tcpv6_protocol;
2132
2133	ret = register_pernet_subsys(&tcpv6_net_ops);
2134	if (ret)
2135		goto out_tcpv6_protosw;
2136out:
2137	return ret;
2138
2139out_tcpv6_protosw:
2140	inet6_unregister_protosw(&tcpv6_protosw);
2141out_tcpv6_protocol:
2142	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2143	goto out;
2144}
2145
2146void tcpv6_exit(void)
2147{
2148	unregister_pernet_subsys(&tcpv6_net_ops);
2149	inet6_unregister_protosw(&tcpv6_protosw);
2150	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2151}