   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 *	TCP over IPv6
   4 *	Linux INET6 implementation
   5 *
   6 *	Authors:
   7 *	Pedro Roque		<roque@di.fc.ul.pt>
   8 *
   9 *	Based on:
  10 *	linux/net/ipv4/tcp.c
  11 *	linux/net/ipv4/tcp_input.c
  12 *	linux/net/ipv4/tcp_output.c
  13 *
  14 *	Fixes:
  15 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
  16 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
  17 *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
  18 *					a single port at the same time.
  19 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
  20 */
  21
  22#include <linux/bottom_half.h>
  23#include <linux/module.h>
  24#include <linux/errno.h>
  25#include <linux/types.h>
  26#include <linux/socket.h>
  27#include <linux/sockios.h>
  28#include <linux/net.h>
  29#include <linux/jiffies.h>
  30#include <linux/in.h>
  31#include <linux/in6.h>
  32#include <linux/netdevice.h>
  33#include <linux/init.h>
  34#include <linux/jhash.h>
  35#include <linux/ipsec.h>
  36#include <linux/times.h>
  37#include <linux/slab.h>
  38#include <linux/uaccess.h>
  39#include <linux/ipv6.h>
  40#include <linux/icmpv6.h>
  41#include <linux/random.h>
  42#include <linux/indirect_call_wrapper.h>
  43
  44#include <net/tcp.h>
  45#include <net/ndisc.h>
  46#include <net/inet6_hashtables.h>
  47#include <net/inet6_connection_sock.h>
  48#include <net/ipv6.h>
  49#include <net/transp_v6.h>
  50#include <net/addrconf.h>
  51#include <net/ip6_route.h>
  52#include <net/ip6_checksum.h>
  53#include <net/inet_ecn.h>
  54#include <net/protocol.h>
  55#include <net/xfrm.h>
  56#include <net/snmp.h>
  57#include <net/dsfield.h>
  58#include <net/timewait_sock.h>
  59#include <net/inet_common.h>
  60#include <net/secure_seq.h>
  61#include <net/busy_poll.h>
  62
  63#include <linux/proc_fs.h>
  64#include <linux/seq_file.h>
  65
  66#include <crypto/hash.h>
  67#include <linux/scatterlist.h>
  68
  69#include <trace/events/tcp.h>
  70
  71static void	tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
  72static void	tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
  73				      struct request_sock *req);
  74
  75INDIRECT_CALLABLE_SCOPE int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
  76
  77static const struct inet_connection_sock_af_ops ipv6_mapped;
  78const struct inet_connection_sock_af_ops ipv6_specific;
  79#ifdef CONFIG_TCP_MD5SIG
  80static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
  81static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
  82#else
  83static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
  84						   const struct in6_addr *addr,
  85						   int l3index)
  86{
  87	return NULL;
  88}
  89#endif
  90
  91/* Helper returning the inet6 address from a given tcp socket.
  92 * It can be used in TCP stack instead of inet6_sk(sk).
   93 * This avoids a dereference and allows compiler optimizations.
  94 * It is a specialized version of inet6_sk_generic().
  95 */
  96static struct ipv6_pinfo *tcp_inet6_sk(const struct sock *sk)
  97{
  98	unsigned int offset = sizeof(struct tcp6_sock) - sizeof(struct ipv6_pinfo);
  99
 100	return (struct ipv6_pinfo *)(((u8 *)sk) + offset);
 101}
 102
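/* A sketch of why the fixed offset above is safe: it assumes the layout
 * that inet6_sk_generic() relies on, where struct tcp6_sock keeps its
 * IPv6 state as the trailing member:
 *
 *	struct tcp6_sock {
 *		struct tcp_sock   tcp;
 *		struct ipv6_pinfo inet6;
 *	};
 *
 * so tcp_inet6_sk(sk) is simply &((struct tcp6_sock *)sk)->inet6, a
 * compile-time constant offset instead of the pointer load that
 * inet6_sk(sk) would perform.
 */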
 103static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
 104{
 105	struct dst_entry *dst = skb_dst(skb);
 106
 107	if (dst && dst_hold_safe(dst)) {
 108		const struct rt6_info *rt = (const struct rt6_info *)dst;
 109
 110		rcu_assign_pointer(sk->sk_rx_dst, dst);
 111		sk->sk_rx_dst_ifindex = skb->skb_iif;
 112		sk->sk_rx_dst_cookie = rt6_get_cookie(rt);
 113	}
 114}
 115
 116static u32 tcp_v6_init_seq(const struct sk_buff *skb)
 117{
 118	return secure_tcpv6_seq(ipv6_hdr(skb)->daddr.s6_addr32,
 119				ipv6_hdr(skb)->saddr.s6_addr32,
 120				tcp_hdr(skb)->dest,
 121				tcp_hdr(skb)->source);
 122}
 123
 124static u32 tcp_v6_init_ts_off(const struct net *net, const struct sk_buff *skb)
 125{
 126	return secure_tcpv6_ts_off(net, ipv6_hdr(skb)->daddr.s6_addr32,
 127				   ipv6_hdr(skb)->saddr.s6_addr32);
 128}
 129
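/* Sketch of what the two helpers above compute (the real keyed hashes
 * live in net/core/secure_seq.c; treat this as a conceptual outline,
 * not the exact implementation):
 *
 *	ISN    ~= keyed_hash(saddr, daddr, sport, dport, net_secret) + clock
 *	ts_off ~= keyed_hash(saddr, daddr, ts_secret)
 *
 * i.e. RFC 6528-style initial sequence numbers plus a per-path timestamp
 * offset, so off-path attackers cannot predict either value.
 */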
 130static int tcp_v6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
 131			      int addr_len)
 132{
  133	/* This check is replicated from tcp_v6_connect() and intended to
  134	 * prevent the BPF program called below from accessing bytes that are
  135	 * out of the bounds specified by the user in addr_len.
  136	 */
 137	if (addr_len < SIN6_LEN_RFC2133)
 138		return -EINVAL;
 139
 140	sock_owned_by_me(sk);
 141
 142	return BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr);
 143}
 144
 145static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 146			  int addr_len)
 147{
 148	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
 149	struct inet_connection_sock *icsk = inet_csk(sk);
 150	struct in6_addr *saddr = NULL, *final_p, final;
 151	struct inet_timewait_death_row *tcp_death_row;
 152	struct ipv6_pinfo *np = tcp_inet6_sk(sk);
 153	struct inet_sock *inet = inet_sk(sk);
 154	struct tcp_sock *tp = tcp_sk(sk);
 155	struct net *net = sock_net(sk);
 156	struct ipv6_txoptions *opt;
 157	struct dst_entry *dst;
 158	struct flowi6 fl6;
 159	int addr_type;
 160	int err;
 161
 162	if (addr_len < SIN6_LEN_RFC2133)
 163		return -EINVAL;
 164
 165	if (usin->sin6_family != AF_INET6)
 166		return -EAFNOSUPPORT;
 167
 168	memset(&fl6, 0, sizeof(fl6));
 169
 170	if (np->sndflow) {
 171		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
 172		IP6_ECN_flow_init(fl6.flowlabel);
 173		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
 174			struct ip6_flowlabel *flowlabel;
 175			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
 176			if (IS_ERR(flowlabel))
 177				return -EINVAL;
 178			fl6_sock_release(flowlabel);
 179		}
 180	}
 181
 182	/*
 183	 *	connect() to INADDR_ANY means loopback (BSD'ism).
 184	 */
 185
 186	if (ipv6_addr_any(&usin->sin6_addr)) {
 187		if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
 188			ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
 189					       &usin->sin6_addr);
 190		else
 191			usin->sin6_addr = in6addr_loopback;
 192	}
 193
 194	addr_type = ipv6_addr_type(&usin->sin6_addr);
 195
 196	if (addr_type & IPV6_ADDR_MULTICAST)
 197		return -ENETUNREACH;
 198
 199	if (addr_type&IPV6_ADDR_LINKLOCAL) {
 200		if (addr_len >= sizeof(struct sockaddr_in6) &&
 201		    usin->sin6_scope_id) {
 202			/* If interface is set while binding, indices
 203			 * must coincide.
 204			 */
 205			if (!sk_dev_equal_l3scope(sk, usin->sin6_scope_id))
 206				return -EINVAL;
 207
 208			sk->sk_bound_dev_if = usin->sin6_scope_id;
 209		}
 210
 211		/* Connecting to a link-local address requires an interface */
 212		if (!sk->sk_bound_dev_if)
 213			return -EINVAL;
 214	}
 215
 216	if (tp->rx_opt.ts_recent_stamp &&
 217	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
 218		tp->rx_opt.ts_recent = 0;
 219		tp->rx_opt.ts_recent_stamp = 0;
 220		WRITE_ONCE(tp->write_seq, 0);
 221	}
 222
 223	sk->sk_v6_daddr = usin->sin6_addr;
 224	np->flow_label = fl6.flowlabel;
 225
 226	/*
 227	 *	TCP over IPv4
 228	 */
 229
 230	if (addr_type & IPV6_ADDR_MAPPED) {
 231		u32 exthdrlen = icsk->icsk_ext_hdr_len;
 232		struct sockaddr_in sin;
 233
 234		if (ipv6_only_sock(sk))
 235			return -ENETUNREACH;
 236
 237		sin.sin_family = AF_INET;
 238		sin.sin_port = usin->sin6_port;
 239		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
 240
 241		/* Paired with READ_ONCE() in tcp_(get|set)sockopt() */
 242		WRITE_ONCE(icsk->icsk_af_ops, &ipv6_mapped);
 243		if (sk_is_mptcp(sk))
 244			mptcpv6_handle_mapped(sk, true);
 245		sk->sk_backlog_rcv = tcp_v4_do_rcv;
 246#ifdef CONFIG_TCP_MD5SIG
 247		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
 248#endif
 249
 250		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
 251
 252		if (err) {
 253			icsk->icsk_ext_hdr_len = exthdrlen;
 254			/* Paired with READ_ONCE() in tcp_(get|set)sockopt() */
 255			WRITE_ONCE(icsk->icsk_af_ops, &ipv6_specific);
 256			if (sk_is_mptcp(sk))
 257				mptcpv6_handle_mapped(sk, false);
 258			sk->sk_backlog_rcv = tcp_v6_do_rcv;
 259#ifdef CONFIG_TCP_MD5SIG
 260			tp->af_specific = &tcp_sock_ipv6_specific;
 261#endif
 262			goto failure;
 263		}
 264		np->saddr = sk->sk_v6_rcv_saddr;
 265
 266		return err;
 267	}
 268
 269	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
 270		saddr = &sk->sk_v6_rcv_saddr;
 271
 272	fl6.flowi6_proto = IPPROTO_TCP;
 273	fl6.daddr = sk->sk_v6_daddr;
 274	fl6.saddr = saddr ? *saddr : np->saddr;
 275	fl6.flowlabel = ip6_make_flowinfo(np->tclass, np->flow_label);
 276	fl6.flowi6_oif = sk->sk_bound_dev_if;
 277	fl6.flowi6_mark = sk->sk_mark;
 278	fl6.fl6_dport = usin->sin6_port;
 279	fl6.fl6_sport = inet->inet_sport;
 280	fl6.flowi6_uid = sk->sk_uid;
 281
 282	opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
 283	final_p = fl6_update_dst(&fl6, opt, &final);
 284
 285	security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6));
 286
 287	dst = ip6_dst_lookup_flow(net, sk, &fl6, final_p);
 288	if (IS_ERR(dst)) {
 289		err = PTR_ERR(dst);
 290		goto failure;
 291	}
 292
 293	tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;
 294
 295	if (!saddr) {
 296		saddr = &fl6.saddr;
 297
 298		err = inet_bhash2_update_saddr(sk, saddr, AF_INET6);
 299		if (err)
 300			goto failure;
 301	}
 302
 303	/* set the source address */
 304	np->saddr = *saddr;
 305	inet->inet_rcv_saddr = LOOPBACK4_IPV6;
 306
 307	sk->sk_gso_type = SKB_GSO_TCPV6;
 308	ip6_dst_store(sk, dst, NULL, NULL);
 309
 310	icsk->icsk_ext_hdr_len = 0;
 311	if (opt)
 312		icsk->icsk_ext_hdr_len = opt->opt_flen +
 313					 opt->opt_nflen;
 314
 315	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
 316
 317	inet->inet_dport = usin->sin6_port;
 318
 319	tcp_set_state(sk, TCP_SYN_SENT);
 320	err = inet6_hash_connect(tcp_death_row, sk);
 321	if (err)
 322		goto late_failure;
 323
 324	sk_set_txhash(sk);
 325
 326	if (likely(!tp->repair)) {
 327		if (!tp->write_seq)
 328			WRITE_ONCE(tp->write_seq,
 329				   secure_tcpv6_seq(np->saddr.s6_addr32,
 330						    sk->sk_v6_daddr.s6_addr32,
 331						    inet->inet_sport,
 332						    inet->inet_dport));
 333		tp->tsoffset = secure_tcpv6_ts_off(net, np->saddr.s6_addr32,
 334						   sk->sk_v6_daddr.s6_addr32);
 335	}
 336
 337	if (tcp_fastopen_defer_connect(sk, &err))
 338		return err;
 339	if (err)
 340		goto late_failure;
 341
 342	err = tcp_connect(sk);
 343	if (err)
 344		goto late_failure;
 345
 346	return 0;
 347
 348late_failure:
 349	tcp_set_state(sk, TCP_CLOSE);
 350	inet_bhash2_reset_saddr(sk);
 351failure:
 352	inet->inet_dport = 0;
 353	sk->sk_route_caps = 0;
 354	return err;
 355}
 356
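/* Usage sketch: the function above is what a plain connect(2) on an
 * AF_INET6 TCP socket exercises, including the v4-mapped branch for
 * "::ffff:a.b.c.d" peers. A minimal userspace caller (illustrative only,
 * hence guarded out of the kernel build) might look like:
 */
#if 0
#include <arpa/inet.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

static int connect_v6(const char *addr, unsigned short port)
{
	struct sockaddr_in6 sin6;
	int fd = socket(AF_INET6, SOCK_STREAM, 0);

	if (fd < 0)
		return -1;
	memset(&sin6, 0, sizeof(sin6));
	sin6.sin6_family = AF_INET6;	/* anything else gets -EAFNOSUPPORT */
	sin6.sin6_port = htons(port);
	if (inet_pton(AF_INET6, addr, &sin6.sin6_addr) != 1)
		goto fail;
	/* tcp_v6_connect() requires addr_len >= SIN6_LEN_RFC2133 (24) */
	if (connect(fd, (struct sockaddr *)&sin6, sizeof(sin6)) < 0)
		goto fail;
	return fd;
fail:
	close(fd);
	return -1;
}
#endif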
 357static void tcp_v6_mtu_reduced(struct sock *sk)
 358{
 359	struct dst_entry *dst;
 360	u32 mtu;
 361
 362	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
 363		return;
 364
 365	mtu = READ_ONCE(tcp_sk(sk)->mtu_info);
 366
 367	/* Drop requests trying to increase our current mss.
  368	 * The check done in __ip6_rt_update_pmtu() is too late.
 369	 */
 370	if (tcp_mtu_to_mss(sk, mtu) >= tcp_sk(sk)->mss_cache)
 371		return;
 372
 373	dst = inet6_csk_update_pmtu(sk, mtu);
 374	if (!dst)
 375		return;
 376
 377	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
 378		tcp_sync_mss(sk, dst_mtu(dst));
 379		tcp_simple_retransmit(sk);
 380	}
 381}
 382
 383static int tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 384		u8 type, u8 code, int offset, __be32 info)
 385{
 386	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
 387	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
 388	struct net *net = dev_net(skb->dev);
 389	struct request_sock *fastopen;
 390	struct ipv6_pinfo *np;
 391	struct tcp_sock *tp;
 392	__u32 seq, snd_una;
 393	struct sock *sk;
 394	bool fatal;
 395	int err;
 396
 397	sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
 398					&hdr->daddr, th->dest,
 399					&hdr->saddr, ntohs(th->source),
 400					skb->dev->ifindex, inet6_sdif(skb));
 401
 402	if (!sk) {
 403		__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
 404				  ICMP6_MIB_INERRORS);
 405		return -ENOENT;
 406	}
 407
 408	if (sk->sk_state == TCP_TIME_WAIT) {
 409		inet_twsk_put(inet_twsk(sk));
 410		return 0;
 411	}
 412	seq = ntohl(th->seq);
 413	fatal = icmpv6_err_convert(type, code, &err);
 414	if (sk->sk_state == TCP_NEW_SYN_RECV) {
 415		tcp_req_err(sk, seq, fatal);
 416		return 0;
 417	}
 418
 419	bh_lock_sock(sk);
 420	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
 421		__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
 422
 423	if (sk->sk_state == TCP_CLOSE)
 424		goto out;
 425
 426	if (static_branch_unlikely(&ip6_min_hopcount)) {
 427		/* min_hopcount can be changed concurrently from do_ipv6_setsockopt() */
 428		if (ipv6_hdr(skb)->hop_limit < READ_ONCE(tcp_inet6_sk(sk)->min_hopcount)) {
 429			__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
 430			goto out;
 431		}
 432	}
 433
 434	tp = tcp_sk(sk);
  435	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child()) */
 436	fastopen = rcu_dereference(tp->fastopen_rsk);
 437	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
 438	if (sk->sk_state != TCP_LISTEN &&
 439	    !between(seq, snd_una, tp->snd_nxt)) {
 440		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
 441		goto out;
 442	}
 443
 444	np = tcp_inet6_sk(sk);
 445
 446	if (type == NDISC_REDIRECT) {
 447		if (!sock_owned_by_user(sk)) {
 448			struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
 449
 450			if (dst)
 451				dst->ops->redirect(dst, sk, skb);
 452		}
 453		goto out;
 454	}
 455
 456	if (type == ICMPV6_PKT_TOOBIG) {
 457		u32 mtu = ntohl(info);
 458
  459		/* We are not interested in TCP_LISTEN and open_requests
  460		 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
  461		 * they should go through unfragmented).
  462		 */
 463		if (sk->sk_state == TCP_LISTEN)
 464			goto out;
 465
 466		if (!ip6_sk_accept_pmtu(sk))
 467			goto out;
 468
 469		if (mtu < IPV6_MIN_MTU)
 470			goto out;
 471
 472		WRITE_ONCE(tp->mtu_info, mtu);
 473
 474		if (!sock_owned_by_user(sk))
 475			tcp_v6_mtu_reduced(sk);
 476		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
 477					   &sk->sk_tsq_flags))
 478			sock_hold(sk);
 479		goto out;
 480	}
 481
 482
  483	/* Might be for a request_sock */
 484	switch (sk->sk_state) {
 485	case TCP_SYN_SENT:
 486	case TCP_SYN_RECV:
 487		/* Only in fast or simultaneous open. If a fast open socket is
  488		 * already accepted, it is treated as a connected one below.
 489		 */
 490		if (fastopen && !fastopen->sk)
 491			break;
 492
 493		ipv6_icmp_error(sk, skb, err, th->dest, ntohl(info), (u8 *)th);
 494
 495		if (!sock_owned_by_user(sk)) {
 496			sk->sk_err = err;
 497			sk_error_report(sk);		/* Wake people up to see the error (see connect in sock.c) */
 498
 499			tcp_done(sk);
 500		} else
 501			sk->sk_err_soft = err;
 502		goto out;
 503	case TCP_LISTEN:
 504		break;
 505	default:
  506		/* Check if this ICMP message allows reverting the backoff
  507		 * (see RFC 6069).
  508		 */
 509		if (!fastopen && type == ICMPV6_DEST_UNREACH &&
 510		    code == ICMPV6_NOROUTE)
 511			tcp_ld_RTO_revert(sk, seq);
 512	}
 513
 514	if (!sock_owned_by_user(sk) && np->recverr) {
 515		sk->sk_err = err;
 516		sk_error_report(sk);
 517	} else
 518		sk->sk_err_soft = err;
 519
 520out:
 521	bh_unlock_sock(sk);
 522	sock_put(sk);
 523	return 0;
 524}
 525
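/* Control-flow sketch (hedged summary of the PKT_TOOBIG branch above):
 * when the socket is owned by user context the handler only records the
 * new MTU and defers the work, roughly:
 *
 *	WRITE_ONCE(tp->mtu_info, mtu);
 *	set_bit(TCP_MTU_REDUCED_DEFERRED, &sk->sk_tsq_flags);
 *	...
 *	release_sock(sk)
 *	  -> tcp_release_cb()
 *	       -> icsk->icsk_af_ops->mtu_reduced(sk), i.e. tcp_v6_mtu_reduced()
 *
 * so the PMTU update runs once the lock owner releases the socket instead
 * of racing with it from softirq context.
 */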
 526
 527static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
 528			      struct flowi *fl,
 529			      struct request_sock *req,
 530			      struct tcp_fastopen_cookie *foc,
 531			      enum tcp_synack_type synack_type,
 532			      struct sk_buff *syn_skb)
 533{
 534	struct inet_request_sock *ireq = inet_rsk(req);
 535	struct ipv6_pinfo *np = tcp_inet6_sk(sk);
 536	struct ipv6_txoptions *opt;
 537	struct flowi6 *fl6 = &fl->u.ip6;
 538	struct sk_buff *skb;
 539	int err = -ENOMEM;
 540	u8 tclass;
 541
 542	/* First, grab a route. */
 543	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
 544					       IPPROTO_TCP)) == NULL)
 545		goto done;
 546
 547	skb = tcp_make_synack(sk, dst, req, foc, synack_type, syn_skb);
 548
 549	if (skb) {
 550		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
 551				    &ireq->ir_v6_rmt_addr);
 552
 553		fl6->daddr = ireq->ir_v6_rmt_addr;
 554		if (np->repflow && ireq->pktopts)
 555			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
 556
 557		tclass = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reflect_tos) ?
 558				(tcp_rsk(req)->syn_tos & ~INET_ECN_MASK) |
 559				(np->tclass & INET_ECN_MASK) :
 560				np->tclass;
 561
 562		if (!INET_ECN_is_capable(tclass) &&
 563		    tcp_bpf_ca_needs_ecn((struct sock *)req))
 564			tclass |= INET_ECN_ECT_0;
 565
 566		rcu_read_lock();
 567		opt = ireq->ipv6_opt;
 568		if (!opt)
 569			opt = rcu_dereference(np->opt);
 570		err = ip6_xmit(sk, skb, fl6, skb->mark ? : sk->sk_mark, opt,
 571			       tclass, sk->sk_priority);
 572		rcu_read_unlock();
 573		err = net_xmit_eval(err);
 574	}
 575
 576done:
 577	return err;
 578}
 579
 580
 581static void tcp_v6_reqsk_destructor(struct request_sock *req)
 582{
 583	kfree(inet_rsk(req)->ipv6_opt);
 584	consume_skb(inet_rsk(req)->pktopts);
 585}
 586
 587#ifdef CONFIG_TCP_MD5SIG
 588static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
 589						   const struct in6_addr *addr,
 590						   int l3index)
 591{
 592	return tcp_md5_do_lookup(sk, l3index,
 593				 (union tcp_md5_addr *)addr, AF_INET6);
 594}
 595
 596static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
 597						const struct sock *addr_sk)
 598{
 599	int l3index;
 600
 601	l3index = l3mdev_master_ifindex_by_index(sock_net(sk),
 602						 addr_sk->sk_bound_dev_if);
 603	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr,
 604				    l3index);
 605}
 606
 607static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
 608				 sockptr_t optval, int optlen)
 609{
 610	struct tcp_md5sig cmd;
 611	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
 612	int l3index = 0;
 613	u8 prefixlen;
 614	u8 flags;
 615
 616	if (optlen < sizeof(cmd))
 617		return -EINVAL;
 618
 619	if (copy_from_sockptr(&cmd, optval, sizeof(cmd)))
 620		return -EFAULT;
 621
 622	if (sin6->sin6_family != AF_INET6)
 623		return -EINVAL;
 624
 625	flags = cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX;
 626
 627	if (optname == TCP_MD5SIG_EXT &&
 628	    cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
 629		prefixlen = cmd.tcpm_prefixlen;
 630		if (prefixlen > 128 || (ipv6_addr_v4mapped(&sin6->sin6_addr) &&
 631					prefixlen > 32))
 632			return -EINVAL;
 633	} else {
 634		prefixlen = ipv6_addr_v4mapped(&sin6->sin6_addr) ? 32 : 128;
 635	}
 636
 637	if (optname == TCP_MD5SIG_EXT && cmd.tcpm_ifindex &&
 638	    cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX) {
 639		struct net_device *dev;
 640
 641		rcu_read_lock();
 642		dev = dev_get_by_index_rcu(sock_net(sk), cmd.tcpm_ifindex);
 643		if (dev && netif_is_l3_master(dev))
 644			l3index = dev->ifindex;
 645		rcu_read_unlock();
 646
  647		/* ok to use the set/not-set result outside of RCU;
  648		 * right now the device MUST be an L3 master
  649		 */
 650		if (!dev || !l3index)
 651			return -EINVAL;
 652	}
 653
 654	if (!cmd.tcpm_keylen) {
 655		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
 656			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
 657					      AF_INET, prefixlen,
 658					      l3index, flags);
 659		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
 660				      AF_INET6, prefixlen, l3index, flags);
 661	}
 662
 663	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
 664		return -EINVAL;
 665
 666	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
 667		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
 668				      AF_INET, prefixlen, l3index, flags,
 669				      cmd.tcpm_key, cmd.tcpm_keylen);
 670
 671	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
 672			      AF_INET6, prefixlen, l3index, flags,
 673			      cmd.tcpm_key, cmd.tcpm_keylen);
 674}
 675
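/* Usage sketch for the parser above: userspace installs a key with
 * setsockopt(TCP_MD5SIG), or TCP_MD5SIG_EXT when the prefix/ifindex
 * extensions are wanted. Illustrative only, hence guarded out:
 */
#if 0
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <string.h>
#include <sys/socket.h>

static int install_md5_key(int fd, const struct in6_addr *peer,
			   const void *key, unsigned int keylen)
{
	struct tcp_md5sig md5;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&md5.tcpm_addr;

	memset(&md5, 0, sizeof(md5));
	sin6->sin6_family = AF_INET6;	/* must match, see the check above */
	sin6->sin6_addr = *peer;
	md5.tcpm_keylen = keylen;	/* <= TCP_MD5SIG_MAXKEYLEN (80) */
	memcpy(md5.tcpm_key, key, keylen);
	/* a zero tcpm_keylen would delete the key instead */
	return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
}
#endif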
 676static int tcp_v6_md5_hash_headers(struct tcp_md5sig_pool *hp,
 677				   const struct in6_addr *daddr,
 678				   const struct in6_addr *saddr,
 679				   const struct tcphdr *th, int nbytes)
 680{
 681	struct tcp6_pseudohdr *bp;
 682	struct scatterlist sg;
 683	struct tcphdr *_th;
 684
 685	bp = hp->scratch;
 686	/* 1. TCP pseudo-header (RFC2460) */
 687	bp->saddr = *saddr;
 688	bp->daddr = *daddr;
 689	bp->protocol = cpu_to_be32(IPPROTO_TCP);
 690	bp->len = cpu_to_be32(nbytes);
 691
 692	_th = (struct tcphdr *)(bp + 1);
 693	memcpy(_th, th, sizeof(*th));
 694	_th->check = 0;
 695
 696	sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
 697	ahash_request_set_crypt(hp->md5_req, &sg, NULL,
 698				sizeof(*bp) + sizeof(*th));
 699	return crypto_ahash_update(hp->md5_req);
 700}
 701
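/* Layout sketch of the RFC 2460 pseudo-header hashed above (assuming the
 * usual struct tcp6_pseudohdr definition):
 *
 *	+------------------+------------------+---------+-----------+
 *	| saddr (16 bytes) | daddr (16 bytes) | len (4) | proto (4) |
 *	+------------------+------------------+---------+-----------+
 *
 * followed in the scatterlist by the TCP header with its checksum field
 * zeroed, exactly as a receiver must reconstruct it for verification.
 */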
 702static int tcp_v6_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
 703			       const struct in6_addr *daddr, struct in6_addr *saddr,
 704			       const struct tcphdr *th)
 705{
 706	struct tcp_md5sig_pool *hp;
 707	struct ahash_request *req;
 708
 709	hp = tcp_get_md5sig_pool();
 710	if (!hp)
 711		goto clear_hash_noput;
 712	req = hp->md5_req;
 713
 714	if (crypto_ahash_init(req))
 715		goto clear_hash;
 716	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
 717		goto clear_hash;
 718	if (tcp_md5_hash_key(hp, key))
 719		goto clear_hash;
 720	ahash_request_set_crypt(req, NULL, md5_hash, 0);
 721	if (crypto_ahash_final(req))
 722		goto clear_hash;
 723
 724	tcp_put_md5sig_pool();
 725	return 0;
 726
 727clear_hash:
 728	tcp_put_md5sig_pool();
 729clear_hash_noput:
 730	memset(md5_hash, 0, 16);
 731	return 1;
 732}
 733
 734static int tcp_v6_md5_hash_skb(char *md5_hash,
 735			       const struct tcp_md5sig_key *key,
 736			       const struct sock *sk,
 737			       const struct sk_buff *skb)
 738{
 739	const struct in6_addr *saddr, *daddr;
 740	struct tcp_md5sig_pool *hp;
 741	struct ahash_request *req;
 742	const struct tcphdr *th = tcp_hdr(skb);
 743
 744	if (sk) { /* valid for establish/request sockets */
 745		saddr = &sk->sk_v6_rcv_saddr;
 746		daddr = &sk->sk_v6_daddr;
 747	} else {
 748		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
 749		saddr = &ip6h->saddr;
 750		daddr = &ip6h->daddr;
 751	}
 752
 753	hp = tcp_get_md5sig_pool();
 754	if (!hp)
 755		goto clear_hash_noput;
 756	req = hp->md5_req;
 757
 758	if (crypto_ahash_init(req))
 759		goto clear_hash;
 760
 761	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, skb->len))
 762		goto clear_hash;
 763	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
 764		goto clear_hash;
 765	if (tcp_md5_hash_key(hp, key))
 766		goto clear_hash;
 767	ahash_request_set_crypt(req, NULL, md5_hash, 0);
 768	if (crypto_ahash_final(req))
 769		goto clear_hash;
 770
 771	tcp_put_md5sig_pool();
 772	return 0;
 773
 774clear_hash:
 775	tcp_put_md5sig_pool();
 776clear_hash_noput:
 777	memset(md5_hash, 0, 16);
 778	return 1;
 779}
 780
 781#endif
 782
 783static void tcp_v6_init_req(struct request_sock *req,
 784			    const struct sock *sk_listener,
 785			    struct sk_buff *skb)
 786{
 787	bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);
 788	struct inet_request_sock *ireq = inet_rsk(req);
 789	const struct ipv6_pinfo *np = tcp_inet6_sk(sk_listener);
 790
 791	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
 792	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
 793
 794	/* So that link locals have meaning */
 795	if ((!sk_listener->sk_bound_dev_if || l3_slave) &&
 796	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
 797		ireq->ir_iif = tcp_v6_iif(skb);
 798
 799	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
 800	    (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
 801	     np->rxopt.bits.rxinfo ||
 802	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
 803	     np->rxopt.bits.rxohlim || np->repflow)) {
 804		refcount_inc(&skb->users);
 805		ireq->pktopts = skb;
 806	}
 807}
 808
 809static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
 810					  struct sk_buff *skb,
 811					  struct flowi *fl,
 812					  struct request_sock *req)
 813{
 814	tcp_v6_init_req(req, sk, skb);
 815
 816	if (security_inet_conn_request(sk, skb, req))
 817		return NULL;
 818
 819	return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
 820}
 821
 822struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
 823	.family		=	AF_INET6,
 824	.obj_size	=	sizeof(struct tcp6_request_sock),
 825	.rtx_syn_ack	=	tcp_rtx_synack,
 826	.send_ack	=	tcp_v6_reqsk_send_ack,
 827	.destructor	=	tcp_v6_reqsk_destructor,
 828	.send_reset	=	tcp_v6_send_reset,
 829	.syn_ack_timeout =	tcp_syn_ack_timeout,
 830};
 831
 832const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
 833	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
 834				sizeof(struct ipv6hdr),
 835#ifdef CONFIG_TCP_MD5SIG
 836	.req_md5_lookup	=	tcp_v6_md5_lookup,
 837	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
 838#endif
 839#ifdef CONFIG_SYN_COOKIES
 840	.cookie_init_seq =	cookie_v6_init_sequence,
 841#endif
 842	.route_req	=	tcp_v6_route_req,
 843	.init_seq	=	tcp_v6_init_seq,
 844	.init_ts_off	=	tcp_v6_init_ts_off,
 845	.send_synack	=	tcp_v6_send_synack,
 846};
 847
 848static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
 849				 u32 ack, u32 win, u32 tsval, u32 tsecr,
 850				 int oif, struct tcp_md5sig_key *key, int rst,
 851				 u8 tclass, __be32 label, u32 priority, u32 txhash)
 852{
 853	const struct tcphdr *th = tcp_hdr(skb);
 854	struct tcphdr *t1;
 855	struct sk_buff *buff;
 856	struct flowi6 fl6;
 857	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
 858	struct sock *ctl_sk = net->ipv6.tcp_sk;
 859	unsigned int tot_len = sizeof(struct tcphdr);
 860	__be32 mrst = 0, *topt;
 861	struct dst_entry *dst;
 862	__u32 mark = 0;
 863
 864	if (tsecr)
 865		tot_len += TCPOLEN_TSTAMP_ALIGNED;
 866#ifdef CONFIG_TCP_MD5SIG
 867	if (key)
 868		tot_len += TCPOLEN_MD5SIG_ALIGNED;
 869#endif
 870
 871#ifdef CONFIG_MPTCP
 872	if (rst && !key) {
 873		mrst = mptcp_reset_option(skb);
 874
 875		if (mrst)
 876			tot_len += sizeof(__be32);
 877	}
 878#endif
 879
 880	buff = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
 881	if (!buff)
 882		return;
 883
 884	skb_reserve(buff, MAX_TCP_HEADER);
 885
 886	t1 = skb_push(buff, tot_len);
 887	skb_reset_transport_header(buff);
 888
 889	/* Swap the send and the receive. */
 890	memset(t1, 0, sizeof(*t1));
 891	t1->dest = th->source;
 892	t1->source = th->dest;
 893	t1->doff = tot_len / 4;
 894	t1->seq = htonl(seq);
 895	t1->ack_seq = htonl(ack);
 896	t1->ack = !rst || !th->ack;
 897	t1->rst = rst;
 898	t1->window = htons(win);
 899
 900	topt = (__be32 *)(t1 + 1);
 901
 902	if (tsecr) {
 903		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
 904				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
 905		*topt++ = htonl(tsval);
 906		*topt++ = htonl(tsecr);
 907	}
 908
 909	if (mrst)
 910		*topt++ = mrst;
 911
 912#ifdef CONFIG_TCP_MD5SIG
 913	if (key) {
 914		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
 915				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
 916		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
 917				    &ipv6_hdr(skb)->saddr,
 918				    &ipv6_hdr(skb)->daddr, t1);
 919	}
 920#endif
 921
 922	memset(&fl6, 0, sizeof(fl6));
 923	fl6.daddr = ipv6_hdr(skb)->saddr;
 924	fl6.saddr = ipv6_hdr(skb)->daddr;
 925	fl6.flowlabel = label;
 926
 927	buff->ip_summed = CHECKSUM_PARTIAL;
 928
 929	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
 930
 931	fl6.flowi6_proto = IPPROTO_TCP;
 932	if (rt6_need_strict(&fl6.daddr) && !oif)
 933		fl6.flowi6_oif = tcp_v6_iif(skb);
 934	else {
 935		if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
 936			oif = skb->skb_iif;
 937
 938		fl6.flowi6_oif = oif;
 939	}
 940
 941	if (sk) {
 942		if (sk->sk_state == TCP_TIME_WAIT)
 943			mark = inet_twsk(sk)->tw_mark;
 944		else
 945			mark = sk->sk_mark;
 946		skb_set_delivery_time(buff, tcp_transmit_time(sk), true);
 947	}
 948	if (txhash) {
 949		/* autoflowlabel/skb_get_hash_flowi6 rely on buff->hash */
 950		skb_set_hash(buff, txhash, PKT_HASH_TYPE_L4);
 951	}
 952	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark) ?: mark;
 953	fl6.fl6_dport = t1->dest;
 954	fl6.fl6_sport = t1->source;
 955	fl6.flowi6_uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
 956	security_skb_classify_flow(skb, flowi6_to_flowi_common(&fl6));
 957
  958	/* Pass a socket to ip6_dst_lookup even if it is for a RST.
  959	 * The underlying function will use it to retrieve the network
  960	 * namespace.
  961	 */
 962	if (sk && sk->sk_state != TCP_TIME_WAIT)
 963		dst = ip6_dst_lookup_flow(net, sk, &fl6, NULL); /*sk's xfrm_policy can be referred*/
 964	else
 965		dst = ip6_dst_lookup_flow(net, ctl_sk, &fl6, NULL);
 966	if (!IS_ERR(dst)) {
 967		skb_dst_set(buff, dst);
 968		ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL,
 969			 tclass & ~INET_ECN_MASK, priority);
 970		TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
 971		if (rst)
 972			TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
 973		return;
 974	}
 975
 976	kfree_skb(buff);
 977}
 978
 979static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
 980{
 981	const struct tcphdr *th = tcp_hdr(skb);
 982	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
 983	u32 seq = 0, ack_seq = 0;
 984	struct tcp_md5sig_key *key = NULL;
 985#ifdef CONFIG_TCP_MD5SIG
 986	const __u8 *hash_location = NULL;
 987	unsigned char newhash[16];
 988	int genhash;
 989	struct sock *sk1 = NULL;
 990#endif
 991	__be32 label = 0;
 992	u32 priority = 0;
 993	struct net *net;
 994	u32 txhash = 0;
 995	int oif = 0;
 996
 997	if (th->rst)
 998		return;
 999
 1000	/* If sk is not NULL, it means we did a successful lookup and the
 1001	 * incoming route had to be correct. prequeue might have dropped our dst.
 1002	 */
1003	if (!sk && !ipv6_unicast_destination(skb))
1004		return;
1005
1006	net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
1007#ifdef CONFIG_TCP_MD5SIG
1008	rcu_read_lock();
1009	hash_location = tcp_parse_md5sig_option(th);
1010	if (sk && sk_fullsock(sk)) {
1011		int l3index;
1012
 1013		/* sdif set means the packet ingressed via a device
 1014		 * in an L3 domain and inet_iif is set to it.
 1015		 */
1016		l3index = tcp_v6_sdif(skb) ? tcp_v6_iif_l3_slave(skb) : 0;
1017		key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr, l3index);
1018	} else if (hash_location) {
1019		int dif = tcp_v6_iif_l3_slave(skb);
1020		int sdif = tcp_v6_sdif(skb);
1021		int l3index;
1022
 1023		/*
 1024		 * The active side is lost. Try to find the listening socket
 1025		 * through the source port, and then find the md5 key through
 1026		 * the listening socket. We do not lose security here:
 1027		 * the incoming packet is checked against the md5 hash of the
 1028		 * found key, and no RST is generated if the hash doesn't match.
 1029		 */
1030		sk1 = inet6_lookup_listener(net, net->ipv4.tcp_death_row.hashinfo,
1031					    NULL, 0, &ipv6h->saddr, th->source,
1032					    &ipv6h->daddr, ntohs(th->source),
1033					    dif, sdif);
1034		if (!sk1)
1035			goto out;
1036
 1037		/* sdif set means the packet ingressed via a device
 1038		 * in an L3 domain and dif is set to it.
 1039		 */
1040		l3index = tcp_v6_sdif(skb) ? dif : 0;
1041
1042		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr, l3index);
1043		if (!key)
1044			goto out;
1045
1046		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
1047		if (genhash || memcmp(hash_location, newhash, 16) != 0)
1048			goto out;
1049	}
1050#endif
1051
1052	if (th->ack)
1053		seq = ntohl(th->ack_seq);
1054	else
1055		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
1056			  (th->doff << 2);
1057
1058	if (sk) {
1059		oif = sk->sk_bound_dev_if;
1060		if (sk_fullsock(sk)) {
1061			const struct ipv6_pinfo *np = tcp_inet6_sk(sk);
1062
1063			trace_tcp_send_reset(sk, skb);
1064			if (np->repflow)
1065				label = ip6_flowlabel(ipv6h);
1066			priority = sk->sk_priority;
1067			txhash = sk->sk_hash;
1068		}
1069		if (sk->sk_state == TCP_TIME_WAIT) {
1070			label = cpu_to_be32(inet_twsk(sk)->tw_flowlabel);
1071			priority = inet_twsk(sk)->tw_priority;
1072			txhash = inet_twsk(sk)->tw_txhash;
1073		}
1074	} else {
1075		if (net->ipv6.sysctl.flowlabel_reflect & FLOWLABEL_REFLECT_TCP_RESET)
1076			label = ip6_flowlabel(ipv6h);
1077	}
1078
1079	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1,
1080			     ipv6_get_dsfield(ipv6h), label, priority, txhash);
1081
1082#ifdef CONFIG_TCP_MD5SIG
1083out:
1084	rcu_read_unlock();
1085#endif
1086}
1087
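/* Sequence-number sketch for the reset above, following RFC 793: if the
 * incoming segment carried an ACK, the RST reuses it as its own sequence
 * number; otherwise the RST acknowledges everything the segment consumed:
 *
 *	RST.seq = SEG.ack                         (when th->ack is set)
 *	RST.ack = SEG.seq + SYN + FIN + datalen   (otherwise)
 *
 * which is what the seq/ack_seq computation above implements.
 */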
1088static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
1089			    u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
1090			    struct tcp_md5sig_key *key, u8 tclass,
1091			    __be32 label, u32 priority, u32 txhash)
1092{
1093	tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
1094			     tclass, label, priority, txhash);
1095}
1096
1097static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
1098{
1099	struct inet_timewait_sock *tw = inet_twsk(sk);
1100	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
1101
1102	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
1103			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
1104			tcp_time_stamp_raw() + tcptw->tw_ts_offset,
1105			tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
1106			tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel), tw->tw_priority,
1107			tw->tw_txhash);
1108
1109	inet_twsk_put(tw);
1110}
1111
1112static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
1113				  struct request_sock *req)
1114{
1115	int l3index;
1116
1117	l3index = tcp_v6_sdif(skb) ? tcp_v6_iif_l3_slave(skb) : 0;
1118
1119	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
1120	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
1121	 */
1122	/* RFC 7323 2.3
1123	 * The window field (SEG.WND) of every outgoing segment, with the
1124	 * exception of <SYN> segments, MUST be right-shifted by
1125	 * Rcv.Wind.Shift bits:
1126	 */
1127	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
1128			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
1129			tcp_rsk(req)->rcv_nxt,
1130			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
1131			tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
1132			req->ts_recent, sk->sk_bound_dev_if,
1133			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr, l3index),
1134			ipv6_get_dsfield(ipv6_hdr(skb)), 0, sk->sk_priority,
1135			tcp_rsk(req)->txhash);
1136}
1137
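/* Worked example for the RFC 7323 rule cited above: with rcv_wscale == 7
 * and an advertised receive window of 131072 bytes, the 16-bit window
 * field carries 131072 >> 7 == 1024, and the peer multiplies it back by
 * 2^7 on receipt. Only <SYN> segments send the field unshifted.
 */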
1138
1139static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
1140{
1141#ifdef CONFIG_SYN_COOKIES
1142	const struct tcphdr *th = tcp_hdr(skb);
1143
1144	if (!th->syn)
1145		sk = cookie_v6_check(sk, skb);
1146#endif
1147	return sk;
1148}
1149
1150u16 tcp_v6_get_syncookie(struct sock *sk, struct ipv6hdr *iph,
1151			 struct tcphdr *th, u32 *cookie)
1152{
1153	u16 mss = 0;
1154#ifdef CONFIG_SYN_COOKIES
1155	mss = tcp_get_syncookie_mss(&tcp6_request_sock_ops,
1156				    &tcp_request_sock_ipv6_ops, sk, th);
1157	if (mss) {
1158		*cookie = __cookie_v6_init_sequence(iph, th, &mss);
1159		tcp_synq_overflow(sk);
1160	}
1161#endif
1162	return mss;
1163}
1164
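/* Flow sketch for the two syncookie helpers above: under SYN-queue
 * pressure the listener answers a SYN with an ISN that encodes the
 * connection tuple and MSS instead of allocating a request socket;
 * roughly:
 *
 *	SYN in -> tcp_get_syncookie_mss() + __cookie_v6_init_sequence()
 *	ACK in -> tcp_v6_cookie_check() -> cookie_v6_check() rebuilds
 *	          the request from (ack_seq - 1) if the cookie verifies
 *
 * which is why tcp_v6_cookie_check() only acts on non-SYN segments.
 */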
1165static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1166{
1167	if (skb->protocol == htons(ETH_P_IP))
1168		return tcp_v4_conn_request(sk, skb);
1169
1170	if (!ipv6_unicast_destination(skb))
1171		goto drop;
1172
1173	if (ipv6_addr_v4mapped(&ipv6_hdr(skb)->saddr)) {
1174		__IP6_INC_STATS(sock_net(sk), NULL, IPSTATS_MIB_INHDRERRORS);
1175		return 0;
1176	}
1177
1178	return tcp_conn_request(&tcp6_request_sock_ops,
1179				&tcp_request_sock_ipv6_ops, sk, skb);
1180
1181drop:
1182	tcp_listendrop(sk);
1183	return 0; /* don't send reset */
1184}
1185
1186static void tcp_v6_restore_cb(struct sk_buff *skb)
1187{
1188	/* We need to move header back to the beginning if xfrm6_policy_check()
1189	 * and tcp_v6_fill_cb() are going to be called again.
1190	 * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
1191	 */
1192	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
1193		sizeof(struct inet6_skb_parm));
1194}
1195
1196static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
1197					 struct request_sock *req,
1198					 struct dst_entry *dst,
1199					 struct request_sock *req_unhash,
1200					 bool *own_req)
1201{
1202	struct inet_request_sock *ireq;
1203	struct ipv6_pinfo *newnp;
1204	const struct ipv6_pinfo *np = tcp_inet6_sk(sk);
1205	struct ipv6_txoptions *opt;
1206	struct inet_sock *newinet;
1207	bool found_dup_sk = false;
1208	struct tcp_sock *newtp;
1209	struct sock *newsk;
1210#ifdef CONFIG_TCP_MD5SIG
1211	struct tcp_md5sig_key *key;
1212	int l3index;
1213#endif
1214	struct flowi6 fl6;
1215
1216	if (skb->protocol == htons(ETH_P_IP)) {
1217		/*
1218		 *	v6 mapped
1219		 */
1220
1221		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
1222					     req_unhash, own_req);
1223
1224		if (!newsk)
1225			return NULL;
1226
1227		inet_sk(newsk)->pinet6 = tcp_inet6_sk(newsk);
1228
1229		newnp = tcp_inet6_sk(newsk);
1230		newtp = tcp_sk(newsk);
1231
1232		memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1233
1234		newnp->saddr = newsk->sk_v6_rcv_saddr;
1235
1236		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1237		if (sk_is_mptcp(newsk))
1238			mptcpv6_handle_mapped(newsk, true);
1239		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
1240#ifdef CONFIG_TCP_MD5SIG
1241		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1242#endif
1243
1244		newnp->ipv6_mc_list = NULL;
1245		newnp->ipv6_ac_list = NULL;
1246		newnp->ipv6_fl_list = NULL;
1247		newnp->pktoptions  = NULL;
1248		newnp->opt	   = NULL;
1249		newnp->mcast_oif   = inet_iif(skb);
1250		newnp->mcast_hops  = ip_hdr(skb)->ttl;
1251		newnp->rcv_flowinfo = 0;
1252		if (np->repflow)
1253			newnp->flow_label = 0;
1254
1255		/*
1256		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1257		 * here, tcp_create_openreq_child now does this for us, see the comment in
1258		 * that function for the gory details. -acme
1259		 */
1260
 1261		/* It is a tricky place. Until this moment the IPv4 tcp
 1262		   worked with the IPv6 icsk.icsk_af_ops.
 1263		   Sync it now.
 1264		 */
1265		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
1266
1267		return newsk;
1268	}
1269
1270	ireq = inet_rsk(req);
1271
1272	if (sk_acceptq_is_full(sk))
1273		goto out_overflow;
1274
1275	if (!dst) {
1276		dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
1277		if (!dst)
1278			goto out;
1279	}
1280
1281	newsk = tcp_create_openreq_child(sk, req, skb);
1282	if (!newsk)
1283		goto out_nonewsk;
1284
1285	/*
1286	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1287	 * count here, tcp_create_openreq_child now does this for us, see the
1288	 * comment in that function for the gory details. -acme
1289	 */
1290
1291	newsk->sk_gso_type = SKB_GSO_TCPV6;
1292	ip6_dst_store(newsk, dst, NULL, NULL);
1293	inet6_sk_rx_dst_set(newsk, skb);
1294
1295	inet_sk(newsk)->pinet6 = tcp_inet6_sk(newsk);
1296
1297	newtp = tcp_sk(newsk);
1298	newinet = inet_sk(newsk);
1299	newnp = tcp_inet6_sk(newsk);
1300
1301	memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1302
1303	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
1304	newnp->saddr = ireq->ir_v6_loc_addr;
1305	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
1306	newsk->sk_bound_dev_if = ireq->ir_iif;
1307
1308	/* Now IPv6 options...
1309
1310	   First: no IPv4 options.
1311	 */
1312	newinet->inet_opt = NULL;
1313	newnp->ipv6_mc_list = NULL;
1314	newnp->ipv6_ac_list = NULL;
1315	newnp->ipv6_fl_list = NULL;
1316
1317	/* Clone RX bits */
1318	newnp->rxopt.all = np->rxopt.all;
1319
1320	newnp->pktoptions = NULL;
1321	newnp->opt	  = NULL;
1322	newnp->mcast_oif  = tcp_v6_iif(skb);
1323	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1324	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
1325	if (np->repflow)
1326		newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
1327
1328	/* Set ToS of the new socket based upon the value of incoming SYN.
1329	 * ECT bits are set later in tcp_init_transfer().
1330	 */
1331	if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reflect_tos))
1332		newnp->tclass = tcp_rsk(req)->syn_tos & ~INET_ECN_MASK;
1333
1334	/* Clone native IPv6 options from listening socket (if any)
1335
 1336	   Yes, keeping a reference count would be much more clever,
 1337	   but we do one more thing here: reattach optmem
1338	   to newsk.
1339	 */
1340	opt = ireq->ipv6_opt;
1341	if (!opt)
1342		opt = rcu_dereference(np->opt);
1343	if (opt) {
1344		opt = ipv6_dup_options(newsk, opt);
1345		RCU_INIT_POINTER(newnp->opt, opt);
1346	}
1347	inet_csk(newsk)->icsk_ext_hdr_len = 0;
1348	if (opt)
1349		inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
1350						    opt->opt_flen;
1351
1352	tcp_ca_openreq_child(newsk, dst);
1353
1354	tcp_sync_mss(newsk, dst_mtu(dst));
1355	newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));
1356
1357	tcp_initialize_rcv_mss(newsk);
1358
1359	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
1360	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
1361
1362#ifdef CONFIG_TCP_MD5SIG
1363	l3index = l3mdev_master_ifindex_by_index(sock_net(sk), ireq->ir_iif);
1364
1365	/* Copy over the MD5 key from the original socket */
1366	key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr, l3index);
1367	if (key) {
1368		const union tcp_md5_addr *addr;
1369
1370		addr = (union tcp_md5_addr *)&newsk->sk_v6_daddr;
1371		if (tcp_md5_key_copy(newsk, addr, AF_INET6, 128, l3index, key)) {
1372			inet_csk_prepare_forced_close(newsk);
1373			tcp_done(newsk);
1374			goto out;
1375		}
1376	}
1377#endif
1378
1379	if (__inet_inherit_port(sk, newsk) < 0) {
1380		inet_csk_prepare_forced_close(newsk);
1381		tcp_done(newsk);
1382		goto out;
1383	}
1384	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash),
1385				       &found_dup_sk);
1386	if (*own_req) {
1387		tcp_move_syn(newtp, req);
1388
1389		/* Clone pktoptions received with SYN, if we own the req */
1390		if (ireq->pktopts) {
1391			newnp->pktoptions = skb_clone_and_charge_r(ireq->pktopts, newsk);
1392			consume_skb(ireq->pktopts);
1393			ireq->pktopts = NULL;
1394			if (newnp->pktoptions)
1395				tcp_v6_restore_cb(newnp->pktoptions);
1396		}
1397	} else {
1398		if (!req_unhash && found_dup_sk) {
 1399			/* This code path should be executed in the
 1400			 * syncookie case only
 1401			 */
1402			bh_unlock_sock(newsk);
1403			sock_put(newsk);
1404			newsk = NULL;
1405		}
1406	}
1407
1408	return newsk;
1409
1410out_overflow:
1411	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1412out_nonewsk:
1413	dst_release(dst);
1414out:
1415	tcp_listendrop(sk);
1416	return NULL;
1417}
1418
1419INDIRECT_CALLABLE_DECLARE(struct dst_entry *ipv4_dst_check(struct dst_entry *,
1420							   u32));
 1421/* The socket must have its spinlock held when we get
1422 * here, unless it is a TCP_LISTEN socket.
1423 *
1424 * We have a potential double-lock case here, so even when
1425 * doing backlog processing we use the BH locking scheme.
1426 * This is because we cannot sleep with the original spinlock
1427 * held.
1428 */
1429INDIRECT_CALLABLE_SCOPE
1430int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1431{
1432	struct ipv6_pinfo *np = tcp_inet6_sk(sk);
1433	struct sk_buff *opt_skb = NULL;
1434	enum skb_drop_reason reason;
1435	struct tcp_sock *tp;
1436
 1437	/* Imagine: socket is IPv6. IPv4 packet arrives,
 1438	   goes to the IPv4 receive handler and is backlogged.
 1439	   From the backlog it always goes here. Kerboom...
 1440	   Fortunately, tcp_rcv_established and rcv_established
 1441	   handle them correctly, but it is not the case with
 1442	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
 1443	 */
1444
1445	if (skb->protocol == htons(ETH_P_IP))
1446		return tcp_v4_do_rcv(sk, skb);
1447
1448	/*
1449	 *	socket locking is here for SMP purposes as backlog rcv
1450	 *	is currently called with bh processing disabled.
1451	 */
1452
1453	/* Do Stevens' IPV6_PKTOPTIONS.
1454
 1455	   Yes, guys, it is the only place in our code where we
 1456	   may do this without affecting IPv4.
 1457	   The rest of the code is protocol independent,
 1458	   and I do not like the idea of uglifying IPv4.
 1459	
 1460	   Actually, the whole idea behind IPV6_PKTOPTIONS
 1461	   does not look very well thought out. For now we latch
 1462	   the options received in the last packet enqueued
 1463	   by tcp. Feel free to propose a better solution.
 1464					       --ANK (980728)
1465	 */
1466	if (np->rxopt.all)
1467		opt_skb = skb_clone_and_charge_r(skb, sk);
1468
1469	reason = SKB_DROP_REASON_NOT_SPECIFIED;
1470	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1471		struct dst_entry *dst;
1472
1473		dst = rcu_dereference_protected(sk->sk_rx_dst,
1474						lockdep_sock_is_held(sk));
1475
1476		sock_rps_save_rxhash(sk, skb);
1477		sk_mark_napi_id(sk, skb);
1478		if (dst) {
1479			if (sk->sk_rx_dst_ifindex != skb->skb_iif ||
1480			    INDIRECT_CALL_1(dst->ops->check, ip6_dst_check,
1481					    dst, sk->sk_rx_dst_cookie) == NULL) {
1482				RCU_INIT_POINTER(sk->sk_rx_dst, NULL);
1483				dst_release(dst);
1484			}
1485		}
1486
1487		tcp_rcv_established(sk, skb);
1488		if (opt_skb)
1489			goto ipv6_pktoptions;
1490		return 0;
1491	}
1492
1493	if (tcp_checksum_complete(skb))
1494		goto csum_err;
1495
1496	if (sk->sk_state == TCP_LISTEN) {
1497		struct sock *nsk = tcp_v6_cookie_check(sk, skb);
1498
1499		if (!nsk)
1500			goto discard;
1501
1502		if (nsk != sk) {
1503			if (tcp_child_process(sk, nsk, skb))
1504				goto reset;
1505			if (opt_skb)
1506				__kfree_skb(opt_skb);
1507			return 0;
1508		}
1509	} else
1510		sock_rps_save_rxhash(sk, skb);
1511
1512	if (tcp_rcv_state_process(sk, skb))
1513		goto reset;
1514	if (opt_skb)
1515		goto ipv6_pktoptions;
1516	return 0;
1517
1518reset:
1519	tcp_v6_send_reset(sk, skb);
1520discard:
1521	if (opt_skb)
1522		__kfree_skb(opt_skb);
1523	kfree_skb_reason(skb, reason);
1524	return 0;
1525csum_err:
1526	reason = SKB_DROP_REASON_TCP_CSUM;
1527	trace_tcp_bad_csum(skb);
1528	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1529	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
1530	goto discard;
1531
1532
1533ipv6_pktoptions:
 1534	/* Do you ask what this is?
 1535	
 1536	   1. skb was enqueued by tcp.
 1537	   2. skb is added to the tail of the read queue, rather than out of order.
 1538	   3. socket is not in passive state.
 1539	   4. Finally, it really contains options which the user wants to receive.
 1540	 */
1541	tp = tcp_sk(sk);
1542	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1543	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
1544		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
1545			np->mcast_oif = tcp_v6_iif(opt_skb);
1546		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1547			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
1548		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
1549			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
1550		if (np->repflow)
1551			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
1552		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
1553			tcp_v6_restore_cb(opt_skb);
1554			opt_skb = xchg(&np->pktoptions, opt_skb);
1555		} else {
1556			__kfree_skb(opt_skb);
1557			opt_skb = xchg(&np->pktoptions, NULL);
1558		}
1559	}
1560
1561	consume_skb(opt_skb);
1562	return 0;
1563}
1564
1565static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
1566			   const struct tcphdr *th)
1567{
 1568	/* This is tricky: we move IP6CB to its correct location inside
 1569	 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
1570	 * _decode_session6() uses IP6CB().
1571	 * barrier() makes sure compiler won't play aliasing games.
1572	 */
1573	memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
1574		sizeof(struct inet6_skb_parm));
1575	barrier();
1576
1577	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1578	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1579				    skb->len - th->doff*4);
1580	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1581	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1582	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1583	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1584	TCP_SKB_CB(skb)->sacked = 0;
1585	TCP_SKB_CB(skb)->has_rxtstamp =
1586			skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
1587}
1588
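/* Worked example for the end_seq computation above: a bare SYN with
 * th->doff == 5 and skb->len == 20 yields
 *
 *	end_seq = seq + 1 (SYN) + 0 (FIN) + (20 - 20) payload = seq + 1
 *
 * reflecting that SYN and FIN each occupy one unit of sequence space,
 * while pure ACKs (no flags, no data) leave end_seq == seq.
 */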
1589INDIRECT_CALLABLE_SCOPE int tcp_v6_rcv(struct sk_buff *skb)
1590{
1591	enum skb_drop_reason drop_reason;
1592	int sdif = inet6_sdif(skb);
1593	int dif = inet6_iif(skb);
1594	const struct tcphdr *th;
1595	const struct ipv6hdr *hdr;
1596	bool refcounted;
1597	struct sock *sk;
1598	int ret;
1599	struct net *net = dev_net(skb->dev);
1600
1601	drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;
1602	if (skb->pkt_type != PACKET_HOST)
1603		goto discard_it;
1604
1605	/*
1606	 *	Count it even if it's bad.
1607	 */
1608	__TCP_INC_STATS(net, TCP_MIB_INSEGS);
1609
1610	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1611		goto discard_it;
1612
1613	th = (const struct tcphdr *)skb->data;
1614
1615	if (unlikely(th->doff < sizeof(struct tcphdr) / 4)) {
1616		drop_reason = SKB_DROP_REASON_PKT_TOO_SMALL;
1617		goto bad_packet;
1618	}
1619	if (!pskb_may_pull(skb, th->doff*4))
1620		goto discard_it;
1621
1622	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
1623		goto csum_error;
1624
1625	th = (const struct tcphdr *)skb->data;
1626	hdr = ipv6_hdr(skb);
1627
1628lookup:
1629	sk = __inet6_lookup_skb(net->ipv4.tcp_death_row.hashinfo, skb, __tcp_hdrlen(th),
1630				th->source, th->dest, inet6_iif(skb), sdif,
1631				&refcounted);
1632	if (!sk)
1633		goto no_tcp_socket;
1634
1635process:
1636	if (sk->sk_state == TCP_TIME_WAIT)
1637		goto do_time_wait;
1638
1639	if (sk->sk_state == TCP_NEW_SYN_RECV) {
1640		struct request_sock *req = inet_reqsk(sk);
1641		bool req_stolen = false;
1642		struct sock *nsk;
1643
1644		sk = req->rsk_listener;
1645		drop_reason = tcp_inbound_md5_hash(sk, skb,
1646						   &hdr->saddr, &hdr->daddr,
1647						   AF_INET6, dif, sdif);
1648		if (drop_reason) {
1649			sk_drops_add(sk, skb);
1650			reqsk_put(req);
1651			goto discard_it;
1652		}
1653		if (tcp_checksum_complete(skb)) {
1654			reqsk_put(req);
1655			goto csum_error;
1656		}
1657		if (unlikely(sk->sk_state != TCP_LISTEN)) {
1658			nsk = reuseport_migrate_sock(sk, req_to_sk(req), skb);
1659			if (!nsk) {
1660				inet_csk_reqsk_queue_drop_and_put(sk, req);
1661				goto lookup;
1662			}
1663			sk = nsk;
1664			/* reuseport_migrate_sock() has already held one sk_refcnt
1665			 * before returning.
1666			 */
1667		} else {
1668			sock_hold(sk);
1669		}
1670		refcounted = true;
1671		nsk = NULL;
1672		if (!tcp_filter(sk, skb)) {
1673			th = (const struct tcphdr *)skb->data;
1674			hdr = ipv6_hdr(skb);
1675			tcp_v6_fill_cb(skb, hdr, th);
1676			nsk = tcp_check_req(sk, skb, req, false, &req_stolen);
1677		} else {
1678			drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
1679		}
1680		if (!nsk) {
1681			reqsk_put(req);
1682			if (req_stolen) {
1683				/* Another cpu got exclusive access to req
1684				 * and created a full blown socket.
1685				 * Try to feed this packet to this socket
1686				 * instead of discarding it.
1687				 */
1688				tcp_v6_restore_cb(skb);
1689				sock_put(sk);
1690				goto lookup;
1691			}
1692			goto discard_and_relse;
1693		}
1694		if (nsk == sk) {
1695			reqsk_put(req);
1696			tcp_v6_restore_cb(skb);
1697		} else if (tcp_child_process(sk, nsk, skb)) {
1698			tcp_v6_send_reset(nsk, skb);
1699			goto discard_and_relse;
1700		} else {
1701			sock_put(sk);
1702			return 0;
1703		}
1704	}
1705
1706	if (static_branch_unlikely(&ip6_min_hopcount)) {
1707		/* min_hopcount can be changed concurrently from do_ipv6_setsockopt() */
1708		if (hdr->hop_limit < READ_ONCE(tcp_inet6_sk(sk)->min_hopcount)) {
1709			__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
1710			goto discard_and_relse;
1711		}
1712	}
1713
1714	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
1715		drop_reason = SKB_DROP_REASON_XFRM_POLICY;
1716		goto discard_and_relse;
1717	}
1718
1719	drop_reason = tcp_inbound_md5_hash(sk, skb, &hdr->saddr, &hdr->daddr,
1720					   AF_INET6, dif, sdif);
1721	if (drop_reason)
1722		goto discard_and_relse;
1723
1724	if (tcp_filter(sk, skb)) {
1725		drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
1726		goto discard_and_relse;
1727	}
1728	th = (const struct tcphdr *)skb->data;
1729	hdr = ipv6_hdr(skb);
1730	tcp_v6_fill_cb(skb, hdr, th);
1731
1732	skb->dev = NULL;
1733
1734	if (sk->sk_state == TCP_LISTEN) {
1735		ret = tcp_v6_do_rcv(sk, skb);
1736		goto put_and_return;
1737	}
1738
1739	sk_incoming_cpu_update(sk);
1740
1741	bh_lock_sock_nested(sk);
1742	tcp_segs_in(tcp_sk(sk), skb);
1743	ret = 0;
1744	if (!sock_owned_by_user(sk)) {
1745		ret = tcp_v6_do_rcv(sk, skb);
1746	} else {
1747		if (tcp_add_backlog(sk, skb, &drop_reason))
1748			goto discard_and_relse;
1749	}
1750	bh_unlock_sock(sk);
1751put_and_return:
1752	if (refcounted)
1753		sock_put(sk);
1754	return ret ? -1 : 0;
1755
1756no_tcp_socket:
1757	drop_reason = SKB_DROP_REASON_NO_SOCKET;
1758	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1759		goto discard_it;
1760
1761	tcp_v6_fill_cb(skb, hdr, th);
1762
1763	if (tcp_checksum_complete(skb)) {
1764csum_error:
1765		drop_reason = SKB_DROP_REASON_TCP_CSUM;
1766		trace_tcp_bad_csum(skb);
1767		__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
1768bad_packet:
1769		__TCP_INC_STATS(net, TCP_MIB_INERRS);
1770	} else {
1771		tcp_v6_send_reset(NULL, skb);
1772	}
1773
1774discard_it:
1775	SKB_DR_OR(drop_reason, NOT_SPECIFIED);
1776	kfree_skb_reason(skb, drop_reason);
1777	return 0;
1778
1779discard_and_relse:
1780	sk_drops_add(sk, skb);
1781	if (refcounted)
1782		sock_put(sk);
1783	goto discard_it;
1784
1785do_time_wait:
1786	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1787		drop_reason = SKB_DROP_REASON_XFRM_POLICY;
1788		inet_twsk_put(inet_twsk(sk));
1789		goto discard_it;
1790	}
1791
1792	tcp_v6_fill_cb(skb, hdr, th);
1793
1794	if (tcp_checksum_complete(skb)) {
1795		inet_twsk_put(inet_twsk(sk));
1796		goto csum_error;
1797	}
1798
1799	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1800	case TCP_TW_SYN:
1801	{
1802		struct sock *sk2;
1803
1804		sk2 = inet6_lookup_listener(net, net->ipv4.tcp_death_row.hashinfo,
1805					    skb, __tcp_hdrlen(th),
1806					    &ipv6_hdr(skb)->saddr, th->source,
1807					    &ipv6_hdr(skb)->daddr,
1808					    ntohs(th->dest),
1809					    tcp_v6_iif_l3_slave(skb),
1810					    sdif);
1811		if (sk2) {
1812			struct inet_timewait_sock *tw = inet_twsk(sk);
1813			inet_twsk_deschedule_put(tw);
1814			sk = sk2;
1815			tcp_v6_restore_cb(skb);
1816			refcounted = false;
1817			goto process;
1818		}
1819	}
1820		/* to ACK */
1821		fallthrough;
1822	case TCP_TW_ACK:
1823		tcp_v6_timewait_ack(sk, skb);
1824		break;
1825	case TCP_TW_RST:
1826		tcp_v6_send_reset(sk, skb);
1827		inet_twsk_deschedule_put(inet_twsk(sk));
1828		goto discard_it;
1829	case TCP_TW_SUCCESS:
1830		;
1831	}
1832	goto discard_it;
1833}
1834
1835void tcp_v6_early_demux(struct sk_buff *skb)
1836{
1837	struct net *net = dev_net(skb->dev);
1838	const struct ipv6hdr *hdr;
1839	const struct tcphdr *th;
1840	struct sock *sk;
1841
1842	if (skb->pkt_type != PACKET_HOST)
1843		return;
1844
1845	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1846		return;
1847
1848	hdr = ipv6_hdr(skb);
1849	th = tcp_hdr(skb);
1850
1851	if (th->doff < sizeof(struct tcphdr) / 4)
1852		return;
1853
1854	/* Note : We use inet6_iif() here, not tcp_v6_iif() */
1855	sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
1856					&hdr->saddr, th->source,
1857					&hdr->daddr, ntohs(th->dest),
1858					inet6_iif(skb), inet6_sdif(skb));
1859	if (sk) {
1860		skb->sk = sk;
1861		skb->destructor = sock_edemux;
1862		if (sk_fullsock(sk)) {
1863			struct dst_entry *dst = rcu_dereference(sk->sk_rx_dst);
1864
1865			if (dst)
1866				dst = dst_check(dst, sk->sk_rx_dst_cookie);
1867			if (dst &&
1868			    sk->sk_rx_dst_ifindex == skb->skb_iif)
1869				skb_dst_set_noref(skb, dst);
1870		}
1871	}
1872}
1873
1874static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1875	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
1876	.twsk_unique	= tcp_twsk_unique,
1877	.twsk_destructor = tcp_twsk_destructor,
 
1878};
1879
1880INDIRECT_CALLABLE_SCOPE void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
1881{
1882	__tcp_v6_send_check(skb, &sk->sk_v6_rcv_saddr, &sk->sk_v6_daddr);
1883}
1884
1885const struct inet_connection_sock_af_ops ipv6_specific = {
1886	.queue_xmit	   = inet6_csk_xmit,
1887	.send_check	   = tcp_v6_send_check,
1888	.rebuild_header	   = inet6_sk_rebuild_header,
1889	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
1890	.conn_request	   = tcp_v6_conn_request,
1891	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
 
1892	.net_header_len	   = sizeof(struct ipv6hdr),
1893	.net_frag_header_len = sizeof(struct frag_hdr),
1894	.setsockopt	   = ipv6_setsockopt,
1895	.getsockopt	   = ipv6_getsockopt,
1896	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
1897	.sockaddr_len	   = sizeof(struct sockaddr_in6),
1898	.mtu_reduced	   = tcp_v6_mtu_reduced,
 
 
 
 
1899};
1900
1901#ifdef CONFIG_TCP_MD5SIG
1902static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
1903	.md5_lookup	=	tcp_v6_md5_lookup,
1904	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
1905	.md5_parse	=	tcp_v6_parse_md5_keys,
1906};
1907#endif
1908
1909/*
1910 *	TCP over IPv4 via INET6 API
1911 */
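/* ipv6_mapped is installed on an AF_INET6 socket once it connects to an
 * IPv4-mapped address (::ffff:a.b.c.d): transmission and header handling
 * then use the IPv4 routines, while the socket keeps its IPv6 sockaddr
 * format and setsockopt/getsockopt interface.
 */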
 
1912static const struct inet_connection_sock_af_ops ipv6_mapped = {
1913	.queue_xmit	   = ip_queue_xmit,
1914	.send_check	   = tcp_v4_send_check,
1915	.rebuild_header	   = inet_sk_rebuild_header,
1916	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
1917	.conn_request	   = tcp_v6_conn_request,
1918	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
 
1919	.net_header_len	   = sizeof(struct iphdr),
1920	.setsockopt	   = ipv6_setsockopt,
1921	.getsockopt	   = ipv6_getsockopt,
1922	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
1923	.sockaddr_len	   = sizeof(struct sockaddr_in6),
1924	.mtu_reduced	   = tcp_v4_mtu_reduced,
 
 
 
 
1925};
1926
1927#ifdef CONFIG_TCP_MD5SIG
1928static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
1929	.md5_lookup	=	tcp_v4_md5_lookup,
1930	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
1931	.md5_parse	=	tcp_v6_parse_md5_keys,
1932};
1933#endif
1934
1935/* NOTE: A lot of things are set to zero explicitly by the call to
1936 *       sk_alloc(), so they need not be done here.
1937 */
1938static int tcp_v6_init_sock(struct sock *sk)
1939{
1940	struct inet_connection_sock *icsk = inet_csk(sk);
1941
1942	tcp_init_sock(sk);
1943
1944	icsk->icsk_af_ops = &ipv6_specific;
1945
1946#ifdef CONFIG_TCP_MD5SIG
1947	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
1948#endif
1949
1950	return 0;
1951}
1952
 
 
 
 
 
 
1953#ifdef CONFIG_PROC_FS
1954/* Proc filesystem TCPv6 sock list dumping. */
1955static void get_openreq6(struct seq_file *seq,
1956			 const struct request_sock *req, int i)
1957{
1958	long ttd = req->rsk_timer.expires - jiffies;
1959	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
1960	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;
1961
1962	if (ttd < 0)
1963		ttd = 0;
1964
1965	seq_printf(seq,
1966		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1967		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
1968		   i,
1969		   src->s6_addr32[0], src->s6_addr32[1],
1970		   src->s6_addr32[2], src->s6_addr32[3],
1971		   inet_rsk(req)->ir_num,
1972		   dest->s6_addr32[0], dest->s6_addr32[1],
1973		   dest->s6_addr32[2], dest->s6_addr32[3],
1974		   ntohs(inet_rsk(req)->ir_rmt_port),
1975		   TCP_SYN_RECV,
1976		   0, 0, /* could print option size, but that is af dependent. */
1977		   1,   /* timers active (only the expire timer) */
1978		   jiffies_to_clock_t(ttd),
1979		   req->num_timeout,
1980		   from_kuid_munged(seq_user_ns(seq),
1981				    sock_i_uid(req->rsk_listener)),
1982		   0,  /* non standard timer */
1983		   0, /* open_requests have no inode */
1984		   0, req);
1985}
1986
1987static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1988{
1989	const struct in6_addr *dest, *src;
1990	__u16 destp, srcp;
1991	int timer_active;
1992	unsigned long timer_expires;
1993	const struct inet_sock *inet = inet_sk(sp);
1994	const struct tcp_sock *tp = tcp_sk(sp);
1995	const struct inet_connection_sock *icsk = inet_csk(sp);
1996	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
1997	int rx_queue;
1998	int state;
1999
2000	dest  = &sp->sk_v6_daddr;
2001	src   = &sp->sk_v6_rcv_saddr;
2002	destp = ntohs(inet->inet_dport);
2003	srcp  = ntohs(inet->inet_sport);
2004
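	/*
	 * timer_active uses the same codes as /proc/net/tcp: 1 for the
	 * retransmit-type timers (retransmit, reordering, loss probe),
	 * 4 for the zero-window probe timer, 2 for the keepalive timer
	 * (sk_timer), 0 when nothing is pending.  TIME-WAIT entries report
	 * 3 from get_timewait6_sock().
	 */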
2005	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
2006	    icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
2007	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
2008		timer_active	= 1;
2009		timer_expires	= icsk->icsk_timeout;
2010	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2011		timer_active	= 4;
2012		timer_expires	= icsk->icsk_timeout;
2013	} else if (timer_pending(&sp->sk_timer)) {
2014		timer_active	= 2;
2015		timer_expires	= sp->sk_timer.expires;
2016	} else {
2017		timer_active	= 0;
2018		timer_expires = jiffies;
2019	}
2020
2021	state = inet_sk_state_load(sp);
2022	if (state == TCP_LISTEN)
2023		rx_queue = READ_ONCE(sp->sk_ack_backlog);
2024	else
2025		/* Because we don't lock the socket,
2026		 * we might find a transient negative value.
2027		 */
2028		rx_queue = max_t(int, READ_ONCE(tp->rcv_nxt) -
2029				      READ_ONCE(tp->copied_seq), 0);
2030
2031	seq_printf(seq,
2032		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2033		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
2034		   i,
2035		   src->s6_addr32[0], src->s6_addr32[1],
2036		   src->s6_addr32[2], src->s6_addr32[3], srcp,
2037		   dest->s6_addr32[0], dest->s6_addr32[1],
2038		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
2039		   state,
2040		   READ_ONCE(tp->write_seq) - tp->snd_una,
2041		   rx_queue,
2042		   timer_active,
2043		   jiffies_delta_to_clock_t(timer_expires - jiffies),
2044		   icsk->icsk_retransmits,
2045		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
2046		   icsk->icsk_probes_out,
2047		   sock_i_ino(sp),
2048		   refcount_read(&sp->sk_refcnt), sp,
2049		   jiffies_to_clock_t(icsk->icsk_rto),
2050		   jiffies_to_clock_t(icsk->icsk_ack.ato),
2051		   (icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(sp),
2052		   tcp_snd_cwnd(tp),
2053		   state == TCP_LISTEN ?
2054			fastopenq->max_qlen :
2055			(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
2056		   );
2057}
2058
2059static void get_timewait6_sock(struct seq_file *seq,
2060			       struct inet_timewait_sock *tw, int i)
2061{
2062	long delta = tw->tw_timer.expires - jiffies;
2063	const struct in6_addr *dest, *src;
2064	__u16 destp, srcp;
 
 
 
 
 
2065
2066	dest = &tw->tw_v6_daddr;
2067	src  = &tw->tw_v6_rcv_saddr;
2068	destp = ntohs(tw->tw_dport);
2069	srcp  = ntohs(tw->tw_sport);
2070
2071	seq_printf(seq,
2072		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2073		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
2074		   i,
2075		   src->s6_addr32[0], src->s6_addr32[1],
2076		   src->s6_addr32[2], src->s6_addr32[3], srcp,
2077		   dest->s6_addr32[0], dest->s6_addr32[1],
2078		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
2079		   tw->tw_substate, 0, 0,
2080		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
2081		   refcount_read(&tw->tw_refcnt), tw);
2082}
2083
2084static int tcp6_seq_show(struct seq_file *seq, void *v)
2085{
2086	struct tcp_iter_state *st;
2087	struct sock *sk = v;
2088
2089	if (v == SEQ_START_TOKEN) {
2090		seq_puts(seq,
2091			 "  sl  "
2092			 "local_address                         "
2093			 "remote_address                        "
2094			 "st tx_queue rx_queue tr tm->when retrnsmt"
2095			 "   uid  timeout inode\n");
2096		goto out;
2097	}
2098	st = seq->private;
2099
2100	if (sk->sk_state == TCP_TIME_WAIT)
2101		get_timewait6_sock(seq, v, st->num);
2102	else if (sk->sk_state == TCP_NEW_SYN_RECV)
2103		get_openreq6(seq, v, st->num);
2104	else
2105		get_tcp6_sock(seq, v, st->num);
 
 
 
 
 
 
 
 
2106out:
2107	return 0;
2108}
2109
2110static const struct seq_operations tcp6_seq_ops = {
2111	.show		= tcp6_seq_show,
2112	.start		= tcp_seq_start,
2113	.next		= tcp_seq_next,
2114	.stop		= tcp_seq_stop,
 
2115};
2116
2117static struct tcp_seq_afinfo tcp6_seq_afinfo = {
 
2118	.family		= AF_INET6,
 
 
 
 
2119};
2120
2121int __net_init tcp6_proc_init(struct net *net)
2122{
2123	if (!proc_create_net_data("tcp6", 0444, net->proc_net, &tcp6_seq_ops,
2124			sizeof(struct tcp_iter_state), &tcp6_seq_afinfo))
2125		return -ENOMEM;
2126	return 0;
2127}
2128
2129void tcp6_proc_exit(struct net *net)
2130{
2131	remove_proc_entry("tcp6", net->proc_net);
2132}
2133#endif
2134
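/* tcpv6_prot is the socket-level protocol ops table for IPPROTO_TCP on
 * PF_INET6; it is attached to SOCK_STREAM sockets through tcpv6_protosw
 * below.  Most handlers are shared with IPv4 TCP; mainly the connect,
 * init, backlog receive and hashing entries are IPv6 specific.
 */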
2135struct proto tcpv6_prot = {
2136	.name			= "TCPv6",
2137	.owner			= THIS_MODULE,
2138	.close			= tcp_close,
2139	.pre_connect		= tcp_v6_pre_connect,
2140	.connect		= tcp_v6_connect,
2141	.disconnect		= tcp_disconnect,
2142	.accept			= inet_csk_accept,
2143	.ioctl			= tcp_ioctl,
2144	.init			= tcp_v6_init_sock,
2145	.destroy		= tcp_v4_destroy_sock,
2146	.shutdown		= tcp_shutdown,
2147	.setsockopt		= tcp_setsockopt,
2148	.getsockopt		= tcp_getsockopt,
2149	.bpf_bypass_getsockopt	= tcp_bpf_bypass_getsockopt,
2150	.keepalive		= tcp_set_keepalive,
2151	.recvmsg		= tcp_recvmsg,
2152	.sendmsg		= tcp_sendmsg,
2153	.sendpage		= tcp_sendpage,
2154	.backlog_rcv		= tcp_v6_do_rcv,
2155	.release_cb		= tcp_release_cb,
2156	.hash			= inet6_hash,
2157	.unhash			= inet_unhash,
2158	.get_port		= inet_csk_get_port,
2159	.put_port		= inet_put_port,
2160#ifdef CONFIG_BPF_SYSCALL
2161	.psock_update_sk_prot	= tcp_bpf_update_proto,
2162#endif
2163	.enter_memory_pressure	= tcp_enter_memory_pressure,
2164	.leave_memory_pressure	= tcp_leave_memory_pressure,
2165	.stream_memory_free	= tcp_stream_memory_free,
2166	.sockets_allocated	= &tcp_sockets_allocated,
2167
2168	.memory_allocated	= &tcp_memory_allocated,
2169	.per_cpu_fw_alloc	= &tcp_memory_per_cpu_fw_alloc,
2170
2171	.memory_pressure	= &tcp_memory_pressure,
2172	.orphan_count		= &tcp_orphan_count,
2173	.sysctl_mem		= sysctl_tcp_mem,
2174	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_wmem),
2175	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_rmem),
2176	.max_header		= MAX_TCP_HEADER,
2177	.obj_size		= sizeof(struct tcp6_sock),
2178	.slab_flags		= SLAB_TYPESAFE_BY_RCU,
2179	.twsk_prot		= &tcp6_timewait_sock_ops,
2180	.rsk_prot		= &tcp6_request_sock_ops,
2181	.h.hashinfo		= NULL,
2182	.no_autobind		= true,
2183	.diag_destroy		= tcp_abort,
 
 
 
 
 
 
2184};
2185EXPORT_SYMBOL_GPL(tcpv6_prot);
2186
2187static const struct inet6_protocol tcpv6_protocol = {
2188	.handler	=	tcp_v6_rcv,
2189	.err_handler	=	tcp_v6_err,
 
 
 
 
2190	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
2191};
2192
2193static struct inet_protosw tcpv6_protosw = {
2194	.type		=	SOCK_STREAM,
2195	.protocol	=	IPPROTO_TCP,
2196	.prot		=	&tcpv6_prot,
2197	.ops		=	&inet6_stream_ops,
 
2198	.flags		=	INET_PROTOSW_PERMANENT |
2199				INET_PROTOSW_ICSK,
2200};
2201
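/* Each network namespace owns a kernel control socket (net->ipv6.tcp_sk).
 * It is used when sending replies such as RSTs and TIME-WAIT ACKs that are
 * not associated with any user socket (see tcp_v6_send_response()).
 */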
2202static int __net_init tcpv6_net_init(struct net *net)
2203{
2204	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
2205				    SOCK_RAW, IPPROTO_TCP, net);
2206}
2207
2208static void __net_exit tcpv6_net_exit(struct net *net)
2209{
2210	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
2211}
2212
2213static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
2214{
2215	tcp_twsk_purge(net_exit_list, AF_INET6);
2216}
2217
2218static struct pernet_operations tcpv6_net_ops = {
2219	.init	    = tcpv6_net_init,
2220	.exit	    = tcpv6_net_exit,
2221	.exit_batch = tcpv6_net_exit_batch,
2222};
2223
2224int __init tcpv6_init(void)
2225{
2226	int ret;
2227
2228	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
2229	if (ret)
2230		goto out;
2231
2232	/* register inet6 protocol */
2233	ret = inet6_register_protosw(&tcpv6_protosw);
2234	if (ret)
2235		goto out_tcpv6_protocol;
2236
2237	ret = register_pernet_subsys(&tcpv6_net_ops);
2238	if (ret)
2239		goto out_tcpv6_protosw;
2240
2241	ret = mptcpv6_init();
2242	if (ret)
2243		goto out_tcpv6_pernet_subsys;
2244
2245out:
2246	return ret;
2247
2248out_tcpv6_pernet_subsys:
2249	unregister_pernet_subsys(&tcpv6_net_ops);
2250out_tcpv6_protosw:
2251	inet6_unregister_protosw(&tcpv6_protosw);
2252out_tcpv6_protocol:
2253	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
 
 
2254	goto out;
2255}
2256
2257void tcpv6_exit(void)
2258{
2259	unregister_pernet_subsys(&tcpv6_net_ops);
2260	inet6_unregister_protosw(&tcpv6_protosw);
2261	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2262}
v3.5.6
 
   1/*
   2 *	TCP over IPv6
   3 *	Linux INET6 implementation
   4 *
   5 *	Authors:
   6 *	Pedro Roque		<roque@di.fc.ul.pt>
   7 *
   8 *	Based on:
   9 *	linux/net/ipv4/tcp.c
  10 *	linux/net/ipv4/tcp_input.c
  11 *	linux/net/ipv4/tcp_output.c
  12 *
  13 *	Fixes:
  14 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
  15 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
  16 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
  17 *					a single port at the same time.
  18 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
  19 *
  20 *	This program is free software; you can redistribute it and/or
  21 *      modify it under the terms of the GNU General Public License
  22 *      as published by the Free Software Foundation; either version
  23 *      2 of the License, or (at your option) any later version.
  24 */
  25
  26#include <linux/bottom_half.h>
  27#include <linux/module.h>
  28#include <linux/errno.h>
  29#include <linux/types.h>
  30#include <linux/socket.h>
  31#include <linux/sockios.h>
  32#include <linux/net.h>
  33#include <linux/jiffies.h>
  34#include <linux/in.h>
  35#include <linux/in6.h>
  36#include <linux/netdevice.h>
  37#include <linux/init.h>
  38#include <linux/jhash.h>
  39#include <linux/ipsec.h>
  40#include <linux/times.h>
  41#include <linux/slab.h>
  42
  43#include <linux/ipv6.h>
  44#include <linux/icmpv6.h>
  45#include <linux/random.h>
 
  46
  47#include <net/tcp.h>
  48#include <net/ndisc.h>
  49#include <net/inet6_hashtables.h>
  50#include <net/inet6_connection_sock.h>
  51#include <net/ipv6.h>
  52#include <net/transp_v6.h>
  53#include <net/addrconf.h>
  54#include <net/ip6_route.h>
  55#include <net/ip6_checksum.h>
  56#include <net/inet_ecn.h>
  57#include <net/protocol.h>
  58#include <net/xfrm.h>
  59#include <net/snmp.h>
  60#include <net/dsfield.h>
  61#include <net/timewait_sock.h>
  62#include <net/netdma.h>
  63#include <net/inet_common.h>
  64#include <net/secure_seq.h>
  65#include <net/tcp_memcontrol.h>
  66
  67#include <asm/uaccess.h>
  68
  69#include <linux/proc_fs.h>
  70#include <linux/seq_file.h>
  71
  72#include <linux/crypto.h>
  73#include <linux/scatterlist.h>
  74
  75static void	tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
  76static void	tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
 
 
  77				      struct request_sock *req);
  78
  79static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
  80static void	__tcp_v6_send_check(struct sk_buff *skb,
  81				    const struct in6_addr *saddr,
  82				    const struct in6_addr *daddr);
  83
  84static const struct inet_connection_sock_af_ops ipv6_mapped;
  85static const struct inet_connection_sock_af_ops ipv6_specific;
  86#ifdef CONFIG_TCP_MD5SIG
  87static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
  88static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
  89#else
  90static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
  91						   const struct in6_addr *addr)
 
  92{
  93	return NULL;
  94}
  95#endif
  96
  97static void tcp_v6_hash(struct sock *sk)
 
 
 
 
 
  98{
  99	if (sk->sk_state != TCP_CLOSE) {
 100		if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
 101			tcp_prot.hash(sk);
 102			return;
 103		}
 104		local_bh_disable();
 105		__inet6_hash(sk, NULL);
 106		local_bh_enable();
 
 
 
 
 
 
 
 107	}
 108}
 109
 110static __inline__ __sum16 tcp_v6_check(int len,
 111				   const struct in6_addr *saddr,
 112				   const struct in6_addr *daddr,
 113				   __wsum base)
 
 
 
 
 
 114{
 115	return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
 
 116}
 117
 118static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
 
 119{
 120	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
 121					    ipv6_hdr(skb)->saddr.s6_addr32,
 122					    tcp_hdr(skb)->dest,
 123					    tcp_hdr(skb)->source);
 
 
 
 
 
 
 124}
 125
 126static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 127			  int addr_len)
 128{
 129	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
 
 
 
 
 130	struct inet_sock *inet = inet_sk(sk);
 131	struct inet_connection_sock *icsk = inet_csk(sk);
 132	struct ipv6_pinfo *np = inet6_sk(sk);
 133	struct tcp_sock *tp = tcp_sk(sk);
 134	struct in6_addr *saddr = NULL, *final_p, final;
 135	struct rt6_info *rt;
 
 136	struct flowi6 fl6;
 137	struct dst_entry *dst;
 138	int addr_type;
 139	int err;
 140
 141	if (addr_len < SIN6_LEN_RFC2133)
 142		return -EINVAL;
 143
 144	if (usin->sin6_family != AF_INET6)
 145		return -EAFNOSUPPORT;
 146
 147	memset(&fl6, 0, sizeof(fl6));
 148
 149	if (np->sndflow) {
 150		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
 151		IP6_ECN_flow_init(fl6.flowlabel);
 152		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
 153			struct ip6_flowlabel *flowlabel;
 154			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
 155			if (flowlabel == NULL)
 156				return -EINVAL;
 157			usin->sin6_addr = flowlabel->dst;
 158			fl6_sock_release(flowlabel);
 159		}
 160	}
 161
 162	/*
 163	 *	connect() to INADDR_ANY means loopback (BSD'ism).
 164	 */
 165
  166	if (ipv6_addr_any(&usin->sin6_addr))
 167		usin->sin6_addr.s6_addr[15] = 0x1;
 
 
 
 
 
 168
 169	addr_type = ipv6_addr_type(&usin->sin6_addr);
 170
  171	if (addr_type & IPV6_ADDR_MULTICAST)
 172		return -ENETUNREACH;
 173
 174	if (addr_type&IPV6_ADDR_LINKLOCAL) {
 175		if (addr_len >= sizeof(struct sockaddr_in6) &&
 176		    usin->sin6_scope_id) {
 177			/* If interface is set while binding, indices
 178			 * must coincide.
 179			 */
 180			if (sk->sk_bound_dev_if &&
 181			    sk->sk_bound_dev_if != usin->sin6_scope_id)
 182				return -EINVAL;
 183
 184			sk->sk_bound_dev_if = usin->sin6_scope_id;
 185		}
 186
 187		/* Connect to link-local address requires an interface */
 188		if (!sk->sk_bound_dev_if)
 189			return -EINVAL;
 190	}
 191
 192	if (tp->rx_opt.ts_recent_stamp &&
 193	    !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
 194		tp->rx_opt.ts_recent = 0;
 195		tp->rx_opt.ts_recent_stamp = 0;
 196		tp->write_seq = 0;
 197	}
 198
 199	np->daddr = usin->sin6_addr;
 200	np->flow_label = fl6.flowlabel;
 201
 202	/*
 203	 *	TCP over IPv4
 204	 */
 205
 206	if (addr_type == IPV6_ADDR_MAPPED) {
 207		u32 exthdrlen = icsk->icsk_ext_hdr_len;
 208		struct sockaddr_in sin;
 209
 210		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
 211
 212		if (__ipv6_only_sock(sk))
 213			return -ENETUNREACH;
 214
 215		sin.sin_family = AF_INET;
 216		sin.sin_port = usin->sin6_port;
 217		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
 218
 219		icsk->icsk_af_ops = &ipv6_mapped;
 
 
 
 220		sk->sk_backlog_rcv = tcp_v4_do_rcv;
 221#ifdef CONFIG_TCP_MD5SIG
 222		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
 223#endif
 224
 225		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
 226
 227		if (err) {
 228			icsk->icsk_ext_hdr_len = exthdrlen;
 229			icsk->icsk_af_ops = &ipv6_specific;
 
 
 
 230			sk->sk_backlog_rcv = tcp_v6_do_rcv;
 231#ifdef CONFIG_TCP_MD5SIG
 232			tp->af_specific = &tcp_sock_ipv6_specific;
 233#endif
 234			goto failure;
 235		} else {
 236			ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
 237			ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
 238					       &np->rcv_saddr);
 239		}
 
 240
 241		return err;
 242	}
 243
 244	if (!ipv6_addr_any(&np->rcv_saddr))
 245		saddr = &np->rcv_saddr;
 246
 247	fl6.flowi6_proto = IPPROTO_TCP;
 248	fl6.daddr = np->daddr;
 249	fl6.saddr = saddr ? *saddr : np->saddr;
 
 250	fl6.flowi6_oif = sk->sk_bound_dev_if;
 251	fl6.flowi6_mark = sk->sk_mark;
 252	fl6.fl6_dport = usin->sin6_port;
 253	fl6.fl6_sport = inet->inet_sport;
 
 254
 255	final_p = fl6_update_dst(&fl6, np->opt, &final);
 
 256
 257	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
 258
 259	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true);
 260	if (IS_ERR(dst)) {
 261		err = PTR_ERR(dst);
 262		goto failure;
 263	}
 264
 265	if (saddr == NULL) {
 
 
 266		saddr = &fl6.saddr;
 267		np->rcv_saddr = *saddr;
 
 
 
 268	}
 269
 270	/* set the source address */
 271	np->saddr = *saddr;
 272	inet->inet_rcv_saddr = LOOPBACK4_IPV6;
 273
 274	sk->sk_gso_type = SKB_GSO_TCPV6;
 275	__ip6_dst_store(sk, dst, NULL, NULL);
 276
 277	rt = (struct rt6_info *) dst;
 278	if (tcp_death_row.sysctl_tw_recycle &&
 279	    !tp->rx_opt.ts_recent_stamp &&
 280	    ipv6_addr_equal(&rt->rt6i_dst.addr, &np->daddr)) {
 281		struct inet_peer *peer = rt6_get_peer(rt);
 282		/*
  283		 * VJ's idea. We save the last timestamp seen from
  284		 * the destination in the peer table when entering
  285		 * TIME-WAIT state, and initialize rx_opt.ts_recent from it
  286		 * when trying a new connection.
 287		 */
 288		if (peer) {
 289			inet_peer_refcheck(peer);
 290			if ((u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
 291				tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
 292				tp->rx_opt.ts_recent = peer->tcp_ts;
 293			}
 294		}
 295	}
 296
 297	icsk->icsk_ext_hdr_len = 0;
 298	if (np->opt)
 299		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
 300					  np->opt->opt_nflen);
 301
 302	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
 303
 304	inet->inet_dport = usin->sin6_port;
 305
 306	tcp_set_state(sk, TCP_SYN_SENT);
 307	err = inet6_hash_connect(&tcp_death_row, sk);
 308	if (err)
 309		goto late_failure;
 310
 311	if (!tp->write_seq)
 312		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
 313							     np->daddr.s6_addr32,
 314							     inet->inet_sport,
 315							     inet->inet_dport);
 316
 317	err = tcp_connect(sk);
 318	if (err)
 319		goto late_failure;
 320
 321	return 0;
 322
 323late_failure:
 324	tcp_set_state(sk, TCP_CLOSE);
 325	__sk_dst_reset(sk);
 326failure:
 327	inet->inet_dport = 0;
 328	sk->sk_route_caps = 0;
 329	return err;
 330}
 331
 332static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 333		u8 type, u8 code, int offset, __be32 info)
 334{
 335	const struct ipv6hdr *hdr = (const struct ipv6hdr*)skb->data;
 336	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
 
 
 337	struct ipv6_pinfo *np;
 
 
 338	struct sock *sk;
 
 339	int err;
 340	struct tcp_sock *tp;
 341	__u32 seq;
 342	struct net *net = dev_net(skb->dev);
 343
 344	sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
 345			th->dest, &hdr->saddr, th->source, skb->dev->ifindex);
 346
 347	if (sk == NULL) {
 348		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
 349				   ICMP6_MIB_INERRORS);
 350		return;
 
 
 351	}
 352
 353	if (sk->sk_state == TCP_TIME_WAIT) {
 354		inet_twsk_put(inet_twsk(sk));
 355		return;
 
 
 
 
 
 
 356	}
 357
 358	bh_lock_sock(sk);
 359	if (sock_owned_by_user(sk))
 360		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
 361
 362	if (sk->sk_state == TCP_CLOSE)
 363		goto out;
 364
 365	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
 366		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
 367		goto out;
 
 
 
 368	}
 369
 370	tp = tcp_sk(sk);
 371	seq = ntohl(th->seq);
 
 
 372	if (sk->sk_state != TCP_LISTEN &&
 373	    !between(seq, tp->snd_una, tp->snd_nxt)) {
 374		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
 375		goto out;
 376	}
 377
 378	np = inet6_sk(sk);
 
 
 
 
 
 
 
 
 
 
 379
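	/*
	 * Path MTU discovery: a Packet Too Big message should already have
	 * updated the cached route, so re-validate (or re-create) the dst
	 * entry, then shrink the MSS with tcp_sync_mss() and retransmit the
	 * outstanding data via tcp_simple_retransmit().
	 */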
 380	if (type == ICMPV6_PKT_TOOBIG) {
 381		struct dst_entry *dst;
 382
 383		if (sock_owned_by_user(sk))
 
 
 
 
 384			goto out;
 385		if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
 
 386			goto out;
 387
 388		/* icmp should have updated the destination cache entry */
 389		dst = __sk_dst_check(sk, np->dst_cookie);
 390
 391		if (dst == NULL) {
 392			struct inet_sock *inet = inet_sk(sk);
 393			struct flowi6 fl6;
 394
 395			/* BUGGG_FUTURE: Again, it is not clear how
 396			   to handle rthdr case. Ignore this complexity
 397			   for now.
 398			 */
 399			memset(&fl6, 0, sizeof(fl6));
 400			fl6.flowi6_proto = IPPROTO_TCP;
 401			fl6.daddr = np->daddr;
 402			fl6.saddr = np->saddr;
 403			fl6.flowi6_oif = sk->sk_bound_dev_if;
 404			fl6.flowi6_mark = sk->sk_mark;
 405			fl6.fl6_dport = inet->inet_dport;
 406			fl6.fl6_sport = inet->inet_sport;
 407			security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
 408
 409			dst = ip6_dst_lookup_flow(sk, &fl6, NULL, false);
 410			if (IS_ERR(dst)) {
 411				sk->sk_err_soft = -PTR_ERR(dst);
 412				goto out;
 413			}
 414
 415		} else
 416			dst_hold(dst);
 417
 418		if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
 419			tcp_sync_mss(sk, dst_mtu(dst));
 420			tcp_simple_retransmit(sk);
 421		} /* else let the usual retransmit timer handle it */
 422		dst_release(dst);
 423		goto out;
 424	}
 425
 426	icmpv6_err_convert(type, code, &err);
 427
  428	/* Might be for a request_sock */
 429	switch (sk->sk_state) {
 430		struct request_sock *req, **prev;
 431	case TCP_LISTEN:
 432		if (sock_owned_by_user(sk))
 433			goto out;
 434
 435		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
 436					   &hdr->saddr, inet6_iif(skb));
 437		if (!req)
 438			goto out;
 439
 440		/* ICMPs are not backlogged, hence we cannot get
 441		 * an established socket here.
 442		 */
 443		WARN_ON(req->sk != NULL);
 444
 445		if (seq != tcp_rsk(req)->snt_isn) {
 446			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
 447			goto out;
 448		}
 449
 450		inet_csk_reqsk_queue_drop(sk, req, prev);
 451		goto out;
 452
 453	case TCP_SYN_SENT:
 454	case TCP_SYN_RECV:  /* Cannot happen.
  455			       It can, if SYNs are crossed. --ANK */
 456		if (!sock_owned_by_user(sk)) {
 457			sk->sk_err = err;
 458			sk->sk_error_report(sk);		/* Wake people up to see the error (see connect in sock.c) */
 459
 460			tcp_done(sk);
 461		} else
 462			sk->sk_err_soft = err;
 463		goto out;
 
 
 
 
 
 
 
 
 
 464	}
 465
 466	if (!sock_owned_by_user(sk) && np->recverr) {
 467		sk->sk_err = err;
 468		sk->sk_error_report(sk);
 469	} else
 470		sk->sk_err_soft = err;
 471
 472out:
 473	bh_unlock_sock(sk);
 474	sock_put(sk);
 
 475}
 476
 477
 478static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
 479			      struct request_values *rvp,
 480			      u16 queue_mapping)
 481{
 482	struct inet6_request_sock *treq = inet6_rsk(req);
 483	struct ipv6_pinfo *np = inet6_sk(sk);
 484	struct sk_buff * skb;
 485	struct ipv6_txoptions *opt = NULL;
 486	struct in6_addr * final_p, final;
 487	struct flowi6 fl6;
 488	struct dst_entry *dst;
 489	int err;
 
 
 
 
 
 
 
 490
 491	memset(&fl6, 0, sizeof(fl6));
 492	fl6.flowi6_proto = IPPROTO_TCP;
 493	fl6.daddr = treq->rmt_addr;
 494	fl6.saddr = treq->loc_addr;
 495	fl6.flowlabel = 0;
 496	fl6.flowi6_oif = treq->iif;
 497	fl6.flowi6_mark = sk->sk_mark;
 498	fl6.fl6_dport = inet_rsk(req)->rmt_port;
 499	fl6.fl6_sport = inet_rsk(req)->loc_port;
 500	security_req_classify_flow(req, flowi6_to_flowi(&fl6));
 501
 502	opt = np->opt;
 503	final_p = fl6_update_dst(&fl6, opt, &final);
 
 504
 505	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
 506	if (IS_ERR(dst)) {
 507		err = PTR_ERR(dst);
 508		dst = NULL;
 509		goto done;
 510	}
 511	skb = tcp_make_synack(sk, dst, req, rvp);
 512	err = -ENOMEM;
 513	if (skb) {
 514		__tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);
 
 
 515
 516		fl6.daddr = treq->rmt_addr;
 517		skb_set_queue_mapping(skb, queue_mapping);
 518		err = ip6_xmit(sk, skb, &fl6, opt, np->tclass);
 
 
 
 
 519		err = net_xmit_eval(err);
 520	}
 521
 522done:
 523	if (opt && opt != np->opt)
 524		sock_kfree_s(sk, opt, opt->tot_len);
 525	dst_release(dst);
 526	return err;
 527}
 528
 529static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
 530			     struct request_values *rvp)
 531{
 532	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
 533	return tcp_v6_send_synack(sk, req, rvp, 0);
 534}
 535
 536static void tcp_v6_reqsk_destructor(struct request_sock *req)
 537{
 538	kfree_skb(inet6_rsk(req)->pktopts);
 
 539}
 540
 541#ifdef CONFIG_TCP_MD5SIG
 542static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
 543						   const struct in6_addr *addr)
 
 544{
 545	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
 
 546}
 547
 548static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
 549						struct sock *addr_sk)
 550{
 551	return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
 552}
 553
 554static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
 555						      struct request_sock *req)
 556{
 557	return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
 558}
 559
 560static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval,
 561				  int optlen)
 562{
 563	struct tcp_md5sig cmd;
 564	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
 
 
 
 565
 566	if (optlen < sizeof(cmd))
 567		return -EINVAL;
 568
 569	if (copy_from_user(&cmd, optval, sizeof(cmd)))
 570		return -EFAULT;
 571
 572	if (sin6->sin6_family != AF_INET6)
 573		return -EINVAL;
 574
 575	if (!cmd.tcpm_keylen) {
 576		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
 577			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
 578					      AF_INET);
 
 579		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
 580				      AF_INET6);
 581	}
 582
 583	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
 584		return -EINVAL;
 585
 586	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
 587		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
 588				      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
 
 589
 590	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
 591			      AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
 
 592}
 593
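/* The TCP MD5 signature (RFC 2385) covers a pseudo-header (addresses,
 * segment length, IPPROTO_TCP) in addition to the TCP header and payload;
 * this helper feeds the IPv6 form of that pseudo-header into the digest.
 */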
 594static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
 595					const struct in6_addr *daddr,
 596					const struct in6_addr *saddr, int nbytes)
 
 597{
 598	struct tcp6_pseudohdr *bp;
 599	struct scatterlist sg;
 
 600
 601	bp = &hp->md5_blk.ip6;
 602	/* 1. TCP pseudo-header (RFC2460) */
 603	bp->saddr = *saddr;
 604	bp->daddr = *daddr;
 605	bp->protocol = cpu_to_be32(IPPROTO_TCP);
 606	bp->len = cpu_to_be32(nbytes);
 607
 608	sg_init_one(&sg, bp, sizeof(*bp));
 609	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
 
 
 
 
 
 
 610}
 611
 612static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
 613			       const struct in6_addr *daddr, struct in6_addr *saddr,
 614			       const struct tcphdr *th)
 615{
 616	struct tcp_md5sig_pool *hp;
 617	struct hash_desc *desc;
 618
 619	hp = tcp_get_md5sig_pool();
 620	if (!hp)
 621		goto clear_hash_noput;
 622	desc = &hp->md5_desc;
 623
 624	if (crypto_hash_init(desc))
 625		goto clear_hash;
 626	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
 627		goto clear_hash;
 628	if (tcp_md5_hash_header(hp, th))
 629		goto clear_hash;
 630	if (tcp_md5_hash_key(hp, key))
 631		goto clear_hash;
 632	if (crypto_hash_final(desc, md5_hash))
 
 633		goto clear_hash;
 634
 635	tcp_put_md5sig_pool();
 636	return 0;
 637
 638clear_hash:
 639	tcp_put_md5sig_pool();
 640clear_hash_noput:
 641	memset(md5_hash, 0, 16);
 642	return 1;
 643}
 644
 645static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
 
 646			       const struct sock *sk,
 647			       const struct request_sock *req,
 648			       const struct sk_buff *skb)
 649{
 650	const struct in6_addr *saddr, *daddr;
 651	struct tcp_md5sig_pool *hp;
 652	struct hash_desc *desc;
 653	const struct tcphdr *th = tcp_hdr(skb);
 654
 655	if (sk) {
 656		saddr = &inet6_sk(sk)->saddr;
 657		daddr = &inet6_sk(sk)->daddr;
 658	} else if (req) {
 659		saddr = &inet6_rsk(req)->loc_addr;
 660		daddr = &inet6_rsk(req)->rmt_addr;
 661	} else {
 662		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
 663		saddr = &ip6h->saddr;
 664		daddr = &ip6h->daddr;
 665	}
 666
 667	hp = tcp_get_md5sig_pool();
 668	if (!hp)
 669		goto clear_hash_noput;
 670	desc = &hp->md5_desc;
 671
 672	if (crypto_hash_init(desc))
 673		goto clear_hash;
 674
 675	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
 676		goto clear_hash;
 677	if (tcp_md5_hash_header(hp, th))
 678		goto clear_hash;
 679	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
 680		goto clear_hash;
 681	if (tcp_md5_hash_key(hp, key))
 682		goto clear_hash;
 683	if (crypto_hash_final(desc, md5_hash))
 
 684		goto clear_hash;
 685
 686	tcp_put_md5sig_pool();
 687	return 0;
 688
 689clear_hash:
 690	tcp_put_md5sig_pool();
 691clear_hash_noput:
 692	memset(md5_hash, 0, 16);
 693	return 1;
 694}
 695
 696static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
 697{
 698	const __u8 *hash_location = NULL;
 699	struct tcp_md5sig_key *hash_expected;
 700	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
 701	const struct tcphdr *th = tcp_hdr(skb);
 702	int genhash;
 703	u8 newhash[16];
 704
 705	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
 706	hash_location = tcp_parse_md5sig_option(th);
 
 
 
 
 
 707
 708	/* We've parsed the options - do we have a hash? */
 709	if (!hash_expected && !hash_location)
 710		return 0;
 711
 712	if (hash_expected && !hash_location) {
 713		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
 714		return 1;
 715	}
 716
 717	if (!hash_expected && hash_location) {
 718		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
 719		return 1;
 720	}
 721
 722	/* check the signature */
 723	genhash = tcp_v6_md5_hash_skb(newhash,
 724				      hash_expected,
 725				      NULL, NULL, skb);
 726
 727	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
 728		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
 729				     genhash ? "failed" : "mismatch",
 730				     &ip6h->saddr, ntohs(th->source),
 731				     &ip6h->daddr, ntohs(th->dest));
 732		return 1;
 733	}
 734	return 0;
 735}
 736#endif
 737
 738struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
 739	.family		=	AF_INET6,
 740	.obj_size	=	sizeof(struct tcp6_request_sock),
 741	.rtx_syn_ack	=	tcp_v6_rtx_synack,
 742	.send_ack	=	tcp_v6_reqsk_send_ack,
 743	.destructor	=	tcp_v6_reqsk_destructor,
 744	.send_reset	=	tcp_v6_send_reset,
 745	.syn_ack_timeout = 	tcp_syn_ack_timeout,
 746};
 747
 
 
 
 748#ifdef CONFIG_TCP_MD5SIG
 749static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
 750	.md5_lookup	=	tcp_v6_reqsk_md5_lookup,
 751	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
 
 
 
 
 
 
 
 
 752};
 753#endif
 754
 755static void __tcp_v6_send_check(struct sk_buff *skb,
 756				const struct in6_addr *saddr, const struct in6_addr *daddr)
 757{
 758	struct tcphdr *th = tcp_hdr(skb);
 759
 760	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 761		th->check = ~tcp_v6_check(skb->len, saddr, daddr, 0);
 762		skb->csum_start = skb_transport_header(skb) - skb->head;
 763		skb->csum_offset = offsetof(struct tcphdr, check);
 764	} else {
 765		th->check = tcp_v6_check(skb->len, saddr, daddr,
 766					 csum_partial(th, th->doff << 2,
 767						      skb->csum));
 768	}
 769}
 770
 771static void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
 772{
 773	struct ipv6_pinfo *np = inet6_sk(sk);
 774
 775	__tcp_v6_send_check(skb, &np->saddr, &np->daddr);
 776}
 777
 778static int tcp_v6_gso_send_check(struct sk_buff *skb)
 779{
 780	const struct ipv6hdr *ipv6h;
 781	struct tcphdr *th;
 782
 783	if (!pskb_may_pull(skb, sizeof(*th)))
 784		return -EINVAL;
 785
 786	ipv6h = ipv6_hdr(skb);
 787	th = tcp_hdr(skb);
 788
 789	th->check = 0;
 790	skb->ip_summed = CHECKSUM_PARTIAL;
 791	__tcp_v6_send_check(skb, &ipv6h->saddr, &ipv6h->daddr);
 792	return 0;
 793}
 794
 795static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
 796					 struct sk_buff *skb)
 797{
 798	const struct ipv6hdr *iph = skb_gro_network_header(skb);
 799
 800	switch (skb->ip_summed) {
 801	case CHECKSUM_COMPLETE:
 802		if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr,
 803				  skb->csum)) {
 804			skb->ip_summed = CHECKSUM_UNNECESSARY;
 805			break;
 806		}
 807
 808		/* fall through */
 809	case CHECKSUM_NONE:
 810		NAPI_GRO_CB(skb)->flush = 1;
 811		return NULL;
 812	}
 813
 814	return tcp_gro_receive(head, skb);
 815}
 816
 817static int tcp6_gro_complete(struct sk_buff *skb)
 818{
 819	const struct ipv6hdr *iph = ipv6_hdr(skb);
 820	struct tcphdr *th = tcp_hdr(skb);
 821
 822	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
 823				  &iph->saddr, &iph->daddr, 0);
 824	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
 825
 826	return tcp_gro_complete(skb);
 827}
 828
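/* Build and send a bare TCP segment in reply to an incoming skb: a RST when
 * rst != 0, otherwise an ACK (used, for instance, for TIME-WAIT sockets).
 * Addresses and ports are swapped from the received segment, and the packet
 * is sent on the per-namespace control socket since there may be no full
 * socket to transmit from.
 */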
 829static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
 830				 u32 ts, struct tcp_md5sig_key *key, int rst, u8 tclass)
 831{
 832	const struct tcphdr *th = tcp_hdr(skb);
 833	struct tcphdr *t1;
 834	struct sk_buff *buff;
 835	struct flowi6 fl6;
 836	struct net *net = dev_net(skb_dst(skb)->dev);
 837	struct sock *ctl_sk = net->ipv6.tcp_sk;
 838	unsigned int tot_len = sizeof(struct tcphdr);
 
 839	struct dst_entry *dst;
 840	__be32 *topt;
 841
 842	if (ts)
 843		tot_len += TCPOLEN_TSTAMP_ALIGNED;
 844#ifdef CONFIG_TCP_MD5SIG
 845	if (key)
 846		tot_len += TCPOLEN_MD5SIG_ALIGNED;
 847#endif
 848
 849	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
 850			 GFP_ATOMIC);
 851	if (buff == NULL)
 
 
 
 
 
 
 
 
 852		return;
 853
 854	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
 855
 856	t1 = (struct tcphdr *) skb_push(buff, tot_len);
 857	skb_reset_transport_header(buff);
 858
 859	/* Swap the send and the receive. */
 860	memset(t1, 0, sizeof(*t1));
 861	t1->dest = th->source;
 862	t1->source = th->dest;
 863	t1->doff = tot_len / 4;
 864	t1->seq = htonl(seq);
 865	t1->ack_seq = htonl(ack);
 866	t1->ack = !rst || !th->ack;
 867	t1->rst = rst;
 868	t1->window = htons(win);
 869
 870	topt = (__be32 *)(t1 + 1);
 871
 872	if (ts) {
 873		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
 874				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
 875		*topt++ = htonl(tcp_time_stamp);
 876		*topt++ = htonl(ts);
 877	}
 878
 
 
 
 879#ifdef CONFIG_TCP_MD5SIG
 880	if (key) {
 881		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
 882				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
 883		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
 884				    &ipv6_hdr(skb)->saddr,
 885				    &ipv6_hdr(skb)->daddr, t1);
 886	}
 887#endif
 888
 889	memset(&fl6, 0, sizeof(fl6));
 890	fl6.daddr = ipv6_hdr(skb)->saddr;
 891	fl6.saddr = ipv6_hdr(skb)->daddr;
 
 892
 893	buff->ip_summed = CHECKSUM_PARTIAL;
 894	buff->csum = 0;
 895
 896	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
 897
 898	fl6.flowi6_proto = IPPROTO_TCP;
 899	fl6.flowi6_oif = inet6_iif(skb);
 900	fl6.fl6_dport = t1->dest;
 901	fl6.fl6_sport = t1->source;
 902	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
 
 903
  904	/* Pass a socket to ip6_dst_lookup even when it is for a RST;
  905	 * the underlying function will use it to retrieve the network
  906	 * namespace.
 907	 */
 908	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL, false);
 
 
 
 909	if (!IS_ERR(dst)) {
 910		skb_dst_set(buff, dst);
 911		ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
 912		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
 
 913		if (rst)
 914			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
 915		return;
 916	}
 917
 918	kfree_skb(buff);
 919}
 920
 921static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
 922{
 923	const struct tcphdr *th = tcp_hdr(skb);
 
 924	u32 seq = 0, ack_seq = 0;
 925	struct tcp_md5sig_key *key = NULL;
 926#ifdef CONFIG_TCP_MD5SIG
 927	const __u8 *hash_location = NULL;
 928	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
 929	unsigned char newhash[16];
 930	int genhash;
 931	struct sock *sk1 = NULL;
 932#endif
 
 
 
 
 
 933
 934	if (th->rst)
 935		return;
 936
 937	if (!ipv6_unicast_destination(skb))
 
 
 
 938		return;
 939
 
 940#ifdef CONFIG_TCP_MD5SIG
 
 941	hash_location = tcp_parse_md5sig_option(th);
 942	if (!sk && hash_location) {
 943		/*
  944		 * The active side is lost. Try to find the listening socket via
  945		 * the source port, and then find the md5 key through that socket.
  946		 * We are not loosening security here:
  947		 * the incoming packet is checked against the md5 hash using the key found;
  948		 * no RST is generated if the md5 hash doesn't match.
 949		 */
 950		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
 951					   &tcp_hashinfo, &ipv6h->daddr,
 952					   ntohs(th->source), inet6_iif(skb));
 
 953		if (!sk1)
 954			return;
 
 
 
 
 
 955
 956		rcu_read_lock();
 957		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
 958		if (!key)
 959			goto release_sk1;
 960
 961		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, NULL, skb);
 962		if (genhash || memcmp(hash_location, newhash, 16) != 0)
 963			goto release_sk1;
 964	} else {
 965		key = sk ? tcp_v6_md5_do_lookup(sk, &ipv6h->saddr) : NULL;
 966	}
 967#endif
 968
 969	if (th->ack)
 970		seq = ntohl(th->ack_seq);
 971	else
 972		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
 973			  (th->doff << 2);
 974
 975	tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1, 0);
 976
 977#ifdef CONFIG_TCP_MD5SIG
 978release_sk1:
 979	if (sk1) {
 980		rcu_read_unlock();
 981		sock_put(sk1);
 982	}
 983#endif
 984}
 985
 986static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts,
 987			    struct tcp_md5sig_key *key, u8 tclass)
 
 
 988{
 989	tcp_v6_send_response(skb, seq, ack, win, ts, key, 0, tclass);
 
 990}
 991
 992static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
 993{
 994	struct inet_timewait_sock *tw = inet_twsk(sk);
 995	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
 996
 997	tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
 998			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
 999			tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw),
1000			tw->tw_tclass);
 
 
1001
1002	inet_twsk_put(tw);
1003}
1004
1005static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
1006				  struct request_sock *req)
1007{
1008	tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent,
1009			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr), 0);
1010}
1011
1012
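/* For a segment arriving on a listening socket: first check whether it
 * completes a pending connection request (SYN_RECV); failing that, look for
 * an already established socket matching the 4-tuple, and finally fall back
 * to SYN-cookie validation for ACKs that match no stored request.
 */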
1013static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
1014{
1015	struct request_sock *req, **prev;
1016	const struct tcphdr *th = tcp_hdr(skb);
1017	struct sock *nsk;
1018
1019	/* Find possible connection requests. */
1020	req = inet6_csk_search_req(sk, &prev, th->source,
1021				   &ipv6_hdr(skb)->saddr,
1022				   &ipv6_hdr(skb)->daddr, inet6_iif(skb));
1023	if (req)
1024		return tcp_check_req(sk, skb, req, prev);
1025
1026	nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
1027			&ipv6_hdr(skb)->saddr, th->source,
1028			&ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));
1029
1030	if (nsk) {
1031		if (nsk->sk_state != TCP_TIME_WAIT) {
1032			bh_lock_sock(nsk);
1033			return nsk;
1034		}
1035		inet_twsk_put(inet_twsk(nsk));
1036		return NULL;
1037	}
1038
1039#ifdef CONFIG_SYN_COOKIES
1040	if (!th->syn)
1041		sk = cookie_v6_check(sk, skb);
1042#endif
1043	return sk;
1044}
1045
1046/* FIXME: this is substantially similar to the ipv4 code.
1047 * Can some kind of merge be done? -- erics
1048 */
1049static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1050{
1051	struct tcp_extend_values tmp_ext;
1052	struct tcp_options_received tmp_opt;
1053	const u8 *hash_location;
1054	struct request_sock *req;
1055	struct inet6_request_sock *treq;
1056	struct ipv6_pinfo *np = inet6_sk(sk);
1057	struct tcp_sock *tp = tcp_sk(sk);
1058	__u32 isn = TCP_SKB_CB(skb)->when;
1059	struct dst_entry *dst = NULL;
1060	bool want_cookie = false;
1061
1062	if (skb->protocol == htons(ETH_P_IP))
1063		return tcp_v4_conn_request(sk, skb);
1064
1065	if (!ipv6_unicast_destination(skb))
1066		goto drop;
1067
1068	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
1069		want_cookie = tcp_syn_flood_action(sk, skb, "TCPv6");
1070		if (!want_cookie)
1071			goto drop;
1072	}
1073
1074	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
1075		goto drop;
1076
1077	req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
1078	if (req == NULL)
1079		goto drop;
1080
1081#ifdef CONFIG_TCP_MD5SIG
1082	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
1083#endif
1084
1085	tcp_clear_options(&tmp_opt);
1086	tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
1087	tmp_opt.user_mss = tp->rx_opt.user_mss;
1088	tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
1089
1090	if (tmp_opt.cookie_plus > 0 &&
1091	    tmp_opt.saw_tstamp &&
1092	    !tp->rx_opt.cookie_out_never &&
1093	    (sysctl_tcp_cookie_size > 0 ||
1094	     (tp->cookie_values != NULL &&
1095	      tp->cookie_values->cookie_desired > 0))) {
1096		u8 *c;
1097		u32 *d;
1098		u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
1099		int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;
1100
1101		if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
1102			goto drop_and_free;
1103
1104		/* Secret recipe starts with IP addresses */
1105		d = (__force u32 *)&ipv6_hdr(skb)->daddr.s6_addr32[0];
1106		*mess++ ^= *d++;
1107		*mess++ ^= *d++;
1108		*mess++ ^= *d++;
1109		*mess++ ^= *d++;
1110		d = (__force u32 *)&ipv6_hdr(skb)->saddr.s6_addr32[0];
1111		*mess++ ^= *d++;
1112		*mess++ ^= *d++;
1113		*mess++ ^= *d++;
1114		*mess++ ^= *d++;
1115
1116		/* plus variable length Initiator Cookie */
1117		c = (u8 *)mess;
1118		while (l-- > 0)
1119			*c++ ^= *hash_location++;
1120
1121		want_cookie = false;	/* not our kind of cookie */
1122		tmp_ext.cookie_out_never = 0; /* false */
1123		tmp_ext.cookie_plus = tmp_opt.cookie_plus;
1124	} else if (!tp->rx_opt.cookie_in_always) {
1125		/* redundant indications, but ensure initialization. */
1126		tmp_ext.cookie_out_never = 1; /* true */
1127		tmp_ext.cookie_plus = 0;
1128	} else {
1129		goto drop_and_free;
1130	}
1131	tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;
1132
1133	if (want_cookie && !tmp_opt.saw_tstamp)
1134		tcp_clear_options(&tmp_opt);
1135
1136	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
1137	tcp_openreq_init(req, &tmp_opt, skb);
1138
1139	treq = inet6_rsk(req);
1140	treq->rmt_addr = ipv6_hdr(skb)->saddr;
1141	treq->loc_addr = ipv6_hdr(skb)->daddr;
1142	if (!want_cookie || tmp_opt.tstamp_ok)
1143		TCP_ECN_create_request(req, skb);
1144
1145	treq->iif = sk->sk_bound_dev_if;
1146
1147	/* So that link locals have meaning */
1148	if (!sk->sk_bound_dev_if &&
1149	    ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
1150		treq->iif = inet6_iif(skb);
1151
1152	if (!isn) {
1153		struct inet_peer *peer = NULL;
1154
1155		if (ipv6_opt_accepted(sk, skb) ||
1156		    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
1157		    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
1158			atomic_inc(&skb->users);
1159			treq->pktopts = skb;
1160		}
1161
1162		if (want_cookie) {
1163			isn = cookie_v6_init_sequence(sk, skb, &req->mss);
1164			req->cookie_ts = tmp_opt.tstamp_ok;
1165			goto have_isn;
1166		}
1167
1168		/* VJ's idea. We save last timestamp seen
1169		 * from the destination in peer table, when entering
1170		 * state TIME-WAIT, and check against it before
1171		 * accepting new connection request.
1172		 *
1173		 * If "isn" is not zero, this request hit a live
1174		 * timewait bucket, so all the necessary checks
1175		 * were already made by the code processing the timewait state.
1176		 */
1177		if (tmp_opt.saw_tstamp &&
1178		    tcp_death_row.sysctl_tw_recycle &&
1179		    (dst = inet6_csk_route_req(sk, req)) != NULL &&
1180		    (peer = rt6_get_peer((struct rt6_info *)dst)) != NULL &&
1181		    ipv6_addr_equal((struct in6_addr *)peer->daddr.addr.a6,
1182				    &treq->rmt_addr)) {
1183			inet_peer_refcheck(peer);
1184			if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
1185			    (s32)(peer->tcp_ts - req->ts_recent) >
1186							TCP_PAWS_WINDOW) {
1187				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
1188				goto drop_and_release;
1189			}
1190		}
1191		/* Kill the following clause, if you dislike this way. */
1192		else if (!sysctl_tcp_syncookies &&
1193			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
1194			  (sysctl_max_syn_backlog >> 2)) &&
1195			 (!peer || !peer->tcp_ts_stamp) &&
1196			 (!dst || !dst_metric(dst, RTAX_RTT))) {
1197			/* Without syncookies the last quarter of the
1198			 * backlog is filled with destinations
1199			 * proven to be alive.
1200			 * This means we keep communicating with
1201			 * destinations already remembered
1202			 * by the time the synflood started.
1203			 */
1204			LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI6/%u\n",
1205				       &treq->rmt_addr, ntohs(tcp_hdr(skb)->source));
1206			goto drop_and_release;
1207		}
1208
1209		isn = tcp_v6_init_sequence(skb);
1210	}
1211have_isn:
1212	tcp_rsk(req)->snt_isn = isn;
1213	tcp_rsk(req)->snt_synack = tcp_time_stamp;
1214
1215	if (security_inet_conn_request(sk, skb, req))
1216		goto drop_and_release;
1217
1218	if (tcp_v6_send_synack(sk, req,
1219			       (struct request_values *)&tmp_ext,
1220			       skb_get_queue_mapping(skb)) ||
1221	    want_cookie)
1222		goto drop_and_free;
1223
1224	inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1225	return 0;
1226
1227drop_and_release:
1228	dst_release(dst);
1229drop_and_free:
1230	reqsk_free(req);
1231drop:
 
1232	return 0; /* don't send reset */
1233}
1234
1235static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1236					  struct request_sock *req,
1237					  struct dst_entry *dst)
1238{
1239	struct inet6_request_sock *treq;
1240	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
1241	struct tcp6_sock *newtcp6sk;
1242	struct inet_sock *newinet;
 
1243	struct tcp_sock *newtp;
1244	struct sock *newsk;
1245	struct ipv6_txoptions *opt;
1246#ifdef CONFIG_TCP_MD5SIG
1247	struct tcp_md5sig_key *key;
 
1248#endif
 
1249
1250	if (skb->protocol == htons(ETH_P_IP)) {
1251		/*
1252		 *	v6 mapped
1253		 */
1254
1255		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);
 
1256
1257		if (newsk == NULL)
1258			return NULL;
1259
1260		newtcp6sk = (struct tcp6_sock *)newsk;
1261		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1262
1263		newinet = inet_sk(newsk);
1264		newnp = inet6_sk(newsk);
1265		newtp = tcp_sk(newsk);
1266
1267		memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1268
1269		ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr);
1270
1271		ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);
1272
1273		newnp->rcv_saddr = newnp->saddr;
1274
1275		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
 
 
1276		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
1277#ifdef CONFIG_TCP_MD5SIG
1278		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1279#endif
1280
 
1281		newnp->ipv6_ac_list = NULL;
1282		newnp->ipv6_fl_list = NULL;
1283		newnp->pktoptions  = NULL;
1284		newnp->opt	   = NULL;
1285		newnp->mcast_oif   = inet6_iif(skb);
1286		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
1287		newnp->rcv_tclass  = ipv6_tclass(ipv6_hdr(skb));
 
 
1288
1289		/*
1290		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1291		 * here, tcp_create_openreq_child now does this for us, see the comment in
1292		 * that function for the gory details. -acme
1293		 */
1294
1295		/* This is a tricky place. Until this moment the IPv4 tcp
1296		   code worked with the IPv6 icsk.icsk_af_ops.
1297		   Sync it now.
1298		 */
1299		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
1300
1301		return newsk;
1302	}
1303
1304	treq = inet6_rsk(req);
1305	opt = np->opt;
1306
1307	if (sk_acceptq_is_full(sk))
1308		goto out_overflow;
1309
1310	if (!dst) {
1311		dst = inet6_csk_route_req(sk, req);
1312		if (!dst)
1313			goto out;
1314	}
1315
1316	newsk = tcp_create_openreq_child(sk, req, skb);
1317	if (newsk == NULL)
1318		goto out_nonewsk;
1319
1320	/*
1321	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1322	 * count here, tcp_create_openreq_child now does this for us, see the
1323	 * comment in that function for the gory details. -acme
1324	 */
1325
1326	newsk->sk_gso_type = SKB_GSO_TCPV6;
1327	__ip6_dst_store(newsk, dst, NULL, NULL);
 
1328
1329	newtcp6sk = (struct tcp6_sock *)newsk;
1330	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1331
1332	newtp = tcp_sk(newsk);
1333	newinet = inet_sk(newsk);
1334	newnp = inet6_sk(newsk);
1335
1336	memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1337
1338	newnp->daddr = treq->rmt_addr;
1339	newnp->saddr = treq->loc_addr;
1340	newnp->rcv_saddr = treq->loc_addr;
1341	newsk->sk_bound_dev_if = treq->iif;
1342
1343	/* Now IPv6 options...
1344
1345	   First: no IPv4 options.
1346	 */
1347	newinet->inet_opt = NULL;
 
1348	newnp->ipv6_ac_list = NULL;
1349	newnp->ipv6_fl_list = NULL;
1350
1351	/* Clone RX bits */
1352	newnp->rxopt.all = np->rxopt.all;
1353
1354	/* Clone pktoptions received with SYN */
1355	newnp->pktoptions = NULL;
1356	if (treq->pktopts != NULL) {
1357		newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
1358		consume_skb(treq->pktopts);
1359		treq->pktopts = NULL;
1360		if (newnp->pktoptions)
1361			skb_set_owner_r(newnp->pktoptions, newsk);
1362	}
1363	newnp->opt	  = NULL;
1364	newnp->mcast_oif  = inet6_iif(skb);
1365	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1366	newnp->rcv_tclass = ipv6_tclass(ipv6_hdr(skb));
 
 
 
 
 
 
 
 
1367
1368	/* Clone native IPv6 options from listening socket (if any)
1369
1370	   Yes, keeping a reference count would be much more clever,
1371	   but we do one more thing here: reattach optmem
1372	   to newsk.
1373	 */
 
 
 
1374	if (opt) {
1375		newnp->opt = ipv6_dup_options(newsk, opt);
1376		if (opt != np->opt)
1377			sock_kfree_s(sk, opt, opt->tot_len);
1378	}
 
 
 
 
1379
1380	inet_csk(newsk)->icsk_ext_hdr_len = 0;
1381	if (newnp->opt)
1382		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
1383						     newnp->opt->opt_flen);
1384
1385	tcp_mtup_init(newsk);
1386	tcp_sync_mss(newsk, dst_mtu(dst));
1387	newtp->advmss = dst_metric_advmss(dst);
1388	if (tcp_sk(sk)->rx_opt.user_mss &&
1389	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1390		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1391
1392	tcp_initialize_rcv_mss(newsk);
1393	if (tcp_rsk(req)->snt_synack)
1394		tcp_valid_rtt_meas(newsk,
1395		    tcp_time_stamp - tcp_rsk(req)->snt_synack);
1396	newtp->total_retrans = req->retrans;
1397
1398	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
1399	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
1400
1401#ifdef CONFIG_TCP_MD5SIG
 
 
1402	/* Copy over the MD5 key from the original socket */
1403	if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
1404		/* We're using one, so create a matching key
1405		 * on the newsk structure. If we fail to get
1406		 * memory, then we end up not copying the key
1407		 * across. Shucks.
1408		 */
1409		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newnp->daddr,
1410			       AF_INET6, key->key, key->keylen, GFP_ATOMIC);
 
 
1411	}
1412#endif
1413
1414	if (__inet_inherit_port(sk, newsk) < 0) {
1415		sock_put(newsk);
 
1416		goto out;
1417	}
1418	__inet6_hash(newsk, NULL);
1419
1420	return newsk;
1421
1422out_overflow:
1423	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1424out_nonewsk:
1425	if (opt && opt != np->opt)
1426		sock_kfree_s(sk, opt, opt->tot_len);
1427	dst_release(dst);
1428out:
1429	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1430	return NULL;
1431}
1432
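/* Validate or defer the TCP checksum: CHECKSUM_COMPLETE packets are verified
 * against the pseudo-header right here; otherwise only short packets
 * (<= 76 bytes) are checked immediately, while larger ones keep a partial
 * checksum that is completed later, when the data is actually consumed.
 */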
1433static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
1434{
1435	if (skb->ip_summed == CHECKSUM_COMPLETE) {
1436		if (!tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr,
1437				  &ipv6_hdr(skb)->daddr, skb->csum)) {
1438			skb->ip_summed = CHECKSUM_UNNECESSARY;
1439			return 0;
1440		}
1441	}
1442
1443	skb->csum = ~csum_unfold(tcp_v6_check(skb->len,
1444					      &ipv6_hdr(skb)->saddr,
1445					      &ipv6_hdr(skb)->daddr, 0));
1446
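	/* Short packets are cheap to verify immediately; 76 bytes is a
	 * long-standing heuristic threshold.  For larger packets we leave
	 * the folded pseudo-header sum in skb->csum so the checksum can
	 * be completed incrementally later, e.g. while copying to user.
	 */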
1447	if (skb->len <= 76)
1448		return __skb_checksum_complete(skb);
1450	return 0;
1451}
1452
1453/* The socket must have its spinlock held when we get
1454 * here.
1455 *
1456 * We have a potential double-lock case here, so even when
1457 * doing backlog processing we use the BH locking scheme.
1458 * This is because we cannot sleep with the original spinlock
1459 * held.
1460 */
1461static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1462{
1463	struct ipv6_pinfo *np = inet6_sk(sk);
1464	struct tcp_sock *tp;
1465	struct sk_buff *opt_skb = NULL;
1466
1467	/* Imagine: socket is IPv6. IPv4 packet arrives,
1468	   goes to IPv4 receive handler and backlogged.
1469	   From backlog it always goes here. Kerboom...
1470	   Fortunately, tcp_rcv_established and rcv_established
1471	   handle them correctly, but that is not the case with
1472	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
1473	 */
1474
1475	if (skb->protocol == htons(ETH_P_IP))
1476		return tcp_v4_do_rcv(sk, skb);
1477
1478#ifdef CONFIG_TCP_MD5SIG
1479	if (tcp_v6_inbound_md5_hash(sk, skb))
1480		goto discard;
1481#endif
1482
1483	if (sk_filter(sk, skb))
1484		goto discard;
1485
1486	/*
1487	 *	socket locking is here for SMP purposes as backlog rcv
1488	 *	is currently called with bh processing disabled.
1489	 */
1490
1491	/* Do Stevens' IPV6_PKTOPTIONS.
1492
1493	   Yes, guys, it is the only place in our code where we
1494	   can do this without affecting IPv4.
1495	   The rest of the code is protocol independent,
1496	   and I do not like the idea of uglifying IPv4.
1497
1498	   Actually, the whole idea behind IPV6_PKTOPTIONS
1499	   does not look very well thought out. For now we latch
1500	   the options received in the last packet enqueued
1501	   by TCP. Feel free to propose a better solution.
1502					       --ANK (980728)
1503	 */
1504	if (np->rxopt.all)
1505		opt_skb = skb_clone(skb, GFP_ATOMIC);
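	/* GFP_ATOMIC is mandatory here: we may run in softirq or backlog
	 * context where sleeping is not allowed.  If the clone fails we
	 * simply miss latching this packet's options.
	 */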
1506
1507	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1508		sock_rps_save_rxhash(sk, skb);
1509		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
1510			goto reset;
1511		if (opt_skb)
1512			goto ipv6_pktoptions;
1513		return 0;
1514	}
1515
1516	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1517		goto csum_err;
1518
1519	if (sk->sk_state == TCP_LISTEN) {
1520		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
1521		if (!nsk)
1522			goto discard;
1523
1524		/*
1525		 * Queue it on the new socket if the new socket is active,
1526		 * otherwise we just shortcircuit this and continue with
1527		 * the new socket.
1528		 */
1529		if (nsk != sk) {
1530			sock_rps_save_rxhash(nsk, skb);
1531			if (tcp_child_process(sk, nsk, skb))
1532				goto reset;
1533			if (opt_skb)
1534				__kfree_skb(opt_skb);
1535			return 0;
1536		}
1537	} else
1538		sock_rps_save_rxhash(sk, skb);
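	/* sock_rps_save_rxhash() records the flow hash so Receive Packet
	 * Steering keeps later segments of this flow on the same CPU.
	 */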
1539
1540	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
1541		goto reset;
1542	if (opt_skb)
1543		goto ipv6_pktoptions;
1544	return 0;
1545
1546reset:
1547	tcp_v6_send_reset(sk, skb);
1548discard:
1549	if (opt_skb)
1550		__kfree_skb(opt_skb);
1551	kfree_skb(skb);
1552	return 0;
1553csum_err:
1554	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1555	goto discard;
1556
1557
1558ipv6_pktoptions:
1559	/* What are the conditions for getting here?
1560
1561	   1. The skb was enqueued by TCP.
1562	   2. The skb was added to the tail of the read queue, not out of order.
1563	   3. The socket is not in a passive state.
1564	   4. Finally, it really contains options that the user wants to receive.
1565	 */
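	/* The rxopt bits tested below map to the IPV6_RECVPKTINFO,
	 * IPV6_RECVHOPLIMIT and IPV6_RECVTCLASS socket options (and the
	 * legacy IPV6_2292PKTINFO/IPV6_2292HOPLIMIT forms), i.e. whatever
	 * ancillary data the user actually asked for.
	 */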
1566	tp = tcp_sk(sk);
1567	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1568	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
1569		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
1570			np->mcast_oif = inet6_iif(opt_skb);
1571		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1572			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
1573		if (np->rxopt.bits.rxtclass)
1574			np->rcv_tclass = ipv6_tclass(ipv6_hdr(opt_skb));
1575		if (ipv6_opt_accepted(sk, opt_skb)) {
1576			skb_set_owner_r(opt_skb, sk);
1577			opt_skb = xchg(&np->pktoptions, opt_skb);
1578		} else {
1579			__kfree_skb(opt_skb);
1580			opt_skb = xchg(&np->pktoptions, NULL);
1581		}
1582	}
1583
1584	kfree_skb(opt_skb);
1585	return 0;
1586}
1587
1588static int tcp_v6_rcv(struct sk_buff *skb)
1589{
1590	const struct tcphdr *th;
1591	const struct ipv6hdr *hdr;
1592	struct sock *sk;
1593	int ret;
1594	struct net *net = dev_net(skb->dev);
1595
1596	if (skb->pkt_type != PACKET_HOST)
1597		goto discard_it;
1598
1599	/*
1600	 *	Count it even if it's bad.
1601	 */
1602	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1603
1604	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1605		goto discard_it;
1606
1607	th = tcp_hdr(skb);
1608
1609	if (th->doff < sizeof(struct tcphdr)/4)
1610		goto bad_packet;
1611	if (!pskb_may_pull(skb, th->doff*4))
1612		goto discard_it;
1613
1614	if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
1615		goto bad_packet;
1616
1617	th = tcp_hdr(skb);
1618	hdr = ipv6_hdr(skb);
1619	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1620	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1621				    skb->len - th->doff*4);
1622	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1623	TCP_SKB_CB(skb)->when = 0;
1624	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1625	TCP_SKB_CB(skb)->sacked = 0;
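	/* end_seq counts the SYN and FIN flags in addition to the payload
	 * bytes, since each of those flags consumes one sequence number.
	 */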
1626
1627	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
1628	if (!sk)
1629		goto no_tcp_socket;
1630
1631process:
1632	if (sk->sk_state == TCP_TIME_WAIT)
1633		goto do_time_wait;
1634
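	/* Per-socket minimum hop limit (the IPV6_MINHOPCOUNT option), in
	 * the spirit of RFC 5082 GTSM: a peer a few hops away arrives
	 * with a predictably high hop limit, while a distant spoofer
	 * cannot, so anything below the configured floor is dropped.
	 */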
1635	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
1636		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1637		goto discard_and_relse;
1638	}
1639
1640	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1641		goto discard_and_relse;
1642
1643	if (sk_filter(sk, skb))
1644		goto discard_and_relse;
1645
1646	skb->dev = NULL;
1647
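	/* Three delivery paths from here: with no user context holding
	 * the socket we process the segment directly (or via the
	 * prequeue, or the NET_DMA path when a DMA channel is available);
	 * otherwise it is appended to the backlog, bounded by
	 * sk_rcvbuf + sk_sndbuf so a flood cannot pin unbounded memory.
	 */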
1648	bh_lock_sock_nested(sk);
1649	ret = 0;
1650	if (!sock_owned_by_user(sk)) {
1651#ifdef CONFIG_NET_DMA
1652		struct tcp_sock *tp = tcp_sk(sk);
1653		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1654			tp->ucopy.dma_chan = net_dma_find_channel();
1655		if (tp->ucopy.dma_chan)
1656			ret = tcp_v6_do_rcv(sk, skb);
1657		else
1658#endif
1659		{
1660			if (!tcp_prequeue(sk, skb))
1661				ret = tcp_v6_do_rcv(sk, skb);
1662		}
1663	} else if (unlikely(sk_add_backlog(sk, skb,
1664					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
1665		bh_unlock_sock(sk);
1666		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1667		goto discard_and_relse;
1668	}
1669	bh_unlock_sock(sk);
1670
1671	sock_put(sk);
1672	return ret ? -1 : 0;
1673
1674no_tcp_socket:
1675	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1676		goto discard_it;
1677
1678	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1679bad_packet:
1680		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1681	} else {
1682		tcp_v6_send_reset(NULL, skb);
1683	}
1684
1685discard_it:
1686
1687	/*
1688	 *	Discard frame
1689	 */
1690
1691	kfree_skb(skb);
1692	return 0;
1693
1694discard_and_relse:
1695	sock_put(sk);
1696	goto discard_it;
1697
1698do_time_wait:
1699	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1700		inet_twsk_put(inet_twsk(sk));
1701		goto discard_it;
1702	}
1703
1704	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1705		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1706		inet_twsk_put(inet_twsk(sk));
1707		goto discard_it;
1708	}
1709
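	/* tcp_timewait_state_process() classifies segments that hit a
	 * TIME_WAIT socket.  TCP_TW_SYN is the recycling case: a fresh
	 * SYN that a live listener will accept lets us tear down the
	 * TIME_WAIT entry early and hand the SYN over via goto process.
	 */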
1710	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1711	case TCP_TW_SYN:
1712	{
1713		struct sock *sk2;
1714
1715		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
1716					    &ipv6_hdr(skb)->daddr,
1717					    ntohs(th->dest), inet6_iif(skb));
1718		if (sk2 != NULL) {
1719			struct inet_timewait_sock *tw = inet_twsk(sk);
1720			inet_twsk_deschedule(tw, &tcp_death_row);
1721			inet_twsk_put(tw);
1722			sk = sk2;
1723			goto process;
1724		}
1725		/* Fall through to ACK */
1726	}
1727	case TCP_TW_ACK:
1728		tcp_v6_timewait_ack(sk, skb);
1729		break;
1730	case TCP_TW_RST:
1731		goto no_tcp_socket;
1732	case TCP_TW_SUCCESS:;
1733	}
1734	goto discard_it;
1735}
1736
1737static struct inet_peer *tcp_v6_get_peer(struct sock *sk, bool *release_it)
1738{
1739	struct rt6_info *rt = (struct rt6_info *) __sk_dst_get(sk);
1740	struct ipv6_pinfo *np = inet6_sk(sk);
1741	struct inet_peer *peer;
1742
1743	if (!rt ||
1744	    !ipv6_addr_equal(&np->daddr, &rt->rt6i_dst.addr)) {
1745		peer = inet_getpeer_v6(&np->daddr, 1);
1746		*release_it = true;
1747	} else {
1748		if (!rt->rt6i_peer)
1749			rt6_bind_peer(rt, 1);
1750		peer = rt->rt6i_peer;
1751		*release_it = false;
1752	}
1753
1754	return peer;
1755}
1756
1757static void *tcp_v6_tw_get_peer(struct sock *sk)
1758{
1759	const struct inet6_timewait_sock *tw6 = inet6_twsk(sk);
1760	const struct inet_timewait_sock *tw = inet_twsk(sk);
1761
1762	if (tw->tw_family == AF_INET)
1763		return tcp_v4_tw_get_peer(sk);
1764
1765	return inet_getpeer_v6(&tw6->tw_v6_daddr, 1);
1766}
1767
1768static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1769	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
1770	.twsk_unique	= tcp_twsk_unique,
1771	.twsk_destructor = tcp_twsk_destructor,
1772	.twsk_getpeer	= tcp_v6_tw_get_peer,
1773};
1774
1775static const struct inet_connection_sock_af_ops ipv6_specific = {
1776	.queue_xmit	   = inet6_csk_xmit,
1777	.send_check	   = tcp_v6_send_check,
1778	.rebuild_header	   = inet6_sk_rebuild_header,
1779	.conn_request	   = tcp_v6_conn_request,
1780	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
1781	.get_peer	   = tcp_v6_get_peer,
1782	.net_header_len	   = sizeof(struct ipv6hdr),
1783	.net_frag_header_len = sizeof(struct frag_hdr),
1784	.setsockopt	   = ipv6_setsockopt,
1785	.getsockopt	   = ipv6_getsockopt,
1786	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
1787	.sockaddr_len	   = sizeof(struct sockaddr_in6),
1788	.bind_conflict	   = inet6_csk_bind_conflict,
1789#ifdef CONFIG_COMPAT
1790	.compat_setsockopt = compat_ipv6_setsockopt,
1791	.compat_getsockopt = compat_ipv6_getsockopt,
1792#endif
1793};
1794
1795#ifdef CONFIG_TCP_MD5SIG
1796static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
1797	.md5_lookup	=	tcp_v6_md5_lookup,
1798	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
1799	.md5_parse	=	tcp_v6_parse_md5_keys,
1800};
1801#endif
1802
1803/*
1804 *	TCP over IPv4 via INET6 API
1805 */
1806
1807static const struct inet_connection_sock_af_ops ipv6_mapped = {
1808	.queue_xmit	   = ip_queue_xmit,
1809	.send_check	   = tcp_v4_send_check,
1810	.rebuild_header	   = inet_sk_rebuild_header,
1811	.conn_request	   = tcp_v6_conn_request,
1812	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
1813	.get_peer	   = tcp_v4_get_peer,
1814	.net_header_len	   = sizeof(struct iphdr),
1815	.setsockopt	   = ipv6_setsockopt,
1816	.getsockopt	   = ipv6_getsockopt,
1817	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
1818	.sockaddr_len	   = sizeof(struct sockaddr_in6),
1819	.bind_conflict	   = inet6_csk_bind_conflict,
1820#ifdef CONFIG_COMPAT
1821	.compat_setsockopt = compat_ipv6_setsockopt,
1822	.compat_getsockopt = compat_ipv6_getsockopt,
1823#endif
1824};
1825
1826#ifdef CONFIG_TCP_MD5SIG
1827static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
1828	.md5_lookup	=	tcp_v4_md5_lookup,
1829	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
1830	.md5_parse	=	tcp_v6_parse_md5_keys,
1831};
1832#endif
1833
1834/* NOTE: A lot of things set to zero explicitly by call to
1835 *       sk_alloc() so need not be done here.
1836 */
1837static int tcp_v6_init_sock(struct sock *sk)
1838{
1839	struct inet_connection_sock *icsk = inet_csk(sk);
1840
1841	tcp_init_sock(sk);
1842
1843	icsk->icsk_af_ops = &ipv6_specific;
1844
1845#ifdef CONFIG_TCP_MD5SIG
1846	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
1847#endif
1848
1849	return 0;
1850}
1851
1852static void tcp_v6_destroy_sock(struct sock *sk)
1853{
1854	tcp_v4_destroy_sock(sk);
1855	inet6_destroy_sock(sk);
1856}
1857
1858#ifdef CONFIG_PROC_FS
1859/* Proc filesystem TCPv6 sock list dumping. */
1860static void get_openreq6(struct seq_file *seq,
1861			 const struct sock *sk, struct request_sock *req, int i, int uid)
1862{
1863	int ttd = req->expires - jiffies;
1864	const struct in6_addr *src = &inet6_rsk(req)->loc_addr;
1865	const struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;
1866
1867	if (ttd < 0)
1868		ttd = 0;
1869
1870	seq_printf(seq,
1871		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1872		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
1873		   i,
1874		   src->s6_addr32[0], src->s6_addr32[1],
1875		   src->s6_addr32[2], src->s6_addr32[3],
1876		   ntohs(inet_rsk(req)->loc_port),
1877		   dest->s6_addr32[0], dest->s6_addr32[1],
1878		   dest->s6_addr32[2], dest->s6_addr32[3],
1879		   ntohs(inet_rsk(req)->rmt_port),
1880		   TCP_SYN_RECV,
1881		   0, 0, /* could print option size, but that is af dependent. */
1882		   1,   /* timers active (only the expire timer) */
1883		   jiffies_to_clock_t(ttd),
1884		   req->retrans,
1885		   uid,
1886		   0,  /* non standard timer */
1887		   0, /* open_requests have no inode */
1888		   0, req);
1889}
1890
1891static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1892{
1893	const struct in6_addr *dest, *src;
1894	__u16 destp, srcp;
1895	int timer_active;
1896	unsigned long timer_expires;
1897	const struct inet_sock *inet = inet_sk(sp);
1898	const struct tcp_sock *tp = tcp_sk(sp);
1899	const struct inet_connection_sock *icsk = inet_csk(sp);
1900	const struct ipv6_pinfo *np = inet6_sk(sp);
1901
1902	dest  = &np->daddr;
1903	src   = &np->rcv_saddr;
1904	destp = ntohs(inet->inet_dport);
1905	srcp  = ntohs(inet->inet_sport);
1906
1907	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
1908		timer_active	= 1;
1909		timer_expires	= icsk->icsk_timeout;
1910	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1911		timer_active	= 4;
1912		timer_expires	= icsk->icsk_timeout;
1913	} else if (timer_pending(&sp->sk_timer)) {
1914		timer_active	= 2;
1915		timer_expires	= sp->sk_timer.expires;
1916	} else {
1917		timer_active	= 0;
1918		timer_expires = jiffies;
1919	}
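	/* timer_active follows the /proc/net/tcp encoding: 1 means the
	 * retransmit timer, 2 the sk_timer, 4 the zero-window probe
	 * timer, and 0 none; TIME_WAIT sockets report 3 (see
	 * get_timewait6_sock() below).
	 */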
1920
1921	seq_printf(seq,
1922		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1923		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %lu %lu %u %u %d\n",
1924		   i,
1925		   src->s6_addr32[0], src->s6_addr32[1],
1926		   src->s6_addr32[2], src->s6_addr32[3], srcp,
1927		   dest->s6_addr32[0], dest->s6_addr32[1],
1928		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
1929		   sp->sk_state,
1930		   tp->write_seq - tp->snd_una,
1931		   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
1932		   timer_active,
1933		   jiffies_to_clock_t(timer_expires - jiffies),
1934		   icsk->icsk_retransmits,
1935		   sock_i_uid(sp),
1936		   icsk->icsk_probes_out,
1937		   sock_i_ino(sp),
1938		   atomic_read(&sp->sk_refcnt), sp,
1939		   jiffies_to_clock_t(icsk->icsk_rto),
1940		   jiffies_to_clock_t(icsk->icsk_ack.ato),
1941		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
1942		   tp->snd_cwnd,
1943		   tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh
1944		   );
1945}
1946
1947static void get_timewait6_sock(struct seq_file *seq,
1948			       struct inet_timewait_sock *tw, int i)
1949{
1950	const struct in6_addr *dest, *src;
1951	__u16 destp, srcp;
1952	const struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
1953	int ttd = tw->tw_ttd - jiffies;
1954
1955	if (ttd < 0)
1956		ttd = 0;
1957
1958	dest = &tw6->tw_v6_daddr;
1959	src  = &tw6->tw_v6_rcv_saddr;
1960	destp = ntohs(tw->tw_dport);
1961	srcp  = ntohs(tw->tw_sport);
1962
1963	seq_printf(seq,
1964		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1965		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
1966		   i,
1967		   src->s6_addr32[0], src->s6_addr32[1],
1968		   src->s6_addr32[2], src->s6_addr32[3], srcp,
1969		   dest->s6_addr32[0], dest->s6_addr32[1],
1970		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
1971		   tw->tw_substate, 0, 0,
1972		   3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
1973		   atomic_read(&tw->tw_refcnt), tw);
1974}
1975
1976static int tcp6_seq_show(struct seq_file *seq, void *v)
1977{
1978	struct tcp_iter_state *st;
1979
1980	if (v == SEQ_START_TOKEN) {
1981		seq_puts(seq,
1982			 "  sl  "
1983			 "local_address                         "
1984			 "remote_address                        "
1985			 "st tx_queue rx_queue tr tm->when retrnsmt"
1986			 "   uid  timeout inode\n");
1987		goto out;
1988	}
1989	st = seq->private;
1990
1991	switch (st->state) {
1992	case TCP_SEQ_STATE_LISTENING:
1993	case TCP_SEQ_STATE_ESTABLISHED:
1994		get_tcp6_sock(seq, v, st->num);
1995		break;
1996	case TCP_SEQ_STATE_OPENREQ:
1997		get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
1998		break;
1999	case TCP_SEQ_STATE_TIME_WAIT:
2000		get_timewait6_sock(seq, v, st->num);
2001		break;
2002	}
2003out:
2004	return 0;
2005}
2006
2007static const struct file_operations tcp6_afinfo_seq_fops = {
2008	.owner   = THIS_MODULE,
2009	.open    = tcp_seq_open,
2010	.read    = seq_read,
2011	.llseek  = seq_lseek,
2012	.release = seq_release_net
2013};
2014
2015static struct tcp_seq_afinfo tcp6_seq_afinfo = {
2016	.name		= "tcp6",
2017	.family		= AF_INET6,
2018	.seq_fops	= &tcp6_afinfo_seq_fops,
2019	.seq_ops	= {
2020		.show		= tcp6_seq_show,
2021	},
2022};
2023
2024int __net_init tcp6_proc_init(struct net *net)
2025{
2026	return tcp_proc_register(net, &tcp6_seq_afinfo);
2027}
2028
2029void tcp6_proc_exit(struct net *net)
2030{
2031	tcp_proc_unregister(net, &tcp6_seq_afinfo);
2032}
2033#endif
2034
2035struct proto tcpv6_prot = {
2036	.name			= "TCPv6",
2037	.owner			= THIS_MODULE,
2038	.close			= tcp_close,
2039	.connect		= tcp_v6_connect,
2040	.disconnect		= tcp_disconnect,
2041	.accept			= inet_csk_accept,
2042	.ioctl			= tcp_ioctl,
2043	.init			= tcp_v6_init_sock,
2044	.destroy		= tcp_v6_destroy_sock,
2045	.shutdown		= tcp_shutdown,
2046	.setsockopt		= tcp_setsockopt,
2047	.getsockopt		= tcp_getsockopt,
2048	.recvmsg		= tcp_recvmsg,
2049	.sendmsg		= tcp_sendmsg,
2050	.sendpage		= tcp_sendpage,
2051	.backlog_rcv		= tcp_v6_do_rcv,
2052	.hash			= tcp_v6_hash,
2053	.unhash			= inet_unhash,
2054	.get_port		= inet_csk_get_port,
2055	.enter_memory_pressure	= tcp_enter_memory_pressure,
2056	.sockets_allocated	= &tcp_sockets_allocated,
2057	.memory_allocated	= &tcp_memory_allocated,
2058	.memory_pressure	= &tcp_memory_pressure,
2059	.orphan_count		= &tcp_orphan_count,
2060	.sysctl_wmem		= sysctl_tcp_wmem,
2061	.sysctl_rmem		= sysctl_tcp_rmem,
2062	.max_header		= MAX_TCP_HEADER,
2063	.obj_size		= sizeof(struct tcp6_sock),
2064	.slab_flags		= SLAB_DESTROY_BY_RCU,
2065	.twsk_prot		= &tcp6_timewait_sock_ops,
2066	.rsk_prot		= &tcp6_request_sock_ops,
2067	.h.hashinfo		= &tcp_hashinfo,
2068	.no_autobind		= true,
2069#ifdef CONFIG_COMPAT
2070	.compat_setsockopt	= compat_tcp_setsockopt,
2071	.compat_getsockopt	= compat_tcp_getsockopt,
2072#endif
2073#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
2074	.proto_cgroup		= tcp_proto_cgroup,
2075#endif
2076};
2077
2078static const struct inet6_protocol tcpv6_protocol = {
2079	.handler	=	tcp_v6_rcv,
2080	.err_handler	=	tcp_v6_err,
2081	.gso_send_check	=	tcp_v6_gso_send_check,
2082	.gso_segment	=	tcp_tso_segment,
2083	.gro_receive	=	tcp6_gro_receive,
2084	.gro_complete	=	tcp6_gro_complete,
2085	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
2086};
2087
2088static struct inet_protosw tcpv6_protosw = {
2089	.type		=	SOCK_STREAM,
2090	.protocol	=	IPPROTO_TCP,
2091	.prot		=	&tcpv6_prot,
2092	.ops		=	&inet6_stream_ops,
2093	.no_check	=	0,
2094	.flags		=	INET_PROTOSW_PERMANENT |
2095				INET_PROTOSW_ICSK,
2096};
2097
2098static int __net_init tcpv6_net_init(struct net *net)
2099{
2100	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
2101				    SOCK_RAW, IPPROTO_TCP, net);
2102}
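/* The per-namespace control socket created above is what the stack
 * transmits through when answering packets that no local socket owns,
 * e.g. the RSTs and timewait ACKs built by tcp_v6_send_reset() and
 * tcp_v6_timewait_ack().
 */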
2103
2104static void __net_exit tcpv6_net_exit(struct net *net)
2105{
2106	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
2107}
2108
2109static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
2110{
2111	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
2112}
2113
2114static struct pernet_operations tcpv6_net_ops = {
2115	.init	    = tcpv6_net_init,
2116	.exit	    = tcpv6_net_exit,
2117	.exit_batch = tcpv6_net_exit_batch,
2118};
2119
2120int __init tcpv6_init(void)
2121{
2122	int ret;
2123
2124	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
2125	if (ret)
2126		goto out;
2127
2128	/* register inet6 protocol */
2129	ret = inet6_register_protosw(&tcpv6_protosw);
2130	if (ret)
2131		goto out_tcpv6_protocol;
2132
2133	ret = register_pernet_subsys(&tcpv6_net_ops);
2134	if (ret)
2135		goto out_tcpv6_protosw;
2136out:
2137	return ret;
2138
2139out_tcpv6_protosw:
2140	inet6_unregister_protosw(&tcpv6_protosw);
2141out_tcpv6_protocol:
2142	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2143	goto out;
2144}
2145
2146void tcpv6_exit(void)
2147{
2148	unregister_pernet_subsys(&tcpv6_net_ops);
2149	inet6_unregister_protosw(&tcpv6_protosw);
2150	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2151}