v4.6
 
   1/*
   2 *	DCCP over IPv6
   3 *	Linux INET6 implementation
   4 *
   5 *	Based on net/dccp6/ipv6.c
   6 *
   7 *	Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
   8 *
   9 *	This program is free software; you can redistribute it and/or
  10 *      modify it under the terms of the GNU General Public License
  11 *      as published by the Free Software Foundation; either version
  12 *      2 of the License, or (at your option) any later version.
  13 */
  14
  15#include <linux/module.h>
  16#include <linux/random.h>
  17#include <linux/slab.h>
  18#include <linux/xfrm.h>
  19
  20#include <net/addrconf.h>
  21#include <net/inet_common.h>
  22#include <net/inet_hashtables.h>
  23#include <net/inet_sock.h>
  24#include <net/inet6_connection_sock.h>
  25#include <net/inet6_hashtables.h>
  26#include <net/ip6_route.h>
  27#include <net/ipv6.h>
  28#include <net/protocol.h>
  29#include <net/transp_v6.h>
  30#include <net/ip6_checksum.h>
  31#include <net/xfrm.h>
  32#include <net/secure_seq.h>
  33
  34#include "dccp.h"
  35#include "ipv6.h"
  36#include "feat.h"
  37
  38/* The per-net dccp.v6_ctl_sk is used for sending RSTs and ACKs */
  39
  40static const struct inet_connection_sock_af_ops dccp_ipv6_mapped;
  41static const struct inet_connection_sock_af_ops dccp_ipv6_af_ops;
  42
  43/* add pseudo-header to DCCP checksum stored in skb->csum */
  44static inline __sum16 dccp_v6_csum_finish(struct sk_buff *skb,
  45				      const struct in6_addr *saddr,
  46				      const struct in6_addr *daddr)
  47{
  48	return csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_DCCP, skb->csum);
  49}
  50
  51static inline void dccp_v6_send_check(struct sock *sk, struct sk_buff *skb)
  52{
  53	struct ipv6_pinfo *np = inet6_sk(sk);
  54	struct dccp_hdr *dh = dccp_hdr(skb);
  55
  56	dccp_csum_outgoing(skb);
  57	dh->dccph_checksum = dccp_v6_csum_finish(skb, &np->saddr, &sk->sk_v6_daddr);
  58}
  59
  60static inline __u64 dccp_v6_init_sequence(struct sk_buff *skb)
  61{
  62	return secure_dccpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
  63					     ipv6_hdr(skb)->saddr.s6_addr32,
  64					     dccp_hdr(skb)->dccph_dport,
  65					     dccp_hdr(skb)->dccph_sport     );
  66
  67}
  68
  69static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
  70			u8 type, u8 code, int offset, __be32 info)
  71{
  72	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
  73	const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset);
  74	struct dccp_sock *dp;
  75	struct ipv6_pinfo *np;
  76	struct sock *sk;
  77	int err;
  78	__u64 seq;
  79	struct net *net = dev_net(skb->dev);
  80
  81	if (skb->len < offset + sizeof(*dh) ||
  82	    skb->len < offset + __dccp_basic_hdr_len(dh)) {
  83		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
  84				   ICMP6_MIB_INERRORS);
  85		return;
  86	}
  87
  88	sk = __inet6_lookup_established(net, &dccp_hashinfo,
  89					&hdr->daddr, dh->dccph_dport,
  90					&hdr->saddr, ntohs(dh->dccph_sport),
  91					inet6_iif(skb));
  92
  93	if (!sk) {
  94		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
  95				   ICMP6_MIB_INERRORS);
  96		return;
  97	}
  98
  99	if (sk->sk_state == DCCP_TIME_WAIT) {
 100		inet_twsk_put(inet_twsk(sk));
 101		return;
 102	}
 103	seq = dccp_hdr_seq(dh);
 104	if (sk->sk_state == DCCP_NEW_SYN_RECV)
 105		return dccp_req_err(sk, seq);
 106
 107	bh_lock_sock(sk);
 108	if (sock_owned_by_user(sk))
 109		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
 110
 111	if (sk->sk_state == DCCP_CLOSED)
 112		goto out;
 113
 114	dp = dccp_sk(sk);
 115	if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_LISTEN) &&
 116	    !between48(seq, dp->dccps_awl, dp->dccps_awh)) {
 117		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
 118		goto out;
 119	}
 120
 121	np = inet6_sk(sk);
 122
 123	if (type == NDISC_REDIRECT) {
 124		struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
 125
 126		if (dst)
 127			dst->ops->redirect(dst, sk, skb);
 128		goto out;
 129	}
 130
 131	if (type == ICMPV6_PKT_TOOBIG) {
 132		struct dst_entry *dst = NULL;
 133
 134		if (!ip6_sk_accept_pmtu(sk))
 135			goto out;
 136
 137		if (sock_owned_by_user(sk))
 138			goto out;
 139		if ((1 << sk->sk_state) & (DCCPF_LISTEN | DCCPF_CLOSED))
 140			goto out;
 141
 142		dst = inet6_csk_update_pmtu(sk, ntohl(info));
 143		if (!dst)
 144			goto out;
 145
 146		if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst))
 147			dccp_sync_mss(sk, dst_mtu(dst));
 148		goto out;
 149	}
 150
 151	icmpv6_err_convert(type, code, &err);
 152
 153	/* Might be for an request_sock */
 154	switch (sk->sk_state) {
 155	case DCCP_REQUESTING:
 156	case DCCP_RESPOND:  /* Cannot happen.
 157			       It can, it SYNs are crossed. --ANK */
 158		if (!sock_owned_by_user(sk)) {
 159			DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
 160			sk->sk_err = err;
 161			/*
 162			 * Wake people up to see the error
 163			 * (see connect in sock.c)
 164			 */
 165			sk->sk_error_report(sk);
 166			dccp_done(sk);
 167		} else
 168			sk->sk_err_soft = err;
 169		goto out;
 170	}
 171
 172	if (!sock_owned_by_user(sk) && np->recverr) {
 173		sk->sk_err = err;
 174		sk->sk_error_report(sk);
 175	} else
 176		sk->sk_err_soft = err;
 177
 178out:
 179	bh_unlock_sock(sk);
 180	sock_put(sk);
 181}
 182
 183
 184static int dccp_v6_send_response(const struct sock *sk, struct request_sock *req)
 185{
 186	struct inet_request_sock *ireq = inet_rsk(req);
 187	struct ipv6_pinfo *np = inet6_sk(sk);
 188	struct sk_buff *skb;
 189	struct in6_addr *final_p, final;
 190	struct flowi6 fl6;
 191	int err = -1;
 192	struct dst_entry *dst;
 193
 194	memset(&fl6, 0, sizeof(fl6));
 195	fl6.flowi6_proto = IPPROTO_DCCP;
 196	fl6.daddr = ireq->ir_v6_rmt_addr;
 197	fl6.saddr = ireq->ir_v6_loc_addr;
 198	fl6.flowlabel = 0;
 199	fl6.flowi6_oif = ireq->ir_iif;
 200	fl6.fl6_dport = ireq->ir_rmt_port;
 201	fl6.fl6_sport = htons(ireq->ir_num);
 202	security_req_classify_flow(req, flowi6_to_flowi(&fl6));
 203
 204
 205	rcu_read_lock();
 206	final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt), &final);
 207	rcu_read_unlock();
 208
 209	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
 210	if (IS_ERR(dst)) {
 211		err = PTR_ERR(dst);
 212		dst = NULL;
 213		goto done;
 214	}
 215
 216	skb = dccp_make_response(sk, dst, req);
 217	if (skb != NULL) {
 218		struct dccp_hdr *dh = dccp_hdr(skb);
 219
 220		dh->dccph_checksum = dccp_v6_csum_finish(skb,
 221							 &ireq->ir_v6_loc_addr,
 222							 &ireq->ir_v6_rmt_addr);
 223		fl6.daddr = ireq->ir_v6_rmt_addr;
 224		rcu_read_lock();
 225		err = ip6_xmit(sk, skb, &fl6, rcu_dereference(np->opt),
 226			       np->tclass);
 227		rcu_read_unlock();
 228		err = net_xmit_eval(err);
 229	}
 230
 231done:
 232	dst_release(dst);
 233	return err;
 234}
 235
 236static void dccp_v6_reqsk_destructor(struct request_sock *req)
 237{
 238	dccp_feat_list_purge(&dccp_rsk(req)->dreq_featneg);
 239	kfree_skb(inet_rsk(req)->pktopts);
 240}
 241
 242static void dccp_v6_ctl_send_reset(const struct sock *sk, struct sk_buff *rxskb)
 243{
 244	const struct ipv6hdr *rxip6h;
 245	struct sk_buff *skb;
 246	struct flowi6 fl6;
 247	struct net *net = dev_net(skb_dst(rxskb)->dev);
 248	struct sock *ctl_sk = net->dccp.v6_ctl_sk;
 249	struct dst_entry *dst;
 250
 251	if (dccp_hdr(rxskb)->dccph_type == DCCP_PKT_RESET)
 252		return;
 253
 254	if (!ipv6_unicast_destination(rxskb))
 255		return;
 256
 257	skb = dccp_ctl_make_reset(ctl_sk, rxskb);
 258	if (skb == NULL)
 259		return;
 260
 261	rxip6h = ipv6_hdr(rxskb);
 262	dccp_hdr(skb)->dccph_checksum = dccp_v6_csum_finish(skb, &rxip6h->saddr,
 263							    &rxip6h->daddr);
 264
 265	memset(&fl6, 0, sizeof(fl6));
 266	fl6.daddr = rxip6h->saddr;
 267	fl6.saddr = rxip6h->daddr;
 268
 269	fl6.flowi6_proto = IPPROTO_DCCP;
 270	fl6.flowi6_oif = inet6_iif(rxskb);
 271	fl6.fl6_dport = dccp_hdr(skb)->dccph_dport;
 272	fl6.fl6_sport = dccp_hdr(skb)->dccph_sport;
 273	security_skb_classify_flow(rxskb, flowi6_to_flowi(&fl6));
 274
 275	/* sk = NULL, but it is safe for now. RST socket required. */
 276	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
 277	if (!IS_ERR(dst)) {
 278		skb_dst_set(skb, dst);
 279		ip6_xmit(ctl_sk, skb, &fl6, NULL, 0);
 280		DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
 281		DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS);
 282		return;
 283	}
 284
 285	kfree_skb(skb);
 286}
 287
 288static struct request_sock_ops dccp6_request_sock_ops = {
 289	.family		= AF_INET6,
 290	.obj_size	= sizeof(struct dccp6_request_sock),
 291	.rtx_syn_ack	= dccp_v6_send_response,
 292	.send_ack	= dccp_reqsk_send_ack,
 293	.destructor	= dccp_v6_reqsk_destructor,
 294	.send_reset	= dccp_v6_ctl_send_reset,
 295	.syn_ack_timeout = dccp_syn_ack_timeout,
 296};
 297
 298static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 299{
 300	struct request_sock *req;
 301	struct dccp_request_sock *dreq;
 302	struct inet_request_sock *ireq;
 303	struct ipv6_pinfo *np = inet6_sk(sk);
 304	const __be32 service = dccp_hdr_request(skb)->dccph_req_service;
 305	struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
 306
 307	if (skb->protocol == htons(ETH_P_IP))
 308		return dccp_v4_conn_request(sk, skb);
 309
 310	if (!ipv6_unicast_destination(skb))
 311		return 0;	/* discard, don't send a reset here */
 312
 313	if (dccp_bad_service_code(sk, service)) {
 314		dcb->dccpd_reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE;
 315		goto drop;
 316	}
 317	/*
 318	 * There are no SYN attacks on IPv6, yet...
 319	 */
 320	dcb->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY;
 321	if (inet_csk_reqsk_queue_is_full(sk))
 322		goto drop;
 323
 324	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
 325		goto drop;
 326
 327	req = inet_reqsk_alloc(&dccp6_request_sock_ops, sk, true);
 328	if (req == NULL)
 329		goto drop;
 330
 331	if (dccp_reqsk_init(req, dccp_sk(sk), skb))
 332		goto drop_and_free;
 333
 334	dreq = dccp_rsk(req);
 335	if (dccp_parse_options(sk, dreq, skb))
 336		goto drop_and_free;
 337
 338	if (security_inet_conn_request(sk, skb, req))
 339		goto drop_and_free;
 340
 341	ireq = inet_rsk(req);
 342	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
 343	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
 344	ireq->ireq_family = AF_INET6;
 345
 346	if (ipv6_opt_accepted(sk, skb, IP6CB(skb)) ||
 347	    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
 348	    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
 349		atomic_inc(&skb->users);
 350		ireq->pktopts = skb;
 351	}
 352	ireq->ir_iif = sk->sk_bound_dev_if;
 353
 354	/* So that link locals have meaning */
 355	if (!sk->sk_bound_dev_if &&
 356	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
 357		ireq->ir_iif = inet6_iif(skb);
 358
 359	/*
 360	 * Step 3: Process LISTEN state
 361	 *
 362	 *   Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie
 363	 *
 364	 * Setting S.SWL/S.SWH to is deferred to dccp_create_openreq_child().
 365	 */
 366	dreq->dreq_isr	   = dcb->dccpd_seq;
 367	dreq->dreq_gsr     = dreq->dreq_isr;
 368	dreq->dreq_iss	   = dccp_v6_init_sequence(skb);
 369	dreq->dreq_gss     = dreq->dreq_iss;
 370	dreq->dreq_service = service;
 371
 372	if (dccp_v6_send_response(sk, req))
 373		goto drop_and_free;
 374
 375	inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
 376	return 0;
 377
 378drop_and_free:
 379	reqsk_free(req);
 380drop:
 381	DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
 382	return -1;
 383}
 384
 385static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
 386					      struct sk_buff *skb,
 387					      struct request_sock *req,
 388					      struct dst_entry *dst,
 389					      struct request_sock *req_unhash,
 390					      bool *own_req)
 391{
 392	struct inet_request_sock *ireq = inet_rsk(req);
 393	struct ipv6_pinfo *newnp;
 394	const struct ipv6_pinfo *np = inet6_sk(sk);
 395	struct ipv6_txoptions *opt;
 396	struct inet_sock *newinet;
 397	struct dccp6_sock *newdp6;
 398	struct sock *newsk;
 399
 400	if (skb->protocol == htons(ETH_P_IP)) {
 401		/*
 402		 *	v6 mapped
 403		 */
 404		newsk = dccp_v4_request_recv_sock(sk, skb, req, dst,
 405						  req_unhash, own_req);
 406		if (newsk == NULL)
 407			return NULL;
 408
 409		newdp6 = (struct dccp6_sock *)newsk;
 410		newinet = inet_sk(newsk);
 411		newinet->pinet6 = &newdp6->inet6;
 412		newnp = inet6_sk(newsk);
 413
 414		memcpy(newnp, np, sizeof(struct ipv6_pinfo));
 415
 416		newnp->saddr = newsk->sk_v6_rcv_saddr;
 417
 418		inet_csk(newsk)->icsk_af_ops = &dccp_ipv6_mapped;
 419		newsk->sk_backlog_rcv = dccp_v4_do_rcv;
 420		newnp->pktoptions  = NULL;
 421		newnp->opt	   = NULL;
 422		newnp->mcast_oif   = inet6_iif(skb);
 423		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
 424
 425		/*
 426		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
 427		 * here, dccp_create_openreq_child now does this for us, see the comment in
 428		 * that function for the gory details. -acme
 429		 */
 430
 431		/* It is tricky place. Until this moment IPv4 tcp
 432		   worked with IPv6 icsk.icsk_af_ops.
 433		   Sync it now.
 434		 */
 435		dccp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
 436
 437		return newsk;
 438	}
 439
 440
 441	if (sk_acceptq_is_full(sk))
 442		goto out_overflow;
 443
 444	if (!dst) {
 445		struct flowi6 fl6;
 446
 447		dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_DCCP);
 448		if (!dst)
 449			goto out;
 450	}
 451
 452	newsk = dccp_create_openreq_child(sk, req, skb);
 453	if (newsk == NULL)
 454		goto out_nonewsk;
 455
 456	/*
 457	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
 458	 * count here, dccp_create_openreq_child now does this for us, see the
 459	 * comment in that function for the gory details. -acme
 460	 */
 461
 462	ip6_dst_store(newsk, dst, NULL, NULL);
 463	newsk->sk_route_caps = dst->dev->features & ~(NETIF_F_IP_CSUM |
 464						      NETIF_F_TSO);
 465	newdp6 = (struct dccp6_sock *)newsk;
 466	newinet = inet_sk(newsk);
 467	newinet->pinet6 = &newdp6->inet6;
 468	newnp = inet6_sk(newsk);
 469
 470	memcpy(newnp, np, sizeof(struct ipv6_pinfo));
 471
 472	newsk->sk_v6_daddr	= ireq->ir_v6_rmt_addr;
 473	newnp->saddr		= ireq->ir_v6_loc_addr;
 474	newsk->sk_v6_rcv_saddr	= ireq->ir_v6_loc_addr;
 475	newsk->sk_bound_dev_if	= ireq->ir_iif;
 476
 477	/* Now IPv6 options...
 478
 479	   First: no IPv4 options.
 480	 */
 481	newinet->inet_opt = NULL;
 482
 483	/* Clone RX bits */
 484	newnp->rxopt.all = np->rxopt.all;
 485
 486	newnp->pktoptions = NULL;
 487	newnp->opt	  = NULL;
 488	newnp->mcast_oif  = inet6_iif(skb);
 489	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
 490
 491	/*
 492	 * Clone native IPv6 options from listening socket (if any)
 493	 *
 494	 * Yes, keeping reference count would be much more clever, but we make
 495	 * one more one thing there: reattach optmem to newsk.
 496	 */
 497	opt = rcu_dereference(np->opt);
 498	if (opt) {
 499		opt = ipv6_dup_options(newsk, opt);
 500		RCU_INIT_POINTER(newnp->opt, opt);
 501	}
 502	inet_csk(newsk)->icsk_ext_hdr_len = 0;
 503	if (opt)
 504		inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
 505						    opt->opt_flen;
 506
 507	dccp_sync_mss(newsk, dst_mtu(dst));
 508
 509	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
 510	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
 511
 512	if (__inet_inherit_port(sk, newsk) < 0) {
 513		inet_csk_prepare_forced_close(newsk);
 514		dccp_done(newsk);
 515		goto out;
 516	}
 517	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
 518	/* Clone pktoptions received with SYN, if we own the req */
 519	if (*own_req && ireq->pktopts) {
 520		newnp->pktoptions = skb_clone(ireq->pktopts, GFP_ATOMIC);
 521		consume_skb(ireq->pktopts);
 522		ireq->pktopts = NULL;
 523		if (newnp->pktoptions)
 524			skb_set_owner_r(newnp->pktoptions, newsk);
 525	}
 526
 527	return newsk;
 528
 529out_overflow:
 530	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
 531out_nonewsk:
 532	dst_release(dst);
 533out:
 534	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
 535	return NULL;
 536}
 537
 538/* The socket must have it's spinlock held when we get
 539 * here.
 540 *
 541 * We have a potential double-lock case here, so even when
 542 * doing backlog processing we use the BH locking scheme.
 543 * This is because we cannot sleep with the original spinlock
 544 * held.
 545 */
 546static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
 547{
 548	struct ipv6_pinfo *np = inet6_sk(sk);
 549	struct sk_buff *opt_skb = NULL;
 550
 551	/* Imagine: socket is IPv6. IPv4 packet arrives,
 552	   goes to IPv4 receive handler and backlogged.
 553	   From backlog it always goes here. Kerboom...
 554	   Fortunately, dccp_rcv_established and rcv_established
 555	   handle them correctly, but it is not case with
 556	   dccp_v6_hnd_req and dccp_v6_ctl_send_reset().   --ANK
 557	 */
 558
 559	if (skb->protocol == htons(ETH_P_IP))
 560		return dccp_v4_do_rcv(sk, skb);
 561
 562	if (sk_filter(sk, skb))
 563		goto discard;
 564
 565	/*
 566	 * socket locking is here for SMP purposes as backlog rcv is currently
 567	 * called with bh processing disabled.
 568	 */
 569
 570	/* Do Stevens' IPV6_PKTOPTIONS.
 571
 572	   Yes, guys, it is the only place in our code, where we
 573	   may make it not affecting IPv4.
 574	   The rest of code is protocol independent,
 575	   and I do not like idea to uglify IPv4.
 576
 577	   Actually, all the idea behind IPV6_PKTOPTIONS
 578	   looks not very well thought. For now we latch
 579	   options, received in the last packet, enqueued
 580	   by tcp. Feel free to propose better solution.
 581					       --ANK (980728)
 582	 */
 583	if (np->rxopt.all)
 584	/*
 585	 * FIXME: Add handling of IPV6_PKTOPTIONS skb. See the comments below
 586	 *        (wrt ipv6_pktopions) and net/ipv6/tcp_ipv6.c for an example.
 587	 */
 588		opt_skb = skb_clone(skb, GFP_ATOMIC);
 589
 590	if (sk->sk_state == DCCP_OPEN) { /* Fast path */
 591		if (dccp_rcv_established(sk, skb, dccp_hdr(skb), skb->len))
 592			goto reset;
 593		if (opt_skb) {
 594			/* XXX This is where we would goto ipv6_pktoptions. */
 595			__kfree_skb(opt_skb);
 596		}
 597		return 0;
 598	}
 599
 600	/*
 601	 *  Step 3: Process LISTEN state
 602	 *     If S.state == LISTEN,
 603	 *	 If P.type == Request or P contains a valid Init Cookie option,
 604	 *	      (* Must scan the packet's options to check for Init
 605	 *		 Cookies.  Only Init Cookies are processed here,
 606	 *		 however; other options are processed in Step 8.  This
 607	 *		 scan need only be performed if the endpoint uses Init
 608	 *		 Cookies *)
 609	 *	      (* Generate a new socket and switch to that socket *)
 610	 *	      Set S := new socket for this port pair
 611	 *	      S.state = RESPOND
 612	 *	      Choose S.ISS (initial seqno) or set from Init Cookies
 613	 *	      Initialize S.GAR := S.ISS
 614	 *	      Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookies
 615	 *	      Continue with S.state == RESPOND
 616	 *	      (* A Response packet will be generated in Step 11 *)
 617	 *	 Otherwise,
 618	 *	      Generate Reset(No Connection) unless P.type == Reset
 619	 *	      Drop packet and return
 620	 *
 621	 * NOTE: the check for the packet types is done in
 622	 *	 dccp_rcv_state_process
 623	 */
 624
 625	if (dccp_rcv_state_process(sk, skb, dccp_hdr(skb), skb->len))
 626		goto reset;
 627	if (opt_skb) {
 628		/* XXX This is where we would goto ipv6_pktoptions. */
 629		__kfree_skb(opt_skb);
 630	}
 631	return 0;
 632
 633reset:
 634	dccp_v6_ctl_send_reset(sk, skb);
 635discard:
 636	if (opt_skb != NULL)
 637		__kfree_skb(opt_skb);
 638	kfree_skb(skb);
 639	return 0;
 640}
 641
 642static int dccp_v6_rcv(struct sk_buff *skb)
 643{
 644	const struct dccp_hdr *dh;
 645	struct sock *sk;
 646	int min_cov;
 647
 648	/* Step 1: Check header basics */
 649
 650	if (dccp_invalid_packet(skb))
 651		goto discard_it;
 652
 653	/* Step 1: If header checksum is incorrect, drop packet and return. */
 654	if (dccp_v6_csum_finish(skb, &ipv6_hdr(skb)->saddr,
 655				     &ipv6_hdr(skb)->daddr)) {
 656		DCCP_WARN("dropped packet with invalid checksum\n");
 657		goto discard_it;
 658	}
 659
 660	dh = dccp_hdr(skb);
 661
 662	DCCP_SKB_CB(skb)->dccpd_seq  = dccp_hdr_seq(dh);
 663	DCCP_SKB_CB(skb)->dccpd_type = dh->dccph_type;
 664
 665	if (dccp_packet_without_ack(skb))
 666		DCCP_SKB_CB(skb)->dccpd_ack_seq = DCCP_PKT_WITHOUT_ACK_SEQ;
 667	else
 668		DCCP_SKB_CB(skb)->dccpd_ack_seq = dccp_hdr_ack_seq(skb);
 669
 670lookup:
 671	sk = __inet6_lookup_skb(&dccp_hashinfo, skb, __dccp_hdr_len(dh),
 672			        dh->dccph_sport, dh->dccph_dport,
 673				inet6_iif(skb));
 674	if (!sk) {
 675		dccp_pr_debug("failed to look up flow ID in table and "
 676			      "get corresponding socket\n");
 677		goto no_dccp_socket;
 678	}
 679
 680	/*
 681	 * Step 2:
 682	 *	... or S.state == TIMEWAIT,
 683	 *		Generate Reset(No Connection) unless P.type == Reset
 684	 *		Drop packet and return
 685	 */
 686	if (sk->sk_state == DCCP_TIME_WAIT) {
 687		dccp_pr_debug("sk->sk_state == DCCP_TIME_WAIT: do_time_wait\n");
 688		inet_twsk_put(inet_twsk(sk));
 689		goto no_dccp_socket;
 690	}
 691
 692	if (sk->sk_state == DCCP_NEW_SYN_RECV) {
 693		struct request_sock *req = inet_reqsk(sk);
 694		struct sock *nsk;
 695
 696		sk = req->rsk_listener;
 697		if (unlikely(sk->sk_state != DCCP_LISTEN)) {
 698			inet_csk_reqsk_queue_drop_and_put(sk, req);
 699			goto lookup;
 700		}
 701		sock_hold(sk);
 702		nsk = dccp_check_req(sk, skb, req);
 703		if (!nsk) {
 704			reqsk_put(req);
 705			goto discard_and_relse;
 706		}
 707		if (nsk == sk) {
 708			reqsk_put(req);
 709		} else if (dccp_child_process(sk, nsk, skb)) {
 710			dccp_v6_ctl_send_reset(sk, skb);
 711			goto discard_and_relse;
 712		} else {
 713			sock_put(sk);
 714			return 0;
 715		}
 716	}
 717	/*
 718	 * RFC 4340, sec. 9.2.1: Minimum Checksum Coverage
 719	 *	o if MinCsCov = 0, only packets with CsCov = 0 are accepted
 720	 *	o if MinCsCov > 0, also accept packets with CsCov >= MinCsCov
 721	 */
 722	min_cov = dccp_sk(sk)->dccps_pcrlen;
 723	if (dh->dccph_cscov  &&  (min_cov == 0 || dh->dccph_cscov < min_cov))  {
 724		dccp_pr_debug("Packet CsCov %d does not satisfy MinCsCov %d\n",
 725			      dh->dccph_cscov, min_cov);
 726		/* FIXME: send Data Dropped option (see also dccp_v4_rcv) */
 727		goto discard_and_relse;
 728	}
 729
 730	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
 731		goto discard_and_relse;
 732
 733	return sk_receive_skb(sk, skb, 1) ? -1 : 0;
 734
 735no_dccp_socket:
 736	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
 737		goto discard_it;
 738	/*
 739	 * Step 2:
 740	 *	If no socket ...
 741	 *		Generate Reset(No Connection) unless P.type == Reset
 742	 *		Drop packet and return
 743	 */
 744	if (dh->dccph_type != DCCP_PKT_RESET) {
 745		DCCP_SKB_CB(skb)->dccpd_reset_code =
 746					DCCP_RESET_CODE_NO_CONNECTION;
 747		dccp_v6_ctl_send_reset(sk, skb);
 748	}
 749
 750discard_it:
 751	kfree_skb(skb);
 752	return 0;
 753
 754discard_and_relse:
 755	sock_put(sk);
 756	goto discard_it;
 757}
 758
 759static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 760			   int addr_len)
 761{
 762	struct sockaddr_in6 *usin = (struct sockaddr_in6 *)uaddr;
 763	struct inet_connection_sock *icsk = inet_csk(sk);
 764	struct inet_sock *inet = inet_sk(sk);
 765	struct ipv6_pinfo *np = inet6_sk(sk);
 766	struct dccp_sock *dp = dccp_sk(sk);
 767	struct in6_addr *saddr = NULL, *final_p, final;
 768	struct ipv6_txoptions *opt;
 769	struct flowi6 fl6;
 770	struct dst_entry *dst;
 771	int addr_type;
 772	int err;
 773
 774	dp->dccps_role = DCCP_ROLE_CLIENT;
 775
 776	if (addr_len < SIN6_LEN_RFC2133)
 777		return -EINVAL;
 778
 779	if (usin->sin6_family != AF_INET6)
 780		return -EAFNOSUPPORT;
 781
 782	memset(&fl6, 0, sizeof(fl6));
 783
 784	if (np->sndflow) {
 785		fl6.flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
 786		IP6_ECN_flow_init(fl6.flowlabel);
 787		if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
 788			struct ip6_flowlabel *flowlabel;
 789			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
 790			if (flowlabel == NULL)
 791				return -EINVAL;
 792			fl6_sock_release(flowlabel);
 793		}
 794	}
 795	/*
 796	 * connect() to INADDR_ANY means loopback (BSD'ism).
 797	 */
 798	if (ipv6_addr_any(&usin->sin6_addr))
 799		usin->sin6_addr.s6_addr[15] = 1;
 800
 801	addr_type = ipv6_addr_type(&usin->sin6_addr);
 802
 803	if (addr_type & IPV6_ADDR_MULTICAST)
 804		return -ENETUNREACH;
 805
 806	if (addr_type & IPV6_ADDR_LINKLOCAL) {
 807		if (addr_len >= sizeof(struct sockaddr_in6) &&
 808		    usin->sin6_scope_id) {
 809			/* If interface is set while binding, indices
 810			 * must coincide.
 811			 */
 812			if (sk->sk_bound_dev_if &&
 813			    sk->sk_bound_dev_if != usin->sin6_scope_id)
 814				return -EINVAL;
 815
 816			sk->sk_bound_dev_if = usin->sin6_scope_id;
 817		}
 818
 819		/* Connect to link-local address requires an interface */
 820		if (!sk->sk_bound_dev_if)
 821			return -EINVAL;
 822	}
 823
 824	sk->sk_v6_daddr = usin->sin6_addr;
 825	np->flow_label = fl6.flowlabel;
 826
 827	/*
 828	 * DCCP over IPv4
 829	 */
 830	if (addr_type == IPV6_ADDR_MAPPED) {
 831		u32 exthdrlen = icsk->icsk_ext_hdr_len;
 832		struct sockaddr_in sin;
 833
 834		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
 835
 836		if (__ipv6_only_sock(sk))
 837			return -ENETUNREACH;
 838
 839		sin.sin_family = AF_INET;
 840		sin.sin_port = usin->sin6_port;
 841		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
 842
 843		icsk->icsk_af_ops = &dccp_ipv6_mapped;
 844		sk->sk_backlog_rcv = dccp_v4_do_rcv;
 845
 846		err = dccp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
 847		if (err) {
 848			icsk->icsk_ext_hdr_len = exthdrlen;
 849			icsk->icsk_af_ops = &dccp_ipv6_af_ops;
 850			sk->sk_backlog_rcv = dccp_v6_do_rcv;
 851			goto failure;
 852		}
 853		np->saddr = sk->sk_v6_rcv_saddr;
 854		return err;
 855	}
 856
 857	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
 858		saddr = &sk->sk_v6_rcv_saddr;
 859
 860	fl6.flowi6_proto = IPPROTO_DCCP;
 861	fl6.daddr = sk->sk_v6_daddr;
 862	fl6.saddr = saddr ? *saddr : np->saddr;
 863	fl6.flowi6_oif = sk->sk_bound_dev_if;
 864	fl6.fl6_dport = usin->sin6_port;
 865	fl6.fl6_sport = inet->inet_sport;
 866	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
 867
 868	opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
 869	final_p = fl6_update_dst(&fl6, opt, &final);
 870
 871	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
 872	if (IS_ERR(dst)) {
 873		err = PTR_ERR(dst);
 874		goto failure;
 875	}
 876
 877	if (saddr == NULL) {
 878		saddr = &fl6.saddr;
 879		sk->sk_v6_rcv_saddr = *saddr;
 880	}
 881
 882	/* set the source address */
 883	np->saddr = *saddr;
 884	inet->inet_rcv_saddr = LOOPBACK4_IPV6;
 885
 886	ip6_dst_store(sk, dst, NULL, NULL);
 887
 888	icsk->icsk_ext_hdr_len = 0;
 889	if (opt)
 890		icsk->icsk_ext_hdr_len = opt->opt_flen + opt->opt_nflen;
 891
 892	inet->inet_dport = usin->sin6_port;
 893
 894	dccp_set_state(sk, DCCP_REQUESTING);
 895	err = inet6_hash_connect(&dccp_death_row, sk);
 896	if (err)
 897		goto late_failure;
 898
 899	dp->dccps_iss = secure_dccpv6_sequence_number(np->saddr.s6_addr32,
 900						      sk->sk_v6_daddr.s6_addr32,
 901						      inet->inet_sport,
 902						      inet->inet_dport);
 903	err = dccp_connect(sk);
 904	if (err)
 905		goto late_failure;
 906
 907	return 0;
 908
 909late_failure:
 910	dccp_set_state(sk, DCCP_CLOSED);
 911	__sk_dst_reset(sk);
 912failure:
 913	inet->inet_dport = 0;
 914	sk->sk_route_caps = 0;
 915	return err;
 916}
 917
 918static const struct inet_connection_sock_af_ops dccp_ipv6_af_ops = {
 919	.queue_xmit	   = inet6_csk_xmit,
 920	.send_check	   = dccp_v6_send_check,
 921	.rebuild_header	   = inet6_sk_rebuild_header,
 922	.conn_request	   = dccp_v6_conn_request,
 923	.syn_recv_sock	   = dccp_v6_request_recv_sock,
 924	.net_header_len	   = sizeof(struct ipv6hdr),
 925	.setsockopt	   = ipv6_setsockopt,
 926	.getsockopt	   = ipv6_getsockopt,
 927	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
 928	.sockaddr_len	   = sizeof(struct sockaddr_in6),
 929	.bind_conflict	   = inet6_csk_bind_conflict,
 930#ifdef CONFIG_COMPAT
 931	.compat_setsockopt = compat_ipv6_setsockopt,
 932	.compat_getsockopt = compat_ipv6_getsockopt,
 933#endif
 934};
 935
 936/*
 937 *	DCCP over IPv4 via INET6 API
 938 */
 939static const struct inet_connection_sock_af_ops dccp_ipv6_mapped = {
 940	.queue_xmit	   = ip_queue_xmit,
 941	.send_check	   = dccp_v4_send_check,
 942	.rebuild_header	   = inet_sk_rebuild_header,
 943	.conn_request	   = dccp_v6_conn_request,
 944	.syn_recv_sock	   = dccp_v6_request_recv_sock,
 945	.net_header_len	   = sizeof(struct iphdr),
 946	.setsockopt	   = ipv6_setsockopt,
 947	.getsockopt	   = ipv6_getsockopt,
 948	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
 949	.sockaddr_len	   = sizeof(struct sockaddr_in6),
 950#ifdef CONFIG_COMPAT
 951	.compat_setsockopt = compat_ipv6_setsockopt,
 952	.compat_getsockopt = compat_ipv6_getsockopt,
 953#endif
 954};
 955
 956/* NOTE: A lot of things set to zero explicitly by call to
 957 *       sk_alloc() so need not be done here.
 958 */
 959static int dccp_v6_init_sock(struct sock *sk)
 960{
 961	static __u8 dccp_v6_ctl_sock_initialized;
 962	int err = dccp_init_sock(sk, dccp_v6_ctl_sock_initialized);
 963
 964	if (err == 0) {
 965		if (unlikely(!dccp_v6_ctl_sock_initialized))
 966			dccp_v6_ctl_sock_initialized = 1;
 967		inet_csk(sk)->icsk_af_ops = &dccp_ipv6_af_ops;
 968	}
 969
 970	return err;
 971}
 972
 973static void dccp_v6_destroy_sock(struct sock *sk)
 974{
 975	dccp_destroy_sock(sk);
 976	inet6_destroy_sock(sk);
 977}
 978
 979static struct timewait_sock_ops dccp6_timewait_sock_ops = {
 980	.twsk_obj_size	= sizeof(struct dccp6_timewait_sock),
 981};
 982
 983static struct proto dccp_v6_prot = {
 984	.name		   = "DCCPv6",
 985	.owner		   = THIS_MODULE,
 986	.close		   = dccp_close,
 987	.connect	   = dccp_v6_connect,
 988	.disconnect	   = dccp_disconnect,
 989	.ioctl		   = dccp_ioctl,
 990	.init		   = dccp_v6_init_sock,
 991	.setsockopt	   = dccp_setsockopt,
 992	.getsockopt	   = dccp_getsockopt,
 993	.sendmsg	   = dccp_sendmsg,
 994	.recvmsg	   = dccp_recvmsg,
 995	.backlog_rcv	   = dccp_v6_do_rcv,
 996	.hash		   = inet6_hash,
 997	.unhash		   = inet_unhash,
 998	.accept		   = inet_csk_accept,
 999	.get_port	   = inet_csk_get_port,
1000	.shutdown	   = dccp_shutdown,
1001	.destroy	   = dccp_v6_destroy_sock,
1002	.orphan_count	   = &dccp_orphan_count,
1003	.max_header	   = MAX_DCCP_HEADER,
1004	.obj_size	   = sizeof(struct dccp6_sock),
1005	.slab_flags	   = SLAB_DESTROY_BY_RCU,
1006	.rsk_prot	   = &dccp6_request_sock_ops,
1007	.twsk_prot	   = &dccp6_timewait_sock_ops,
1008	.h.hashinfo	   = &dccp_hashinfo,
1009#ifdef CONFIG_COMPAT
1010	.compat_setsockopt = compat_dccp_setsockopt,
1011	.compat_getsockopt = compat_dccp_getsockopt,
1012#endif
1013};
1014
1015static const struct inet6_protocol dccp_v6_protocol = {
1016	.handler	= dccp_v6_rcv,
1017	.err_handler	= dccp_v6_err,
1018	.flags		= INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
1019};
1020
1021static const struct proto_ops inet6_dccp_ops = {
1022	.family		   = PF_INET6,
1023	.owner		   = THIS_MODULE,
1024	.release	   = inet6_release,
1025	.bind		   = inet6_bind,
1026	.connect	   = inet_stream_connect,
1027	.socketpair	   = sock_no_socketpair,
1028	.accept		   = inet_accept,
1029	.getname	   = inet6_getname,
1030	.poll		   = dccp_poll,
1031	.ioctl		   = inet6_ioctl,
1032	.listen		   = inet_dccp_listen,
1033	.shutdown	   = inet_shutdown,
1034	.setsockopt	   = sock_common_setsockopt,
1035	.getsockopt	   = sock_common_getsockopt,
1036	.sendmsg	   = inet_sendmsg,
1037	.recvmsg	   = sock_common_recvmsg,
1038	.mmap		   = sock_no_mmap,
1039	.sendpage	   = sock_no_sendpage,
1040#ifdef CONFIG_COMPAT
1041	.compat_setsockopt = compat_sock_common_setsockopt,
1042	.compat_getsockopt = compat_sock_common_getsockopt,
1043#endif
1044};
1045
1046static struct inet_protosw dccp_v6_protosw = {
1047	.type		= SOCK_DCCP,
1048	.protocol	= IPPROTO_DCCP,
1049	.prot		= &dccp_v6_prot,
1050	.ops		= &inet6_dccp_ops,
1051	.flags		= INET_PROTOSW_ICSK,
1052};
1053
1054static int __net_init dccp_v6_init_net(struct net *net)
1055{
1056	if (dccp_hashinfo.bhash == NULL)
1057		return -ESOCKTNOSUPPORT;
1058
1059	return inet_ctl_sock_create(&net->dccp.v6_ctl_sk, PF_INET6,
1060				    SOCK_DCCP, IPPROTO_DCCP, net);
1061}
1062
1063static void __net_exit dccp_v6_exit_net(struct net *net)
1064{
1065	inet_ctl_sock_destroy(net->dccp.v6_ctl_sk);
1066}
1067
1068static struct pernet_operations dccp_v6_ops = {
1069	.init   = dccp_v6_init_net,
1070	.exit   = dccp_v6_exit_net,
1071};
1072
1073static int __init dccp_v6_init(void)
1074{
1075	int err = proto_register(&dccp_v6_prot, 1);
1076
1077	if (err != 0)
1078		goto out;
1079
1080	err = inet6_add_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
1081	if (err != 0)
1082		goto out_unregister_proto;
1083
1084	inet6_register_protosw(&dccp_v6_protosw);
1085
1086	err = register_pernet_subsys(&dccp_v6_ops);
1087	if (err != 0)
1088		goto out_destroy_ctl_sock;
1089out:
1090	return err;
1091
1092out_destroy_ctl_sock:
1093	inet6_del_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
1094	inet6_unregister_protosw(&dccp_v6_protosw);
1095out_unregister_proto:
1096	proto_unregister(&dccp_v6_prot);
1097	goto out;
1098}
1099
1100static void __exit dccp_v6_exit(void)
1101{
1102	unregister_pernet_subsys(&dccp_v6_ops);
1103	inet6_del_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
1104	inet6_unregister_protosw(&dccp_v6_protosw);
1105	proto_unregister(&dccp_v6_prot);
1106}
1107
1108module_init(dccp_v6_init);
1109module_exit(dccp_v6_exit);
1110
1111/*
1112 * __stringify doesn't likes enums, so use SOCK_DCCP (6) and IPPROTO_DCCP (33)
1113 * values directly, Also cover the case where the protocol is not specified,
1114 * i.e. net-pf-PF_INET6-proto-0-type-SOCK_DCCP
1115 */
1116MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 33, 6);
1117MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 0, 6);
1118MODULE_LICENSE("GPL");
1119MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@mandriva.com>");
1120MODULE_DESCRIPTION("DCCPv6 - Datagram Congestion Controlled Protocol");
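The two MODULE_ALIAS_NET_PF_PROTO_TYPE lines above are what let the IPv6 socket code auto-load this module on demand: with PF_INET6 equal to 10 they expand to net-pf-10-proto-33-type-6 and net-pf-10-proto-0-type-6. Below is a minimal userspace sketch (not part of this file) of the socket() call that resolves to those aliases, assuming glibc headers where SOCK_DCCP is 6 and IPPROTO_DCCP is 33:

/* Illustrative sketch only; not part of net/dccp/ipv6.c. */
#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>

#ifndef SOCK_DCCP
#define SOCK_DCCP	6	/* matches the literal 6 in the aliases above */
#endif
#ifndef IPPROTO_DCCP
#define IPPROTO_DCCP	33	/* matches the literal 33 in the aliases above */
#endif

int main(void)
{
	/* This (family, type, protocol) triple is what the kernel maps to
	 * the net-pf-10-proto-33-type-6 module alias when dccp_ipv6 is
	 * not yet loaded. */
	int fd = socket(AF_INET6, SOCK_DCCP, IPPROTO_DCCP);

	if (fd < 0) {
		perror("socket(AF_INET6, SOCK_DCCP, IPPROTO_DCCP)");
		return 1;
	}
	printf("DCCPv6 socket created: fd=%d\n", fd);
	return 0;
}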
v6.9.4
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 *	DCCP over IPv6
   4 *	Linux INET6 implementation
   5 *
   6 *	Based on net/dccp6/ipv6.c
   7 *
   8 *	Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
   9 */
  10
  11#include <linux/module.h>
  12#include <linux/random.h>
  13#include <linux/slab.h>
  14#include <linux/xfrm.h>
  15#include <linux/string.h>
  16
  17#include <net/addrconf.h>
  18#include <net/inet_common.h>
  19#include <net/inet_hashtables.h>
  20#include <net/inet_sock.h>
  21#include <net/inet6_connection_sock.h>
  22#include <net/inet6_hashtables.h>
  23#include <net/ip6_route.h>
  24#include <net/ipv6.h>
  25#include <net/protocol.h>
  26#include <net/transp_v6.h>
  27#include <net/ip6_checksum.h>
  28#include <net/xfrm.h>
  29#include <net/secure_seq.h>
  30#include <net/netns/generic.h>
  31#include <net/sock.h>
  32
  33#include "dccp.h"
  34#include "ipv6.h"
  35#include "feat.h"
  36
  37struct dccp_v6_pernet {
  38	struct sock *v6_ctl_sk;
  39};
  40
  41static unsigned int dccp_v6_pernet_id __read_mostly;
  42
  43/* The per-net v6_ctl_sk is used for sending RSTs and ACKs */
  44
  45static const struct inet_connection_sock_af_ops dccp_ipv6_mapped;
  46static const struct inet_connection_sock_af_ops dccp_ipv6_af_ops;
  47
  48/* add pseudo-header to DCCP checksum stored in skb->csum */
  49static inline __sum16 dccp_v6_csum_finish(struct sk_buff *skb,
  50				      const struct in6_addr *saddr,
  51				      const struct in6_addr *daddr)
  52{
  53	return csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_DCCP, skb->csum);
  54}
  55
  56static inline void dccp_v6_send_check(struct sock *sk, struct sk_buff *skb)
  57{
  58	struct ipv6_pinfo *np = inet6_sk(sk);
  59	struct dccp_hdr *dh = dccp_hdr(skb);
  60
  61	dccp_csum_outgoing(skb);
  62	dh->dccph_checksum = dccp_v6_csum_finish(skb, &np->saddr, &sk->sk_v6_daddr);
  63}
  64
  65static inline __u64 dccp_v6_init_sequence(struct sk_buff *skb)
  66{
  67	return secure_dccpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
  68					     ipv6_hdr(skb)->saddr.s6_addr32,
  69					     dccp_hdr(skb)->dccph_dport,
  70					     dccp_hdr(skb)->dccph_sport     );
  71
  72}
  73
  74static int dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
  75			u8 type, u8 code, int offset, __be32 info)
  76{
  77	const struct ipv6hdr *hdr;
  78	const struct dccp_hdr *dh;
  79	struct dccp_sock *dp;
  80	struct ipv6_pinfo *np;
  81	struct sock *sk;
  82	int err;
  83	__u64 seq;
  84	struct net *net = dev_net(skb->dev);
  85
  86	if (!pskb_may_pull(skb, offset + sizeof(*dh)))
  87		return -EINVAL;
  88	dh = (struct dccp_hdr *)(skb->data + offset);
  89	if (!pskb_may_pull(skb, offset + __dccp_basic_hdr_len(dh)))
  90		return -EINVAL;
  91	hdr = (const struct ipv6hdr *)skb->data;
  92	dh = (struct dccp_hdr *)(skb->data + offset);
  93
  94	sk = __inet6_lookup_established(net, &dccp_hashinfo,
  95					&hdr->daddr, dh->dccph_dport,
  96					&hdr->saddr, ntohs(dh->dccph_sport),
  97					inet6_iif(skb), 0);
  98
  99	if (!sk) {
 100		__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
 101				  ICMP6_MIB_INERRORS);
 102		return -ENOENT;
 103	}
 104
 105	if (sk->sk_state == DCCP_TIME_WAIT) {
 106		inet_twsk_put(inet_twsk(sk));
 107		return 0;
 108	}
 109	seq = dccp_hdr_seq(dh);
 110	if (sk->sk_state == DCCP_NEW_SYN_RECV) {
 111		dccp_req_err(sk, seq);
 112		return 0;
 113	}
 114
 115	bh_lock_sock(sk);
 116	if (sock_owned_by_user(sk))
 117		__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
 118
 119	if (sk->sk_state == DCCP_CLOSED)
 120		goto out;
 121
 122	dp = dccp_sk(sk);
 123	if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_LISTEN) &&
 124	    !between48(seq, dp->dccps_awl, dp->dccps_awh)) {
 125		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
 126		goto out;
 127	}
 128
 129	np = inet6_sk(sk);
 130
 131	if (type == NDISC_REDIRECT) {
 132		if (!sock_owned_by_user(sk)) {
 133			struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
 134
 135			if (dst)
 136				dst->ops->redirect(dst, sk, skb);
 137		}
 138		goto out;
 139	}
 140
 141	if (type == ICMPV6_PKT_TOOBIG) {
 142		struct dst_entry *dst = NULL;
 143
 144		if (!ip6_sk_accept_pmtu(sk))
 145			goto out;
 146
 147		if (sock_owned_by_user(sk))
 148			goto out;
 149		if ((1 << sk->sk_state) & (DCCPF_LISTEN | DCCPF_CLOSED))
 150			goto out;
 151
 152		dst = inet6_csk_update_pmtu(sk, ntohl(info));
 153		if (!dst)
 154			goto out;
 155
 156		if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst))
 157			dccp_sync_mss(sk, dst_mtu(dst));
 158		goto out;
 159	}
 160
 161	icmpv6_err_convert(type, code, &err);
 162
 163	/* Might be for an request_sock */
 164	switch (sk->sk_state) {
 165	case DCCP_REQUESTING:
 166	case DCCP_RESPOND:  /* Cannot happen.
 167			       It can, it SYNs are crossed. --ANK */
 168		if (!sock_owned_by_user(sk)) {
 169			__DCCP_INC_STATS(DCCP_MIB_ATTEMPTFAILS);
 170			sk->sk_err = err;
 171			/*
 172			 * Wake people up to see the error
 173			 * (see connect in sock.c)
 174			 */
 175			sk_error_report(sk);
 176			dccp_done(sk);
 177		} else {
 178			WRITE_ONCE(sk->sk_err_soft, err);
 179		}
 180		goto out;
 181	}
 182
 183	if (!sock_owned_by_user(sk) && inet6_test_bit(RECVERR6, sk)) {
 184		sk->sk_err = err;
 185		sk_error_report(sk);
 186	} else {
 187		WRITE_ONCE(sk->sk_err_soft, err);
 188	}
 189out:
 190	bh_unlock_sock(sk);
 191	sock_put(sk);
 192	return 0;
 193}
 194
 195
 196static int dccp_v6_send_response(const struct sock *sk, struct request_sock *req)
 197{
 198	struct inet_request_sock *ireq = inet_rsk(req);
 199	struct ipv6_pinfo *np = inet6_sk(sk);
 200	struct sk_buff *skb;
 201	struct in6_addr *final_p, final;
 202	struct flowi6 fl6;
 203	int err = -1;
 204	struct dst_entry *dst;
 205
 206	memset(&fl6, 0, sizeof(fl6));
 207	fl6.flowi6_proto = IPPROTO_DCCP;
 208	fl6.daddr = ireq->ir_v6_rmt_addr;
 209	fl6.saddr = ireq->ir_v6_loc_addr;
 210	fl6.flowlabel = 0;
 211	fl6.flowi6_oif = ireq->ir_iif;
 212	fl6.fl6_dport = ireq->ir_rmt_port;
 213	fl6.fl6_sport = htons(ireq->ir_num);
 214	security_req_classify_flow(req, flowi6_to_flowi_common(&fl6));
 215
 216
 217	rcu_read_lock();
 218	final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt), &final);
 219	rcu_read_unlock();
 220
 221	dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
 222	if (IS_ERR(dst)) {
 223		err = PTR_ERR(dst);
 224		dst = NULL;
 225		goto done;
 226	}
 227
 228	skb = dccp_make_response(sk, dst, req);
 229	if (skb != NULL) {
 230		struct dccp_hdr *dh = dccp_hdr(skb);
 231		struct ipv6_txoptions *opt;
 232
 233		dh->dccph_checksum = dccp_v6_csum_finish(skb,
 234							 &ireq->ir_v6_loc_addr,
 235							 &ireq->ir_v6_rmt_addr);
 236		fl6.daddr = ireq->ir_v6_rmt_addr;
 237		rcu_read_lock();
 238		opt = ireq->ipv6_opt;
 239		if (!opt)
 240			opt = rcu_dereference(np->opt);
 241		err = ip6_xmit(sk, skb, &fl6, READ_ONCE(sk->sk_mark), opt,
 242			       np->tclass, READ_ONCE(sk->sk_priority));
 243		rcu_read_unlock();
 244		err = net_xmit_eval(err);
 245	}
 246
 247done:
 248	dst_release(dst);
 249	return err;
 250}
 251
 252static void dccp_v6_reqsk_destructor(struct request_sock *req)
 253{
 254	dccp_feat_list_purge(&dccp_rsk(req)->dreq_featneg);
 255	kfree(inet_rsk(req)->ipv6_opt);
 256	kfree_skb(inet_rsk(req)->pktopts);
 257}
 258
 259static void dccp_v6_ctl_send_reset(const struct sock *sk, struct sk_buff *rxskb)
 260{
 261	const struct ipv6hdr *rxip6h;
 262	struct sk_buff *skb;
 263	struct flowi6 fl6;
 264	struct net *net = dev_net(skb_dst(rxskb)->dev);
 265	struct dccp_v6_pernet *pn;
 266	struct sock *ctl_sk;
 267	struct dst_entry *dst;
 268
 269	if (dccp_hdr(rxskb)->dccph_type == DCCP_PKT_RESET)
 270		return;
 271
 272	if (!ipv6_unicast_destination(rxskb))
 273		return;
 274
 275	pn = net_generic(net, dccp_v6_pernet_id);
 276	ctl_sk = pn->v6_ctl_sk;
 277	skb = dccp_ctl_make_reset(ctl_sk, rxskb);
 278	if (skb == NULL)
 279		return;
 280
 281	rxip6h = ipv6_hdr(rxskb);
 282	dccp_hdr(skb)->dccph_checksum = dccp_v6_csum_finish(skb, &rxip6h->saddr,
 283							    &rxip6h->daddr);
 284
 285	memset(&fl6, 0, sizeof(fl6));
 286	fl6.daddr = rxip6h->saddr;
 287	fl6.saddr = rxip6h->daddr;
 288
 289	fl6.flowi6_proto = IPPROTO_DCCP;
 290	fl6.flowi6_oif = inet6_iif(rxskb);
 291	fl6.fl6_dport = dccp_hdr(skb)->dccph_dport;
 292	fl6.fl6_sport = dccp_hdr(skb)->dccph_sport;
 293	security_skb_classify_flow(rxskb, flowi6_to_flowi_common(&fl6));
 294
 295	/* sk = NULL, but it is safe for now. RST socket required. */
 296	dst = ip6_dst_lookup_flow(sock_net(ctl_sk), ctl_sk, &fl6, NULL);
 297	if (!IS_ERR(dst)) {
 298		skb_dst_set(skb, dst);
 299		ip6_xmit(ctl_sk, skb, &fl6, 0, NULL, 0, 0);
 300		DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
 301		DCCP_INC_STATS(DCCP_MIB_OUTRSTS);
 302		return;
 303	}
 304
 305	kfree_skb(skb);
 306}
 307
 308static struct request_sock_ops dccp6_request_sock_ops = {
 309	.family		= AF_INET6,
 310	.obj_size	= sizeof(struct dccp6_request_sock),
 311	.rtx_syn_ack	= dccp_v6_send_response,
 312	.send_ack	= dccp_reqsk_send_ack,
 313	.destructor	= dccp_v6_reqsk_destructor,
 314	.send_reset	= dccp_v6_ctl_send_reset,
 315	.syn_ack_timeout = dccp_syn_ack_timeout,
 316};
 317
 318static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 319{
 320	struct request_sock *req;
 321	struct dccp_request_sock *dreq;
 322	struct inet_request_sock *ireq;
 323	struct ipv6_pinfo *np = inet6_sk(sk);
 324	const __be32 service = dccp_hdr_request(skb)->dccph_req_service;
 325	struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
 326
 327	if (skb->protocol == htons(ETH_P_IP))
 328		return dccp_v4_conn_request(sk, skb);
 329
 330	if (!ipv6_unicast_destination(skb))
 331		return 0;	/* discard, don't send a reset here */
 332
 333	if (ipv6_addr_v4mapped(&ipv6_hdr(skb)->saddr)) {
 334		__IP6_INC_STATS(sock_net(sk), NULL, IPSTATS_MIB_INHDRERRORS);
 335		return 0;
 336	}
 337
 338	if (dccp_bad_service_code(sk, service)) {
 339		dcb->dccpd_reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE;
 340		goto drop;
 341	}
 342	/*
 343	 * There are no SYN attacks on IPv6, yet...
 344	 */
 345	dcb->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY;
 346	if (inet_csk_reqsk_queue_is_full(sk))
 347		goto drop;
 348
 349	if (sk_acceptq_is_full(sk))
 350		goto drop;
 351
 352	req = inet_reqsk_alloc(&dccp6_request_sock_ops, sk, true);
 353	if (req == NULL)
 354		goto drop;
 355
 356	if (dccp_reqsk_init(req, dccp_sk(sk), skb))
 357		goto drop_and_free;
 358
 359	dreq = dccp_rsk(req);
 360	if (dccp_parse_options(sk, dreq, skb))
 361		goto drop_and_free;
 362
 363	ireq = inet_rsk(req);
 364	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
 365	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
 366	ireq->ireq_family = AF_INET6;
 367	ireq->ir_mark = inet_request_mark(sk, skb);
 368
 369	if (security_inet_conn_request(sk, skb, req))
 370		goto drop_and_free;
 371
 372	if (ipv6_opt_accepted(sk, skb, IP6CB(skb)) ||
 373	    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
 374	    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
 375		refcount_inc(&skb->users);
 376		ireq->pktopts = skb;
 377	}
 378	ireq->ir_iif = READ_ONCE(sk->sk_bound_dev_if);
 379
 380	/* So that link locals have meaning */
 381	if (!ireq->ir_iif &&
 382	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
 383		ireq->ir_iif = inet6_iif(skb);
 384
 385	/*
 386	 * Step 3: Process LISTEN state
 387	 *
 388	 *   Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie
 389	 *
 390	 * Setting S.SWL/S.SWH to is deferred to dccp_create_openreq_child().
 391	 */
 392	dreq->dreq_isr	   = dcb->dccpd_seq;
 393	dreq->dreq_gsr     = dreq->dreq_isr;
 394	dreq->dreq_iss	   = dccp_v6_init_sequence(skb);
 395	dreq->dreq_gss     = dreq->dreq_iss;
 396	dreq->dreq_service = service;
 397
 398	if (dccp_v6_send_response(sk, req))
 399		goto drop_and_free;
 400
 401	inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
 402	reqsk_put(req);
 403	return 0;
 404
 405drop_and_free:
 406	reqsk_free(req);
 407drop:
 408	__DCCP_INC_STATS(DCCP_MIB_ATTEMPTFAILS);
 409	return -1;
 410}
 411
 412static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
 413					      struct sk_buff *skb,
 414					      struct request_sock *req,
 415					      struct dst_entry *dst,
 416					      struct request_sock *req_unhash,
 417					      bool *own_req)
 418{
 419	struct inet_request_sock *ireq = inet_rsk(req);
 420	struct ipv6_pinfo *newnp;
 421	const struct ipv6_pinfo *np = inet6_sk(sk);
 422	struct ipv6_txoptions *opt;
 423	struct inet_sock *newinet;
 424	struct dccp6_sock *newdp6;
 425	struct sock *newsk;
 426
 427	if (skb->protocol == htons(ETH_P_IP)) {
 428		/*
 429		 *	v6 mapped
 430		 */
 431		newsk = dccp_v4_request_recv_sock(sk, skb, req, dst,
 432						  req_unhash, own_req);
 433		if (newsk == NULL)
 434			return NULL;
 435
 436		newdp6 = (struct dccp6_sock *)newsk;
 437		newinet = inet_sk(newsk);
 438		newinet->pinet6 = &newdp6->inet6;
 439		newnp = inet6_sk(newsk);
 440
 441		memcpy(newnp, np, sizeof(struct ipv6_pinfo));
 442
 443		newnp->saddr = newsk->sk_v6_rcv_saddr;
 444
 445		inet_csk(newsk)->icsk_af_ops = &dccp_ipv6_mapped;
 446		newsk->sk_backlog_rcv = dccp_v4_do_rcv;
 447		newnp->pktoptions  = NULL;
 448		newnp->opt	   = NULL;
 449		newnp->ipv6_mc_list = NULL;
 450		newnp->ipv6_ac_list = NULL;
 451		newnp->ipv6_fl_list = NULL;
 452		newnp->mcast_oif   = inet_iif(skb);
 453		newnp->mcast_hops  = ip_hdr(skb)->ttl;
 454
 455		/*
 456		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
 457		 * here, dccp_create_openreq_child now does this for us, see the comment in
 458		 * that function for the gory details. -acme
 459		 */
 460
 461		/* It is tricky place. Until this moment IPv4 tcp
 462		   worked with IPv6 icsk.icsk_af_ops.
 463		   Sync it now.
 464		 */
 465		dccp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
 466
 467		return newsk;
 468	}
 469
 470
 471	if (sk_acceptq_is_full(sk))
 472		goto out_overflow;
 473
 474	if (!dst) {
 475		struct flowi6 fl6;
 476
 477		dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_DCCP);
 478		if (!dst)
 479			goto out;
 480	}
 481
 482	newsk = dccp_create_openreq_child(sk, req, skb);
 483	if (newsk == NULL)
 484		goto out_nonewsk;
 485
 486	/*
 487	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
 488	 * count here, dccp_create_openreq_child now does this for us, see the
 489	 * comment in that function for the gory details. -acme
 490	 */
 491
 492	ip6_dst_store(newsk, dst, NULL, NULL);
 493	newsk->sk_route_caps = dst->dev->features & ~(NETIF_F_IP_CSUM |
 494						      NETIF_F_TSO);
 495	newdp6 = (struct dccp6_sock *)newsk;
 496	newinet = inet_sk(newsk);
 497	newinet->pinet6 = &newdp6->inet6;
 498	newnp = inet6_sk(newsk);
 499
 500	memcpy(newnp, np, sizeof(struct ipv6_pinfo));
 501
 502	newsk->sk_v6_daddr	= ireq->ir_v6_rmt_addr;
 503	newnp->saddr		= ireq->ir_v6_loc_addr;
 504	newsk->sk_v6_rcv_saddr	= ireq->ir_v6_loc_addr;
 505	newsk->sk_bound_dev_if	= ireq->ir_iif;
 506
 507	/* Now IPv6 options...
 508
 509	   First: no IPv4 options.
 510	 */
 511	newinet->inet_opt = NULL;
 512
 513	/* Clone RX bits */
 514	newnp->rxopt.all = np->rxopt.all;
 515
 516	newnp->ipv6_mc_list = NULL;
 517	newnp->ipv6_ac_list = NULL;
 518	newnp->ipv6_fl_list = NULL;
 519	newnp->pktoptions = NULL;
 520	newnp->opt	  = NULL;
 521	newnp->mcast_oif  = inet6_iif(skb);
 522	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
 523
 524	/*
 525	 * Clone native IPv6 options from listening socket (if any)
 526	 *
 527	 * Yes, keeping reference count would be much more clever, but we make
 528	 * one more one thing there: reattach optmem to newsk.
 529	 */
 530	opt = ireq->ipv6_opt;
 531	if (!opt)
 532		opt = rcu_dereference(np->opt);
 533	if (opt) {
 534		opt = ipv6_dup_options(newsk, opt);
 535		RCU_INIT_POINTER(newnp->opt, opt);
 536	}
 537	inet_csk(newsk)->icsk_ext_hdr_len = 0;
 538	if (opt)
 539		inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
 540						    opt->opt_flen;
 541
 542	dccp_sync_mss(newsk, dst_mtu(dst));
 543
 544	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
 545	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
 546
 547	if (__inet_inherit_port(sk, newsk) < 0) {
 548		inet_csk_prepare_forced_close(newsk);
 549		dccp_done(newsk);
 550		goto out;
 551	}
 552	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash), NULL);
 553	/* Clone pktoptions received with SYN, if we own the req */
 554	if (*own_req && ireq->pktopts) {
 555		newnp->pktoptions = skb_clone_and_charge_r(ireq->pktopts, newsk);
 556		consume_skb(ireq->pktopts);
 557		ireq->pktopts = NULL;
 558	}
 559
 560	return newsk;
 561
 562out_overflow:
 563	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
 564out_nonewsk:
 565	dst_release(dst);
 566out:
 567	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
 568	return NULL;
 569}
 570
 571/* The socket must have it's spinlock held when we get
 572 * here.
 573 *
 574 * We have a potential double-lock case here, so even when
 575 * doing backlog processing we use the BH locking scheme.
 576 * This is because we cannot sleep with the original spinlock
 577 * held.
 578 */
 579static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
 580{
 581	struct ipv6_pinfo *np = inet6_sk(sk);
 582	struct sk_buff *opt_skb = NULL;
 583
 584	/* Imagine: socket is IPv6. IPv4 packet arrives,
 585	   goes to IPv4 receive handler and backlogged.
 586	   From backlog it always goes here. Kerboom...
 587	   Fortunately, dccp_rcv_established and rcv_established
 588	   handle them correctly, but it is not case with
 589	   dccp_v6_hnd_req and dccp_v6_ctl_send_reset().   --ANK
 590	 */
 591
 592	if (skb->protocol == htons(ETH_P_IP))
 593		return dccp_v4_do_rcv(sk, skb);
 594
 595	if (sk_filter(sk, skb))
 596		goto discard;
 597
 598	/*
 599	 * socket locking is here for SMP purposes as backlog rcv is currently
 600	 * called with bh processing disabled.
 601	 */
 602
 603	/* Do Stevens' IPV6_PKTOPTIONS.
 604
 605	   Yes, guys, it is the only place in our code, where we
 606	   may make it not affecting IPv4.
 607	   The rest of code is protocol independent,
 608	   and I do not like idea to uglify IPv4.
 609
 610	   Actually, all the idea behind IPV6_PKTOPTIONS
 611	   looks not very well thought. For now we latch
 612	   options, received in the last packet, enqueued
 613	   by tcp. Feel free to propose better solution.
 614					       --ANK (980728)
 615	 */
 616	if (np->rxopt.all)
 617		opt_skb = skb_clone_and_charge_r(skb, sk);
 618
 619	if (sk->sk_state == DCCP_OPEN) { /* Fast path */
 620		if (dccp_rcv_established(sk, skb, dccp_hdr(skb), skb->len))
 621			goto reset;
 622		if (opt_skb)
 623			goto ipv6_pktoptions;
 624		return 0;
 625	}
 626
 627	/*
 628	 *  Step 3: Process LISTEN state
 629	 *     If S.state == LISTEN,
 630	 *	 If P.type == Request or P contains a valid Init Cookie option,
 631	 *	      (* Must scan the packet's options to check for Init
 632	 *		 Cookies.  Only Init Cookies are processed here,
 633	 *		 however; other options are processed in Step 8.  This
 634	 *		 scan need only be performed if the endpoint uses Init
 635	 *		 Cookies *)
 636	 *	      (* Generate a new socket and switch to that socket *)
 637	 *	      Set S := new socket for this port pair
 638	 *	      S.state = RESPOND
 639	 *	      Choose S.ISS (initial seqno) or set from Init Cookies
 640	 *	      Initialize S.GAR := S.ISS
 641	 *	      Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookies
 642	 *	      Continue with S.state == RESPOND
 643	 *	      (* A Response packet will be generated in Step 11 *)
 644	 *	 Otherwise,
 645	 *	      Generate Reset(No Connection) unless P.type == Reset
 646	 *	      Drop packet and return
 647	 *
 648	 * NOTE: the check for the packet types is done in
 649	 *	 dccp_rcv_state_process
 650	 */
 651
 652	if (dccp_rcv_state_process(sk, skb, dccp_hdr(skb), skb->len))
 653		goto reset;
 654	if (opt_skb)
 655		goto ipv6_pktoptions;
 656	return 0;
 657
 658reset:
 659	dccp_v6_ctl_send_reset(sk, skb);
 660discard:
 661	if (opt_skb != NULL)
 662		__kfree_skb(opt_skb);
 663	kfree_skb(skb);
 664	return 0;
 665
 666/* Handling IPV6_PKTOPTIONS skb the similar
 667 * way it's done for net/ipv6/tcp_ipv6.c
 668 */
 669ipv6_pktoptions:
 670	if (!((1 << sk->sk_state) & (DCCPF_CLOSED | DCCPF_LISTEN))) {
 671		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
 672			WRITE_ONCE(np->mcast_oif, inet6_iif(opt_skb));
 673		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
 674			WRITE_ONCE(np->mcast_hops, ipv6_hdr(opt_skb)->hop_limit);
 675		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
 676			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
 677		if (inet6_test_bit(REPFLOW, sk))
 678			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
 679		if (ipv6_opt_accepted(sk, opt_skb,
 680				      &DCCP_SKB_CB(opt_skb)->header.h6)) {
 681			memmove(IP6CB(opt_skb),
 682				&DCCP_SKB_CB(opt_skb)->header.h6,
 683				sizeof(struct inet6_skb_parm));
 684			opt_skb = xchg(&np->pktoptions, opt_skb);
 685		} else {
 686			__kfree_skb(opt_skb);
 687			opt_skb = xchg(&np->pktoptions, NULL);
 688		}
 689	}
 690
 691	kfree_skb(opt_skb);
 692	return 0;
 693}
 694
 695static int dccp_v6_rcv(struct sk_buff *skb)
 696{
 697	const struct dccp_hdr *dh;
 698	bool refcounted;
 699	struct sock *sk;
 700	int min_cov;
 701
 702	/* Step 1: Check header basics */
 703
 704	if (dccp_invalid_packet(skb))
 705		goto discard_it;
 706
 707	/* Step 1: If header checksum is incorrect, drop packet and return. */
 708	if (dccp_v6_csum_finish(skb, &ipv6_hdr(skb)->saddr,
 709				     &ipv6_hdr(skb)->daddr)) {
 710		DCCP_WARN("dropped packet with invalid checksum\n");
 711		goto discard_it;
 712	}
 713
 714	dh = dccp_hdr(skb);
 715
 716	DCCP_SKB_CB(skb)->dccpd_seq  = dccp_hdr_seq(dh);
 717	DCCP_SKB_CB(skb)->dccpd_type = dh->dccph_type;
 718
 719	if (dccp_packet_without_ack(skb))
 720		DCCP_SKB_CB(skb)->dccpd_ack_seq = DCCP_PKT_WITHOUT_ACK_SEQ;
 721	else
 722		DCCP_SKB_CB(skb)->dccpd_ack_seq = dccp_hdr_ack_seq(skb);
 723
 724lookup:
 725	sk = __inet6_lookup_skb(&dccp_hashinfo, skb, __dccp_hdr_len(dh),
 726			        dh->dccph_sport, dh->dccph_dport,
 727				inet6_iif(skb), 0, &refcounted);
 728	if (!sk) {
 729		dccp_pr_debug("failed to look up flow ID in table and "
 730			      "get corresponding socket\n");
 731		goto no_dccp_socket;
 732	}
 733
 734	/*
 735	 * Step 2:
 736	 *	... or S.state == TIMEWAIT,
 737	 *		Generate Reset(No Connection) unless P.type == Reset
 738	 *		Drop packet and return
 739	 */
 740	if (sk->sk_state == DCCP_TIME_WAIT) {
 741		dccp_pr_debug("sk->sk_state == DCCP_TIME_WAIT: do_time_wait\n");
 742		inet_twsk_put(inet_twsk(sk));
 743		goto no_dccp_socket;
 744	}
 745
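	/*
	 * A request socket in NEW_SYN_RECV is matched back to its
	 * listener; dccp_check_req() then either produces the child
	 * socket for this connection or tells us to drop the packet.
	 */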
 746	if (sk->sk_state == DCCP_NEW_SYN_RECV) {
 747		struct request_sock *req = inet_reqsk(sk);
 748		struct sock *nsk;
 749
 750		sk = req->rsk_listener;
 751		if (unlikely(sk->sk_state != DCCP_LISTEN)) {
 752			inet_csk_reqsk_queue_drop_and_put(sk, req);
 753			goto lookup;
 754		}
 755		sock_hold(sk);
 756		refcounted = true;
 757		nsk = dccp_check_req(sk, skb, req);
 758		if (!nsk) {
 759			reqsk_put(req);
 760			goto discard_and_relse;
 761		}
 762		if (nsk == sk) {
 763			reqsk_put(req);
 764		} else if (dccp_child_process(sk, nsk, skb)) {
 765			dccp_v6_ctl_send_reset(sk, skb);
 766			goto discard_and_relse;
 767		} else {
 768			sock_put(sk);
 769			return 0;
 770		}
 771	}
 772	/*
 773	 * RFC 4340, sec. 9.2.1: Minimum Checksum Coverage
 774	 *	o if MinCsCov = 0, only packets with CsCov = 0 are accepted
 775	 *	o if MinCsCov > 0, also accept packets with CsCov >= MinCsCov
 776	 */
 777	min_cov = dccp_sk(sk)->dccps_pcrlen;
 778	if (dh->dccph_cscov  &&  (min_cov == 0 || dh->dccph_cscov < min_cov))  {
 779		dccp_pr_debug("Packet CsCov %d does not satisfy MinCsCov %d\n",
 780			      dh->dccph_cscov, min_cov);
 781		/* FIXME: send Data Dropped option (see also dccp_v4_rcv) */
 782		goto discard_and_relse;
 783	}
 784
 785	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
 786		goto discard_and_relse;
 787	nf_reset_ct(skb);
 788
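	/*
	 * __sk_receive_skb() runs the socket's backlog_rcv handler
	 * directly when the socket is not owned by a user context,
	 * otherwise it queues the skb on the socket backlog.
	 */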
 789	return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4,
 790				refcounted) ? -1 : 0;
 791
 792no_dccp_socket:
 793	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
 794		goto discard_it;
 795	/*
 796	 * Step 2:
 797	 *	If no socket ...
 798	 *		Generate Reset(No Connection) unless P.type == Reset
 799	 *		Drop packet and return
 800	 */
 801	if (dh->dccph_type != DCCP_PKT_RESET) {
 802		DCCP_SKB_CB(skb)->dccpd_reset_code =
 803					DCCP_RESET_CODE_NO_CONNECTION;
 804		dccp_v6_ctl_send_reset(sk, skb);
 805	}
 806
 807discard_it:
 808	kfree_skb(skb);
 809	return 0;
 810
 811discard_and_relse:
 812	if (refcounted)
 813		sock_put(sk);
 814	goto discard_it;
 815}
 816
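/*
 * Connect a DCCPv6 socket: resolve the destination (falling back to
 * dccp_v4_connect() for IPv4-mapped addresses), pick a source address,
 * route the flow, choose the initial sequence number and issue the
 * Request.
 */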
 817static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 818			   int addr_len)
 819{
 820	struct sockaddr_in6 *usin = (struct sockaddr_in6 *)uaddr;
 821	struct inet_connection_sock *icsk = inet_csk(sk);
 822	struct inet_sock *inet = inet_sk(sk);
 823	struct ipv6_pinfo *np = inet6_sk(sk);
 824	struct dccp_sock *dp = dccp_sk(sk);
 825	struct in6_addr *saddr = NULL, *final_p, final;
 826	struct ipv6_txoptions *opt;
 827	struct flowi6 fl6;
 828	struct dst_entry *dst;
 829	int addr_type;
 830	int err;
 831
 832	dp->dccps_role = DCCP_ROLE_CLIENT;
 833
 834	if (addr_len < SIN6_LEN_RFC2133)
 835		return -EINVAL;
 836
 837	if (usin->sin6_family != AF_INET6)
 838		return -EAFNOSUPPORT;
 839
 840	memset(&fl6, 0, sizeof(fl6));
 841
 842	if (inet6_test_bit(SNDFLOW, sk)) {
 843		fl6.flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
 844		IP6_ECN_flow_init(fl6.flowlabel);
 845		if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
 846			struct ip6_flowlabel *flowlabel;
 847			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
 848			if (IS_ERR(flowlabel))
 849				return -EINVAL;
 850			fl6_sock_release(flowlabel);
 851		}
 852	}
 853	/*
 854	 * connect() to INADDR_ANY means loopback (BSD'ism).
 855	 */
 856	if (ipv6_addr_any(&usin->sin6_addr))
 857		usin->sin6_addr.s6_addr[15] = 1;
 858
 859	addr_type = ipv6_addr_type(&usin->sin6_addr);
 860
 861	if (addr_type & IPV6_ADDR_MULTICAST)
 862		return -ENETUNREACH;
 863
 864	if (addr_type & IPV6_ADDR_LINKLOCAL) {
 865		if (addr_len >= sizeof(struct sockaddr_in6) &&
 866		    usin->sin6_scope_id) {
 867			/* If interface is set while binding, indices
 868			 * must coincide.
 869			 */
 870			if (sk->sk_bound_dev_if &&
 871			    sk->sk_bound_dev_if != usin->sin6_scope_id)
 872				return -EINVAL;
 873
 874			sk->sk_bound_dev_if = usin->sin6_scope_id;
 875		}
 876
 877		/* Connecting to a link-local address requires an interface. */
 878		if (!sk->sk_bound_dev_if)
 879			return -EINVAL;
 880	}
 881
 882	sk->sk_v6_daddr = usin->sin6_addr;
 883	np->flow_label = fl6.flowlabel;
 884
 885	/*
 886	 * DCCP over IPv4
 887	 */
 888	if (addr_type == IPV6_ADDR_MAPPED) {
 889		u32 exthdrlen = icsk->icsk_ext_hdr_len;
 890		struct sockaddr_in sin;
 891
 892		net_dbg_ratelimited("connect: ipv4 mapped\n");
 893
 894		if (ipv6_only_sock(sk))
 895			return -ENETUNREACH;
 896
 897		sin.sin_family = AF_INET;
 898		sin.sin_port = usin->sin6_port;
 899		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
 900
 901		icsk->icsk_af_ops = &dccp_ipv6_mapped;
 902		sk->sk_backlog_rcv = dccp_v4_do_rcv;
 903
 904		err = dccp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
 905		if (err) {
 906			icsk->icsk_ext_hdr_len = exthdrlen;
 907			icsk->icsk_af_ops = &dccp_ipv6_af_ops;
 908			sk->sk_backlog_rcv = dccp_v6_do_rcv;
 909			goto failure;
 910		}
 911		np->saddr = sk->sk_v6_rcv_saddr;
 912		return err;
 913	}
 914
 915	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
 916		saddr = &sk->sk_v6_rcv_saddr;
 917
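	/* Build the flow description used to look up the route. */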
 918	fl6.flowi6_proto = IPPROTO_DCCP;
 919	fl6.daddr = sk->sk_v6_daddr;
 920	fl6.saddr = saddr ? *saddr : np->saddr;
 921	fl6.flowi6_oif = sk->sk_bound_dev_if;
 922	fl6.fl6_dport = usin->sin6_port;
 923	fl6.fl6_sport = inet->inet_sport;
 924	security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6));
 925
 926	opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
 927	final_p = fl6_update_dst(&fl6, opt, &final);
 928
 929	dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
 930	if (IS_ERR(dst)) {
 931		err = PTR_ERR(dst);
 932		goto failure;
 933	}
 934
 935	if (saddr == NULL) {
 936		saddr = &fl6.saddr;
 937
 938		err = inet_bhash2_update_saddr(sk, saddr, AF_INET6);
 939		if (err)
 940			goto failure;
 941	}
 942
 943	/* set the source address */
 944	np->saddr = *saddr;
 945	inet->inet_rcv_saddr = LOOPBACK4_IPV6;
 946
 947	ip6_dst_store(sk, dst, NULL, NULL);
 948
 949	icsk->icsk_ext_hdr_len = 0;
 950	if (opt)
 951		icsk->icsk_ext_hdr_len = opt->opt_flen + opt->opt_nflen;
 952
 953	inet->inet_dport = usin->sin6_port;
 954
 955	dccp_set_state(sk, DCCP_REQUESTING);
 956	err = inet6_hash_connect(&dccp_death_row, sk);
 957	if (err)
 958		goto late_failure;
 959
 960	dp->dccps_iss = secure_dccpv6_sequence_number(np->saddr.s6_addr32,
 961						      sk->sk_v6_daddr.s6_addr32,
 962						      inet->inet_sport,
 963						      inet->inet_dport);
 964	err = dccp_connect(sk);
 965	if (err)
 966		goto late_failure;
 967
 968	return 0;
 969
 970late_failure:
 971	dccp_set_state(sk, DCCP_CLOSED);
 972	inet_bhash2_reset_saddr(sk);
 973	__sk_dst_reset(sk);
 974failure:
 975	inet->inet_dport = 0;
 976	sk->sk_route_caps = 0;
 977	return err;
 978}
 979
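/*
 *	Address-family operations for native (non-mapped) IPv6 sockets
 */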
 980static const struct inet_connection_sock_af_ops dccp_ipv6_af_ops = {
 981	.queue_xmit	   = inet6_csk_xmit,
 982	.send_check	   = dccp_v6_send_check,
 983	.rebuild_header	   = inet6_sk_rebuild_header,
 984	.conn_request	   = dccp_v6_conn_request,
 985	.syn_recv_sock	   = dccp_v6_request_recv_sock,
 986	.net_header_len	   = sizeof(struct ipv6hdr),
 987	.setsockopt	   = ipv6_setsockopt,
 988	.getsockopt	   = ipv6_getsockopt,
 989	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
 990	.sockaddr_len	   = sizeof(struct sockaddr_in6),
 991};
 992
 993/*
 994 *	DCCP over IPv4 via INET6 API
 995 */
 996static const struct inet_connection_sock_af_ops dccp_ipv6_mapped = {
 997	.queue_xmit	   = ip_queue_xmit,
 998	.send_check	   = dccp_v4_send_check,
 999	.rebuild_header	   = inet_sk_rebuild_header,
1000	.conn_request	   = dccp_v6_conn_request,
1001	.syn_recv_sock	   = dccp_v6_request_recv_sock,
1002	.net_header_len	   = sizeof(struct iphdr),
1003	.setsockopt	   = ipv6_setsockopt,
1004	.getsockopt	   = ipv6_getsockopt,
1005	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
1006	.sockaddr_len	   = sizeof(struct sockaddr_in6),
1007};
1008
1009static void dccp_v6_sk_destruct(struct sock *sk)
1010{
1011	dccp_destruct_common(sk);
1012	inet6_sock_destruct(sk);
1013}
1014
1015/* NOTE: A lot of things are set to zero explicitly by the call to
1016 *       sk_alloc(), so they need not be done here.
1017 */
1018static int dccp_v6_init_sock(struct sock *sk)
1019{
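	/*
	 * The flag below is still clear only for the very first socket
	 * initialized for this family (normally the control socket
	 * created when the protocol is registered); dccp_init_sock()
	 * uses it to tell that control socket apart from ordinary ones.
	 */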
1020	static __u8 dccp_v6_ctl_sock_initialized;
1021	int err = dccp_init_sock(sk, dccp_v6_ctl_sock_initialized);
1022
1023	if (err == 0) {
1024		if (unlikely(!dccp_v6_ctl_sock_initialized))
1025			dccp_v6_ctl_sock_initialized = 1;
1026		inet_csk(sk)->icsk_af_ops = &dccp_ipv6_af_ops;
1027		sk->sk_destruct = dccp_v6_sk_destruct;
1028	}
1029
1030	return err;
1031}
1032
1033static struct timewait_sock_ops dccp6_timewait_sock_ops = {
1034	.twsk_obj_size	= sizeof(struct dccp6_timewait_sock),
1035};
1036
1037static struct proto dccp_v6_prot = {
1038	.name		   = "DCCPv6",
1039	.owner		   = THIS_MODULE,
1040	.close		   = dccp_close,
1041	.connect	   = dccp_v6_connect,
1042	.disconnect	   = dccp_disconnect,
1043	.ioctl		   = dccp_ioctl,
1044	.init		   = dccp_v6_init_sock,
1045	.setsockopt	   = dccp_setsockopt,
1046	.getsockopt	   = dccp_getsockopt,
1047	.sendmsg	   = dccp_sendmsg,
1048	.recvmsg	   = dccp_recvmsg,
1049	.backlog_rcv	   = dccp_v6_do_rcv,
1050	.hash		   = inet6_hash,
1051	.unhash		   = inet_unhash,
1052	.accept		   = inet_csk_accept,
1053	.get_port	   = inet_csk_get_port,
1054	.shutdown	   = dccp_shutdown,
1055	.destroy	   = dccp_destroy_sock,
1056	.orphan_count	   = &dccp_orphan_count,
1057	.max_header	   = MAX_DCCP_HEADER,
1058	.obj_size	   = sizeof(struct dccp6_sock),
1059	.ipv6_pinfo_offset = offsetof(struct dccp6_sock, inet6),
1060	.slab_flags	   = SLAB_TYPESAFE_BY_RCU,
1061	.rsk_prot	   = &dccp6_request_sock_ops,
1062	.twsk_prot	   = &dccp6_timewait_sock_ops,
1063	.h.hashinfo	   = &dccp_hashinfo,
1064};
1065
1066static const struct inet6_protocol dccp_v6_protocol = {
1067	.handler	= dccp_v6_rcv,
1068	.err_handler	= dccp_v6_err,
1069	.flags		= INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
1070};
1071
1072static const struct proto_ops inet6_dccp_ops = {
1073	.family		   = PF_INET6,
1074	.owner		   = THIS_MODULE,
1075	.release	   = inet6_release,
1076	.bind		   = inet6_bind,
1077	.connect	   = inet_stream_connect,
1078	.socketpair	   = sock_no_socketpair,
1079	.accept		   = inet_accept,
1080	.getname	   = inet6_getname,
1081	.poll		   = dccp_poll,
1082	.ioctl		   = inet6_ioctl,
1083	.gettstamp	   = sock_gettstamp,
1084	.listen		   = inet_dccp_listen,
1085	.shutdown	   = inet_shutdown,
1086	.setsockopt	   = sock_common_setsockopt,
1087	.getsockopt	   = sock_common_getsockopt,
1088	.sendmsg	   = inet_sendmsg,
1089	.recvmsg	   = sock_common_recvmsg,
1090	.mmap		   = sock_no_mmap,
1091#ifdef CONFIG_COMPAT
1092	.compat_ioctl	   = inet6_compat_ioctl,
1093#endif
1094};
1095
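/*
 * This protosw entry is what a userspace call such as
 *
 *	int fd = socket(AF_INET6, SOCK_DCCP, IPPROTO_DCCP);
 *
 * resolves to, wiring the new socket up to dccp_v6_prot and
 * inet6_dccp_ops.
 */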
1096static struct inet_protosw dccp_v6_protosw = {
1097	.type		= SOCK_DCCP,
1098	.protocol	= IPPROTO_DCCP,
1099	.prot		= &dccp_v6_prot,
1100	.ops		= &inet6_dccp_ops,
1101	.flags		= INET_PROTOSW_ICSK,
1102};
1103
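/*
 * Per-namespace setup: refuse to come up if the shared DCCP hash tables
 * were never allocated, otherwise create this namespace's control socket.
 */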
1104static int __net_init dccp_v6_init_net(struct net *net)
1105{
1106	struct dccp_v6_pernet *pn = net_generic(net, dccp_v6_pernet_id);
1107
1108	if (dccp_hashinfo.bhash == NULL)
1109		return -ESOCKTNOSUPPORT;
1110
1111	return inet_ctl_sock_create(&pn->v6_ctl_sk, PF_INET6,
1112				    SOCK_DCCP, IPPROTO_DCCP, net);
1113}
1114
1115static void __net_exit dccp_v6_exit_net(struct net *net)
1116{
1117	struct dccp_v6_pernet *pn = net_generic(net, dccp_v6_pernet_id);
1118
1119	inet_ctl_sock_destroy(pn->v6_ctl_sk);
1120}
1121
1122static void __net_exit dccp_v6_exit_batch(struct list_head *net_exit_list)
1123{
1124	inet_twsk_purge(&dccp_hashinfo, AF_INET6);
1125}
1126
1127static struct pernet_operations dccp_v6_ops = {
1128	.init   = dccp_v6_init_net,
1129	.exit   = dccp_v6_exit_net,
1130	.exit_batch = dccp_v6_exit_batch,
1131	.id	= &dccp_v6_pernet_id,
1132	.size   = sizeof(struct dccp_v6_pernet),
1133};
1134
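/*
 * Module init: register the protocol with the socket layer first
 * (proto and protosw), then the per-net operations, and only then hook
 * IPPROTO_DCCP into the IPv6 receive path; errors unwind in reverse
 * order.
 */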
1135static int __init dccp_v6_init(void)
1136{
1137	int err = proto_register(&dccp_v6_prot, 1);
1138
1139	if (err)
1140		goto out;
1141
1142	inet6_register_protosw(&dccp_v6_protosw);
1143
1144	err = register_pernet_subsys(&dccp_v6_ops);
1145	if (err)
1146		goto out_destroy_ctl_sock;
1147
1148	err = inet6_add_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
1149	if (err)
1150		goto out_unregister_proto;
1151
1152out:
1153	return err;
1154out_unregister_proto:
1155	unregister_pernet_subsys(&dccp_v6_ops);
1156out_destroy_ctl_sock:
1157	inet6_unregister_protosw(&dccp_v6_protosw);
1158	proto_unregister(&dccp_v6_prot);
1159	goto out;
1160}
1161
1162static void __exit dccp_v6_exit(void)
1163{
1164	inet6_del_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
1165	unregister_pernet_subsys(&dccp_v6_ops);
1166	inet6_unregister_protosw(&dccp_v6_protosw);
1167	proto_unregister(&dccp_v6_prot);
1168}
1169
1170module_init(dccp_v6_init);
1171module_exit(dccp_v6_exit);
1172
1173/*
1174 * __stringify doesn't like enums, so use the SOCK_DCCP (6) and IPPROTO_DCCP (33)
1175 * values directly. Also cover the case where the protocol is not specified,
1176 * i.e. net-pf-PF_INET6-proto-0-type-SOCK_DCCP
1177 */
1178MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 33, 6);
1179MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 0, 6);
1180MODULE_LICENSE("GPL");
1181MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@mandriva.com>");
1182MODULE_DESCRIPTION("DCCPv6 - Datagram Congestion Control Protocol");