net/ipv6/udp.c (Linux v5.9)
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 *	UDP over IPv6
   4 *	Linux INET6 implementation
   5 *
   6 *	Authors:
   7 *	Pedro Roque		<roque@di.fc.ul.pt>
   8 *
   9 *	Based on linux/ipv4/udp.c
  10 *
  11 *	Fixes:
  12 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
  13 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
  14 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
  15 *					a single port at the same time.
  16 *      Kazunori MIYAZAWA @USAGI:       change process style to use ip6_append_data
  17 *      YOSHIFUJI Hideaki @USAGI:	convert /proc/net/udp6 to seq_file.
  18 */
  19
  20#include <linux/errno.h>
  21#include <linux/types.h>
  22#include <linux/socket.h>
  23#include <linux/sockios.h>
  24#include <linux/net.h>
  25#include <linux/in6.h>
  26#include <linux/netdevice.h>
  27#include <linux/if_arp.h>
  28#include <linux/ipv6.h>
  29#include <linux/icmpv6.h>
  30#include <linux/init.h>
  31#include <linux/module.h>
  32#include <linux/skbuff.h>
  33#include <linux/slab.h>
  34#include <linux/uaccess.h>
  35#include <linux/indirect_call_wrapper.h>
  36
  37#include <net/addrconf.h>
  38#include <net/ndisc.h>
  39#include <net/protocol.h>
  40#include <net/transp_v6.h>
  41#include <net/ip6_route.h>
  42#include <net/raw.h>
  43#include <net/tcp_states.h>
  44#include <net/ip6_checksum.h>
  45#include <net/ip6_tunnel.h>
  46#include <net/xfrm.h>
  47#include <net/inet_hashtables.h>
  48#include <net/inet6_hashtables.h>
  49#include <net/busy_poll.h>
  50#include <net/sock_reuseport.h>
  51
  52#include <linux/proc_fs.h>
  53#include <linux/seq_file.h>
  54#include <trace/events/skb.h>
  55#include "udp_impl.h"
  56
  57static u32 udp6_ehashfn(const struct net *net,
  58			const struct in6_addr *laddr,
  59			const u16 lport,
  60			const struct in6_addr *faddr,
  61			const __be16 fport)
  62{
  63	static u32 udp6_ehash_secret __read_mostly;
  64	static u32 udp_ipv6_hash_secret __read_mostly;
  65
  66	u32 lhash, fhash;
  67
  68	net_get_random_once(&udp6_ehash_secret,
  69			    sizeof(udp6_ehash_secret));
  70	net_get_random_once(&udp_ipv6_hash_secret,
  71			    sizeof(udp_ipv6_hash_secret));
  72
  73	lhash = (__force u32)laddr->s6_addr32[3];
  74	fhash = __ipv6_addr_jhash(faddr, udp_ipv6_hash_secret);
  75
  76	return __inet6_ehashfn(lhash, lport, fhash, fport,
  77			       udp6_ehash_secret + net_hash_mix(net));
  78}
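/*
 * Both secrets above are seeded lazily, once per boot, by
 * net_get_random_once(), so flow hashes are unpredictable across
 * reboots (hash-flooding hardening).  Only s6_addr32[3] of the local
 * address feeds lhash: __inet6_ehashfn() takes a 32-bit local hash,
 * and for IPv4-mapped addresses that word is exactly the IPv4 address.
 */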
  79
  80int udp_v6_get_port(struct sock *sk, unsigned short snum)
  81{
  82	unsigned int hash2_nulladdr =
  83		ipv6_portaddr_hash(sock_net(sk), &in6addr_any, snum);
  84	unsigned int hash2_partial =
  85		ipv6_portaddr_hash(sock_net(sk), &sk->sk_v6_rcv_saddr, 0);
  86
  87	/* precompute partial secondary hash */
  88	udp_sk(sk)->udp_portaddr_hash = hash2_partial;
  89	return udp_lib_get_port(sk, snum, hash2_nulladdr);
  90}
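/*
 * The port+address ("secondary") hash computed here is what
 * udp6_lib_lookup2() walks on receive.  hash2_partial is computed with
 * port 0 because the port may still be auto-assigned; udp_lib_get_port()
 * folds the chosen port into the cached value once it is known.  A
 * minimal userspace trigger for auto-assignment (sketch):
 *
 *	int fd = socket(AF_INET6, SOCK_DGRAM, 0);
 *	sendto(fd, buf, len, 0, (struct sockaddr *)&dst, sizeof(dst));
 *	// no bind(): udp_v6_get_port(sk, 0) picks an ephemeral port
 */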
  91
  92void udp_v6_rehash(struct sock *sk)
  93{
  94	u16 new_hash = ipv6_portaddr_hash(sock_net(sk),
  95					  &sk->sk_v6_rcv_saddr,
  96					  inet_sk(sk)->inet_num);
  97
  98	udp_lib_rehash(sk, new_hash);
  99}
 100
 101static int compute_score(struct sock *sk, struct net *net,
 102			 const struct in6_addr *saddr, __be16 sport,
 103			 const struct in6_addr *daddr, unsigned short hnum,
 104			 int dif, int sdif)
 105{
 106	int score;
 107	struct inet_sock *inet;
 108	bool dev_match;
 109
 110	if (!net_eq(sock_net(sk), net) ||
 111	    udp_sk(sk)->udp_port_hash != hnum ||
 112	    sk->sk_family != PF_INET6)
 113		return -1;
 114
 115	if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
 116		return -1;
 117
 118	score = 0;
 119	inet = inet_sk(sk);
 120
 121	if (inet->inet_dport) {
 122		if (inet->inet_dport != sport)
 123			return -1;
 124		score++;
 125	}
 126
 127	if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
 128		if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr))
 129			return -1;
 130		score++;
 131	}
 132
 133	dev_match = udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif);
 134	if (!dev_match)
 135		return -1;
 136	score++;
 137
 138	if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
 139		score++;
 140
 141	return score;
 142}
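/*
 * Scoring, in short: any hard mismatch (namespace, hashed port, family,
 * local address, a connected peer's address or port, device binding)
 * disqualifies the socket with -1, and every optional constraint the
 * socket does satisfy adds one point.  Callers keep the highest score,
 * so a connected socket always beats a wildcard listener for packets
 * belonging to its own flow.
 */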
 143
 144static struct sock *lookup_reuseport(struct net *net, struct sock *sk,
 145				     struct sk_buff *skb,
 146				     const struct in6_addr *saddr,
 147				     __be16 sport,
 148				     const struct in6_addr *daddr,
 149				     unsigned int hnum)
 150{
 151	struct sock *reuse_sk = NULL;
 152	u32 hash;
 153
 154	if (sk->sk_reuseport && sk->sk_state != TCP_ESTABLISHED) {
 155		hash = udp6_ehashfn(net, daddr, hnum, saddr, sport);
 156		reuse_sk = reuseport_select_sock(sk, hash, skb,
 157						 sizeof(struct udphdr));
 158	}
 159	return reuse_sk;
 160}
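/*
 * This is where SO_REUSEPORT groups get balanced: the 4-tuple hash (or
 * a BPF selector attached to the group) picks one member, except for
 * connected members (state TCP_ESTABLISHED), which are left to plain
 * scoring.  Userspace side, roughly:
 *
 *	int one = 1;
 *	int fd = socket(AF_INET6, SOCK_DGRAM, 0);
 *	setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
 *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 *	// N processes doing the same share the port; datagrams are
 *	// spread among them by the flow hash computed above.
 */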
 161
 162/* called with rcu_read_lock() */
 163static struct sock *udp6_lib_lookup2(struct net *net,
 164		const struct in6_addr *saddr, __be16 sport,
 165		const struct in6_addr *daddr, unsigned int hnum,
 166		int dif, int sdif, struct udp_hslot *hslot2,
 167		struct sk_buff *skb)
 168{
 169	struct sock *sk, *result;
 170	int score, badness;
 171
 172	result = NULL;
 173	badness = -1;
 174	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
 175		score = compute_score(sk, net, saddr, sport,
 176				      daddr, hnum, dif, sdif);
 177		if (score > badness) {
 178			result = lookup_reuseport(net, sk, skb,
 179						  saddr, sport, daddr, hnum);
 180			/* Fall back to scoring if group has connections */
 181			if (result && !reuseport_has_conns(sk, false))
 182				return result;
 183
 184			result = result ? : sk;
 185			badness = score;
 186		}
 187	}
 188	return result;
 189}
 190
 191static inline struct sock *udp6_lookup_run_bpf(struct net *net,
 192					       struct udp_table *udptable,
 193					       struct sk_buff *skb,
 194					       const struct in6_addr *saddr,
 195					       __be16 sport,
 196					       const struct in6_addr *daddr,
 197					       u16 hnum)
 198{
 199	struct sock *sk, *reuse_sk;
 200	bool no_reuseport;
 201
 202	if (udptable != &udp_table)
 203		return NULL; /* only UDP is supported */
 204
 205	no_reuseport = bpf_sk_lookup_run_v6(net, IPPROTO_UDP,
 206					    saddr, sport, daddr, hnum, &sk);
 207	if (no_reuseport || IS_ERR_OR_NULL(sk))
 208		return sk;
 209
 210	reuse_sk = lookup_reuseport(net, sk, skb, saddr, sport, daddr, hnum);
 211	if (reuse_sk)
 212		sk = reuse_sk;
 213	return sk;
 214}
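/*
 * BPF sk_lookup (BPF_PROG_TYPE_SK_LOOKUP programs attached to the
 * netns) may steer the packet to a socket of the program's choice,
 * bypassing the hash tables entirely.  no_reuseport is true when the
 * program marked its pick as final, in which case the chosen socket's
 * reuseport group is deliberately not re-balanced here.
 */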
 215
 216/* rcu_read_lock() must be held */
 217struct sock *__udp6_lib_lookup(struct net *net,
 218			       const struct in6_addr *saddr, __be16 sport,
 219			       const struct in6_addr *daddr, __be16 dport,
 220			       int dif, int sdif, struct udp_table *udptable,
 221			       struct sk_buff *skb)
 222{
 223	unsigned short hnum = ntohs(dport);
 224	unsigned int hash2, slot2;
 225	struct udp_hslot *hslot2;
 226	struct sock *result, *sk;
 227
 228	hash2 = ipv6_portaddr_hash(net, daddr, hnum);
 229	slot2 = hash2 & udptable->mask;
 230	hslot2 = &udptable->hash2[slot2];
 231
 232	/* Lookup connected or non-wildcard sockets */
 233	result = udp6_lib_lookup2(net, saddr, sport,
 234				  daddr, hnum, dif, sdif,
 235				  hslot2, skb);
 236	if (!IS_ERR_OR_NULL(result) && result->sk_state == TCP_ESTABLISHED)
 237		goto done;
 238
 239	/* Lookup redirect from BPF */
 240	if (static_branch_unlikely(&bpf_sk_lookup_enabled)) {
 241		sk = udp6_lookup_run_bpf(net, udptable, skb,
 242					 saddr, sport, daddr, hnum);
 243		if (sk) {
 244			result = sk;
 245			goto done;
 246		}
 247	}
 248
 249	/* Got non-wildcard socket or error on first lookup */
 250	if (result)
 251		goto done;
 252
 253	/* Lookup wildcard sockets */
 254	hash2 = ipv6_portaddr_hash(net, &in6addr_any, hnum);
 255	slot2 = hash2 & udptable->mask;
 256	hslot2 = &udptable->hash2[slot2];
 257
 258	result = udp6_lib_lookup2(net, saddr, sport,
 259				  &in6addr_any, hnum, dif, sdif,
 260				  hslot2, skb);
 261done:
 262	if (IS_ERR(result))
 263		return NULL;
 264	return result;
 265}
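/*
 * Lookup order above: (1) connected/exact sockets hashed on the
 * packet's destination address, (2) an optional BPF sk_lookup redirect,
 * (3) wildcard (in6addr_any) listeners.  Step 3 is skipped whenever
 * step 1 produced any match at all, established or not; an ERR_PTR
 * result (from reuseport selection) is reported as "no socket".
 */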
 266EXPORT_SYMBOL_GPL(__udp6_lib_lookup);
 267
 268static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb,
 269					  __be16 sport, __be16 dport,
 270					  struct udp_table *udptable)
 271{
 272	const struct ipv6hdr *iph = ipv6_hdr(skb);
 273
 274	return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
 275				 &iph->daddr, dport, inet6_iif(skb),
 276				 inet6_sdif(skb), udptable, skb);
 277}
 278
 279struct sock *udp6_lib_lookup_skb(struct sk_buff *skb,
 280				 __be16 sport, __be16 dport)
 281{
 282	const struct ipv6hdr *iph = ipv6_hdr(skb);
 283
 284	return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
 285				 &iph->daddr, dport, inet6_iif(skb),
 286				 inet6_sdif(skb), &udp_table, NULL);
 287}
 288EXPORT_SYMBOL_GPL(udp6_lib_lookup_skb);
 289
 290/* Must be called under rcu_read_lock().
 291 * Does increment socket refcount.
 292 */
 293#if IS_ENABLED(CONFIG_NF_TPROXY_IPV6) || IS_ENABLED(CONFIG_NF_SOCKET_IPV6)
 294struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport,
 295			     const struct in6_addr *daddr, __be16 dport, int dif)
 296{
 297	struct sock *sk;
 298
 299	sk =  __udp6_lib_lookup(net, saddr, sport, daddr, dport,
 300				dif, 0, &udp_table, NULL);
 301	if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
 302		sk = NULL;
 303	return sk;
 304}
 305EXPORT_SYMBOL_GPL(udp6_lib_lookup);
 306#endif
 307
 308/* do not use the scratch area len for jumbograms: their length exceeds the
 309 * scratch area space; note that the IP6CB flags are still in the first
 310 * cacheline, so checking for jumbograms is cheap
 311 */
 312static int udp6_skb_len(struct sk_buff *skb)
 313{
 314	return unlikely(inet6_is_jumbogram(skb)) ? skb->len : udp_skb_len(skb);
 315}
 316
 317/*
 318 *	This should be easy: if there is something there, we
 319 *	return it; otherwise we block.
 320 */
 321
 322int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
 323		  int noblock, int flags, int *addr_len)
 324{
 325	struct ipv6_pinfo *np = inet6_sk(sk);
 326	struct inet_sock *inet = inet_sk(sk);
 327	struct sk_buff *skb;
 328	unsigned int ulen, copied;
 329	int off, err, peeking = flags & MSG_PEEK;
 330	int is_udplite = IS_UDPLITE(sk);
 331	struct udp_mib __percpu *mib;
 332	bool checksum_valid = false;
 333	int is_udp4;
 334
 335	if (flags & MSG_ERRQUEUE)
 336		return ipv6_recv_error(sk, msg, len, addr_len);
 337
 338	if (np->rxpmtu && np->rxopt.bits.rxpmtu)
 339		return ipv6_recv_rxpmtu(sk, msg, len, addr_len);
 340
 341try_again:
 342	off = sk_peek_offset(sk, flags);
 343	skb = __skb_recv_udp(sk, flags, noblock, &off, &err);
 344	if (!skb)
 345		return err;
 346
 347	ulen = udp6_skb_len(skb);
 348	copied = len;
 349	if (copied > ulen - off)
 350		copied = ulen - off;
 351	else if (copied < ulen)
 352		msg->msg_flags |= MSG_TRUNC;
 353
 354	is_udp4 = (skb->protocol == htons(ETH_P_IP));
 355	mib = __UDPX_MIB(sk, is_udp4);
 356
 357	/*
 358	 * If checksum is needed at all, try to do it while copying the
 359	 * data.  If the data is truncated, or if we only want a partial
 360	 * coverage checksum (UDP-Lite), do it before the copy.
 361	 */
 362
 363	if (copied < ulen || peeking ||
 364	    (is_udplite && UDP_SKB_CB(skb)->partial_cov)) {
 365		checksum_valid = udp_skb_csum_unnecessary(skb) ||
 366				!__udp_lib_checksum_complete(skb);
 367		if (!checksum_valid)
 368			goto csum_copy_err;
 369	}
 370
 371	if (checksum_valid || udp_skb_csum_unnecessary(skb)) {
 372		if (udp_skb_is_linear(skb))
 373			err = copy_linear_skb(skb, copied, off, &msg->msg_iter);
 374		else
 375			err = skb_copy_datagram_msg(skb, off, msg, copied);
 376	} else {
 377		err = skb_copy_and_csum_datagram_msg(skb, off, msg);
 378		if (err == -EINVAL)
 379			goto csum_copy_err;
 380	}
 381	if (unlikely(err)) {
 382		if (!peeking) {
 383			atomic_inc(&sk->sk_drops);
 384			SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
 385		}
 386		kfree_skb(skb);
 387		return err;
 388	}
 389	if (!peeking)
 390		SNMP_INC_STATS(mib, UDP_MIB_INDATAGRAMS);
 391
 392	sock_recv_ts_and_drops(msg, sk, skb);
 393
 394	/* Copy the address. */
 395	if (msg->msg_name) {
 396		DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
 397		sin6->sin6_family = AF_INET6;
 398		sin6->sin6_port = udp_hdr(skb)->source;
 399		sin6->sin6_flowinfo = 0;
 400
 401		if (is_udp4) {
 402			ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
 403					       &sin6->sin6_addr);
 404			sin6->sin6_scope_id = 0;
 405		} else {
 406			sin6->sin6_addr = ipv6_hdr(skb)->saddr;
 407			sin6->sin6_scope_id =
 408				ipv6_iface_scope_id(&sin6->sin6_addr,
 409						    inet6_iif(skb));
 410		}
 411		*addr_len = sizeof(*sin6);
 412
 413		if (cgroup_bpf_enabled)
 414			BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk,
 415						(struct sockaddr *)sin6);
 416	}
 417
 418	if (udp_sk(sk)->gro_enabled)
 419		udp_cmsg_recv(msg, sk, skb);
 420
 421	if (np->rxopt.all)
 422		ip6_datagram_recv_common_ctl(sk, msg, skb);
 423
 424	if (is_udp4) {
 425		if (inet->cmsg_flags)
 426			ip_cmsg_recv_offset(msg, sk, skb,
 427					    sizeof(struct udphdr), off);
 428	} else {
 429		if (np->rxopt.all)
 430			ip6_datagram_recv_specific_ctl(sk, msg, skb);
 431	}
 432
 433	err = copied;
 434	if (flags & MSG_TRUNC)
 435		err = ulen;
 436
 437	skb_consume_udp(sk, skb, peeking ? -err : err);
 438	return err;
 439
 440csum_copy_err:
 441	if (!__sk_queue_drop_skb(sk, &udp_sk(sk)->reader_queue, skb, flags,
 442				 udp_skb_destructor)) {
 443		SNMP_INC_STATS(mib, UDP_MIB_CSUMERRORS);
 444		SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
 445	}
 446	kfree_skb(skb);
 447
 448	/* starting over for a new packet, but check if we need to yield */
 449	cond_resched();
 450	msg->msg_flags &= ~MSG_TRUNC;
 451	goto try_again;
 452}
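/*
 * Userspace view of the above (sketch): an undersized buffer sets
 * MSG_TRUNC on the message and, if MSG_TRUNC was also passed in, the
 * call returns the full datagram length rather than the copied length:
 *
 *	char buf[16];
 *	ssize_t n = recv(fd, buf, sizeof(buf), MSG_TRUNC);
 *	// n may exceed 16; only the first 16 bytes were copied
 *
 * MSG_PEEK forces the checksum to be verified before copying, since a
 * peeked datagram stays queued and must not be consumed half-checked.
 */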
 453
 454DEFINE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
 455void udpv6_encap_enable(void)
 456{
 457	static_branch_inc(&udpv6_encap_needed_key);
 458}
 459EXPORT_SYMBOL(udpv6_encap_enable);
 460
 461/* Handler for tunnels with arbitrary destination ports: no socket lookup, go
 462 * through error handlers in encapsulations looking for a match.
 463 */
 464static int __udp6_lib_err_encap_no_sk(struct sk_buff *skb,
 465				      struct inet6_skb_parm *opt,
 466				      u8 type, u8 code, int offset, __be32 info)
 467{
 468	int i;
 469
 470	for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) {
 471		int (*handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
 472			       u8 type, u8 code, int offset, __be32 info);
 473		const struct ip6_tnl_encap_ops *encap;
 474
 475		encap = rcu_dereference(ip6tun_encaps[i]);
 476		if (!encap)
 477			continue;
 478		handler = encap->err_handler;
 479		if (handler && !handler(skb, opt, type, code, offset, info))
 480			return 0;
 481	}
 482
 483	return -ENOENT;
 484}
 485
 486/* Try to match ICMP errors to UDP tunnels by looking up a socket without
 487 * reversing source and destination port: this will match tunnels that force the
 488 * same destination port on both endpoints (e.g. VXLAN, GENEVE). Note that
 489 * lwtunnels might actually break this assumption by being configured with
 490 * different destination ports on endpoints, in this case we won't be able to
 491 * trace ICMP messages back to them.
 492 *
 493 * If this doesn't match any socket, probe tunnels with arbitrary destination
 494 * ports (e.g. FoU, GUE): there, the receiving socket is useless, as the port
 495 * we've sent packets to won't necessarily match the local destination port.
 496 *
 497 * Then ask the tunnel implementation to match the error against a valid
 498 * association.
 499 *
 500 * Return an error if we can't find a match, the socket if we need further
 501 * processing, zero otherwise.
 502 */
 503static struct sock *__udp6_lib_err_encap(struct net *net,
 504					 const struct ipv6hdr *hdr, int offset,
 505					 struct udphdr *uh,
 506					 struct udp_table *udptable,
 507					 struct sk_buff *skb,
 508					 struct inet6_skb_parm *opt,
 509					 u8 type, u8 code, __be32 info)
 510{
 511	int network_offset, transport_offset;
 512	struct sock *sk;
 513
 514	network_offset = skb_network_offset(skb);
 515	transport_offset = skb_transport_offset(skb);
 516
 517	/* Network header needs to point to the outer IPv6 header inside ICMP */
 518	skb_reset_network_header(skb);
 519
 520	/* Transport header needs to point to the UDP header */
 521	skb_set_transport_header(skb, offset);
 522
 523	sk = __udp6_lib_lookup(net, &hdr->daddr, uh->source,
 524			       &hdr->saddr, uh->dest,
 525			       inet6_iif(skb), 0, udptable, skb);
 526	if (sk) {
 527		int (*lookup)(struct sock *sk, struct sk_buff *skb);
 528		struct udp_sock *up = udp_sk(sk);
 529
 530		lookup = READ_ONCE(up->encap_err_lookup);
 531		if (!lookup || lookup(sk, skb))
 532			sk = NULL;
 533	}
 534
 535	if (!sk) {
 536		sk = ERR_PTR(__udp6_lib_err_encap_no_sk(skb, opt, type, code,
 537							offset, info));
 538	}
 539
 540	skb_set_transport_header(skb, transport_offset);
 541	skb_set_network_header(skb, network_offset);
 542
 543	return sk;
 544}
 545
 546int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 547		   u8 type, u8 code, int offset, __be32 info,
 548		   struct udp_table *udptable)
 549{
 550	struct ipv6_pinfo *np;
 551	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
 552	const struct in6_addr *saddr = &hdr->saddr;
 553	const struct in6_addr *daddr = &hdr->daddr;
 554	struct udphdr *uh = (struct udphdr *)(skb->data+offset);
 555	bool tunnel = false;
 556	struct sock *sk;
 557	int harderr;
 558	int err;
 559	struct net *net = dev_net(skb->dev);
 560
 561	sk = __udp6_lib_lookup(net, daddr, uh->dest, saddr, uh->source,
 562			       inet6_iif(skb), inet6_sdif(skb), udptable, NULL);
 563	if (!sk) {
 564		/* No socket for error: try tunnels before discarding */
 565		sk = ERR_PTR(-ENOENT);
 566		if (static_branch_unlikely(&udpv6_encap_needed_key)) {
 567			sk = __udp6_lib_err_encap(net, hdr, offset, uh,
 568						  udptable, skb,
 569						  opt, type, code, info);
 570			if (!sk)
 571				return 0;
 572		}
 573
 574		if (IS_ERR(sk)) {
 575			__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
 576					  ICMP6_MIB_INERRORS);
 577			return PTR_ERR(sk);
 578		}
 579
 580		tunnel = true;
 581	}
 582
 583	harderr = icmpv6_err_convert(type, code, &err);
 584	np = inet6_sk(sk);
 585
 586	if (type == ICMPV6_PKT_TOOBIG) {
 587		if (!ip6_sk_accept_pmtu(sk))
 588			goto out;
 589		ip6_sk_update_pmtu(skb, sk, info);
 590		if (np->pmtudisc != IPV6_PMTUDISC_DONT)
 591			harderr = 1;
 592	}
 593	if (type == NDISC_REDIRECT) {
 594		if (tunnel) {
 595			ip6_redirect(skb, sock_net(sk), inet6_iif(skb),
 596				     sk->sk_mark, sk->sk_uid);
 597		} else {
 598			ip6_sk_redirect(skb, sk);
 599		}
 600		goto out;
 601	}
 602
 603	/* Tunnels don't have an application socket: don't pass errors back */
 604	if (tunnel)
 605		goto out;
 606
 607	if (!np->recverr) {
 608		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
 609			goto out;
 610	} else {
 611		ipv6_icmp_error(sk, skb, err, uh->dest, ntohl(info), (u8 *)(uh+1));
 612	}
 613
 614	sk->sk_err = err;
 615	sk->sk_error_report(sk);
 616out:
 617	return 0;
 618}
 619
 620static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 621{
 622	int rc;
 623
 624	if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
 625		sock_rps_save_rxhash(sk, skb);
 626		sk_mark_napi_id(sk, skb);
 627		sk_incoming_cpu_update(sk);
 628	} else {
 629		sk_mark_napi_id_once(sk, skb);
 630	}
 631
 632	rc = __udp_enqueue_schedule_skb(sk, skb);
 633	if (rc < 0) {
 634		int is_udplite = IS_UDPLITE(sk);
 635
 636		/* Note that an ENOMEM error is charged twice */
 637		if (rc == -ENOMEM)
 638			UDP6_INC_STATS(sock_net(sk),
 639					 UDP_MIB_RCVBUFERRORS, is_udplite);
 640		UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
 641		kfree_skb(skb);
 642		return -1;
 643	}
 644
 645	return 0;
 646}
 647
 648static __inline__ int udpv6_err(struct sk_buff *skb,
 649				struct inet6_skb_parm *opt, u8 type,
 650				u8 code, int offset, __be32 info)
 651{
 652	return __udp6_lib_err(skb, opt, type, code, offset, info, &udp_table);
 653}
 654
 655static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
 656{
 657	struct udp_sock *up = udp_sk(sk);
 658	int is_udplite = IS_UDPLITE(sk);
 659
 660	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
 661		goto drop;
 662
 663	if (static_branch_unlikely(&udpv6_encap_needed_key) && up->encap_type) {
 664		int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
 665
 666		/*
 667		 * This is an encapsulation socket so pass the skb to
 668		 * the socket's udp_encap_rcv() hook. Otherwise, just
 669		 * fall through and pass this up the UDP socket.
 670		 * up->encap_rcv() returns the following value:
 671		 * =0 if skb was successfully passed to the encap
 672		 *    handler or was discarded by it.
 673		 * >0 if skb should be passed on to UDP.
 674		 * <0 if skb should be resubmitted as proto -N
 675		 */
 676
 677		/* if we're overly short, let UDP handle it */
 678		encap_rcv = READ_ONCE(up->encap_rcv);
 679		if (encap_rcv) {
 680			int ret;
 681
 682			/* Verify checksum before giving to encap */
 683			if (udp_lib_checksum_complete(skb))
 684				goto csum_error;
 685
 686			ret = encap_rcv(sk, skb);
 687			if (ret <= 0) {
 688				__UDP_INC_STATS(sock_net(sk),
 689						UDP_MIB_INDATAGRAMS,
 690						is_udplite);
 691				return -ret;
 692			}
 693		}
 694
 695		/* FALLTHROUGH -- it's a UDP Packet */
 696	}
 697
 698	/*
 699	 * UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c).
 700	 */
 701	if ((up->pcflag & UDPLITE_RECV_CC)  &&  UDP_SKB_CB(skb)->partial_cov) {
 702
 703		if (up->pcrlen == 0) {          /* full coverage was set  */
 704			net_dbg_ratelimited("UDPLITE6: partial coverage %d while full coverage %d requested\n",
 705					    UDP_SKB_CB(skb)->cscov, skb->len);
 706			goto drop;
 707		}
 708		if (UDP_SKB_CB(skb)->cscov  <  up->pcrlen) {
 709			net_dbg_ratelimited("UDPLITE6: coverage %d too small, need min %d\n",
 710					    UDP_SKB_CB(skb)->cscov, up->pcrlen);
 711			goto drop;
 712		}
 713	}
 714
 715	prefetch(&sk->sk_rmem_alloc);
 716	if (rcu_access_pointer(sk->sk_filter) &&
 717	    udp_lib_checksum_complete(skb))
 718		goto csum_error;
 719
 720	if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr)))
 721		goto drop;
 722
 723	udp_csum_pull_header(skb);
 724
 725	skb_dst_drop(skb);
 726
 727	return __udpv6_queue_rcv_skb(sk, skb);
 728
 729csum_error:
 730	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
 731drop:
 732	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
 733	atomic_inc(&sk->sk_drops);
 734	kfree_skb(skb);
 735	return -1;
 736}
 737
 738static int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 739{
 740	struct sk_buff *next, *segs;
 741	int ret;
 742
 743	if (likely(!udp_unexpected_gso(sk, skb)))
 744		return udpv6_queue_rcv_one_skb(sk, skb);
 745
 746	__skb_push(skb, -skb_mac_offset(skb));
 747	segs = udp_rcv_segment(sk, skb, false);
 748	skb_list_walk_safe(segs, skb, next) {
 749		__skb_pull(skb, skb_transport_offset(skb));
 750
 751		ret = udpv6_queue_rcv_one_skb(sk, skb);
 752		if (ret > 0)
 753			ip6_protocol_deliver_rcu(dev_net(skb->dev), skb, ret,
 754						 true);
 755	}
 756	return 0;
 757}
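/*
 * udp_unexpected_gso() is true when a GRO-aggregated (SKB_GSO_UDP_L4)
 * skb reaches a socket that never enabled UDP_GRO: udp_rcv_segment()
 * then re-segments the aggregate into individual datagrams before
 * queueing.  A positive return from the per-segment handler is a
 * protocol number asking for resubmission, served inline through
 * ip6_protocol_deliver_rcu().
 */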
 758
 759static bool __udp_v6_is_mcast_sock(struct net *net, struct sock *sk,
 760				   __be16 loc_port, const struct in6_addr *loc_addr,
 761				   __be16 rmt_port, const struct in6_addr *rmt_addr,
 762				   int dif, int sdif, unsigned short hnum)
 763{
 764	struct inet_sock *inet = inet_sk(sk);
 765
 766	if (!net_eq(sock_net(sk), net))
 767		return false;
 768
 769	if (udp_sk(sk)->udp_port_hash != hnum ||
 770	    sk->sk_family != PF_INET6 ||
 771	    (inet->inet_dport && inet->inet_dport != rmt_port) ||
 772	    (!ipv6_addr_any(&sk->sk_v6_daddr) &&
 773		    !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr)) ||
 774	    !udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif) ||
 775	    (!ipv6_addr_any(&sk->sk_v6_rcv_saddr) &&
 776		    !ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr)))
 777		return false;
 778	if (!inet6_mc_check(sk, loc_addr, rmt_addr))
 779		return false;
 780	return true;
 781}
 782
 783static void udp6_csum_zero_error(struct sk_buff *skb)
 784{
 785	/* RFC 2460 section 8.1 says that we SHOULD log
 786	 * this error. Well, it is reasonable.
 787	 */
 788	net_dbg_ratelimited("IPv6: udp checksum is 0 for [%pI6c]:%u->[%pI6c]:%u\n",
 789			    &ipv6_hdr(skb)->saddr, ntohs(udp_hdr(skb)->source),
 790			    &ipv6_hdr(skb)->daddr, ntohs(udp_hdr(skb)->dest));
 791}
 792
 793/*
 794 * Note: called only from the BH handler context,
 795 * so we don't need to lock the hashes.
 796 */
 797static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
 798		const struct in6_addr *saddr, const struct in6_addr *daddr,
 799		struct udp_table *udptable, int proto)
 800{
 801	struct sock *sk, *first = NULL;
 802	const struct udphdr *uh = udp_hdr(skb);
 803	unsigned short hnum = ntohs(uh->dest);
 804	struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
 805	unsigned int offset = offsetof(typeof(*sk), sk_node);
 806	unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
 807	int dif = inet6_iif(skb);
 808	int sdif = inet6_sdif(skb);
 809	struct hlist_node *node;
 810	struct sk_buff *nskb;
 811
 812	if (use_hash2) {
 813		hash2_any = ipv6_portaddr_hash(net, &in6addr_any, hnum) &
 814			    udptable->mask;
 815		hash2 = ipv6_portaddr_hash(net, daddr, hnum) & udptable->mask;
 816start_lookup:
 817		hslot = &udptable->hash2[hash2];
 818		offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
 819	}
 820
 821	sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) {
 822		if (!__udp_v6_is_mcast_sock(net, sk, uh->dest, daddr,
 823					    uh->source, saddr, dif, sdif,
 824					    hnum))
 825			continue;
 826		/* If the checksum is zero and zero-checksum receive
 827		 * (no_check6_rx) is not enabled on the socket, skip it.
 828		 */
 829		if (!uh->check && !udp_sk(sk)->no_check6_rx)
 830			continue;
 831		if (!first) {
 832			first = sk;
 833			continue;
 834		}
 835		nskb = skb_clone(skb, GFP_ATOMIC);
 836		if (unlikely(!nskb)) {
 837			atomic_inc(&sk->sk_drops);
 838			__UDP6_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
 839					 IS_UDPLITE(sk));
 840			__UDP6_INC_STATS(net, UDP_MIB_INERRORS,
 841					 IS_UDPLITE(sk));
 842			continue;
 843		}
 844
 845		if (udpv6_queue_rcv_skb(sk, nskb) > 0)
 846			consume_skb(nskb);
 847	}
 848
 849	/* Also lookup *:port if we are using hash2 and haven't done so yet. */
 850	if (use_hash2 && hash2 != hash2_any) {
 851		hash2 = hash2_any;
 852		goto start_lookup;
 853	}
 854
 855	if (first) {
 856		if (udpv6_queue_rcv_skb(first, skb) > 0)
 857			consume_skb(skb);
 858	} else {
 859		kfree_skb(skb);
 860		__UDP6_INC_STATS(net, UDP_MIB_IGNOREDMULTI,
 861				 proto == IPPROTO_UDPLITE);
 862	}
 863	return 0;
 864}
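/*
 * Delivery above clones the skb once per additional matching socket;
 * the first match is only remembered, and receives the original skb at
 * the end, which saves one clone in the common single-listener case.
 * The hash2 path is only worth the extra pass for crowded primary
 * buckets, hence the "count > 10" heuristic.
 */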
 865
 866static void udp6_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
 867{
 868	if (udp_sk_rx_dst_set(sk, dst)) {
 869		const struct rt6_info *rt = (const struct rt6_info *)dst;
 870
 871		inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
 872	}
 873}
 874
 875/* wrapper for udp_queue_rcv_skb taking care of csum conversion and
 876 * return code conversion for ip layer consumption
 877 */
 878static int udp6_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
 879				struct udphdr *uh)
 880{
 881	int ret;
 882
 883	if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
 884		skb_checksum_try_convert(skb, IPPROTO_UDP, ip6_compute_pseudo);
 885
 886	ret = udpv6_queue_rcv_skb(sk, skb);
 887
 888	/* a return value > 0 means to resubmit the input */
 889	if (ret > 0)
 890		return ret;
 891	return 0;
 892}
 893
 894int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
 895		   int proto)
 896{
 897	const struct in6_addr *saddr, *daddr;
 898	struct net *net = dev_net(skb->dev);
 899	struct udphdr *uh;
 900	struct sock *sk;
 901	bool refcounted;
 902	u32 ulen = 0;
 903
 904	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
 905		goto discard;
 906
 907	saddr = &ipv6_hdr(skb)->saddr;
 908	daddr = &ipv6_hdr(skb)->daddr;
 909	uh = udp_hdr(skb);
 910
 911	ulen = ntohs(uh->len);
 912	if (ulen > skb->len)
 913		goto short_packet;
 914
 915	if (proto == IPPROTO_UDP) {
 916		/* UDP validates ulen. */
 917
 918		/* Check for jumbo payload */
 919		if (ulen == 0)
 920			ulen = skb->len;
 921
 922		if (ulen < sizeof(*uh))
 923			goto short_packet;
 924
 925		if (ulen < skb->len) {
 926			if (pskb_trim_rcsum(skb, ulen))
 927				goto short_packet;
 928			saddr = &ipv6_hdr(skb)->saddr;
 929			daddr = &ipv6_hdr(skb)->daddr;
 930			uh = udp_hdr(skb);
 931		}
 932	}
 933
 934	if (udp6_csum_init(skb, uh, proto))
 935		goto csum_error;
 936
 937	/* Check if the socket is already available, e.g. due to early demux */
 938	sk = skb_steal_sock(skb, &refcounted);
 939	if (sk) {
 940		struct dst_entry *dst = skb_dst(skb);
 941		int ret;
 942
 943		if (unlikely(sk->sk_rx_dst != dst))
 944			udp6_sk_rx_dst_set(sk, dst);
 945
 946		if (!uh->check && !udp_sk(sk)->no_check6_rx) {
 947			if (refcounted)
 948				sock_put(sk);
 949			goto report_csum_error;
 950		}
 951
 952		ret = udp6_unicast_rcv_skb(sk, skb, uh);
 953		if (refcounted)
 954			sock_put(sk);
 955		return ret;
 956	}
 957
 958	/*
 959	 *	Multicast receive code
 960	 */
 961	if (ipv6_addr_is_multicast(daddr))
 962		return __udp6_lib_mcast_deliver(net, skb,
 963				saddr, daddr, udptable, proto);
 964
 965	/* Unicast */
 966	sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
 967	if (sk) {
 968		if (!uh->check && !udp_sk(sk)->no_check6_rx)
 969			goto report_csum_error;
 970		return udp6_unicast_rcv_skb(sk, skb, uh);
 971	}
 972
 973	if (!uh->check)
 974		goto report_csum_error;
 975
 976	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
 977		goto discard;
 978
 979	if (udp_lib_checksum_complete(skb))
 980		goto csum_error;
 981
 982	__UDP6_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
 983	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
 984
 985	kfree_skb(skb);
 986	return 0;
 987
 988short_packet:
 989	net_dbg_ratelimited("UDP%sv6: short packet: From [%pI6c]:%u %d/%d to [%pI6c]:%u\n",
 990			    proto == IPPROTO_UDPLITE ? "-Lite" : "",
 991			    saddr, ntohs(uh->source),
 992			    ulen, skb->len,
 993			    daddr, ntohs(uh->dest));
 994	goto discard;
 995
 996report_csum_error:
 997	udp6_csum_zero_error(skb);
 998csum_error:
 999	__UDP6_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
1000discard:
1001	__UDP6_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
1002	kfree_skb(skb);
1003	return 0;
1004}
1005
1006
1007static struct sock *__udp6_lib_demux_lookup(struct net *net,
1008			__be16 loc_port, const struct in6_addr *loc_addr,
1009			__be16 rmt_port, const struct in6_addr *rmt_addr,
1010			int dif, int sdif)
1011{
1012	unsigned short hnum = ntohs(loc_port);
1013	unsigned int hash2 = ipv6_portaddr_hash(net, loc_addr, hnum);
1014	unsigned int slot2 = hash2 & udp_table.mask;
1015	struct udp_hslot *hslot2 = &udp_table.hash2[slot2];
1016	const __portpair ports = INET_COMBINED_PORTS(rmt_port, hnum);
1017	struct sock *sk;
1018
1019	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
1020		if (sk->sk_state == TCP_ESTABLISHED &&
1021		    INET6_MATCH(sk, net, rmt_addr, loc_addr, ports, dif, sdif))
1022			return sk;
1023		/* Only check first socket in chain */
1024		break;
1025	}
1026	return NULL;
1027}
1028
1029INDIRECT_CALLABLE_SCOPE void udp_v6_early_demux(struct sk_buff *skb)
1030{
1031	struct net *net = dev_net(skb->dev);
1032	const struct udphdr *uh;
1033	struct sock *sk;
1034	struct dst_entry *dst;
1035	int dif = skb->dev->ifindex;
1036	int sdif = inet6_sdif(skb);
1037
1038	if (!pskb_may_pull(skb, skb_transport_offset(skb) +
1039	    sizeof(struct udphdr)))
1040		return;
1041
1042	uh = udp_hdr(skb);
1043
1044	if (skb->pkt_type == PACKET_HOST)
1045		sk = __udp6_lib_demux_lookup(net, uh->dest,
1046					     &ipv6_hdr(skb)->daddr,
1047					     uh->source, &ipv6_hdr(skb)->saddr,
1048					     dif, sdif);
1049	else
1050		return;
1051
1052	if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
1053		return;
1054
1055	skb->sk = sk;
1056	skb->destructor = sock_efree;
1057	dst = READ_ONCE(sk->sk_rx_dst);
1058
1059	if (dst)
1060		dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
1061	if (dst) {
1062		/* set noref for now.
1063		 * any place which wants to hold dst has to call
1064		 * dst_hold_safe()
1065		 */
1066		skb_dst_set_noref(skb, dst);
1067	}
1068}
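/*
 * Early demux runs from the IPv6 receive path before routing: when a
 * connected (TCP_ESTABLISHED) UDP socket matches the packet's 4-tuple,
 * its cached rx dst lets the stack skip the route lookup entirely.
 * Only PACKET_HOST traffic and only the first socket in the chain are
 * considered, keeping this O(1) on the hot path.
 */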
1069
1070INDIRECT_CALLABLE_SCOPE int udpv6_rcv(struct sk_buff *skb)
1071{
1072	return __udp6_lib_rcv(skb, &udp_table, IPPROTO_UDP);
1073}
1074
1075/*
1076 * Throw away all pending data and cancel the corking. Socket is locked.
1077 */
1078static void udp_v6_flush_pending_frames(struct sock *sk)
1079{
1080	struct udp_sock *up = udp_sk(sk);
1081
1082	if (up->pending == AF_INET)
1083		udp_flush_pending_frames(sk);
1084	else if (up->pending) {
1085		up->len = 0;
1086		up->pending = 0;
1087		ip6_flush_pending_frames(sk);
1088	}
1089}
1090
1091static int udpv6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
1092			     int addr_len)
1093{
1094	if (addr_len < offsetofend(struct sockaddr, sa_family))
1095		return -EINVAL;
1096	/* The following checks are replicated from __ip6_datagram_connect()
1097	 * and intended to prevent BPF program called below from accessing
1098	 * bytes that are out of the bound specified by user in addr_len.
1099	 */
1100	if (uaddr->sa_family == AF_INET) {
1101		if (__ipv6_only_sock(sk))
1102			return -EAFNOSUPPORT;
1103		return udp_pre_connect(sk, uaddr, addr_len);
1104	}
1105
1106	if (addr_len < SIN6_LEN_RFC2133)
1107		return -EINVAL;
1108
1109	return BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr);
1110}
1111
1112/**
1113 *	udp6_hwcsum_outgoing  -  handle outgoing HW checksumming
1114 *	@sk:	socket we are sending on
1115 *	@skb:	sk_buff containing the filled-in UDP header
1116 *		(checksum field must be zeroed out)
1117 *	@saddr: source address
1118 *	@daddr: destination address
1119 *	@len:	length of packet
1120 */
1121static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
1122				 const struct in6_addr *saddr,
1123				 const struct in6_addr *daddr, int len)
1124{
1125	unsigned int offset;
1126	struct udphdr *uh = udp_hdr(skb);
1127	struct sk_buff *frags = skb_shinfo(skb)->frag_list;
1128	__wsum csum = 0;
1129
1130	if (!frags) {
1131		/* Only one fragment on the socket.  */
1132		skb->csum_start = skb_transport_header(skb) - skb->head;
1133		skb->csum_offset = offsetof(struct udphdr, check);
1134		uh->check = ~csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP, 0);
1135	} else {
1136		/*
1137		 * HW checksum won't work as there are two or more
1138		 * fragments on the socket, so the checksums of all
1139		 * sk_buffs must be folded together in software
1140		 */
1141		offset = skb_transport_offset(skb);
1142		skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
1143		csum = skb->csum;
1144
1145		skb->ip_summed = CHECKSUM_NONE;
1146
1147		do {
1148			csum = csum_add(csum, frags->csum);
1149		} while ((frags = frags->next));
1150
1151		uh->check = csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP,
1152					    csum);
1153		if (uh->check == 0)
1154			uh->check = CSUM_MANGLED_0;
1155	}
1156}
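/*
 * The CSUM_MANGLED_0 fold above matters because, unlike IPv4, IPv6
 * (RFC 2460) mandates the UDP checksum: a computed value of 0 would
 * read on the wire as "no checksum", so it is transmitted as all-ones
 * instead, which is the equivalent value in one's-complement arithmetic.
 */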
1157
1158/*
1159 *	Sending
1160 */
1161
1162static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
1163			   struct inet_cork *cork)
1164{
1165	struct sock *sk = skb->sk;
1166	struct udphdr *uh;
1167	int err = 0;
1168	int is_udplite = IS_UDPLITE(sk);
1169	__wsum csum = 0;
1170	int offset = skb_transport_offset(skb);
1171	int len = skb->len - offset;
1172	int datalen = len - sizeof(*uh);
1173
1174	/*
1175	 * Create a UDP header
1176	 */
1177	uh = udp_hdr(skb);
1178	uh->source = fl6->fl6_sport;
1179	uh->dest = fl6->fl6_dport;
1180	uh->len = htons(len);
1181	uh->check = 0;
1182
1183	if (cork->gso_size) {
1184		const int hlen = skb_network_header_len(skb) +
1185				 sizeof(struct udphdr);
1186
1187		if (hlen + cork->gso_size > cork->fragsize) {
1188			kfree_skb(skb);
1189			return -EINVAL;
1190		}
1191		if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) {
1192			kfree_skb(skb);
1193			return -EINVAL;
1194		}
1195		if (udp_sk(sk)->no_check6_tx) {
1196			kfree_skb(skb);
1197			return -EINVAL;
1198		}
1199		if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
1200		    dst_xfrm(skb_dst(skb))) {
1201			kfree_skb(skb);
1202			return -EIO;
1203		}
1204
1205		if (datalen > cork->gso_size) {
1206			skb_shinfo(skb)->gso_size = cork->gso_size;
1207			skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
1208			skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen,
1209								 cork->gso_size);
1210		}
1211		goto csum_partial;
1212	}
1213
1214	if (is_udplite)
1215		csum = udplite_csum(skb);
1216	else if (udp_sk(sk)->no_check6_tx) {   /* UDP csum disabled */
1217		skb->ip_summed = CHECKSUM_NONE;
1218		goto send;
1219	} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
1220csum_partial:
1221		udp6_hwcsum_outgoing(sk, skb, &fl6->saddr, &fl6->daddr, len);
1222		goto send;
1223	} else
1224		csum = udp_csum(skb);
1225
1226	/* add protocol-dependent pseudo-header */
1227	uh->check = csum_ipv6_magic(&fl6->saddr, &fl6->daddr,
1228				    len, fl6->flowi6_proto, csum);
1229	if (uh->check == 0)
1230		uh->check = CSUM_MANGLED_0;
1231
1232send:
1233	err = ip6_send_skb(skb);
1234	if (err) {
1235		if (err == -ENOBUFS && !inet6_sk(sk)->recverr) {
1236			UDP6_INC_STATS(sock_net(sk),
1237				       UDP_MIB_SNDBUFERRORS, is_udplite);
1238			err = 0;
1239		}
1240	} else {
1241		UDP6_INC_STATS(sock_net(sk),
1242			       UDP_MIB_OUTDATAGRAMS, is_udplite);
1243	}
1244	return err;
1245}
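/*
 * The cork->gso_size path above implements UDP_SEGMENT (UDP GSO): a
 * single oversized payload is handed to the stack and cut into
 * gso_size chunks by software or hardware.  Userspace sketch, on a
 * connected socket:
 *
 *	int gso = 1400;
 *	setsockopt(fd, SOL_UDP, UDP_SEGMENT, &gso, sizeof(gso));
 *	send(fd, buf, 60000, 0);	// ~43 datagrams on the wire
 *
 * The -EINVAL/-EIO guards reject what GSO cannot express: segments
 * exceeding the cork's fragment size, too many segments, a disabled
 * TX checksum, UDP-Lite, or an xfrm (IPsec) transformed dst.
 */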
1246
1247static int udp_v6_push_pending_frames(struct sock *sk)
1248{
1249	struct sk_buff *skb;
1250	struct udp_sock  *up = udp_sk(sk);
1251	struct flowi6 fl6;
1252	int err = 0;
1253
1254	if (up->pending == AF_INET)
1255		return udp_push_pending_frames(sk);
1256
1257	/* ip6_finish_skb will release the cork, so make a copy of
1258	 * fl6 here.
1259	 */
1260	fl6 = inet_sk(sk)->cork.fl.u.ip6;
1261
1262	skb = ip6_finish_skb(sk);
1263	if (!skb)
1264		goto out;
1265
1266	err = udp_v6_send_skb(skb, &fl6, &inet_sk(sk)->cork.base);
1267
1268out:
1269	up->len = 0;
1270	up->pending = 0;
1271	return err;
1272}
1273
1274int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
1275{
1276	struct ipv6_txoptions opt_space;
1277	struct udp_sock *up = udp_sk(sk);
1278	struct inet_sock *inet = inet_sk(sk);
1279	struct ipv6_pinfo *np = inet6_sk(sk);
1280	DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
1281	struct in6_addr *daddr, *final_p, final;
1282	struct ipv6_txoptions *opt = NULL;
1283	struct ipv6_txoptions *opt_to_free = NULL;
1284	struct ip6_flowlabel *flowlabel = NULL;
1285	struct flowi6 fl6;
1286	struct dst_entry *dst;
1287	struct ipcm6_cookie ipc6;
1288	int addr_len = msg->msg_namelen;
1289	bool connected = false;
1290	int ulen = len;
1291	int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
1292	int err;
1293	int is_udplite = IS_UDPLITE(sk);
1294	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
1295
1296	ipcm6_init(&ipc6);
1297	ipc6.gso_size = up->gso_size;
1298	ipc6.sockc.tsflags = sk->sk_tsflags;
1299	ipc6.sockc.mark = sk->sk_mark;
1300
1301	/* destination address check */
1302	if (sin6) {
1303		if (addr_len < offsetof(struct sockaddr, sa_data))
1304			return -EINVAL;
1305
1306		switch (sin6->sin6_family) {
1307		case AF_INET6:
1308			if (addr_len < SIN6_LEN_RFC2133)
1309				return -EINVAL;
1310			daddr = &sin6->sin6_addr;
1311			if (ipv6_addr_any(daddr) &&
1312			    ipv6_addr_v4mapped(&np->saddr))
1313				ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
1314						       daddr);
1315			break;
1316		case AF_INET:
1317			goto do_udp_sendmsg;
1318		case AF_UNSPEC:
1319			msg->msg_name = sin6 = NULL;
1320			msg->msg_namelen = addr_len = 0;
1321			daddr = NULL;
1322			break;
1323		default:
1324			return -EINVAL;
1325		}
1326	} else if (!up->pending) {
1327		if (sk->sk_state != TCP_ESTABLISHED)
1328			return -EDESTADDRREQ;
1329		daddr = &sk->sk_v6_daddr;
1330	} else
1331		daddr = NULL;
1332
1333	if (daddr) {
1334		if (ipv6_addr_v4mapped(daddr)) {
1335			struct sockaddr_in sin;
1336			sin.sin_family = AF_INET;
1337			sin.sin_port = sin6 ? sin6->sin6_port : inet->inet_dport;
1338			sin.sin_addr.s_addr = daddr->s6_addr32[3];
1339			msg->msg_name = &sin;
1340			msg->msg_namelen = sizeof(sin);
1341do_udp_sendmsg:
1342			if (__ipv6_only_sock(sk))
1343				return -ENETUNREACH;
1344			return udp_sendmsg(sk, msg, len);
1345		}
1346	}
1347
1348	if (up->pending == AF_INET)
1349		return udp_sendmsg(sk, msg, len);
1350
1351	/* Rough check on arithmetic overflow;
1352	   a better check is made in ip6_append_data().
1353	   */
1354	if (len > INT_MAX - sizeof(struct udphdr))
1355		return -EMSGSIZE;
1356
1357	getfrag  =  is_udplite ?  udplite_getfrag : ip_generic_getfrag;
1358	if (up->pending) {
1359		/*
1360		 * There are pending frames.
1361		 * The socket lock must be held while it's corked.
1362		 */
1363		lock_sock(sk);
1364		if (likely(up->pending)) {
1365			if (unlikely(up->pending != AF_INET6)) {
1366				release_sock(sk);
1367				return -EAFNOSUPPORT;
1368			}
1369			dst = NULL;
1370			goto do_append_data;
1371		}
1372		release_sock(sk);
1373	}
1374	ulen += sizeof(struct udphdr);
1375
1376	memset(&fl6, 0, sizeof(fl6));
1377
1378	if (sin6) {
1379		if (sin6->sin6_port == 0)
1380			return -EINVAL;
1381
1382		fl6.fl6_dport = sin6->sin6_port;
1383		daddr = &sin6->sin6_addr;
1384
1385		if (np->sndflow) {
1386			fl6.flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK;
1387			if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
1388				flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
1389				if (IS_ERR(flowlabel))
1390					return -EINVAL;
1391			}
1392		}
1393
1394		/*
1395		 * Otherwise it will be difficult to maintain
1396		 * sk->sk_dst_cache.
1397		 */
1398		if (sk->sk_state == TCP_ESTABLISHED &&
1399		    ipv6_addr_equal(daddr, &sk->sk_v6_daddr))
1400			daddr = &sk->sk_v6_daddr;
1401
1402		if (addr_len >= sizeof(struct sockaddr_in6) &&
1403		    sin6->sin6_scope_id &&
1404		    __ipv6_addr_needs_scope_id(__ipv6_addr_type(daddr)))
1405			fl6.flowi6_oif = sin6->sin6_scope_id;
1406	} else {
1407		if (sk->sk_state != TCP_ESTABLISHED)
1408			return -EDESTADDRREQ;
1409
1410		fl6.fl6_dport = inet->inet_dport;
1411		daddr = &sk->sk_v6_daddr;
1412		fl6.flowlabel = np->flow_label;
1413		connected = true;
1414	}
1415
1416	if (!fl6.flowi6_oif)
1417		fl6.flowi6_oif = sk->sk_bound_dev_if;
1418
1419	if (!fl6.flowi6_oif)
1420		fl6.flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
1421
1422	fl6.flowi6_mark = ipc6.sockc.mark;
1423	fl6.flowi6_uid = sk->sk_uid;
1424
1425	if (msg->msg_controllen) {
1426		opt = &opt_space;
1427		memset(opt, 0, sizeof(struct ipv6_txoptions));
1428		opt->tot_len = sizeof(*opt);
1429		ipc6.opt = opt;
1430
1431		err = udp_cmsg_send(sk, msg, &ipc6.gso_size);
1432		if (err > 0)
1433			err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6,
1434						    &ipc6);
1435		if (err < 0) {
1436			fl6_sock_release(flowlabel);
1437			return err;
1438		}
1439		if ((fl6.flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) {
1440			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
1441			if (IS_ERR(flowlabel))
1442				return -EINVAL;
1443		}
1444		if (!(opt->opt_nflen|opt->opt_flen))
1445			opt = NULL;
1446		connected = false;
1447	}
1448	if (!opt) {
1449		opt = txopt_get(np);
1450		opt_to_free = opt;
1451	}
1452	if (flowlabel)
1453		opt = fl6_merge_options(&opt_space, flowlabel, opt);
1454	opt = ipv6_fixup_options(&opt_space, opt);
1455	ipc6.opt = opt;
1456
1457	fl6.flowi6_proto = sk->sk_protocol;
1458	fl6.daddr = *daddr;
1459	if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr))
1460		fl6.saddr = np->saddr;
1461	fl6.fl6_sport = inet->inet_sport;
1462
1463	if (cgroup_bpf_enabled && !connected) {
1464		err = BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk,
1465					   (struct sockaddr *)sin6, &fl6.saddr);
1466		if (err)
1467			goto out_no_dst;
1468		if (sin6) {
1469			if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
1470				/* BPF program rewrote the IPv6-only address to an
1471				 * IPv4-mapped one, which is currently unsupported.
1472				 */
1473				err = -ENOTSUPP;
1474				goto out_no_dst;
1475			}
1476			if (sin6->sin6_port == 0) {
1477				/* BPF program set invalid port. Reject it. */
1478				err = -EINVAL;
1479				goto out_no_dst;
1480			}
1481			fl6.fl6_dport = sin6->sin6_port;
1482			fl6.daddr = sin6->sin6_addr;
1483		}
1484	}
1485
1486	if (ipv6_addr_any(&fl6.daddr))
1487		fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
1488
1489	final_p = fl6_update_dst(&fl6, opt, &final);
1490	if (final_p)
1491		connected = false;
1492
1493	if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr)) {
1494		fl6.flowi6_oif = np->mcast_oif;
1495		connected = false;
1496	} else if (!fl6.flowi6_oif)
1497		fl6.flowi6_oif = np->ucast_oif;
1498
1499	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
1500
1501	if (ipc6.tclass < 0)
1502		ipc6.tclass = np->tclass;
1503
1504	fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel);
1505
1506	dst = ip6_sk_dst_lookup_flow(sk, &fl6, final_p, connected);
1507	if (IS_ERR(dst)) {
1508		err = PTR_ERR(dst);
1509		dst = NULL;
1510		goto out;
1511	}
1512
1513	if (ipc6.hlimit < 0)
1514		ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
1515
1516	if (msg->msg_flags&MSG_CONFIRM)
1517		goto do_confirm;
1518back_from_confirm:
1519
1520	/* Lockless fast path for the non-corking case */
1521	if (!corkreq) {
1522		struct inet_cork_full cork;
1523		struct sk_buff *skb;
1524
1525		skb = ip6_make_skb(sk, getfrag, msg, ulen,
1526				   sizeof(struct udphdr), &ipc6,
1527				   &fl6, (struct rt6_info *)dst,
1528				   msg->msg_flags, &cork);
1529		err = PTR_ERR(skb);
1530		if (!IS_ERR_OR_NULL(skb))
1531			err = udp_v6_send_skb(skb, &fl6, &cork.base);
1532		goto out;
1533	}
1534
1535	lock_sock(sk);
1536	if (unlikely(up->pending)) {
1537		/* The socket is already corked while preparing it. */
1538		/* ... which is an evident application bug. --ANK */
1539		release_sock(sk);
1540
1541		net_dbg_ratelimited("udp cork app bug 2\n");
1542		err = -EINVAL;
1543		goto out;
1544	}
1545
1546	up->pending = AF_INET6;
1547
1548do_append_data:
1549	if (ipc6.dontfrag < 0)
1550		ipc6.dontfrag = np->dontfrag;
1551	up->len += ulen;
1552	err = ip6_append_data(sk, getfrag, msg, ulen, sizeof(struct udphdr),
1553			      &ipc6, &fl6, (struct rt6_info *)dst,
1554			      corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
1555	if (err)
1556		udp_v6_flush_pending_frames(sk);
1557	else if (!corkreq)
1558		err = udp_v6_push_pending_frames(sk);
1559	else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
1560		up->pending = 0;
1561
1562	if (err > 0)
1563		err = np->recverr ? net_xmit_errno(err) : 0;
1564	release_sock(sk);
1565
1566out:
1567	dst_release(dst);
1568out_no_dst:
1569	fl6_sock_release(flowlabel);
1570	txopt_put(opt_to_free);
1571	if (!err)
1572		return len;
1573	/*
1574	 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space.  Reporting
1575	 * ENOBUFS might not be good (it's not tunable per se), but otherwise
1576	 * we don't have a good statistic (IpOutDiscards but it can be too many
1577	 * things).  We could add another new stat but at least for now that
1578	 * seems like overkill.
1579	 */
1580	if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
1581		UDP6_INC_STATS(sock_net(sk),
1582			       UDP_MIB_SNDBUFERRORS, is_udplite);
1583	}
1584	return err;
1585
1586do_confirm:
1587	if (msg->msg_flags & MSG_PROBE)
1588		dst_confirm_neigh(dst, &fl6.daddr);
1589	if (!(msg->msg_flags&MSG_PROBE) || len)
1590		goto back_from_confirm;
1591	err = 0;
1592	goto out;
1593}
1594
1595void udpv6_destroy_sock(struct sock *sk)
1596{
1597	struct udp_sock *up = udp_sk(sk);
1598	lock_sock(sk);
1599	udp_v6_flush_pending_frames(sk);
1600	release_sock(sk);
1601
1602	if (static_branch_unlikely(&udpv6_encap_needed_key)) {
1603		if (up->encap_type) {
1604			void (*encap_destroy)(struct sock *sk);
1605			encap_destroy = READ_ONCE(up->encap_destroy);
1606			if (encap_destroy)
1607				encap_destroy(sk);
1608		}
1609		if (up->encap_enabled)
1610			static_branch_dec(&udpv6_encap_needed_key);
1611	}
1612
1613	inet6_destroy_sock(sk);
1614}
1615
1616/*
1617 *	Socket option code for UDP
1618 */
1619int udpv6_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
1620		     unsigned int optlen)
1621{
1622	if (level == SOL_UDP  ||  level == SOL_UDPLITE)
1623		return udp_lib_setsockopt(sk, level, optname,
1624					  optval, optlen,
1625					  udp_v6_push_pending_frames);
1626	return ipv6_setsockopt(sk, level, optname, optval, optlen);
1627}
1628
1629int udpv6_getsockopt(struct sock *sk, int level, int optname,
1630		     char __user *optval, int __user *optlen)
1631{
1632	if (level == SOL_UDP  ||  level == SOL_UDPLITE)
1633		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
1634	return ipv6_getsockopt(sk, level, optname, optval, optlen);
1635}
1636
1637/* thinking of making this const? Don't.
1638 * early_demux can change based on sysctl.
1639 */
1640static struct inet6_protocol udpv6_protocol = {
1641	.early_demux	=	udp_v6_early_demux,
1642	.early_demux_handler =  udp_v6_early_demux,
1643	.handler	=	udpv6_rcv,
1644	.err_handler	=	udpv6_err,
1645	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
1646};
1647
1648/* ------------------------------------------------------------------------ */
1649#ifdef CONFIG_PROC_FS
1650int udp6_seq_show(struct seq_file *seq, void *v)
1651{
1652	if (v == SEQ_START_TOKEN) {
1653		seq_puts(seq, IPV6_SEQ_DGRAM_HEADER);
1654	} else {
1655		int bucket = ((struct udp_iter_state *)seq->private)->bucket;
1656		struct inet_sock *inet = inet_sk(v);
1657		__u16 srcp = ntohs(inet->inet_sport);
1658		__u16 destp = ntohs(inet->inet_dport);
1659		__ip6_dgram_sock_seq_show(seq, v, srcp, destp,
1660					  udp_rqueue_get(v), bucket);
1661	}
1662	return 0;
1663}
1664
1665const struct seq_operations udp6_seq_ops = {
1666	.start		= udp_seq_start,
1667	.next		= udp_seq_next,
1668	.stop		= udp_seq_stop,
1669	.show		= udp6_seq_show,
1670};
1671EXPORT_SYMBOL(udp6_seq_ops);
1672
1673static struct udp_seq_afinfo udp6_seq_afinfo = {
1674	.family		= AF_INET6,
1675	.udp_table	= &udp_table,
1676};
1677
1678int __net_init udp6_proc_init(struct net *net)
1679{
1680	if (!proc_create_net_data("udp6", 0444, net->proc_net, &udp6_seq_ops,
1681			sizeof(struct udp_iter_state), &udp6_seq_afinfo))
1682		return -ENOMEM;
1683	return 0;
1684}
1685
1686void udp6_proc_exit(struct net *net)
1687{
1688	remove_proc_entry("udp6", net->proc_net);
1689}
1690#endif /* CONFIG_PROC_FS */
1691
1692/* ------------------------------------------------------------------------ */
1693
1694struct proto udpv6_prot = {
1695	.name			= "UDPv6",
1696	.owner			= THIS_MODULE,
1697	.close			= udp_lib_close,
1698	.pre_connect		= udpv6_pre_connect,
1699	.connect		= ip6_datagram_connect,
1700	.disconnect		= udp_disconnect,
1701	.ioctl			= udp_ioctl,
1702	.init			= udp_init_sock,
1703	.destroy		= udpv6_destroy_sock,
1704	.setsockopt		= udpv6_setsockopt,
1705	.getsockopt		= udpv6_getsockopt,
1706	.sendmsg		= udpv6_sendmsg,
1707	.recvmsg		= udpv6_recvmsg,
1708	.release_cb		= ip6_datagram_release_cb,
1709	.hash			= udp_lib_hash,
1710	.unhash			= udp_lib_unhash,
1711	.rehash			= udp_v6_rehash,
1712	.get_port		= udp_v6_get_port,
1713	.memory_allocated	= &udp_memory_allocated,
1714	.sysctl_mem		= sysctl_udp_mem,
1715	.sysctl_wmem_offset     = offsetof(struct net, ipv4.sysctl_udp_wmem_min),
1716	.sysctl_rmem_offset     = offsetof(struct net, ipv4.sysctl_udp_rmem_min),
1717	.obj_size		= sizeof(struct udp6_sock),
1718	.h.udp_table		= &udp_table,
1719	.diag_destroy		= udp_abort,
1720};
1721
1722static struct inet_protosw udpv6_protosw = {
1723	.type =      SOCK_DGRAM,
1724	.protocol =  IPPROTO_UDP,
1725	.prot =      &udpv6_prot,
1726	.ops =       &inet6_dgram_ops,
1727	.flags =     INET_PROTOSW_PERMANENT,
1728};
1729
1730int __init udpv6_init(void)
1731{
1732	int ret;
1733
1734	ret = inet6_add_protocol(&udpv6_protocol, IPPROTO_UDP);
1735	if (ret)
1736		goto out;
1737
1738	ret = inet6_register_protosw(&udpv6_protosw);
1739	if (ret)
1740		goto out_udpv6_protocol;
1741out:
1742	return ret;
1743
1744out_udpv6_protocol:
1745	inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
1746	goto out;
1747}
1748
1749void udpv6_exit(void)
1750{
1751	inet6_unregister_protosw(&udpv6_protosw);
1752	inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
1753}
net/ipv6/udp.c (Linux v6.13.7)
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 *	UDP over IPv6
   4 *	Linux INET6 implementation
   5 *
   6 *	Authors:
   7 *	Pedro Roque		<roque@di.fc.ul.pt>
   8 *
   9 *	Based on linux/ipv4/udp.c
  10 *
  11 *	Fixes:
  12 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
  13 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
  14 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
  15 *					a single port at the same time.
  16 *      Kazunori MIYAZAWA @USAGI:       change process style to use ip6_append_data
  17 *      YOSHIFUJI Hideaki @USAGI:	convert /proc/net/udp6 to seq_file.
  18 */
  19
  20#include <linux/bpf-cgroup.h>
  21#include <linux/errno.h>
  22#include <linux/types.h>
  23#include <linux/socket.h>
  24#include <linux/sockios.h>
  25#include <linux/net.h>
  26#include <linux/in6.h>
  27#include <linux/netdevice.h>
  28#include <linux/if_arp.h>
  29#include <linux/ipv6.h>
  30#include <linux/icmpv6.h>
  31#include <linux/init.h>
  32#include <linux/module.h>
  33#include <linux/skbuff.h>
  34#include <linux/slab.h>
  35#include <linux/uaccess.h>
  36#include <linux/indirect_call_wrapper.h>
  37#include <trace/events/udp.h>
  38
  39#include <net/addrconf.h>
  40#include <net/ndisc.h>
  41#include <net/protocol.h>
  42#include <net/transp_v6.h>
  43#include <net/ip6_route.h>
  44#include <net/raw.h>
  45#include <net/seg6.h>
  46#include <net/tcp_states.h>
  47#include <net/ip6_checksum.h>
  48#include <net/ip6_tunnel.h>
  49#include <net/xfrm.h>
  50#include <net/inet_hashtables.h>
  51#include <net/inet6_hashtables.h>
  52#include <net/busy_poll.h>
  53#include <net/sock_reuseport.h>
  54#include <net/gro.h>
  55
  56#include <linux/proc_fs.h>
  57#include <linux/seq_file.h>
  58#include <trace/events/skb.h>
  59#include "udp_impl.h"
  60
  61static void udpv6_destruct_sock(struct sock *sk)
  62{
  63	udp_destruct_common(sk);
  64	inet6_sock_destruct(sk);
  65}
  66
  67int udpv6_init_sock(struct sock *sk)
  68{
  69	udp_lib_init_sock(sk);
  70	sk->sk_destruct = udpv6_destruct_sock;
  71	set_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags);
  72	return 0;
  73}
  74
  75INDIRECT_CALLABLE_SCOPE
  76u32 udp6_ehashfn(const struct net *net,
  77		 const struct in6_addr *laddr,
  78		 const u16 lport,
  79		 const struct in6_addr *faddr,
  80		 const __be16 fport)
  81{
  82	u32 lhash, fhash;
  83
  84	net_get_random_once(&udp6_ehash_secret,
  85			    sizeof(udp6_ehash_secret));
  86	net_get_random_once(&udp_ipv6_hash_secret,
  87			    sizeof(udp_ipv6_hash_secret));
  88
  89	lhash = (__force u32)laddr->s6_addr32[3];
  90	fhash = __ipv6_addr_jhash(faddr, udp_ipv6_hash_secret);
  91
  92	return __inet6_ehashfn(lhash, lport, fhash, fport,
  93			       udp6_ehash_secret + net_hash_mix(net));
  94}
  95
  96int udp_v6_get_port(struct sock *sk, unsigned short snum)
  97{
  98	unsigned int hash2_nulladdr =
  99		ipv6_portaddr_hash(sock_net(sk), &in6addr_any, snum);
 100	unsigned int hash2_partial =
 101		ipv6_portaddr_hash(sock_net(sk), &sk->sk_v6_rcv_saddr, 0);
 102
 103	/* precompute partial secondary hash */
 104	udp_sk(sk)->udp_portaddr_hash = hash2_partial;
 105	return udp_lib_get_port(sk, snum, hash2_nulladdr);
 106}
 107
 108void udp_v6_rehash(struct sock *sk)
 109{
 110	u16 new_hash = ipv6_portaddr_hash(sock_net(sk),
 111					  &sk->sk_v6_rcv_saddr,
 112					  inet_sk(sk)->inet_num);
 113	u16 new_hash4;
 114
 115	if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr)) {
 116		new_hash4 = udp_ehashfn(sock_net(sk),
 117					sk->sk_rcv_saddr, sk->sk_num,
 118					sk->sk_daddr, sk->sk_dport);
 119	} else {
 120		new_hash4 = udp6_ehashfn(sock_net(sk),
 121					 &sk->sk_v6_rcv_saddr, sk->sk_num,
 122					 &sk->sk_v6_daddr, sk->sk_dport);
 123	}
 124
 125	udp_lib_rehash(sk, new_hash, new_hash4);
 126}
 127
 128static int compute_score(struct sock *sk, const struct net *net,
 129			 const struct in6_addr *saddr, __be16 sport,
 130			 const struct in6_addr *daddr, unsigned short hnum,
 131			 int dif, int sdif)
 132{
 133	int bound_dev_if, score;
 134	struct inet_sock *inet;
 135	bool dev_match;
 136
 137	if (!net_eq(sock_net(sk), net) ||
 138	    udp_sk(sk)->udp_port_hash != hnum ||
 139	    sk->sk_family != PF_INET6)
 140		return -1;
 141
 142	if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
 143		return -1;
 144
 145	score = 0;
 146	inet = inet_sk(sk);
 147
 148	if (inet->inet_dport) {
 149		if (inet->inet_dport != sport)
 150			return -1;
 151		score++;
 152	}
 153
 154	if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
 155		if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr))
 156			return -1;
 157		score++;
 158	}
 159
 160	bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
 161	dev_match = udp_sk_bound_dev_eq(net, bound_dev_if, dif, sdif);
 162	if (!dev_match)
 163		return -1;
 164	if (bound_dev_if)
 165		score++;
 166
 167	if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
 168		score++;
 169
 170	return score;
 171}
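
/* Editor's note (illustrative, not part of the original file): the score
 * is additive, one point per matched optional attribute, so for a packet
 * that several sockets could accept:
 *
 *	connected socket, bound to the device, on the RX CPU:	4
 *	connected socket, no device binding, other CPU:		2
 *	socket matching only local address and port:		0
 *	any hard mismatch (netns, port, family, address):	-1
 *
 * Callers keep the candidate with the highest score, so the most
 * specific match wins.
 */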
 172
 173/**
 174 * udp6_lib_lookup1() - Simplified lookup using primary hash (destination port)
 175 * @net:	Network namespace
 176 * @saddr:	Source address, network order
 177 * @sport:	Source port, network order
 178 * @daddr:	Destination address, network order
 179 * @hnum:	Destination port, host order
 180 * @dif:	Destination interface index
 181 * @sdif:	Destination bridge port index, if relevant
 182 * @udptable:	Set of UDP hash tables
 183 *
 184 * Simplified lookup used as a fallback if no sockets are found, due to a
 185 * potential race between a (receive) address change and a lookup happening
 186 * before the rehash operation. This function ignores SO_REUSEPORT groups while
 187 * scoring result sockets, because if we have one, we don't need the fallback.
 188 *
 189 * Called under rcu_read_lock().
 190 *
 191 * Return: socket with highest matching score if any, NULL if none
 192 */
 193static struct sock *udp6_lib_lookup1(const struct net *net,
 194				     const struct in6_addr *saddr, __be16 sport,
 195				     const struct in6_addr *daddr,
 196				     unsigned int hnum, int dif, int sdif,
 197				     const struct udp_table *udptable)
 198{
 199	unsigned int slot = udp_hashfn(net, hnum, udptable->mask);
 200	struct udp_hslot *hslot = &udptable->hash[slot];
 201	struct sock *sk, *result = NULL;
 202	int score, badness = 0;
 203
 204	sk_for_each_rcu(sk, &hslot->head) {
 205		score = compute_score(sk, net,
 206				      saddr, sport, daddr, hnum, dif, sdif);
 207		if (score > badness) {
 208			result = sk;
 209			badness = score;
 210		}
 211	}
 212
 213	return result;
 214}
 215
 216/* called with rcu_read_lock() */
 217static struct sock *udp6_lib_lookup2(const struct net *net,
 218		const struct in6_addr *saddr, __be16 sport,
 219		const struct in6_addr *daddr, unsigned int hnum,
 220		int dif, int sdif, struct udp_hslot *hslot2,
 221		struct sk_buff *skb)
 222{
 223	struct sock *sk, *result;
 224	int score, badness;
 225	bool need_rescore;
 226
 227	result = NULL;
 228	badness = -1;
 229	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
 230		need_rescore = false;
 231rescore:
 232		score = compute_score(need_rescore ? result : sk, net, saddr,
 233				      sport, daddr, hnum, dif, sdif);
 234		if (score > badness) {
 235			badness = score;
 236
 237			if (need_rescore)
 238				continue;
 239
 240			if (sk->sk_state == TCP_ESTABLISHED) {
 241				result = sk;
 242				continue;
 243			}
 244
 245			result = inet6_lookup_reuseport(net, sk, skb, sizeof(struct udphdr),
 246							saddr, sport, daddr, hnum, udp6_ehashfn);
 247			if (!result) {
 248				result = sk;
 249				continue;
 250			}
 251
 252			/* Fall back to scoring if group has connections */
 253			if (!reuseport_has_conns(sk))
 254				return result;
 255
 256			/* Reuseport logic returned an error, keep original score. */
 257			if (IS_ERR(result))
 258				continue;
 259
 260			/* compute_score() is too long a function to be
 261			 * inlined, and calling it again here yields
 262			 * measurable overhead for some workloads.
 263			 * Work around it by jumping backwards to
 264			 * rescore 'result'.
 265			 */
 266			need_rescore = true;
 267			goto rescore;
 268		}
 269	}
 270	return result;
 271}
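
/* Editor's note (illustrative, not part of the original file): the
 * inet6_lookup_reuseport() result is consumed in three ways above.
 * NULL means no reuseport selection happened, so the scored socket is
 * kept; an ERR_PTR keeps the previous score and moves on; a valid
 * socket is returned immediately, unless the group also contains
 * connected sockets, in which case the winner is rescored like any
 * other candidate.
 */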
 272
 273#if IS_ENABLED(CONFIG_BASE_SMALL)
 274static struct sock *udp6_lib_lookup4(const struct net *net,
 275				     const struct in6_addr *saddr, __be16 sport,
 276				     const struct in6_addr *daddr,
 277				     unsigned int hnum, int dif, int sdif,
 278				     struct udp_table *udptable)
 279{
 280	return NULL;
 281}
 282
 283static void udp6_hash4(struct sock *sk)
 284{
 285}
 286#else /* !CONFIG_BASE_SMALL */
 287static struct sock *udp6_lib_lookup4(const struct net *net,
 288				     const struct in6_addr *saddr, __be16 sport,
 289				     const struct in6_addr *daddr,
 290				     unsigned int hnum, int dif, int sdif,
 291				     struct udp_table *udptable)
 292{
 293	const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
 294	const struct hlist_nulls_node *node;
 295	struct udp_hslot *hslot4;
 296	unsigned int hash4, slot;
 297	struct udp_sock *up;
 298	struct sock *sk;
 299
 300	hash4 = udp6_ehashfn(net, daddr, hnum, saddr, sport);
 301	slot = hash4 & udptable->mask;
 302	hslot4 = &udptable->hash4[slot];
 303
 304begin:
 305	udp_lrpa_for_each_entry_rcu(up, node, &hslot4->nulls_head) {
 306		sk = (struct sock *)up;
 307		if (inet6_match(net, sk, saddr, daddr, ports, dif, sdif))
 308			return sk;
 309	}
 310
 311	/* if the nulls value we got at the end of this lookup is not the
 312	 * expected one, we must restart lookup. We probably met an item that
 313	 * was moved to another chain due to rehash.
 314	 */
 315	if (get_nulls_value(node) != slot)
 316		goto begin;
 317
 318	return NULL;
 319}
 320
 321static void udp6_hash4(struct sock *sk)
 322{
 323	struct net *net = sock_net(sk);
 324	unsigned int hash;
 325
 326	if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr)) {
 327		udp4_hash4(sk);
 328		return;
 329	}
 330
 331	if (sk_unhashed(sk) || ipv6_addr_any(&sk->sk_v6_rcv_saddr))
 332		return;
 333
 334	hash = udp6_ehashfn(net, &sk->sk_v6_rcv_saddr, sk->sk_num,
 335			    &sk->sk_v6_daddr, sk->sk_dport);
 336
 337	udp_lib_hash4(sk, hash);
 338}
 339#endif /* CONFIG_BASE_SMALL */
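
/* Editor's note (illustrative, not part of the original file): under
 * CONFIG_BASE_SMALL the 4-tuple ("hash4") tables are compiled out to
 * save memory.  The stub lookup above always returns NULL, so
 * __udp6_lib_lookup() below falls through to the hash2 paths.
 */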
 340
 341/* rcu_read_lock() must be held */
 342struct sock *__udp6_lib_lookup(const struct net *net,
 343			       const struct in6_addr *saddr, __be16 sport,
 344			       const struct in6_addr *daddr, __be16 dport,
 345			       int dif, int sdif, struct udp_table *udptable,
 346			       struct sk_buff *skb)
 347{
 348	unsigned short hnum = ntohs(dport);
 349	struct udp_hslot *hslot2;
 350	struct sock *result, *sk;
 351	unsigned int hash2;
 352
 353	hash2 = ipv6_portaddr_hash(net, daddr, hnum);
 354	hslot2 = udp_hashslot2(udptable, hash2);
 355
 356	if (udp_has_hash4(hslot2)) {
 357		result = udp6_lib_lookup4(net, saddr, sport, daddr, hnum,
 358					  dif, sdif, udptable);
 359		if (result) /* udp6_lib_lookup4 returns sk or NULL */
 360			return result;
 361	}
 362
 363	/* Lookup connected or non-wildcard sockets */
 364	result = udp6_lib_lookup2(net, saddr, sport,
 365				  daddr, hnum, dif, sdif,
 366				  hslot2, skb);
 367	if (!IS_ERR_OR_NULL(result) && result->sk_state == TCP_ESTABLISHED)
 368		goto done;
 369
 370	/* Lookup redirect from BPF */
 371	if (static_branch_unlikely(&bpf_sk_lookup_enabled) &&
 372	    udptable == net->ipv4.udp_table) {
 373		sk = inet6_lookup_run_sk_lookup(net, IPPROTO_UDP, skb, sizeof(struct udphdr),
 374						saddr, sport, daddr, hnum, dif,
 375						udp6_ehashfn);
 376		if (sk) {
 377			result = sk;
 378			goto done;
 379		}
 380	}
 381
 382	/* Got non-wildcard socket or error on first lookup */
 383	if (result)
 384		goto done;
 385
 386	/* Lookup wildcard sockets */
 387	hash2 = ipv6_portaddr_hash(net, &in6addr_any, hnum);
 388	hslot2 = udp_hashslot2(udptable, hash2);
 389
 390	result = udp6_lib_lookup2(net, saddr, sport,
 391				  &in6addr_any, hnum, dif, sdif,
 392				  hslot2, skb);
 393	if (!IS_ERR_OR_NULL(result))
 394		goto done;
 395
 396	/* Cover address change/lookup/rehash race: see __udp4_lib_lookup() */
 397	result = udp6_lib_lookup1(net, saddr, sport, daddr, hnum, dif, sdif,
 398				  udptable);
 399
 400done:
 401	if (IS_ERR(result))
 402		return NULL;
 403	return result;
 404}
 405EXPORT_SYMBOL_GPL(__udp6_lib_lookup);
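
/* Editor's note (illustrative, not part of the original file): the
 * lookup above proceeds from most to least specific:
 *
 *	1. hash4: exact 4-tuple match for connected sockets
 *	2. hash2: scored lookup on (daddr, port); a TCP_ESTABLISHED
 *	   result is final
 *	3. BPF sk_lookup redirection (kernel udp_table only), which may
 *	   override a not-yet-final hash2 result
 *	4. hash2: scored wildcard lookup on (in6addr_any, port)
 *	5. hash1: port-only fallback covering the rehash race
 */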
 406
 407static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb,
 408					  __be16 sport, __be16 dport,
 409					  struct udp_table *udptable)
 410{
 411	const struct ipv6hdr *iph = ipv6_hdr(skb);
 412
 413	return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
 414				 &iph->daddr, dport, inet6_iif(skb),
 415				 inet6_sdif(skb), udptable, skb);
 416}
 417
 418struct sock *udp6_lib_lookup_skb(const struct sk_buff *skb,
 419				 __be16 sport, __be16 dport)
 420{
 421	const u16 offset = NAPI_GRO_CB(skb)->network_offsets[skb->encapsulation];
 422	const struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + offset);
 423	struct net *net = dev_net(skb->dev);
 424	int iif, sdif;
 425
 426	inet6_get_iif_sdif(skb, &iif, &sdif);
 427
 428	return __udp6_lib_lookup(net, &iph->saddr, sport,
 429				 &iph->daddr, dport, iif,
 430				 sdif, net->ipv4.udp_table, NULL);
 431}
 432
 433/* Must be called under rcu_read_lock().
 434 * Does increment socket refcount.
 435 */
 436#if IS_ENABLED(CONFIG_NF_TPROXY_IPV6) || IS_ENABLED(CONFIG_NF_SOCKET_IPV6)
 437struct sock *udp6_lib_lookup(const struct net *net, const struct in6_addr *saddr, __be16 sport,
 438			     const struct in6_addr *daddr, __be16 dport, int dif)
 439{
 440	struct sock *sk;
 441
 442	sk =  __udp6_lib_lookup(net, saddr, sport, daddr, dport,
 443				dif, 0, net->ipv4.udp_table, NULL);
 444	if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
 445		sk = NULL;
 446	return sk;
 447}
 448EXPORT_SYMBOL_GPL(udp6_lib_lookup);
 449#endif
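
/* Editor's sketch (illustrative, not part of the original file): the
 * expected caller pattern for the refcounted lookup above (e.g. from a
 * netfilter TPROXY context).  'example_lookup_user' is hypothetical.
 */
#if 0
static void example_lookup_user(struct net *net,
				const struct in6_addr *saddr, __be16 sport,
				const struct in6_addr *daddr, __be16 dport,
				int dif)
{
	struct sock *sk;

	rcu_read_lock();
	sk = udp6_lib_lookup(net, saddr, sport, daddr, dport, dif);
	rcu_read_unlock();

	if (!sk)
		return;
	/* ... use the socket ... */
	sock_put(sk);	/* drop the reference taken by the lookup */
}
#endif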
 450
 451/* do not use the scratch area len for jumbograms: their length exceeds the
 452 * scratch area space; note that the IP6CB flags are still in the first
 453 * cacheline, so checking for jumbograms is cheap
 454 */
 455static int udp6_skb_len(struct sk_buff *skb)
 456{
 457	return unlikely(inet6_is_jumbogram(skb)) ? skb->len : udp_skb_len(skb);
 458}
 459
 460/*
 461 *	This should be easy: if there is something there we
 462 *	return it; otherwise we block.
 463 */
 464
 465int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
 466		  int flags, int *addr_len)
 467{
 468	struct ipv6_pinfo *np = inet6_sk(sk);
 469	struct inet_sock *inet = inet_sk(sk);
 470	struct sk_buff *skb;
 471	unsigned int ulen, copied;
 472	int off, err, peeking = flags & MSG_PEEK;
 473	int is_udplite = IS_UDPLITE(sk);
 474	struct udp_mib __percpu *mib;
 475	bool checksum_valid = false;
 476	int is_udp4;
 477
 478	if (flags & MSG_ERRQUEUE)
 479		return ipv6_recv_error(sk, msg, len, addr_len);
 480
 481	if (np->rxpmtu && np->rxopt.bits.rxpmtu)
 482		return ipv6_recv_rxpmtu(sk, msg, len, addr_len);
 483
 484try_again:
 485	off = sk_peek_offset(sk, flags);
 486	skb = __skb_recv_udp(sk, flags, &off, &err);
 487	if (!skb)
 488		return err;
 489
 490	ulen = udp6_skb_len(skb);
 491	copied = len;
 492	if (copied > ulen - off)
 493		copied = ulen - off;
 494	else if (copied < ulen)
 495		msg->msg_flags |= MSG_TRUNC;
 496
 497	is_udp4 = (skb->protocol == htons(ETH_P_IP));
 498	mib = __UDPX_MIB(sk, is_udp4);
 499
 500	/*
 501	 * If checksum is needed at all, try to do it while copying the
 502	 * data.  If the data is truncated, or if we only want a partial
 503	 * coverage checksum (UDP-Lite), do it before the copy.
 504	 */
 505
 506	if (copied < ulen || peeking ||
 507	    (is_udplite && UDP_SKB_CB(skb)->partial_cov)) {
 508		checksum_valid = udp_skb_csum_unnecessary(skb) ||
 509				!__udp_lib_checksum_complete(skb);
 510		if (!checksum_valid)
 511			goto csum_copy_err;
 512	}
 513
 514	if (checksum_valid || udp_skb_csum_unnecessary(skb)) {
 515		if (udp_skb_is_linear(skb))
 516			err = copy_linear_skb(skb, copied, off, &msg->msg_iter);
 517		else
 518			err = skb_copy_datagram_msg(skb, off, msg, copied);
 519	} else {
 520		err = skb_copy_and_csum_datagram_msg(skb, off, msg);
 521		if (err == -EINVAL)
 522			goto csum_copy_err;
 523	}
 524	if (unlikely(err)) {
 525		if (!peeking) {
 526			atomic_inc(&sk->sk_drops);
 527			SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
 528		}
 529		kfree_skb(skb);
 530		return err;
 531	}
 532	if (!peeking)
 533		SNMP_INC_STATS(mib, UDP_MIB_INDATAGRAMS);
 534
 535	sock_recv_cmsgs(msg, sk, skb);
 536
 537	/* Copy the address. */
 538	if (msg->msg_name) {
 539		DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
 540		sin6->sin6_family = AF_INET6;
 541		sin6->sin6_port = udp_hdr(skb)->source;
 542		sin6->sin6_flowinfo = 0;
 543
 544		if (is_udp4) {
 545			ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
 546					       &sin6->sin6_addr);
 547			sin6->sin6_scope_id = 0;
 548		} else {
 549			sin6->sin6_addr = ipv6_hdr(skb)->saddr;
 550			sin6->sin6_scope_id =
 551				ipv6_iface_scope_id(&sin6->sin6_addr,
 552						    inet6_iif(skb));
 553		}
 554		*addr_len = sizeof(*sin6);
 555
 556		BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk,
 557						      (struct sockaddr *)sin6,
 558						      addr_len);
 559	}
 560
 561	if (udp_test_bit(GRO_ENABLED, sk))
 562		udp_cmsg_recv(msg, sk, skb);
 563
 564	if (np->rxopt.all)
 565		ip6_datagram_recv_common_ctl(sk, msg, skb);
 566
 567	if (is_udp4) {
 568		if (inet_cmsg_flags(inet))
 569			ip_cmsg_recv_offset(msg, sk, skb,
 570					    sizeof(struct udphdr), off);
 571	} else {
 572		if (np->rxopt.all)
 573			ip6_datagram_recv_specific_ctl(sk, msg, skb);
 574	}
 575
 576	err = copied;
 577	if (flags & MSG_TRUNC)
 578		err = ulen;
 579
 580	skb_consume_udp(sk, skb, peeking ? -err : err);
 581	return err;
 582
 583csum_copy_err:
 584	if (!__sk_queue_drop_skb(sk, &udp_sk(sk)->reader_queue, skb, flags,
 585				 udp_skb_destructor)) {
 586		SNMP_INC_STATS(mib, UDP_MIB_CSUMERRORS);
 587		SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
 588	}
 589	kfree_skb(skb);
 590
 591	/* starting over for a new packet, but check if we need to yield */
 592	cond_resched();
 593	msg->msg_flags &= ~MSG_TRUNC;
 594	goto try_again;
 595}
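
/* Editor's sketch (illustrative, not part of the original file;
 * userspace view): the MSG_TRUNC handling above reports a datagram's
 * full length even when the supplied buffer was smaller, so an
 * application can size a retry buffer without consuming the datagram:
 */
#if 0
#include <sys/socket.h>

static ssize_t example_next_datagram_len(int fd)
{
	char byte;

	/* MSG_PEEK leaves the datagram queued; with MSG_TRUNC the
	 * return value is the real datagram length, not the single
	 * byte copied.
	 */
	return recv(fd, &byte, sizeof(byte), MSG_PEEK | MSG_TRUNC);
}
#endif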
 596
 597DECLARE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
 598void udpv6_encap_enable(void)
 599{
 600	static_branch_inc(&udpv6_encap_needed_key);
 601}
 602EXPORT_SYMBOL(udpv6_encap_enable);
 603
 604/* Handler for tunnels with arbitrary destination ports: no socket lookup, go
 605 * through error handlers in encapsulations looking for a match.
 606 */
 607static int __udp6_lib_err_encap_no_sk(struct sk_buff *skb,
 608				      struct inet6_skb_parm *opt,
 609				      u8 type, u8 code, int offset, __be32 info)
 610{
 611	int i;
 612
 613	for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) {
 614		int (*handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
 615			       u8 type, u8 code, int offset, __be32 info);
 616		const struct ip6_tnl_encap_ops *encap;
 617
 618		encap = rcu_dereference(ip6tun_encaps[i]);
 619		if (!encap)
 620			continue;
 621		handler = encap->err_handler;
 622		if (handler && !handler(skb, opt, type, code, offset, info))
 623			return 0;
 624	}
 625
 626	return -ENOENT;
 627}
 628
 629/* Try to match ICMP errors to UDP tunnels by looking up a socket without
 630 * reversing source and destination port: this will match tunnels that force the
 631 * same destination port on both endpoints (e.g. VXLAN, GENEVE). Note that
 632 * lwtunnels might actually break this assumption by being configured with
 633 * different destination ports on endpoints; in this case we won't be able to
 634 * trace ICMP messages back to them.
 635 *
 636 * If this doesn't match any socket, probe tunnels with arbitrary destination
 637 * ports (e.g. FoU, GUE): there, the receiving socket is useless, as the port
 638 * we've sent packets to won't necessarily match the local destination port.
 639 *
 640 * Then ask the tunnel implementation to match the error against a valid
 641 * association.
 642 *
 643 * Return an error if we can't find a match, the socket if we need further
 644 * processing, zero otherwise.
 645 */
 646static struct sock *__udp6_lib_err_encap(struct net *net,
 647					 const struct ipv6hdr *hdr, int offset,
 648					 struct udphdr *uh,
 649					 struct udp_table *udptable,
 650					 struct sock *sk,
 651					 struct sk_buff *skb,
 652					 struct inet6_skb_parm *opt,
 653					 u8 type, u8 code, __be32 info)
 654{
 655	int (*lookup)(struct sock *sk, struct sk_buff *skb);
 656	int network_offset, transport_offset;
 657	struct udp_sock *up;
 658
 659	network_offset = skb_network_offset(skb);
 660	transport_offset = skb_transport_offset(skb);
 661
 662	/* Network header needs to point to the outer IPv6 header inside ICMP */
 663	skb_reset_network_header(skb);
 664
 665	/* Transport header needs to point to the UDP header */
 666	skb_set_transport_header(skb, offset);
 667
 668	if (sk) {
 669		up = udp_sk(sk);
 670
 671		lookup = READ_ONCE(up->encap_err_lookup);
 672		if (lookup && lookup(sk, skb))
 673			sk = NULL;
 674
 675		goto out;
 676	}
 677
 678	sk = __udp6_lib_lookup(net, &hdr->daddr, uh->source,
 679			       &hdr->saddr, uh->dest,
 680			       inet6_iif(skb), 0, udptable, skb);
 681	if (sk) {
 682		up = udp_sk(sk);
 683
 684		lookup = READ_ONCE(up->encap_err_lookup);
 685		if (!lookup || lookup(sk, skb))
 686			sk = NULL;
 687	}
 688
 689out:
 690	if (!sk) {
 691		sk = ERR_PTR(__udp6_lib_err_encap_no_sk(skb, opt, type, code,
 692							offset, info));
 693	}
 694
 695	skb_set_transport_header(skb, transport_offset);
 696	skb_set_network_header(skb, network_offset);
 697
 698	return sk;
 699}
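
/* Editor's sketch (illustrative, not part of the original file): the
 * encap_err_lookup() contract consumed above -- return 0 when the ICMP
 * payload belongs to one of this tunnel socket's associations, nonzero
 * otherwise.  'example_tunnel_owns' is hypothetical.
 */
#if 0
static int example_encap_err_lookup(struct sock *sk, struct sk_buff *skb)
{
	return example_tunnel_owns(sk, skb) ? 0 : -ENOENT;
}
#endif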
 700
 701int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 702		   u8 type, u8 code, int offset, __be32 info,
 703		   struct udp_table *udptable)
 704{
 705	struct ipv6_pinfo *np;
 706	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
 707	const struct in6_addr *saddr = &hdr->saddr;
 708	const struct in6_addr *daddr = seg6_get_daddr(skb, opt) ? : &hdr->daddr;
 709	struct udphdr *uh = (struct udphdr *)(skb->data+offset);
 710	bool tunnel = false;
 711	struct sock *sk;
 712	int harderr;
 713	int err;
 714	struct net *net = dev_net(skb->dev);
 715
 716	sk = __udp6_lib_lookup(net, daddr, uh->dest, saddr, uh->source,
 717			       inet6_iif(skb), inet6_sdif(skb), udptable, NULL);
 718
 719	if (!sk || READ_ONCE(udp_sk(sk)->encap_type)) {
 720		/* No socket for error: try tunnels before discarding */
 721		if (static_branch_unlikely(&udpv6_encap_needed_key)) {
 722			sk = __udp6_lib_err_encap(net, hdr, offset, uh,
 723						  udptable, sk, skb,
 724						  opt, type, code, info);
 725			if (!sk)
 726				return 0;
 727		} else
 728			sk = ERR_PTR(-ENOENT);
 729
 730		if (IS_ERR(sk)) {
 731			__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
 732					  ICMP6_MIB_INERRORS);
 733			return PTR_ERR(sk);
 734		}
 735
 736		tunnel = true;
 737	}
 738
 739	harderr = icmpv6_err_convert(type, code, &err);
 740	np = inet6_sk(sk);
 741
 742	if (type == ICMPV6_PKT_TOOBIG) {
 743		if (!ip6_sk_accept_pmtu(sk))
 744			goto out;
 745		ip6_sk_update_pmtu(skb, sk, info);
 746		if (READ_ONCE(np->pmtudisc) != IPV6_PMTUDISC_DONT)
 747			harderr = 1;
 748	}
 749	if (type == NDISC_REDIRECT) {
 750		if (tunnel) {
 751			ip6_redirect(skb, sock_net(sk), inet6_iif(skb),
 752				     READ_ONCE(sk->sk_mark), sk->sk_uid);
 753		} else {
 754			ip6_sk_redirect(skb, sk);
 755		}
 756		goto out;
 757	}
 758
 759	/* Tunnels don't have an application socket: don't pass errors back */
 760	if (tunnel) {
 761		if (udp_sk(sk)->encap_err_rcv)
 762			udp_sk(sk)->encap_err_rcv(sk, skb, err, uh->dest,
 763						  ntohl(info), (u8 *)(uh+1));
 764		goto out;
 765	}
 766
 767	if (!inet6_test_bit(RECVERR6, sk)) {
 768		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
 769			goto out;
 770	} else {
 771		ipv6_icmp_error(sk, skb, err, uh->dest, ntohl(info), (u8 *)(uh+1));
 772	}
 773
 774	sk->sk_err = err;
 775	sk_error_report(sk);
 776out:
 777	return 0;
 778}
 779
 780static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 781{
 782	int rc;
 783
 784	if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
 785		sock_rps_save_rxhash(sk, skb);
 786		sk_mark_napi_id(sk, skb);
 787		sk_incoming_cpu_update(sk);
 788	} else {
 789		sk_mark_napi_id_once(sk, skb);
 790	}
 791
 792	rc = __udp_enqueue_schedule_skb(sk, skb);
 793	if (rc < 0) {
 794		int is_udplite = IS_UDPLITE(sk);
 795		enum skb_drop_reason drop_reason;
 796
 797		/* Note that an ENOMEM error is charged twice */
 798		if (rc == -ENOMEM) {
 799			UDP6_INC_STATS(sock_net(sk),
 800					 UDP_MIB_RCVBUFERRORS, is_udplite);
 801			drop_reason = SKB_DROP_REASON_SOCKET_RCVBUFF;
 802		} else {
 803			UDP6_INC_STATS(sock_net(sk),
 804				       UDP_MIB_MEMERRORS, is_udplite);
 805			drop_reason = SKB_DROP_REASON_PROTO_MEM;
 806		}
 807		UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
 808		trace_udp_fail_queue_rcv_skb(rc, sk, skb);
 809		sk_skb_reason_drop(sk, skb, drop_reason);
 810		return -1;
 811	}
 812
 813	return 0;
 814}
 815
 816static __inline__ int udpv6_err(struct sk_buff *skb,
 817				struct inet6_skb_parm *opt, u8 type,
 818				u8 code, int offset, __be32 info)
 819{
 820	return __udp6_lib_err(skb, opt, type, code, offset, info,
 821			      dev_net(skb->dev)->ipv4.udp_table);
 822}
 823
 824static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
 825{
 826	enum skb_drop_reason drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;
 827	struct udp_sock *up = udp_sk(sk);
 828	int is_udplite = IS_UDPLITE(sk);
 829
 830	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
 831		drop_reason = SKB_DROP_REASON_XFRM_POLICY;
 832		goto drop;
 833	}
 834	nf_reset_ct(skb);
 835
 836	if (static_branch_unlikely(&udpv6_encap_needed_key) &&
 837	    READ_ONCE(up->encap_type)) {
 838		int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
 839
 840		/*
 841		 * This is an encapsulation socket so pass the skb to
 842		 * the socket's udp_encap_rcv() hook. Otherwise, just
 843		 * fall through and pass it up to the UDP socket.
 844		 * up->encap_rcv() returns the following values:
 845		 * =0 if skb was successfully passed to the encap
 846		 *    handler or was discarded by it.
 847		 * >0 if skb should be passed on to UDP.
 848		 * <0 if skb should be resubmitted as proto -N
 849		 */
 850
 851		/* if we're overly short, let UDP handle it */
 852		encap_rcv = READ_ONCE(up->encap_rcv);
 853		if (encap_rcv) {
 854			int ret;
 855
 856			/* Verify checksum before giving to encap */
 857			if (udp_lib_checksum_complete(skb))
 858				goto csum_error;
 859
 860			ret = encap_rcv(sk, skb);
 861			if (ret <= 0) {
 862				__UDP6_INC_STATS(sock_net(sk),
 863						 UDP_MIB_INDATAGRAMS,
 864						 is_udplite);
 865				return -ret;
 866			}
 867		}
 868
 869		/* FALLTHROUGH -- it's a UDP packet */
 870	}
 871
 872	/*
 873	 * UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c).
 874	 */
 875	if (udp_test_bit(UDPLITE_RECV_CC, sk) && UDP_SKB_CB(skb)->partial_cov) {
 876		u16 pcrlen = READ_ONCE(up->pcrlen);
 877
 878		if (pcrlen == 0) {          /* full coverage was set  */
 879			net_dbg_ratelimited("UDPLITE6: partial coverage %d while full coverage %d requested\n",
 880					    UDP_SKB_CB(skb)->cscov, skb->len);
 881			goto drop;
 882		}
 883		if (UDP_SKB_CB(skb)->cscov < pcrlen) {
 884			net_dbg_ratelimited("UDPLITE6: coverage %d too small, need min %d\n",
 885					    UDP_SKB_CB(skb)->cscov, pcrlen);
 886			goto drop;
 887		}
 888	}
 889
 890	prefetch(&sk->sk_rmem_alloc);
 891	if (rcu_access_pointer(sk->sk_filter) &&
 892	    udp_lib_checksum_complete(skb))
 893		goto csum_error;
 894
 895	if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr))) {
 896		drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
 897		goto drop;
 898	}
 899
 900	udp_csum_pull_header(skb);
 901
 902	skb_dst_drop(skb);
 903
 904	return __udpv6_queue_rcv_skb(sk, skb);
 905
 906csum_error:
 907	drop_reason = SKB_DROP_REASON_UDP_CSUM;
 908	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
 909drop:
 910	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
 911	atomic_inc(&sk->sk_drops);
 912	sk_skb_reason_drop(sk, skb, drop_reason);
 913	return -1;
 914}
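
/* Editor's sketch (illustrative, not part of the original file): a
 * minimal encap_rcv() honouring the return-value contract documented
 * above.  'example_parse' is hypothetical.
 */
#if 0
static int example_encap_rcv(struct sock *sk, struct sk_buff *skb)
{
	if (!example_parse(skb))
		return 1;	/* not for the encap: process as plain UDP */

	/* handled: the skb must not be used after returning 0 */
	consume_skb(skb);
	return 0;
}
#endif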
 915
 916static int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 917{
 918	struct sk_buff *next, *segs;
 919	int ret;
 920
 921	if (likely(!udp_unexpected_gso(sk, skb)))
 922		return udpv6_queue_rcv_one_skb(sk, skb);
 923
 924	__skb_push(skb, -skb_mac_offset(skb));
 925	segs = udp_rcv_segment(sk, skb, false);
 926	skb_list_walk_safe(segs, skb, next) {
 927		__skb_pull(skb, skb_transport_offset(skb));
 928
 929		udp_post_segment_fix_csum(skb);
 930		ret = udpv6_queue_rcv_one_skb(sk, skb);
 931		if (ret > 0)
 932			ip6_protocol_deliver_rcu(dev_net(skb->dev), skb, ret,
 933						 true);
 934	}
 935	return 0;
 936}
 937
 938static bool __udp_v6_is_mcast_sock(struct net *net, const struct sock *sk,
 939				   __be16 loc_port, const struct in6_addr *loc_addr,
 940				   __be16 rmt_port, const struct in6_addr *rmt_addr,
 941				   int dif, int sdif, unsigned short hnum)
 942{
 943	const struct inet_sock *inet = inet_sk(sk);
 944
 945	if (!net_eq(sock_net(sk), net))
 946		return false;
 947
 948	if (udp_sk(sk)->udp_port_hash != hnum ||
 949	    sk->sk_family != PF_INET6 ||
 950	    (inet->inet_dport && inet->inet_dport != rmt_port) ||
 951	    (!ipv6_addr_any(&sk->sk_v6_daddr) &&
 952		    !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr)) ||
 953	    !udp_sk_bound_dev_eq(net, READ_ONCE(sk->sk_bound_dev_if), dif, sdif) ||
 954	    (!ipv6_addr_any(&sk->sk_v6_rcv_saddr) &&
 955		    !ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr)))
 956		return false;
 957	if (!inet6_mc_check(sk, loc_addr, rmt_addr))
 958		return false;
 959	return true;
 960}
 961
 962static void udp6_csum_zero_error(struct sk_buff *skb)
 963{
 964	/* RFC 2460 section 8.1 says that we SHOULD log
 965	 * this error. Well, it is reasonable.
 966	 */
 967	net_dbg_ratelimited("IPv6: udp checksum is 0 for [%pI6c]:%u->[%pI6c]:%u\n",
 968			    &ipv6_hdr(skb)->saddr, ntohs(udp_hdr(skb)->source),
 969			    &ipv6_hdr(skb)->daddr, ntohs(udp_hdr(skb)->dest));
 970}
 971
 972/*
 973 * Note: called only from the BH handler context,
 974 * so we don't need to lock the hashes.
 975 */
 976static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
 977		const struct in6_addr *saddr, const struct in6_addr *daddr,
 978		struct udp_table *udptable, int proto)
 979{
 980	struct sock *sk, *first = NULL;
 981	const struct udphdr *uh = udp_hdr(skb);
 982	unsigned short hnum = ntohs(uh->dest);
 983	struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
 984	unsigned int offset = offsetof(typeof(*sk), sk_node);
 985	unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
 986	int dif = inet6_iif(skb);
 987	int sdif = inet6_sdif(skb);
 988	struct hlist_node *node;
 989	struct sk_buff *nskb;
 990
 991	if (use_hash2) {
 992		hash2_any = ipv6_portaddr_hash(net, &in6addr_any, hnum) &
 993			    udptable->mask;
 994		hash2 = ipv6_portaddr_hash(net, daddr, hnum) & udptable->mask;
 995start_lookup:
 996		hslot = &udptable->hash2[hash2].hslot;
 997		offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
 998	}
 999
1000	sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) {
1001		if (!__udp_v6_is_mcast_sock(net, sk, uh->dest, daddr,
1002					    uh->source, saddr, dif, sdif,
1003					    hnum))
1004			continue;
1005		/* If the checksum is zero and no_check is not enabled
1006		 * for the socket, skip it.
1007		 */
1008		if (!uh->check && !udp_get_no_check6_rx(sk))
1009			continue;
1010		if (!first) {
1011			first = sk;
1012			continue;
1013		}
1014		nskb = skb_clone(skb, GFP_ATOMIC);
1015		if (unlikely(!nskb)) {
1016			atomic_inc(&sk->sk_drops);
1017			__UDP6_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
1018					 IS_UDPLITE(sk));
1019			__UDP6_INC_STATS(net, UDP_MIB_INERRORS,
1020					 IS_UDPLITE(sk));
1021			continue;
1022		}
1023
1024		if (udpv6_queue_rcv_skb(sk, nskb) > 0)
1025			consume_skb(nskb);
1026	}
1027
1028	/* Also lookup *:port if we are using hash2 and haven't done so yet. */
1029	if (use_hash2 && hash2 != hash2_any) {
1030		hash2 = hash2_any;
1031		goto start_lookup;
1032	}
1033
1034	if (first) {
1035		if (udpv6_queue_rcv_skb(first, skb) > 0)
1036			consume_skb(skb);
1037	} else {
1038		kfree_skb(skb);
1039		__UDP6_INC_STATS(net, UDP_MIB_IGNOREDMULTI,
1040				 proto == IPPROTO_UDPLITE);
1041	}
1042	return 0;
1043}
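
/* Editor's note (illustrative, not part of the original file): delivery
 * strategy above -- every matching socket after the first gets a clone,
 * while the first match consumes the original skb at the end, so the
 * common single-listener case never copies.  Once a primary-hash chain
 * holds more than ten sockets, the walk switches to the hash2 chains
 * for (daddr, port) and then (in6addr_any, port).
 */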
1044
1045static void udp6_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
1046{
1047	if (udp_sk_rx_dst_set(sk, dst))
1048		sk->sk_rx_dst_cookie = rt6_get_cookie(dst_rt6_info(dst));
1049}
1050
1051/* wrapper for udp_queue_rcv_skb taking care of csum conversion and
1052 * return code conversion for ip layer consumption
1053 */
1054static int udp6_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
1055				struct udphdr *uh)
1056{
1057	int ret;
1058
1059	if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
1060		skb_checksum_try_convert(skb, IPPROTO_UDP, ip6_compute_pseudo);
1061
1062	ret = udpv6_queue_rcv_skb(sk, skb);
1063
1064	/* a return value > 0 means to resubmit the input */
1065	if (ret > 0)
1066		return ret;
1067	return 0;
1068}
1069
1070int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
1071		   int proto)
1072{
1073	enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED;
1074	const struct in6_addr *saddr, *daddr;
1075	struct net *net = dev_net(skb->dev);
1076	struct sock *sk = NULL;
1077	struct udphdr *uh;
1078	bool refcounted;
1079	u32 ulen = 0;
1080
1081	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
1082		goto discard;
1083
1084	saddr = &ipv6_hdr(skb)->saddr;
1085	daddr = &ipv6_hdr(skb)->daddr;
1086	uh = udp_hdr(skb);
1087
1088	ulen = ntohs(uh->len);
1089	if (ulen > skb->len)
1090		goto short_packet;
1091
1092	if (proto == IPPROTO_UDP) {
1093		/* UDP validates ulen. */
1094
1095		/* Check for jumbo payload */
1096		if (ulen == 0)
1097			ulen = skb->len;
1098
1099		if (ulen < sizeof(*uh))
1100			goto short_packet;
1101
1102		if (ulen < skb->len) {
1103			if (pskb_trim_rcsum(skb, ulen))
1104				goto short_packet;
1105			saddr = &ipv6_hdr(skb)->saddr;
1106			daddr = &ipv6_hdr(skb)->daddr;
1107			uh = udp_hdr(skb);
1108		}
1109	}
1110
1111	if (udp6_csum_init(skb, uh, proto))
1112		goto csum_error;
1113
1114	/* Check if the socket is already available, e.g. due to early demux */
1115	sk = inet6_steal_sock(net, skb, sizeof(struct udphdr), saddr, uh->source, daddr, uh->dest,
1116			      &refcounted, udp6_ehashfn);
1117	if (IS_ERR(sk))
1118		goto no_sk;
1119
1120	if (sk) {
1121		struct dst_entry *dst = skb_dst(skb);
1122		int ret;
1123
1124		if (unlikely(rcu_dereference(sk->sk_rx_dst) != dst))
1125			udp6_sk_rx_dst_set(sk, dst);
1126
1127		if (!uh->check && !udp_get_no_check6_rx(sk)) {
1128			if (refcounted)
1129				sock_put(sk);
1130			goto report_csum_error;
1131		}
1132
1133		ret = udp6_unicast_rcv_skb(sk, skb, uh);
1134		if (refcounted)
1135			sock_put(sk);
1136		return ret;
1137	}
1138
1139	/*
1140	 *	Multicast receive code
1141	 */
1142	if (ipv6_addr_is_multicast(daddr))
1143		return __udp6_lib_mcast_deliver(net, skb,
1144				saddr, daddr, udptable, proto);
1145
1146	/* Unicast */
1147	sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
1148	if (sk) {
1149		if (!uh->check && !udp_get_no_check6_rx(sk))
1150			goto report_csum_error;
1151		return udp6_unicast_rcv_skb(sk, skb, uh);
1152	}
1153no_sk:
1154	reason = SKB_DROP_REASON_NO_SOCKET;
1155
1156	if (!uh->check)
1157		goto report_csum_error;
1158
1159	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1160		goto discard;
1161	nf_reset_ct(skb);
1162
1163	if (udp_lib_checksum_complete(skb))
1164		goto csum_error;
1165
1166	__UDP6_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
1167	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
1168
1169	sk_skb_reason_drop(sk, skb, reason);
1170	return 0;
1171
1172short_packet:
1173	if (reason == SKB_DROP_REASON_NOT_SPECIFIED)
1174		reason = SKB_DROP_REASON_PKT_TOO_SMALL;
1175	net_dbg_ratelimited("UDP%sv6: short packet: From [%pI6c]:%u %d/%d to [%pI6c]:%u\n",
1176			    proto == IPPROTO_UDPLITE ? "-Lite" : "",
1177			    saddr, ntohs(uh->source),
1178			    ulen, skb->len,
1179			    daddr, ntohs(uh->dest));
1180	goto discard;
1181
1182report_csum_error:
1183	udp6_csum_zero_error(skb);
1184csum_error:
1185	if (reason == SKB_DROP_REASON_NOT_SPECIFIED)
1186		reason = SKB_DROP_REASON_UDP_CSUM;
1187	__UDP6_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
1188discard:
1189	__UDP6_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
1190	sk_skb_reason_drop(sk, skb, reason);
1191	return 0;
1192}
1193
1194
1195static struct sock *__udp6_lib_demux_lookup(struct net *net,
1196			__be16 loc_port, const struct in6_addr *loc_addr,
1197			__be16 rmt_port, const struct in6_addr *rmt_addr,
1198			int dif, int sdif)
1199{
1200	struct udp_table *udptable = net->ipv4.udp_table;
1201	unsigned short hnum = ntohs(loc_port);
1202	struct udp_hslot *hslot2;
1203	unsigned int hash2;
1204	__portpair ports;
1205	struct sock *sk;
1206
1207	hash2 = ipv6_portaddr_hash(net, loc_addr, hnum);
1208	hslot2 = udp_hashslot2(udptable, hash2);
1209	ports = INET_COMBINED_PORTS(rmt_port, hnum);
1210
1211	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
1212		if (sk->sk_state == TCP_ESTABLISHED &&
1213		    inet6_match(net, sk, rmt_addr, loc_addr, ports, dif, sdif))
1214			return sk;
1215		/* Only check first socket in chain */
1216		break;
1217	}
1218	return NULL;
1219}
1220
1221void udp_v6_early_demux(struct sk_buff *skb)
1222{
1223	struct net *net = dev_net(skb->dev);
1224	const struct udphdr *uh;
1225	struct sock *sk;
1226	struct dst_entry *dst;
1227	int dif = skb->dev->ifindex;
1228	int sdif = inet6_sdif(skb);
1229
1230	if (!pskb_may_pull(skb, skb_transport_offset(skb) +
1231	    sizeof(struct udphdr)))
1232		return;
1233
1234	uh = udp_hdr(skb);
1235
1236	if (skb->pkt_type == PACKET_HOST)
1237		sk = __udp6_lib_demux_lookup(net, uh->dest,
1238					     &ipv6_hdr(skb)->daddr,
1239					     uh->source, &ipv6_hdr(skb)->saddr,
1240					     dif, sdif);
1241	else
1242		return;
1243
1244	if (!sk)
1245		return;
1246
1247	skb->sk = sk;
1248	DEBUG_NET_WARN_ON_ONCE(sk_is_refcounted(sk));
1249	skb->destructor = sock_pfree;
1250	dst = rcu_dereference(sk->sk_rx_dst);
1251
1252	if (dst)
1253		dst = dst_check(dst, sk->sk_rx_dst_cookie);
1254	if (dst) {
1255		/* Set noref for now.
1256		 * Any place that wants to hold the dst has to call
1257		 * dst_hold_safe().
1258		 */
1259		skb_dst_set_noref(skb, dst);
1260	}
1261}
1262
1263INDIRECT_CALLABLE_SCOPE int udpv6_rcv(struct sk_buff *skb)
1264{
1265	return __udp6_lib_rcv(skb, dev_net(skb->dev)->ipv4.udp_table, IPPROTO_UDP);
1266}
1267
1268/*
1269 * Throw away all pending data and cancel the corking. Socket is locked.
1270 */
1271static void udp_v6_flush_pending_frames(struct sock *sk)
1272{
1273	struct udp_sock *up = udp_sk(sk);
1274
1275	if (up->pending == AF_INET)
1276		udp_flush_pending_frames(sk);
1277	else if (up->pending) {
1278		up->len = 0;
1279		WRITE_ONCE(up->pending, 0);
1280		ip6_flush_pending_frames(sk);
1281	}
1282}
1283
1284static int udpv6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
1285			     int addr_len)
1286{
1287	if (addr_len < offsetofend(struct sockaddr, sa_family))
1288		return -EINVAL;
1289	/* The following checks are replicated from __ip6_datagram_connect()
1290	 * and intended to prevent the BPF program called below from accessing
1291	 * bytes that are out of the bounds specified by the user in addr_len.
1292	 */
1293	if (uaddr->sa_family == AF_INET) {
1294		if (ipv6_only_sock(sk))
1295			return -EAFNOSUPPORT;
1296		return udp_pre_connect(sk, uaddr, addr_len);
1297	}
1298
1299	if (addr_len < SIN6_LEN_RFC2133)
1300		return -EINVAL;
1301
1302	return BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr, &addr_len);
1303}
1304
1305static int udpv6_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
1306{
1307	int res;
1308
1309	lock_sock(sk);
1310	res = __ip6_datagram_connect(sk, uaddr, addr_len);
1311	if (!res)
1312		udp6_hash4(sk);
1313	release_sock(sk);
1314	return res;
1315}
1316
1317/**
1318 *	udp6_hwcsum_outgoing  -  handle outgoing HW checksumming
1319 *	@sk:	socket we are sending on
1320 *	@skb:	sk_buff containing the filled-in UDP header
1321 *		(checksum field must be zeroed out)
1322 *	@saddr: source address
1323 *	@daddr: destination address
1324 *	@len:	length of packet
1325 */
1326static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
1327				 const struct in6_addr *saddr,
1328				 const struct in6_addr *daddr, int len)
1329{
1330	unsigned int offset;
1331	struct udphdr *uh = udp_hdr(skb);
1332	struct sk_buff *frags = skb_shinfo(skb)->frag_list;
1333	__wsum csum = 0;
1334
1335	if (!frags) {
1336		/* Only one fragment on the socket.  */
1337		skb->csum_start = skb_transport_header(skb) - skb->head;
1338		skb->csum_offset = offsetof(struct udphdr, check);
1339		uh->check = ~csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP, 0);
1340	} else {
1341		/*
1342		 * HW checksumming won't work, as there are two or more
1343		 * fragments on the socket, so the csums of all sk_buffs
1344		 * must be folded together
1345		 */
1346		offset = skb_transport_offset(skb);
1347		skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
1348		csum = skb->csum;
1349
1350		skb->ip_summed = CHECKSUM_NONE;
1351
1352		do {
1353			csum = csum_add(csum, frags->csum);
1354		} while ((frags = frags->next));
1355
1356		uh->check = csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP,
1357					    csum);
1358		if (uh->check == 0)
1359			uh->check = CSUM_MANGLED_0;
1360	}
1361}
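
/* Editor's note (illustrative, not part of the original file): in the
 * single-fragment branch above the actual summing is left to the NIC.
 * CHECKSUM_PARTIAL asks the device to checksum everything from
 * csum_start onwards and store the result at csum_start + csum_offset,
 * so software only seeds the field with the inverted pseudo-header sum.
 * The multi-fragment branch cannot use that offload and folds the
 * per-fragment sums by hand instead.
 */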
1362
1363/*
1364 *	Sending
1365 */
1366
1367static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
1368			   struct inet_cork *cork)
1369{
1370	struct sock *sk = skb->sk;
1371	struct udphdr *uh;
1372	int err = 0;
1373	int is_udplite = IS_UDPLITE(sk);
1374	__wsum csum = 0;
1375	int offset = skb_transport_offset(skb);
1376	int len = skb->len - offset;
1377	int datalen = len - sizeof(*uh);
1378
1379	/*
1380	 * Create a UDP header
1381	 */
1382	uh = udp_hdr(skb);
1383	uh->source = fl6->fl6_sport;
1384	uh->dest = fl6->fl6_dport;
1385	uh->len = htons(len);
1386	uh->check = 0;
1387
1388	if (cork->gso_size) {
1389		const int hlen = skb_network_header_len(skb) +
1390				 sizeof(struct udphdr);
1391
1392		if (hlen + min(datalen, cork->gso_size) > cork->fragsize) {
1393			kfree_skb(skb);
1394			return -EMSGSIZE;
1395		}
1396		if (datalen > cork->gso_size * UDP_MAX_SEGMENTS) {
1397			kfree_skb(skb);
1398			return -EINVAL;
1399		}
1400		if (udp_get_no_check6_tx(sk)) {
1401			kfree_skb(skb);
1402			return -EINVAL;
1403		}
1404		if (is_udplite || dst_xfrm(skb_dst(skb))) {
1405			kfree_skb(skb);
1406			return -EIO;
1407		}
1408
1409		if (datalen > cork->gso_size) {
1410			skb_shinfo(skb)->gso_size = cork->gso_size;
1411			skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
1412			skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen,
1413								 cork->gso_size);
1414
1415			/* Don't checksum the payload; the skb will get segmented */
1416			goto csum_partial;
1417		}
1418	}
1419
1420	if (is_udplite)
1421		csum = udplite_csum(skb);
1422	else if (udp_get_no_check6_tx(sk)) {   /* UDP csum disabled */
1423		skb->ip_summed = CHECKSUM_NONE;
1424		goto send;
1425	} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
1426csum_partial:
1427		udp6_hwcsum_outgoing(sk, skb, &fl6->saddr, &fl6->daddr, len);
1428		goto send;
1429	} else
1430		csum = udp_csum(skb);
1431
1432	/* add protocol-dependent pseudo-header */
1433	uh->check = csum_ipv6_magic(&fl6->saddr, &fl6->daddr,
1434				    len, fl6->flowi6_proto, csum);
1435	if (uh->check == 0)
1436		uh->check = CSUM_MANGLED_0;
1437
1438send:
1439	err = ip6_send_skb(skb);
1440	if (err) {
1441		if (err == -ENOBUFS && !inet6_test_bit(RECVERR6, sk)) {
1442			UDP6_INC_STATS(sock_net(sk),
1443				       UDP_MIB_SNDBUFERRORS, is_udplite);
1444			err = 0;
1445		}
1446	} else {
1447		UDP6_INC_STATS(sock_net(sk),
1448			       UDP_MIB_OUTDATAGRAMS, is_udplite);
1449	}
1450	return err;
1451}
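
/* Editor's sketch (illustrative, not part of the original file): both
 * checksum paths above map a computed 0 to CSUM_MANGLED_0 (0xffff).
 * The two values are equivalent in one's-complement arithmetic, and UDP
 * reserves an all-zero checksum field to mean "no checksum", so a
 * genuine zero sum must be transmitted as all-ones:
 */
#if 0
static inline __sum16 example_udp_csum_fold(__sum16 check)
{
	return check ? check : CSUM_MANGLED_0;
}
#endif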
1452
1453static int udp_v6_push_pending_frames(struct sock *sk)
1454{
1455	struct sk_buff *skb;
1456	struct udp_sock *up = udp_sk(sk);
1457	int err = 0;
1458
1459	if (up->pending == AF_INET)
1460		return udp_push_pending_frames(sk);
1461
1462	skb = ip6_finish_skb(sk);
1463	if (!skb)
1464		goto out;
1465
1466	err = udp_v6_send_skb(skb, &inet_sk(sk)->cork.fl.u.ip6,
1467			      &inet_sk(sk)->cork.base);
1468out:
1469	up->len = 0;
1470	WRITE_ONCE(up->pending, 0);
1471	return err;
1472}
1473
1474int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
1475{
1476	struct ipv6_txoptions opt_space;
1477	struct udp_sock *up = udp_sk(sk);
1478	struct inet_sock *inet = inet_sk(sk);
1479	struct ipv6_pinfo *np = inet6_sk(sk);
1480	DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
1481	struct in6_addr *daddr, *final_p, final;
1482	struct ipv6_txoptions *opt = NULL;
1483	struct ipv6_txoptions *opt_to_free = NULL;
1484	struct ip6_flowlabel *flowlabel = NULL;
1485	struct inet_cork_full cork;
1486	struct flowi6 *fl6 = &cork.fl.u.ip6;
1487	struct dst_entry *dst;
1488	struct ipcm6_cookie ipc6;
1489	int addr_len = msg->msg_namelen;
1490	bool connected = false;
1491	int ulen = len;
1492	int corkreq = udp_test_bit(CORK, sk) || msg->msg_flags & MSG_MORE;
1493	int err;
1494	int is_udplite = IS_UDPLITE(sk);
1495	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
1496
1497	ipcm6_init(&ipc6);
1498	ipc6.gso_size = READ_ONCE(up->gso_size);
1499	ipc6.sockc.tsflags = READ_ONCE(sk->sk_tsflags);
1500	ipc6.sockc.mark = READ_ONCE(sk->sk_mark);
1501
1502	/* destination address check */
1503	if (sin6) {
1504		if (addr_len < offsetof(struct sockaddr, sa_data))
1505			return -EINVAL;
1506
1507		switch (sin6->sin6_family) {
1508		case AF_INET6:
1509			if (addr_len < SIN6_LEN_RFC2133)
1510				return -EINVAL;
1511			daddr = &sin6->sin6_addr;
1512			if (ipv6_addr_any(daddr) &&
1513			    ipv6_addr_v4mapped(&np->saddr))
1514				ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
1515						       daddr);
1516			break;
1517		case AF_INET:
1518			goto do_udp_sendmsg;
1519		case AF_UNSPEC:
1520			msg->msg_name = sin6 = NULL;
1521			msg->msg_namelen = addr_len = 0;
1522			daddr = NULL;
1523			break;
1524		default:
1525			return -EINVAL;
1526		}
1527	} else if (!READ_ONCE(up->pending)) {
1528		if (sk->sk_state != TCP_ESTABLISHED)
1529			return -EDESTADDRREQ;
1530		daddr = &sk->sk_v6_daddr;
1531	} else
1532		daddr = NULL;
1533
1534	if (daddr) {
1535		if (ipv6_addr_v4mapped(daddr)) {
1536			struct sockaddr_in sin;
1537			sin.sin_family = AF_INET;
1538			sin.sin_port = sin6 ? sin6->sin6_port : inet->inet_dport;
1539			sin.sin_addr.s_addr = daddr->s6_addr32[3];
1540			msg->msg_name = &sin;
1541			msg->msg_namelen = sizeof(sin);
1542do_udp_sendmsg:
1543			err = ipv6_only_sock(sk) ?
1544				-ENETUNREACH : udp_sendmsg(sk, msg, len);
1545			msg->msg_name = sin6;
1546			msg->msg_namelen = addr_len;
1547			return err;
1548		}
1549	}
1550
1551	/* Rough check on arithmetic overflow;
1552	 * a better check is made in ip6_append_data().
1553	 */
1554	if (len > INT_MAX - sizeof(struct udphdr))
1555		return -EMSGSIZE;
1556
1557	getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
1558	if (READ_ONCE(up->pending)) {
1559		if (READ_ONCE(up->pending) == AF_INET)
1560			return udp_sendmsg(sk, msg, len);
1561		/*
1562		 * There are pending frames.
1563		 * The socket lock must be held while it's corked.
1564		 */
1565		lock_sock(sk);
1566		if (likely(up->pending)) {
1567			if (unlikely(up->pending != AF_INET6)) {
1568				release_sock(sk);
1569				return -EAFNOSUPPORT;
1570			}
1571			dst = NULL;
1572			goto do_append_data;
1573		}
1574		release_sock(sk);
1575	}
1576	ulen += sizeof(struct udphdr);
1577
1578	memset(fl6, 0, sizeof(*fl6));
1579
1580	if (sin6) {
1581		if (sin6->sin6_port == 0)
1582			return -EINVAL;
1583
1584		fl6->fl6_dport = sin6->sin6_port;
1585		daddr = &sin6->sin6_addr;
1586
1587		if (inet6_test_bit(SNDFLOW, sk)) {
1588			fl6->flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK;
1589			if (fl6->flowlabel & IPV6_FLOWLABEL_MASK) {
1590				flowlabel = fl6_sock_lookup(sk, fl6->flowlabel);
1591				if (IS_ERR(flowlabel))
1592					return -EINVAL;
1593			}
1594		}
1595
1596		/*
1597		 * Otherwise it will be difficult to maintain
1598		 * sk->sk_dst_cache.
1599		 */
1600		if (sk->sk_state == TCP_ESTABLISHED &&
1601		    ipv6_addr_equal(daddr, &sk->sk_v6_daddr))
1602			daddr = &sk->sk_v6_daddr;
1603
1604		if (addr_len >= sizeof(struct sockaddr_in6) &&
1605		    sin6->sin6_scope_id &&
1606		    __ipv6_addr_needs_scope_id(__ipv6_addr_type(daddr)))
1607			fl6->flowi6_oif = sin6->sin6_scope_id;
1608	} else {
1609		if (sk->sk_state != TCP_ESTABLISHED)
1610			return -EDESTADDRREQ;
1611
1612		fl6->fl6_dport = inet->inet_dport;
1613		daddr = &sk->sk_v6_daddr;
1614		fl6->flowlabel = np->flow_label;
1615		connected = true;
1616	}
1617
1618	if (!fl6->flowi6_oif)
1619		fl6->flowi6_oif = READ_ONCE(sk->sk_bound_dev_if);
1620
1621	if (!fl6->flowi6_oif)
1622		fl6->flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
1623
1624	fl6->flowi6_uid = sk->sk_uid;
1625
1626	if (msg->msg_controllen) {
1627		opt = &opt_space;
1628		memset(opt, 0, sizeof(struct ipv6_txoptions));
1629		opt->tot_len = sizeof(*opt);
1630		ipc6.opt = opt;
1631
1632		err = udp_cmsg_send(sk, msg, &ipc6.gso_size);
1633		if (err > 0) {
1634			err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, fl6,
1635						    &ipc6);
1636			connected = false;
1637		}
1638		if (err < 0) {
1639			fl6_sock_release(flowlabel);
1640			return err;
1641		}
1642		if ((fl6->flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) {
1643			flowlabel = fl6_sock_lookup(sk, fl6->flowlabel);
1644			if (IS_ERR(flowlabel))
1645				return -EINVAL;
1646		}
1647		if (!(opt->opt_nflen|opt->opt_flen))
1648			opt = NULL;
1649	}
1650	if (!opt) {
1651		opt = txopt_get(np);
1652		opt_to_free = opt;
1653	}
1654	if (flowlabel)
1655		opt = fl6_merge_options(&opt_space, flowlabel, opt);
1656	opt = ipv6_fixup_options(&opt_space, opt);
1657	ipc6.opt = opt;
1658
1659	fl6->flowi6_proto = sk->sk_protocol;
1660	fl6->flowi6_mark = ipc6.sockc.mark;
1661	fl6->daddr = *daddr;
1662	if (ipv6_addr_any(&fl6->saddr) && !ipv6_addr_any(&np->saddr))
1663		fl6->saddr = np->saddr;
1664	fl6->fl6_sport = inet->inet_sport;
1665
1666	if (cgroup_bpf_enabled(CGROUP_UDP6_SENDMSG) && !connected) {
1667		err = BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk,
1668					   (struct sockaddr *)sin6,
1669					   &addr_len,
1670					   &fl6->saddr);
1671		if (err)
1672			goto out_no_dst;
1673		if (sin6) {
1674			if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
1675				/* BPF program rewrote the IPv6-only address with
1676				 * an IPv4-mapped IPv6 address, which is unsupported.
1677				 */
1678				err = -ENOTSUPP;
1679				goto out_no_dst;
1680			}
1681			if (sin6->sin6_port == 0) {
1682				/* BPF program set invalid port. Reject it. */
1683				err = -EINVAL;
1684				goto out_no_dst;
1685			}
1686			fl6->fl6_dport = sin6->sin6_port;
1687			fl6->daddr = sin6->sin6_addr;
1688		}
1689	}
1690
1691	if (ipv6_addr_any(&fl6->daddr))
1692		fl6->daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
1693
1694	final_p = fl6_update_dst(fl6, opt, &final);
1695	if (final_p)
1696		connected = false;
1697
1698	if (!fl6->flowi6_oif && ipv6_addr_is_multicast(&fl6->daddr)) {
1699		fl6->flowi6_oif = READ_ONCE(np->mcast_oif);
1700		connected = false;
1701	} else if (!fl6->flowi6_oif)
1702		fl6->flowi6_oif = READ_ONCE(np->ucast_oif);
1703
1704	security_sk_classify_flow(sk, flowi6_to_flowi_common(fl6));
1705
1706	if (ipc6.tclass < 0)
1707		ipc6.tclass = np->tclass;
1708
1709	fl6->flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6->flowlabel);
1710
1711	dst = ip6_sk_dst_lookup_flow(sk, fl6, final_p, connected);
1712	if (IS_ERR(dst)) {
1713		err = PTR_ERR(dst);
1714		dst = NULL;
1715		goto out;
1716	}
1717
1718	if (ipc6.hlimit < 0)
1719		ipc6.hlimit = ip6_sk_dst_hoplimit(np, fl6, dst);
1720
1721	if (msg->msg_flags&MSG_CONFIRM)
1722		goto do_confirm;
1723back_from_confirm:
1724
1725	/* Lockless fast path for the non-corking case */
1726	if (!corkreq) {
1727		struct sk_buff *skb;
1728
1729		skb = ip6_make_skb(sk, getfrag, msg, ulen,
1730				   sizeof(struct udphdr), &ipc6,
1731				   dst_rt6_info(dst),
1732				   msg->msg_flags, &cork);
1733		err = PTR_ERR(skb);
1734		if (!IS_ERR_OR_NULL(skb))
1735			err = udp_v6_send_skb(skb, fl6, &cork.base);
1736		/* ip6_make_skb steals dst reference */
1737		goto out_no_dst;
1738	}
1739
1740	lock_sock(sk);
1741	if (unlikely(up->pending)) {
1742		/* The socket is already corked while preparing it. */
1743		/* ... which is an evident application bug. --ANK */
1744		release_sock(sk);
1745
1746		net_dbg_ratelimited("udp cork app bug 2\n");
1747		err = -EINVAL;
1748		goto out;
1749	}
1750
1751	WRITE_ONCE(up->pending, AF_INET6);
1752
1753do_append_data:
1754	if (ipc6.dontfrag < 0)
1755		ipc6.dontfrag = inet6_test_bit(DONTFRAG, sk);
1756	up->len += ulen;
1757	err = ip6_append_data(sk, getfrag, msg, ulen, sizeof(struct udphdr),
1758			      &ipc6, fl6, dst_rt6_info(dst),
1759			      corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
1760	if (err)
1761		udp_v6_flush_pending_frames(sk);
1762	else if (!corkreq)
1763		err = udp_v6_push_pending_frames(sk);
1764	else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
1765		WRITE_ONCE(up->pending, 0);
1766
1767	if (err > 0)
1768		err = inet6_test_bit(RECVERR6, sk) ? net_xmit_errno(err) : 0;
1769	release_sock(sk);
1770
1771out:
1772	dst_release(dst);
1773out_no_dst:
1774	fl6_sock_release(flowlabel);
1775	txopt_put(opt_to_free);
1776	if (!err)
1777		return len;
1778	/*
1779	 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space.  Reporting
1780	 * ENOBUFS might not be good (it's not tunable per se), but otherwise
1781	 * we don't have a good statistic (IpOutDiscards but it can be too many
1782	 * things).  We could add another new stat but at least for now that
1783	 * seems like overkill.
1784	 */
1785	if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
1786		UDP6_INC_STATS(sock_net(sk),
1787			       UDP_MIB_SNDBUFERRORS, is_udplite);
1788	}
1789	return err;
1790
1791do_confirm:
1792	if (msg->msg_flags & MSG_PROBE)
1793		dst_confirm_neigh(dst, &fl6->daddr);
1794	if (!(msg->msg_flags&MSG_PROBE) || len)
1795		goto back_from_confirm;
1796	err = 0;
1797	goto out;
1798}
1799EXPORT_SYMBOL(udpv6_sendmsg);
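
/* Editor's sketch (illustrative, not part of the original file;
 * userspace view): the cork->gso_size handling in udp_v6_send_skb() is
 * driven by the UDP_SEGMENT socket option (or the per-call cmsg parsed
 * by udp_cmsg_send() above); one large send is then split by the stack
 * into gso_size-byte datagrams.
 */
#if 0
#include <sys/socket.h>

#ifndef SOL_UDP
#define SOL_UDP		17	/* from the UAPI headers */
#endif
#ifndef UDP_SEGMENT
#define UDP_SEGMENT	103	/* from linux/udp.h */
#endif

static int example_enable_segmentation(int fd)
{
	int gso_size = 1400;	/* hypothetical per-datagram payload size */

	return setsockopt(fd, SOL_UDP, UDP_SEGMENT,
			  &gso_size, sizeof(gso_size));
}
#endif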
1800
1801static void udpv6_splice_eof(struct socket *sock)
1802{
1803	struct sock *sk = sock->sk;
1804	struct udp_sock *up = udp_sk(sk);
1805
1806	if (!READ_ONCE(up->pending) || udp_test_bit(CORK, sk))
1807		return;
1808
1809	lock_sock(sk);
1810	if (up->pending && !udp_test_bit(CORK, sk))
1811		udp_v6_push_pending_frames(sk);
1812	release_sock(sk);
1813}
1814
1815void udpv6_destroy_sock(struct sock *sk)
1816{
1817	struct udp_sock *up = udp_sk(sk);
1818	lock_sock(sk);
1819
1820	/* protects from races with udp_abort() */
1821	sock_set_flag(sk, SOCK_DEAD);
1822	udp_v6_flush_pending_frames(sk);
1823	release_sock(sk);
1824
1825	if (static_branch_unlikely(&udpv6_encap_needed_key)) {
1826		if (up->encap_type) {
1827			void (*encap_destroy)(struct sock *sk);
1828			encap_destroy = READ_ONCE(up->encap_destroy);
1829			if (encap_destroy)
1830				encap_destroy(sk);
1831		}
1832		if (udp_test_bit(ENCAP_ENABLED, sk)) {
1833			static_branch_dec(&udpv6_encap_needed_key);
1834			udp_encap_disable();
1835		}
1836	}
1837}
1838
1839/*
1840 *	Socket option code for UDP
1841 */
1842int udpv6_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
1843		     unsigned int optlen)
1844{
1845	if (level == SOL_UDP || level == SOL_UDPLITE || level == SOL_SOCKET)
1846		return udp_lib_setsockopt(sk, level, optname,
1847					  optval, optlen,
1848					  udp_v6_push_pending_frames);
1849	return ipv6_setsockopt(sk, level, optname, optval, optlen);
1850}
1851
1852int udpv6_getsockopt(struct sock *sk, int level, int optname,
1853		     char __user *optval, int __user *optlen)
1854{
1855	if (level == SOL_UDP || level == SOL_UDPLITE)
1856		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
1857	return ipv6_getsockopt(sk, level, optname, optval, optlen);
1858}
1859
1860
1861/* ------------------------------------------------------------------------ */
1862#ifdef CONFIG_PROC_FS
1863int udp6_seq_show(struct seq_file *seq, void *v)
1864{
1865	if (v == SEQ_START_TOKEN) {
1866		seq_puts(seq, IPV6_SEQ_DGRAM_HEADER);
1867	} else {
1868		int bucket = ((struct udp_iter_state *)seq->private)->bucket;
1869		const struct inet_sock *inet = inet_sk((const struct sock *)v);
1870		__u16 srcp = ntohs(inet->inet_sport);
1871		__u16 destp = ntohs(inet->inet_dport);
1872		__ip6_dgram_sock_seq_show(seq, v, srcp, destp,
1873					  udp_rqueue_get(v), bucket);
1874	}
1875	return 0;
1876}
1877
1878const struct seq_operations udp6_seq_ops = {
1879	.start		= udp_seq_start,
1880	.next		= udp_seq_next,
1881	.stop		= udp_seq_stop,
1882	.show		= udp6_seq_show,
1883};
1884EXPORT_SYMBOL(udp6_seq_ops);
1885
1886static struct udp_seq_afinfo udp6_seq_afinfo = {
1887	.family		= AF_INET6,
1888	.udp_table	= NULL,
1889};
1890
1891int __net_init udp6_proc_init(struct net *net)
1892{
1893	if (!proc_create_net_data("udp6", 0444, net->proc_net, &udp6_seq_ops,
1894			sizeof(struct udp_iter_state), &udp6_seq_afinfo))
1895		return -ENOMEM;
1896	return 0;
1897}
1898
1899void udp6_proc_exit(struct net *net)
1900{
1901	remove_proc_entry("udp6", net->proc_net);
1902}
1903#endif /* CONFIG_PROC_FS */
1904
1905/* ------------------------------------------------------------------------ */
1906
1907struct proto udpv6_prot = {
1908	.name			= "UDPv6",
1909	.owner			= THIS_MODULE,
1910	.close			= udp_lib_close,
1911	.pre_connect		= udpv6_pre_connect,
1912	.connect		= udpv6_connect,
1913	.disconnect		= udp_disconnect,
1914	.ioctl			= udp_ioctl,
1915	.init			= udpv6_init_sock,
1916	.destroy		= udpv6_destroy_sock,
1917	.setsockopt		= udpv6_setsockopt,
1918	.getsockopt		= udpv6_getsockopt,
1919	.sendmsg		= udpv6_sendmsg,
1920	.recvmsg		= udpv6_recvmsg,
1921	.splice_eof		= udpv6_splice_eof,
1922	.release_cb		= ip6_datagram_release_cb,
1923	.hash			= udp_lib_hash,
1924	.unhash			= udp_lib_unhash,
1925	.rehash			= udp_v6_rehash,
1926	.get_port		= udp_v6_get_port,
1927	.put_port		= udp_lib_unhash,
1928#ifdef CONFIG_BPF_SYSCALL
1929	.psock_update_sk_prot	= udp_bpf_update_proto,
1930#endif
1931
1932	.memory_allocated	= &udp_memory_allocated,
1933	.per_cpu_fw_alloc	= &udp_memory_per_cpu_fw_alloc,
1934
1935	.sysctl_mem		= sysctl_udp_mem,
1936	.sysctl_wmem_offset     = offsetof(struct net, ipv4.sysctl_udp_wmem_min),
1937	.sysctl_rmem_offset     = offsetof(struct net, ipv4.sysctl_udp_rmem_min),
1938	.obj_size		= sizeof(struct udp6_sock),
1939	.ipv6_pinfo_offset = offsetof(struct udp6_sock, inet6),
1940	.h.udp_table		= NULL,
1941	.diag_destroy		= udp_abort,
1942};
1943
1944static struct inet_protosw udpv6_protosw = {
1945	.type =      SOCK_DGRAM,
1946	.protocol =  IPPROTO_UDP,
1947	.prot =      &udpv6_prot,
1948	.ops =       &inet6_dgram_ops,
1949	.flags =     INET_PROTOSW_PERMANENT,
1950};
1951
1952int __init udpv6_init(void)
1953{
1954	int ret;
1955
1956	net_hotdata.udpv6_protocol = (struct inet6_protocol) {
1957		.handler     = udpv6_rcv,
1958		.err_handler = udpv6_err,
1959		.flags	     = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
1960	};
1961	ret = inet6_add_protocol(&net_hotdata.udpv6_protocol, IPPROTO_UDP);
1962	if (ret)
1963		goto out;
1964
1965	ret = inet6_register_protosw(&udpv6_protosw);
1966	if (ret)
1967		goto out_udpv6_protocol;
1968out:
1969	return ret;
1970
1971out_udpv6_protocol:
1972	inet6_del_protocol(&net_hotdata.udpv6_protocol, IPPROTO_UDP);
1973	goto out;
1974}
1975
1976void udpv6_exit(void)
1977{
1978	inet6_unregister_protosw(&udpv6_protosw);
1979	inet6_del_protocol(&net_hotdata.udpv6_protocol, IPPROTO_UDP);
1980}