net/ipv6/udp.c (v5.9)
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 *	UDP over IPv6
   4 *	Linux INET6 implementation
   5 *
   6 *	Authors:
   7 *	Pedro Roque		<roque@di.fc.ul.pt>
   8 *
   9 *	Based on linux/ipv4/udp.c
  10 *
  11 *	Fixes:
  12 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
  13 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
  14 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
  15 *					a single port at the same time.
  16 *      Kazunori MIYAZAWA @USAGI:       change process style to use ip6_append_data
  17 *      YOSHIFUJI Hideaki @USAGI:	convert /proc/net/udp6 to seq_file.
  18 */
  19
  20#include <linux/errno.h>
  21#include <linux/types.h>
  22#include <linux/socket.h>
  23#include <linux/sockios.h>
  24#include <linux/net.h>
  25#include <linux/in6.h>
  26#include <linux/netdevice.h>
  27#include <linux/if_arp.h>
  28#include <linux/ipv6.h>
  29#include <linux/icmpv6.h>
  30#include <linux/init.h>
  31#include <linux/module.h>
  32#include <linux/skbuff.h>
  33#include <linux/slab.h>
  34#include <linux/uaccess.h>
  35#include <linux/indirect_call_wrapper.h>
  36
  37#include <net/addrconf.h>
  38#include <net/ndisc.h>
  39#include <net/protocol.h>
  40#include <net/transp_v6.h>
  41#include <net/ip6_route.h>
  42#include <net/raw.h>
  43#include <net/tcp_states.h>
  44#include <net/ip6_checksum.h>
  45#include <net/ip6_tunnel.h>
  46#include <net/xfrm.h>
  47#include <net/inet_hashtables.h>
  48#include <net/inet6_hashtables.h>
  49#include <net/busy_poll.h>
  50#include <net/sock_reuseport.h>
  51
  52#include <linux/proc_fs.h>
  53#include <linux/seq_file.h>
  54#include <trace/events/skb.h>
  55#include "udp_impl.h"
  56
  57static u32 udp6_ehashfn(const struct net *net,
  58			const struct in6_addr *laddr,
  59			const u16 lport,
  60			const struct in6_addr *faddr,
  61			const __be16 fport)
  62{
  63	static u32 udp6_ehash_secret __read_mostly;
  64	static u32 udp_ipv6_hash_secret __read_mostly;
  65
  66	u32 lhash, fhash;
  67
  68	net_get_random_once(&udp6_ehash_secret,
  69			    sizeof(udp6_ehash_secret));
  70	net_get_random_once(&udp_ipv6_hash_secret,
  71			    sizeof(udp_ipv6_hash_secret));
  72
  73	lhash = (__force u32)laddr->s6_addr32[3];
  74	fhash = __ipv6_addr_jhash(faddr, udp_ipv6_hash_secret);
  75
  76	return __inet6_ehashfn(lhash, lport, fhash, fport,
   77			       udp6_ehash_secret + net_hash_mix(net));
  78}
  79
  80int udp_v6_get_port(struct sock *sk, unsigned short snum)
  81{
  82	unsigned int hash2_nulladdr =
  83		ipv6_portaddr_hash(sock_net(sk), &in6addr_any, snum);
  84	unsigned int hash2_partial =
  85		ipv6_portaddr_hash(sock_net(sk), &sk->sk_v6_rcv_saddr, 0);
  86
  87	/* precompute partial secondary hash */
  88	udp_sk(sk)->udp_portaddr_hash = hash2_partial;
  89	return udp_lib_get_port(sk, snum, hash2_nulladdr);
  90}
  91
  92void udp_v6_rehash(struct sock *sk)
  93{
  94	u16 new_hash = ipv6_portaddr_hash(sock_net(sk),
  95					  &sk->sk_v6_rcv_saddr,
  96					  inet_sk(sk)->inet_num);
  97
  98	udp_lib_rehash(sk, new_hash);
  99}
 100
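
/* Scoring used by udp6_lib_lookup2() below: a candidate is disqualified
 * (-1) unless netns, bound port, family, local address and bound device
 * all match; surviving candidates then score a point apiece for the
 * device check, an exact connected peer port, the peer address, and the
 * CPU the packet arrived on, so the most specific socket wins.
 */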
 101static int compute_score(struct sock *sk, struct net *net,
 102			 const struct in6_addr *saddr, __be16 sport,
 103			 const struct in6_addr *daddr, unsigned short hnum,
 104			 int dif, int sdif)
 105{
 106	int score;
 107	struct inet_sock *inet;
 108	bool dev_match;
 109
 110	if (!net_eq(sock_net(sk), net) ||
 111	    udp_sk(sk)->udp_port_hash != hnum ||
 112	    sk->sk_family != PF_INET6)
 113		return -1;
 114
 115	if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
 116		return -1;
 117
 118	score = 0;
 119	inet = inet_sk(sk);
 120
 121	if (inet->inet_dport) {
 122		if (inet->inet_dport != sport)
 123			return -1;
 124		score++;
 125	}
 126
 127	if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
 128		if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr))
 129			return -1;
 130		score++;
 131	}
 132
 133	dev_match = udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif);
 134	if (!dev_match)
 135		return -1;
 136	score++;
 137
 138	if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
 139		score++;
 140
 141	return score;
 142}
 143
 144static struct sock *lookup_reuseport(struct net *net, struct sock *sk,
 145				     struct sk_buff *skb,
 146				     const struct in6_addr *saddr,
 147				     __be16 sport,
 148				     const struct in6_addr *daddr,
 149				     unsigned int hnum)
 150{
 151	struct sock *reuse_sk = NULL;
 152	u32 hash;
 153
 154	if (sk->sk_reuseport && sk->sk_state != TCP_ESTABLISHED) {
 155		hash = udp6_ehashfn(net, daddr, hnum, saddr, sport);
 156		reuse_sk = reuseport_select_sock(sk, hash, skb,
 157						 sizeof(struct udphdr));
 158	}
 159	return reuse_sk;
 160}
 161
 162/* called with rcu_read_lock() */
 163static struct sock *udp6_lib_lookup2(struct net *net,
 164		const struct in6_addr *saddr, __be16 sport,
 165		const struct in6_addr *daddr, unsigned int hnum,
 166		int dif, int sdif, struct udp_hslot *hslot2,
 167		struct sk_buff *skb)
 168{
 169	struct sock *sk, *result;
 170	int score, badness;
 171
 172	result = NULL;
 173	badness = -1;
 174	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
 175		score = compute_score(sk, net, saddr, sport,
 176				      daddr, hnum, dif, sdif);
 177		if (score > badness) {
 178			result = lookup_reuseport(net, sk, skb,
 179						  saddr, sport, daddr, hnum);
 180			/* Fall back to scoring if group has connections */
 181			if (result && !reuseport_has_conns(sk, false))
 182				return result;
 183
 184			result = result ? : sk;
 185			badness = score;
 186		}
 187	}
 188	return result;
 189}
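
/* The reuseport branch above spreads datagrams across a SO_REUSEPORT
 * group using the hash from udp6_ehashfn(). A minimal userspace sketch
 * of building such a group (illustrative only; error handling omitted):
 *
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <stdint.h>
 *
 *	static int make_reuseport_member(uint16_t port)
 *	{
 *		int one = 1;
 *		int fd = socket(AF_INET6, SOCK_DGRAM, 0);
 *		struct sockaddr_in6 a = {
 *			.sin6_family = AF_INET6,
 *			.sin6_addr   = in6addr_any,
 *			.sin6_port   = htons(port),
 *		};
 *
 *		setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
 *		bind(fd, (struct sockaddr *)&a, sizeof(a));
 *		return fd;
 *	}
 *
 * Every process binding this way joins the same group, and incoming
 * datagrams are distributed over the members.
 */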
 190
 191static inline struct sock *udp6_lookup_run_bpf(struct net *net,
 192					       struct udp_table *udptable,
 193					       struct sk_buff *skb,
 194					       const struct in6_addr *saddr,
 195					       __be16 sport,
 196					       const struct in6_addr *daddr,
 197					       u16 hnum)
 198{
 199	struct sock *sk, *reuse_sk;
 200	bool no_reuseport;
 201
 202	if (udptable != &udp_table)
 203		return NULL; /* only UDP is supported */
 204
 205	no_reuseport = bpf_sk_lookup_run_v6(net, IPPROTO_UDP,
 206					    saddr, sport, daddr, hnum, &sk);
 207	if (no_reuseport || IS_ERR_OR_NULL(sk))
 208		return sk;
 209
 210	reuse_sk = lookup_reuseport(net, sk, skb, saddr, sport, daddr, hnum);
 211	if (reuse_sk)
 212		sk = reuse_sk;
 213	return sk;
 214}
 215
 216/* rcu_read_lock() must be held */
 217struct sock *__udp6_lib_lookup(struct net *net,
 218			       const struct in6_addr *saddr, __be16 sport,
 219			       const struct in6_addr *daddr, __be16 dport,
 220			       int dif, int sdif, struct udp_table *udptable,
 221			       struct sk_buff *skb)
 222{
 223	unsigned short hnum = ntohs(dport);
 224	unsigned int hash2, slot2;
 225	struct udp_hslot *hslot2;
 226	struct sock *result, *sk;
 227
 228	hash2 = ipv6_portaddr_hash(net, daddr, hnum);
 229	slot2 = hash2 & udptable->mask;
 230	hslot2 = &udptable->hash2[slot2];
 231
 232	/* Lookup connected or non-wildcard sockets */
 233	result = udp6_lib_lookup2(net, saddr, sport,
 234				  daddr, hnum, dif, sdif,
 235				  hslot2, skb);
 236	if (!IS_ERR_OR_NULL(result) && result->sk_state == TCP_ESTABLISHED)
 237		goto done;
 238
 239	/* Lookup redirect from BPF */
 240	if (static_branch_unlikely(&bpf_sk_lookup_enabled)) {
 241		sk = udp6_lookup_run_bpf(net, udptable, skb,
 242					 saddr, sport, daddr, hnum);
 243		if (sk) {
 244			result = sk;
 245			goto done;
 246		}
 247	}
 248
 249	/* Got non-wildcard socket or error on first lookup */
 250	if (result)
 251		goto done;
 252
 253	/* Lookup wildcard sockets */
 254	hash2 = ipv6_portaddr_hash(net, &in6addr_any, hnum);
 255	slot2 = hash2 & udptable->mask;
 256	hslot2 = &udptable->hash2[slot2];
 257
 258	result = udp6_lib_lookup2(net, saddr, sport,
 259				  &in6addr_any, hnum, dif, sdif,
 260				  hslot2, skb);
 261done:
 262	if (IS_ERR(result))
 263		return NULL;
 264	return result;
 265}
 266EXPORT_SYMBOL_GPL(__udp6_lib_lookup);
 267
 268static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb,
 269					  __be16 sport, __be16 dport,
 270					  struct udp_table *udptable)
 271{
 272	const struct ipv6hdr *iph = ipv6_hdr(skb);
 273
 274	return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
 275				 &iph->daddr, dport, inet6_iif(skb),
 276				 inet6_sdif(skb), udptable, skb);
 277}
 278
 279struct sock *udp6_lib_lookup_skb(struct sk_buff *skb,
 280				 __be16 sport, __be16 dport)
 281{
 282	const struct ipv6hdr *iph = ipv6_hdr(skb);
 283
 284	return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
 285				 &iph->daddr, dport, inet6_iif(skb),
 286				 inet6_sdif(skb), &udp_table, NULL);
 287}
 288EXPORT_SYMBOL_GPL(udp6_lib_lookup_skb);
 289
 290/* Must be called under rcu_read_lock().
 291 * Does increment socket refcount.
 292 */
 293#if IS_ENABLED(CONFIG_NF_TPROXY_IPV6) || IS_ENABLED(CONFIG_NF_SOCKET_IPV6)
 294struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport,
 295			     const struct in6_addr *daddr, __be16 dport, int dif)
 296{
 297	struct sock *sk;
 298
 299	sk =  __udp6_lib_lookup(net, saddr, sport, daddr, dport,
 300				dif, 0, &udp_table, NULL);
 301	if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
 302		sk = NULL;
 303	return sk;
 304}
 305EXPORT_SYMBOL_GPL(udp6_lib_lookup);
 306#endif
 307
  308/* do not use the scratch area len for jumbograms: their length exceeds the
  309 * scratch area space; note that the IP6CB flags field is still in the first
 310 * cacheline, so checking for jumbograms is cheap
 311 */
 312static int udp6_skb_len(struct sk_buff *skb)
 313{
 314	return unlikely(inet6_is_jumbogram(skb)) ? skb->len : udp_skb_len(skb);
 315}
 316
 317/*
  318 *	This should be easy: if there is something there we
  319 *	return it; otherwise we block.
 320 */
 321
 322int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
 323		  int noblock, int flags, int *addr_len)
 324{
 325	struct ipv6_pinfo *np = inet6_sk(sk);
 326	struct inet_sock *inet = inet_sk(sk);
 327	struct sk_buff *skb;
 328	unsigned int ulen, copied;
 329	int off, err, peeking = flags & MSG_PEEK;
 330	int is_udplite = IS_UDPLITE(sk);
 331	struct udp_mib __percpu *mib;
 332	bool checksum_valid = false;
 333	int is_udp4;
 334
 335	if (flags & MSG_ERRQUEUE)
 336		return ipv6_recv_error(sk, msg, len, addr_len);
 337
 338	if (np->rxpmtu && np->rxopt.bits.rxpmtu)
 339		return ipv6_recv_rxpmtu(sk, msg, len, addr_len);
 340
 341try_again:
 342	off = sk_peek_offset(sk, flags);
 343	skb = __skb_recv_udp(sk, flags, noblock, &off, &err);
 344	if (!skb)
 345		return err;
 346
 347	ulen = udp6_skb_len(skb);
 348	copied = len;
 349	if (copied > ulen - off)
 350		copied = ulen - off;
 351	else if (copied < ulen)
 352		msg->msg_flags |= MSG_TRUNC;
 353
 354	is_udp4 = (skb->protocol == htons(ETH_P_IP));
 355	mib = __UDPX_MIB(sk, is_udp4);
 356
 357	/*
 358	 * If checksum is needed at all, try to do it while copying the
 359	 * data.  If the data is truncated, or if we only want a partial
 360	 * coverage checksum (UDP-Lite), do it before the copy.
 361	 */
 362
 363	if (copied < ulen || peeking ||
 364	    (is_udplite && UDP_SKB_CB(skb)->partial_cov)) {
 365		checksum_valid = udp_skb_csum_unnecessary(skb) ||
 366				!__udp_lib_checksum_complete(skb);
 367		if (!checksum_valid)
 368			goto csum_copy_err;
 369	}
 370
 371	if (checksum_valid || udp_skb_csum_unnecessary(skb)) {
 372		if (udp_skb_is_linear(skb))
 373			err = copy_linear_skb(skb, copied, off, &msg->msg_iter);
 374		else
 375			err = skb_copy_datagram_msg(skb, off, msg, copied);
 376	} else {
 377		err = skb_copy_and_csum_datagram_msg(skb, off, msg);
 378		if (err == -EINVAL)
 379			goto csum_copy_err;
 380	}
 381	if (unlikely(err)) {
 382		if (!peeking) {
 383			atomic_inc(&sk->sk_drops);
 384			SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
 385		}
 386		kfree_skb(skb);
 387		return err;
 388	}
 389	if (!peeking)
 390		SNMP_INC_STATS(mib, UDP_MIB_INDATAGRAMS);
 391
 392	sock_recv_ts_and_drops(msg, sk, skb);
 393
 394	/* Copy the address. */
 395	if (msg->msg_name) {
 396		DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
 397		sin6->sin6_family = AF_INET6;
 398		sin6->sin6_port = udp_hdr(skb)->source;
 399		sin6->sin6_flowinfo = 0;
 400
 401		if (is_udp4) {
 402			ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
 403					       &sin6->sin6_addr);
 404			sin6->sin6_scope_id = 0;
 405		} else {
 406			sin6->sin6_addr = ipv6_hdr(skb)->saddr;
 407			sin6->sin6_scope_id =
 408				ipv6_iface_scope_id(&sin6->sin6_addr,
 409						    inet6_iif(skb));
 410		}
 411		*addr_len = sizeof(*sin6);
 412
 413		if (cgroup_bpf_enabled)
 414			BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk,
 415						(struct sockaddr *)sin6);
 416	}
 417
 418	if (udp_sk(sk)->gro_enabled)
 419		udp_cmsg_recv(msg, sk, skb);
 420
 421	if (np->rxopt.all)
 422		ip6_datagram_recv_common_ctl(sk, msg, skb);
 423
 424	if (is_udp4) {
 425		if (inet->cmsg_flags)
 426			ip_cmsg_recv_offset(msg, sk, skb,
 427					    sizeof(struct udphdr), off);
 428	} else {
 429		if (np->rxopt.all)
 430			ip6_datagram_recv_specific_ctl(sk, msg, skb);
 431	}
 432
 433	err = copied;
 434	if (flags & MSG_TRUNC)
 435		err = ulen;
 436
 437	skb_consume_udp(sk, skb, peeking ? -err : err);
 438	return err;
 439
 440csum_copy_err:
 441	if (!__sk_queue_drop_skb(sk, &udp_sk(sk)->reader_queue, skb, flags,
 442				 udp_skb_destructor)) {
 443		SNMP_INC_STATS(mib, UDP_MIB_CSUMERRORS);
 444		SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
 445	}
 446	kfree_skb(skb);
 447
 448	/* starting over for a new packet, but check if we need to yield */
 449	cond_resched();
 450	msg->msg_flags &= ~MSG_TRUNC;
 451	goto try_again;
 452}
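
/* A minimal userspace counterpart of the receive path above: fetch one
 * datagram plus the peer address. MSG_TRUNC in msg_flags mirrors the
 * truncation handling in udpv6_recvmsg(). Sketch only; error handling
 * omitted:
 *
 *	#include <sys/socket.h>
 *	#include <sys/uio.h>
 *	#include <netinet/in.h>
 *
 *	static ssize_t recv_one(int fd, void *buf, size_t len,
 *				struct sockaddr_in6 *peer)
 *	{
 *		struct iovec iov = { .iov_base = buf, .iov_len = len };
 *		struct msghdr msg = {
 *			.msg_name    = peer,
 *			.msg_namelen = sizeof(*peer),
 *			.msg_iov     = &iov,
 *			.msg_iovlen  = 1,
 *		};
 *		ssize_t n = recvmsg(fd, &msg, 0);
 *
 *		if (n >= 0 && (msg.msg_flags & MSG_TRUNC))
 *			n = -1;	// datagram longer than len, tail lost
 *		return n;
 *	}
 */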
 453
 454DEFINE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
 455void udpv6_encap_enable(void)
 456{
 457	static_branch_inc(&udpv6_encap_needed_key);
 458}
 459EXPORT_SYMBOL(udpv6_encap_enable);
 460
 461/* Handler for tunnels with arbitrary destination ports: no socket lookup, go
 462 * through error handlers in encapsulations looking for a match.
 463 */
 464static int __udp6_lib_err_encap_no_sk(struct sk_buff *skb,
 465				      struct inet6_skb_parm *opt,
 466				      u8 type, u8 code, int offset, __be32 info)
 467{
 468	int i;
 469
 470	for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) {
 471		int (*handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
 472			       u8 type, u8 code, int offset, __be32 info);
 473		const struct ip6_tnl_encap_ops *encap;
 474
 475		encap = rcu_dereference(ip6tun_encaps[i]);
 476		if (!encap)
 477			continue;
 478		handler = encap->err_handler;
 479		if (handler && !handler(skb, opt, type, code, offset, info))
 480			return 0;
 481	}
 482
 483	return -ENOENT;
 484}
 485
 486/* Try to match ICMP errors to UDP tunnels by looking up a socket without
 487 * reversing source and destination port: this will match tunnels that force the
 488 * same destination port on both endpoints (e.g. VXLAN, GENEVE). Note that
 489 * lwtunnels might actually break this assumption by being configured with
  490 * different destination ports on endpoints; in that case we won't be able to
 491 * trace ICMP messages back to them.
 492 *
 493 * If this doesn't match any socket, probe tunnels with arbitrary destination
 494 * ports (e.g. FoU, GUE): there, the receiving socket is useless, as the port
 495 * we've sent packets to won't necessarily match the local destination port.
 496 *
 497 * Then ask the tunnel implementation to match the error against a valid
 498 * association.
 499 *
 500 * Return an error if we can't find a match, the socket if we need further
 501 * processing, zero otherwise.
 502 */
 503static struct sock *__udp6_lib_err_encap(struct net *net,
 504					 const struct ipv6hdr *hdr, int offset,
 505					 struct udphdr *uh,
 506					 struct udp_table *udptable,
 507					 struct sk_buff *skb,
 508					 struct inet6_skb_parm *opt,
 509					 u8 type, u8 code, __be32 info)
 510{
 511	int network_offset, transport_offset;
 512	struct sock *sk;
 513
 514	network_offset = skb_network_offset(skb);
 515	transport_offset = skb_transport_offset(skb);
 516
 517	/* Network header needs to point to the outer IPv6 header inside ICMP */
 518	skb_reset_network_header(skb);
 519
 520	/* Transport header needs to point to the UDP header */
 521	skb_set_transport_header(skb, offset);
 522
 523	sk = __udp6_lib_lookup(net, &hdr->daddr, uh->source,
 524			       &hdr->saddr, uh->dest,
 525			       inet6_iif(skb), 0, udptable, skb);
 526	if (sk) {
 527		int (*lookup)(struct sock *sk, struct sk_buff *skb);
 528		struct udp_sock *up = udp_sk(sk);
 529
 530		lookup = READ_ONCE(up->encap_err_lookup);
 531		if (!lookup || lookup(sk, skb))
 532			sk = NULL;
 533	}
 534
 535	if (!sk) {
 536		sk = ERR_PTR(__udp6_lib_err_encap_no_sk(skb, opt, type, code,
 537							offset, info));
 538	}
 539
 540	skb_set_transport_header(skb, transport_offset);
 541	skb_set_network_header(skb, network_offset);
 542
 543	return sk;
 544}
 545
 546int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 547		   u8 type, u8 code, int offset, __be32 info,
 548		   struct udp_table *udptable)
 549{
 550	struct ipv6_pinfo *np;
 551	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
 552	const struct in6_addr *saddr = &hdr->saddr;
 553	const struct in6_addr *daddr = &hdr->daddr;
 554	struct udphdr *uh = (struct udphdr *)(skb->data+offset);
 555	bool tunnel = false;
 556	struct sock *sk;
 557	int harderr;
 558	int err;
 559	struct net *net = dev_net(skb->dev);
 560
 561	sk = __udp6_lib_lookup(net, daddr, uh->dest, saddr, uh->source,
 562			       inet6_iif(skb), inet6_sdif(skb), udptable, NULL);
 563	if (!sk) {
 564		/* No socket for error: try tunnels before discarding */
 565		sk = ERR_PTR(-ENOENT);
 566		if (static_branch_unlikely(&udpv6_encap_needed_key)) {
 567			sk = __udp6_lib_err_encap(net, hdr, offset, uh,
 568						  udptable, skb,
 569						  opt, type, code, info);
 570			if (!sk)
 571				return 0;
 572		}
 573
 574		if (IS_ERR(sk)) {
 575			__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
 576					  ICMP6_MIB_INERRORS);
 577			return PTR_ERR(sk);
 578		}
 579
 580		tunnel = true;
 581	}
 582
 583	harderr = icmpv6_err_convert(type, code, &err);
 584	np = inet6_sk(sk);
 585
 586	if (type == ICMPV6_PKT_TOOBIG) {
 587		if (!ip6_sk_accept_pmtu(sk))
 588			goto out;
 589		ip6_sk_update_pmtu(skb, sk, info);
 590		if (np->pmtudisc != IPV6_PMTUDISC_DONT)
 591			harderr = 1;
 592	}
 593	if (type == NDISC_REDIRECT) {
 594		if (tunnel) {
 595			ip6_redirect(skb, sock_net(sk), inet6_iif(skb),
 596				     sk->sk_mark, sk->sk_uid);
 597		} else {
 598			ip6_sk_redirect(skb, sk);
 599		}
 600		goto out;
 601	}
 602
 603	/* Tunnels don't have an application socket: don't pass errors back */
 604	if (tunnel)
 605		goto out;
 606
 607	if (!np->recverr) {
 608		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
 609			goto out;
 610	} else {
 611		ipv6_icmp_error(sk, skb, err, uh->dest, ntohl(info), (u8 *)(uh+1));
 612	}
 613
 614	sk->sk_err = err;
 615	sk->sk_error_report(sk);
 616out:
 617	return 0;
 618}
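
/* The error queued just above is delivered to the application only when
 * IPV6_RECVERR is enabled (np->recverr). A hedged userspace sketch of
 * enabling it and draining one entry from the error queue:
 *
 *	#include <sys/socket.h>
 *	#include <sys/uio.h>
 *	#include <netinet/in.h>
 *	#include <linux/errqueue.h>
 *
 *	static void drain_one_error(int fd)
 *	{
 *		int on = 1;
 *		char buf[512], cbuf[512];
 *		struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *		struct msghdr msg = {
 *			.msg_iov        = &iov,
 *			.msg_iovlen     = 1,
 *			.msg_control    = cbuf,
 *			.msg_controllen = sizeof(cbuf),
 *		};
 *		struct cmsghdr *cm;
 *
 *		setsockopt(fd, IPPROTO_IPV6, IPV6_RECVERR, &on, sizeof(on));
 *		if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
 *			return;
 *		for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
 *			if (cm->cmsg_level == IPPROTO_IPV6 &&
 *			    cm->cmsg_type == IPV6_RECVERR) {
 *				struct sock_extended_err *ee =
 *					(struct sock_extended_err *)CMSG_DATA(cm);
 *				// ee->ee_errno / ee_type / ee_code hold the ICMPv6 error
 *			}
 *		}
 *	}
 */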
 619
 620static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 621{
 622	int rc;
 623
 624	if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
 625		sock_rps_save_rxhash(sk, skb);
 626		sk_mark_napi_id(sk, skb);
 627		sk_incoming_cpu_update(sk);
 628	} else {
 629		sk_mark_napi_id_once(sk, skb);
 630	}
 631
 632	rc = __udp_enqueue_schedule_skb(sk, skb);
 633	if (rc < 0) {
 634		int is_udplite = IS_UDPLITE(sk);
 635
 636		/* Note that an ENOMEM error is charged twice */
 637		if (rc == -ENOMEM)
 638			UDP6_INC_STATS(sock_net(sk),
 639					 UDP_MIB_RCVBUFERRORS, is_udplite);
 640		UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
 641		kfree_skb(skb);
 642		return -1;
 643	}
 644
 645	return 0;
 646}
 647
 648static __inline__ int udpv6_err(struct sk_buff *skb,
 649				struct inet6_skb_parm *opt, u8 type,
 650				u8 code, int offset, __be32 info)
 651{
 652	return __udp6_lib_err(skb, opt, type, code, offset, info, &udp_table);
 653}
 654
 655static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
 656{
 657	struct udp_sock *up = udp_sk(sk);
 658	int is_udplite = IS_UDPLITE(sk);
 659
 660	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
 661		goto drop;
 662
 663	if (static_branch_unlikely(&udpv6_encap_needed_key) && up->encap_type) {
 664		int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
 665
 666		/*
 667		 * This is an encapsulation socket so pass the skb to
 668		 * the socket's udp_encap_rcv() hook. Otherwise, just
 669		 * fall through and pass this up the UDP socket.
 670		 * up->encap_rcv() returns the following value:
 671		 * =0 if skb was successfully passed to the encap
 672		 *    handler or was discarded by it.
 673		 * >0 if skb should be passed on to UDP.
 674		 * <0 if skb should be resubmitted as proto -N
 675		 */
 676
 677		/* if we're overly short, let UDP handle it */
 678		encap_rcv = READ_ONCE(up->encap_rcv);
 679		if (encap_rcv) {
 680			int ret;
 681
 682			/* Verify checksum before giving to encap */
 683			if (udp_lib_checksum_complete(skb))
 684				goto csum_error;
 685
 686			ret = encap_rcv(sk, skb);
 687			if (ret <= 0) {
 688				__UDP_INC_STATS(sock_net(sk),
 689						UDP_MIB_INDATAGRAMS,
 690						is_udplite);
 691				return -ret;
 692			}
 693		}
 694
 695		/* FALLTHROUGH -- it's a UDP Packet */
 696	}
 697
 698	/*
 699	 * UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c).
 700	 */
 701	if ((up->pcflag & UDPLITE_RECV_CC)  &&  UDP_SKB_CB(skb)->partial_cov) {
 702
 703		if (up->pcrlen == 0) {          /* full coverage was set  */
 704			net_dbg_ratelimited("UDPLITE6: partial coverage %d while full coverage %d requested\n",
 705					    UDP_SKB_CB(skb)->cscov, skb->len);
 706			goto drop;
 707		}
 708		if (UDP_SKB_CB(skb)->cscov  <  up->pcrlen) {
 709			net_dbg_ratelimited("UDPLITE6: coverage %d too small, need min %d\n",
 710					    UDP_SKB_CB(skb)->cscov, up->pcrlen);
 711			goto drop;
 712		}
 713	}
 714
 715	prefetch(&sk->sk_rmem_alloc);
 716	if (rcu_access_pointer(sk->sk_filter) &&
 717	    udp_lib_checksum_complete(skb))
 718		goto csum_error;
 719
 720	if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr)))
 721		goto drop;
 722
 723	udp_csum_pull_header(skb);
 724
 725	skb_dst_drop(skb);
 726
 727	return __udpv6_queue_rcv_skb(sk, skb);
 728
 729csum_error:
 730	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
 731drop:
 732	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
 733	atomic_inc(&sk->sk_drops);
 734	kfree_skb(skb);
 735	return -1;
 736}
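
/* The encap_rcv() hook dispatched above is how UDP tunnel drivers plug
 * into this receive path. A hedged kernel-side sketch using the
 * udp_tunnel helpers; the my_* names are illustrative:
 *
 *	#include <net/udp_tunnel.h>
 *
 *	static int my_encap_rcv(struct sock *sk, struct sk_buff *skb)
 *	{
 *		// consume the skb and return 0; returning >0 would hand
 *		// it back to the normal UDP path above
 *		kfree_skb(skb);
 *		return 0;
 *	}
 *
 *	static void my_tunnel_setup(struct net *net, struct socket *sock)
 *	{
 *		struct udp_tunnel_sock_cfg cfg = {
 *			.encap_type = 1,
 *			.encap_rcv  = my_encap_rcv,
 *		};
 *
 *		setup_udp_tunnel_sock(net, sock, &cfg);
 *	}
 */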
 737
 738static int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 739{
 740	struct sk_buff *next, *segs;
 741	int ret;
 742
 743	if (likely(!udp_unexpected_gso(sk, skb)))
 744		return udpv6_queue_rcv_one_skb(sk, skb);
 745
 746	__skb_push(skb, -skb_mac_offset(skb));
 747	segs = udp_rcv_segment(sk, skb, false);
 748	skb_list_walk_safe(segs, skb, next) {
 749		__skb_pull(skb, skb_transport_offset(skb));
 750
 751		ret = udpv6_queue_rcv_one_skb(sk, skb);
 752		if (ret > 0)
 753			ip6_protocol_deliver_rcu(dev_net(skb->dev), skb, ret,
 754						 true);
 755	}
 756	return 0;
 757}
 758
 759static bool __udp_v6_is_mcast_sock(struct net *net, struct sock *sk,
 760				   __be16 loc_port, const struct in6_addr *loc_addr,
 761				   __be16 rmt_port, const struct in6_addr *rmt_addr,
 762				   int dif, int sdif, unsigned short hnum)
 763{
 764	struct inet_sock *inet = inet_sk(sk);
 765
 766	if (!net_eq(sock_net(sk), net))
 767		return false;
 768
 769	if (udp_sk(sk)->udp_port_hash != hnum ||
 770	    sk->sk_family != PF_INET6 ||
 771	    (inet->inet_dport && inet->inet_dport != rmt_port) ||
 772	    (!ipv6_addr_any(&sk->sk_v6_daddr) &&
 773		    !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr)) ||
 774	    !udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif) ||
 775	    (!ipv6_addr_any(&sk->sk_v6_rcv_saddr) &&
 776		    !ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr)))
 777		return false;
 778	if (!inet6_mc_check(sk, loc_addr, rmt_addr))
 779		return false;
 780	return true;
 781}
 782
 783static void udp6_csum_zero_error(struct sk_buff *skb)
 784{
 785	/* RFC 2460 section 8.1 says that we SHOULD log
 786	 * this error. Well, it is reasonable.
 787	 */
 788	net_dbg_ratelimited("IPv6: udp checksum is 0 for [%pI6c]:%u->[%pI6c]:%u\n",
 789			    &ipv6_hdr(skb)->saddr, ntohs(udp_hdr(skb)->source),
 790			    &ipv6_hdr(skb)->daddr, ntohs(udp_hdr(skb)->dest));
 791}
 792
 793/*
 794 * Note: called only from the BH handler context,
 795 * so we don't need to lock the hashes.
 796 */
 797static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
 798		const struct in6_addr *saddr, const struct in6_addr *daddr,
 799		struct udp_table *udptable, int proto)
 800{
 801	struct sock *sk, *first = NULL;
 802	const struct udphdr *uh = udp_hdr(skb);
 803	unsigned short hnum = ntohs(uh->dest);
 804	struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
 805	unsigned int offset = offsetof(typeof(*sk), sk_node);
 806	unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
 807	int dif = inet6_iif(skb);
 808	int sdif = inet6_sdif(skb);
 809	struct hlist_node *node;
 810	struct sk_buff *nskb;
 811
 812	if (use_hash2) {
 813		hash2_any = ipv6_portaddr_hash(net, &in6addr_any, hnum) &
 814			    udptable->mask;
 815		hash2 = ipv6_portaddr_hash(net, daddr, hnum) & udptable->mask;
 816start_lookup:
 817		hslot = &udptable->hash2[hash2];
 818		offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
 819	}
 820
 821	sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) {
 822		if (!__udp_v6_is_mcast_sock(net, sk, uh->dest, daddr,
 823					    uh->source, saddr, dif, sdif,
 824					    hnum))
 825			continue;
 826		/* If zero checksum and no_check is not on for
 827		 * the socket then skip it.
 828		 */
 829		if (!uh->check && !udp_sk(sk)->no_check6_rx)
 830			continue;
 831		if (!first) {
 832			first = sk;
 833			continue;
 834		}
 835		nskb = skb_clone(skb, GFP_ATOMIC);
 836		if (unlikely(!nskb)) {
 837			atomic_inc(&sk->sk_drops);
 838			__UDP6_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
 839					 IS_UDPLITE(sk));
 840			__UDP6_INC_STATS(net, UDP_MIB_INERRORS,
 841					 IS_UDPLITE(sk));
 842			continue;
 843		}
 844
 845		if (udpv6_queue_rcv_skb(sk, nskb) > 0)
 846			consume_skb(nskb);
 847	}
 848
 849	/* Also lookup *:port if we are using hash2 and haven't done so yet. */
 850	if (use_hash2 && hash2 != hash2_any) {
 851		hash2 = hash2_any;
 852		goto start_lookup;
 853	}
 854
 855	if (first) {
 856		if (udpv6_queue_rcv_skb(first, skb) > 0)
 857			consume_skb(skb);
 858	} else {
 859		kfree_skb(skb);
 860		__UDP6_INC_STATS(net, UDP_MIB_IGNOREDMULTI,
 861				 proto == IPPROTO_UDPLITE);
 862	}
 863	return 0;
 864}
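
/* Datagrams only pass the inet6_mc_check() test in
 * __udp_v6_is_mcast_sock() for groups the socket has joined.
 * Illustrative userspace join (sketch; error handling omitted):
 *
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *
 *	static int join_group(int fd, const struct in6_addr *grp,
 *			      unsigned int ifindex)
 *	{
 *		struct ipv6_mreq mreq = {
 *			.ipv6mr_multiaddr = *grp,
 *			.ipv6mr_interface = ifindex,
 *		};
 *
 *		return setsockopt(fd, IPPROTO_IPV6, IPV6_JOIN_GROUP,
 *				  &mreq, sizeof(mreq));
 *	}
 */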
 865
 866static void udp6_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
 867{
 868	if (udp_sk_rx_dst_set(sk, dst)) {
 869		const struct rt6_info *rt = (const struct rt6_info *)dst;
 870
 871		inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
 872	}
 873}
 874
  875/* wrapper for udp_queue_rcv_skb taking care of csum conversion and
 876 * return code conversion for ip layer consumption
 877 */
 878static int udp6_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
 879				struct udphdr *uh)
 880{
 881	int ret;
 882
 883	if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
 884		skb_checksum_try_convert(skb, IPPROTO_UDP, ip6_compute_pseudo);
 885
 886	ret = udpv6_queue_rcv_skb(sk, skb);
 887
 888	/* a return value > 0 means to resubmit the input */
 889	if (ret > 0)
 890		return ret;
 891	return 0;
 892}
 893
 894int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
 895		   int proto)
 896{
 897	const struct in6_addr *saddr, *daddr;
 898	struct net *net = dev_net(skb->dev);
 899	struct udphdr *uh;
 900	struct sock *sk;
 901	bool refcounted;
 902	u32 ulen = 0;
 903
 904	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
 905		goto discard;
 906
 907	saddr = &ipv6_hdr(skb)->saddr;
 908	daddr = &ipv6_hdr(skb)->daddr;
 909	uh = udp_hdr(skb);
 910
 911	ulen = ntohs(uh->len);
 912	if (ulen > skb->len)
 913		goto short_packet;
 914
 915	if (proto == IPPROTO_UDP) {
 916		/* UDP validates ulen. */
 917
 918		/* Check for jumbo payload */
 919		if (ulen == 0)
 920			ulen = skb->len;
 921
 922		if (ulen < sizeof(*uh))
 923			goto short_packet;
 924
 925		if (ulen < skb->len) {
 926			if (pskb_trim_rcsum(skb, ulen))
 927				goto short_packet;
 928			saddr = &ipv6_hdr(skb)->saddr;
 929			daddr = &ipv6_hdr(skb)->daddr;
 930			uh = udp_hdr(skb);
 931		}
 932	}
 933
 934	if (udp6_csum_init(skb, uh, proto))
 935		goto csum_error;
 936
 937	/* Check if the socket is already available, e.g. due to early demux */
 938	sk = skb_steal_sock(skb, &refcounted);
 939	if (sk) {
 940		struct dst_entry *dst = skb_dst(skb);
 941		int ret;
 942
 943		if (unlikely(sk->sk_rx_dst != dst))
 944			udp6_sk_rx_dst_set(sk, dst);
 945
 946		if (!uh->check && !udp_sk(sk)->no_check6_rx) {
 947			if (refcounted)
 948				sock_put(sk);
 949			goto report_csum_error;
 950		}
 951
 952		ret = udp6_unicast_rcv_skb(sk, skb, uh);
 953		if (refcounted)
 954			sock_put(sk);
 955		return ret;
 956	}
 957
 958	/*
 959	 *	Multicast receive code
 960	 */
 961	if (ipv6_addr_is_multicast(daddr))
 962		return __udp6_lib_mcast_deliver(net, skb,
 963				saddr, daddr, udptable, proto);
 964
 965	/* Unicast */
 966	sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
 967	if (sk) {
 968		if (!uh->check && !udp_sk(sk)->no_check6_rx)
 969			goto report_csum_error;
 970		return udp6_unicast_rcv_skb(sk, skb, uh);
 971	}
 972
 973	if (!uh->check)
 974		goto report_csum_error;
 975
 976	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
 977		goto discard;
 978
 979	if (udp_lib_checksum_complete(skb))
 980		goto csum_error;
 981
 982	__UDP6_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
 983	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
 984
 985	kfree_skb(skb);
 986	return 0;
 987
 988short_packet:
 989	net_dbg_ratelimited("UDP%sv6: short packet: From [%pI6c]:%u %d/%d to [%pI6c]:%u\n",
 990			    proto == IPPROTO_UDPLITE ? "-Lite" : "",
 991			    saddr, ntohs(uh->source),
 992			    ulen, skb->len,
 993			    daddr, ntohs(uh->dest));
 994	goto discard;
 995
 996report_csum_error:
 997	udp6_csum_zero_error(skb);
 998csum_error:
 999	__UDP6_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
1000discard:
1001	__UDP6_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
1002	kfree_skb(skb);
1003	return 0;
1004}
1005
1006
1007static struct sock *__udp6_lib_demux_lookup(struct net *net,
1008			__be16 loc_port, const struct in6_addr *loc_addr,
1009			__be16 rmt_port, const struct in6_addr *rmt_addr,
1010			int dif, int sdif)
1011{
1012	unsigned short hnum = ntohs(loc_port);
1013	unsigned int hash2 = ipv6_portaddr_hash(net, loc_addr, hnum);
1014	unsigned int slot2 = hash2 & udp_table.mask;
1015	struct udp_hslot *hslot2 = &udp_table.hash2[slot2];
1016	const __portpair ports = INET_COMBINED_PORTS(rmt_port, hnum);
1017	struct sock *sk;
1018
1019	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
1020		if (sk->sk_state == TCP_ESTABLISHED &&
1021		    INET6_MATCH(sk, net, rmt_addr, loc_addr, ports, dif, sdif))
1022			return sk;
1023		/* Only check first socket in chain */
1024		break;
1025	}
1026	return NULL;
1027}
1028
1029INDIRECT_CALLABLE_SCOPE void udp_v6_early_demux(struct sk_buff *skb)
1030{
1031	struct net *net = dev_net(skb->dev);
1032	const struct udphdr *uh;
1033	struct sock *sk;
1034	struct dst_entry *dst;
1035	int dif = skb->dev->ifindex;
1036	int sdif = inet6_sdif(skb);
1037
1038	if (!pskb_may_pull(skb, skb_transport_offset(skb) +
1039	    sizeof(struct udphdr)))
1040		return;
1041
1042	uh = udp_hdr(skb);
1043
1044	if (skb->pkt_type == PACKET_HOST)
1045		sk = __udp6_lib_demux_lookup(net, uh->dest,
1046					     &ipv6_hdr(skb)->daddr,
1047					     uh->source, &ipv6_hdr(skb)->saddr,
1048					     dif, sdif);
1049	else
1050		return;
1051
1052	if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
1053		return;
1054
1055	skb->sk = sk;
1056	skb->destructor = sock_efree;
1057	dst = READ_ONCE(sk->sk_rx_dst);
1058
1059	if (dst)
1060		dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
1061	if (dst) {
1062		/* set noref for now.
1063		 * any place which wants to hold dst has to call
1064		 * dst_hold_safe()
1065		 */
1066		skb_dst_set_noref(skb, dst);
1067	}
1068}
1069
1070INDIRECT_CALLABLE_SCOPE int udpv6_rcv(struct sk_buff *skb)
1071{
1072	return __udp6_lib_rcv(skb, &udp_table, IPPROTO_UDP);
1073}
1074
1075/*
1076 * Throw away all pending data and cancel the corking. Socket is locked.
1077 */
1078static void udp_v6_flush_pending_frames(struct sock *sk)
1079{
1080	struct udp_sock *up = udp_sk(sk);
1081
1082	if (up->pending == AF_INET)
1083		udp_flush_pending_frames(sk);
1084	else if (up->pending) {
1085		up->len = 0;
1086		up->pending = 0;
1087		ip6_flush_pending_frames(sk);
1088	}
1089}
1090
1091static int udpv6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
1092			     int addr_len)
1093{
1094	if (addr_len < offsetofend(struct sockaddr, sa_family))
1095		return -EINVAL;
1096	/* The following checks are replicated from __ip6_datagram_connect()
1097	 * and intended to prevent BPF program called below from accessing
1098	 * bytes that are out of the bound specified by user in addr_len.
1099	 */
1100	if (uaddr->sa_family == AF_INET) {
1101		if (__ipv6_only_sock(sk))
1102			return -EAFNOSUPPORT;
1103		return udp_pre_connect(sk, uaddr, addr_len);
1104	}
1105
1106	if (addr_len < SIN6_LEN_RFC2133)
1107		return -EINVAL;
1108
1109	return BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr);
1110}
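
/* connect() on a UDPv6 socket flows through udpv6_pre_connect() above
 * and then __ip6_datagram_connect(); once connected, plain send()/recv()
 * use the faster connected-socket paths. Illustrative sketch:
 *
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <stdint.h>
 *
 *	static int connect_udp6(int fd, const struct in6_addr *dst,
 *				uint16_t port)
 *	{
 *		struct sockaddr_in6 a = {
 *			.sin6_family = AF_INET6,
 *			.sin6_addr   = *dst,
 *			.sin6_port   = htons(port),
 *		};
 *
 *		return connect(fd, (struct sockaddr *)&a, sizeof(a));
 *	}
 */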
1111
1112/**
1113 *	udp6_hwcsum_outgoing  -  handle outgoing HW checksumming
1114 *	@sk:	socket we are sending on
1115 *	@skb:	sk_buff containing the filled-in UDP header
1116 *		(checksum field must be zeroed out)
1117 *	@saddr: source address
1118 *	@daddr: destination address
1119 *	@len:	length of packet
1120 */
1121static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
1122				 const struct in6_addr *saddr,
1123				 const struct in6_addr *daddr, int len)
1124{
1125	unsigned int offset;
1126	struct udphdr *uh = udp_hdr(skb);
1127	struct sk_buff *frags = skb_shinfo(skb)->frag_list;
1128	__wsum csum = 0;
1129
1130	if (!frags) {
1131		/* Only one fragment on the socket.  */
1132		skb->csum_start = skb_transport_header(skb) - skb->head;
1133		skb->csum_offset = offsetof(struct udphdr, check);
1134		uh->check = ~csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP, 0);
1135	} else {
1136		/*
 1137		 * HW checksum won't work since there are two or more
 1138		 * fragments on the socket, so the checksums of all
 1139		 * sk_buffs must be combined
1140		 */
1141		offset = skb_transport_offset(skb);
1142		skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
1143		csum = skb->csum;
1144
1145		skb->ip_summed = CHECKSUM_NONE;
1146
1147		do {
1148			csum = csum_add(csum, frags->csum);
1149		} while ((frags = frags->next));
1150
1151		uh->check = csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP,
1152					    csum);
1153		if (uh->check == 0)
1154			uh->check = CSUM_MANGLED_0;
1155	}
1156}
1157
1158/*
1159 *	Sending
1160 */
1161
1162static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
1163			   struct inet_cork *cork)
1164{
1165	struct sock *sk = skb->sk;
1166	struct udphdr *uh;
1167	int err = 0;
1168	int is_udplite = IS_UDPLITE(sk);
1169	__wsum csum = 0;
1170	int offset = skb_transport_offset(skb);
1171	int len = skb->len - offset;
1172	int datalen = len - sizeof(*uh);
1173
1174	/*
1175	 * Create a UDP header
1176	 */
1177	uh = udp_hdr(skb);
1178	uh->source = fl6->fl6_sport;
1179	uh->dest = fl6->fl6_dport;
1180	uh->len = htons(len);
1181	uh->check = 0;
1182
1183	if (cork->gso_size) {
1184		const int hlen = skb_network_header_len(skb) +
1185				 sizeof(struct udphdr);
1186
1187		if (hlen + cork->gso_size > cork->fragsize) {
1188			kfree_skb(skb);
1189			return -EINVAL;
1190		}
1191		if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) {
1192			kfree_skb(skb);
1193			return -EINVAL;
1194		}
1195		if (udp_sk(sk)->no_check6_tx) {
1196			kfree_skb(skb);
1197			return -EINVAL;
1198		}
1199		if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
1200		    dst_xfrm(skb_dst(skb))) {
1201			kfree_skb(skb);
1202			return -EIO;
1203		}
1204
1205		if (datalen > cork->gso_size) {
1206			skb_shinfo(skb)->gso_size = cork->gso_size;
1207			skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
1208			skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen,
1209								 cork->gso_size);
1210		}
1211		goto csum_partial;
1212	}
1213
1214	if (is_udplite)
1215		csum = udplite_csum(skb);
1216	else if (udp_sk(sk)->no_check6_tx) {   /* UDP csum disabled */
1217		skb->ip_summed = CHECKSUM_NONE;
1218		goto send;
1219	} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
1220csum_partial:
1221		udp6_hwcsum_outgoing(sk, skb, &fl6->saddr, &fl6->daddr, len);
1222		goto send;
1223	} else
1224		csum = udp_csum(skb);
1225
1226	/* add protocol-dependent pseudo-header */
1227	uh->check = csum_ipv6_magic(&fl6->saddr, &fl6->daddr,
1228				    len, fl6->flowi6_proto, csum);
1229	if (uh->check == 0)
1230		uh->check = CSUM_MANGLED_0;
1231
1232send:
1233	err = ip6_send_skb(skb);
1234	if (err) {
1235		if (err == -ENOBUFS && !inet6_sk(sk)->recverr) {
1236			UDP6_INC_STATS(sock_net(sk),
1237				       UDP_MIB_SNDBUFERRORS, is_udplite);
1238			err = 0;
1239		}
1240	} else {
1241		UDP6_INC_STATS(sock_net(sk),
1242			       UDP_MIB_OUTDATAGRAMS, is_udplite);
1243	}
1244	return err;
1245}
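
/* The cork->gso_size checks above implement UDP_SEGMENT (UDP GSO): one
 * large write is split by the stack into gso_size-byte datagrams. A
 * hedged userspace sketch (error handling omitted):
 *
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <linux/udp.h>	// UDP_SEGMENT
 *
 *	static void send_gso(int fd, const void *buf, size_t len)
 *	{
 *		int gso = 1200;	// payload bytes per resulting datagram
 *
 *		setsockopt(fd, IPPROTO_UDP, UDP_SEGMENT, &gso, sizeof(gso));
 *		send(fd, buf, len, 0);
 *	}
 */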
1246
1247static int udp_v6_push_pending_frames(struct sock *sk)
1248{
1249	struct sk_buff *skb;
1250	struct udp_sock  *up = udp_sk(sk);
1251	struct flowi6 fl6;
1252	int err = 0;
1253
1254	if (up->pending == AF_INET)
1255		return udp_push_pending_frames(sk);
1256
1257	/* ip6_finish_skb will release the cork, so make a copy of
1258	 * fl6 here.
1259	 */
1260	fl6 = inet_sk(sk)->cork.fl.u.ip6;
1261
1262	skb = ip6_finish_skb(sk);
1263	if (!skb)
1264		goto out;
1265
1266	err = udp_v6_send_skb(skb, &fl6, &inet_sk(sk)->cork.base);
1267
1268out:
1269	up->len = 0;
1270	up->pending = 0;
1271	return err;
1272}
1273
1274int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
1275{
1276	struct ipv6_txoptions opt_space;
1277	struct udp_sock *up = udp_sk(sk);
1278	struct inet_sock *inet = inet_sk(sk);
1279	struct ipv6_pinfo *np = inet6_sk(sk);
1280	DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
1281	struct in6_addr *daddr, *final_p, final;
1282	struct ipv6_txoptions *opt = NULL;
1283	struct ipv6_txoptions *opt_to_free = NULL;
1284	struct ip6_flowlabel *flowlabel = NULL;
1285	struct flowi6 fl6;
1286	struct dst_entry *dst;
1287	struct ipcm6_cookie ipc6;
1288	int addr_len = msg->msg_namelen;
1289	bool connected = false;
1290	int ulen = len;
1291	int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
1292	int err;
1293	int is_udplite = IS_UDPLITE(sk);
1294	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
1295
1296	ipcm6_init(&ipc6);
1297	ipc6.gso_size = up->gso_size;
1298	ipc6.sockc.tsflags = sk->sk_tsflags;
1299	ipc6.sockc.mark = sk->sk_mark;
1300
1301	/* destination address check */
1302	if (sin6) {
1303		if (addr_len < offsetof(struct sockaddr, sa_data))
1304			return -EINVAL;
1305
1306		switch (sin6->sin6_family) {
1307		case AF_INET6:
1308			if (addr_len < SIN6_LEN_RFC2133)
1309				return -EINVAL;
1310			daddr = &sin6->sin6_addr;
1311			if (ipv6_addr_any(daddr) &&
1312			    ipv6_addr_v4mapped(&np->saddr))
1313				ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
1314						       daddr);
1315			break;
1316		case AF_INET:
1317			goto do_udp_sendmsg;
1318		case AF_UNSPEC:
1319			msg->msg_name = sin6 = NULL;
1320			msg->msg_namelen = addr_len = 0;
1321			daddr = NULL;
1322			break;
1323		default:
1324			return -EINVAL;
1325		}
1326	} else if (!up->pending) {
1327		if (sk->sk_state != TCP_ESTABLISHED)
1328			return -EDESTADDRREQ;
1329		daddr = &sk->sk_v6_daddr;
1330	} else
1331		daddr = NULL;
1332
1333	if (daddr) {
1334		if (ipv6_addr_v4mapped(daddr)) {
1335			struct sockaddr_in sin;
1336			sin.sin_family = AF_INET;
1337			sin.sin_port = sin6 ? sin6->sin6_port : inet->inet_dport;
1338			sin.sin_addr.s_addr = daddr->s6_addr32[3];
1339			msg->msg_name = &sin;
1340			msg->msg_namelen = sizeof(sin);
1341do_udp_sendmsg:
1342			if (__ipv6_only_sock(sk))
1343				return -ENETUNREACH;
1344			return udp_sendmsg(sk, msg, len);
1345		}
1346	}
1347
1348	if (up->pending == AF_INET)
1349		return udp_sendmsg(sk, msg, len);
1350
1351	/* Rough check on arithmetic overflow,
1352	   better check is made in ip6_append_data().
1353	   */
1354	if (len > INT_MAX - sizeof(struct udphdr))
1355		return -EMSGSIZE;
1356
1357	getfrag  =  is_udplite ?  udplite_getfrag : ip_generic_getfrag;
1358	if (up->pending) {
1359		/*
1360		 * There are pending frames.
1361		 * The socket lock must be held while it's corked.
1362		 */
1363		lock_sock(sk);
1364		if (likely(up->pending)) {
1365			if (unlikely(up->pending != AF_INET6)) {
1366				release_sock(sk);
1367				return -EAFNOSUPPORT;
1368			}
1369			dst = NULL;
1370			goto do_append_data;
1371		}
1372		release_sock(sk);
1373	}
1374	ulen += sizeof(struct udphdr);
1375
1376	memset(&fl6, 0, sizeof(fl6));
1377
1378	if (sin6) {
1379		if (sin6->sin6_port == 0)
1380			return -EINVAL;
1381
1382		fl6.fl6_dport = sin6->sin6_port;
1383		daddr = &sin6->sin6_addr;
1384
1385		if (np->sndflow) {
1386			fl6.flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK;
1387			if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
1388				flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
1389				if (IS_ERR(flowlabel))
1390					return -EINVAL;
1391			}
1392		}
1393
1394		/*
1395		 * Otherwise it will be difficult to maintain
1396		 * sk->sk_dst_cache.
1397		 */
1398		if (sk->sk_state == TCP_ESTABLISHED &&
1399		    ipv6_addr_equal(daddr, &sk->sk_v6_daddr))
1400			daddr = &sk->sk_v6_daddr;
1401
1402		if (addr_len >= sizeof(struct sockaddr_in6) &&
1403		    sin6->sin6_scope_id &&
1404		    __ipv6_addr_needs_scope_id(__ipv6_addr_type(daddr)))
1405			fl6.flowi6_oif = sin6->sin6_scope_id;
1406	} else {
1407		if (sk->sk_state != TCP_ESTABLISHED)
1408			return -EDESTADDRREQ;
1409
1410		fl6.fl6_dport = inet->inet_dport;
1411		daddr = &sk->sk_v6_daddr;
1412		fl6.flowlabel = np->flow_label;
1413		connected = true;
1414	}
1415
1416	if (!fl6.flowi6_oif)
1417		fl6.flowi6_oif = sk->sk_bound_dev_if;
1418
1419	if (!fl6.flowi6_oif)
1420		fl6.flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
1421
1422	fl6.flowi6_mark = ipc6.sockc.mark;
1423	fl6.flowi6_uid = sk->sk_uid;
1424
1425	if (msg->msg_controllen) {
1426		opt = &opt_space;
1427		memset(opt, 0, sizeof(struct ipv6_txoptions));
1428		opt->tot_len = sizeof(*opt);
1429		ipc6.opt = opt;
1430
1431		err = udp_cmsg_send(sk, msg, &ipc6.gso_size);
1432		if (err > 0)
1433			err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6,
1434						    &ipc6);
1435		if (err < 0) {
1436			fl6_sock_release(flowlabel);
1437			return err;
1438		}
1439		if ((fl6.flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) {
1440			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
1441			if (IS_ERR(flowlabel))
1442				return -EINVAL;
1443		}
1444		if (!(opt->opt_nflen|opt->opt_flen))
1445			opt = NULL;
1446		connected = false;
1447	}
1448	if (!opt) {
1449		opt = txopt_get(np);
1450		opt_to_free = opt;
1451	}
1452	if (flowlabel)
1453		opt = fl6_merge_options(&opt_space, flowlabel, opt);
1454	opt = ipv6_fixup_options(&opt_space, opt);
1455	ipc6.opt = opt;
1456
1457	fl6.flowi6_proto = sk->sk_protocol;
1458	fl6.daddr = *daddr;
1459	if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr))
1460		fl6.saddr = np->saddr;
1461	fl6.fl6_sport = inet->inet_sport;
1462
1463	if (cgroup_bpf_enabled && !connected) {
1464		err = BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk,
1465					   (struct sockaddr *)sin6, &fl6.saddr);
1466		if (err)
1467			goto out_no_dst;
1468		if (sin6) {
1469			if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
1470				/* BPF program rewrote IPv6-only by IPv4-mapped
1471				 * IPv6. It's currently unsupported.
1472				 */
1473				err = -ENOTSUPP;
1474				goto out_no_dst;
1475			}
1476			if (sin6->sin6_port == 0) {
1477				/* BPF program set invalid port. Reject it. */
1478				err = -EINVAL;
1479				goto out_no_dst;
1480			}
1481			fl6.fl6_dport = sin6->sin6_port;
1482			fl6.daddr = sin6->sin6_addr;
1483		}
1484	}
1485
1486	if (ipv6_addr_any(&fl6.daddr))
1487		fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
1488
1489	final_p = fl6_update_dst(&fl6, opt, &final);
1490	if (final_p)
1491		connected = false;
1492
1493	if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr)) {
1494		fl6.flowi6_oif = np->mcast_oif;
1495		connected = false;
1496	} else if (!fl6.flowi6_oif)
1497		fl6.flowi6_oif = np->ucast_oif;
1498
1499	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
1500
1501	if (ipc6.tclass < 0)
1502		ipc6.tclass = np->tclass;
1503
1504	fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel);
1505
1506	dst = ip6_sk_dst_lookup_flow(sk, &fl6, final_p, connected);
1507	if (IS_ERR(dst)) {
1508		err = PTR_ERR(dst);
1509		dst = NULL;
1510		goto out;
1511	}
1512
1513	if (ipc6.hlimit < 0)
1514		ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
1515
1516	if (msg->msg_flags&MSG_CONFIRM)
1517		goto do_confirm;
1518back_from_confirm:
1519
1520	/* Lockless fast path for the non-corking case */
1521	if (!corkreq) {
1522		struct inet_cork_full cork;
1523		struct sk_buff *skb;
1524
1525		skb = ip6_make_skb(sk, getfrag, msg, ulen,
1526				   sizeof(struct udphdr), &ipc6,
1527				   &fl6, (struct rt6_info *)dst,
1528				   msg->msg_flags, &cork);
1529		err = PTR_ERR(skb);
1530		if (!IS_ERR_OR_NULL(skb))
1531			err = udp_v6_send_skb(skb, &fl6, &cork.base);
1532		goto out;
1533	}
1534
1535	lock_sock(sk);
1536	if (unlikely(up->pending)) {
1537		/* The socket is already corked while preparing it. */
1538		/* ... which is an evident application bug. --ANK */
1539		release_sock(sk);
1540
1541		net_dbg_ratelimited("udp cork app bug 2\n");
1542		err = -EINVAL;
1543		goto out;
1544	}
1545
1546	up->pending = AF_INET6;
1547
1548do_append_data:
1549	if (ipc6.dontfrag < 0)
1550		ipc6.dontfrag = np->dontfrag;
1551	up->len += ulen;
1552	err = ip6_append_data(sk, getfrag, msg, ulen, sizeof(struct udphdr),
1553			      &ipc6, &fl6, (struct rt6_info *)dst,
1554			      corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
1555	if (err)
1556		udp_v6_flush_pending_frames(sk);
1557	else if (!corkreq)
1558		err = udp_v6_push_pending_frames(sk);
1559	else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
1560		up->pending = 0;
1561
1562	if (err > 0)
1563		err = np->recverr ? net_xmit_errno(err) : 0;
1564	release_sock(sk);
1565
1566out:
1567	dst_release(dst);
1568out_no_dst:
1569	fl6_sock_release(flowlabel);
1570	txopt_put(opt_to_free);
1571	if (!err)
1572		return len;
1573	/*
1574	 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space.  Reporting
1575	 * ENOBUFS might not be good (it's not tunable per se), but otherwise
1576	 * we don't have a good statistic (IpOutDiscards but it can be too many
1577	 * things).  We could add another new stat but at least for now that
1578	 * seems like overkill.
1579	 */
1580	if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
1581		UDP6_INC_STATS(sock_net(sk),
1582			       UDP_MIB_SNDBUFERRORS, is_udplite);
1583	}
1584	return err;
1585
1586do_confirm:
1587	if (msg->msg_flags & MSG_PROBE)
1588		dst_confirm_neigh(dst, &fl6.daddr);
1589	if (!(msg->msg_flags&MSG_PROBE) || len)
1590		goto back_from_confirm;
1591	err = 0;
1592	goto out;
1593}
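
/* The up->pending logic above implements corking: with MSG_MORE the
 * payload is appended via ip6_append_data() and only leaves as a single
 * datagram on the final send. Illustrative sketch on a connected socket:
 *
 *	#include <sys/socket.h>
 *
 *	static void send_in_two_chunks(int fd, const void *a, size_t alen,
 *				       const void *b, size_t blen)
 *	{
 *		send(fd, a, alen, MSG_MORE);	// corked, queued in sk_write_queue
 *		send(fd, b, blen, 0);		// pushes out one datagram
 *	}
 */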
1594
1595void udpv6_destroy_sock(struct sock *sk)
1596{
1597	struct udp_sock *up = udp_sk(sk);
1598	lock_sock(sk);
1599	udp_v6_flush_pending_frames(sk);
1600	release_sock(sk);
1601
1602	if (static_branch_unlikely(&udpv6_encap_needed_key)) {
1603		if (up->encap_type) {
1604			void (*encap_destroy)(struct sock *sk);
1605			encap_destroy = READ_ONCE(up->encap_destroy);
1606			if (encap_destroy)
1607				encap_destroy(sk);
1608		}
1609		if (up->encap_enabled)
1610			static_branch_dec(&udpv6_encap_needed_key);
1611	}
1612
1613	inet6_destroy_sock(sk);
1614}
1615
1616/*
1617 *	Socket option code for UDP
1618 */
1619int udpv6_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
1620		     unsigned int optlen)
1621{
1622	if (level == SOL_UDP  ||  level == SOL_UDPLITE)
1623		return udp_lib_setsockopt(sk, level, optname,
1624					  optval, optlen,
1625					  udp_v6_push_pending_frames);
1626	return ipv6_setsockopt(sk, level, optname, optval, optlen);
1627}
1628
1629int udpv6_getsockopt(struct sock *sk, int level, int optname,
1630		     char __user *optval, int __user *optlen)
1631{
1632	if (level == SOL_UDP  ||  level == SOL_UDPLITE)
1633		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
1634	return ipv6_getsockopt(sk, level, optname, optval, optlen);
1635}
1636
1637/* thinking of making this const? Don't.
1638 * early_demux can change based on sysctl.
1639 */
1640static struct inet6_protocol udpv6_protocol = {
1641	.early_demux	=	udp_v6_early_demux,
1642	.early_demux_handler =  udp_v6_early_demux,
1643	.handler	=	udpv6_rcv,
1644	.err_handler	=	udpv6_err,
1645	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
1646};
1647
1648/* ------------------------------------------------------------------------ */
1649#ifdef CONFIG_PROC_FS
1650int udp6_seq_show(struct seq_file *seq, void *v)
1651{
1652	if (v == SEQ_START_TOKEN) {
1653		seq_puts(seq, IPV6_SEQ_DGRAM_HEADER);
1654	} else {
1655		int bucket = ((struct udp_iter_state *)seq->private)->bucket;
1656		struct inet_sock *inet = inet_sk(v);
1657		__u16 srcp = ntohs(inet->inet_sport);
1658		__u16 destp = ntohs(inet->inet_dport);
1659		__ip6_dgram_sock_seq_show(seq, v, srcp, destp,
1660					  udp_rqueue_get(v), bucket);
1661	}
1662	return 0;
1663}
1664
1665const struct seq_operations udp6_seq_ops = {
1666	.start		= udp_seq_start,
1667	.next		= udp_seq_next,
1668	.stop		= udp_seq_stop,
1669	.show		= udp6_seq_show,
1670};
1671EXPORT_SYMBOL(udp6_seq_ops);
1672
1673static struct udp_seq_afinfo udp6_seq_afinfo = {
1674	.family		= AF_INET6,
1675	.udp_table	= &udp_table,
1676};
1677
1678int __net_init udp6_proc_init(struct net *net)
1679{
1680	if (!proc_create_net_data("udp6", 0444, net->proc_net, &udp6_seq_ops,
1681			sizeof(struct udp_iter_state), &udp6_seq_afinfo))
1682		return -ENOMEM;
1683	return 0;
1684}
1685
1686void udp6_proc_exit(struct net *net)
1687{
1688	remove_proc_entry("udp6", net->proc_net);
1689}
1690#endif /* CONFIG_PROC_FS */
1691
1692/* ------------------------------------------------------------------------ */
1693
1694struct proto udpv6_prot = {
1695	.name			= "UDPv6",
1696	.owner			= THIS_MODULE,
1697	.close			= udp_lib_close,
1698	.pre_connect		= udpv6_pre_connect,
1699	.connect		= ip6_datagram_connect,
1700	.disconnect		= udp_disconnect,
1701	.ioctl			= udp_ioctl,
1702	.init			= udp_init_sock,
1703	.destroy		= udpv6_destroy_sock,
1704	.setsockopt		= udpv6_setsockopt,
1705	.getsockopt		= udpv6_getsockopt,
1706	.sendmsg		= udpv6_sendmsg,
1707	.recvmsg		= udpv6_recvmsg,
1708	.release_cb		= ip6_datagram_release_cb,
1709	.hash			= udp_lib_hash,
1710	.unhash			= udp_lib_unhash,
1711	.rehash			= udp_v6_rehash,
1712	.get_port		= udp_v6_get_port,
1713	.memory_allocated	= &udp_memory_allocated,
1714	.sysctl_mem		= sysctl_udp_mem,
1715	.sysctl_wmem_offset     = offsetof(struct net, ipv4.sysctl_udp_wmem_min),
1716	.sysctl_rmem_offset     = offsetof(struct net, ipv4.sysctl_udp_rmem_min),
1717	.obj_size		= sizeof(struct udp6_sock),
1718	.h.udp_table		= &udp_table,
1719	.diag_destroy		= udp_abort,
1720};
1721
1722static struct inet_protosw udpv6_protosw = {
1723	.type =      SOCK_DGRAM,
1724	.protocol =  IPPROTO_UDP,
1725	.prot =      &udpv6_prot,
1726	.ops =       &inet6_dgram_ops,
1727	.flags =     INET_PROTOSW_PERMANENT,
1728};
1729
1730int __init udpv6_init(void)
1731{
1732	int ret;
1733
1734	ret = inet6_add_protocol(&udpv6_protocol, IPPROTO_UDP);
1735	if (ret)
1736		goto out;
1737
1738	ret = inet6_register_protosw(&udpv6_protosw);
1739	if (ret)
1740		goto out_udpv6_protocol;
1741out:
1742	return ret;
1743
1744out_udpv6_protocol:
1745	inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
1746	goto out;
1747}
1748
1749void udpv6_exit(void)
1750{
1751	inet6_unregister_protosw(&udpv6_protosw);
1752	inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
1753}
net/ipv6/udp.c (v6.9.4)
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 *	UDP over IPv6
   4 *	Linux INET6 implementation
   5 *
   6 *	Authors:
   7 *	Pedro Roque		<roque@di.fc.ul.pt>
   8 *
   9 *	Based on linux/ipv4/udp.c
  10 *
  11 *	Fixes:
  12 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
  13 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
  14 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
  15 *					a single port at the same time.
  16 *      Kazunori MIYAZAWA @USAGI:       change process style to use ip6_append_data
  17 *      YOSHIFUJI Hideaki @USAGI:	convert /proc/net/udp6 to seq_file.
  18 */
  19
  20#include <linux/bpf-cgroup.h>
  21#include <linux/errno.h>
  22#include <linux/types.h>
  23#include <linux/socket.h>
  24#include <linux/sockios.h>
  25#include <linux/net.h>
  26#include <linux/in6.h>
  27#include <linux/netdevice.h>
  28#include <linux/if_arp.h>
  29#include <linux/ipv6.h>
  30#include <linux/icmpv6.h>
  31#include <linux/init.h>
  32#include <linux/module.h>
  33#include <linux/skbuff.h>
  34#include <linux/slab.h>
  35#include <linux/uaccess.h>
  36#include <linux/indirect_call_wrapper.h>
  37
  38#include <net/addrconf.h>
  39#include <net/ndisc.h>
  40#include <net/protocol.h>
  41#include <net/transp_v6.h>
  42#include <net/ip6_route.h>
  43#include <net/raw.h>
  44#include <net/seg6.h>
  45#include <net/tcp_states.h>
  46#include <net/ip6_checksum.h>
  47#include <net/ip6_tunnel.h>
  48#include <trace/events/udp.h>
  49#include <net/xfrm.h>
  50#include <net/inet_hashtables.h>
  51#include <net/inet6_hashtables.h>
  52#include <net/busy_poll.h>
  53#include <net/sock_reuseport.h>
  54#include <net/gro.h>
  55
  56#include <linux/proc_fs.h>
  57#include <linux/seq_file.h>
  58#include <trace/events/skb.h>
  59#include "udp_impl.h"
  60
  61static void udpv6_destruct_sock(struct sock *sk)
  62{
  63	udp_destruct_common(sk);
  64	inet6_sock_destruct(sk);
  65}
  66
  67int udpv6_init_sock(struct sock *sk)
  68{
  69	udp_lib_init_sock(sk);
  70	sk->sk_destruct = udpv6_destruct_sock;
  71	set_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags);
  72	return 0;
  73}
  74
  75INDIRECT_CALLABLE_SCOPE
  76u32 udp6_ehashfn(const struct net *net,
  77		 const struct in6_addr *laddr,
  78		 const u16 lport,
  79		 const struct in6_addr *faddr,
  80		 const __be16 fport)
  81{
  82	u32 lhash, fhash;
  83
  84	net_get_random_once(&udp6_ehash_secret,
  85			    sizeof(udp6_ehash_secret));
  86	net_get_random_once(&udp_ipv6_hash_secret,
  87			    sizeof(udp_ipv6_hash_secret));
  88
  89	lhash = (__force u32)laddr->s6_addr32[3];
  90	fhash = __ipv6_addr_jhash(faddr, udp_ipv6_hash_secret);
  91
  92	return __inet6_ehashfn(lhash, lport, fhash, fport,
  93			       udp6_ehash_secret + net_hash_mix(net));
  94}
  95
  96int udp_v6_get_port(struct sock *sk, unsigned short snum)
  97{
  98	unsigned int hash2_nulladdr =
  99		ipv6_portaddr_hash(sock_net(sk), &in6addr_any, snum);
 100	unsigned int hash2_partial =
 101		ipv6_portaddr_hash(sock_net(sk), &sk->sk_v6_rcv_saddr, 0);
 102
 103	/* precompute partial secondary hash */
 104	udp_sk(sk)->udp_portaddr_hash = hash2_partial;
 105	return udp_lib_get_port(sk, snum, hash2_nulladdr);
 106}
 107
 108void udp_v6_rehash(struct sock *sk)
 109{
 110	u16 new_hash = ipv6_portaddr_hash(sock_net(sk),
 111					  &sk->sk_v6_rcv_saddr,
 112					  inet_sk(sk)->inet_num);
 113
 114	udp_lib_rehash(sk, new_hash);
 115}
 116
 117static int compute_score(struct sock *sk, struct net *net,
 118			 const struct in6_addr *saddr, __be16 sport,
 119			 const struct in6_addr *daddr, unsigned short hnum,
 120			 int dif, int sdif)
 121{
 122	int bound_dev_if, score;
 123	struct inet_sock *inet;
 124	bool dev_match;
 125
 126	if (!net_eq(sock_net(sk), net) ||
 127	    udp_sk(sk)->udp_port_hash != hnum ||
 128	    sk->sk_family != PF_INET6)
 129		return -1;
 130
 131	if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
 132		return -1;
 133
 134	score = 0;
 135	inet = inet_sk(sk);
 136
 137	if (inet->inet_dport) {
 138		if (inet->inet_dport != sport)
 139			return -1;
 140		score++;
 141	}
 142
 143	if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
 144		if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr))
 145			return -1;
 146		score++;
 147	}
 148
 149	bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
 150	dev_match = udp_sk_bound_dev_eq(net, bound_dev_if, dif, sdif);
 151	if (!dev_match)
 152		return -1;
 153	if (bound_dev_if)
 154		score++;
 155
 156	if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
 157		score++;
 158
 159	return score;
 160}
 161
 162/* called with rcu_read_lock() */
 163static struct sock *udp6_lib_lookup2(struct net *net,
 164		const struct in6_addr *saddr, __be16 sport,
 165		const struct in6_addr *daddr, unsigned int hnum,
 166		int dif, int sdif, struct udp_hslot *hslot2,
 167		struct sk_buff *skb)
 168{
 169	struct sock *sk, *result;
 170	int score, badness;
 171	bool need_rescore;
 172
 173	result = NULL;
 174	badness = -1;
 175	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
 176		need_rescore = false;
 177rescore:
 178		score = compute_score(need_rescore ? result : sk, net, saddr,
 179				      sport, daddr, hnum, dif, sdif);
 180		if (score > badness) {
 181			badness = score;
 182
 183			if (need_rescore)
 184				continue;
 185
 186			if (sk->sk_state == TCP_ESTABLISHED) {
 187				result = sk;
 188				continue;
 189			}
 190
 191			result = inet6_lookup_reuseport(net, sk, skb, sizeof(struct udphdr),
 192							saddr, sport, daddr, hnum, udp6_ehashfn);
 193			if (!result) {
 194				result = sk;
 195				continue;
 196			}
 197
 198			/* Fall back to scoring if group has connections */
 199			if (!reuseport_has_conns(sk))
 200				return result;
 201
 202			/* Reuseport logic returned an error, keep original score. */
 203			if (IS_ERR(result))
 204				continue;
 205
  206			/* compute_score() is too long a function to be
  207			 * inlined, and calling it again here yields
  208			 * measurable overhead for some workloads. Work
  209			 * around that by jumping backwards to rescore
  210			 * 'result'.
  211			 */
 212			need_rescore = true;
 213			goto rescore;
 214		}
 215	}
 216	return result;
 217}
 218
 219/* rcu_read_lock() must be held */
 220struct sock *__udp6_lib_lookup(struct net *net,
 221			       const struct in6_addr *saddr, __be16 sport,
 222			       const struct in6_addr *daddr, __be16 dport,
 223			       int dif, int sdif, struct udp_table *udptable,
 224			       struct sk_buff *skb)
 225{
 226	unsigned short hnum = ntohs(dport);
 227	unsigned int hash2, slot2;
 228	struct udp_hslot *hslot2;
 229	struct sock *result, *sk;
 230
 231	hash2 = ipv6_portaddr_hash(net, daddr, hnum);
 232	slot2 = hash2 & udptable->mask;
 233	hslot2 = &udptable->hash2[slot2];
 234
 235	/* Lookup connected or non-wildcard sockets */
 236	result = udp6_lib_lookup2(net, saddr, sport,
 237				  daddr, hnum, dif, sdif,
 238				  hslot2, skb);
 239	if (!IS_ERR_OR_NULL(result) && result->sk_state == TCP_ESTABLISHED)
 240		goto done;
 241
 242	/* Lookup redirect from BPF */
 243	if (static_branch_unlikely(&bpf_sk_lookup_enabled) &&
 244	    udptable == net->ipv4.udp_table) {
 245		sk = inet6_lookup_run_sk_lookup(net, IPPROTO_UDP, skb, sizeof(struct udphdr),
 246						saddr, sport, daddr, hnum, dif,
 247						udp6_ehashfn);
 248		if (sk) {
 249			result = sk;
 250			goto done;
 251		}
 252	}
 253
 254	/* Got non-wildcard socket or error on first lookup */
 255	if (result)
 256		goto done;
 257
 258	/* Lookup wildcard sockets */
 259	hash2 = ipv6_portaddr_hash(net, &in6addr_any, hnum);
 260	slot2 = hash2 & udptable->mask;
 261	hslot2 = &udptable->hash2[slot2];
 262
 263	result = udp6_lib_lookup2(net, saddr, sport,
 264				  &in6addr_any, hnum, dif, sdif,
 265				  hslot2, skb);
 266done:
 267	if (IS_ERR(result))
 268		return NULL;
 269	return result;
 270}
 271EXPORT_SYMBOL_GPL(__udp6_lib_lookup);
 272
 273static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb,
 274					  __be16 sport, __be16 dport,
 275					  struct udp_table *udptable)
 276{
 277	const struct ipv6hdr *iph = ipv6_hdr(skb);
 278
 279	return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
 280				 &iph->daddr, dport, inet6_iif(skb),
 281				 inet6_sdif(skb), udptable, skb);
 282}
 283
 284struct sock *udp6_lib_lookup_skb(const struct sk_buff *skb,
 285				 __be16 sport, __be16 dport)
 286{
 287	const u16 offset = NAPI_GRO_CB(skb)->network_offsets[skb->encapsulation];
 288	const struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + offset);
 289	struct net *net = dev_net(skb->dev);
 290	int iif, sdif;
 291
 292	inet6_get_iif_sdif(skb, &iif, &sdif);
 293
 294	return __udp6_lib_lookup(net, &iph->saddr, sport,
 295				 &iph->daddr, dport, iif,
 296				 sdif, net->ipv4.udp_table, NULL);
 297}
 298
 299/* Must be called under rcu_read_lock().
 300 * Does increment socket refcount.
 301 */
 302#if IS_ENABLED(CONFIG_NF_TPROXY_IPV6) || IS_ENABLED(CONFIG_NF_SOCKET_IPV6)
 303struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport,
 304			     const struct in6_addr *daddr, __be16 dport, int dif)
 305{
 306	struct sock *sk;
 307
 308	sk =  __udp6_lib_lookup(net, saddr, sport, daddr, dport,
 309				dif, 0, net->ipv4.udp_table, NULL);
 310	if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
 311		sk = NULL;
 312	return sk;
 313}
 314EXPORT_SYMBOL_GPL(udp6_lib_lookup);
 315#endif
 316
  317/* Do not use the scratch area len for jumbograms: their length exceeds the
  318 * scratch area space; note that the IP6CB flags are still in the first
  319 * cacheline, so checking for jumbograms is cheap
 320 */
 321static int udp6_skb_len(struct sk_buff *skb)
 322{
 323	return unlikely(inet6_is_jumbogram(skb)) ? skb->len : udp_skb_len(skb);
 324}
 325
 326/*
  327 *	This should be easy: if there is something there, we
  328 *	return it; otherwise we block.
 329 */
 330
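/* Illustrative aside (not part of this file): from userspace, passing
 * MSG_TRUNC makes recv() report the real datagram length even when the
 * buffer was too small, e.g.:
 *
 *	char buf[64];
 *	ssize_t n = recv(fd, buf, sizeof(buf), MSG_TRUNC);
 *	// n may exceed 64; the excess bytes were discarded
 */
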
 331int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
 332		  int flags, int *addr_len)
 333{
 334	struct ipv6_pinfo *np = inet6_sk(sk);
 335	struct inet_sock *inet = inet_sk(sk);
 336	struct sk_buff *skb;
 337	unsigned int ulen, copied;
 338	int off, err, peeking = flags & MSG_PEEK;
 339	int is_udplite = IS_UDPLITE(sk);
 340	struct udp_mib __percpu *mib;
 341	bool checksum_valid = false;
 342	int is_udp4;
 343
 344	if (flags & MSG_ERRQUEUE)
 345		return ipv6_recv_error(sk, msg, len, addr_len);
 346
 347	if (np->rxpmtu && np->rxopt.bits.rxpmtu)
 348		return ipv6_recv_rxpmtu(sk, msg, len, addr_len);
 349
 350try_again:
 351	off = sk_peek_offset(sk, flags);
 352	skb = __skb_recv_udp(sk, flags, &off, &err);
 353	if (!skb)
 354		return err;
 355
 356	ulen = udp6_skb_len(skb);
 357	copied = len;
 358	if (copied > ulen - off)
 359		copied = ulen - off;
 360	else if (copied < ulen)
 361		msg->msg_flags |= MSG_TRUNC;
 362
 363	is_udp4 = (skb->protocol == htons(ETH_P_IP));
 364	mib = __UDPX_MIB(sk, is_udp4);
 365
 366	/*
 367	 * If checksum is needed at all, try to do it while copying the
 368	 * data.  If the data is truncated, or if we only want a partial
 369	 * coverage checksum (UDP-Lite), do it before the copy.
 370	 */
 371
 372	if (copied < ulen || peeking ||
 373	    (is_udplite && UDP_SKB_CB(skb)->partial_cov)) {
 374		checksum_valid = udp_skb_csum_unnecessary(skb) ||
 375				!__udp_lib_checksum_complete(skb);
 376		if (!checksum_valid)
 377			goto csum_copy_err;
 378	}
 379
 380	if (checksum_valid || udp_skb_csum_unnecessary(skb)) {
 381		if (udp_skb_is_linear(skb))
 382			err = copy_linear_skb(skb, copied, off, &msg->msg_iter);
 383		else
 384			err = skb_copy_datagram_msg(skb, off, msg, copied);
 385	} else {
 386		err = skb_copy_and_csum_datagram_msg(skb, off, msg);
 387		if (err == -EINVAL)
 388			goto csum_copy_err;
 389	}
 390	if (unlikely(err)) {
 391		if (!peeking) {
 392			atomic_inc(&sk->sk_drops);
 393			SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
 394		}
 395		kfree_skb(skb);
 396		return err;
 397	}
 398	if (!peeking)
 399		SNMP_INC_STATS(mib, UDP_MIB_INDATAGRAMS);
 400
 401	sock_recv_cmsgs(msg, sk, skb);
 402
 403	/* Copy the address. */
 404	if (msg->msg_name) {
 405		DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
 406		sin6->sin6_family = AF_INET6;
 407		sin6->sin6_port = udp_hdr(skb)->source;
 408		sin6->sin6_flowinfo = 0;
 409
 410		if (is_udp4) {
 411			ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
 412					       &sin6->sin6_addr);
 413			sin6->sin6_scope_id = 0;
 414		} else {
 415			sin6->sin6_addr = ipv6_hdr(skb)->saddr;
 416			sin6->sin6_scope_id =
 417				ipv6_iface_scope_id(&sin6->sin6_addr,
 418						    inet6_iif(skb));
 419		}
 420		*addr_len = sizeof(*sin6);
 421
 422		BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk,
 423						      (struct sockaddr *)sin6,
 424						      addr_len);
 425	}
 426
 427	if (udp_test_bit(GRO_ENABLED, sk))
 428		udp_cmsg_recv(msg, sk, skb);
 429
 430	if (np->rxopt.all)
 431		ip6_datagram_recv_common_ctl(sk, msg, skb);
 432
 433	if (is_udp4) {
 434		if (inet_cmsg_flags(inet))
 435			ip_cmsg_recv_offset(msg, sk, skb,
 436					    sizeof(struct udphdr), off);
 437	} else {
 438		if (np->rxopt.all)
 439			ip6_datagram_recv_specific_ctl(sk, msg, skb);
 440	}
 441
 442	err = copied;
 443	if (flags & MSG_TRUNC)
 444		err = ulen;
 445
 446	skb_consume_udp(sk, skb, peeking ? -err : err);
 447	return err;
 448
 449csum_copy_err:
 450	if (!__sk_queue_drop_skb(sk, &udp_sk(sk)->reader_queue, skb, flags,
 451				 udp_skb_destructor)) {
 452		SNMP_INC_STATS(mib, UDP_MIB_CSUMERRORS);
 453		SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
 454	}
 455	kfree_skb(skb);
 456
 457	/* starting over for a new packet, but check if we need to yield */
 458	cond_resched();
 459	msg->msg_flags &= ~MSG_TRUNC;
 460	goto try_again;
 461}
 462
 463DECLARE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
 464void udpv6_encap_enable(void)
 465{
 466	static_branch_inc(&udpv6_encap_needed_key);
 467}
 468EXPORT_SYMBOL(udpv6_encap_enable);
 469
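/* Illustrative sketch (not taken from this file): tunnel drivers normally
 * do not call udpv6_encap_enable() directly but configure their socket
 * through the udp_tunnel helpers, roughly:
 *
 *	struct udp_tunnel_sock_cfg cfg = { };
 *
 *	cfg.encap_type = 1;
 *	cfg.encap_rcv  = my_encap_rcv;	// hypothetical handler
 *	setup_udp_tunnel_sock(net, sock, &cfg);
 *
 * which installs encap_rcv and flips the static key above as needed.
 */
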
 470/* Handler for tunnels with arbitrary destination ports: no socket lookup, go
 471 * through error handlers in encapsulations looking for a match.
 472 */
 473static int __udp6_lib_err_encap_no_sk(struct sk_buff *skb,
 474				      struct inet6_skb_parm *opt,
 475				      u8 type, u8 code, int offset, __be32 info)
 476{
 477	int i;
 478
 479	for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) {
 480		int (*handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
 481			       u8 type, u8 code, int offset, __be32 info);
 482		const struct ip6_tnl_encap_ops *encap;
 483
 484		encap = rcu_dereference(ip6tun_encaps[i]);
 485		if (!encap)
 486			continue;
 487		handler = encap->err_handler;
 488		if (handler && !handler(skb, opt, type, code, offset, info))
 489			return 0;
 490	}
 491
 492	return -ENOENT;
 493}
 494
 495/* Try to match ICMP errors to UDP tunnels by looking up a socket without
 496 * reversing source and destination port: this will match tunnels that force the
 497 * same destination port on both endpoints (e.g. VXLAN, GENEVE). Note that
 498 * lwtunnels might actually break this assumption by being configured with
  499 * different destination ports on endpoints; in that case we won't be able
  500 * to trace ICMP messages back to them.
 501 *
 502 * If this doesn't match any socket, probe tunnels with arbitrary destination
 503 * ports (e.g. FoU, GUE): there, the receiving socket is useless, as the port
 504 * we've sent packets to won't necessarily match the local destination port.
 505 *
 506 * Then ask the tunnel implementation to match the error against a valid
 507 * association.
 508 *
 509 * Return an error if we can't find a match, the socket if we need further
 510 * processing, zero otherwise.
 511 */
 512static struct sock *__udp6_lib_err_encap(struct net *net,
 513					 const struct ipv6hdr *hdr, int offset,
 514					 struct udphdr *uh,
 515					 struct udp_table *udptable,
 516					 struct sock *sk,
 517					 struct sk_buff *skb,
 518					 struct inet6_skb_parm *opt,
 519					 u8 type, u8 code, __be32 info)
 520{
 521	int (*lookup)(struct sock *sk, struct sk_buff *skb);
 522	int network_offset, transport_offset;
 523	struct udp_sock *up;
 524
 525	network_offset = skb_network_offset(skb);
 526	transport_offset = skb_transport_offset(skb);
 527
 528	/* Network header needs to point to the outer IPv6 header inside ICMP */
 529	skb_reset_network_header(skb);
 530
 531	/* Transport header needs to point to the UDP header */
 532	skb_set_transport_header(skb, offset);
 533
 534	if (sk) {
 535		up = udp_sk(sk);
 536
 537		lookup = READ_ONCE(up->encap_err_lookup);
 538		if (lookup && lookup(sk, skb))
 539			sk = NULL;
 540
 541		goto out;
 542	}
 543
 544	sk = __udp6_lib_lookup(net, &hdr->daddr, uh->source,
 545			       &hdr->saddr, uh->dest,
 546			       inet6_iif(skb), 0, udptable, skb);
 547	if (sk) {
 548		up = udp_sk(sk);
 549
 550		lookup = READ_ONCE(up->encap_err_lookup);
 551		if (!lookup || lookup(sk, skb))
 552			sk = NULL;
 553	}
 554
 555out:
 556	if (!sk) {
 557		sk = ERR_PTR(__udp6_lib_err_encap_no_sk(skb, opt, type, code,
 558							offset, info));
 559	}
 560
 561	skb_set_transport_header(skb, transport_offset);
 562	skb_set_network_header(skb, network_offset);
 563
 564	return sk;
 565}
 566
 567int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 568		   u8 type, u8 code, int offset, __be32 info,
 569		   struct udp_table *udptable)
 570{
 571	struct ipv6_pinfo *np;
 572	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
 573	const struct in6_addr *saddr = &hdr->saddr;
 574	const struct in6_addr *daddr = seg6_get_daddr(skb, opt) ? : &hdr->daddr;
 575	struct udphdr *uh = (struct udphdr *)(skb->data+offset);
 576	bool tunnel = false;
 577	struct sock *sk;
 578	int harderr;
 579	int err;
 580	struct net *net = dev_net(skb->dev);
 581
 582	sk = __udp6_lib_lookup(net, daddr, uh->dest, saddr, uh->source,
 583			       inet6_iif(skb), inet6_sdif(skb), udptable, NULL);
 584
 585	if (!sk || READ_ONCE(udp_sk(sk)->encap_type)) {
 586		/* No socket for error: try tunnels before discarding */
 587		if (static_branch_unlikely(&udpv6_encap_needed_key)) {
 588			sk = __udp6_lib_err_encap(net, hdr, offset, uh,
 589						  udptable, sk, skb,
 590						  opt, type, code, info);
 591			if (!sk)
 592				return 0;
 593		} else
 594			sk = ERR_PTR(-ENOENT);
 595
 596		if (IS_ERR(sk)) {
 597			__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
 598					  ICMP6_MIB_INERRORS);
 599			return PTR_ERR(sk);
 600		}
 601
 602		tunnel = true;
 603	}
 604
 605	harderr = icmpv6_err_convert(type, code, &err);
 606	np = inet6_sk(sk);
 607
 608	if (type == ICMPV6_PKT_TOOBIG) {
 609		if (!ip6_sk_accept_pmtu(sk))
 610			goto out;
 611		ip6_sk_update_pmtu(skb, sk, info);
 612		if (READ_ONCE(np->pmtudisc) != IPV6_PMTUDISC_DONT)
 613			harderr = 1;
 614	}
 615	if (type == NDISC_REDIRECT) {
 616		if (tunnel) {
 617			ip6_redirect(skb, sock_net(sk), inet6_iif(skb),
 618				     READ_ONCE(sk->sk_mark), sk->sk_uid);
 619		} else {
 620			ip6_sk_redirect(skb, sk);
 621		}
 622		goto out;
 623	}
 624
 625	/* Tunnels don't have an application socket: don't pass errors back */
 626	if (tunnel) {
 627		if (udp_sk(sk)->encap_err_rcv)
 628			udp_sk(sk)->encap_err_rcv(sk, skb, err, uh->dest,
 629						  ntohl(info), (u8 *)(uh+1));
 630		goto out;
 631	}
 632
 633	if (!inet6_test_bit(RECVERR6, sk)) {
 634		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
 635			goto out;
 636	} else {
 637		ipv6_icmp_error(sk, skb, err, uh->dest, ntohl(info), (u8 *)(uh+1));
 638	}
 639
 640	sk->sk_err = err;
 641	sk_error_report(sk);
 642out:
 643	return 0;
 644}
 645
 646static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 647{
 648	int rc;
 649
 650	if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
 651		sock_rps_save_rxhash(sk, skb);
 652		sk_mark_napi_id(sk, skb);
 653		sk_incoming_cpu_update(sk);
 654	} else {
 655		sk_mark_napi_id_once(sk, skb);
 656	}
 657
 658	rc = __udp_enqueue_schedule_skb(sk, skb);
 659	if (rc < 0) {
 660		int is_udplite = IS_UDPLITE(sk);
 661		enum skb_drop_reason drop_reason;
 662
 663		/* Note that an ENOMEM error is charged twice */
 664		if (rc == -ENOMEM) {
 665			UDP6_INC_STATS(sock_net(sk),
 666					 UDP_MIB_RCVBUFERRORS, is_udplite);
 667			drop_reason = SKB_DROP_REASON_SOCKET_RCVBUFF;
 668		} else {
 669			UDP6_INC_STATS(sock_net(sk),
 670				       UDP_MIB_MEMERRORS, is_udplite);
 671			drop_reason = SKB_DROP_REASON_PROTO_MEM;
 672		}
 673		UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
 674		kfree_skb_reason(skb, drop_reason);
 675		trace_udp_fail_queue_rcv_skb(rc, sk);
 676		return -1;
 677	}
 678
 679	return 0;
 680}
 681
 682static __inline__ int udpv6_err(struct sk_buff *skb,
 683				struct inet6_skb_parm *opt, u8 type,
 684				u8 code, int offset, __be32 info)
 685{
 686	return __udp6_lib_err(skb, opt, type, code, offset, info,
 687			      dev_net(skb->dev)->ipv4.udp_table);
 688}
 689
 690static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
 691{
 692	enum skb_drop_reason drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;
 693	struct udp_sock *up = udp_sk(sk);
 694	int is_udplite = IS_UDPLITE(sk);
 695
 696	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
 697		drop_reason = SKB_DROP_REASON_XFRM_POLICY;
 698		goto drop;
 699	}
 700	nf_reset_ct(skb);
 701
 702	if (static_branch_unlikely(&udpv6_encap_needed_key) &&
 703	    READ_ONCE(up->encap_type)) {
 704		int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
 705
 706		/*
  707		 * This is an encapsulation socket, so pass the skb to
  708		 * the socket's udp_encap_rcv() hook. Otherwise, just
  709		 * fall through and pass it up to the UDP socket.
  710		 * up->encap_rcv() returns one of the following values:
 711		 * =0 if skb was successfully passed to the encap
 712		 *    handler or was discarded by it.
 713		 * >0 if skb should be passed on to UDP.
 714		 * <0 if skb should be resubmitted as proto -N
 715		 */
 716
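		/* A minimal sketch of such a handler (hypothetical names,
		 * for illustration only):
		 *
		 *	static int my_encap_rcv(struct sock *sk,
		 *				struct sk_buff *skb)
		 *	{
		 *		if (!my_header_is_valid(skb))
		 *			return 1;	// not ours: pass to UDP
		 *		__skb_pull(skb, MY_HDR_LEN);
		 *		my_deliver(sk, skb);	// consumes the skb
		 *		return 0;
		 *	}
		 */
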
 717		/* if we're overly short, let UDP handle it */
 718		encap_rcv = READ_ONCE(up->encap_rcv);
 719		if (encap_rcv) {
 720			int ret;
 721
 722			/* Verify checksum before giving to encap */
 723			if (udp_lib_checksum_complete(skb))
 724				goto csum_error;
 725
 726			ret = encap_rcv(sk, skb);
 727			if (ret <= 0) {
 728				__UDP6_INC_STATS(sock_net(sk),
 729						 UDP_MIB_INDATAGRAMS,
 730						 is_udplite);
 731				return -ret;
 732			}
 733		}
 734
  735		/* FALLTHROUGH -- it's a UDP packet */
 736	}
 737
 738	/*
 739	 * UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c).
 740	 */
 741	if (udp_test_bit(UDPLITE_RECV_CC, sk) && UDP_SKB_CB(skb)->partial_cov) {
 742		u16 pcrlen = READ_ONCE(up->pcrlen);
 743
 744		if (pcrlen == 0) {          /* full coverage was set  */
 745			net_dbg_ratelimited("UDPLITE6: partial coverage %d while full coverage %d requested\n",
 746					    UDP_SKB_CB(skb)->cscov, skb->len);
 747			goto drop;
 748		}
 749		if (UDP_SKB_CB(skb)->cscov < pcrlen) {
 750			net_dbg_ratelimited("UDPLITE6: coverage %d too small, need min %d\n",
 751					    UDP_SKB_CB(skb)->cscov, pcrlen);
 752			goto drop;
 753		}
 754	}
 755
 756	prefetch(&sk->sk_rmem_alloc);
 757	if (rcu_access_pointer(sk->sk_filter) &&
 758	    udp_lib_checksum_complete(skb))
 759		goto csum_error;
 760
 761	if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr))) {
 762		drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
 763		goto drop;
 764	}
 765
 766	udp_csum_pull_header(skb);
 767
 768	skb_dst_drop(skb);
 769
 770	return __udpv6_queue_rcv_skb(sk, skb);
 771
 772csum_error:
 773	drop_reason = SKB_DROP_REASON_UDP_CSUM;
 774	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
 775drop:
 776	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
 777	atomic_inc(&sk->sk_drops);
 778	kfree_skb_reason(skb, drop_reason);
 779	return -1;
 780}
 781
 782static int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 783{
 784	struct sk_buff *next, *segs;
 785	int ret;
 786
 787	if (likely(!udp_unexpected_gso(sk, skb)))
 788		return udpv6_queue_rcv_one_skb(sk, skb);
 789
 790	__skb_push(skb, -skb_mac_offset(skb));
 791	segs = udp_rcv_segment(sk, skb, false);
 792	skb_list_walk_safe(segs, skb, next) {
 793		__skb_pull(skb, skb_transport_offset(skb));
 794
 795		udp_post_segment_fix_csum(skb);
 796		ret = udpv6_queue_rcv_one_skb(sk, skb);
 797		if (ret > 0)
 798			ip6_protocol_deliver_rcu(dev_net(skb->dev), skb, ret,
 799						 true);
 800	}
 801	return 0;
 802}
 803
 804static bool __udp_v6_is_mcast_sock(struct net *net, const struct sock *sk,
 805				   __be16 loc_port, const struct in6_addr *loc_addr,
 806				   __be16 rmt_port, const struct in6_addr *rmt_addr,
 807				   int dif, int sdif, unsigned short hnum)
 808{
 809	const struct inet_sock *inet = inet_sk(sk);
 810
 811	if (!net_eq(sock_net(sk), net))
 812		return false;
 813
 814	if (udp_sk(sk)->udp_port_hash != hnum ||
 815	    sk->sk_family != PF_INET6 ||
 816	    (inet->inet_dport && inet->inet_dport != rmt_port) ||
 817	    (!ipv6_addr_any(&sk->sk_v6_daddr) &&
 818		    !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr)) ||
 819	    !udp_sk_bound_dev_eq(net, READ_ONCE(sk->sk_bound_dev_if), dif, sdif) ||
 820	    (!ipv6_addr_any(&sk->sk_v6_rcv_saddr) &&
 821		    !ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr)))
 822		return false;
 823	if (!inet6_mc_check(sk, loc_addr, rmt_addr))
 824		return false;
 825	return true;
 826}
 827
 828static void udp6_csum_zero_error(struct sk_buff *skb)
 829{
 830	/* RFC 2460 section 8.1 says that we SHOULD log
 831	 * this error. Well, it is reasonable.
 832	 */
 833	net_dbg_ratelimited("IPv6: udp checksum is 0 for [%pI6c]:%u->[%pI6c]:%u\n",
 834			    &ipv6_hdr(skb)->saddr, ntohs(udp_hdr(skb)->source),
 835			    &ipv6_hdr(skb)->daddr, ntohs(udp_hdr(skb)->dest));
 836}
 837
 838/*
 839 * Note: called only from the BH handler context,
 840 * so we don't need to lock the hashes.
 841 */
 842static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
 843		const struct in6_addr *saddr, const struct in6_addr *daddr,
 844		struct udp_table *udptable, int proto)
 845{
 846	struct sock *sk, *first = NULL;
 847	const struct udphdr *uh = udp_hdr(skb);
 848	unsigned short hnum = ntohs(uh->dest);
 849	struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
 850	unsigned int offset = offsetof(typeof(*sk), sk_node);
 851	unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
 852	int dif = inet6_iif(skb);
 853	int sdif = inet6_sdif(skb);
 854	struct hlist_node *node;
 855	struct sk_buff *nskb;
 856
 857	if (use_hash2) {
 858		hash2_any = ipv6_portaddr_hash(net, &in6addr_any, hnum) &
 859			    udptable->mask;
 860		hash2 = ipv6_portaddr_hash(net, daddr, hnum) & udptable->mask;
 861start_lookup:
 862		hslot = &udptable->hash2[hash2];
 863		offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
 864	}
 865
 866	sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) {
 867		if (!__udp_v6_is_mcast_sock(net, sk, uh->dest, daddr,
 868					    uh->source, saddr, dif, sdif,
 869					    hnum))
 870			continue;
  871		/* If the checksum is zero and no_check is not on for
  872		 * the socket, then skip it.
 873		 */
 874		if (!uh->check && !udp_get_no_check6_rx(sk))
 875			continue;
 876		if (!first) {
 877			first = sk;
 878			continue;
 879		}
 880		nskb = skb_clone(skb, GFP_ATOMIC);
 881		if (unlikely(!nskb)) {
 882			atomic_inc(&sk->sk_drops);
 883			__UDP6_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
 884					 IS_UDPLITE(sk));
 885			__UDP6_INC_STATS(net, UDP_MIB_INERRORS,
 886					 IS_UDPLITE(sk));
 887			continue;
 888		}
 889
 890		if (udpv6_queue_rcv_skb(sk, nskb) > 0)
 891			consume_skb(nskb);
 892	}
 893
  894	/* Also look up *:port if we are using hash2 and haven't done so yet. */
 895	if (use_hash2 && hash2 != hash2_any) {
 896		hash2 = hash2_any;
 897		goto start_lookup;
 898	}
 899
 900	if (first) {
 901		if (udpv6_queue_rcv_skb(first, skb) > 0)
 902			consume_skb(skb);
 903	} else {
 904		kfree_skb(skb);
 905		__UDP6_INC_STATS(net, UDP_MIB_IGNOREDMULTI,
 906				 proto == IPPROTO_UDPLITE);
 907	}
 908	return 0;
 909}
 910
 911static void udp6_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
 912{
 913	if (udp_sk_rx_dst_set(sk, dst))
 914		sk->sk_rx_dst_cookie = rt6_get_cookie(dst_rt6_info(dst));
 915}
 916
  917/* wrapper for udpv6_queue_rcv_skb taking care of csum conversion and
  918 * return code conversion for IP layer consumption
  919 */
 920static int udp6_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
 921				struct udphdr *uh)
 922{
 923	int ret;
 924
 925	if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
 926		skb_checksum_try_convert(skb, IPPROTO_UDP, ip6_compute_pseudo);
 927
 928	ret = udpv6_queue_rcv_skb(sk, skb);
 929
 930	/* a return value > 0 means to resubmit the input */
 931	if (ret > 0)
 932		return ret;
 933	return 0;
 934}
 935
 936int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
 937		   int proto)
 938{
 939	enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED;
 940	const struct in6_addr *saddr, *daddr;
 941	struct net *net = dev_net(skb->dev);
 942	struct udphdr *uh;
 943	struct sock *sk;
 944	bool refcounted;
 945	u32 ulen = 0;
 946
 947	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
 948		goto discard;
 949
 950	saddr = &ipv6_hdr(skb)->saddr;
 951	daddr = &ipv6_hdr(skb)->daddr;
 952	uh = udp_hdr(skb);
 953
 954	ulen = ntohs(uh->len);
 955	if (ulen > skb->len)
 956		goto short_packet;
 957
 958	if (proto == IPPROTO_UDP) {
 959		/* UDP validates ulen. */
 960
 961		/* Check for jumbo payload */
 962		if (ulen == 0)
 963			ulen = skb->len;
 964
 965		if (ulen < sizeof(*uh))
 966			goto short_packet;
 967
 968		if (ulen < skb->len) {
 969			if (pskb_trim_rcsum(skb, ulen))
 970				goto short_packet;
 971			saddr = &ipv6_hdr(skb)->saddr;
 972			daddr = &ipv6_hdr(skb)->daddr;
 973			uh = udp_hdr(skb);
 974		}
 975	}
 976
 977	if (udp6_csum_init(skb, uh, proto))
 978		goto csum_error;
 979
 980	/* Check if the socket is already available, e.g. due to early demux */
 981	sk = inet6_steal_sock(net, skb, sizeof(struct udphdr), saddr, uh->source, daddr, uh->dest,
 982			      &refcounted, udp6_ehashfn);
 983	if (IS_ERR(sk))
 984		goto no_sk;
 985
 986	if (sk) {
 987		struct dst_entry *dst = skb_dst(skb);
 988		int ret;
 989
 990		if (unlikely(rcu_dereference(sk->sk_rx_dst) != dst))
 991			udp6_sk_rx_dst_set(sk, dst);
 992
 993		if (!uh->check && !udp_get_no_check6_rx(sk)) {
 994			if (refcounted)
 995				sock_put(sk);
 996			goto report_csum_error;
 997		}
 998
 999		ret = udp6_unicast_rcv_skb(sk, skb, uh);
1000		if (refcounted)
1001			sock_put(sk);
1002		return ret;
1003	}
1004
1005	/*
1006	 *	Multicast receive code
1007	 */
1008	if (ipv6_addr_is_multicast(daddr))
1009		return __udp6_lib_mcast_deliver(net, skb,
1010				saddr, daddr, udptable, proto);
1011
1012	/* Unicast */
1013	sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
1014	if (sk) {
1015		if (!uh->check && !udp_get_no_check6_rx(sk))
1016			goto report_csum_error;
1017		return udp6_unicast_rcv_skb(sk, skb, uh);
1018	}
1019no_sk:
1020	reason = SKB_DROP_REASON_NO_SOCKET;
1021
1022	if (!uh->check)
1023		goto report_csum_error;
1024
1025	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1026		goto discard;
1027	nf_reset_ct(skb);
1028
1029	if (udp_lib_checksum_complete(skb))
1030		goto csum_error;
1031
1032	__UDP6_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
1033	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
1034
1035	kfree_skb_reason(skb, reason);
1036	return 0;
1037
1038short_packet:
1039	if (reason == SKB_DROP_REASON_NOT_SPECIFIED)
1040		reason = SKB_DROP_REASON_PKT_TOO_SMALL;
1041	net_dbg_ratelimited("UDP%sv6: short packet: From [%pI6c]:%u %d/%d to [%pI6c]:%u\n",
1042			    proto == IPPROTO_UDPLITE ? "-Lite" : "",
1043			    saddr, ntohs(uh->source),
1044			    ulen, skb->len,
1045			    daddr, ntohs(uh->dest));
1046	goto discard;
1047
1048report_csum_error:
1049	udp6_csum_zero_error(skb);
1050csum_error:
1051	if (reason == SKB_DROP_REASON_NOT_SPECIFIED)
1052		reason = SKB_DROP_REASON_UDP_CSUM;
1053	__UDP6_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
1054discard:
1055	__UDP6_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
1056	kfree_skb_reason(skb, reason);
1057	return 0;
1058}
1059
1060
1061static struct sock *__udp6_lib_demux_lookup(struct net *net,
1062			__be16 loc_port, const struct in6_addr *loc_addr,
1063			__be16 rmt_port, const struct in6_addr *rmt_addr,
1064			int dif, int sdif)
1065{
1066	struct udp_table *udptable = net->ipv4.udp_table;
1067	unsigned short hnum = ntohs(loc_port);
1068	unsigned int hash2, slot2;
1069	struct udp_hslot *hslot2;
1070	__portpair ports;
1071	struct sock *sk;
1072
1073	hash2 = ipv6_portaddr_hash(net, loc_addr, hnum);
1074	slot2 = hash2 & udptable->mask;
1075	hslot2 = &udptable->hash2[slot2];
1076	ports = INET_COMBINED_PORTS(rmt_port, hnum);
1077
1078	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
1079		if (sk->sk_state == TCP_ESTABLISHED &&
1080		    inet6_match(net, sk, rmt_addr, loc_addr, ports, dif, sdif))
1081			return sk;
1082		/* Only check first socket in chain */
1083		break;
1084	}
1085	return NULL;
1086}
1087
1088void udp_v6_early_demux(struct sk_buff *skb)
1089{
1090	struct net *net = dev_net(skb->dev);
1091	const struct udphdr *uh;
1092	struct sock *sk;
1093	struct dst_entry *dst;
1094	int dif = skb->dev->ifindex;
1095	int sdif = inet6_sdif(skb);
1096
1097	if (!pskb_may_pull(skb, skb_transport_offset(skb) +
1098	    sizeof(struct udphdr)))
1099		return;
1100
1101	uh = udp_hdr(skb);
1102
1103	if (skb->pkt_type == PACKET_HOST)
1104		sk = __udp6_lib_demux_lookup(net, uh->dest,
1105					     &ipv6_hdr(skb)->daddr,
1106					     uh->source, &ipv6_hdr(skb)->saddr,
1107					     dif, sdif);
1108	else
1109		return;
1110
1111	if (!sk)
1112		return;
1113
1114	skb->sk = sk;
1115	DEBUG_NET_WARN_ON_ONCE(sk_is_refcounted(sk));
1116	skb->destructor = sock_pfree;
1117	dst = rcu_dereference(sk->sk_rx_dst);
1118
1119	if (dst)
1120		dst = dst_check(dst, sk->sk_rx_dst_cookie);
1121	if (dst) {
1122		/* set noref for now.
1123		 * any place which wants to hold dst has to call
1124		 * dst_hold_safe()
1125		 */
1126		skb_dst_set_noref(skb, dst);
1127	}
1128}
1129
1130INDIRECT_CALLABLE_SCOPE int udpv6_rcv(struct sk_buff *skb)
1131{
1132	return __udp6_lib_rcv(skb, dev_net(skb->dev)->ipv4.udp_table, IPPROTO_UDP);
1133}
1134
1135/*
1136 * Throw away all pending data and cancel the corking. Socket is locked.
1137 */
1138static void udp_v6_flush_pending_frames(struct sock *sk)
1139{
1140	struct udp_sock *up = udp_sk(sk);
1141
1142	if (up->pending == AF_INET)
1143		udp_flush_pending_frames(sk);
1144	else if (up->pending) {
1145		up->len = 0;
1146		WRITE_ONCE(up->pending, 0);
1147		ip6_flush_pending_frames(sk);
1148	}
1149}
1150
1151static int udpv6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
1152			     int addr_len)
1153{
1154	if (addr_len < offsetofend(struct sockaddr, sa_family))
1155		return -EINVAL;
1156	/* The following checks are replicated from __ip6_datagram_connect()
 1157	 * and intended to prevent the BPF program called below from accessing
 1158	 * bytes that are out of the bounds specified by the user in addr_len.
1159	 */
1160	if (uaddr->sa_family == AF_INET) {
1161		if (ipv6_only_sock(sk))
1162			return -EAFNOSUPPORT;
1163		return udp_pre_connect(sk, uaddr, addr_len);
1164	}
1165
1166	if (addr_len < SIN6_LEN_RFC2133)
1167		return -EINVAL;
1168
1169	return BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr, &addr_len);
1170}
1171
1172/**
1173 *	udp6_hwcsum_outgoing  -  handle outgoing HW checksumming
1174 *	@sk:	socket we are sending on
1175 *	@skb:	sk_buff containing the filled-in UDP header
1176 *		(checksum field must be zeroed out)
1177 *	@saddr: source address
1178 *	@daddr: destination address
1179 *	@len:	length of packet
1180 */
1181static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
1182				 const struct in6_addr *saddr,
1183				 const struct in6_addr *daddr, int len)
1184{
1185	unsigned int offset;
1186	struct udphdr *uh = udp_hdr(skb);
1187	struct sk_buff *frags = skb_shinfo(skb)->frag_list;
1188	__wsum csum = 0;
1189
1190	if (!frags) {
1191		/* Only one fragment on the socket.  */
1192		skb->csum_start = skb_transport_header(skb) - skb->head;
1193		skb->csum_offset = offsetof(struct udphdr, check);
1194		uh->check = ~csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP, 0);
1195	} else {
 1196		/*
 1197		 * HW checksum won't work as there are two or more
 1198		 * fragments on the socket, so the csums of all the
 1199		 * sk_buffs must be combined by software
 1200		 */
1201		offset = skb_transport_offset(skb);
1202		skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
1203		csum = skb->csum;
1204
1205		skb->ip_summed = CHECKSUM_NONE;
1206
1207		do {
1208			csum = csum_add(csum, frags->csum);
1209		} while ((frags = frags->next));
1210
1211		uh->check = csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP,
1212					    csum);
1213		if (uh->check == 0)
1214			uh->check = CSUM_MANGLED_0;
1215	}
1216}
1217
1218/*
1219 *	Sending
1220 */
1221
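/* Aside (illustrative, not from this file): the cork->gso_size handling
 * below is the kernel side of the UDP_SEGMENT socket option. A userspace
 * sender would typically request it along these lines:
 *
 *	int gso_size = 1400;	// example payload bytes per segment
 *
 *	setsockopt(fd, SOL_UDP, UDP_SEGMENT, &gso_size, sizeof(gso_size));
 *
 * after which one large send is cut into gso_size-byte datagrams.
 */
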
1222static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
1223			   struct inet_cork *cork)
1224{
1225	struct sock *sk = skb->sk;
1226	struct udphdr *uh;
1227	int err = 0;
1228	int is_udplite = IS_UDPLITE(sk);
1229	__wsum csum = 0;
1230	int offset = skb_transport_offset(skb);
1231	int len = skb->len - offset;
1232	int datalen = len - sizeof(*uh);
1233
1234	/*
1235	 * Create a UDP header
1236	 */
1237	uh = udp_hdr(skb);
1238	uh->source = fl6->fl6_sport;
1239	uh->dest = fl6->fl6_dport;
1240	uh->len = htons(len);
1241	uh->check = 0;
1242
1243	if (cork->gso_size) {
1244		const int hlen = skb_network_header_len(skb) +
1245				 sizeof(struct udphdr);
1246
1247		if (hlen + cork->gso_size > cork->fragsize) {
1248			kfree_skb(skb);
1249			return -EINVAL;
1250		}
1251		if (datalen > cork->gso_size * UDP_MAX_SEGMENTS) {
1252			kfree_skb(skb);
1253			return -EINVAL;
1254		}
1255		if (udp_get_no_check6_tx(sk)) {
1256			kfree_skb(skb);
1257			return -EINVAL;
1258		}
1259		if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
1260		    dst_xfrm(skb_dst(skb))) {
1261			kfree_skb(skb);
1262			return -EIO;
1263		}
1264
1265		if (datalen > cork->gso_size) {
1266			skb_shinfo(skb)->gso_size = cork->gso_size;
1267			skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
1268			skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen,
1269								 cork->gso_size);
1270		}
1271		goto csum_partial;
1272	}
1273
1274	if (is_udplite)
1275		csum = udplite_csum(skb);
1276	else if (udp_get_no_check6_tx(sk)) {   /* UDP csum disabled */
1277		skb->ip_summed = CHECKSUM_NONE;
1278		goto send;
1279	} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
1280csum_partial:
1281		udp6_hwcsum_outgoing(sk, skb, &fl6->saddr, &fl6->daddr, len);
1282		goto send;
1283	} else
1284		csum = udp_csum(skb);
1285
1286	/* add protocol-dependent pseudo-header */
1287	uh->check = csum_ipv6_magic(&fl6->saddr, &fl6->daddr,
1288				    len, fl6->flowi6_proto, csum);
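	/* An all-zero checksum means "no checksum" on the wire and is
	 * illegal for UDP over IPv6, so a computed value of zero is
	 * transmitted as all ones (CSUM_MANGLED_0).
	 */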
1289	if (uh->check == 0)
1290		uh->check = CSUM_MANGLED_0;
1291
1292send:
1293	err = ip6_send_skb(skb);
1294	if (err) {
1295		if (err == -ENOBUFS && !inet6_test_bit(RECVERR6, sk)) {
1296			UDP6_INC_STATS(sock_net(sk),
1297				       UDP_MIB_SNDBUFERRORS, is_udplite);
1298			err = 0;
1299		}
1300	} else {
1301		UDP6_INC_STATS(sock_net(sk),
1302			       UDP_MIB_OUTDATAGRAMS, is_udplite);
1303	}
1304	return err;
1305}
1306
1307static int udp_v6_push_pending_frames(struct sock *sk)
1308{
1309	struct sk_buff *skb;
1310	struct udp_sock  *up = udp_sk(sk);
1311	int err = 0;
1312
1313	if (up->pending == AF_INET)
1314		return udp_push_pending_frames(sk);
1315
1316	skb = ip6_finish_skb(sk);
1317	if (!skb)
1318		goto out;
1319
1320	err = udp_v6_send_skb(skb, &inet_sk(sk)->cork.fl.u.ip6,
1321			      &inet_sk(sk)->cork.base);
1322out:
1323	up->len = 0;
1324	WRITE_ONCE(up->pending, 0);
1325	return err;
1326}
1327
1328int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
1329{
1330	struct ipv6_txoptions opt_space;
1331	struct udp_sock *up = udp_sk(sk);
1332	struct inet_sock *inet = inet_sk(sk);
1333	struct ipv6_pinfo *np = inet6_sk(sk);
1334	DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
1335	struct in6_addr *daddr, *final_p, final;
1336	struct ipv6_txoptions *opt = NULL;
1337	struct ipv6_txoptions *opt_to_free = NULL;
1338	struct ip6_flowlabel *flowlabel = NULL;
1339	struct inet_cork_full cork;
1340	struct flowi6 *fl6 = &cork.fl.u.ip6;
1341	struct dst_entry *dst;
1342	struct ipcm6_cookie ipc6;
1343	int addr_len = msg->msg_namelen;
1344	bool connected = false;
1345	int ulen = len;
1346	int corkreq = udp_test_bit(CORK, sk) || msg->msg_flags & MSG_MORE;
1347	int err;
1348	int is_udplite = IS_UDPLITE(sk);
1349	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
1350
1351	ipcm6_init(&ipc6);
1352	ipc6.gso_size = READ_ONCE(up->gso_size);
1353	ipc6.sockc.tsflags = READ_ONCE(sk->sk_tsflags);
1354	ipc6.sockc.mark = READ_ONCE(sk->sk_mark);
1355
1356	/* destination address check */
1357	if (sin6) {
1358		if (addr_len < offsetof(struct sockaddr, sa_data))
1359			return -EINVAL;
1360
1361		switch (sin6->sin6_family) {
1362		case AF_INET6:
1363			if (addr_len < SIN6_LEN_RFC2133)
1364				return -EINVAL;
1365			daddr = &sin6->sin6_addr;
1366			if (ipv6_addr_any(daddr) &&
1367			    ipv6_addr_v4mapped(&np->saddr))
1368				ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
1369						       daddr);
1370			break;
1371		case AF_INET:
1372			goto do_udp_sendmsg;
1373		case AF_UNSPEC:
1374			msg->msg_name = sin6 = NULL;
1375			msg->msg_namelen = addr_len = 0;
1376			daddr = NULL;
1377			break;
1378		default:
1379			return -EINVAL;
1380		}
1381	} else if (!READ_ONCE(up->pending)) {
1382		if (sk->sk_state != TCP_ESTABLISHED)
1383			return -EDESTADDRREQ;
1384		daddr = &sk->sk_v6_daddr;
1385	} else
1386		daddr = NULL;
1387
1388	if (daddr) {
1389		if (ipv6_addr_v4mapped(daddr)) {
 1390			struct sockaddr_in sin;

 1391			sin.sin_family = AF_INET;
1392			sin.sin_port = sin6 ? sin6->sin6_port : inet->inet_dport;
1393			sin.sin_addr.s_addr = daddr->s6_addr32[3];
1394			msg->msg_name = &sin;
1395			msg->msg_namelen = sizeof(sin);
1396do_udp_sendmsg:
1397			err = ipv6_only_sock(sk) ?
1398				-ENETUNREACH : udp_sendmsg(sk, msg, len);
1399			msg->msg_name = sin6;
1400			msg->msg_namelen = addr_len;
1401			return err;
1402		}
1403	}
1404
 1405	/* Rough check on arithmetic overflow;
 1406	   a better check is made in ip6_append_data().
 1407	   */
1408	if (len > INT_MAX - sizeof(struct udphdr))
1409		return -EMSGSIZE;
1410
1411	getfrag  =  is_udplite ?  udplite_getfrag : ip_generic_getfrag;
1412	if (READ_ONCE(up->pending)) {
1413		if (READ_ONCE(up->pending) == AF_INET)
1414			return udp_sendmsg(sk, msg, len);
1415		/*
1416		 * There are pending frames.
1417		 * The socket lock must be held while it's corked.
1418		 */
1419		lock_sock(sk);
1420		if (likely(up->pending)) {
1421			if (unlikely(up->pending != AF_INET6)) {
1422				release_sock(sk);
1423				return -EAFNOSUPPORT;
1424			}
1425			dst = NULL;
1426			goto do_append_data;
1427		}
1428		release_sock(sk);
1429	}
1430	ulen += sizeof(struct udphdr);
1431
1432	memset(fl6, 0, sizeof(*fl6));
1433
1434	if (sin6) {
1435		if (sin6->sin6_port == 0)
1436			return -EINVAL;
1437
1438		fl6->fl6_dport = sin6->sin6_port;
1439		daddr = &sin6->sin6_addr;
1440
1441		if (inet6_test_bit(SNDFLOW, sk)) {
1442			fl6->flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK;
1443			if (fl6->flowlabel & IPV6_FLOWLABEL_MASK) {
1444				flowlabel = fl6_sock_lookup(sk, fl6->flowlabel);
1445				if (IS_ERR(flowlabel))
1446					return -EINVAL;
1447			}
1448		}
1449
1450		/*
1451		 * Otherwise it will be difficult to maintain
1452		 * sk->sk_dst_cache.
1453		 */
1454		if (sk->sk_state == TCP_ESTABLISHED &&
1455		    ipv6_addr_equal(daddr, &sk->sk_v6_daddr))
1456			daddr = &sk->sk_v6_daddr;
1457
1458		if (addr_len >= sizeof(struct sockaddr_in6) &&
1459		    sin6->sin6_scope_id &&
1460		    __ipv6_addr_needs_scope_id(__ipv6_addr_type(daddr)))
1461			fl6->flowi6_oif = sin6->sin6_scope_id;
1462	} else {
1463		if (sk->sk_state != TCP_ESTABLISHED)
1464			return -EDESTADDRREQ;
1465
1466		fl6->fl6_dport = inet->inet_dport;
1467		daddr = &sk->sk_v6_daddr;
1468		fl6->flowlabel = np->flow_label;
1469		connected = true;
1470	}
1471
1472	if (!fl6->flowi6_oif)
1473		fl6->flowi6_oif = READ_ONCE(sk->sk_bound_dev_if);
1474
1475	if (!fl6->flowi6_oif)
1476		fl6->flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
1477
1478	fl6->flowi6_uid = sk->sk_uid;
1479
1480	if (msg->msg_controllen) {
1481		opt = &opt_space;
1482		memset(opt, 0, sizeof(struct ipv6_txoptions));
1483		opt->tot_len = sizeof(*opt);
1484		ipc6.opt = opt;
1485
1486		err = udp_cmsg_send(sk, msg, &ipc6.gso_size);
1487		if (err > 0) {
1488			err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, fl6,
1489						    &ipc6);
1490			connected = false;
1491		}
1492		if (err < 0) {
1493			fl6_sock_release(flowlabel);
1494			return err;
1495		}
1496		if ((fl6->flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) {
1497			flowlabel = fl6_sock_lookup(sk, fl6->flowlabel);
1498			if (IS_ERR(flowlabel))
1499				return -EINVAL;
1500		}
1501		if (!(opt->opt_nflen|opt->opt_flen))
1502			opt = NULL;
1503	}
1504	if (!opt) {
1505		opt = txopt_get(np);
1506		opt_to_free = opt;
1507	}
1508	if (flowlabel)
1509		opt = fl6_merge_options(&opt_space, flowlabel, opt);
1510	opt = ipv6_fixup_options(&opt_space, opt);
1511	ipc6.opt = opt;
1512
1513	fl6->flowi6_proto = sk->sk_protocol;
1514	fl6->flowi6_mark = ipc6.sockc.mark;
1515	fl6->daddr = *daddr;
1516	if (ipv6_addr_any(&fl6->saddr) && !ipv6_addr_any(&np->saddr))
1517		fl6->saddr = np->saddr;
1518	fl6->fl6_sport = inet->inet_sport;
1519
1520	if (cgroup_bpf_enabled(CGROUP_UDP6_SENDMSG) && !connected) {
1521		err = BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk,
1522					   (struct sockaddr *)sin6,
1523					   &addr_len,
1524					   &fl6->saddr);
1525		if (err)
1526			goto out_no_dst;
1527		if (sin6) {
1528			if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
 1529				/* The BPF program rewrote the address to an
 1530				 * IPv4-mapped IPv6 one; currently unsupported.
 1531				 */
1532				err = -ENOTSUPP;
1533				goto out_no_dst;
1534			}
1535			if (sin6->sin6_port == 0) {
 1536				/* The BPF program set an invalid port. Reject it. */
1537				err = -EINVAL;
1538				goto out_no_dst;
1539			}
1540			fl6->fl6_dport = sin6->sin6_port;
1541			fl6->daddr = sin6->sin6_addr;
1542		}
1543	}
1544
1545	if (ipv6_addr_any(&fl6->daddr))
1546		fl6->daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
1547
1548	final_p = fl6_update_dst(fl6, opt, &final);
1549	if (final_p)
1550		connected = false;
1551
1552	if (!fl6->flowi6_oif && ipv6_addr_is_multicast(&fl6->daddr)) {
1553		fl6->flowi6_oif = READ_ONCE(np->mcast_oif);
1554		connected = false;
1555	} else if (!fl6->flowi6_oif)
1556		fl6->flowi6_oif = READ_ONCE(np->ucast_oif);
1557
1558	security_sk_classify_flow(sk, flowi6_to_flowi_common(fl6));
1559
1560	if (ipc6.tclass < 0)
1561		ipc6.tclass = np->tclass;
1562
1563	fl6->flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6->flowlabel);
1564
1565	dst = ip6_sk_dst_lookup_flow(sk, fl6, final_p, connected);
1566	if (IS_ERR(dst)) {
1567		err = PTR_ERR(dst);
1568		dst = NULL;
1569		goto out;
1570	}
1571
1572	if (ipc6.hlimit < 0)
1573		ipc6.hlimit = ip6_sk_dst_hoplimit(np, fl6, dst);
1574
1575	if (msg->msg_flags&MSG_CONFIRM)
1576		goto do_confirm;
1577back_from_confirm:
1578
1579	/* Lockless fast path for the non-corking case */
1580	if (!corkreq) {
1581		struct sk_buff *skb;
1582
1583		skb = ip6_make_skb(sk, getfrag, msg, ulen,
1584				   sizeof(struct udphdr), &ipc6,
1585				   dst_rt6_info(dst),
1586				   msg->msg_flags, &cork);
1587		err = PTR_ERR(skb);
1588		if (!IS_ERR_OR_NULL(skb))
1589			err = udp_v6_send_skb(skb, fl6, &cork.base);
1590		/* ip6_make_skb steals dst reference */
1591		goto out_no_dst;
1592	}
1593
1594	lock_sock(sk);
1595	if (unlikely(up->pending)) {
1596		/* The socket is already corked while preparing it. */
1597		/* ... which is an evident application bug. --ANK */
1598		release_sock(sk);
1599
1600		net_dbg_ratelimited("udp cork app bug 2\n");
1601		err = -EINVAL;
1602		goto out;
1603	}
1604
1605	WRITE_ONCE(up->pending, AF_INET6);
1606
1607do_append_data:
1608	if (ipc6.dontfrag < 0)
1609		ipc6.dontfrag = inet6_test_bit(DONTFRAG, sk);
1610	up->len += ulen;
1611	err = ip6_append_data(sk, getfrag, msg, ulen, sizeof(struct udphdr),
1612			      &ipc6, fl6, dst_rt6_info(dst),
1613			      corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
1614	if (err)
1615		udp_v6_flush_pending_frames(sk);
1616	else if (!corkreq)
1617		err = udp_v6_push_pending_frames(sk);
1618	else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
1619		WRITE_ONCE(up->pending, 0);
1620
1621	if (err > 0)
1622		err = inet6_test_bit(RECVERR6, sk) ? net_xmit_errno(err) : 0;
1623	release_sock(sk);
1624
1625out:
1626	dst_release(dst);
1627out_no_dst:
1628	fl6_sock_release(flowlabel);
1629	txopt_put(opt_to_free);
1630	if (!err)
1631		return len;
1632	/*
1633	 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space.  Reporting
1634	 * ENOBUFS might not be good (it's not tunable per se), but otherwise
1635	 * we don't have a good statistic (IpOutDiscards but it can be too many
1636	 * things).  We could add another new stat but at least for now that
1637	 * seems like overkill.
1638	 */
1639	if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
1640		UDP6_INC_STATS(sock_net(sk),
1641			       UDP_MIB_SNDBUFERRORS, is_udplite);
1642	}
1643	return err;
1644
1645do_confirm:
1646	if (msg->msg_flags & MSG_PROBE)
1647		dst_confirm_neigh(dst, &fl6->daddr);
1648	if (!(msg->msg_flags&MSG_PROBE) || len)
1649		goto back_from_confirm;
1650	err = 0;
1651	goto out;
1652}
1653EXPORT_SYMBOL(udpv6_sendmsg);
1654
1655static void udpv6_splice_eof(struct socket *sock)
1656{
1657	struct sock *sk = sock->sk;
1658	struct udp_sock *up = udp_sk(sk);
1659
1660	if (!READ_ONCE(up->pending) || udp_test_bit(CORK, sk))
1661		return;
1662
1663	lock_sock(sk);
1664	if (up->pending && !udp_test_bit(CORK, sk))
1665		udp_v6_push_pending_frames(sk);
1666	release_sock(sk);
1667}
1668
1669void udpv6_destroy_sock(struct sock *sk)
1670{
 1671	struct udp_sock *up = udp_sk(sk);

 1672	lock_sock(sk);
1673
1674	/* protects from races with udp_abort() */
1675	sock_set_flag(sk, SOCK_DEAD);
1676	udp_v6_flush_pending_frames(sk);
1677	release_sock(sk);
1678
1679	if (static_branch_unlikely(&udpv6_encap_needed_key)) {
1680		if (up->encap_type) {
 1681			void (*encap_destroy)(struct sock *sk);

 1682			encap_destroy = READ_ONCE(up->encap_destroy);
1683			if (encap_destroy)
1684				encap_destroy(sk);
1685		}
1686		if (udp_test_bit(ENCAP_ENABLED, sk)) {
1687			static_branch_dec(&udpv6_encap_needed_key);
1688			udp_encap_disable();
1689		}
1690	}
1691}
1692
1693/*
1694 *	Socket option code for UDP
1695 */
1696int udpv6_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
1697		     unsigned int optlen)
1698{
1699	if (level == SOL_UDP  ||  level == SOL_UDPLITE || level == SOL_SOCKET)
1700		return udp_lib_setsockopt(sk, level, optname,
1701					  optval, optlen,
1702					  udp_v6_push_pending_frames);
1703	return ipv6_setsockopt(sk, level, optname, optval, optlen);
1704}
1705
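/* Illustrative only: from userspace, SOL_UDP options such as UDP_CORK
 * land here, e.g.:
 *
 *	int on = 1;
 *
 *	setsockopt(fd, SOL_UDP, UDP_CORK, &on, sizeof(on));
 *
 * while IPPROTO_IPV6-level options take the ipv6_setsockopt() path.
 */
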
1706int udpv6_getsockopt(struct sock *sk, int level, int optname,
1707		     char __user *optval, int __user *optlen)
1708{
1709	if (level == SOL_UDP  ||  level == SOL_UDPLITE)
1710		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
1711	return ipv6_getsockopt(sk, level, optname, optval, optlen);
1712}
1713
1714
1715/* ------------------------------------------------------------------------ */
1716#ifdef CONFIG_PROC_FS
1717int udp6_seq_show(struct seq_file *seq, void *v)
1718{
1719	if (v == SEQ_START_TOKEN) {
1720		seq_puts(seq, IPV6_SEQ_DGRAM_HEADER);
1721	} else {
1722		int bucket = ((struct udp_iter_state *)seq->private)->bucket;
1723		const struct inet_sock *inet = inet_sk((const struct sock *)v);
1724		__u16 srcp = ntohs(inet->inet_sport);
1725		__u16 destp = ntohs(inet->inet_dport);
1726		__ip6_dgram_sock_seq_show(seq, v, srcp, destp,
1727					  udp_rqueue_get(v), bucket);
1728	}
1729	return 0;
1730}
1731
1732const struct seq_operations udp6_seq_ops = {
1733	.start		= udp_seq_start,
1734	.next		= udp_seq_next,
1735	.stop		= udp_seq_stop,
1736	.show		= udp6_seq_show,
1737};
1738EXPORT_SYMBOL(udp6_seq_ops);
1739
1740static struct udp_seq_afinfo udp6_seq_afinfo = {
1741	.family		= AF_INET6,
1742	.udp_table	= NULL,
1743};
1744
1745int __net_init udp6_proc_init(struct net *net)
1746{
1747	if (!proc_create_net_data("udp6", 0444, net->proc_net, &udp6_seq_ops,
1748			sizeof(struct udp_iter_state), &udp6_seq_afinfo))
1749		return -ENOMEM;
1750	return 0;
1751}
1752
1753void udp6_proc_exit(struct net *net)
1754{
1755	remove_proc_entry("udp6", net->proc_net);
1756}
1757#endif /* CONFIG_PROC_FS */
1758
1759/* ------------------------------------------------------------------------ */
1760
1761struct proto udpv6_prot = {
1762	.name			= "UDPv6",
1763	.owner			= THIS_MODULE,
1764	.close			= udp_lib_close,
1765	.pre_connect		= udpv6_pre_connect,
1766	.connect		= ip6_datagram_connect,
1767	.disconnect		= udp_disconnect,
1768	.ioctl			= udp_ioctl,
1769	.init			= udpv6_init_sock,
1770	.destroy		= udpv6_destroy_sock,
1771	.setsockopt		= udpv6_setsockopt,
1772	.getsockopt		= udpv6_getsockopt,
1773	.sendmsg		= udpv6_sendmsg,
1774	.recvmsg		= udpv6_recvmsg,
1775	.splice_eof		= udpv6_splice_eof,
1776	.release_cb		= ip6_datagram_release_cb,
1777	.hash			= udp_lib_hash,
1778	.unhash			= udp_lib_unhash,
1779	.rehash			= udp_v6_rehash,
1780	.get_port		= udp_v6_get_port,
1781	.put_port		= udp_lib_unhash,
1782#ifdef CONFIG_BPF_SYSCALL
1783	.psock_update_sk_prot	= udp_bpf_update_proto,
1784#endif
1785
1786	.memory_allocated	= &udp_memory_allocated,
1787	.per_cpu_fw_alloc	= &udp_memory_per_cpu_fw_alloc,
1788
1789	.sysctl_mem		= sysctl_udp_mem,
1790	.sysctl_wmem_offset     = offsetof(struct net, ipv4.sysctl_udp_wmem_min),
1791	.sysctl_rmem_offset     = offsetof(struct net, ipv4.sysctl_udp_rmem_min),
1792	.obj_size		= sizeof(struct udp6_sock),
1793	.ipv6_pinfo_offset = offsetof(struct udp6_sock, inet6),
1794	.h.udp_table		= NULL,
1795	.diag_destroy		= udp_abort,
1796};
1797
1798static struct inet_protosw udpv6_protosw = {
1799	.type =      SOCK_DGRAM,
1800	.protocol =  IPPROTO_UDP,
1801	.prot =      &udpv6_prot,
1802	.ops =       &inet6_dgram_ops,
1803	.flags =     INET_PROTOSW_PERMANENT,
1804};
1805
1806int __init udpv6_init(void)
1807{
1808	int ret;
1809
1810	net_hotdata.udpv6_protocol = (struct inet6_protocol) {
1811		.handler     = udpv6_rcv,
1812		.err_handler = udpv6_err,
1813		.flags	     = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
1814	};
1815	ret = inet6_add_protocol(&net_hotdata.udpv6_protocol, IPPROTO_UDP);
1816	if (ret)
1817		goto out;
1818
1819	ret = inet6_register_protosw(&udpv6_protosw);
1820	if (ret)
1821		goto out_udpv6_protocol;
1822out:
1823	return ret;
1824
1825out_udpv6_protocol:
1826	inet6_del_protocol(&net_hotdata.udpv6_protocol, IPPROTO_UDP);
1827	goto out;
1828}
1829
1830void udpv6_exit(void)
1831{
1832	inet6_unregister_protosw(&udpv6_protosw);
1833	inet6_del_protocol(&net_hotdata.udpv6_protocol, IPPROTO_UDP);
1834}