v6.2 (net/ipv6/udp.c)
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 *	UDP over IPv6
   4 *	Linux INET6 implementation
   5 *
   6 *	Authors:
   7 *	Pedro Roque		<roque@di.fc.ul.pt>
   8 *
   9 *	Based on linux/ipv4/udp.c
  10 *
  11 *	Fixes:
  12 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
  13 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
  14 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
  15 *					a single port at the same time.
  16 *      Kazunori MIYAZAWA @USAGI:       change process style to use ip6_append_data
  17 *      YOSHIFUJI Hideaki @USAGI:	convert /proc/net/udp6 to seq_file.
  18 */
  19
  20#include <linux/bpf-cgroup.h>
  21#include <linux/errno.h>
  22#include <linux/types.h>
  23#include <linux/socket.h>
  24#include <linux/sockios.h>
  25#include <linux/net.h>
  26#include <linux/in6.h>
  27#include <linux/netdevice.h>
  28#include <linux/if_arp.h>
  29#include <linux/ipv6.h>
  30#include <linux/icmpv6.h>
  31#include <linux/init.h>
  32#include <linux/module.h>
  33#include <linux/skbuff.h>
  34#include <linux/slab.h>
  35#include <linux/uaccess.h>
  36#include <linux/indirect_call_wrapper.h>
  37
  38#include <net/addrconf.h>
  39#include <net/ndisc.h>
  40#include <net/protocol.h>
  41#include <net/transp_v6.h>
  42#include <net/ip6_route.h>
  43#include <net/raw.h>
  44#include <net/seg6.h>
  45#include <net/tcp_states.h>
  46#include <net/ip6_checksum.h>
  47#include <net/ip6_tunnel.h>
  48#include <net/xfrm.h>
  49#include <net/inet_hashtables.h>
  50#include <net/inet6_hashtables.h>
  51#include <net/busy_poll.h>
  52#include <net/sock_reuseport.h>
  53
  54#include <linux/proc_fs.h>
  55#include <linux/seq_file.h>
  56#include <trace/events/skb.h>
  57#include "udp_impl.h"
  58
  59static void udpv6_destruct_sock(struct sock *sk)
  60{
  61	udp_destruct_common(sk);
  62	inet6_sock_destruct(sk);
  63}
  64
  65int udpv6_init_sock(struct sock *sk)
  66{
  67	udp_lib_init_sock(sk);
  68	sk->sk_destruct = udpv6_destruct_sock;
  69	set_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags);
  70	return 0;
  71}
  72
  73static u32 udp6_ehashfn(const struct net *net,
  74			const struct in6_addr *laddr,
  75			const u16 lport,
  76			const struct in6_addr *faddr,
  77			const __be16 fport)
  78{
  79	static u32 udp6_ehash_secret __read_mostly;
  80	static u32 udp_ipv6_hash_secret __read_mostly;
  81
  82	u32 lhash, fhash;
  83
  84	net_get_random_once(&udp6_ehash_secret,
  85			    sizeof(udp6_ehash_secret));
  86	net_get_random_once(&udp_ipv6_hash_secret,
  87			    sizeof(udp_ipv6_hash_secret));
  88
  89	lhash = (__force u32)laddr->s6_addr32[3];
  90	fhash = __ipv6_addr_jhash(faddr, udp_ipv6_hash_secret);
  91
  92	return __inet6_ehashfn(lhash, lport, fhash, fport,
  93			       udp_ipv6_hash_secret + net_hash_mix(net));
  94}
  95
  96int udp_v6_get_port(struct sock *sk, unsigned short snum)
  97{
  98	unsigned int hash2_nulladdr =
  99		ipv6_portaddr_hash(sock_net(sk), &in6addr_any, snum);
 100	unsigned int hash2_partial =
 101		ipv6_portaddr_hash(sock_net(sk), &sk->sk_v6_rcv_saddr, 0);
 102
 103	/* precompute partial secondary hash */
 104	udp_sk(sk)->udp_portaddr_hash = hash2_partial;
 105	return udp_lib_get_port(sk, snum, hash2_nulladdr);
 106}
 107
 108void udp_v6_rehash(struct sock *sk)
 109{
 110	u16 new_hash = ipv6_portaddr_hash(sock_net(sk),
 111					  &sk->sk_v6_rcv_saddr,
 112					  inet_sk(sk)->inet_num);
 113
 114	udp_lib_rehash(sk, new_hash);
 115}
 116
 117static int compute_score(struct sock *sk, struct net *net,
 118			 const struct in6_addr *saddr, __be16 sport,
 119			 const struct in6_addr *daddr, unsigned short hnum,
 120			 int dif, int sdif)
 121{
 122	int bound_dev_if, score;
 123	struct inet_sock *inet;
 124	bool dev_match;
 125
 126	if (!net_eq(sock_net(sk), net) ||
 127	    udp_sk(sk)->udp_port_hash != hnum ||
 128	    sk->sk_family != PF_INET6)
 129		return -1;
 130
 131	if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
 132		return -1;
 133
 134	score = 0;
 135	inet = inet_sk(sk);
 136
 137	if (inet->inet_dport) {
 138		if (inet->inet_dport != sport)
 139			return -1;
 140		score++;
 141	}
 142
 143	if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
 144		if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr))
 145			return -1;
 146		score++;
 147	}
 148
 149	bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
 150	dev_match = udp_sk_bound_dev_eq(net, bound_dev_if, dif, sdif);
 151	if (!dev_match)
 152		return -1;
 153	if (bound_dev_if)
 154		score++;
 155
 156	if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
 157		score++;
 158
 159	return score;
 160}
 161
 162static struct sock *lookup_reuseport(struct net *net, struct sock *sk,
 163				     struct sk_buff *skb,
 164				     const struct in6_addr *saddr,
 165				     __be16 sport,
 166				     const struct in6_addr *daddr,
 167				     unsigned int hnum)
 168{
 169	struct sock *reuse_sk = NULL;
 170	u32 hash;
 171
 172	if (sk->sk_reuseport && sk->sk_state != TCP_ESTABLISHED) {
 173		hash = udp6_ehashfn(net, daddr, hnum, saddr, sport);
 174		reuse_sk = reuseport_select_sock(sk, hash, skb,
 175						 sizeof(struct udphdr));
 176	}
 177	return reuse_sk;
 178}
 179
 180/* called with rcu_read_lock() */
 181static struct sock *udp6_lib_lookup2(struct net *net,
 182		const struct in6_addr *saddr, __be16 sport,
 183		const struct in6_addr *daddr, unsigned int hnum,
 184		int dif, int sdif, struct udp_hslot *hslot2,
 185		struct sk_buff *skb)
 186{
 187	struct sock *sk, *result;
 188	int score, badness;
 189
 190	result = NULL;
 191	badness = -1;
 192	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
 193		score = compute_score(sk, net, saddr, sport,
 194				      daddr, hnum, dif, sdif);
 195		if (score > badness) {
 196			result = lookup_reuseport(net, sk, skb,
 197						  saddr, sport, daddr, hnum);
 198			/* Fall back to scoring if group has connections */
 199			if (result && !reuseport_has_conns(sk))
 200				return result;
 201
 202			result = result ? : sk;
 203			badness = score;
 204		}
 205	}
 206	return result;
 207}
 208
 209static inline struct sock *udp6_lookup_run_bpf(struct net *net,
 210					       struct udp_table *udptable,
 211					       struct sk_buff *skb,
 212					       const struct in6_addr *saddr,
 213					       __be16 sport,
 214					       const struct in6_addr *daddr,
 215					       u16 hnum, const int dif)
 216{
 217	struct sock *sk, *reuse_sk;
 218	bool no_reuseport;
 219
 220	if (udptable != net->ipv4.udp_table)
 221		return NULL; /* only UDP is supported */
 222
 223	no_reuseport = bpf_sk_lookup_run_v6(net, IPPROTO_UDP, saddr, sport,
 224					    daddr, hnum, dif, &sk);
 225	if (no_reuseport || IS_ERR_OR_NULL(sk))
 226		return sk;
 227
 228	reuse_sk = lookup_reuseport(net, sk, skb, saddr, sport, daddr, hnum);
 229	if (reuse_sk)
 230		sk = reuse_sk;
 231	return sk;
 232}
 233
 234/* rcu_read_lock() must be held */
 235struct sock *__udp6_lib_lookup(struct net *net,
 236			       const struct in6_addr *saddr, __be16 sport,
 237			       const struct in6_addr *daddr, __be16 dport,
 238			       int dif, int sdif, struct udp_table *udptable,
 239			       struct sk_buff *skb)
 240{
 241	unsigned short hnum = ntohs(dport);
 242	unsigned int hash2, slot2;
 243	struct udp_hslot *hslot2;
 244	struct sock *result, *sk;
 245
 246	hash2 = ipv6_portaddr_hash(net, daddr, hnum);
 247	slot2 = hash2 & udptable->mask;
 248	hslot2 = &udptable->hash2[slot2];
 249
 250	/* Lookup connected or non-wildcard sockets */
 251	result = udp6_lib_lookup2(net, saddr, sport,
 252				  daddr, hnum, dif, sdif,
 253				  hslot2, skb);
 254	if (!IS_ERR_OR_NULL(result) && result->sk_state == TCP_ESTABLISHED)
 255		goto done;
 256
 257	/* Lookup redirect from BPF */
 258	if (static_branch_unlikely(&bpf_sk_lookup_enabled)) {
 259		sk = udp6_lookup_run_bpf(net, udptable, skb,
 260					 saddr, sport, daddr, hnum, dif);
 261		if (sk) {
 262			result = sk;
 263			goto done;
 264		}
 265	}
 266
 267	/* Got non-wildcard socket or error on first lookup */
 268	if (result)
 269		goto done;
 270
 271	/* Lookup wildcard sockets */
 272	hash2 = ipv6_portaddr_hash(net, &in6addr_any, hnum);
 273	slot2 = hash2 & udptable->mask;
 274	hslot2 = &udptable->hash2[slot2];
 275
 276	result = udp6_lib_lookup2(net, saddr, sport,
 277				  &in6addr_any, hnum, dif, sdif,
 278				  hslot2, skb);
 279done:
 280	if (IS_ERR(result))
 281		return NULL;
 282	return result;
 283}
 284EXPORT_SYMBOL_GPL(__udp6_lib_lookup);
 285
 286static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb,
 287					  __be16 sport, __be16 dport,
 288					  struct udp_table *udptable)
 289{
 290	const struct ipv6hdr *iph = ipv6_hdr(skb);
 291
 292	return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
 293				 &iph->daddr, dport, inet6_iif(skb),
 294				 inet6_sdif(skb), udptable, skb);
 295}
 296
 297struct sock *udp6_lib_lookup_skb(const struct sk_buff *skb,
 298				 __be16 sport, __be16 dport)
 299{
 300	const struct ipv6hdr *iph = ipv6_hdr(skb);
 301	struct net *net = dev_net(skb->dev);
 302
 303	return __udp6_lib_lookup(net, &iph->saddr, sport,
 304				 &iph->daddr, dport, inet6_iif(skb),
 305				 inet6_sdif(skb), net->ipv4.udp_table, NULL);
 306}
 307
 308/* Must be called under rcu_read_lock().
 309 * Does increment socket refcount.
 310 */
 311#if IS_ENABLED(CONFIG_NF_TPROXY_IPV6) || IS_ENABLED(CONFIG_NF_SOCKET_IPV6)
 312struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport,
 313			     const struct in6_addr *daddr, __be16 dport, int dif)
 314{
 315	struct sock *sk;
 316
 317	sk =  __udp6_lib_lookup(net, saddr, sport, daddr, dport,
 318				dif, 0, net->ipv4.udp_table, NULL);
 319	if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
 320		sk = NULL;
 321	return sk;
 322}
 323EXPORT_SYMBOL_GPL(udp6_lib_lookup);
 324#endif
 325
  326/* do not use the scratch area len for jumbograms: their length exceeds the
  327 * scratch area space; note that the IP6CB flags are still in the first
  328 * cacheline, so checking for jumbograms is cheap
 329 */
 330static int udp6_skb_len(struct sk_buff *skb)
 331{
 332	return unlikely(inet6_is_jumbogram(skb)) ? skb->len : udp_skb_len(skb);
 333}
 334
 335/*
 336 *	This should be easy, if there is something there we
 337 *	return it, otherwise we block.
 338 */
 339
 340int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
 341		  int flags, int *addr_len)
 342{
 343	struct ipv6_pinfo *np = inet6_sk(sk);
 344	struct inet_sock *inet = inet_sk(sk);
 345	struct sk_buff *skb;
 346	unsigned int ulen, copied;
 347	int off, err, peeking = flags & MSG_PEEK;
 348	int is_udplite = IS_UDPLITE(sk);
 349	struct udp_mib __percpu *mib;
 350	bool checksum_valid = false;
 351	int is_udp4;
 352
 353	if (flags & MSG_ERRQUEUE)
 354		return ipv6_recv_error(sk, msg, len, addr_len);
 355
 356	if (np->rxpmtu && np->rxopt.bits.rxpmtu)
 357		return ipv6_recv_rxpmtu(sk, msg, len, addr_len);
 358
 359try_again:
 360	off = sk_peek_offset(sk, flags);
 361	skb = __skb_recv_udp(sk, flags, &off, &err);
 362	if (!skb)
 363		return err;
 364
 365	ulen = udp6_skb_len(skb);
 366	copied = len;
 367	if (copied > ulen - off)
 368		copied = ulen - off;
 369	else if (copied < ulen)
 370		msg->msg_flags |= MSG_TRUNC;
 371
 372	is_udp4 = (skb->protocol == htons(ETH_P_IP));
 373	mib = __UDPX_MIB(sk, is_udp4);
 374
 375	/*
 376	 * If checksum is needed at all, try to do it while copying the
 377	 * data.  If the data is truncated, or if we only want a partial
 378	 * coverage checksum (UDP-Lite), do it before the copy.
 379	 */
 380
 381	if (copied < ulen || peeking ||
 382	    (is_udplite && UDP_SKB_CB(skb)->partial_cov)) {
 383		checksum_valid = udp_skb_csum_unnecessary(skb) ||
 384				!__udp_lib_checksum_complete(skb);
 385		if (!checksum_valid)
 386			goto csum_copy_err;
 387	}
 388
 389	if (checksum_valid || udp_skb_csum_unnecessary(skb)) {
 390		if (udp_skb_is_linear(skb))
 391			err = copy_linear_skb(skb, copied, off, &msg->msg_iter);
 392		else
 393			err = skb_copy_datagram_msg(skb, off, msg, copied);
 394	} else {
 395		err = skb_copy_and_csum_datagram_msg(skb, off, msg);
 396		if (err == -EINVAL)
 397			goto csum_copy_err;
 398	}
 399	if (unlikely(err)) {
 400		if (!peeking) {
 401			atomic_inc(&sk->sk_drops);
 402			SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
 403		}
 404		kfree_skb(skb);
 405		return err;
 406	}
 407	if (!peeking)
 408		SNMP_INC_STATS(mib, UDP_MIB_INDATAGRAMS);
 409
 410	sock_recv_cmsgs(msg, sk, skb);
 411
 412	/* Copy the address. */
 413	if (msg->msg_name) {
 414		DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
 415		sin6->sin6_family = AF_INET6;
 416		sin6->sin6_port = udp_hdr(skb)->source;
 417		sin6->sin6_flowinfo = 0;
 418
 419		if (is_udp4) {
 420			ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
 421					       &sin6->sin6_addr);
 422			sin6->sin6_scope_id = 0;
 423		} else {
 424			sin6->sin6_addr = ipv6_hdr(skb)->saddr;
 425			sin6->sin6_scope_id =
 426				ipv6_iface_scope_id(&sin6->sin6_addr,
 427						    inet6_iif(skb));
 428		}
 429		*addr_len = sizeof(*sin6);
 430
 431		BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk,
 432						      (struct sockaddr *)sin6);
 433	}
 434
 435	if (udp_sk(sk)->gro_enabled)
 436		udp_cmsg_recv(msg, sk, skb);
 437
 438	if (np->rxopt.all)
 439		ip6_datagram_recv_common_ctl(sk, msg, skb);
 440
 441	if (is_udp4) {
 442		if (inet->cmsg_flags)
 443			ip_cmsg_recv_offset(msg, sk, skb,
 444					    sizeof(struct udphdr), off);
 445	} else {
 446		if (np->rxopt.all)
 447			ip6_datagram_recv_specific_ctl(sk, msg, skb);
 448	}
 449
 450	err = copied;
 451	if (flags & MSG_TRUNC)
 452		err = ulen;
 453
 454	skb_consume_udp(sk, skb, peeking ? -err : err);
 455	return err;
 456
 457csum_copy_err:
 458	if (!__sk_queue_drop_skb(sk, &udp_sk(sk)->reader_queue, skb, flags,
 459				 udp_skb_destructor)) {
 460		SNMP_INC_STATS(mib, UDP_MIB_CSUMERRORS);
 461		SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
 462	}
 463	kfree_skb(skb);
 464
 465	/* starting over for a new packet, but check if we need to yield */
 466	cond_resched();
 467	msg->msg_flags &= ~MSG_TRUNC;
 468	goto try_again;
 469}
 470
 471DEFINE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
 472void udpv6_encap_enable(void)
 473{
 474	static_branch_inc(&udpv6_encap_needed_key);
 475}
 476EXPORT_SYMBOL(udpv6_encap_enable);
 477
 478/* Handler for tunnels with arbitrary destination ports: no socket lookup, go
 479 * through error handlers in encapsulations looking for a match.
 480 */
 481static int __udp6_lib_err_encap_no_sk(struct sk_buff *skb,
 482				      struct inet6_skb_parm *opt,
 483				      u8 type, u8 code, int offset, __be32 info)
 484{
 485	int i;
 486
 487	for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) {
 488		int (*handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
 489			       u8 type, u8 code, int offset, __be32 info);
 490		const struct ip6_tnl_encap_ops *encap;
 491
 492		encap = rcu_dereference(ip6tun_encaps[i]);
 493		if (!encap)
 494			continue;
 495		handler = encap->err_handler;
 496		if (handler && !handler(skb, opt, type, code, offset, info))
 497			return 0;
 498	}
 499
 500	return -ENOENT;
 501}
 502
 503/* Try to match ICMP errors to UDP tunnels by looking up a socket without
 504 * reversing source and destination port: this will match tunnels that force the
 505 * same destination port on both endpoints (e.g. VXLAN, GENEVE). Note that
 506 * lwtunnels might actually break this assumption by being configured with
 507 * different destination ports on endpoints, in this case we won't be able to
 508 * trace ICMP messages back to them.
 509 *
 510 * If this doesn't match any socket, probe tunnels with arbitrary destination
 511 * ports (e.g. FoU, GUE): there, the receiving socket is useless, as the port
 512 * we've sent packets to won't necessarily match the local destination port.
 513 *
 514 * Then ask the tunnel implementation to match the error against a valid
 515 * association.
 516 *
 517 * Return an error if we can't find a match, the socket if we need further
 518 * processing, zero otherwise.
 519 */
 520static struct sock *__udp6_lib_err_encap(struct net *net,
 521					 const struct ipv6hdr *hdr, int offset,
 522					 struct udphdr *uh,
 523					 struct udp_table *udptable,
 524					 struct sock *sk,
 525					 struct sk_buff *skb,
 526					 struct inet6_skb_parm *opt,
 527					 u8 type, u8 code, __be32 info)
 528{
 529	int (*lookup)(struct sock *sk, struct sk_buff *skb);
 530	int network_offset, transport_offset;
 531	struct udp_sock *up;
 532
 533	network_offset = skb_network_offset(skb);
 534	transport_offset = skb_transport_offset(skb);
 535
 536	/* Network header needs to point to the outer IPv6 header inside ICMP */
 537	skb_reset_network_header(skb);
 538
 539	/* Transport header needs to point to the UDP header */
 540	skb_set_transport_header(skb, offset);
 541
 542	if (sk) {
 543		up = udp_sk(sk);
 544
 545		lookup = READ_ONCE(up->encap_err_lookup);
 546		if (lookup && lookup(sk, skb))
 547			sk = NULL;
 548
 549		goto out;
 550	}
 551
 552	sk = __udp6_lib_lookup(net, &hdr->daddr, uh->source,
 553			       &hdr->saddr, uh->dest,
 554			       inet6_iif(skb), 0, udptable, skb);
 555	if (sk) {
 556		up = udp_sk(sk);
 557
 558		lookup = READ_ONCE(up->encap_err_lookup);
 559		if (!lookup || lookup(sk, skb))
 560			sk = NULL;
 561	}
 562
 563out:
 564	if (!sk) {
 565		sk = ERR_PTR(__udp6_lib_err_encap_no_sk(skb, opt, type, code,
 566							offset, info));
 567	}
 568
 569	skb_set_transport_header(skb, transport_offset);
 570	skb_set_network_header(skb, network_offset);
 571
 572	return sk;
 573}
 574
 575int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 576		   u8 type, u8 code, int offset, __be32 info,
 577		   struct udp_table *udptable)
 578{
 579	struct ipv6_pinfo *np;
 580	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
 581	const struct in6_addr *saddr = &hdr->saddr;
 582	const struct in6_addr *daddr = seg6_get_daddr(skb, opt) ? : &hdr->daddr;
 583	struct udphdr *uh = (struct udphdr *)(skb->data+offset);
 584	bool tunnel = false;
 585	struct sock *sk;
 586	int harderr;
 587	int err;
 588	struct net *net = dev_net(skb->dev);
 589
 590	sk = __udp6_lib_lookup(net, daddr, uh->dest, saddr, uh->source,
 591			       inet6_iif(skb), inet6_sdif(skb), udptable, NULL);
 592
 593	if (!sk || udp_sk(sk)->encap_type) {
 594		/* No socket for error: try tunnels before discarding */
 595		if (static_branch_unlikely(&udpv6_encap_needed_key)) {
 596			sk = __udp6_lib_err_encap(net, hdr, offset, uh,
 597						  udptable, sk, skb,
 598						  opt, type, code, info);
 599			if (!sk)
 600				return 0;
 601		} else
 602			sk = ERR_PTR(-ENOENT);
 603
 604		if (IS_ERR(sk)) {
 605			__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
 606					  ICMP6_MIB_INERRORS);
 607			return PTR_ERR(sk);
 608		}
 609
 610		tunnel = true;
 611	}
 612
 613	harderr = icmpv6_err_convert(type, code, &err);
 614	np = inet6_sk(sk);
 615
 616	if (type == ICMPV6_PKT_TOOBIG) {
 617		if (!ip6_sk_accept_pmtu(sk))
 618			goto out;
 619		ip6_sk_update_pmtu(skb, sk, info);
 620		if (np->pmtudisc != IPV6_PMTUDISC_DONT)
 621			harderr = 1;
 622	}
 623	if (type == NDISC_REDIRECT) {
 624		if (tunnel) {
 625			ip6_redirect(skb, sock_net(sk), inet6_iif(skb),
 626				     sk->sk_mark, sk->sk_uid);
 627		} else {
 628			ip6_sk_redirect(skb, sk);
 629		}
 630		goto out;
 631	}
 632
 633	/* Tunnels don't have an application socket: don't pass errors back */
 634	if (tunnel) {
 635		if (udp_sk(sk)->encap_err_rcv)
 636			udp_sk(sk)->encap_err_rcv(sk, skb, err, uh->dest,
 637						  ntohl(info), (u8 *)(uh+1));
 638		goto out;
 639	}
 640
 641	if (!np->recverr) {
 642		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
 643			goto out;
 644	} else {
 645		ipv6_icmp_error(sk, skb, err, uh->dest, ntohl(info), (u8 *)(uh+1));
 646	}
 647
 648	sk->sk_err = err;
 649	sk_error_report(sk);
 650out:
 651	return 0;
 652}
 653
 654static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 655{
 656	int rc;
 657
 658	if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
 659		sock_rps_save_rxhash(sk, skb);
 660		sk_mark_napi_id(sk, skb);
 661		sk_incoming_cpu_update(sk);
 662	} else {
 663		sk_mark_napi_id_once(sk, skb);
 664	}
 665
 666	rc = __udp_enqueue_schedule_skb(sk, skb);
 667	if (rc < 0) {
 668		int is_udplite = IS_UDPLITE(sk);
 669		enum skb_drop_reason drop_reason;
 670
 671		/* Note that an ENOMEM error is charged twice */
 672		if (rc == -ENOMEM) {
 673			UDP6_INC_STATS(sock_net(sk),
 674					 UDP_MIB_RCVBUFERRORS, is_udplite);
 675			drop_reason = SKB_DROP_REASON_SOCKET_RCVBUFF;
 676		} else {
 677			UDP6_INC_STATS(sock_net(sk),
 678				       UDP_MIB_MEMERRORS, is_udplite);
 679			drop_reason = SKB_DROP_REASON_PROTO_MEM;
 680		}
 681		UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
 682		kfree_skb_reason(skb, drop_reason);
 683		return -1;
 684	}
 685
 686	return 0;
 687}
 688
 689static __inline__ int udpv6_err(struct sk_buff *skb,
 690				struct inet6_skb_parm *opt, u8 type,
 691				u8 code, int offset, __be32 info)
 692{
 693	return __udp6_lib_err(skb, opt, type, code, offset, info,
 694			      dev_net(skb->dev)->ipv4.udp_table);
 695}
 696
 697static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
 698{
 699	enum skb_drop_reason drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;
 700	struct udp_sock *up = udp_sk(sk);
 701	int is_udplite = IS_UDPLITE(sk);
 702
 703	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
 704		drop_reason = SKB_DROP_REASON_XFRM_POLICY;
 705		goto drop;
 706	}
 707
 708	if (static_branch_unlikely(&udpv6_encap_needed_key) && up->encap_type) {
 709		int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
 710
 711		/*
 712		 * This is an encapsulation socket so pass the skb to
 713		 * the socket's udp_encap_rcv() hook. Otherwise, just
 714		 * fall through and pass this up the UDP socket.
 715		 * up->encap_rcv() returns the following value:
 716		 * =0 if skb was successfully passed to the encap
 717		 *    handler or was discarded by it.
 718		 * >0 if skb should be passed on to UDP.
 719		 * <0 if skb should be resubmitted as proto -N
 720		 */
 721
 722		/* if we're overly short, let UDP handle it */
 723		encap_rcv = READ_ONCE(up->encap_rcv);
 724		if (encap_rcv) {
 725			int ret;
 726
 727			/* Verify checksum before giving to encap */
 728			if (udp_lib_checksum_complete(skb))
 729				goto csum_error;
 730
 731			ret = encap_rcv(sk, skb);
 732			if (ret <= 0) {
 733				__UDP6_INC_STATS(sock_net(sk),
 734						 UDP_MIB_INDATAGRAMS,
 735						 is_udplite);
 736				return -ret;
 737			}
 738		}
 739
 740		/* FALLTHROUGH -- it's a UDP Packet */
 741	}
 742
 743	/*
 744	 * UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c).
 745	 */
 746	if ((up->pcflag & UDPLITE_RECV_CC)  &&  UDP_SKB_CB(skb)->partial_cov) {
 747
 748		if (up->pcrlen == 0) {          /* full coverage was set  */
 749			net_dbg_ratelimited("UDPLITE6: partial coverage %d while full coverage %d requested\n",
 750					    UDP_SKB_CB(skb)->cscov, skb->len);
 751			goto drop;
 752		}
 753		if (UDP_SKB_CB(skb)->cscov  <  up->pcrlen) {
 754			net_dbg_ratelimited("UDPLITE6: coverage %d too small, need min %d\n",
 755					    UDP_SKB_CB(skb)->cscov, up->pcrlen);
 756			goto drop;
 757		}
 758	}
 759
 760	prefetch(&sk->sk_rmem_alloc);
 761	if (rcu_access_pointer(sk->sk_filter) &&
 762	    udp_lib_checksum_complete(skb))
 763		goto csum_error;
 764
 765	if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr))) {
 766		drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
 767		goto drop;
 768	}
 769
 770	udp_csum_pull_header(skb);
 771
 772	skb_dst_drop(skb);
 773
 774	return __udpv6_queue_rcv_skb(sk, skb);
 775
 776csum_error:
 777	drop_reason = SKB_DROP_REASON_UDP_CSUM;
 778	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
 779drop:
 780	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
 781	atomic_inc(&sk->sk_drops);
 782	kfree_skb_reason(skb, drop_reason);
 783	return -1;
 784}
 785
 786static int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 787{
 788	struct sk_buff *next, *segs;
 789	int ret;
 790
 791	if (likely(!udp_unexpected_gso(sk, skb)))
 792		return udpv6_queue_rcv_one_skb(sk, skb);
 793
 794	__skb_push(skb, -skb_mac_offset(skb));
 795	segs = udp_rcv_segment(sk, skb, false);
 796	skb_list_walk_safe(segs, skb, next) {
 797		__skb_pull(skb, skb_transport_offset(skb));
 798
 799		udp_post_segment_fix_csum(skb);
 800		ret = udpv6_queue_rcv_one_skb(sk, skb);
 801		if (ret > 0)
 802			ip6_protocol_deliver_rcu(dev_net(skb->dev), skb, ret,
 803						 true);
 804	}
 805	return 0;
 806}
 807
 808static bool __udp_v6_is_mcast_sock(struct net *net, struct sock *sk,
 809				   __be16 loc_port, const struct in6_addr *loc_addr,
 810				   __be16 rmt_port, const struct in6_addr *rmt_addr,
 811				   int dif, int sdif, unsigned short hnum)
 812{
 813	struct inet_sock *inet = inet_sk(sk);
 814
 815	if (!net_eq(sock_net(sk), net))
 816		return false;
 817
 818	if (udp_sk(sk)->udp_port_hash != hnum ||
 819	    sk->sk_family != PF_INET6 ||
 820	    (inet->inet_dport && inet->inet_dport != rmt_port) ||
 821	    (!ipv6_addr_any(&sk->sk_v6_daddr) &&
 822		    !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr)) ||
 823	    !udp_sk_bound_dev_eq(net, READ_ONCE(sk->sk_bound_dev_if), dif, sdif) ||
 824	    (!ipv6_addr_any(&sk->sk_v6_rcv_saddr) &&
 825		    !ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr)))
 826		return false;
 827	if (!inet6_mc_check(sk, loc_addr, rmt_addr))
 828		return false;
 829	return true;
 830}
 831
 832static void udp6_csum_zero_error(struct sk_buff *skb)
 833{
 834	/* RFC 2460 section 8.1 says that we SHOULD log
 835	 * this error. Well, it is reasonable.
 836	 */
 837	net_dbg_ratelimited("IPv6: udp checksum is 0 for [%pI6c]:%u->[%pI6c]:%u\n",
 838			    &ipv6_hdr(skb)->saddr, ntohs(udp_hdr(skb)->source),
 839			    &ipv6_hdr(skb)->daddr, ntohs(udp_hdr(skb)->dest));
 840}
 841
 842/*
 843 * Note: called only from the BH handler context,
 844 * so we don't need to lock the hashes.
 845 */
 846static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
 847		const struct in6_addr *saddr, const struct in6_addr *daddr,
 848		struct udp_table *udptable, int proto)
 849{
 850	struct sock *sk, *first = NULL;
 851	const struct udphdr *uh = udp_hdr(skb);
 852	unsigned short hnum = ntohs(uh->dest);
 853	struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
 854	unsigned int offset = offsetof(typeof(*sk), sk_node);
 855	unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
 856	int dif = inet6_iif(skb);
 857	int sdif = inet6_sdif(skb);
 858	struct hlist_node *node;
 859	struct sk_buff *nskb;
 860
 861	if (use_hash2) {
 862		hash2_any = ipv6_portaddr_hash(net, &in6addr_any, hnum) &
 863			    udptable->mask;
 864		hash2 = ipv6_portaddr_hash(net, daddr, hnum) & udptable->mask;
 865start_lookup:
 866		hslot = &udptable->hash2[hash2];
 867		offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
 868	}
 869
 870	sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) {
 871		if (!__udp_v6_is_mcast_sock(net, sk, uh->dest, daddr,
 872					    uh->source, saddr, dif, sdif,
 873					    hnum))
 874			continue;
 875		/* If zero checksum and no_check is not on for
 876		 * the socket then skip it.
 877		 */
 878		if (!uh->check && !udp_sk(sk)->no_check6_rx)
 879			continue;
 880		if (!first) {
 881			first = sk;
 882			continue;
 883		}
 884		nskb = skb_clone(skb, GFP_ATOMIC);
 885		if (unlikely(!nskb)) {
 886			atomic_inc(&sk->sk_drops);
 887			__UDP6_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
 888					 IS_UDPLITE(sk));
 889			__UDP6_INC_STATS(net, UDP_MIB_INERRORS,
 890					 IS_UDPLITE(sk));
 891			continue;
 892		}
 893
 894		if (udpv6_queue_rcv_skb(sk, nskb) > 0)
 895			consume_skb(nskb);
 896	}
 897
 898	/* Also lookup *:port if we are using hash2 and haven't done so yet. */
 899	if (use_hash2 && hash2 != hash2_any) {
 900		hash2 = hash2_any;
 901		goto start_lookup;
 902	}
 903
 904	if (first) {
 905		if (udpv6_queue_rcv_skb(first, skb) > 0)
 906			consume_skb(skb);
 907	} else {
 908		kfree_skb(skb);
 909		__UDP6_INC_STATS(net, UDP_MIB_IGNOREDMULTI,
 910				 proto == IPPROTO_UDPLITE);
 911	}
 912	return 0;
 913}
 914
 915static void udp6_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
 916{
 917	if (udp_sk_rx_dst_set(sk, dst)) {
 918		const struct rt6_info *rt = (const struct rt6_info *)dst;
 919
 920		sk->sk_rx_dst_cookie = rt6_get_cookie(rt);
 921	}
 922}
 923
  924/* wrapper for udp_queue_rcv_skb taking care of csum conversion and
 925 * return code conversion for ip layer consumption
 926 */
 927static int udp6_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
 928				struct udphdr *uh)
 929{
 930	int ret;
 931
 932	if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
 933		skb_checksum_try_convert(skb, IPPROTO_UDP, ip6_compute_pseudo);
 934
 935	ret = udpv6_queue_rcv_skb(sk, skb);
 936
 937	/* a return value > 0 means to resubmit the input */
 938	if (ret > 0)
 939		return ret;
 940	return 0;
 941}
 942
 943int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
 944		   int proto)
 945{
 946	enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED;
 947	const struct in6_addr *saddr, *daddr;
 948	struct net *net = dev_net(skb->dev);
 949	struct udphdr *uh;
 950	struct sock *sk;
 951	bool refcounted;
 952	u32 ulen = 0;
 953
 954	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
 955		goto discard;
 956
 957	saddr = &ipv6_hdr(skb)->saddr;
 958	daddr = &ipv6_hdr(skb)->daddr;
 959	uh = udp_hdr(skb);
 960
 961	ulen = ntohs(uh->len);
 962	if (ulen > skb->len)
 963		goto short_packet;
 964
 965	if (proto == IPPROTO_UDP) {
 966		/* UDP validates ulen. */
 967
 968		/* Check for jumbo payload */
 969		if (ulen == 0)
 970			ulen = skb->len;
 971
 972		if (ulen < sizeof(*uh))
 973			goto short_packet;
 974
 975		if (ulen < skb->len) {
 976			if (pskb_trim_rcsum(skb, ulen))
 977				goto short_packet;
 978			saddr = &ipv6_hdr(skb)->saddr;
 979			daddr = &ipv6_hdr(skb)->daddr;
 980			uh = udp_hdr(skb);
 981		}
 982	}
 983
 984	if (udp6_csum_init(skb, uh, proto))
 985		goto csum_error;
 986
 987	/* Check if the socket is already available, e.g. due to early demux */
 988	sk = skb_steal_sock(skb, &refcounted);
 989	if (sk) {
 990		struct dst_entry *dst = skb_dst(skb);
 991		int ret;
 992
 993		if (unlikely(rcu_dereference(sk->sk_rx_dst) != dst))
 994			udp6_sk_rx_dst_set(sk, dst);
 995
 996		if (!uh->check && !udp_sk(sk)->no_check6_rx) {
 997			if (refcounted)
 998				sock_put(sk);
 999			goto report_csum_error;
1000		}
1001
1002		ret = udp6_unicast_rcv_skb(sk, skb, uh);
1003		if (refcounted)
1004			sock_put(sk);
1005		return ret;
1006	}
1007
1008	/*
1009	 *	Multicast receive code
1010	 */
1011	if (ipv6_addr_is_multicast(daddr))
1012		return __udp6_lib_mcast_deliver(net, skb,
1013				saddr, daddr, udptable, proto);
1014
1015	/* Unicast */
1016	sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
1017	if (sk) {
1018		if (!uh->check && !udp_sk(sk)->no_check6_rx)
1019			goto report_csum_error;
1020		return udp6_unicast_rcv_skb(sk, skb, uh);
1021	}
1022
1023	reason = SKB_DROP_REASON_NO_SOCKET;
1024
1025	if (!uh->check)
1026		goto report_csum_error;
1027
1028	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1029		goto discard;
1030
1031	if (udp_lib_checksum_complete(skb))
1032		goto csum_error;
1033
1034	__UDP6_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
1035	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
1036
1037	kfree_skb_reason(skb, reason);
1038	return 0;
1039
1040short_packet:
1041	if (reason == SKB_DROP_REASON_NOT_SPECIFIED)
1042		reason = SKB_DROP_REASON_PKT_TOO_SMALL;
1043	net_dbg_ratelimited("UDP%sv6: short packet: From [%pI6c]:%u %d/%d to [%pI6c]:%u\n",
1044			    proto == IPPROTO_UDPLITE ? "-Lite" : "",
1045			    saddr, ntohs(uh->source),
1046			    ulen, skb->len,
1047			    daddr, ntohs(uh->dest));
1048	goto discard;
1049
1050report_csum_error:
1051	udp6_csum_zero_error(skb);
1052csum_error:
1053	if (reason == SKB_DROP_REASON_NOT_SPECIFIED)
1054		reason = SKB_DROP_REASON_UDP_CSUM;
1055	__UDP6_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
1056discard:
1057	__UDP6_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
1058	kfree_skb_reason(skb, reason);
1059	return 0;
1060}
1061
1062
1063static struct sock *__udp6_lib_demux_lookup(struct net *net,
1064			__be16 loc_port, const struct in6_addr *loc_addr,
1065			__be16 rmt_port, const struct in6_addr *rmt_addr,
1066			int dif, int sdif)
1067{
1068	struct udp_table *udptable = net->ipv4.udp_table;
1069	unsigned short hnum = ntohs(loc_port);
1070	unsigned int hash2, slot2;
1071	struct udp_hslot *hslot2;
1072	__portpair ports;
1073	struct sock *sk;
1074
1075	hash2 = ipv6_portaddr_hash(net, loc_addr, hnum);
1076	slot2 = hash2 & udptable->mask;
1077	hslot2 = &udptable->hash2[slot2];
1078	ports = INET_COMBINED_PORTS(rmt_port, hnum);
1079
1080	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
1081		if (sk->sk_state == TCP_ESTABLISHED &&
1082		    inet6_match(net, sk, rmt_addr, loc_addr, ports, dif, sdif))
1083			return sk;
1084		/* Only check first socket in chain */
1085		break;
1086	}
1087	return NULL;
1088}
1089
1090void udp_v6_early_demux(struct sk_buff *skb)
1091{
1092	struct net *net = dev_net(skb->dev);
1093	const struct udphdr *uh;
1094	struct sock *sk;
1095	struct dst_entry *dst;
1096	int dif = skb->dev->ifindex;
1097	int sdif = inet6_sdif(skb);
1098
1099	if (!pskb_may_pull(skb, skb_transport_offset(skb) +
1100	    sizeof(struct udphdr)))
1101		return;
1102
1103	uh = udp_hdr(skb);
1104
1105	if (skb->pkt_type == PACKET_HOST)
1106		sk = __udp6_lib_demux_lookup(net, uh->dest,
1107					     &ipv6_hdr(skb)->daddr,
1108					     uh->source, &ipv6_hdr(skb)->saddr,
1109					     dif, sdif);
1110	else
1111		return;
1112
1113	if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
1114		return;
1115
1116	skb->sk = sk;
1117	skb->destructor = sock_efree;
1118	dst = rcu_dereference(sk->sk_rx_dst);
1119
1120	if (dst)
1121		dst = dst_check(dst, sk->sk_rx_dst_cookie);
1122	if (dst) {
1123		/* set noref for now.
1124		 * any place which wants to hold dst has to call
1125		 * dst_hold_safe()
1126		 */
1127		skb_dst_set_noref(skb, dst);
1128	}
1129}
1130
1131INDIRECT_CALLABLE_SCOPE int udpv6_rcv(struct sk_buff *skb)
1132{
1133	return __udp6_lib_rcv(skb, dev_net(skb->dev)->ipv4.udp_table, IPPROTO_UDP);
1134}
1135
1136/*
1137 * Throw away all pending data and cancel the corking. Socket is locked.
1138 */
1139static void udp_v6_flush_pending_frames(struct sock *sk)
1140{
1141	struct udp_sock *up = udp_sk(sk);
1142
1143	if (up->pending == AF_INET)
1144		udp_flush_pending_frames(sk);
1145	else if (up->pending) {
1146		up->len = 0;
1147		up->pending = 0;
1148		ip6_flush_pending_frames(sk);
1149	}
1150}
1151
1152static int udpv6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
1153			     int addr_len)
1154{
1155	if (addr_len < offsetofend(struct sockaddr, sa_family))
1156		return -EINVAL;
1157	/* The following checks are replicated from __ip6_datagram_connect()
1158	 * and intended to prevent BPF program called below from accessing
1159	 * bytes that are out of the bound specified by user in addr_len.
1160	 */
1161	if (uaddr->sa_family == AF_INET) {
1162		if (ipv6_only_sock(sk))
1163			return -EAFNOSUPPORT;
1164		return udp_pre_connect(sk, uaddr, addr_len);
1165	}
1166
1167	if (addr_len < SIN6_LEN_RFC2133)
1168		return -EINVAL;
1169
1170	return BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr);
1171}
1172
1173/**
1174 *	udp6_hwcsum_outgoing  -  handle outgoing HW checksumming
1175 *	@sk:	socket we are sending on
1176 *	@skb:	sk_buff containing the filled-in UDP header
1177 *		(checksum field must be zeroed out)
1178 *	@saddr: source address
1179 *	@daddr: destination address
1180 *	@len:	length of packet
1181 */
1182static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
1183				 const struct in6_addr *saddr,
1184				 const struct in6_addr *daddr, int len)
1185{
1186	unsigned int offset;
1187	struct udphdr *uh = udp_hdr(skb);
1188	struct sk_buff *frags = skb_shinfo(skb)->frag_list;
1189	__wsum csum = 0;
1190
1191	if (!frags) {
1192		/* Only one fragment on the socket.  */
1193		skb->csum_start = skb_transport_header(skb) - skb->head;
1194		skb->csum_offset = offsetof(struct udphdr, check);
1195		uh->check = ~csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP, 0);
1196	} else {
 1197		/*
 1198		 * HW checksumming won't work here: there are two or more
 1199		 * fragments on the socket, so the checksums of all sk_buffs
 1200		 * have to be combined
 1201		 */
1202		offset = skb_transport_offset(skb);
1203		skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
1204		csum = skb->csum;
1205
1206		skb->ip_summed = CHECKSUM_NONE;
1207
1208		do {
1209			csum = csum_add(csum, frags->csum);
1210		} while ((frags = frags->next));
1211
1212		uh->check = csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP,
1213					    csum);
1214		if (uh->check == 0)
1215			uh->check = CSUM_MANGLED_0;
1216	}
1217}
1218
1219/*
1220 *	Sending
1221 */
1222
1223static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
1224			   struct inet_cork *cork)
1225{
1226	struct sock *sk = skb->sk;
1227	struct udphdr *uh;
1228	int err = 0;
1229	int is_udplite = IS_UDPLITE(sk);
1230	__wsum csum = 0;
1231	int offset = skb_transport_offset(skb);
1232	int len = skb->len - offset;
1233	int datalen = len - sizeof(*uh);
1234
1235	/*
1236	 * Create a UDP header
1237	 */
1238	uh = udp_hdr(skb);
1239	uh->source = fl6->fl6_sport;
1240	uh->dest = fl6->fl6_dport;
1241	uh->len = htons(len);
1242	uh->check = 0;
1243
1244	if (cork->gso_size) {
1245		const int hlen = skb_network_header_len(skb) +
1246				 sizeof(struct udphdr);
1247
1248		if (hlen + cork->gso_size > cork->fragsize) {
1249			kfree_skb(skb);
1250			return -EINVAL;
1251		}
1252		if (datalen > cork->gso_size * UDP_MAX_SEGMENTS) {
1253			kfree_skb(skb);
1254			return -EINVAL;
1255		}
1256		if (udp_sk(sk)->no_check6_tx) {
1257			kfree_skb(skb);
1258			return -EINVAL;
1259		}
1260		if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
1261		    dst_xfrm(skb_dst(skb))) {
1262			kfree_skb(skb);
1263			return -EIO;
1264		}
1265
1266		if (datalen > cork->gso_size) {
1267			skb_shinfo(skb)->gso_size = cork->gso_size;
1268			skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
1269			skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen,
1270								 cork->gso_size);
1271		}
1272		goto csum_partial;
1273	}
1274
1275	if (is_udplite)
1276		csum = udplite_csum(skb);
1277	else if (udp_sk(sk)->no_check6_tx) {   /* UDP csum disabled */
1278		skb->ip_summed = CHECKSUM_NONE;
1279		goto send;
1280	} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
1281csum_partial:
1282		udp6_hwcsum_outgoing(sk, skb, &fl6->saddr, &fl6->daddr, len);
1283		goto send;
1284	} else
1285		csum = udp_csum(skb);
1286
1287	/* add protocol-dependent pseudo-header */
1288	uh->check = csum_ipv6_magic(&fl6->saddr, &fl6->daddr,
1289				    len, fl6->flowi6_proto, csum);
1290	if (uh->check == 0)
1291		uh->check = CSUM_MANGLED_0;
1292
1293send:
1294	err = ip6_send_skb(skb);
1295	if (err) {
1296		if (err == -ENOBUFS && !inet6_sk(sk)->recverr) {
1297			UDP6_INC_STATS(sock_net(sk),
1298				       UDP_MIB_SNDBUFERRORS, is_udplite);
1299			err = 0;
1300		}
1301	} else {
1302		UDP6_INC_STATS(sock_net(sk),
1303			       UDP_MIB_OUTDATAGRAMS, is_udplite);
1304	}
1305	return err;
1306}
1307
1308static int udp_v6_push_pending_frames(struct sock *sk)
1309{
1310	struct sk_buff *skb;
1311	struct udp_sock  *up = udp_sk(sk);
1312	int err = 0;
1313
1314	if (up->pending == AF_INET)
1315		return udp_push_pending_frames(sk);
1316
1317	skb = ip6_finish_skb(sk);
1318	if (!skb)
1319		goto out;
1320
1321	err = udp_v6_send_skb(skb, &inet_sk(sk)->cork.fl.u.ip6,
1322			      &inet_sk(sk)->cork.base);
1323out:
1324	up->len = 0;
1325	up->pending = 0;
1326	return err;
1327}
1328
1329int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
1330{
1331	struct ipv6_txoptions opt_space;
1332	struct udp_sock *up = udp_sk(sk);
1333	struct inet_sock *inet = inet_sk(sk);
1334	struct ipv6_pinfo *np = inet6_sk(sk);
1335	DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
1336	struct in6_addr *daddr, *final_p, final;
1337	struct ipv6_txoptions *opt = NULL;
1338	struct ipv6_txoptions *opt_to_free = NULL;
1339	struct ip6_flowlabel *flowlabel = NULL;
1340	struct inet_cork_full cork;
1341	struct flowi6 *fl6 = &cork.fl.u.ip6;
1342	struct dst_entry *dst;
1343	struct ipcm6_cookie ipc6;
1344	int addr_len = msg->msg_namelen;
1345	bool connected = false;
1346	int ulen = len;
1347	int corkreq = READ_ONCE(up->corkflag) || msg->msg_flags&MSG_MORE;
1348	int err;
1349	int is_udplite = IS_UDPLITE(sk);
1350	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
1351
1352	ipcm6_init(&ipc6);
1353	ipc6.gso_size = READ_ONCE(up->gso_size);
1354	ipc6.sockc.tsflags = sk->sk_tsflags;
1355	ipc6.sockc.mark = sk->sk_mark;
1356
1357	/* destination address check */
1358	if (sin6) {
1359		if (addr_len < offsetof(struct sockaddr, sa_data))
1360			return -EINVAL;
1361
1362		switch (sin6->sin6_family) {
1363		case AF_INET6:
1364			if (addr_len < SIN6_LEN_RFC2133)
1365				return -EINVAL;
1366			daddr = &sin6->sin6_addr;
1367			if (ipv6_addr_any(daddr) &&
1368			    ipv6_addr_v4mapped(&np->saddr))
1369				ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
1370						       daddr);
1371			break;
1372		case AF_INET:
1373			goto do_udp_sendmsg;
1374		case AF_UNSPEC:
1375			msg->msg_name = sin6 = NULL;
1376			msg->msg_namelen = addr_len = 0;
1377			daddr = NULL;
1378			break;
1379		default:
1380			return -EINVAL;
1381		}
1382	} else if (!up->pending) {
1383		if (sk->sk_state != TCP_ESTABLISHED)
1384			return -EDESTADDRREQ;
1385		daddr = &sk->sk_v6_daddr;
1386	} else
1387		daddr = NULL;
1388
1389	if (daddr) {
1390		if (ipv6_addr_v4mapped(daddr)) {
1391			struct sockaddr_in sin;
1392			sin.sin_family = AF_INET;
1393			sin.sin_port = sin6 ? sin6->sin6_port : inet->inet_dport;
1394			sin.sin_addr.s_addr = daddr->s6_addr32[3];
1395			msg->msg_name = &sin;
1396			msg->msg_namelen = sizeof(sin);
1397do_udp_sendmsg:
1398			if (ipv6_only_sock(sk))
1399				return -ENETUNREACH;
1400			return udp_sendmsg(sk, msg, len);
1401		}
1402	}
1403
1404	/* Rough check on arithmetic overflow,
1405	   better check is made in ip6_append_data().
1406	   */
1407	if (len > INT_MAX - sizeof(struct udphdr))
1408		return -EMSGSIZE;
1409
1410	getfrag  =  is_udplite ?  udplite_getfrag : ip_generic_getfrag;
1411	if (up->pending) {
1412		if (up->pending == AF_INET)
1413			return udp_sendmsg(sk, msg, len);
1414		/*
1415		 * There are pending frames.
1416		 * The socket lock must be held while it's corked.
1417		 */
1418		lock_sock(sk);
1419		if (likely(up->pending)) {
1420			if (unlikely(up->pending != AF_INET6)) {
1421				release_sock(sk);
1422				return -EAFNOSUPPORT;
1423			}
1424			dst = NULL;
1425			goto do_append_data;
1426		}
1427		release_sock(sk);
1428	}
1429	ulen += sizeof(struct udphdr);
1430
1431	memset(fl6, 0, sizeof(*fl6));
1432
1433	if (sin6) {
1434		if (sin6->sin6_port == 0)
1435			return -EINVAL;
1436
1437		fl6->fl6_dport = sin6->sin6_port;
1438		daddr = &sin6->sin6_addr;
1439
1440		if (np->sndflow) {
1441			fl6->flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK;
1442			if (fl6->flowlabel & IPV6_FLOWLABEL_MASK) {
1443				flowlabel = fl6_sock_lookup(sk, fl6->flowlabel);
1444				if (IS_ERR(flowlabel))
1445					return -EINVAL;
1446			}
1447		}
1448
1449		/*
1450		 * Otherwise it will be difficult to maintain
1451		 * sk->sk_dst_cache.
1452		 */
1453		if (sk->sk_state == TCP_ESTABLISHED &&
1454		    ipv6_addr_equal(daddr, &sk->sk_v6_daddr))
1455			daddr = &sk->sk_v6_daddr;
1456
1457		if (addr_len >= sizeof(struct sockaddr_in6) &&
1458		    sin6->sin6_scope_id &&
1459		    __ipv6_addr_needs_scope_id(__ipv6_addr_type(daddr)))
1460			fl6->flowi6_oif = sin6->sin6_scope_id;
1461	} else {
1462		if (sk->sk_state != TCP_ESTABLISHED)
1463			return -EDESTADDRREQ;
1464
1465		fl6->fl6_dport = inet->inet_dport;
1466		daddr = &sk->sk_v6_daddr;
1467		fl6->flowlabel = np->flow_label;
1468		connected = true;
1469	}
1470
1471	if (!fl6->flowi6_oif)
1472		fl6->flowi6_oif = READ_ONCE(sk->sk_bound_dev_if);
1473
1474	if (!fl6->flowi6_oif)
1475		fl6->flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
1476
1477	fl6->flowi6_uid = sk->sk_uid;
1478
1479	if (msg->msg_controllen) {
1480		opt = &opt_space;
1481		memset(opt, 0, sizeof(struct ipv6_txoptions));
1482		opt->tot_len = sizeof(*opt);
1483		ipc6.opt = opt;
1484
1485		err = udp_cmsg_send(sk, msg, &ipc6.gso_size);
1486		if (err > 0)
1487			err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, fl6,
1488						    &ipc6);
1489		if (err < 0) {
1490			fl6_sock_release(flowlabel);
1491			return err;
1492		}
1493		if ((fl6->flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) {
1494			flowlabel = fl6_sock_lookup(sk, fl6->flowlabel);
1495			if (IS_ERR(flowlabel))
1496				return -EINVAL;
1497		}
1498		if (!(opt->opt_nflen|opt->opt_flen))
1499			opt = NULL;
1500		connected = false;
1501	}
1502	if (!opt) {
1503		opt = txopt_get(np);
1504		opt_to_free = opt;
1505	}
1506	if (flowlabel)
1507		opt = fl6_merge_options(&opt_space, flowlabel, opt);
1508	opt = ipv6_fixup_options(&opt_space, opt);
1509	ipc6.opt = opt;
1510
1511	fl6->flowi6_proto = sk->sk_protocol;
1512	fl6->flowi6_mark = ipc6.sockc.mark;
1513	fl6->daddr = *daddr;
1514	if (ipv6_addr_any(&fl6->saddr) && !ipv6_addr_any(&np->saddr))
1515		fl6->saddr = np->saddr;
1516	fl6->fl6_sport = inet->inet_sport;
1517
1518	if (cgroup_bpf_enabled(CGROUP_UDP6_SENDMSG) && !connected) {
1519		err = BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk,
1520					   (struct sockaddr *)sin6,
1521					   &fl6->saddr);
1522		if (err)
1523			goto out_no_dst;
1524		if (sin6) {
1525			if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
1526				/* BPF program rewrote IPv6-only by IPv4-mapped
1527				 * IPv6. It's currently unsupported.
1528				 */
1529				err = -ENOTSUPP;
1530				goto out_no_dst;
1531			}
1532			if (sin6->sin6_port == 0) {
1533				/* BPF program set invalid port. Reject it. */
1534				err = -EINVAL;
1535				goto out_no_dst;
1536			}
1537			fl6->fl6_dport = sin6->sin6_port;
1538			fl6->daddr = sin6->sin6_addr;
1539		}
1540	}
1541
1542	if (ipv6_addr_any(&fl6->daddr))
1543		fl6->daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
1544
1545	final_p = fl6_update_dst(fl6, opt, &final);
1546	if (final_p)
1547		connected = false;
1548
1549	if (!fl6->flowi6_oif && ipv6_addr_is_multicast(&fl6->daddr)) {
1550		fl6->flowi6_oif = np->mcast_oif;
1551		connected = false;
1552	} else if (!fl6->flowi6_oif)
1553		fl6->flowi6_oif = np->ucast_oif;
1554
1555	security_sk_classify_flow(sk, flowi6_to_flowi_common(fl6));
1556
1557	if (ipc6.tclass < 0)
1558		ipc6.tclass = np->tclass;
1559
1560	fl6->flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6->flowlabel);
1561
1562	dst = ip6_sk_dst_lookup_flow(sk, fl6, final_p, connected);
1563	if (IS_ERR(dst)) {
1564		err = PTR_ERR(dst);
1565		dst = NULL;
1566		goto out;
1567	}
1568
1569	if (ipc6.hlimit < 0)
1570		ipc6.hlimit = ip6_sk_dst_hoplimit(np, fl6, dst);
1571
1572	if (msg->msg_flags&MSG_CONFIRM)
1573		goto do_confirm;
1574back_from_confirm:
1575
1576	/* Lockless fast path for the non-corking case */
1577	if (!corkreq) {
1578		struct sk_buff *skb;
1579
1580		skb = ip6_make_skb(sk, getfrag, msg, ulen,
1581				   sizeof(struct udphdr), &ipc6,
1582				   (struct rt6_info *)dst,
1583				   msg->msg_flags, &cork);
1584		err = PTR_ERR(skb);
1585		if (!IS_ERR_OR_NULL(skb))
1586			err = udp_v6_send_skb(skb, fl6, &cork.base);
1587		/* ip6_make_skb steals dst reference */
1588		goto out_no_dst;
1589	}
1590
1591	lock_sock(sk);
1592	if (unlikely(up->pending)) {
1593		/* The socket is already corked while preparing it. */
1594		/* ... which is an evident application bug. --ANK */
1595		release_sock(sk);
1596
1597		net_dbg_ratelimited("udp cork app bug 2\n");
1598		err = -EINVAL;
1599		goto out;
1600	}
1601
1602	up->pending = AF_INET6;
1603
1604do_append_data:
1605	if (ipc6.dontfrag < 0)
1606		ipc6.dontfrag = np->dontfrag;
1607	up->len += ulen;
1608	err = ip6_append_data(sk, getfrag, msg, ulen, sizeof(struct udphdr),
1609			      &ipc6, fl6, (struct rt6_info *)dst,
1610			      corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
1611	if (err)
1612		udp_v6_flush_pending_frames(sk);
1613	else if (!corkreq)
1614		err = udp_v6_push_pending_frames(sk);
1615	else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
1616		up->pending = 0;
1617
1618	if (err > 0)
1619		err = np->recverr ? net_xmit_errno(err) : 0;
1620	release_sock(sk);
1621
1622out:
1623	dst_release(dst);
1624out_no_dst:
1625	fl6_sock_release(flowlabel);
1626	txopt_put(opt_to_free);
1627	if (!err)
1628		return len;
1629	/*
1630	 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space.  Reporting
1631	 * ENOBUFS might not be good (it's not tunable per se), but otherwise
1632	 * we don't have a good statistic (IpOutDiscards but it can be too many
1633	 * things).  We could add another new stat but at least for now that
1634	 * seems like overkill.
1635	 */
1636	if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
1637		UDP6_INC_STATS(sock_net(sk),
1638			       UDP_MIB_SNDBUFERRORS, is_udplite);
1639	}
1640	return err;
1641
1642do_confirm:
1643	if (msg->msg_flags & MSG_PROBE)
1644		dst_confirm_neigh(dst, &fl6->daddr);
1645	if (!(msg->msg_flags&MSG_PROBE) || len)
1646		goto back_from_confirm;
1647	err = 0;
1648	goto out;
1649}
1650EXPORT_SYMBOL(udpv6_sendmsg);
1651
1652void udpv6_destroy_sock(struct sock *sk)
1653{
1654	struct udp_sock *up = udp_sk(sk);
1655	lock_sock(sk);
1656
1657	/* protects from races with udp_abort() */
1658	sock_set_flag(sk, SOCK_DEAD);
1659	udp_v6_flush_pending_frames(sk);
1660	release_sock(sk);
1661
1662	if (static_branch_unlikely(&udpv6_encap_needed_key)) {
1663		if (up->encap_type) {
1664			void (*encap_destroy)(struct sock *sk);
1665			encap_destroy = READ_ONCE(up->encap_destroy);
1666			if (encap_destroy)
1667				encap_destroy(sk);
1668		}
1669		if (up->encap_enabled) {
1670			static_branch_dec(&udpv6_encap_needed_key);
1671			udp_encap_disable();
1672		}
1673	}
1674}
1675
1676/*
1677 *	Socket option code for UDP
1678 */
1679int udpv6_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
1680		     unsigned int optlen)
1681{
1682	if (level == SOL_UDP  ||  level == SOL_UDPLITE || level == SOL_SOCKET)
1683		return udp_lib_setsockopt(sk, level, optname,
1684					  optval, optlen,
1685					  udp_v6_push_pending_frames);
1686	return ipv6_setsockopt(sk, level, optname, optval, optlen);
1687}
1688
1689int udpv6_getsockopt(struct sock *sk, int level, int optname,
1690		     char __user *optval, int __user *optlen)
1691{
1692	if (level == SOL_UDP  ||  level == SOL_UDPLITE)
1693		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
1694	return ipv6_getsockopt(sk, level, optname, optval, optlen);
1695}
1696
1697static const struct inet6_protocol udpv6_protocol = {
1698	.handler	=	udpv6_rcv,
1699	.err_handler	=	udpv6_err,
1700	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
1701};
1702
1703/* ------------------------------------------------------------------------ */
1704#ifdef CONFIG_PROC_FS
1705int udp6_seq_show(struct seq_file *seq, void *v)
1706{
1707	if (v == SEQ_START_TOKEN) {
1708		seq_puts(seq, IPV6_SEQ_DGRAM_HEADER);
1709	} else {
1710		int bucket = ((struct udp_iter_state *)seq->private)->bucket;
1711		struct inet_sock *inet = inet_sk(v);
1712		__u16 srcp = ntohs(inet->inet_sport);
1713		__u16 destp = ntohs(inet->inet_dport);
1714		__ip6_dgram_sock_seq_show(seq, v, srcp, destp,
1715					  udp_rqueue_get(v), bucket);
1716	}
1717	return 0;
1718}
1719
1720const struct seq_operations udp6_seq_ops = {
1721	.start		= udp_seq_start,
1722	.next		= udp_seq_next,
1723	.stop		= udp_seq_stop,
1724	.show		= udp6_seq_show,
1725};
1726EXPORT_SYMBOL(udp6_seq_ops);
1727
1728static struct udp_seq_afinfo udp6_seq_afinfo = {
1729	.family		= AF_INET6,
1730	.udp_table	= NULL,
1731};
1732
1733int __net_init udp6_proc_init(struct net *net)
1734{
1735	if (!proc_create_net_data("udp6", 0444, net->proc_net, &udp6_seq_ops,
1736			sizeof(struct udp_iter_state), &udp6_seq_afinfo))
1737		return -ENOMEM;
1738	return 0;
1739}
1740
1741void udp6_proc_exit(struct net *net)
1742{
1743	remove_proc_entry("udp6", net->proc_net);
1744}
1745#endif /* CONFIG_PROC_FS */
1746
1747/* ------------------------------------------------------------------------ */
1748
1749struct proto udpv6_prot = {
1750	.name			= "UDPv6",
1751	.owner			= THIS_MODULE,
1752	.close			= udp_lib_close,
1753	.pre_connect		= udpv6_pre_connect,
1754	.connect		= ip6_datagram_connect,
1755	.disconnect		= udp_disconnect,
1756	.ioctl			= udp_ioctl,
1757	.init			= udpv6_init_sock,
1758	.destroy		= udpv6_destroy_sock,
1759	.setsockopt		= udpv6_setsockopt,
1760	.getsockopt		= udpv6_getsockopt,
1761	.sendmsg		= udpv6_sendmsg,
1762	.recvmsg		= udpv6_recvmsg,
1763	.release_cb		= ip6_datagram_release_cb,
1764	.hash			= udp_lib_hash,
1765	.unhash			= udp_lib_unhash,
1766	.rehash			= udp_v6_rehash,
1767	.get_port		= udp_v6_get_port,
1768	.put_port		= udp_lib_unhash,
1769#ifdef CONFIG_BPF_SYSCALL
1770	.psock_update_sk_prot	= udp_bpf_update_proto,
1771#endif
1772
1773	.memory_allocated	= &udp_memory_allocated,
1774	.per_cpu_fw_alloc	= &udp_memory_per_cpu_fw_alloc,
1775
1776	.sysctl_mem		= sysctl_udp_mem,
1777	.sysctl_wmem_offset     = offsetof(struct net, ipv4.sysctl_udp_wmem_min),
1778	.sysctl_rmem_offset     = offsetof(struct net, ipv4.sysctl_udp_rmem_min),
1779	.obj_size		= sizeof(struct udp6_sock),
1780	.h.udp_table		= NULL,
1781	.diag_destroy		= udp_abort,
1782};
1783
1784static struct inet_protosw udpv6_protosw = {
1785	.type =      SOCK_DGRAM,
1786	.protocol =  IPPROTO_UDP,
1787	.prot =      &udpv6_prot,
1788	.ops =       &inet6_dgram_ops,
1789	.flags =     INET_PROTOSW_PERMANENT,
1790};
1791
1792int __init udpv6_init(void)
1793{
1794	int ret;
1795
1796	ret = inet6_add_protocol(&udpv6_protocol, IPPROTO_UDP);
1797	if (ret)
1798		goto out;
1799
1800	ret = inet6_register_protosw(&udpv6_protosw);
1801	if (ret)
1802		goto out_udpv6_protocol;
1803out:
1804	return ret;
1805
1806out_udpv6_protocol:
1807	inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
1808	goto out;
1809}
1810
1811void udpv6_exit(void)
1812{
1813	inet6_unregister_protosw(&udpv6_protosw);
1814	inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
1815}
v6.9.4 (net/ipv6/udp.c)
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 *	UDP over IPv6
   4 *	Linux INET6 implementation
   5 *
   6 *	Authors:
   7 *	Pedro Roque		<roque@di.fc.ul.pt>
   8 *
   9 *	Based on linux/ipv4/udp.c
  10 *
  11 *	Fixes:
  12 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
  13 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
  14 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
  15 *					a single port at the same time.
  16 *      Kazunori MIYAZAWA @USAGI:       change process style to use ip6_append_data
  17 *      YOSHIFUJI Hideaki @USAGI:	convert /proc/net/udp6 to seq_file.
  18 */
  19
  20#include <linux/bpf-cgroup.h>
  21#include <linux/errno.h>
  22#include <linux/types.h>
  23#include <linux/socket.h>
  24#include <linux/sockios.h>
  25#include <linux/net.h>
  26#include <linux/in6.h>
  27#include <linux/netdevice.h>
  28#include <linux/if_arp.h>
  29#include <linux/ipv6.h>
  30#include <linux/icmpv6.h>
  31#include <linux/init.h>
  32#include <linux/module.h>
  33#include <linux/skbuff.h>
  34#include <linux/slab.h>
  35#include <linux/uaccess.h>
  36#include <linux/indirect_call_wrapper.h>
  37
  38#include <net/addrconf.h>
  39#include <net/ndisc.h>
  40#include <net/protocol.h>
  41#include <net/transp_v6.h>
  42#include <net/ip6_route.h>
  43#include <net/raw.h>
  44#include <net/seg6.h>
  45#include <net/tcp_states.h>
  46#include <net/ip6_checksum.h>
  47#include <net/ip6_tunnel.h>
  48#include <trace/events/udp.h>
  49#include <net/xfrm.h>
  50#include <net/inet_hashtables.h>
  51#include <net/inet6_hashtables.h>
  52#include <net/busy_poll.h>
  53#include <net/sock_reuseport.h>
  54#include <net/gro.h>
  55
  56#include <linux/proc_fs.h>
  57#include <linux/seq_file.h>
  58#include <trace/events/skb.h>
  59#include "udp_impl.h"
  60
  61static void udpv6_destruct_sock(struct sock *sk)
  62{
  63	udp_destruct_common(sk);
  64	inet6_sock_destruct(sk);
  65}
  66
  67int udpv6_init_sock(struct sock *sk)
  68{
  69	udp_lib_init_sock(sk);
  70	sk->sk_destruct = udpv6_destruct_sock;
  71	set_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags);
  72	return 0;
  73}
  74
  75INDIRECT_CALLABLE_SCOPE
  76u32 udp6_ehashfn(const struct net *net,
  77		 const struct in6_addr *laddr,
  78		 const u16 lport,
  79		 const struct in6_addr *faddr,
  80		 const __be16 fport)
  81{
  82	u32 lhash, fhash;
  83
  84	net_get_random_once(&udp6_ehash_secret,
  85			    sizeof(udp6_ehash_secret));
  86	net_get_random_once(&udp_ipv6_hash_secret,
  87			    sizeof(udp_ipv6_hash_secret));
  88
  89	lhash = (__force u32)laddr->s6_addr32[3];
  90	fhash = __ipv6_addr_jhash(faddr, udp_ipv6_hash_secret);
  91
  92	return __inet6_ehashfn(lhash, lport, fhash, fport,
  93			       udp6_ehash_secret + net_hash_mix(net));
  94}
  95
  96int udp_v6_get_port(struct sock *sk, unsigned short snum)
  97{
  98	unsigned int hash2_nulladdr =
  99		ipv6_portaddr_hash(sock_net(sk), &in6addr_any, snum);
 100	unsigned int hash2_partial =
 101		ipv6_portaddr_hash(sock_net(sk), &sk->sk_v6_rcv_saddr, 0);
 102
 103	/* precompute partial secondary hash */
 104	udp_sk(sk)->udp_portaddr_hash = hash2_partial;
 105	return udp_lib_get_port(sk, snum, hash2_nulladdr);
 106}
 107
 108void udp_v6_rehash(struct sock *sk)
 109{
 110	u16 new_hash = ipv6_portaddr_hash(sock_net(sk),
 111					  &sk->sk_v6_rcv_saddr,
 112					  inet_sk(sk)->inet_num);
 113
 114	udp_lib_rehash(sk, new_hash);
 115}
 116
 117static int compute_score(struct sock *sk, struct net *net,
 118			 const struct in6_addr *saddr, __be16 sport,
 119			 const struct in6_addr *daddr, unsigned short hnum,
 120			 int dif, int sdif)
 121{
 122	int bound_dev_if, score;
 123	struct inet_sock *inet;
 124	bool dev_match;
 125
 126	if (!net_eq(sock_net(sk), net) ||
 127	    udp_sk(sk)->udp_port_hash != hnum ||
 128	    sk->sk_family != PF_INET6)
 129		return -1;
 130
 131	if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
 132		return -1;
 133
 134	score = 0;
 135	inet = inet_sk(sk);
 136
 137	if (inet->inet_dport) {
 138		if (inet->inet_dport != sport)
 139			return -1;
 140		score++;
 141	}
 142
 143	if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
 144		if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr))
 145			return -1;
 146		score++;
 147	}
 148
 149	bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
 150	dev_match = udp_sk_bound_dev_eq(net, bound_dev_if, dif, sdif);
 151	if (!dev_match)
 152		return -1;
 153	if (bound_dev_if)
 154		score++;
 155
 156	if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
 157		score++;
 158
 159	return score;
 160}
 161
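/* Scoring used by compute_score() above: a socket must match the
 * namespace, the hashed local port, the address family and any bound
 * local address, otherwise -1 is returned.  Each further constraint it
 * satisfies adds one point: a connected remote port, a connected remote
 * address, a bound device, and arrival on the socket's incoming CPU.
 * A fully connected, device-bound socket on the right CPU therefore
 * scores 4, and the lookup below keeps the highest-scoring candidate.
 */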
 162/* called with rcu_read_lock() */
 163static struct sock *udp6_lib_lookup2(struct net *net,
 164		const struct in6_addr *saddr, __be16 sport,
 165		const struct in6_addr *daddr, unsigned int hnum,
 166		int dif, int sdif, struct udp_hslot *hslot2,
 167		struct sk_buff *skb)
 168{
 169	struct sock *sk, *result;
 170	int score, badness;
 171	bool need_rescore;
 172
 173	result = NULL;
 174	badness = -1;
 175	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
 176		need_rescore = false;
 177rescore:
 178		score = compute_score(need_rescore ? result : sk, net, saddr,
 179				      sport, daddr, hnum, dif, sdif);
 180		if (score > badness) {
 181			badness = score;
 182
 183			if (need_rescore)
 184				continue;
 185
 186			if (sk->sk_state == TCP_ESTABLISHED) {
 187				result = sk;
 188				continue;
 189			}
 190
 191			result = inet6_lookup_reuseport(net, sk, skb, sizeof(struct udphdr),
 192							saddr, sport, daddr, hnum, udp6_ehashfn);
 193			if (!result) {
 194				result = sk;
 195				continue;
 196			}
 197
 198			/* Fall back to scoring if group has connections */
 199			if (!reuseport_has_conns(sk))
 200				return result;
 201
 202			/* Reuseport logic returned an error, keep original score. */
 203			if (IS_ERR(result))
 204				continue;
 205
  206			/* compute_score() is too long a function to be
  207			 * inlined, and calling it again here yields
  208			 * measurable overhead for some workloads.  Work
  209			 * around that by jumping backwards to rescore
  210			 * 'result'.
  211			 */
 212			need_rescore = true;
 213			goto rescore;
 214		}
 215	}
 216	return result;
 217}
 218
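/* Lookup order used by __udp6_lib_lookup() below: first probe the hash2
 * slot keyed on the packet's destination address and port, which finds
 * connected and other non-wildcard sockets; if BPF sk_lookup programs
 * are attached and the default table is in use, let them redirect the
 * packet; only then fall back to the wildcard (in6addr_any) slot.
 */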
 219/* rcu_read_lock() must be held */
 220struct sock *__udp6_lib_lookup(struct net *net,
 221			       const struct in6_addr *saddr, __be16 sport,
 222			       const struct in6_addr *daddr, __be16 dport,
 223			       int dif, int sdif, struct udp_table *udptable,
 224			       struct sk_buff *skb)
 225{
 226	unsigned short hnum = ntohs(dport);
 227	unsigned int hash2, slot2;
 228	struct udp_hslot *hslot2;
 229	struct sock *result, *sk;
 230
 231	hash2 = ipv6_portaddr_hash(net, daddr, hnum);
 232	slot2 = hash2 & udptable->mask;
 233	hslot2 = &udptable->hash2[slot2];
 234
 235	/* Lookup connected or non-wildcard sockets */
 236	result = udp6_lib_lookup2(net, saddr, sport,
 237				  daddr, hnum, dif, sdif,
 238				  hslot2, skb);
 239	if (!IS_ERR_OR_NULL(result) && result->sk_state == TCP_ESTABLISHED)
 240		goto done;
 241
 242	/* Lookup redirect from BPF */
 243	if (static_branch_unlikely(&bpf_sk_lookup_enabled) &&
 244	    udptable == net->ipv4.udp_table) {
 245		sk = inet6_lookup_run_sk_lookup(net, IPPROTO_UDP, skb, sizeof(struct udphdr),
 246						saddr, sport, daddr, hnum, dif,
 247						udp6_ehashfn);
 248		if (sk) {
 249			result = sk;
 250			goto done;
 251		}
 252	}
 253
 254	/* Got non-wildcard socket or error on first lookup */
 255	if (result)
 256		goto done;
 257
 258	/* Lookup wildcard sockets */
 259	hash2 = ipv6_portaddr_hash(net, &in6addr_any, hnum);
 260	slot2 = hash2 & udptable->mask;
 261	hslot2 = &udptable->hash2[slot2];
 262
 263	result = udp6_lib_lookup2(net, saddr, sport,
 264				  &in6addr_any, hnum, dif, sdif,
 265				  hslot2, skb);
 266done:
 267	if (IS_ERR(result))
 268		return NULL;
 269	return result;
 270}
 271EXPORT_SYMBOL_GPL(__udp6_lib_lookup);
 272
 273static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb,
 274					  __be16 sport, __be16 dport,
 275					  struct udp_table *udptable)
 276{
 277	const struct ipv6hdr *iph = ipv6_hdr(skb);
 278
 279	return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
 280				 &iph->daddr, dport, inet6_iif(skb),
 281				 inet6_sdif(skb), udptable, skb);
 282}
 283
 284struct sock *udp6_lib_lookup_skb(const struct sk_buff *skb,
 285				 __be16 sport, __be16 dport)
 286{
 287	const u16 offset = NAPI_GRO_CB(skb)->network_offsets[skb->encapsulation];
 288	const struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + offset);
 289	struct net *net = dev_net(skb->dev);
 290	int iif, sdif;
 291
 292	inet6_get_iif_sdif(skb, &iif, &sdif);
 293
 294	return __udp6_lib_lookup(net, &iph->saddr, sport,
 295				 &iph->daddr, dport, iif,
 296				 sdif, net->ipv4.udp_table, NULL);
 297}
 298
 299/* Must be called under rcu_read_lock().
 300 * Does increment socket refcount.
 301 */
 302#if IS_ENABLED(CONFIG_NF_TPROXY_IPV6) || IS_ENABLED(CONFIG_NF_SOCKET_IPV6)
 303struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport,
 304			     const struct in6_addr *daddr, __be16 dport, int dif)
 305{
 306	struct sock *sk;
 307
 308	sk =  __udp6_lib_lookup(net, saddr, sport, daddr, dport,
 309				dif, 0, net->ipv4.udp_table, NULL);
 310	if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
 311		sk = NULL;
 312	return sk;
 313}
 314EXPORT_SYMBOL_GPL(udp6_lib_lookup);
 315#endif
 316
  317/* do not use the scratch area len for jumbograms: their length exceeds the
  318 * scratch area space; note that the IP6CB flags are still in the first
  319 * cacheline, so checking for jumbograms is cheap
 320 */
 321static int udp6_skb_len(struct sk_buff *skb)
 322{
 323	return unlikely(inet6_is_jumbogram(skb)) ? skb->len : udp_skb_len(skb);
 324}
 325
 326/*
  327 *	This should be easy: if there is something there, we
  328 *	return it; otherwise we block.
 329 */
 330
 331int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
 332		  int flags, int *addr_len)
 333{
 334	struct ipv6_pinfo *np = inet6_sk(sk);
 335	struct inet_sock *inet = inet_sk(sk);
 336	struct sk_buff *skb;
 337	unsigned int ulen, copied;
 338	int off, err, peeking = flags & MSG_PEEK;
 339	int is_udplite = IS_UDPLITE(sk);
 340	struct udp_mib __percpu *mib;
 341	bool checksum_valid = false;
 342	int is_udp4;
 343
 344	if (flags & MSG_ERRQUEUE)
 345		return ipv6_recv_error(sk, msg, len, addr_len);
 346
 347	if (np->rxpmtu && np->rxopt.bits.rxpmtu)
 348		return ipv6_recv_rxpmtu(sk, msg, len, addr_len);
 349
 350try_again:
 351	off = sk_peek_offset(sk, flags);
 352	skb = __skb_recv_udp(sk, flags, &off, &err);
 353	if (!skb)
 354		return err;
 355
 356	ulen = udp6_skb_len(skb);
 357	copied = len;
 358	if (copied > ulen - off)
 359		copied = ulen - off;
 360	else if (copied < ulen)
 361		msg->msg_flags |= MSG_TRUNC;
 362
 363	is_udp4 = (skb->protocol == htons(ETH_P_IP));
 364	mib = __UDPX_MIB(sk, is_udp4);
 365
 366	/*
 367	 * If checksum is needed at all, try to do it while copying the
 368	 * data.  If the data is truncated, or if we only want a partial
 369	 * coverage checksum (UDP-Lite), do it before the copy.
 370	 */
 371
 372	if (copied < ulen || peeking ||
 373	    (is_udplite && UDP_SKB_CB(skb)->partial_cov)) {
 374		checksum_valid = udp_skb_csum_unnecessary(skb) ||
 375				!__udp_lib_checksum_complete(skb);
 376		if (!checksum_valid)
 377			goto csum_copy_err;
 378	}
 379
 380	if (checksum_valid || udp_skb_csum_unnecessary(skb)) {
 381		if (udp_skb_is_linear(skb))
 382			err = copy_linear_skb(skb, copied, off, &msg->msg_iter);
 383		else
 384			err = skb_copy_datagram_msg(skb, off, msg, copied);
 385	} else {
 386		err = skb_copy_and_csum_datagram_msg(skb, off, msg);
 387		if (err == -EINVAL)
 388			goto csum_copy_err;
 389	}
 390	if (unlikely(err)) {
 391		if (!peeking) {
 392			atomic_inc(&sk->sk_drops);
 393			SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
 394		}
 395		kfree_skb(skb);
 396		return err;
 397	}
 398	if (!peeking)
 399		SNMP_INC_STATS(mib, UDP_MIB_INDATAGRAMS);
 400
 401	sock_recv_cmsgs(msg, sk, skb);
 402
 403	/* Copy the address. */
 404	if (msg->msg_name) {
 405		DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
 406		sin6->sin6_family = AF_INET6;
 407		sin6->sin6_port = udp_hdr(skb)->source;
 408		sin6->sin6_flowinfo = 0;
 409
 410		if (is_udp4) {
 411			ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
 412					       &sin6->sin6_addr);
 413			sin6->sin6_scope_id = 0;
 414		} else {
 415			sin6->sin6_addr = ipv6_hdr(skb)->saddr;
 416			sin6->sin6_scope_id =
 417				ipv6_iface_scope_id(&sin6->sin6_addr,
 418						    inet6_iif(skb));
 419		}
 420		*addr_len = sizeof(*sin6);
 421
 422		BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk,
 423						      (struct sockaddr *)sin6,
 424						      addr_len);
 425	}
 426
 427	if (udp_test_bit(GRO_ENABLED, sk))
 428		udp_cmsg_recv(msg, sk, skb);
 429
 430	if (np->rxopt.all)
 431		ip6_datagram_recv_common_ctl(sk, msg, skb);
 432
 433	if (is_udp4) {
 434		if (inet_cmsg_flags(inet))
 435			ip_cmsg_recv_offset(msg, sk, skb,
 436					    sizeof(struct udphdr), off);
 437	} else {
 438		if (np->rxopt.all)
 439			ip6_datagram_recv_specific_ctl(sk, msg, skb);
 440	}
 441
 442	err = copied;
 443	if (flags & MSG_TRUNC)
 444		err = ulen;
 445
 446	skb_consume_udp(sk, skb, peeking ? -err : err);
 447	return err;
 448
 449csum_copy_err:
 450	if (!__sk_queue_drop_skb(sk, &udp_sk(sk)->reader_queue, skb, flags,
 451				 udp_skb_destructor)) {
 452		SNMP_INC_STATS(mib, UDP_MIB_CSUMERRORS);
 453		SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
 454	}
 455	kfree_skb(skb);
 456
 457	/* starting over for a new packet, but check if we need to yield */
 458	cond_resched();
 459	msg->msg_flags &= ~MSG_TRUNC;
 460	goto try_again;
 461}
 462
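/* Illustrative only, not part of this file: a minimal userspace sketch,
 * using the standard POSIX socket API, of the blocking receive that
 * udpv6_recvmsg() services.  It assumes the socket has already been
 * bound; buffer sizes and error handling are illustrative.
 *
 *	int fd = socket(AF_INET6, SOCK_DGRAM, 0);
 *	struct sockaddr_in6 peer = { 0 };
 *	char buf[2048], cbuf[256];
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct msghdr msg = {
 *		.msg_name	= &peer,
 *		.msg_namelen	= sizeof(peer),
 *		.msg_iov	= &iov,
 *		.msg_iovlen	= 1,
 *		.msg_control	= cbuf,
 *		.msg_controllen	= sizeof(cbuf),
 *	};
 *	ssize_t n = recvmsg(fd, &msg, 0);
 *
 * After a successful call, peer holds the sender's address, MSG_TRUNC in
 * msg.msg_flags signals a datagram larger than buf, and any ancillary
 * data requested via socket options (e.g. IPV6_RECVPKTINFO) is returned
 * in cbuf.
 */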
 463DECLARE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
 464void udpv6_encap_enable(void)
 465{
 466	static_branch_inc(&udpv6_encap_needed_key);
 467}
 468EXPORT_SYMBOL(udpv6_encap_enable);
 469
 470/* Handler for tunnels with arbitrary destination ports: no socket lookup, go
 471 * through error handlers in encapsulations looking for a match.
 472 */
 473static int __udp6_lib_err_encap_no_sk(struct sk_buff *skb,
 474				      struct inet6_skb_parm *opt,
 475				      u8 type, u8 code, int offset, __be32 info)
 476{
 477	int i;
 478
 479	for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) {
 480		int (*handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
 481			       u8 type, u8 code, int offset, __be32 info);
 482		const struct ip6_tnl_encap_ops *encap;
 483
 484		encap = rcu_dereference(ip6tun_encaps[i]);
 485		if (!encap)
 486			continue;
 487		handler = encap->err_handler;
 488		if (handler && !handler(skb, opt, type, code, offset, info))
 489			return 0;
 490	}
 491
 492	return -ENOENT;
 493}
 494
 495/* Try to match ICMP errors to UDP tunnels by looking up a socket without
 496 * reversing source and destination port: this will match tunnels that force the
 497 * same destination port on both endpoints (e.g. VXLAN, GENEVE). Note that
 498 * lwtunnels might actually break this assumption by being configured with
 499 * different destination ports on endpoints, in this case we won't be able to
 500 * trace ICMP messages back to them.
 501 *
 502 * If this doesn't match any socket, probe tunnels with arbitrary destination
 503 * ports (e.g. FoU, GUE): there, the receiving socket is useless, as the port
 504 * we've sent packets to won't necessarily match the local destination port.
 505 *
 506 * Then ask the tunnel implementation to match the error against a valid
 507 * association.
 508 *
 509 * Return an error if we can't find a match, the socket if we need further
 510 * processing, zero otherwise.
 511 */
 512static struct sock *__udp6_lib_err_encap(struct net *net,
 513					 const struct ipv6hdr *hdr, int offset,
 514					 struct udphdr *uh,
 515					 struct udp_table *udptable,
 516					 struct sock *sk,
 517					 struct sk_buff *skb,
 518					 struct inet6_skb_parm *opt,
 519					 u8 type, u8 code, __be32 info)
 520{
 521	int (*lookup)(struct sock *sk, struct sk_buff *skb);
 522	int network_offset, transport_offset;
 523	struct udp_sock *up;
 524
 525	network_offset = skb_network_offset(skb);
 526	transport_offset = skb_transport_offset(skb);
 527
 528	/* Network header needs to point to the outer IPv6 header inside ICMP */
 529	skb_reset_network_header(skb);
 530
 531	/* Transport header needs to point to the UDP header */
 532	skb_set_transport_header(skb, offset);
 533
 534	if (sk) {
 535		up = udp_sk(sk);
 536
 537		lookup = READ_ONCE(up->encap_err_lookup);
 538		if (lookup && lookup(sk, skb))
 539			sk = NULL;
 540
 541		goto out;
 542	}
 543
 544	sk = __udp6_lib_lookup(net, &hdr->daddr, uh->source,
 545			       &hdr->saddr, uh->dest,
 546			       inet6_iif(skb), 0, udptable, skb);
 547	if (sk) {
 548		up = udp_sk(sk);
 549
 550		lookup = READ_ONCE(up->encap_err_lookup);
 551		if (!lookup || lookup(sk, skb))
 552			sk = NULL;
 553	}
 554
 555out:
 556	if (!sk) {
 557		sk = ERR_PTR(__udp6_lib_err_encap_no_sk(skb, opt, type, code,
 558							offset, info));
 559	}
 560
 561	skb_set_transport_header(skb, transport_offset);
 562	skb_set_network_header(skb, network_offset);
 563
 564	return sk;
 565}
 566
 567int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 568		   u8 type, u8 code, int offset, __be32 info,
 569		   struct udp_table *udptable)
 570{
 571	struct ipv6_pinfo *np;
 572	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
 573	const struct in6_addr *saddr = &hdr->saddr;
 574	const struct in6_addr *daddr = seg6_get_daddr(skb, opt) ? : &hdr->daddr;
 575	struct udphdr *uh = (struct udphdr *)(skb->data+offset);
 576	bool tunnel = false;
 577	struct sock *sk;
 578	int harderr;
 579	int err;
 580	struct net *net = dev_net(skb->dev);
 581
 582	sk = __udp6_lib_lookup(net, daddr, uh->dest, saddr, uh->source,
 583			       inet6_iif(skb), inet6_sdif(skb), udptable, NULL);
 584
 585	if (!sk || READ_ONCE(udp_sk(sk)->encap_type)) {
 586		/* No socket for error: try tunnels before discarding */
 587		if (static_branch_unlikely(&udpv6_encap_needed_key)) {
 588			sk = __udp6_lib_err_encap(net, hdr, offset, uh,
 589						  udptable, sk, skb,
 590						  opt, type, code, info);
 591			if (!sk)
 592				return 0;
 593		} else
 594			sk = ERR_PTR(-ENOENT);
 595
 596		if (IS_ERR(sk)) {
 597			__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
 598					  ICMP6_MIB_INERRORS);
 599			return PTR_ERR(sk);
 600		}
 601
 602		tunnel = true;
 603	}
 604
 605	harderr = icmpv6_err_convert(type, code, &err);
 606	np = inet6_sk(sk);
 607
 608	if (type == ICMPV6_PKT_TOOBIG) {
 609		if (!ip6_sk_accept_pmtu(sk))
 610			goto out;
 611		ip6_sk_update_pmtu(skb, sk, info);
 612		if (READ_ONCE(np->pmtudisc) != IPV6_PMTUDISC_DONT)
 613			harderr = 1;
 614	}
 615	if (type == NDISC_REDIRECT) {
 616		if (tunnel) {
 617			ip6_redirect(skb, sock_net(sk), inet6_iif(skb),
 618				     READ_ONCE(sk->sk_mark), sk->sk_uid);
 619		} else {
 620			ip6_sk_redirect(skb, sk);
 621		}
 622		goto out;
 623	}
 624
 625	/* Tunnels don't have an application socket: don't pass errors back */
 626	if (tunnel) {
 627		if (udp_sk(sk)->encap_err_rcv)
 628			udp_sk(sk)->encap_err_rcv(sk, skb, err, uh->dest,
 629						  ntohl(info), (u8 *)(uh+1));
 630		goto out;
 631	}
 632
 633	if (!inet6_test_bit(RECVERR6, sk)) {
 634		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
 635			goto out;
 636	} else {
 637		ipv6_icmp_error(sk, skb, err, uh->dest, ntohl(info), (u8 *)(uh+1));
 638	}
 639
 640	sk->sk_err = err;
 641	sk_error_report(sk);
 642out:
 643	return 0;
 644}
 645
 646static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 647{
 648	int rc;
 649
 650	if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
 651		sock_rps_save_rxhash(sk, skb);
 652		sk_mark_napi_id(sk, skb);
 653		sk_incoming_cpu_update(sk);
 654	} else {
 655		sk_mark_napi_id_once(sk, skb);
 656	}
 657
 658	rc = __udp_enqueue_schedule_skb(sk, skb);
 659	if (rc < 0) {
 660		int is_udplite = IS_UDPLITE(sk);
 661		enum skb_drop_reason drop_reason;
 662
 663		/* Note that an ENOMEM error is charged twice */
 664		if (rc == -ENOMEM) {
 665			UDP6_INC_STATS(sock_net(sk),
 666					 UDP_MIB_RCVBUFERRORS, is_udplite);
 667			drop_reason = SKB_DROP_REASON_SOCKET_RCVBUFF;
 668		} else {
 669			UDP6_INC_STATS(sock_net(sk),
 670				       UDP_MIB_MEMERRORS, is_udplite);
 671			drop_reason = SKB_DROP_REASON_PROTO_MEM;
 672		}
 673		UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
 674		kfree_skb_reason(skb, drop_reason);
 675		trace_udp_fail_queue_rcv_skb(rc, sk);
 676		return -1;
 677	}
 678
 679	return 0;
 680}
 681
 682static __inline__ int udpv6_err(struct sk_buff *skb,
 683				struct inet6_skb_parm *opt, u8 type,
 684				u8 code, int offset, __be32 info)
 685{
 686	return __udp6_lib_err(skb, opt, type, code, offset, info,
 687			      dev_net(skb->dev)->ipv4.udp_table);
 688}
 689
 690static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
 691{
 692	enum skb_drop_reason drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;
 693	struct udp_sock *up = udp_sk(sk);
 694	int is_udplite = IS_UDPLITE(sk);
 695
 696	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
 697		drop_reason = SKB_DROP_REASON_XFRM_POLICY;
 698		goto drop;
 699	}
 700	nf_reset_ct(skb);
 701
 702	if (static_branch_unlikely(&udpv6_encap_needed_key) &&
 703	    READ_ONCE(up->encap_type)) {
 704		int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
 705
 706		/*
 707		 * This is an encapsulation socket so pass the skb to
 708		 * the socket's udp_encap_rcv() hook. Otherwise, just
  709		 * fall through and pass it up to the UDP socket.
  710		 * up->encap_rcv() returns the following values:
 711		 * =0 if skb was successfully passed to the encap
 712		 *    handler or was discarded by it.
 713		 * >0 if skb should be passed on to UDP.
 714		 * <0 if skb should be resubmitted as proto -N
 715		 */
 716
 717		/* if we're overly short, let UDP handle it */
 718		encap_rcv = READ_ONCE(up->encap_rcv);
 719		if (encap_rcv) {
 720			int ret;
 721
 722			/* Verify checksum before giving to encap */
 723			if (udp_lib_checksum_complete(skb))
 724				goto csum_error;
 725
 726			ret = encap_rcv(sk, skb);
 727			if (ret <= 0) {
 728				__UDP6_INC_STATS(sock_net(sk),
 729						 UDP_MIB_INDATAGRAMS,
 730						 is_udplite);
 731				return -ret;
 732			}
 733		}
 734
 735		/* FALLTHROUGH -- it's a UDP Packet */
 736	}
 737
 738	/*
 739	 * UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c).
 740	 */
 741	if (udp_test_bit(UDPLITE_RECV_CC, sk) && UDP_SKB_CB(skb)->partial_cov) {
 742		u16 pcrlen = READ_ONCE(up->pcrlen);
 743
 744		if (pcrlen == 0) {          /* full coverage was set  */
 745			net_dbg_ratelimited("UDPLITE6: partial coverage %d while full coverage %d requested\n",
 746					    UDP_SKB_CB(skb)->cscov, skb->len);
 747			goto drop;
 748		}
 749		if (UDP_SKB_CB(skb)->cscov < pcrlen) {
 750			net_dbg_ratelimited("UDPLITE6: coverage %d too small, need min %d\n",
 751					    UDP_SKB_CB(skb)->cscov, pcrlen);
 752			goto drop;
 753		}
 754	}
 755
 756	prefetch(&sk->sk_rmem_alloc);
 757	if (rcu_access_pointer(sk->sk_filter) &&
 758	    udp_lib_checksum_complete(skb))
 759		goto csum_error;
 760
 761	if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr))) {
 762		drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
 763		goto drop;
 764	}
 765
 766	udp_csum_pull_header(skb);
 767
 768	skb_dst_drop(skb);
 769
 770	return __udpv6_queue_rcv_skb(sk, skb);
 771
 772csum_error:
 773	drop_reason = SKB_DROP_REASON_UDP_CSUM;
 774	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
 775drop:
 776	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
 777	atomic_inc(&sk->sk_drops);
 778	kfree_skb_reason(skb, drop_reason);
 779	return -1;
 780}
 781
 782static int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 783{
 784	struct sk_buff *next, *segs;
 785	int ret;
 786
 787	if (likely(!udp_unexpected_gso(sk, skb)))
 788		return udpv6_queue_rcv_one_skb(sk, skb);
 789
 790	__skb_push(skb, -skb_mac_offset(skb));
 791	segs = udp_rcv_segment(sk, skb, false);
 792	skb_list_walk_safe(segs, skb, next) {
 793		__skb_pull(skb, skb_transport_offset(skb));
 794
 795		udp_post_segment_fix_csum(skb);
 796		ret = udpv6_queue_rcv_one_skb(sk, skb);
 797		if (ret > 0)
 798			ip6_protocol_deliver_rcu(dev_net(skb->dev), skb, ret,
 799						 true);
 800	}
 801	return 0;
 802}
 803
 804static bool __udp_v6_is_mcast_sock(struct net *net, const struct sock *sk,
 805				   __be16 loc_port, const struct in6_addr *loc_addr,
 806				   __be16 rmt_port, const struct in6_addr *rmt_addr,
 807				   int dif, int sdif, unsigned short hnum)
 808{
 809	const struct inet_sock *inet = inet_sk(sk);
 810
 811	if (!net_eq(sock_net(sk), net))
 812		return false;
 813
 814	if (udp_sk(sk)->udp_port_hash != hnum ||
 815	    sk->sk_family != PF_INET6 ||
 816	    (inet->inet_dport && inet->inet_dport != rmt_port) ||
 817	    (!ipv6_addr_any(&sk->sk_v6_daddr) &&
 818		    !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr)) ||
 819	    !udp_sk_bound_dev_eq(net, READ_ONCE(sk->sk_bound_dev_if), dif, sdif) ||
 820	    (!ipv6_addr_any(&sk->sk_v6_rcv_saddr) &&
 821		    !ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr)))
 822		return false;
 823	if (!inet6_mc_check(sk, loc_addr, rmt_addr))
 824		return false;
 825	return true;
 826}
 827
 828static void udp6_csum_zero_error(struct sk_buff *skb)
 829{
 830	/* RFC 2460 section 8.1 says that we SHOULD log
 831	 * this error. Well, it is reasonable.
 832	 */
 833	net_dbg_ratelimited("IPv6: udp checksum is 0 for [%pI6c]:%u->[%pI6c]:%u\n",
 834			    &ipv6_hdr(skb)->saddr, ntohs(udp_hdr(skb)->source),
 835			    &ipv6_hdr(skb)->daddr, ntohs(udp_hdr(skb)->dest));
 836}
 837
 838/*
 839 * Note: called only from the BH handler context,
 840 * so we don't need to lock the hashes.
 841 */
 842static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
 843		const struct in6_addr *saddr, const struct in6_addr *daddr,
 844		struct udp_table *udptable, int proto)
 845{
 846	struct sock *sk, *first = NULL;
 847	const struct udphdr *uh = udp_hdr(skb);
 848	unsigned short hnum = ntohs(uh->dest);
 849	struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
 850	unsigned int offset = offsetof(typeof(*sk), sk_node);
 851	unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
 852	int dif = inet6_iif(skb);
 853	int sdif = inet6_sdif(skb);
 854	struct hlist_node *node;
 855	struct sk_buff *nskb;
 856
 857	if (use_hash2) {
 858		hash2_any = ipv6_portaddr_hash(net, &in6addr_any, hnum) &
 859			    udptable->mask;
 860		hash2 = ipv6_portaddr_hash(net, daddr, hnum) & udptable->mask;
 861start_lookup:
 862		hslot = &udptable->hash2[hash2];
 863		offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
 864	}
 865
 866	sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) {
 867		if (!__udp_v6_is_mcast_sock(net, sk, uh->dest, daddr,
 868					    uh->source, saddr, dif, sdif,
 869					    hnum))
 870			continue;
 871		/* If zero checksum and no_check is not on for
 872		 * the socket then skip it.
 873		 */
 874		if (!uh->check && !udp_get_no_check6_rx(sk))
 875			continue;
 876		if (!first) {
 877			first = sk;
 878			continue;
 879		}
 880		nskb = skb_clone(skb, GFP_ATOMIC);
 881		if (unlikely(!nskb)) {
 882			atomic_inc(&sk->sk_drops);
 883			__UDP6_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
 884					 IS_UDPLITE(sk));
 885			__UDP6_INC_STATS(net, UDP_MIB_INERRORS,
 886					 IS_UDPLITE(sk));
 887			continue;
 888		}
 889
 890		if (udpv6_queue_rcv_skb(sk, nskb) > 0)
 891			consume_skb(nskb);
 892	}
 893
 894	/* Also lookup *:port if we are using hash2 and haven't done so yet. */
 895	if (use_hash2 && hash2 != hash2_any) {
 896		hash2 = hash2_any;
 897		goto start_lookup;
 898	}
 899
 900	if (first) {
 901		if (udpv6_queue_rcv_skb(first, skb) > 0)
 902			consume_skb(skb);
 903	} else {
 904		kfree_skb(skb);
 905		__UDP6_INC_STATS(net, UDP_MIB_IGNOREDMULTI,
 906				 proto == IPPROTO_UDPLITE);
 907	}
 908	return 0;
 909}
 910
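/* Multicast delivery strategy above: when the primary hash slot holds
 * more than ten sockets, the search switches to the hash2 slots, first
 * for the packet's destination address and then, if different, for the
 * *:port wildcard.  The first matching socket eventually receives the
 * original skb; every further match gets a clone, and a failed clone is
 * accounted as RcvbufErrors and InErrors against that socket.
 */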
 911static void udp6_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
 912{
 913	if (udp_sk_rx_dst_set(sk, dst))
 914		sk->sk_rx_dst_cookie = rt6_get_cookie(dst_rt6_info(dst));
 915}
 916
  917/* wrapper for udpv6_queue_rcv_skb() taking care of csum conversion and
  918 * return code conversion for IP layer consumption
 919 */
 920static int udp6_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
 921				struct udphdr *uh)
 922{
 923	int ret;
 924
 925	if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
 926		skb_checksum_try_convert(skb, IPPROTO_UDP, ip6_compute_pseudo);
 927
 928	ret = udpv6_queue_rcv_skb(sk, skb);
 929
 930	/* a return value > 0 means to resubmit the input */
 931	if (ret > 0)
 932		return ret;
 933	return 0;
 934}
 935
 936int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
 937		   int proto)
 938{
 939	enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED;
 940	const struct in6_addr *saddr, *daddr;
 941	struct net *net = dev_net(skb->dev);
 942	struct udphdr *uh;
 943	struct sock *sk;
 944	bool refcounted;
 945	u32 ulen = 0;
 946
 947	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
 948		goto discard;
 949
 950	saddr = &ipv6_hdr(skb)->saddr;
 951	daddr = &ipv6_hdr(skb)->daddr;
 952	uh = udp_hdr(skb);
 953
 954	ulen = ntohs(uh->len);
 955	if (ulen > skb->len)
 956		goto short_packet;
 957
 958	if (proto == IPPROTO_UDP) {
 959		/* UDP validates ulen. */
 960
 961		/* Check for jumbo payload */
 962		if (ulen == 0)
 963			ulen = skb->len;
 964
 965		if (ulen < sizeof(*uh))
 966			goto short_packet;
 967
 968		if (ulen < skb->len) {
 969			if (pskb_trim_rcsum(skb, ulen))
 970				goto short_packet;
 971			saddr = &ipv6_hdr(skb)->saddr;
 972			daddr = &ipv6_hdr(skb)->daddr;
 973			uh = udp_hdr(skb);
 974		}
 975	}
 976
 977	if (udp6_csum_init(skb, uh, proto))
 978		goto csum_error;
 979
 980	/* Check if the socket is already available, e.g. due to early demux */
 981	sk = inet6_steal_sock(net, skb, sizeof(struct udphdr), saddr, uh->source, daddr, uh->dest,
 982			      &refcounted, udp6_ehashfn);
 983	if (IS_ERR(sk))
 984		goto no_sk;
 985
 986	if (sk) {
 987		struct dst_entry *dst = skb_dst(skb);
 988		int ret;
 989
 990		if (unlikely(rcu_dereference(sk->sk_rx_dst) != dst))
 991			udp6_sk_rx_dst_set(sk, dst);
 992
 993		if (!uh->check && !udp_get_no_check6_rx(sk)) {
 994			if (refcounted)
 995				sock_put(sk);
 996			goto report_csum_error;
 997		}
 998
 999		ret = udp6_unicast_rcv_skb(sk, skb, uh);
1000		if (refcounted)
1001			sock_put(sk);
1002		return ret;
1003	}
1004
1005	/*
1006	 *	Multicast receive code
1007	 */
1008	if (ipv6_addr_is_multicast(daddr))
1009		return __udp6_lib_mcast_deliver(net, skb,
1010				saddr, daddr, udptable, proto);
1011
1012	/* Unicast */
1013	sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
1014	if (sk) {
1015		if (!uh->check && !udp_get_no_check6_rx(sk))
1016			goto report_csum_error;
1017		return udp6_unicast_rcv_skb(sk, skb, uh);
1018	}
1019no_sk:
1020	reason = SKB_DROP_REASON_NO_SOCKET;
1021
1022	if (!uh->check)
1023		goto report_csum_error;
1024
1025	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1026		goto discard;
1027	nf_reset_ct(skb);
1028
1029	if (udp_lib_checksum_complete(skb))
1030		goto csum_error;
1031
1032	__UDP6_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
1033	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
1034
1035	kfree_skb_reason(skb, reason);
1036	return 0;
1037
1038short_packet:
1039	if (reason == SKB_DROP_REASON_NOT_SPECIFIED)
1040		reason = SKB_DROP_REASON_PKT_TOO_SMALL;
1041	net_dbg_ratelimited("UDP%sv6: short packet: From [%pI6c]:%u %d/%d to [%pI6c]:%u\n",
1042			    proto == IPPROTO_UDPLITE ? "-Lite" : "",
1043			    saddr, ntohs(uh->source),
1044			    ulen, skb->len,
1045			    daddr, ntohs(uh->dest));
1046	goto discard;
1047
1048report_csum_error:
1049	udp6_csum_zero_error(skb);
1050csum_error:
1051	if (reason == SKB_DROP_REASON_NOT_SPECIFIED)
1052		reason = SKB_DROP_REASON_UDP_CSUM;
1053	__UDP6_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
1054discard:
1055	__UDP6_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
1056	kfree_skb_reason(skb, reason);
1057	return 0;
1058}
1059
1060
1061static struct sock *__udp6_lib_demux_lookup(struct net *net,
1062			__be16 loc_port, const struct in6_addr *loc_addr,
1063			__be16 rmt_port, const struct in6_addr *rmt_addr,
1064			int dif, int sdif)
1065{
1066	struct udp_table *udptable = net->ipv4.udp_table;
1067	unsigned short hnum = ntohs(loc_port);
1068	unsigned int hash2, slot2;
1069	struct udp_hslot *hslot2;
1070	__portpair ports;
1071	struct sock *sk;
1072
1073	hash2 = ipv6_portaddr_hash(net, loc_addr, hnum);
1074	slot2 = hash2 & udptable->mask;
1075	hslot2 = &udptable->hash2[slot2];
1076	ports = INET_COMBINED_PORTS(rmt_port, hnum);
1077
1078	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
1079		if (sk->sk_state == TCP_ESTABLISHED &&
1080		    inet6_match(net, sk, rmt_addr, loc_addr, ports, dif, sdif))
1081			return sk;
1082		/* Only check first socket in chain */
1083		break;
1084	}
1085	return NULL;
1086}
1087
1088void udp_v6_early_demux(struct sk_buff *skb)
1089{
1090	struct net *net = dev_net(skb->dev);
1091	const struct udphdr *uh;
1092	struct sock *sk;
1093	struct dst_entry *dst;
1094	int dif = skb->dev->ifindex;
1095	int sdif = inet6_sdif(skb);
1096
1097	if (!pskb_may_pull(skb, skb_transport_offset(skb) +
1098	    sizeof(struct udphdr)))
1099		return;
1100
1101	uh = udp_hdr(skb);
1102
1103	if (skb->pkt_type == PACKET_HOST)
1104		sk = __udp6_lib_demux_lookup(net, uh->dest,
1105					     &ipv6_hdr(skb)->daddr,
1106					     uh->source, &ipv6_hdr(skb)->saddr,
1107					     dif, sdif);
1108	else
1109		return;
1110
1111	if (!sk)
1112		return;
1113
1114	skb->sk = sk;
1115	DEBUG_NET_WARN_ON_ONCE(sk_is_refcounted(sk));
1116	skb->destructor = sock_pfree;
1117	dst = rcu_dereference(sk->sk_rx_dst);
1118
1119	if (dst)
1120		dst = dst_check(dst, sk->sk_rx_dst_cookie);
1121	if (dst) {
1122		/* set noref for now.
1123		 * any place which wants to hold dst has to call
1124		 * dst_hold_safe()
1125		 */
1126		skb_dst_set_noref(skb, dst);
1127	}
1128}
1129
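/* Early demux above is deliberately conservative: it only considers
 * PACKET_HOST traffic, only the first socket in the hash2 chain, and
 * only connected (TCP_ESTABLISHED) sockets, attaching the socket and a
 * cached, noref route to the skb so the later full lookup and routing
 * decision can be skipped.
 */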
1130INDIRECT_CALLABLE_SCOPE int udpv6_rcv(struct sk_buff *skb)
1131{
1132	return __udp6_lib_rcv(skb, dev_net(skb->dev)->ipv4.udp_table, IPPROTO_UDP);
1133}
1134
1135/*
1136 * Throw away all pending data and cancel the corking. Socket is locked.
1137 */
1138static void udp_v6_flush_pending_frames(struct sock *sk)
1139{
1140	struct udp_sock *up = udp_sk(sk);
1141
1142	if (up->pending == AF_INET)
1143		udp_flush_pending_frames(sk);
1144	else if (up->pending) {
1145		up->len = 0;
1146		WRITE_ONCE(up->pending, 0);
1147		ip6_flush_pending_frames(sk);
1148	}
1149}
1150
1151static int udpv6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
1152			     int addr_len)
1153{
1154	if (addr_len < offsetofend(struct sockaddr, sa_family))
1155		return -EINVAL;
1156	/* The following checks are replicated from __ip6_datagram_connect()
 1157	 * and intended to prevent the BPF program called below from accessing
 1158	 * bytes outside the bound specified by the user in addr_len.
1159	 */
1160	if (uaddr->sa_family == AF_INET) {
1161		if (ipv6_only_sock(sk))
1162			return -EAFNOSUPPORT;
1163		return udp_pre_connect(sk, uaddr, addr_len);
1164	}
1165
1166	if (addr_len < SIN6_LEN_RFC2133)
1167		return -EINVAL;
1168
1169	return BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr, &addr_len);
1170}
1171
1172/**
1173 *	udp6_hwcsum_outgoing  -  handle outgoing HW checksumming
1174 *	@sk:	socket we are sending on
1175 *	@skb:	sk_buff containing the filled-in UDP header
1176 *		(checksum field must be zeroed out)
1177 *	@saddr: source address
1178 *	@daddr: destination address
1179 *	@len:	length of packet
1180 */
1181static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
1182				 const struct in6_addr *saddr,
1183				 const struct in6_addr *daddr, int len)
1184{
1185	unsigned int offset;
1186	struct udphdr *uh = udp_hdr(skb);
1187	struct sk_buff *frags = skb_shinfo(skb)->frag_list;
1188	__wsum csum = 0;
1189
1190	if (!frags) {
1191		/* Only one fragment on the socket.  */
1192		skb->csum_start = skb_transport_header(skb) - skb->head;
1193		skb->csum_offset = offsetof(struct udphdr, check);
1194		uh->check = ~csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP, 0);
1195	} else {
1196		/*
 1197		 * HW checksumming won't work as there are two or more
 1198		 * fragments on the socket, so the csums of all sk_buffs
 1199		 * have to be added up together here
1200		 */
1201		offset = skb_transport_offset(skb);
1202		skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
1203		csum = skb->csum;
1204
1205		skb->ip_summed = CHECKSUM_NONE;
1206
1207		do {
1208			csum = csum_add(csum, frags->csum);
1209		} while ((frags = frags->next));
1210
1211		uh->check = csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP,
1212					    csum);
1213		if (uh->check == 0)
1214			uh->check = CSUM_MANGLED_0;
1215	}
1216}
1217
1218/*
1219 *	Sending
1220 */
1221
1222static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
1223			   struct inet_cork *cork)
1224{
1225	struct sock *sk = skb->sk;
1226	struct udphdr *uh;
1227	int err = 0;
1228	int is_udplite = IS_UDPLITE(sk);
1229	__wsum csum = 0;
1230	int offset = skb_transport_offset(skb);
1231	int len = skb->len - offset;
1232	int datalen = len - sizeof(*uh);
1233
1234	/*
1235	 * Create a UDP header
1236	 */
1237	uh = udp_hdr(skb);
1238	uh->source = fl6->fl6_sport;
1239	uh->dest = fl6->fl6_dport;
1240	uh->len = htons(len);
1241	uh->check = 0;
1242
1243	if (cork->gso_size) {
1244		const int hlen = skb_network_header_len(skb) +
1245				 sizeof(struct udphdr);
1246
1247		if (hlen + cork->gso_size > cork->fragsize) {
1248			kfree_skb(skb);
1249			return -EINVAL;
1250		}
1251		if (datalen > cork->gso_size * UDP_MAX_SEGMENTS) {
1252			kfree_skb(skb);
1253			return -EINVAL;
1254		}
1255		if (udp_get_no_check6_tx(sk)) {
1256			kfree_skb(skb);
1257			return -EINVAL;
1258		}
1259		if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
1260		    dst_xfrm(skb_dst(skb))) {
1261			kfree_skb(skb);
1262			return -EIO;
1263		}
1264
1265		if (datalen > cork->gso_size) {
1266			skb_shinfo(skb)->gso_size = cork->gso_size;
1267			skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
1268			skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen,
1269								 cork->gso_size);
1270		}
1271		goto csum_partial;
1272	}
1273
1274	if (is_udplite)
1275		csum = udplite_csum(skb);
1276	else if (udp_get_no_check6_tx(sk)) {   /* UDP csum disabled */
1277		skb->ip_summed = CHECKSUM_NONE;
1278		goto send;
1279	} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
1280csum_partial:
1281		udp6_hwcsum_outgoing(sk, skb, &fl6->saddr, &fl6->daddr, len);
1282		goto send;
1283	} else
1284		csum = udp_csum(skb);
1285
1286	/* add protocol-dependent pseudo-header */
1287	uh->check = csum_ipv6_magic(&fl6->saddr, &fl6->daddr,
1288				    len, fl6->flowi6_proto, csum);
1289	if (uh->check == 0)
1290		uh->check = CSUM_MANGLED_0;
1291
1292send:
1293	err = ip6_send_skb(skb);
1294	if (err) {
1295		if (err == -ENOBUFS && !inet6_test_bit(RECVERR6, sk)) {
1296			UDP6_INC_STATS(sock_net(sk),
1297				       UDP_MIB_SNDBUFERRORS, is_udplite);
1298			err = 0;
1299		}
1300	} else {
1301		UDP6_INC_STATS(sock_net(sk),
1302			       UDP_MIB_OUTDATAGRAMS, is_udplite);
1303	}
1304	return err;
1305}
1306
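/* The cork->gso_size path above implements UDP GSO on transmit.
 * Illustrative only, not part of this file: a userspace sender would
 * typically opt in with the UDP_SEGMENT socket option (a per-call cmsg
 * of the same name also exists), e.g.:
 *
 *	int gso_size = 1400;	// payload bytes per segment, illustrative
 *	setsockopt(fd, SOL_UDP, UDP_SEGMENT, &gso_size, sizeof(gso_size));
 *
 * As the checks above show, a segment plus headers must fit within the
 * corked fragment size, the payload may not exceed UDP_MAX_SEGMENTS
 * segments, and UDP-Lite, disabled TX checksums or non-CHECKSUM_PARTIAL
 * skbs are rejected, since segmentation relies on checksum offload.
 */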
1307static int udp_v6_push_pending_frames(struct sock *sk)
1308{
1309	struct sk_buff *skb;
1310	struct udp_sock  *up = udp_sk(sk);
1311	int err = 0;
1312
1313	if (up->pending == AF_INET)
1314		return udp_push_pending_frames(sk);
1315
1316	skb = ip6_finish_skb(sk);
1317	if (!skb)
1318		goto out;
1319
1320	err = udp_v6_send_skb(skb, &inet_sk(sk)->cork.fl.u.ip6,
1321			      &inet_sk(sk)->cork.base);
1322out:
1323	up->len = 0;
1324	WRITE_ONCE(up->pending, 0);
1325	return err;
1326}
1327
1328int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
1329{
1330	struct ipv6_txoptions opt_space;
1331	struct udp_sock *up = udp_sk(sk);
1332	struct inet_sock *inet = inet_sk(sk);
1333	struct ipv6_pinfo *np = inet6_sk(sk);
1334	DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
1335	struct in6_addr *daddr, *final_p, final;
1336	struct ipv6_txoptions *opt = NULL;
1337	struct ipv6_txoptions *opt_to_free = NULL;
1338	struct ip6_flowlabel *flowlabel = NULL;
1339	struct inet_cork_full cork;
1340	struct flowi6 *fl6 = &cork.fl.u.ip6;
1341	struct dst_entry *dst;
1342	struct ipcm6_cookie ipc6;
1343	int addr_len = msg->msg_namelen;
1344	bool connected = false;
1345	int ulen = len;
1346	int corkreq = udp_test_bit(CORK, sk) || msg->msg_flags & MSG_MORE;
1347	int err;
1348	int is_udplite = IS_UDPLITE(sk);
1349	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
1350
1351	ipcm6_init(&ipc6);
1352	ipc6.gso_size = READ_ONCE(up->gso_size);
1353	ipc6.sockc.tsflags = READ_ONCE(sk->sk_tsflags);
1354	ipc6.sockc.mark = READ_ONCE(sk->sk_mark);
1355
1356	/* destination address check */
1357	if (sin6) {
1358		if (addr_len < offsetof(struct sockaddr, sa_data))
1359			return -EINVAL;
1360
1361		switch (sin6->sin6_family) {
1362		case AF_INET6:
1363			if (addr_len < SIN6_LEN_RFC2133)
1364				return -EINVAL;
1365			daddr = &sin6->sin6_addr;
1366			if (ipv6_addr_any(daddr) &&
1367			    ipv6_addr_v4mapped(&np->saddr))
1368				ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
1369						       daddr);
1370			break;
1371		case AF_INET:
1372			goto do_udp_sendmsg;
1373		case AF_UNSPEC:
1374			msg->msg_name = sin6 = NULL;
1375			msg->msg_namelen = addr_len = 0;
1376			daddr = NULL;
1377			break;
1378		default:
1379			return -EINVAL;
1380		}
1381	} else if (!READ_ONCE(up->pending)) {
1382		if (sk->sk_state != TCP_ESTABLISHED)
1383			return -EDESTADDRREQ;
1384		daddr = &sk->sk_v6_daddr;
1385	} else
1386		daddr = NULL;
1387
1388	if (daddr) {
1389		if (ipv6_addr_v4mapped(daddr)) {
1390			struct sockaddr_in sin;
1391			sin.sin_family = AF_INET;
1392			sin.sin_port = sin6 ? sin6->sin6_port : inet->inet_dport;
1393			sin.sin_addr.s_addr = daddr->s6_addr32[3];
1394			msg->msg_name = &sin;
1395			msg->msg_namelen = sizeof(sin);
1396do_udp_sendmsg:
1397			err = ipv6_only_sock(sk) ?
1398				-ENETUNREACH : udp_sendmsg(sk, msg, len);
1399			msg->msg_name = sin6;
1400			msg->msg_namelen = addr_len;
1401			return err;
1402		}
1403	}
1404
1405	/* Rough check on arithmetic overflow,
 1406	   a better check is made in ip6_append_data().
1407	   */
1408	if (len > INT_MAX - sizeof(struct udphdr))
1409		return -EMSGSIZE;
1410
1411	getfrag  =  is_udplite ?  udplite_getfrag : ip_generic_getfrag;
1412	if (READ_ONCE(up->pending)) {
1413		if (READ_ONCE(up->pending) == AF_INET)
1414			return udp_sendmsg(sk, msg, len);
1415		/*
1416		 * There are pending frames.
1417		 * The socket lock must be held while it's corked.
1418		 */
1419		lock_sock(sk);
1420		if (likely(up->pending)) {
1421			if (unlikely(up->pending != AF_INET6)) {
1422				release_sock(sk);
1423				return -EAFNOSUPPORT;
1424			}
1425			dst = NULL;
1426			goto do_append_data;
1427		}
1428		release_sock(sk);
1429	}
1430	ulen += sizeof(struct udphdr);
1431
1432	memset(fl6, 0, sizeof(*fl6));
1433
1434	if (sin6) {
1435		if (sin6->sin6_port == 0)
1436			return -EINVAL;
1437
1438		fl6->fl6_dport = sin6->sin6_port;
1439		daddr = &sin6->sin6_addr;
1440
1441		if (inet6_test_bit(SNDFLOW, sk)) {
1442			fl6->flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK;
1443			if (fl6->flowlabel & IPV6_FLOWLABEL_MASK) {
1444				flowlabel = fl6_sock_lookup(sk, fl6->flowlabel);
1445				if (IS_ERR(flowlabel))
1446					return -EINVAL;
1447			}
1448		}
1449
1450		/*
1451		 * Otherwise it will be difficult to maintain
1452		 * sk->sk_dst_cache.
1453		 */
1454		if (sk->sk_state == TCP_ESTABLISHED &&
1455		    ipv6_addr_equal(daddr, &sk->sk_v6_daddr))
1456			daddr = &sk->sk_v6_daddr;
1457
1458		if (addr_len >= sizeof(struct sockaddr_in6) &&
1459		    sin6->sin6_scope_id &&
1460		    __ipv6_addr_needs_scope_id(__ipv6_addr_type(daddr)))
1461			fl6->flowi6_oif = sin6->sin6_scope_id;
1462	} else {
1463		if (sk->sk_state != TCP_ESTABLISHED)
1464			return -EDESTADDRREQ;
1465
1466		fl6->fl6_dport = inet->inet_dport;
1467		daddr = &sk->sk_v6_daddr;
1468		fl6->flowlabel = np->flow_label;
1469		connected = true;
1470	}
1471
1472	if (!fl6->flowi6_oif)
1473		fl6->flowi6_oif = READ_ONCE(sk->sk_bound_dev_if);
1474
1475	if (!fl6->flowi6_oif)
1476		fl6->flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
1477
1478	fl6->flowi6_uid = sk->sk_uid;
1479
1480	if (msg->msg_controllen) {
1481		opt = &opt_space;
1482		memset(opt, 0, sizeof(struct ipv6_txoptions));
1483		opt->tot_len = sizeof(*opt);
1484		ipc6.opt = opt;
1485
1486		err = udp_cmsg_send(sk, msg, &ipc6.gso_size);
1487		if (err > 0) {
1488			err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, fl6,
1489						    &ipc6);
1490			connected = false;
1491		}
1492		if (err < 0) {
1493			fl6_sock_release(flowlabel);
1494			return err;
1495		}
1496		if ((fl6->flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) {
1497			flowlabel = fl6_sock_lookup(sk, fl6->flowlabel);
1498			if (IS_ERR(flowlabel))
1499				return -EINVAL;
1500		}
1501		if (!(opt->opt_nflen|opt->opt_flen))
1502			opt = NULL;
1503	}
1504	if (!opt) {
1505		opt = txopt_get(np);
1506		opt_to_free = opt;
1507	}
1508	if (flowlabel)
1509		opt = fl6_merge_options(&opt_space, flowlabel, opt);
1510	opt = ipv6_fixup_options(&opt_space, opt);
1511	ipc6.opt = opt;
1512
1513	fl6->flowi6_proto = sk->sk_protocol;
1514	fl6->flowi6_mark = ipc6.sockc.mark;
1515	fl6->daddr = *daddr;
1516	if (ipv6_addr_any(&fl6->saddr) && !ipv6_addr_any(&np->saddr))
1517		fl6->saddr = np->saddr;
1518	fl6->fl6_sport = inet->inet_sport;
1519
1520	if (cgroup_bpf_enabled(CGROUP_UDP6_SENDMSG) && !connected) {
1521		err = BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk,
1522					   (struct sockaddr *)sin6,
1523					   &addr_len,
1524					   &fl6->saddr);
1525		if (err)
1526			goto out_no_dst;
1527		if (sin6) {
1528			if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
 1529				/* BPF program rewrote the IPv6 address to an
 1530				 * IPv4-mapped one. That's currently unsupported.
1531				 */
1532				err = -ENOTSUPP;
1533				goto out_no_dst;
1534			}
1535			if (sin6->sin6_port == 0) {
1536				/* BPF program set invalid port. Reject it. */
1537				err = -EINVAL;
1538				goto out_no_dst;
1539			}
1540			fl6->fl6_dport = sin6->sin6_port;
1541			fl6->daddr = sin6->sin6_addr;
1542		}
1543	}
1544
1545	if (ipv6_addr_any(&fl6->daddr))
1546		fl6->daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
1547
1548	final_p = fl6_update_dst(fl6, opt, &final);
1549	if (final_p)
1550		connected = false;
1551
1552	if (!fl6->flowi6_oif && ipv6_addr_is_multicast(&fl6->daddr)) {
1553		fl6->flowi6_oif = READ_ONCE(np->mcast_oif);
1554		connected = false;
1555	} else if (!fl6->flowi6_oif)
1556		fl6->flowi6_oif = READ_ONCE(np->ucast_oif);
1557
1558	security_sk_classify_flow(sk, flowi6_to_flowi_common(fl6));
1559
1560	if (ipc6.tclass < 0)
1561		ipc6.tclass = np->tclass;
1562
1563	fl6->flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6->flowlabel);
1564
1565	dst = ip6_sk_dst_lookup_flow(sk, fl6, final_p, connected);
1566	if (IS_ERR(dst)) {
1567		err = PTR_ERR(dst);
1568		dst = NULL;
1569		goto out;
1570	}
1571
1572	if (ipc6.hlimit < 0)
1573		ipc6.hlimit = ip6_sk_dst_hoplimit(np, fl6, dst);
1574
1575	if (msg->msg_flags&MSG_CONFIRM)
1576		goto do_confirm;
1577back_from_confirm:
1578
1579	/* Lockless fast path for the non-corking case */
1580	if (!corkreq) {
1581		struct sk_buff *skb;
1582
1583		skb = ip6_make_skb(sk, getfrag, msg, ulen,
1584				   sizeof(struct udphdr), &ipc6,
1585				   dst_rt6_info(dst),
1586				   msg->msg_flags, &cork);
1587		err = PTR_ERR(skb);
1588		if (!IS_ERR_OR_NULL(skb))
1589			err = udp_v6_send_skb(skb, fl6, &cork.base);
1590		/* ip6_make_skb steals dst reference */
1591		goto out_no_dst;
1592	}
1593
1594	lock_sock(sk);
1595	if (unlikely(up->pending)) {
1596		/* The socket is already corked while preparing it. */
1597		/* ... which is an evident application bug. --ANK */
1598		release_sock(sk);
1599
1600		net_dbg_ratelimited("udp cork app bug 2\n");
1601		err = -EINVAL;
1602		goto out;
1603	}
1604
1605	WRITE_ONCE(up->pending, AF_INET6);
1606
1607do_append_data:
1608	if (ipc6.dontfrag < 0)
1609		ipc6.dontfrag = inet6_test_bit(DONTFRAG, sk);
1610	up->len += ulen;
1611	err = ip6_append_data(sk, getfrag, msg, ulen, sizeof(struct udphdr),
1612			      &ipc6, fl6, dst_rt6_info(dst),
1613			      corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
1614	if (err)
1615		udp_v6_flush_pending_frames(sk);
1616	else if (!corkreq)
1617		err = udp_v6_push_pending_frames(sk);
1618	else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
1619		WRITE_ONCE(up->pending, 0);
1620
1621	if (err > 0)
1622		err = inet6_test_bit(RECVERR6, sk) ? net_xmit_errno(err) : 0;
1623	release_sock(sk);
1624
1625out:
1626	dst_release(dst);
1627out_no_dst:
1628	fl6_sock_release(flowlabel);
1629	txopt_put(opt_to_free);
1630	if (!err)
1631		return len;
1632	/*
1633	 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space.  Reporting
1634	 * ENOBUFS might not be good (it's not tunable per se), but otherwise
1635	 * we don't have a good statistic (IpOutDiscards but it can be too many
1636	 * things).  We could add another new stat but at least for now that
1637	 * seems like overkill.
1638	 */
1639	if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
1640		UDP6_INC_STATS(sock_net(sk),
1641			       UDP_MIB_SNDBUFERRORS, is_udplite);
1642	}
1643	return err;
1644
1645do_confirm:
1646	if (msg->msg_flags & MSG_PROBE)
1647		dst_confirm_neigh(dst, &fl6->daddr);
1648	if (!(msg->msg_flags&MSG_PROBE) || len)
1649		goto back_from_confirm;
1650	err = 0;
1651	goto out;
1652}
1653EXPORT_SYMBOL(udpv6_sendmsg);
1654
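/* Illustrative only, not part of this file: udpv6_sendmsg() above also
 * backs a plain sendto() on an IPv6 UDP socket, e.g.:
 *
 *	struct sockaddr_in6 dst = {
 *		.sin6_family = AF_INET6,
 *		.sin6_port   = htons(9999),	// illustrative port
 *	};
 *	inet_pton(AF_INET6, "2001:db8::1", &dst.sin6_addr);
 *	sendto(fd, buf, len, 0, (struct sockaddr *)&dst, sizeof(dst));
 *
 * When the destination is an IPv4-mapped address (::ffff:a.b.c.d), the
 * code above rewrites the name to a sockaddr_in and hands the datagram
 * to the IPv4 udp_sendmsg() path, unless the socket is IPV6_V6ONLY, in
 * which case it fails with -ENETUNREACH.
 */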
1655static void udpv6_splice_eof(struct socket *sock)
1656{
1657	struct sock *sk = sock->sk;
1658	struct udp_sock *up = udp_sk(sk);
1659
1660	if (!READ_ONCE(up->pending) || udp_test_bit(CORK, sk))
1661		return;
1662
1663	lock_sock(sk);
1664	if (up->pending && !udp_test_bit(CORK, sk))
1665		udp_v6_push_pending_frames(sk);
1666	release_sock(sk);
1667}
1668
1669void udpv6_destroy_sock(struct sock *sk)
1670{
1671	struct udp_sock *up = udp_sk(sk);
1672	lock_sock(sk);
1673
1674	/* protects from races with udp_abort() */
1675	sock_set_flag(sk, SOCK_DEAD);
1676	udp_v6_flush_pending_frames(sk);
1677	release_sock(sk);
1678
1679	if (static_branch_unlikely(&udpv6_encap_needed_key)) {
1680		if (up->encap_type) {
1681			void (*encap_destroy)(struct sock *sk);
1682			encap_destroy = READ_ONCE(up->encap_destroy);
1683			if (encap_destroy)
1684				encap_destroy(sk);
1685		}
1686		if (udp_test_bit(ENCAP_ENABLED, sk)) {
1687			static_branch_dec(&udpv6_encap_needed_key);
1688			udp_encap_disable();
1689		}
1690	}
1691}
1692
1693/*
1694 *	Socket option code for UDP
1695 */
1696int udpv6_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
1697		     unsigned int optlen)
1698{
1699	if (level == SOL_UDP  ||  level == SOL_UDPLITE || level == SOL_SOCKET)
1700		return udp_lib_setsockopt(sk, level, optname,
1701					  optval, optlen,
1702					  udp_v6_push_pending_frames);
1703	return ipv6_setsockopt(sk, level, optname, optval, optlen);
1704}
1705
1706int udpv6_getsockopt(struct sock *sk, int level, int optname,
1707		     char __user *optval, int __user *optlen)
1708{
1709	if (level == SOL_UDP  ||  level == SOL_UDPLITE)
1710		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
1711	return ipv6_getsockopt(sk, level, optname, optval, optlen);
1712}
1713
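/* SOL_UDP, SOL_UDPLITE and (for setsockopt) SOL_SOCKET levels are routed
 * to the generic udp_lib_{set,get}sockopt() helpers shared with IPv4,
 * which handle e.g. UDP_CORK, UDP_SEGMENT, UDP_GRO and the UDP-Lite
 * coverage options; every other level falls through to the common IPv6
 * socket option code.
 */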
1714
1715/* ------------------------------------------------------------------------ */
1716#ifdef CONFIG_PROC_FS
1717int udp6_seq_show(struct seq_file *seq, void *v)
1718{
1719	if (v == SEQ_START_TOKEN) {
1720		seq_puts(seq, IPV6_SEQ_DGRAM_HEADER);
1721	} else {
1722		int bucket = ((struct udp_iter_state *)seq->private)->bucket;
1723		const struct inet_sock *inet = inet_sk((const struct sock *)v);
1724		__u16 srcp = ntohs(inet->inet_sport);
1725		__u16 destp = ntohs(inet->inet_dport);
1726		__ip6_dgram_sock_seq_show(seq, v, srcp, destp,
1727					  udp_rqueue_get(v), bucket);
1728	}
1729	return 0;
1730}
1731
1732const struct seq_operations udp6_seq_ops = {
1733	.start		= udp_seq_start,
1734	.next		= udp_seq_next,
1735	.stop		= udp_seq_stop,
1736	.show		= udp6_seq_show,
1737};
1738EXPORT_SYMBOL(udp6_seq_ops);
1739
1740static struct udp_seq_afinfo udp6_seq_afinfo = {
1741	.family		= AF_INET6,
1742	.udp_table	= NULL,
1743};
1744
1745int __net_init udp6_proc_init(struct net *net)
1746{
1747	if (!proc_create_net_data("udp6", 0444, net->proc_net, &udp6_seq_ops,
1748			sizeof(struct udp_iter_state), &udp6_seq_afinfo))
1749		return -ENOMEM;
1750	return 0;
1751}
1752
1753void udp6_proc_exit(struct net *net)
1754{
1755	remove_proc_entry("udp6", net->proc_net);
1756}
1757#endif /* CONFIG_PROC_FS */
1758
1759/* ------------------------------------------------------------------------ */
1760
1761struct proto udpv6_prot = {
1762	.name			= "UDPv6",
1763	.owner			= THIS_MODULE,
1764	.close			= udp_lib_close,
1765	.pre_connect		= udpv6_pre_connect,
1766	.connect		= ip6_datagram_connect,
1767	.disconnect		= udp_disconnect,
1768	.ioctl			= udp_ioctl,
1769	.init			= udpv6_init_sock,
1770	.destroy		= udpv6_destroy_sock,
1771	.setsockopt		= udpv6_setsockopt,
1772	.getsockopt		= udpv6_getsockopt,
1773	.sendmsg		= udpv6_sendmsg,
1774	.recvmsg		= udpv6_recvmsg,
1775	.splice_eof		= udpv6_splice_eof,
1776	.release_cb		= ip6_datagram_release_cb,
1777	.hash			= udp_lib_hash,
1778	.unhash			= udp_lib_unhash,
1779	.rehash			= udp_v6_rehash,
1780	.get_port		= udp_v6_get_port,
1781	.put_port		= udp_lib_unhash,
1782#ifdef CONFIG_BPF_SYSCALL
1783	.psock_update_sk_prot	= udp_bpf_update_proto,
1784#endif
1785
1786	.memory_allocated	= &udp_memory_allocated,
1787	.per_cpu_fw_alloc	= &udp_memory_per_cpu_fw_alloc,
1788
1789	.sysctl_mem		= sysctl_udp_mem,
1790	.sysctl_wmem_offset     = offsetof(struct net, ipv4.sysctl_udp_wmem_min),
1791	.sysctl_rmem_offset     = offsetof(struct net, ipv4.sysctl_udp_rmem_min),
1792	.obj_size		= sizeof(struct udp6_sock),
1793	.ipv6_pinfo_offset = offsetof(struct udp6_sock, inet6),
1794	.h.udp_table		= NULL,
1795	.diag_destroy		= udp_abort,
1796};
1797
1798static struct inet_protosw udpv6_protosw = {
1799	.type =      SOCK_DGRAM,
1800	.protocol =  IPPROTO_UDP,
1801	.prot =      &udpv6_prot,
1802	.ops =       &inet6_dgram_ops,
1803	.flags =     INET_PROTOSW_PERMANENT,
1804};
1805
1806int __init udpv6_init(void)
1807{
1808	int ret;
1809
1810	net_hotdata.udpv6_protocol = (struct inet6_protocol) {
1811		.handler     = udpv6_rcv,
1812		.err_handler = udpv6_err,
1813		.flags	     = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
1814	};
1815	ret = inet6_add_protocol(&net_hotdata.udpv6_protocol, IPPROTO_UDP);
1816	if (ret)
1817		goto out;
1818
1819	ret = inet6_register_protosw(&udpv6_protosw);
1820	if (ret)
1821		goto out_udpv6_protocol;
1822out:
1823	return ret;
1824
1825out_udpv6_protocol:
1826	inet6_del_protocol(&net_hotdata.udpv6_protocol, IPPROTO_UDP);
1827	goto out;
1828}
1829
1830void udpv6_exit(void)
1831{
1832	inet6_unregister_protosw(&udpv6_protosw);
1833	inet6_del_protocol(&net_hotdata.udpv6_protocol, IPPROTO_UDP);
1834}