// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	UDP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on linux/ipv4/udp.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	Kazunori MIYAZAWA @USAGI:	change process style to use ip6_append_data
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/udp6 to seq_file.
 */

#include <linux/bpf-cgroup.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/indirect_call_wrapper.h>

#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/ip6_route.h>
#include <net/raw.h>
#include <net/seg6.h>
#include <net/tcp_states.h>
#include <net/ip6_checksum.h>
#include <net/ip6_tunnel.h>
#include <trace/events/udp.h>
#include <net/xfrm.h>
#include <net/inet_hashtables.h>
#include <net/inet6_hashtables.h>
#include <net/busy_poll.h>
#include <net/sock_reuseport.h>
#include <net/gro.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <trace/events/skb.h>
#include "udp_impl.h"

static void udpv6_destruct_sock(struct sock *sk)
{
	udp_destruct_common(sk);
	inet6_sock_destruct(sk);
}

int udpv6_init_sock(struct sock *sk)
{
	udp_lib_init_sock(sk);
	sk->sk_destruct = udpv6_destruct_sock;
	set_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags);
	return 0;
}

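/* Connection-oriented hash of a UDPv6 4-tuple. Only the last 32 bits of
 * the local address feed the hash directly (sufficient for v4-mapped
 * addresses); the full foreign address is mixed in via jhash with a
 * boot-time random secret, and the netns hash keeps tuples from
 * colliding across namespaces.
 */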
INDIRECT_CALLABLE_SCOPE
u32 udp6_ehashfn(const struct net *net,
		 const struct in6_addr *laddr,
		 const u16 lport,
		 const struct in6_addr *faddr,
		 const __be16 fport)
{
	static u32 udp6_ehash_secret __read_mostly;
	static u32 udp_ipv6_hash_secret __read_mostly;

	u32 lhash, fhash;

	net_get_random_once(&udp6_ehash_secret,
			    sizeof(udp6_ehash_secret));
	net_get_random_once(&udp_ipv6_hash_secret,
			    sizeof(udp_ipv6_hash_secret));

	lhash = (__force u32)laddr->s6_addr32[3];
	fhash = __ipv6_addr_jhash(faddr, udp_ipv6_hash_secret);

	return __inet6_ehashfn(lhash, lport, fhash, fport,
			       udp6_ehash_secret + net_hash_mix(net));
}

int udp_v6_get_port(struct sock *sk, unsigned short snum)
{
	unsigned int hash2_nulladdr =
		ipv6_portaddr_hash(sock_net(sk), &in6addr_any, snum);
	unsigned int hash2_partial =
		ipv6_portaddr_hash(sock_net(sk), &sk->sk_v6_rcv_saddr, 0);

	/* precompute partial secondary hash */
	udp_sk(sk)->udp_portaddr_hash = hash2_partial;
	return udp_lib_get_port(sk, snum, hash2_nulladdr);
}

void udp_v6_rehash(struct sock *sk)
{
	u16 new_hash = ipv6_portaddr_hash(sock_net(sk),
					  &sk->sk_v6_rcv_saddr,
					  inet_sk(sk)->inet_num);

	udp_lib_rehash(sk, new_hash);
}

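/* Rank a candidate socket for an incoming packet: -1 means no match,
 * otherwise one point for each of a matching connected destination
 * port, a matching connected destination address, a binding to the
 * ingress device, and affinity with the current CPU.
 */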
static int compute_score(struct sock *sk, struct net *net,
			 const struct in6_addr *saddr, __be16 sport,
			 const struct in6_addr *daddr, unsigned short hnum,
			 int dif, int sdif)
{
	int bound_dev_if, score;
	struct inet_sock *inet;
	bool dev_match;

	if (!net_eq(sock_net(sk), net) ||
	    udp_sk(sk)->udp_port_hash != hnum ||
	    sk->sk_family != PF_INET6)
		return -1;

	if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
		return -1;

	score = 0;
	inet = inet_sk(sk);

	if (inet->inet_dport) {
		if (inet->inet_dport != sport)
			return -1;
		score++;
	}

	if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
		if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr))
			return -1;
		score++;
	}

	bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
	dev_match = udp_sk_bound_dev_eq(net, bound_dev_if, dif, sdif);
	if (!dev_match)
		return -1;
	if (bound_dev_if)
		score++;

	if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
		score++;

	return score;
}

/* called with rcu_read_lock() */
static struct sock *udp6_lib_lookup2(struct net *net,
		const struct in6_addr *saddr, __be16 sport,
		const struct in6_addr *daddr, unsigned int hnum,
		int dif, int sdif, struct udp_hslot *hslot2,
		struct sk_buff *skb)
{
	struct sock *sk, *result;
	int score, badness;

	result = NULL;
	badness = -1;
	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		score = compute_score(sk, net, saddr, sport,
				      daddr, hnum, dif, sdif);
		if (score > badness) {
			badness = score;

			if (sk->sk_state == TCP_ESTABLISHED) {
				result = sk;
				continue;
			}

			result = inet6_lookup_reuseport(net, sk, skb, sizeof(struct udphdr),
							saddr, sport, daddr, hnum, udp6_ehashfn);
			if (!result) {
				result = sk;
				continue;
			}

			/* Fall back to scoring if group has connections */
			if (!reuseport_has_conns(sk))
				return result;

			/* Reuseport logic returned an error, keep original score. */
			if (IS_ERR(result))
				continue;

			badness = compute_score(sk, net, saddr, sport,
						daddr, hnum, dif, sdif);
		}
	}
	return result;
}

/* rcu_read_lock() must be held */
struct sock *__udp6_lib_lookup(struct net *net,
			       const struct in6_addr *saddr, __be16 sport,
			       const struct in6_addr *daddr, __be16 dport,
			       int dif, int sdif, struct udp_table *udptable,
			       struct sk_buff *skb)
{
	unsigned short hnum = ntohs(dport);
	unsigned int hash2, slot2;
	struct udp_hslot *hslot2;
	struct sock *result, *sk;

	hash2 = ipv6_portaddr_hash(net, daddr, hnum);
	slot2 = hash2 & udptable->mask;
	hslot2 = &udptable->hash2[slot2];

	/* Lookup connected or non-wildcard sockets */
	result = udp6_lib_lookup2(net, saddr, sport,
				  daddr, hnum, dif, sdif,
				  hslot2, skb);
	if (!IS_ERR_OR_NULL(result) && result->sk_state == TCP_ESTABLISHED)
		goto done;

	/* Lookup redirect from BPF */
	if (static_branch_unlikely(&bpf_sk_lookup_enabled) &&
	    udptable == net->ipv4.udp_table) {
		sk = inet6_lookup_run_sk_lookup(net, IPPROTO_UDP, skb, sizeof(struct udphdr),
						saddr, sport, daddr, hnum, dif,
						udp6_ehashfn);
		if (sk) {
			result = sk;
			goto done;
		}
	}

	/* Got non-wildcard socket or error on first lookup */
	if (result)
		goto done;

	/* Lookup wildcard sockets */
	hash2 = ipv6_portaddr_hash(net, &in6addr_any, hnum);
	slot2 = hash2 & udptable->mask;
	hslot2 = &udptable->hash2[slot2];

	result = udp6_lib_lookup2(net, saddr, sport,
				  &in6addr_any, hnum, dif, sdif,
				  hslot2, skb);
done:
	if (IS_ERR(result))
		return NULL;
	return result;
}
EXPORT_SYMBOL_GPL(__udp6_lib_lookup);

static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb,
					  __be16 sport, __be16 dport,
					  struct udp_table *udptable)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);

	return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
				 &iph->daddr, dport, inet6_iif(skb),
				 inet6_sdif(skb), udptable, skb);
}

struct sock *udp6_lib_lookup_skb(const struct sk_buff *skb,
				 __be16 sport, __be16 dport)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct net *net = dev_net(skb->dev);
	int iif, sdif;

	inet6_get_iif_sdif(skb, &iif, &sdif);

	return __udp6_lib_lookup(net, &iph->saddr, sport,
				 &iph->daddr, dport, iif,
				 sdif, net->ipv4.udp_table, NULL);
}

/* Must be called under rcu_read_lock().
 * Does increment socket refcount.
 */
#if IS_ENABLED(CONFIG_NF_TPROXY_IPV6) || IS_ENABLED(CONFIG_NF_SOCKET_IPV6)
struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport,
			     const struct in6_addr *daddr, __be16 dport, int dif)
{
	struct sock *sk;

	sk = __udp6_lib_lookup(net, saddr, sport, daddr, dport,
			       dif, 0, net->ipv4.udp_table, NULL);
	if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
		sk = NULL;
	return sk;
}
EXPORT_SYMBOL_GPL(udp6_lib_lookup);
#endif

/* do not use the scratch area len for jumbograms: their length exceeds the
 * scratch area space; note that the IP6CB flags are still in the first
 * cacheline, so checking for jumbograms is cheap
 */
static int udp6_skb_len(struct sk_buff *skb)
{
	return unlikely(inet6_is_jumbogram(skb)) ? skb->len : udp_skb_len(skb);
}

/*
 *	This should be easy, if there is something there we
 *	return it, otherwise we block.
 */

int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
		  int flags, int *addr_len)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;
	unsigned int ulen, copied;
	int off, err, peeking = flags & MSG_PEEK;
	int is_udplite = IS_UDPLITE(sk);
	struct udp_mib __percpu *mib;
	bool checksum_valid = false;
	int is_udp4;

	if (flags & MSG_ERRQUEUE)
		return ipv6_recv_error(sk, msg, len, addr_len);

	if (np->rxpmtu && np->rxopt.bits.rxpmtu)
		return ipv6_recv_rxpmtu(sk, msg, len, addr_len);

try_again:
	off = sk_peek_offset(sk, flags);
	skb = __skb_recv_udp(sk, flags, &off, &err);
	if (!skb)
		return err;

	ulen = udp6_skb_len(skb);
	copied = len;
	if (copied > ulen - off)
		copied = ulen - off;
	else if (copied < ulen)
		msg->msg_flags |= MSG_TRUNC;

	is_udp4 = (skb->protocol == htons(ETH_P_IP));
	mib = __UDPX_MIB(sk, is_udp4);

	/*
	 * If checksum is needed at all, try to do it while copying the
	 * data. If the data is truncated, or if we only want a partial
	 * coverage checksum (UDP-Lite), do it before the copy.
	 */

	if (copied < ulen || peeking ||
	    (is_udplite && UDP_SKB_CB(skb)->partial_cov)) {
		checksum_valid = udp_skb_csum_unnecessary(skb) ||
				!__udp_lib_checksum_complete(skb);
		if (!checksum_valid)
			goto csum_copy_err;
	}

	if (checksum_valid || udp_skb_csum_unnecessary(skb)) {
		if (udp_skb_is_linear(skb))
			err = copy_linear_skb(skb, copied, off, &msg->msg_iter);
		else
			err = skb_copy_datagram_msg(skb, off, msg, copied);
	} else {
		err = skb_copy_and_csum_datagram_msg(skb, off, msg);
		if (err == -EINVAL)
			goto csum_copy_err;
	}
	if (unlikely(err)) {
		if (!peeking) {
			atomic_inc(&sk->sk_drops);
			SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
		}
		kfree_skb(skb);
		return err;
	}
	if (!peeking)
		SNMP_INC_STATS(mib, UDP_MIB_INDATAGRAMS);

	sock_recv_cmsgs(msg, sk, skb);

	/* Copy the address. */
	if (msg->msg_name) {
		DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
		sin6->sin6_family = AF_INET6;
		sin6->sin6_port = udp_hdr(skb)->source;
		sin6->sin6_flowinfo = 0;

		if (is_udp4) {
			ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
					       &sin6->sin6_addr);
			sin6->sin6_scope_id = 0;
		} else {
			sin6->sin6_addr = ipv6_hdr(skb)->saddr;
			sin6->sin6_scope_id =
				ipv6_iface_scope_id(&sin6->sin6_addr,
						    inet6_iif(skb));
		}
		*addr_len = sizeof(*sin6);

		BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk,
						      (struct sockaddr *)sin6,
						      addr_len);
	}

	if (udp_test_bit(GRO_ENABLED, sk))
		udp_cmsg_recv(msg, sk, skb);

	if (np->rxopt.all)
		ip6_datagram_recv_common_ctl(sk, msg, skb);

	if (is_udp4) {
		if (inet_cmsg_flags(inet))
			ip_cmsg_recv_offset(msg, sk, skb,
					    sizeof(struct udphdr), off);
	} else {
		if (np->rxopt.all)
			ip6_datagram_recv_specific_ctl(sk, msg, skb);
	}

	err = copied;
	if (flags & MSG_TRUNC)
		err = ulen;

	skb_consume_udp(sk, skb, peeking ? -err : err);
	return err;

csum_copy_err:
	if (!__sk_queue_drop_skb(sk, &udp_sk(sk)->reader_queue, skb, flags,
				 udp_skb_destructor)) {
		SNMP_INC_STATS(mib, UDP_MIB_CSUMERRORS);
		SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
	}
	kfree_skb(skb);

	/* starting over for a new packet, but check if we need to yield */
	cond_resched();
	msg->msg_flags &= ~MSG_TRUNC;
	goto try_again;
}

DEFINE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
void udpv6_encap_enable(void)
{
	static_branch_inc(&udpv6_encap_needed_key);
}
EXPORT_SYMBOL(udpv6_encap_enable);

/* Handler for tunnels with arbitrary destination ports: no socket lookup, go
 * through error handlers in encapsulations looking for a match.
 */
static int __udp6_lib_err_encap_no_sk(struct sk_buff *skb,
				      struct inet6_skb_parm *opt,
				      u8 type, u8 code, int offset, __be32 info)
{
	int i;

	for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) {
		int (*handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
			       u8 type, u8 code, int offset, __be32 info);
		const struct ip6_tnl_encap_ops *encap;

		encap = rcu_dereference(ip6tun_encaps[i]);
		if (!encap)
			continue;
		handler = encap->err_handler;
		if (handler && !handler(skb, opt, type, code, offset, info))
			return 0;
	}

	return -ENOENT;
}

/* Try to match ICMP errors to UDP tunnels by looking up a socket without
 * reversing source and destination port: this will match tunnels that force the
 * same destination port on both endpoints (e.g. VXLAN, GENEVE). Note that
 * lwtunnels might actually break this assumption by being configured with
 * different destination ports on endpoints, in this case we won't be able to
 * trace ICMP messages back to them.
 *
 * If this doesn't match any socket, probe tunnels with arbitrary destination
 * ports (e.g. FoU, GUE): there, the receiving socket is useless, as the port
 * we've sent packets to won't necessarily match the local destination port.
 *
 * Then ask the tunnel implementation to match the error against a valid
 * association.
 *
 * Return an error if we can't find a match, the socket if we need further
 * processing, zero otherwise.
 */
static struct sock *__udp6_lib_err_encap(struct net *net,
					 const struct ipv6hdr *hdr, int offset,
					 struct udphdr *uh,
					 struct udp_table *udptable,
					 struct sock *sk,
					 struct sk_buff *skb,
					 struct inet6_skb_parm *opt,
					 u8 type, u8 code, __be32 info)
{
	int (*lookup)(struct sock *sk, struct sk_buff *skb);
	int network_offset, transport_offset;
	struct udp_sock *up;

	network_offset = skb_network_offset(skb);
	transport_offset = skb_transport_offset(skb);

	/* Network header needs to point to the outer IPv6 header inside ICMP */
	skb_reset_network_header(skb);

	/* Transport header needs to point to the UDP header */
	skb_set_transport_header(skb, offset);

	if (sk) {
		up = udp_sk(sk);

		lookup = READ_ONCE(up->encap_err_lookup);
		if (lookup && lookup(sk, skb))
			sk = NULL;

		goto out;
	}

	sk = __udp6_lib_lookup(net, &hdr->daddr, uh->source,
			       &hdr->saddr, uh->dest,
			       inet6_iif(skb), 0, udptable, skb);
	if (sk) {
		up = udp_sk(sk);

		lookup = READ_ONCE(up->encap_err_lookup);
		if (!lookup || lookup(sk, skb))
			sk = NULL;
	}

out:
	if (!sk) {
		sk = ERR_PTR(__udp6_lib_err_encap_no_sk(skb, opt, type, code,
							offset, info));
	}

	skb_set_transport_header(skb, transport_offset);
	skb_set_network_header(skb, network_offset);

	return sk;
}

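/* ICMPv6 error handler shared by UDP and UDP-Lite: look up the socket
 * the offending datagram was sent from, give encapsulation and tunnel
 * error handlers a chance to claim the error, then update PMTU or
 * redirect state, or queue the error to the application depending on
 * the RECVERR6 socket flag.
 */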
int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		   u8 type, u8 code, int offset, __be32 info,
		   struct udp_table *udptable)
{
	struct ipv6_pinfo *np;
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct in6_addr *saddr = &hdr->saddr;
	const struct in6_addr *daddr = seg6_get_daddr(skb, opt) ? : &hdr->daddr;
	struct udphdr *uh = (struct udphdr *)(skb->data+offset);
	bool tunnel = false;
	struct sock *sk;
	int harderr;
	int err;
	struct net *net = dev_net(skb->dev);

	sk = __udp6_lib_lookup(net, daddr, uh->dest, saddr, uh->source,
			       inet6_iif(skb), inet6_sdif(skb), udptable, NULL);

	if (!sk || READ_ONCE(udp_sk(sk)->encap_type)) {
		/* No socket for error: try tunnels before discarding */
		if (static_branch_unlikely(&udpv6_encap_needed_key)) {
			sk = __udp6_lib_err_encap(net, hdr, offset, uh,
						  udptable, sk, skb,
						  opt, type, code, info);
			if (!sk)
				return 0;
		} else
			sk = ERR_PTR(-ENOENT);

		if (IS_ERR(sk)) {
			__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
					  ICMP6_MIB_INERRORS);
			return PTR_ERR(sk);
		}

		tunnel = true;
	}

	harderr = icmpv6_err_convert(type, code, &err);
	np = inet6_sk(sk);

	if (type == ICMPV6_PKT_TOOBIG) {
		if (!ip6_sk_accept_pmtu(sk))
			goto out;
		ip6_sk_update_pmtu(skb, sk, info);
		if (READ_ONCE(np->pmtudisc) != IPV6_PMTUDISC_DONT)
			harderr = 1;
	}
	if (type == NDISC_REDIRECT) {
		if (tunnel) {
			ip6_redirect(skb, sock_net(sk), inet6_iif(skb),
				     READ_ONCE(sk->sk_mark), sk->sk_uid);
		} else {
			ip6_sk_redirect(skb, sk);
		}
		goto out;
	}

	/* Tunnels don't have an application socket: don't pass errors back */
	if (tunnel) {
		if (udp_sk(sk)->encap_err_rcv)
			udp_sk(sk)->encap_err_rcv(sk, skb, err, uh->dest,
						  ntohl(info), (u8 *)(uh+1));
		goto out;
	}

	if (!inet6_test_bit(RECVERR6, sk)) {
		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
			goto out;
	} else {
		ipv6_icmp_error(sk, skb, err, uh->dest, ntohl(info), (u8 *)(uh+1));
	}

	sk->sk_err = err;
	sk_error_report(sk);
out:
	return 0;
}

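/* Charge the skb to the socket receive queue; on failure bump the
 * relevant MIB counters (RCVBUFERRORS for -ENOMEM, MEMERRORS otherwise,
 * plus INERRORS in both cases) and drop the packet.
 */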
static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int rc;

	if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		sk_incoming_cpu_update(sk);
	} else {
		sk_mark_napi_id_once(sk, skb);
	}

	rc = __udp_enqueue_schedule_skb(sk, skb);
	if (rc < 0) {
		int is_udplite = IS_UDPLITE(sk);
		enum skb_drop_reason drop_reason;

		/* Note that an ENOMEM error is charged twice */
		if (rc == -ENOMEM) {
			UDP6_INC_STATS(sock_net(sk),
				       UDP_MIB_RCVBUFERRORS, is_udplite);
			drop_reason = SKB_DROP_REASON_SOCKET_RCVBUFF;
		} else {
			UDP6_INC_STATS(sock_net(sk),
				       UDP_MIB_MEMERRORS, is_udplite);
			drop_reason = SKB_DROP_REASON_PROTO_MEM;
		}
		UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
		kfree_skb_reason(skb, drop_reason);
		trace_udp_fail_queue_rcv_skb(rc, sk);
		return -1;
	}

	return 0;
}

static __inline__ int udpv6_err(struct sk_buff *skb,
				struct inet6_skb_parm *opt, u8 type,
				u8 code, int offset, __be32 info)
{
	return __udp6_lib_err(skb, opt, type, code, offset, info,
			      dev_net(skb->dev)->ipv4.udp_table);
}

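/* Deliver a single skb to a UDPv6 socket: run the XFRM input policy
 * check, hand encapsulated packets to the socket's encap_rcv hook,
 * enforce UDP-Lite partial coverage rules and verify the checksum
 * before queueing the packet on the socket.
 */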
static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
{
	enum skb_drop_reason drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;
	struct udp_sock *up = udp_sk(sk);
	int is_udplite = IS_UDPLITE(sk);

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
		drop_reason = SKB_DROP_REASON_XFRM_POLICY;
		goto drop;
	}
	nf_reset_ct(skb);

	if (static_branch_unlikely(&udpv6_encap_needed_key) &&
	    READ_ONCE(up->encap_type)) {
		int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);

		/*
		 * This is an encapsulation socket so pass the skb to
		 * the socket's udp_encap_rcv() hook. Otherwise, just
		 * fall through and pass this up the UDP socket.
		 * up->encap_rcv() returns the following value:
		 * =0 if skb was successfully passed to the encap
		 *    handler or was discarded by it.
		 * >0 if skb should be passed on to UDP.
		 * <0 if skb should be resubmitted as proto -N
		 */

		/* if we're overly short, let UDP handle it */
		encap_rcv = READ_ONCE(up->encap_rcv);
		if (encap_rcv) {
			int ret;

			/* Verify checksum before giving to encap */
			if (udp_lib_checksum_complete(skb))
				goto csum_error;

			ret = encap_rcv(sk, skb);
			if (ret <= 0) {
				__UDP6_INC_STATS(sock_net(sk),
						 UDP_MIB_INDATAGRAMS,
						 is_udplite);
				return -ret;
			}
		}

		/* FALLTHROUGH -- it's a UDP Packet */
	}

	/*
	 * UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c).
	 */
	if (udp_test_bit(UDPLITE_RECV_CC, sk) && UDP_SKB_CB(skb)->partial_cov) {
		u16 pcrlen = READ_ONCE(up->pcrlen);

		if (pcrlen == 0) {	/* full coverage was set */
			net_dbg_ratelimited("UDPLITE6: partial coverage %d while full coverage %d requested\n",
					    UDP_SKB_CB(skb)->cscov, skb->len);
			goto drop;
		}
		if (UDP_SKB_CB(skb)->cscov < pcrlen) {
			net_dbg_ratelimited("UDPLITE6: coverage %d too small, need min %d\n",
					    UDP_SKB_CB(skb)->cscov, pcrlen);
			goto drop;
		}
	}

	prefetch(&sk->sk_rmem_alloc);
	if (rcu_access_pointer(sk->sk_filter) &&
	    udp_lib_checksum_complete(skb))
		goto csum_error;

	if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr))) {
		drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
		goto drop;
	}

	udp_csum_pull_header(skb);

	skb_dst_drop(skb);

	return __udpv6_queue_rcv_skb(sk, skb);

csum_error:
	drop_reason = SKB_DROP_REASON_UDP_CSUM;
	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
drop:
	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
	atomic_inc(&sk->sk_drops);
	kfree_skb_reason(skb, drop_reason);
	return -1;
}

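/* Entry point for socket delivery: GSO packets the socket cannot accept
 * as-is are segmented first, then each segment is queued individually.
 */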
static int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff *next, *segs;
	int ret;

	if (likely(!udp_unexpected_gso(sk, skb)))
		return udpv6_queue_rcv_one_skb(sk, skb);

	__skb_push(skb, -skb_mac_offset(skb));
	segs = udp_rcv_segment(sk, skb, false);
	skb_list_walk_safe(segs, skb, next) {
		__skb_pull(skb, skb_transport_offset(skb));

		udp_post_segment_fix_csum(skb);
		ret = udpv6_queue_rcv_one_skb(sk, skb);
		if (ret > 0)
			ip6_protocol_deliver_rcu(dev_net(skb->dev), skb, ret,
						 true);
	}
	return 0;
}

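/* Check whether a socket joined to a multicast group should receive a
 * datagram with the given addresses, ports and ingress device.
 */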
static bool __udp_v6_is_mcast_sock(struct net *net, const struct sock *sk,
				   __be16 loc_port, const struct in6_addr *loc_addr,
				   __be16 rmt_port, const struct in6_addr *rmt_addr,
				   int dif, int sdif, unsigned short hnum)
{
	const struct inet_sock *inet = inet_sk(sk);

	if (!net_eq(sock_net(sk), net))
		return false;

	if (udp_sk(sk)->udp_port_hash != hnum ||
	    sk->sk_family != PF_INET6 ||
	    (inet->inet_dport && inet->inet_dport != rmt_port) ||
	    (!ipv6_addr_any(&sk->sk_v6_daddr) &&
	     !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr)) ||
	    !udp_sk_bound_dev_eq(net, READ_ONCE(sk->sk_bound_dev_if), dif, sdif) ||
	    (!ipv6_addr_any(&sk->sk_v6_rcv_saddr) &&
	     !ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr)))
		return false;
	if (!inet6_mc_check(sk, loc_addr, rmt_addr))
		return false;
	return true;
}

static void udp6_csum_zero_error(struct sk_buff *skb)
{
	/* RFC 2460 section 8.1 says that we SHOULD log
	 * this error. Well, it is reasonable.
	 */
	net_dbg_ratelimited("IPv6: udp checksum is 0 for [%pI6c]:%u->[%pI6c]:%u\n",
			    &ipv6_hdr(skb)->saddr, ntohs(udp_hdr(skb)->source),
			    &ipv6_hdr(skb)->daddr, ntohs(udp_hdr(skb)->dest));
}

/*
 * Note: called only from the BH handler context,
 * so we don't need to lock the hashes.
 */
static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
		const struct in6_addr *saddr, const struct in6_addr *daddr,
		struct udp_table *udptable, int proto)
{
	struct sock *sk, *first = NULL;
	const struct udphdr *uh = udp_hdr(skb);
	unsigned short hnum = ntohs(uh->dest);
	struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
	unsigned int offset = offsetof(typeof(*sk), sk_node);
	unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
	int dif = inet6_iif(skb);
	int sdif = inet6_sdif(skb);
	struct hlist_node *node;
	struct sk_buff *nskb;

	if (use_hash2) {
		hash2_any = ipv6_portaddr_hash(net, &in6addr_any, hnum) &
			    udptable->mask;
		hash2 = ipv6_portaddr_hash(net, daddr, hnum) & udptable->mask;
start_lookup:
		hslot = &udptable->hash2[hash2];
		offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
	}

	sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) {
		if (!__udp_v6_is_mcast_sock(net, sk, uh->dest, daddr,
					    uh->source, saddr, dif, sdif,
					    hnum))
			continue;
		/* If zero checksum and no_check is not on for
		 * the socket then skip it.
		 */
		if (!uh->check && !udp_get_no_check6_rx(sk))
			continue;
		if (!first) {
			first = sk;
			continue;
		}
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (unlikely(!nskb)) {
			atomic_inc(&sk->sk_drops);
			__UDP6_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
					 IS_UDPLITE(sk));
			__UDP6_INC_STATS(net, UDP_MIB_INERRORS,
					 IS_UDPLITE(sk));
			continue;
		}

		if (udpv6_queue_rcv_skb(sk, nskb) > 0)
			consume_skb(nskb);
	}

	/* Also lookup *:port if we are using hash2 and haven't done so yet. */
	if (use_hash2 && hash2 != hash2_any) {
		hash2 = hash2_any;
		goto start_lookup;
	}

	if (first) {
		if (udpv6_queue_rcv_skb(first, skb) > 0)
			consume_skb(skb);
	} else {
		kfree_skb(skb);
		__UDP6_INC_STATS(net, UDP_MIB_IGNOREDMULTI,
				 proto == IPPROTO_UDPLITE);
	}
	return 0;
}

static void udp6_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
{
	if (udp_sk_rx_dst_set(sk, dst)) {
		const struct rt6_info *rt = (const struct rt6_info *)dst;

		sk->sk_rx_dst_cookie = rt6_get_cookie(rt);
	}
}

/* wrapper for udp_queue_rcv_skb taking care of csum conversion and
 * return code conversion for ip layer consumption
 */
static int udp6_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
				struct udphdr *uh)
{
	int ret;

	if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
		skb_checksum_try_convert(skb, IPPROTO_UDP, ip6_compute_pseudo);

	ret = udpv6_queue_rcv_skb(sk, skb);

	/* a return value > 0 means to resubmit the input */
	if (ret > 0)
		return ret;
	return 0;
}

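/* Main receive path shared by UDP and UDP-Lite: validate length and
 * checksum, then deliver to an early-demuxed, multicast or looked-up
 * unicast socket, answering with ICMPv6 port unreachable when no
 * socket matches.
 */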
int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
		   int proto)
{
	enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED;
	const struct in6_addr *saddr, *daddr;
	struct net *net = dev_net(skb->dev);
	struct udphdr *uh;
	struct sock *sk;
	bool refcounted;
	u32 ulen = 0;

	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
		goto discard;

	saddr = &ipv6_hdr(skb)->saddr;
	daddr = &ipv6_hdr(skb)->daddr;
	uh = udp_hdr(skb);

	ulen = ntohs(uh->len);
	if (ulen > skb->len)
		goto short_packet;

	if (proto == IPPROTO_UDP) {
		/* UDP validates ulen. */

		/* Check for jumbo payload */
		if (ulen == 0)
			ulen = skb->len;

		if (ulen < sizeof(*uh))
			goto short_packet;

		if (ulen < skb->len) {
			if (pskb_trim_rcsum(skb, ulen))
				goto short_packet;
			saddr = &ipv6_hdr(skb)->saddr;
			daddr = &ipv6_hdr(skb)->daddr;
			uh = udp_hdr(skb);
		}
	}

	if (udp6_csum_init(skb, uh, proto))
		goto csum_error;

	/* Check if the socket is already available, e.g. due to early demux */
	sk = inet6_steal_sock(net, skb, sizeof(struct udphdr), saddr, uh->source, daddr, uh->dest,
			      &refcounted, udp6_ehashfn);
	if (IS_ERR(sk))
		goto no_sk;

	if (sk) {
		struct dst_entry *dst = skb_dst(skb);
		int ret;

		if (unlikely(rcu_dereference(sk->sk_rx_dst) != dst))
			udp6_sk_rx_dst_set(sk, dst);

		if (!uh->check && !udp_get_no_check6_rx(sk)) {
			if (refcounted)
				sock_put(sk);
			goto report_csum_error;
		}

		ret = udp6_unicast_rcv_skb(sk, skb, uh);
		if (refcounted)
			sock_put(sk);
		return ret;
	}

	/*
	 *	Multicast receive code
	 */
	if (ipv6_addr_is_multicast(daddr))
		return __udp6_lib_mcast_deliver(net, skb,
				saddr, daddr, udptable, proto);

	/* Unicast */
	sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
	if (sk) {
		if (!uh->check && !udp_get_no_check6_rx(sk))
			goto report_csum_error;
		return udp6_unicast_rcv_skb(sk, skb, uh);
	}
no_sk:
	reason = SKB_DROP_REASON_NO_SOCKET;

	if (!uh->check)
		goto report_csum_error;

	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard;
	nf_reset_ct(skb);

	if (udp_lib_checksum_complete(skb))
		goto csum_error;

	__UDP6_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);

	kfree_skb_reason(skb, reason);
	return 0;

short_packet:
	if (reason == SKB_DROP_REASON_NOT_SPECIFIED)
		reason = SKB_DROP_REASON_PKT_TOO_SMALL;
	net_dbg_ratelimited("UDP%sv6: short packet: From [%pI6c]:%u %d/%d to [%pI6c]:%u\n",
			    proto == IPPROTO_UDPLITE ? "-Lite" : "",
			    saddr, ntohs(uh->source),
			    ulen, skb->len,
			    daddr, ntohs(uh->dest));
	goto discard;

report_csum_error:
	udp6_csum_zero_error(skb);
csum_error:
	if (reason == SKB_DROP_REASON_NOT_SPECIFIED)
		reason = SKB_DROP_REASON_UDP_CSUM;
	__UDP6_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
discard:
	__UDP6_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
	kfree_skb_reason(skb, reason);
	return 0;
}


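/* Lean lookup used by early demux: only a connected (TCP_ESTABLISHED)
 * socket can match, and only the first socket in the hash chain is
 * examined.
 */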
static struct sock *__udp6_lib_demux_lookup(struct net *net,
			__be16 loc_port, const struct in6_addr *loc_addr,
			__be16 rmt_port, const struct in6_addr *rmt_addr,
			int dif, int sdif)
{
	struct udp_table *udptable = net->ipv4.udp_table;
	unsigned short hnum = ntohs(loc_port);
	unsigned int hash2, slot2;
	struct udp_hslot *hslot2;
	__portpair ports;
	struct sock *sk;

	hash2 = ipv6_portaddr_hash(net, loc_addr, hnum);
	slot2 = hash2 & udptable->mask;
	hslot2 = &udptable->hash2[slot2];
	ports = INET_COMBINED_PORTS(rmt_port, hnum);

	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		if (sk->sk_state == TCP_ESTABLISHED &&
		    inet6_match(net, sk, rmt_addr, loc_addr, ports, dif, sdif))
			return sk;
		/* Only check first socket in chain */
		break;
	}
	return NULL;
}

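/* Early demux: attach a connected socket and its cached dst to the skb
 * before routing, so the packet can skip the full route lookup.
 */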
void udp_v6_early_demux(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	const struct udphdr *uh;
	struct sock *sk;
	struct dst_entry *dst;
	int dif = skb->dev->ifindex;
	int sdif = inet6_sdif(skb);

	if (!pskb_may_pull(skb, skb_transport_offset(skb) +
			   sizeof(struct udphdr)))
		return;

	uh = udp_hdr(skb);

	if (skb->pkt_type == PACKET_HOST)
		sk = __udp6_lib_demux_lookup(net, uh->dest,
					     &ipv6_hdr(skb)->daddr,
					     uh->source, &ipv6_hdr(skb)->saddr,
					     dif, sdif);
	else
		return;

	if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
		return;

	skb->sk = sk;
	skb->destructor = sock_efree;
	dst = rcu_dereference(sk->sk_rx_dst);

	if (dst)
		dst = dst_check(dst, sk->sk_rx_dst_cookie);
	if (dst) {
		/* set noref for now.
		 * any place which wants to hold dst has to call
		 * dst_hold_safe()
		 */
		skb_dst_set_noref(skb, dst);
	}
}

INDIRECT_CALLABLE_SCOPE int udpv6_rcv(struct sk_buff *skb)
{
	return __udp6_lib_rcv(skb, dev_net(skb->dev)->ipv4.udp_table, IPPROTO_UDP);
}

/*
 *	Throw away all pending data and cancel the corking. Socket is locked.
 */
static void udp_v6_flush_pending_frames(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);

	if (up->pending == AF_INET)
		udp_flush_pending_frames(sk);
	else if (up->pending) {
		up->len = 0;
		WRITE_ONCE(up->pending, 0);
		ip6_flush_pending_frames(sk);
	}
}

static int udpv6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
			     int addr_len)
{
	if (addr_len < offsetofend(struct sockaddr, sa_family))
		return -EINVAL;
	/* The following checks are replicated from __ip6_datagram_connect()
	 * and intended to prevent BPF program called below from accessing
	 * bytes that are out of the bound specified by user in addr_len.
	 */
	if (uaddr->sa_family == AF_INET) {
		if (ipv6_only_sock(sk))
			return -EAFNOSUPPORT;
		return udp_pre_connect(sk, uaddr, addr_len);
	}

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	return BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr, &addr_len);
}

/**
 *	udp6_hwcsum_outgoing  -  handle outgoing HW checksumming
 *	@sk:	socket we are sending on
 *	@skb:	sk_buff containing the filled-in UDP header
 *		(checksum field must be zeroed out)
 *	@saddr: source address
 *	@daddr: destination address
 *	@len:	length of packet
 */
static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
				 const struct in6_addr *saddr,
				 const struct in6_addr *daddr, int len)
{
	unsigned int offset;
	struct udphdr *uh = udp_hdr(skb);
	struct sk_buff *frags = skb_shinfo(skb)->frag_list;
	__wsum csum = 0;

	if (!frags) {
		/* Only one fragment on the socket. */
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP, 0);
	} else {
		/*
		 * HW-checksum won't work as there are two or more
		 * fragments on the socket so that all csums of sk_buffs
		 * should be together
		 */
		offset = skb_transport_offset(skb);
		skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
		csum = skb->csum;

		skb->ip_summed = CHECKSUM_NONE;

		do {
			csum = csum_add(csum, frags->csum);
		} while ((frags = frags->next));

		uh->check = csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP,
					    csum);
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	}
}

/*
 *	Sending
 */

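/* Fill in the UDP header and checksum of a built skb and hand it to the
 * IPv6 output path; also validates GSO constraints when UDP
 * segmentation offload was requested via the cork.
 */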
static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
			   struct inet_cork *cork)
{
	struct sock *sk = skb->sk;
	struct udphdr *uh;
	int err = 0;
	int is_udplite = IS_UDPLITE(sk);
	__wsum csum = 0;
	int offset = skb_transport_offset(skb);
	int len = skb->len - offset;
	int datalen = len - sizeof(*uh);

	/*
	 * Create a UDP header
	 */
	uh = udp_hdr(skb);
	uh->source = fl6->fl6_sport;
	uh->dest = fl6->fl6_dport;
	uh->len = htons(len);
	uh->check = 0;

	if (cork->gso_size) {
		const int hlen = skb_network_header_len(skb) +
				 sizeof(struct udphdr);

		if (hlen + cork->gso_size > cork->fragsize) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (datalen > cork->gso_size * UDP_MAX_SEGMENTS) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (udp_get_no_check6_tx(sk)) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
		    dst_xfrm(skb_dst(skb))) {
			kfree_skb(skb);
			return -EIO;
		}

		if (datalen > cork->gso_size) {
			skb_shinfo(skb)->gso_size = cork->gso_size;
			skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
			skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen,
								 cork->gso_size);
		}
		goto csum_partial;
	}

	if (is_udplite)
		csum = udplite_csum(skb);
	else if (udp_get_no_check6_tx(sk)) {	/* UDP csum disabled */
		skb->ip_summed = CHECKSUM_NONE;
		goto send;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
csum_partial:
		udp6_hwcsum_outgoing(sk, skb, &fl6->saddr, &fl6->daddr, len);
		goto send;
	} else
		csum = udp_csum(skb);

	/* add protocol-dependent pseudo-header */
	uh->check = csum_ipv6_magic(&fl6->saddr, &fl6->daddr,
				    len, fl6->flowi6_proto, csum);
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;

send:
	err = ip6_send_skb(skb);
	if (err) {
		if (err == -ENOBUFS && !inet6_test_bit(RECVERR6, sk)) {
			UDP6_INC_STATS(sock_net(sk),
				       UDP_MIB_SNDBUFERRORS, is_udplite);
			err = 0;
		}
	} else {
		UDP6_INC_STATS(sock_net(sk),
			       UDP_MIB_OUTDATAGRAMS, is_udplite);
	}
	return err;
}

static int udp_v6_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb;
	struct udp_sock *up = udp_sk(sk);
	int err = 0;

	if (up->pending == AF_INET)
		return udp_push_pending_frames(sk);

	skb = ip6_finish_skb(sk);
	if (!skb)
		goto out;

	err = udp_v6_send_skb(skb, &inet_sk(sk)->cork.fl.u.ip6,
			      &inet_sk(sk)->cork.base);
out:
	up->len = 0;
	WRITE_ONCE(up->pending, 0);
	return err;
}

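/* sendmsg() for UDPv6: handles the v4-mapped fallback to udp_sendmsg(),
 * flow label and cmsg option parsing and route lookup, then takes
 * either the lockless fast path for a single datagram or the corked
 * ip6_append_data() path.
 */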
int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct ipv6_txoptions opt_space;
	struct udp_sock *up = udp_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
	struct in6_addr *daddr, *final_p, final;
	struct ipv6_txoptions *opt = NULL;
	struct ipv6_txoptions *opt_to_free = NULL;
	struct ip6_flowlabel *flowlabel = NULL;
	struct inet_cork_full cork;
	struct flowi6 *fl6 = &cork.fl.u.ip6;
	struct dst_entry *dst;
	struct ipcm6_cookie ipc6;
	int addr_len = msg->msg_namelen;
	bool connected = false;
	int ulen = len;
	int corkreq = udp_test_bit(CORK, sk) || msg->msg_flags & MSG_MORE;
	int err;
	int is_udplite = IS_UDPLITE(sk);
	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);

	ipcm6_init(&ipc6);
	ipc6.gso_size = READ_ONCE(up->gso_size);
	ipc6.sockc.tsflags = READ_ONCE(sk->sk_tsflags);
	ipc6.sockc.mark = READ_ONCE(sk->sk_mark);

	/* destination address check */
	if (sin6) {
		if (addr_len < offsetof(struct sockaddr, sa_data))
			return -EINVAL;

		switch (sin6->sin6_family) {
		case AF_INET6:
			if (addr_len < SIN6_LEN_RFC2133)
				return -EINVAL;
			daddr = &sin6->sin6_addr;
			if (ipv6_addr_any(daddr) &&
			    ipv6_addr_v4mapped(&np->saddr))
				ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
						       daddr);
			break;
		case AF_INET:
			goto do_udp_sendmsg;
		case AF_UNSPEC:
			msg->msg_name = sin6 = NULL;
			msg->msg_namelen = addr_len = 0;
			daddr = NULL;
			break;
		default:
			return -EINVAL;
		}
	} else if (!READ_ONCE(up->pending)) {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;
		daddr = &sk->sk_v6_daddr;
	} else
		daddr = NULL;

	if (daddr) {
		if (ipv6_addr_v4mapped(daddr)) {
			struct sockaddr_in sin;
			sin.sin_family = AF_INET;
			sin.sin_port = sin6 ? sin6->sin6_port : inet->inet_dport;
			sin.sin_addr.s_addr = daddr->s6_addr32[3];
			msg->msg_name = &sin;
			msg->msg_namelen = sizeof(sin);
do_udp_sendmsg:
			err = ipv6_only_sock(sk) ?
				-ENETUNREACH : udp_sendmsg(sk, msg, len);
			msg->msg_name = sin6;
			msg->msg_namelen = addr_len;
			return err;
		}
	}

	/* Rough check on arithmetic overflow,
	 * better check is made in ip6_append_data().
	 */
	if (len > INT_MAX - sizeof(struct udphdr))
		return -EMSGSIZE;

	getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
	if (READ_ONCE(up->pending)) {
		if (READ_ONCE(up->pending) == AF_INET)
			return udp_sendmsg(sk, msg, len);
		/*
		 * There are pending frames.
		 * The socket lock must be held while it's corked.
		 */
		lock_sock(sk);
		if (likely(up->pending)) {
			if (unlikely(up->pending != AF_INET6)) {
				release_sock(sk);
				return -EAFNOSUPPORT;
			}
			dst = NULL;
			goto do_append_data;
		}
		release_sock(sk);
	}
	ulen += sizeof(struct udphdr);

	memset(fl6, 0, sizeof(*fl6));

	if (sin6) {
		if (sin6->sin6_port == 0)
			return -EINVAL;

		fl6->fl6_dport = sin6->sin6_port;
		daddr = &sin6->sin6_addr;

		if (inet6_test_bit(SNDFLOW, sk)) {
			fl6->flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK;
			if (fl6->flowlabel & IPV6_FLOWLABEL_MASK) {
				flowlabel = fl6_sock_lookup(sk, fl6->flowlabel);
				if (IS_ERR(flowlabel))
					return -EINVAL;
			}
		}

		/*
		 * Otherwise it will be difficult to maintain
		 * sk->sk_dst_cache.
		 */
		if (sk->sk_state == TCP_ESTABLISHED &&
		    ipv6_addr_equal(daddr, &sk->sk_v6_daddr))
			daddr = &sk->sk_v6_daddr;

		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    sin6->sin6_scope_id &&
		    __ipv6_addr_needs_scope_id(__ipv6_addr_type(daddr)))
			fl6->flowi6_oif = sin6->sin6_scope_id;
	} else {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;

		fl6->fl6_dport = inet->inet_dport;
		daddr = &sk->sk_v6_daddr;
		fl6->flowlabel = np->flow_label;
		connected = true;
	}

	if (!fl6->flowi6_oif)
		fl6->flowi6_oif = READ_ONCE(sk->sk_bound_dev_if);

	if (!fl6->flowi6_oif)
		fl6->flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;

	fl6->flowi6_uid = sk->sk_uid;

	if (msg->msg_controllen) {
		opt = &opt_space;
		memset(opt, 0, sizeof(struct ipv6_txoptions));
		opt->tot_len = sizeof(*opt);
		ipc6.opt = opt;

		err = udp_cmsg_send(sk, msg, &ipc6.gso_size);
		if (err > 0)
			err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, fl6,
						    &ipc6);
		if (err < 0) {
			fl6_sock_release(flowlabel);
			return err;
		}
		if ((fl6->flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) {
			flowlabel = fl6_sock_lookup(sk, fl6->flowlabel);
			if (IS_ERR(flowlabel))
				return -EINVAL;
		}
		if (!(opt->opt_nflen|opt->opt_flen))
			opt = NULL;
		connected = false;
	}
	if (!opt) {
		opt = txopt_get(np);
		opt_to_free = opt;
	}
	if (flowlabel)
		opt = fl6_merge_options(&opt_space, flowlabel, opt);
	opt = ipv6_fixup_options(&opt_space, opt);
	ipc6.opt = opt;

	fl6->flowi6_proto = sk->sk_protocol;
	fl6->flowi6_mark = ipc6.sockc.mark;
	fl6->daddr = *daddr;
	if (ipv6_addr_any(&fl6->saddr) && !ipv6_addr_any(&np->saddr))
		fl6->saddr = np->saddr;
	fl6->fl6_sport = inet->inet_sport;

	if (cgroup_bpf_enabled(CGROUP_UDP6_SENDMSG) && !connected) {
		err = BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk,
					   (struct sockaddr *)sin6,
					   &addr_len,
					   &fl6->saddr);
		if (err)
			goto out_no_dst;
		if (sin6) {
			if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
				/* BPF program rewrote the IPv6-only address
				 * to an IPv4-mapped IPv6 address, which is
				 * currently unsupported.
				 */
				err = -ENOTSUPP;
				goto out_no_dst;
			}
			if (sin6->sin6_port == 0) {
				/* BPF program set invalid port. Reject it. */
				err = -EINVAL;
				goto out_no_dst;
			}
			fl6->fl6_dport = sin6->sin6_port;
			fl6->daddr = sin6->sin6_addr;
		}
	}

	if (ipv6_addr_any(&fl6->daddr))
		fl6->daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */

	final_p = fl6_update_dst(fl6, opt, &final);
	if (final_p)
		connected = false;

	if (!fl6->flowi6_oif && ipv6_addr_is_multicast(&fl6->daddr)) {
		fl6->flowi6_oif = READ_ONCE(np->mcast_oif);
		connected = false;
	} else if (!fl6->flowi6_oif)
		fl6->flowi6_oif = READ_ONCE(np->ucast_oif);

	security_sk_classify_flow(sk, flowi6_to_flowi_common(fl6));

	if (ipc6.tclass < 0)
		ipc6.tclass = np->tclass;

	fl6->flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6->flowlabel);

	dst = ip6_sk_dst_lookup_flow(sk, fl6, final_p, connected);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		dst = NULL;
		goto out;
	}

	if (ipc6.hlimit < 0)
		ipc6.hlimit = ip6_sk_dst_hoplimit(np, fl6, dst);

	if (msg->msg_flags&MSG_CONFIRM)
		goto do_confirm;
back_from_confirm:

	/* Lockless fast path for the non-corking case */
	if (!corkreq) {
		struct sk_buff *skb;

		skb = ip6_make_skb(sk, getfrag, msg, ulen,
				   sizeof(struct udphdr), &ipc6,
				   (struct rt6_info *)dst,
				   msg->msg_flags, &cork);
		err = PTR_ERR(skb);
		if (!IS_ERR_OR_NULL(skb))
			err = udp_v6_send_skb(skb, fl6, &cork.base);
		/* ip6_make_skb steals dst reference */
		goto out_no_dst;
	}

	lock_sock(sk);
	if (unlikely(up->pending)) {
		/* The socket is already corked while preparing it. */
		/* ... which is an evident application bug. --ANK */
		release_sock(sk);

		net_dbg_ratelimited("udp cork app bug 2\n");
		err = -EINVAL;
		goto out;
	}

	WRITE_ONCE(up->pending, AF_INET6);

do_append_data:
	if (ipc6.dontfrag < 0)
		ipc6.dontfrag = inet6_test_bit(DONTFRAG, sk);
	up->len += ulen;
	err = ip6_append_data(sk, getfrag, msg, ulen, sizeof(struct udphdr),
			      &ipc6, fl6, (struct rt6_info *)dst,
			      corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
	if (err)
		udp_v6_flush_pending_frames(sk);
	else if (!corkreq)
		err = udp_v6_push_pending_frames(sk);
	else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
		WRITE_ONCE(up->pending, 0);

	if (err > 0)
		err = inet6_test_bit(RECVERR6, sk) ? net_xmit_errno(err) : 0;
	release_sock(sk);

out:
	dst_release(dst);
out_no_dst:
	fl6_sock_release(flowlabel);
	txopt_put(opt_to_free);
	if (!err)
		return len;
	/*
	 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space. Reporting
	 * ENOBUFS might not be good (it's not tunable per se), but otherwise
	 * we don't have a good statistic (IpOutDiscards but it can be too many
	 * things). We could add another new stat but at least for now that
	 * seems like overkill.
	 */
	if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
		UDP6_INC_STATS(sock_net(sk),
			       UDP_MIB_SNDBUFERRORS, is_udplite);
	}
	return err;

do_confirm:
	if (msg->msg_flags & MSG_PROBE)
		dst_confirm_neigh(dst, &fl6->daddr);
	if (!(msg->msg_flags&MSG_PROBE) || len)
		goto back_from_confirm;
	err = 0;
	goto out;
}
EXPORT_SYMBOL(udpv6_sendmsg);

static void udpv6_splice_eof(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct udp_sock *up = udp_sk(sk);

	if (!READ_ONCE(up->pending) || udp_test_bit(CORK, sk))
		return;

	lock_sock(sk);
	if (up->pending && !udp_test_bit(CORK, sk))
		udp_v6_push_pending_frames(sk);
	release_sock(sk);
}

void udpv6_destroy_sock(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);
	lock_sock(sk);

	/* protects from races with udp_abort() */
	sock_set_flag(sk, SOCK_DEAD);
	udp_v6_flush_pending_frames(sk);
	release_sock(sk);

	if (static_branch_unlikely(&udpv6_encap_needed_key)) {
		if (up->encap_type) {
			void (*encap_destroy)(struct sock *sk);
			encap_destroy = READ_ONCE(up->encap_destroy);
			if (encap_destroy)
				encap_destroy(sk);
		}
		if (udp_test_bit(ENCAP_ENABLED, sk)) {
			static_branch_dec(&udpv6_encap_needed_key);
			udp_encap_disable();
		}
	}
}

/*
 *	Socket option code for UDP
 */
int udpv6_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
		     unsigned int optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE || level == SOL_SOCKET)
		return udp_lib_setsockopt(sk, level, optname,
					  optval, optlen,
					  udp_v6_push_pending_frames);
	return ipv6_setsockopt(sk, level, optname, optval, optlen);
}

int udpv6_getsockopt(struct sock *sk, int level, int optname,
		     char __user *optval, int __user *optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
	return ipv6_getsockopt(sk, level, optname, optval, optlen);
}

static const struct inet6_protocol udpv6_protocol = {
	.handler	= udpv6_rcv,
	.err_handler	= udpv6_err,
	.flags		= INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

/* ------------------------------------------------------------------------ */
#ifdef CONFIG_PROC_FS
int udp6_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, IPV6_SEQ_DGRAM_HEADER);
	} else {
		int bucket = ((struct udp_iter_state *)seq->private)->bucket;
		const struct inet_sock *inet = inet_sk((const struct sock *)v);
		__u16 srcp = ntohs(inet->inet_sport);
		__u16 destp = ntohs(inet->inet_dport);
		__ip6_dgram_sock_seq_show(seq, v, srcp, destp,
					  udp_rqueue_get(v), bucket);
	}
	return 0;
}

const struct seq_operations udp6_seq_ops = {
	.start		= udp_seq_start,
	.next		= udp_seq_next,
	.stop		= udp_seq_stop,
	.show		= udp6_seq_show,
};
EXPORT_SYMBOL(udp6_seq_ops);

static struct udp_seq_afinfo udp6_seq_afinfo = {
	.family		= AF_INET6,
	.udp_table	= NULL,
};

int __net_init udp6_proc_init(struct net *net)
{
	if (!proc_create_net_data("udp6", 0444, net->proc_net, &udp6_seq_ops,
			sizeof(struct udp_iter_state), &udp6_seq_afinfo))
		return -ENOMEM;
	return 0;
}

void udp6_proc_exit(struct net *net)
{
	remove_proc_entry("udp6", net->proc_net);
}
#endif /* CONFIG_PROC_FS */

/* ------------------------------------------------------------------------ */

struct proto udpv6_prot = {
	.name			= "UDPv6",
	.owner			= THIS_MODULE,
	.close			= udp_lib_close,
	.pre_connect		= udpv6_pre_connect,
	.connect		= ip6_datagram_connect,
	.disconnect		= udp_disconnect,
	.ioctl			= udp_ioctl,
	.init			= udpv6_init_sock,
	.destroy		= udpv6_destroy_sock,
	.setsockopt		= udpv6_setsockopt,
	.getsockopt		= udpv6_getsockopt,
	.sendmsg		= udpv6_sendmsg,
	.recvmsg		= udpv6_recvmsg,
	.splice_eof		= udpv6_splice_eof,
	.release_cb		= ip6_datagram_release_cb,
	.hash			= udp_lib_hash,
	.unhash			= udp_lib_unhash,
	.rehash			= udp_v6_rehash,
	.get_port		= udp_v6_get_port,
	.put_port		= udp_lib_unhash,
#ifdef CONFIG_BPF_SYSCALL
	.psock_update_sk_prot	= udp_bpf_update_proto,
#endif

	.memory_allocated	= &udp_memory_allocated,
	.per_cpu_fw_alloc	= &udp_memory_per_cpu_fw_alloc,

	.sysctl_mem		= sysctl_udp_mem,
	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_udp_wmem_min),
	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_udp_rmem_min),
	.obj_size		= sizeof(struct udp6_sock),
	.ipv6_pinfo_offset	= offsetof(struct udp6_sock, inet6),
	.h.udp_table		= NULL,
	.diag_destroy		= udp_abort,
};

static struct inet_protosw udpv6_protosw = {
	.type		= SOCK_DGRAM,
	.protocol	= IPPROTO_UDP,
	.prot		= &udpv6_prot,
	.ops		= &inet6_dgram_ops,
	.flags		= INET_PROTOSW_PERMANENT,
};

int __init udpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&udpv6_protocol, IPPROTO_UDP);
	if (ret)
		goto out;

	ret = inet6_register_protosw(&udpv6_protosw);
	if (ret)
		goto out_udpv6_protocol;
out:
	return ret;

out_udpv6_protocol:
	inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
	goto out;
}

void udpv6_exit(void)
{
	inet6_unregister_protosw(&udpv6_protosw);
	inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
}
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * UDP over IPv6
4 * Linux INET6 implementation
5 *
6 * Authors:
7 * Pedro Roque <roque@di.fc.ul.pt>
8 *
9 * Based on linux/ipv4/udp.c
10 *
11 * Fixes:
12 * Hideaki YOSHIFUJI : sin6_scope_id support
13 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
14 * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind
15 * a single port at the same time.
16 * Kazunori MIYAZAWA @USAGI: change process style to use ip6_append_data
17 * YOSHIFUJI Hideaki @USAGI: convert /proc/net/udp6 to seq_file.
18 */
19
20#include <linux/errno.h>
21#include <linux/types.h>
22#include <linux/socket.h>
23#include <linux/sockios.h>
24#include <linux/net.h>
25#include <linux/in6.h>
26#include <linux/netdevice.h>
27#include <linux/if_arp.h>
28#include <linux/ipv6.h>
29#include <linux/icmpv6.h>
30#include <linux/init.h>
31#include <linux/module.h>
32#include <linux/skbuff.h>
33#include <linux/slab.h>
34#include <linux/uaccess.h>
35#include <linux/indirect_call_wrapper.h>
36
37#include <net/addrconf.h>
38#include <net/ndisc.h>
39#include <net/protocol.h>
40#include <net/transp_v6.h>
41#include <net/ip6_route.h>
42#include <net/raw.h>
43#include <net/tcp_states.h>
44#include <net/ip6_checksum.h>
45#include <net/ip6_tunnel.h>
46#include <net/xfrm.h>
47#include <net/inet_hashtables.h>
48#include <net/inet6_hashtables.h>
49#include <net/busy_poll.h>
50#include <net/sock_reuseport.h>
51
52#include <linux/proc_fs.h>
53#include <linux/seq_file.h>
54#include <trace/events/skb.h>
55#include "udp_impl.h"
56
57static u32 udp6_ehashfn(const struct net *net,
58 const struct in6_addr *laddr,
59 const u16 lport,
60 const struct in6_addr *faddr,
61 const __be16 fport)
62{
63 static u32 udp6_ehash_secret __read_mostly;
64 static u32 udp_ipv6_hash_secret __read_mostly;
65
66 u32 lhash, fhash;
67
68 net_get_random_once(&udp6_ehash_secret,
69 sizeof(udp6_ehash_secret));
70 net_get_random_once(&udp_ipv6_hash_secret,
71 sizeof(udp_ipv6_hash_secret));
72
73 lhash = (__force u32)laddr->s6_addr32[3];
74 fhash = __ipv6_addr_jhash(faddr, udp_ipv6_hash_secret);
75
76 return __inet6_ehashfn(lhash, lport, fhash, fport,
77 udp_ipv6_hash_secret + net_hash_mix(net));
78}
79
80int udp_v6_get_port(struct sock *sk, unsigned short snum)
81{
82 unsigned int hash2_nulladdr =
83 ipv6_portaddr_hash(sock_net(sk), &in6addr_any, snum);
84 unsigned int hash2_partial =
85 ipv6_portaddr_hash(sock_net(sk), &sk->sk_v6_rcv_saddr, 0);
86
87 /* precompute partial secondary hash */
88 udp_sk(sk)->udp_portaddr_hash = hash2_partial;
89 return udp_lib_get_port(sk, snum, hash2_nulladdr);
90}
91
92void udp_v6_rehash(struct sock *sk)
93{
94 u16 new_hash = ipv6_portaddr_hash(sock_net(sk),
95 &sk->sk_v6_rcv_saddr,
96 inet_sk(sk)->inet_num);
97
98 udp_lib_rehash(sk, new_hash);
99}
100
101static int compute_score(struct sock *sk, struct net *net,
102 const struct in6_addr *saddr, __be16 sport,
103 const struct in6_addr *daddr, unsigned short hnum,
104 int dif, int sdif)
105{
106 int score;
107 struct inet_sock *inet;
108 bool dev_match;
109
110 if (!net_eq(sock_net(sk), net) ||
111 udp_sk(sk)->udp_port_hash != hnum ||
112 sk->sk_family != PF_INET6)
113 return -1;
114
115 if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
116 return -1;
117
118 score = 0;
119 inet = inet_sk(sk);
120
121 if (inet->inet_dport) {
122 if (inet->inet_dport != sport)
123 return -1;
124 score++;
125 }
126
127 if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
128 if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr))
129 return -1;
130 score++;
131 }
132
133 dev_match = udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif);
134 if (!dev_match)
135 return -1;
136 score++;
137
138 if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
139 score++;
140
141 return score;
142}
143
144static struct sock *lookup_reuseport(struct net *net, struct sock *sk,
145 struct sk_buff *skb,
146 const struct in6_addr *saddr,
147 __be16 sport,
148 const struct in6_addr *daddr,
149 unsigned int hnum)
150{
151 struct sock *reuse_sk = NULL;
152 u32 hash;
153
154 if (sk->sk_reuseport && sk->sk_state != TCP_ESTABLISHED) {
155 hash = udp6_ehashfn(net, daddr, hnum, saddr, sport);
156 reuse_sk = reuseport_select_sock(sk, hash, skb,
157 sizeof(struct udphdr));
158 }
159 return reuse_sk;
160}
161
162/* called with rcu_read_lock() */
163static struct sock *udp6_lib_lookup2(struct net *net,
164 const struct in6_addr *saddr, __be16 sport,
165 const struct in6_addr *daddr, unsigned int hnum,
166 int dif, int sdif, struct udp_hslot *hslot2,
167 struct sk_buff *skb)
168{
169 struct sock *sk, *result;
170 int score, badness;
171
172 result = NULL;
173 badness = -1;
174 udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
175 score = compute_score(sk, net, saddr, sport,
176 daddr, hnum, dif, sdif);
177 if (score > badness) {
178 result = lookup_reuseport(net, sk, skb,
179 saddr, sport, daddr, hnum);
180 /* Fall back to scoring if group has connections */
181 if (result && !reuseport_has_conns(sk, false))
182 return result;
183
184 result = result ? : sk;
185 badness = score;
186 }
187 }
188 return result;
189}
190
191static inline struct sock *udp6_lookup_run_bpf(struct net *net,
192 struct udp_table *udptable,
193 struct sk_buff *skb,
194 const struct in6_addr *saddr,
195 __be16 sport,
196 const struct in6_addr *daddr,
197 u16 hnum)
198{
199 struct sock *sk, *reuse_sk;
200 bool no_reuseport;
201
202 if (udptable != &udp_table)
203 return NULL; /* only UDP is supported */
204
205 no_reuseport = bpf_sk_lookup_run_v6(net, IPPROTO_UDP,
206 saddr, sport, daddr, hnum, &sk);
207 if (no_reuseport || IS_ERR_OR_NULL(sk))
208 return sk;
209
210 reuse_sk = lookup_reuseport(net, sk, skb, saddr, sport, daddr, hnum);
211 if (reuse_sk)
212 sk = reuse_sk;
213 return sk;
214}
215
216/* rcu_read_lock() must be held */
217struct sock *__udp6_lib_lookup(struct net *net,
218 const struct in6_addr *saddr, __be16 sport,
219 const struct in6_addr *daddr, __be16 dport,
220 int dif, int sdif, struct udp_table *udptable,
221 struct sk_buff *skb)
222{
223 unsigned short hnum = ntohs(dport);
224 unsigned int hash2, slot2;
225 struct udp_hslot *hslot2;
226 struct sock *result, *sk;
227
228 hash2 = ipv6_portaddr_hash(net, daddr, hnum);
229 slot2 = hash2 & udptable->mask;
230 hslot2 = &udptable->hash2[slot2];
231
232 /* Lookup connected or non-wildcard sockets */
233 result = udp6_lib_lookup2(net, saddr, sport,
234 daddr, hnum, dif, sdif,
235 hslot2, skb);
236 if (!IS_ERR_OR_NULL(result) && result->sk_state == TCP_ESTABLISHED)
237 goto done;
238
239 /* Lookup redirect from BPF */
240 if (static_branch_unlikely(&bpf_sk_lookup_enabled)) {
241 sk = udp6_lookup_run_bpf(net, udptable, skb,
242 saddr, sport, daddr, hnum);
243 if (sk) {
244 result = sk;
245 goto done;
246 }
247 }
248
249 /* Got non-wildcard socket or error on first lookup */
250 if (result)
251 goto done;
252
253 /* Lookup wildcard sockets */
254 hash2 = ipv6_portaddr_hash(net, &in6addr_any, hnum);
255 slot2 = hash2 & udptable->mask;
256 hslot2 = &udptable->hash2[slot2];
257
258 result = udp6_lib_lookup2(net, saddr, sport,
259 &in6addr_any, hnum, dif, sdif,
260 hslot2, skb);
261done:
262 if (IS_ERR(result))
263 return NULL;
264 return result;
265}
266EXPORT_SYMBOL_GPL(__udp6_lib_lookup);
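
/* A minimal sketch of the two-pass lookup above, for a hypothetical packet
 * addressed to [2001:db8::1]:53:
 *
 *	hash2 = ipv6_portaddr_hash(net, &hdr->daddr, 53);
 *	hslot2 = &udptable->hash2[hash2 & udptable->mask];
 *	sk = udp6_lib_lookup2(net, ..., hslot2, skb);	(pass 1: exact daddr)
 *	if (!sk) {
 *		hash2 = ipv6_portaddr_hash(net, &in6addr_any, 53);
 *		hslot2 = &udptable->hash2[hash2 & udptable->mask];
 *		sk = udp6_lib_lookup2(net, ..., hslot2, skb); (pass 2: [::]:53)
 *	}
 *
 * Sockets bound to a specific address thus beat wildcard binds, and the
 * BPF sk_lookup hook gets a chance in between the two passes.
 */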
267
268static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb,
269 __be16 sport, __be16 dport,
270 struct udp_table *udptable)
271{
272 const struct ipv6hdr *iph = ipv6_hdr(skb);
273
274 return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
275 &iph->daddr, dport, inet6_iif(skb),
276 inet6_sdif(skb), udptable, skb);
277}
278
279struct sock *udp6_lib_lookup_skb(struct sk_buff *skb,
280 __be16 sport, __be16 dport)
281{
282 const struct ipv6hdr *iph = ipv6_hdr(skb);
283
284 return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
285 &iph->daddr, dport, inet6_iif(skb),
286 inet6_sdif(skb), &udp_table, NULL);
287}
288EXPORT_SYMBOL_GPL(udp6_lib_lookup_skb);
289
290/* Must be called under rcu_read_lock().
291 * Does increment the socket refcount.
292 */
293#if IS_ENABLED(CONFIG_NF_TPROXY_IPV6) || IS_ENABLED(CONFIG_NF_SOCKET_IPV6)
294struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport,
295 const struct in6_addr *daddr, __be16 dport, int dif)
296{
297 struct sock *sk;
298
299 sk = __udp6_lib_lookup(net, saddr, sport, daddr, dport,
300 dif, 0, &udp_table, NULL);
301 if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
302 sk = NULL;
303 return sk;
304}
305EXPORT_SYMBOL_GPL(udp6_lib_lookup);
306#endif
307
308/* do not use the scratch area len for jumbograms: their length exceeds the
309 * scratch area space; note that the IP6CB flags are still in the first
310 * cacheline, so checking for jumbograms is cheap
311 */
312static int udp6_skb_len(struct sk_buff *skb)
313{
314 return unlikely(inet6_is_jumbogram(skb)) ? skb->len : udp_skb_len(skb);
315}
316
317/*
318 * This should be easy: if there is something there, we
319 * return it; otherwise we block.
320 */
321
322int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
323 int noblock, int flags, int *addr_len)
324{
325 struct ipv6_pinfo *np = inet6_sk(sk);
326 struct inet_sock *inet = inet_sk(sk);
327 struct sk_buff *skb;
328 unsigned int ulen, copied;
329 int off, err, peeking = flags & MSG_PEEK;
330 int is_udplite = IS_UDPLITE(sk);
331 struct udp_mib __percpu *mib;
332 bool checksum_valid = false;
333 int is_udp4;
334
335 if (flags & MSG_ERRQUEUE)
336 return ipv6_recv_error(sk, msg, len, addr_len);
337
338 if (np->rxpmtu && np->rxopt.bits.rxpmtu)
339 return ipv6_recv_rxpmtu(sk, msg, len, addr_len);
340
341try_again:
342 off = sk_peek_offset(sk, flags);
343 skb = __skb_recv_udp(sk, flags, noblock, &off, &err);
344 if (!skb)
345 return err;
346
347 ulen = udp6_skb_len(skb);
348 copied = len;
349 if (copied > ulen - off)
350 copied = ulen - off;
351 else if (copied < ulen)
352 msg->msg_flags |= MSG_TRUNC;
353
354 is_udp4 = (skb->protocol == htons(ETH_P_IP));
355 mib = __UDPX_MIB(sk, is_udp4);
356
357 /*
358 * If checksum is needed at all, try to do it while copying the
359 * data. If the data is truncated, or if we only want a partial
360 * coverage checksum (UDP-Lite), do it before the copy.
361 */
362
363 if (copied < ulen || peeking ||
364 (is_udplite && UDP_SKB_CB(skb)->partial_cov)) {
365 checksum_valid = udp_skb_csum_unnecessary(skb) ||
366 !__udp_lib_checksum_complete(skb);
367 if (!checksum_valid)
368 goto csum_copy_err;
369 }
370
371 if (checksum_valid || udp_skb_csum_unnecessary(skb)) {
372 if (udp_skb_is_linear(skb))
373 err = copy_linear_skb(skb, copied, off, &msg->msg_iter);
374 else
375 err = skb_copy_datagram_msg(skb, off, msg, copied);
376 } else {
377 err = skb_copy_and_csum_datagram_msg(skb, off, msg);
378 if (err == -EINVAL)
379 goto csum_copy_err;
380 }
381 if (unlikely(err)) {
382 if (!peeking) {
383 atomic_inc(&sk->sk_drops);
384 SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
385 }
386 kfree_skb(skb);
387 return err;
388 }
389 if (!peeking)
390 SNMP_INC_STATS(mib, UDP_MIB_INDATAGRAMS);
391
392 sock_recv_ts_and_drops(msg, sk, skb);
393
394 /* Copy the address. */
395 if (msg->msg_name) {
396 DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
397 sin6->sin6_family = AF_INET6;
398 sin6->sin6_port = udp_hdr(skb)->source;
399 sin6->sin6_flowinfo = 0;
400
401 if (is_udp4) {
402 ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
403 &sin6->sin6_addr);
404 sin6->sin6_scope_id = 0;
405 } else {
406 sin6->sin6_addr = ipv6_hdr(skb)->saddr;
407 sin6->sin6_scope_id =
408 ipv6_iface_scope_id(&sin6->sin6_addr,
409 inet6_iif(skb));
410 }
411 *addr_len = sizeof(*sin6);
412
413 if (cgroup_bpf_enabled)
414 BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk,
415 (struct sockaddr *)sin6);
416 }
417
418 if (udp_sk(sk)->gro_enabled)
419 udp_cmsg_recv(msg, sk, skb);
420
421 if (np->rxopt.all)
422 ip6_datagram_recv_common_ctl(sk, msg, skb);
423
424 if (is_udp4) {
425 if (inet->cmsg_flags)
426 ip_cmsg_recv_offset(msg, sk, skb,
427 sizeof(struct udphdr), off);
428 } else {
429 if (np->rxopt.all)
430 ip6_datagram_recv_specific_ctl(sk, msg, skb);
431 }
432
433 err = copied;
434 if (flags & MSG_TRUNC)
435 err = ulen;
436
437 skb_consume_udp(sk, skb, peeking ? -err : err);
438 return err;
439
440csum_copy_err:
441 if (!__sk_queue_drop_skb(sk, &udp_sk(sk)->reader_queue, skb, flags,
442 udp_skb_destructor)) {
443 SNMP_INC_STATS(mib, UDP_MIB_CSUMERRORS);
444 SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
445 }
446 kfree_skb(skb);
447
448 /* starting over for a new packet, but check if we need to yield */
449 cond_resched();
450 msg->msg_flags &= ~MSG_TRUNC;
451 goto try_again;
452}
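
/* A rough userspace sketch of the MSG_ERRQUEUE branch at the top of
 * udpv6_recvmsg(): with IPV6_RECVERR enabled, queued ICMPv6 errors are
 * drained through recvmsg() instead of the normal data path (error
 * handling trimmed, buffer sizes arbitrary):
 *
 *	int on = 1;
 *	char buf[1280], cbuf[512];
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct msghdr mh = { .msg_iov = &iov, .msg_iovlen = 1,
 *			     .msg_control = cbuf,
 *			     .msg_controllen = sizeof(cbuf) };
 *
 *	setsockopt(fd, IPPROTO_IPV6, IPV6_RECVERR, &on, sizeof(on));
 *	if (recvmsg(fd, &mh, MSG_ERRQUEUE) >= 0)
 *		walk the cmsgs for a struct sock_extended_err
 */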
453
454DEFINE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
455void udpv6_encap_enable(void)
456{
457 static_branch_inc(&udpv6_encap_needed_key);
458}
459EXPORT_SYMBOL(udpv6_encap_enable);
460
461/* Handler for tunnels with arbitrary destination ports: no socket lookup, go
462 * through error handlers in encapsulations looking for a match.
463 */
464static int __udp6_lib_err_encap_no_sk(struct sk_buff *skb,
465 struct inet6_skb_parm *opt,
466 u8 type, u8 code, int offset, __be32 info)
467{
468 int i;
469
470 for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) {
471 int (*handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
472 u8 type, u8 code, int offset, __be32 info);
473 const struct ip6_tnl_encap_ops *encap;
474
475 encap = rcu_dereference(ip6tun_encaps[i]);
476 if (!encap)
477 continue;
478 handler = encap->err_handler;
479 if (handler && !handler(skb, opt, type, code, offset, info))
480 return 0;
481 }
482
483 return -ENOENT;
484}
485
486/* Try to match ICMP errors to UDP tunnels by looking up a socket without
487 * reversing source and destination port: this will match tunnels that force the
488 * same destination port on both endpoints (e.g. VXLAN, GENEVE). Note that
489 * lwtunnels might actually break this assumption by being configured with
490 * different destination ports on their endpoints; in that case we won't be
491 * able to trace ICMP messages back to them.
492 *
493 * If this doesn't match any socket, probe tunnels with arbitrary destination
494 * ports (e.g. FoU, GUE): there, the receiving socket is useless, as the port
495 * we've sent packets to won't necessarily match the local destination port.
496 *
497 * Then ask the tunnel implementation to match the error against a valid
498 * association.
499 *
500 * Return an error if we can't find a match, the socket if we need further
501 * processing, zero otherwise.
502 */
503static struct sock *__udp6_lib_err_encap(struct net *net,
504 const struct ipv6hdr *hdr, int offset,
505 struct udphdr *uh,
506 struct udp_table *udptable,
507 struct sk_buff *skb,
508 struct inet6_skb_parm *opt,
509 u8 type, u8 code, __be32 info)
510{
511 int network_offset, transport_offset;
512 struct sock *sk;
513
514 network_offset = skb_network_offset(skb);
515 transport_offset = skb_transport_offset(skb);
516
517 /* Network header needs to point to the outer IPv6 header inside ICMP */
518 skb_reset_network_header(skb);
519
520 /* Transport header needs to point to the UDP header */
521 skb_set_transport_header(skb, offset);
522
523 sk = __udp6_lib_lookup(net, &hdr->daddr, uh->source,
524 &hdr->saddr, uh->dest,
525 inet6_iif(skb), 0, udptable, skb);
526 if (sk) {
527 int (*lookup)(struct sock *sk, struct sk_buff *skb);
528 struct udp_sock *up = udp_sk(sk);
529
530 lookup = READ_ONCE(up->encap_err_lookup);
531 if (!lookup || lookup(sk, skb))
532 sk = NULL;
533 }
534
535 if (!sk) {
536 sk = ERR_PTR(__udp6_lib_err_encap_no_sk(skb, opt, type, code,
537 offset, info));
538 }
539
540 skb_set_transport_header(skb, transport_offset);
541 skb_set_network_header(skb, network_offset);
542
543 return sk;
544}
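
/* A minimal sketch (not from this file) of how a tunnel driver installs
 * the encap_rcv/encap_err_lookup hooks consulted above, using the
 * udp_tunnel helpers; my_rcv() and my_err_lookup() are hypothetical:
 *
 *	struct udp_tunnel_sock_cfg cfg = {
 *		.encap_type	  = 1,
 *		.encap_rcv	  = my_rcv,
 *		.encap_err_lookup = my_err_lookup,
 *	};
 *
 *	setup_udp_tunnel_sock(net, sock, &cfg);
 *
 * __udp6_lib_err_encap() can then hand an ICMPv6 error for the tunnel
 * port to my_err_lookup(), which returns 0 if the error matches one of
 * the tunnel's flows and nonzero to keep searching.
 */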
545
546int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
547 u8 type, u8 code, int offset, __be32 info,
548 struct udp_table *udptable)
549{
550 struct ipv6_pinfo *np;
551 const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
552 const struct in6_addr *saddr = &hdr->saddr;
553 const struct in6_addr *daddr = &hdr->daddr;
554 struct udphdr *uh = (struct udphdr *)(skb->data+offset);
555 bool tunnel = false;
556 struct sock *sk;
557 int harderr;
558 int err;
559 struct net *net = dev_net(skb->dev);
560
561 sk = __udp6_lib_lookup(net, daddr, uh->dest, saddr, uh->source,
562 inet6_iif(skb), inet6_sdif(skb), udptable, NULL);
563 if (!sk) {
564 /* No socket for error: try tunnels before discarding */
565 sk = ERR_PTR(-ENOENT);
566 if (static_branch_unlikely(&udpv6_encap_needed_key)) {
567 sk = __udp6_lib_err_encap(net, hdr, offset, uh,
568 udptable, skb,
569 opt, type, code, info);
570 if (!sk)
571 return 0;
572 }
573
574 if (IS_ERR(sk)) {
575 __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
576 ICMP6_MIB_INERRORS);
577 return PTR_ERR(sk);
578 }
579
580 tunnel = true;
581 }
582
583 harderr = icmpv6_err_convert(type, code, &err);
584 np = inet6_sk(sk);
585
586 if (type == ICMPV6_PKT_TOOBIG) {
587 if (!ip6_sk_accept_pmtu(sk))
588 goto out;
589 ip6_sk_update_pmtu(skb, sk, info);
590 if (np->pmtudisc != IPV6_PMTUDISC_DONT)
591 harderr = 1;
592 }
593 if (type == NDISC_REDIRECT) {
594 if (tunnel) {
595 ip6_redirect(skb, sock_net(sk), inet6_iif(skb),
596 sk->sk_mark, sk->sk_uid);
597 } else {
598 ip6_sk_redirect(skb, sk);
599 }
600 goto out;
601 }
602
603 /* Tunnels don't have an application socket: don't pass errors back */
604 if (tunnel)
605 goto out;
606
607 if (!np->recverr) {
608 if (!harderr || sk->sk_state != TCP_ESTABLISHED)
609 goto out;
610 } else {
611 ipv6_icmp_error(sk, skb, err, uh->dest, ntohl(info), (u8 *)(uh+1));
612 }
613
614 sk->sk_err = err;
615 sk->sk_error_report(sk);
616out:
617 return 0;
618}
619
620static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
621{
622 int rc;
623
624 if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
625 sock_rps_save_rxhash(sk, skb);
626 sk_mark_napi_id(sk, skb);
627 sk_incoming_cpu_update(sk);
628 } else {
629 sk_mark_napi_id_once(sk, skb);
630 }
631
632 rc = __udp_enqueue_schedule_skb(sk, skb);
633 if (rc < 0) {
634 int is_udplite = IS_UDPLITE(sk);
635
636 /* Note that an ENOMEM error is charged twice */
637 if (rc == -ENOMEM)
638 UDP6_INC_STATS(sock_net(sk),
639 UDP_MIB_RCVBUFERRORS, is_udplite);
640 UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
641 kfree_skb(skb);
642 return -1;
643 }
644
645 return 0;
646}
647
648static __inline__ int udpv6_err(struct sk_buff *skb,
649 struct inet6_skb_parm *opt, u8 type,
650 u8 code, int offset, __be32 info)
651{
652 return __udp6_lib_err(skb, opt, type, code, offset, info, &udp_table);
653}
654
655static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
656{
657 struct udp_sock *up = udp_sk(sk);
658 int is_udplite = IS_UDPLITE(sk);
659
660 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
661 goto drop;
662
663 if (static_branch_unlikely(&udpv6_encap_needed_key) && up->encap_type) {
664 int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
665
666 /*
667 * This is an encapsulation socket so pass the skb to
668 * the socket's udp_encap_rcv() hook. Otherwise, just
669 * fall through and pass this up to the UDP socket.
670 * up->encap_rcv() returns the following value:
671 * =0 if skb was successfully passed to the encap
672 * handler or was discarded by it.
673 * >0 if skb should be passed on to UDP.
674 * <0 if skb should be resubmitted as proto -N
675 */
676
677 /* if we're overly short, let UDP handle it */
678 encap_rcv = READ_ONCE(up->encap_rcv);
679 if (encap_rcv) {
680 int ret;
681
682 /* Verify checksum before giving to encap */
683 if (udp_lib_checksum_complete(skb))
684 goto csum_error;
685
686 ret = encap_rcv(sk, skb);
687 if (ret <= 0) {
688 __UDP6_INC_STATS(sock_net(sk),
689 UDP_MIB_INDATAGRAMS,
690 is_udplite);
691 return -ret;
692 }
693 }
694
695 /* FALLTHROUGH -- it's a UDP packet */
696 }
697
698 /*
699 * UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c).
700 */
701 if ((up->pcflag & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {
702
703 if (up->pcrlen == 0) { /* full coverage was set */
704 net_dbg_ratelimited("UDPLITE6: partial coverage %d while full coverage %d requested\n",
705 UDP_SKB_CB(skb)->cscov, skb->len);
706 goto drop;
707 }
708 if (UDP_SKB_CB(skb)->cscov < up->pcrlen) {
709 net_dbg_ratelimited("UDPLITE6: coverage %d too small, need min %d\n",
710 UDP_SKB_CB(skb)->cscov, up->pcrlen);
711 goto drop;
712 }
713 }
714
715 prefetch(&sk->sk_rmem_alloc);
716 if (rcu_access_pointer(sk->sk_filter) &&
717 udp_lib_checksum_complete(skb))
718 goto csum_error;
719
720 if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr)))
721 goto drop;
722
723 udp_csum_pull_header(skb);
724
725 skb_dst_drop(skb);
726
727 return __udpv6_queue_rcv_skb(sk, skb);
728
729csum_error:
730 __UDP6_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
731drop:
732 __UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
733 atomic_inc(&sk->sk_drops);
734 kfree_skb(skb);
735 return -1;
736}
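
/* A rough userspace sketch of the UDP-Lite coverage checks above: the
 * receiver demands a minimum sender-side checksum coverage, and datagrams
 * whose coverage (cscov) falls short are dropped by this function:
 *
 *	int fd = socket(AF_INET6, SOCK_DGRAM, IPPROTO_UDPLITE);
 *	int cscov = 20;
 *
 *	setsockopt(fd, IPPROTO_UDPLITE, UDPLITE_RECV_CSCOV,
 *		   &cscov, sizeof(cscov));
 */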
737
738static int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
739{
740 struct sk_buff *next, *segs;
741 int ret;
742
743 if (likely(!udp_unexpected_gso(sk, skb)))
744 return udpv6_queue_rcv_one_skb(sk, skb);
745
746 __skb_push(skb, -skb_mac_offset(skb));
747 segs = udp_rcv_segment(sk, skb, false);
748 skb_list_walk_safe(segs, skb, next) {
749 __skb_pull(skb, skb_transport_offset(skb));
750
751 ret = udpv6_queue_rcv_one_skb(sk, skb);
752 if (ret > 0)
753 ip6_protocol_deliver_rcu(dev_net(skb->dev), skb, ret,
754 true);
755 }
756 return 0;
757}
758
759static bool __udp_v6_is_mcast_sock(struct net *net, struct sock *sk,
760 __be16 loc_port, const struct in6_addr *loc_addr,
761 __be16 rmt_port, const struct in6_addr *rmt_addr,
762 int dif, int sdif, unsigned short hnum)
763{
764 struct inet_sock *inet = inet_sk(sk);
765
766 if (!net_eq(sock_net(sk), net))
767 return false;
768
769 if (udp_sk(sk)->udp_port_hash != hnum ||
770 sk->sk_family != PF_INET6 ||
771 (inet->inet_dport && inet->inet_dport != rmt_port) ||
772 (!ipv6_addr_any(&sk->sk_v6_daddr) &&
773 !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr)) ||
774 !udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif) ||
775 (!ipv6_addr_any(&sk->sk_v6_rcv_saddr) &&
776 !ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr)))
777 return false;
778 if (!inet6_mc_check(sk, loc_addr, rmt_addr))
779 return false;
780 return true;
781}
782
783static void udp6_csum_zero_error(struct sk_buff *skb)
784{
785 /* RFC 2460 section 8.1 says that we SHOULD log
786 * this error. Well, it is reasonable.
787 */
788 net_dbg_ratelimited("IPv6: udp checksum is 0 for [%pI6c]:%u->[%pI6c]:%u\n",
789 &ipv6_hdr(skb)->saddr, ntohs(udp_hdr(skb)->source),
790 &ipv6_hdr(skb)->daddr, ntohs(udp_hdr(skb)->dest));
791}
792
793/*
794 * Note: called only from the BH handler context,
795 * so we don't need to lock the hashes.
796 */
797static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
798 const struct in6_addr *saddr, const struct in6_addr *daddr,
799 struct udp_table *udptable, int proto)
800{
801 struct sock *sk, *first = NULL;
802 const struct udphdr *uh = udp_hdr(skb);
803 unsigned short hnum = ntohs(uh->dest);
804 struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
805 unsigned int offset = offsetof(typeof(*sk), sk_node);
806 unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
807 int dif = inet6_iif(skb);
808 int sdif = inet6_sdif(skb);
809 struct hlist_node *node;
810 struct sk_buff *nskb;
811
812 if (use_hash2) {
813 hash2_any = ipv6_portaddr_hash(net, &in6addr_any, hnum) &
814 udptable->mask;
815 hash2 = ipv6_portaddr_hash(net, daddr, hnum) & udptable->mask;
816start_lookup:
817 hslot = &udptable->hash2[hash2];
818 offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
819 }
820
821 sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) {
822 if (!__udp_v6_is_mcast_sock(net, sk, uh->dest, daddr,
823 uh->source, saddr, dif, sdif,
824 hnum))
825 continue;
826 /* If the checksum is zero and the socket does not
827 * allow zero checksums (no_check6_rx), skip it.
828 */
829 if (!uh->check && !udp_sk(sk)->no_check6_rx)
830 continue;
831 if (!first) {
832 first = sk;
833 continue;
834 }
835 nskb = skb_clone(skb, GFP_ATOMIC);
836 if (unlikely(!nskb)) {
837 atomic_inc(&sk->sk_drops);
838 __UDP6_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
839 IS_UDPLITE(sk));
840 __UDP6_INC_STATS(net, UDP_MIB_INERRORS,
841 IS_UDPLITE(sk));
842 continue;
843 }
844
845 if (udpv6_queue_rcv_skb(sk, nskb) > 0)
846 consume_skb(nskb);
847 }
848
849 /* Also lookup *:port if we are using hash2 and haven't done so yet. */
850 if (use_hash2 && hash2 != hash2_any) {
851 hash2 = hash2_any;
852 goto start_lookup;
853 }
854
855 if (first) {
856 if (udpv6_queue_rcv_skb(first, skb) > 0)
857 consume_skb(skb);
858 } else {
859 kfree_skb(skb);
860 __UDP6_INC_STATS(net, UDP_MIB_IGNOREDMULTI,
861 proto == IPPROTO_UDPLITE);
862 }
863 return 0;
864}
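
/* Delivery note for the loop above: every matching socket except the
 * first receives a clone of the skb, while the original is handed to the
 * first match at the end, so the common single-listener case gets by
 * without any cloning.
 */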
865
866static void udp6_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
867{
868 if (udp_sk_rx_dst_set(sk, dst)) {
869 const struct rt6_info *rt = (const struct rt6_info *)dst;
870
871 inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
872 }
873}
874
875/* wrapper for udpv6_queue_rcv_skb taking care of csum conversion and
876 * return code conversion for IP layer consumption
877 */
878static int udp6_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
879 struct udphdr *uh)
880{
881 int ret;
882
883 if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
884 skb_checksum_try_convert(skb, IPPROTO_UDP, ip6_compute_pseudo);
885
886 ret = udpv6_queue_rcv_skb(sk, skb);
887
888 /* a return value > 0 means to resubmit the input */
889 if (ret > 0)
890 return ret;
891 return 0;
892}
893
894int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
895 int proto)
896{
897 const struct in6_addr *saddr, *daddr;
898 struct net *net = dev_net(skb->dev);
899 struct udphdr *uh;
900 struct sock *sk;
901 bool refcounted;
902 u32 ulen = 0;
903
904 if (!pskb_may_pull(skb, sizeof(struct udphdr)))
905 goto discard;
906
907 saddr = &ipv6_hdr(skb)->saddr;
908 daddr = &ipv6_hdr(skb)->daddr;
909 uh = udp_hdr(skb);
910
911 ulen = ntohs(uh->len);
912 if (ulen > skb->len)
913 goto short_packet;
914
915 if (proto == IPPROTO_UDP) {
916 /* UDP validates ulen. */
917
918 /* Check for jumbo payload */
919 if (ulen == 0)
920 ulen = skb->len;
921
922 if (ulen < sizeof(*uh))
923 goto short_packet;
924
925 if (ulen < skb->len) {
926 if (pskb_trim_rcsum(skb, ulen))
927 goto short_packet;
928 saddr = &ipv6_hdr(skb)->saddr;
929 daddr = &ipv6_hdr(skb)->daddr;
930 uh = udp_hdr(skb);
931 }
932 }
933
934 if (udp6_csum_init(skb, uh, proto))
935 goto csum_error;
936
937 /* Check if the socket is already available, e.g. due to early demux */
938 sk = skb_steal_sock(skb, &refcounted);
939 if (sk) {
940 struct dst_entry *dst = skb_dst(skb);
941 int ret;
942
943 if (unlikely(sk->sk_rx_dst != dst))
944 udp6_sk_rx_dst_set(sk, dst);
945
946 if (!uh->check && !udp_sk(sk)->no_check6_rx) {
947 if (refcounted)
948 sock_put(sk);
949 goto report_csum_error;
950 }
951
952 ret = udp6_unicast_rcv_skb(sk, skb, uh);
953 if (refcounted)
954 sock_put(sk);
955 return ret;
956 }
957
958 /*
959 * Multicast receive code
960 */
961 if (ipv6_addr_is_multicast(daddr))
962 return __udp6_lib_mcast_deliver(net, skb,
963 saddr, daddr, udptable, proto);
964
965 /* Unicast */
966 sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
967 if (sk) {
968 if (!uh->check && !udp_sk(sk)->no_check6_rx)
969 goto report_csum_error;
970 return udp6_unicast_rcv_skb(sk, skb, uh);
971 }
972
973 if (!uh->check)
974 goto report_csum_error;
975
976 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
977 goto discard;
978
979 if (udp_lib_checksum_complete(skb))
980 goto csum_error;
981
982 __UDP6_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
983 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
984
985 kfree_skb(skb);
986 return 0;
987
988short_packet:
989 net_dbg_ratelimited("UDP%sv6: short packet: From [%pI6c]:%u %d/%d to [%pI6c]:%u\n",
990 proto == IPPROTO_UDPLITE ? "-Lite" : "",
991 saddr, ntohs(uh->source),
992 ulen, skb->len,
993 daddr, ntohs(uh->dest));
994 goto discard;
995
996report_csum_error:
997 udp6_csum_zero_error(skb);
998csum_error:
999 __UDP6_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
1000discard:
1001 __UDP6_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
1002 kfree_skb(skb);
1003 return 0;
1004}
1005
1007static struct sock *__udp6_lib_demux_lookup(struct net *net,
1008 __be16 loc_port, const struct in6_addr *loc_addr,
1009 __be16 rmt_port, const struct in6_addr *rmt_addr,
1010 int dif, int sdif)
1011{
1012 unsigned short hnum = ntohs(loc_port);
1013 unsigned int hash2 = ipv6_portaddr_hash(net, loc_addr, hnum);
1014 unsigned int slot2 = hash2 & udp_table.mask;
1015 struct udp_hslot *hslot2 = &udp_table.hash2[slot2];
1016 const __portpair ports = INET_COMBINED_PORTS(rmt_port, hnum);
1017 struct sock *sk;
1018
1019 udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
1020 if (sk->sk_state == TCP_ESTABLISHED &&
1021 INET6_MATCH(sk, net, rmt_addr, loc_addr, ports, dif, sdif))
1022 return sk;
1023 /* Only check first socket in chain */
1024 break;
1025 }
1026 return NULL;
1027}
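
/* Only the first socket in the slot chain is examined above: early demux
 * is purely an optimization, and if that socket doesn't match, the full
 * lookup in __udp6_lib_rcv() still runs later, so walking a shared chain
 * here would only tax the common case.
 */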
1028
1029INDIRECT_CALLABLE_SCOPE void udp_v6_early_demux(struct sk_buff *skb)
1030{
1031 struct net *net = dev_net(skb->dev);
1032 const struct udphdr *uh;
1033 struct sock *sk;
1034 struct dst_entry *dst;
1035 int dif = skb->dev->ifindex;
1036 int sdif = inet6_sdif(skb);
1037
1038 if (!pskb_may_pull(skb, skb_transport_offset(skb) +
1039 sizeof(struct udphdr)))
1040 return;
1041
1042 uh = udp_hdr(skb);
1043
1044 if (skb->pkt_type == PACKET_HOST)
1045 sk = __udp6_lib_demux_lookup(net, uh->dest,
1046 &ipv6_hdr(skb)->daddr,
1047 uh->source, &ipv6_hdr(skb)->saddr,
1048 dif, sdif);
1049 else
1050 return;
1051
1052 if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
1053 return;
1054
1055 skb->sk = sk;
1056 skb->destructor = sock_efree;
1057 dst = READ_ONCE(sk->sk_rx_dst);
1058
1059 if (dst)
1060 dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
1061 if (dst) {
1062 /* Set noref for now:
1063 * any place that wants to hold the dst has to call
1064 * dst_hold_safe().
1065 */
1066 skb_dst_set_noref(skb, dst);
1067 }
1068}
1069
1070INDIRECT_CALLABLE_SCOPE int udpv6_rcv(struct sk_buff *skb)
1071{
1072 return __udp6_lib_rcv(skb, &udp_table, IPPROTO_UDP);
1073}
1074
1075/*
1076 * Throw away all pending data and cancel the corking. Socket is locked.
1077 */
1078static void udp_v6_flush_pending_frames(struct sock *sk)
1079{
1080 struct udp_sock *up = udp_sk(sk);
1081
1082 if (up->pending == AF_INET)
1083 udp_flush_pending_frames(sk);
1084 else if (up->pending) {
1085 up->len = 0;
1086 up->pending = 0;
1087 ip6_flush_pending_frames(sk);
1088 }
1089}
1090
1091static int udpv6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
1092 int addr_len)
1093{
1094 if (addr_len < offsetofend(struct sockaddr, sa_family))
1095 return -EINVAL;
1096 /* The following checks are replicated from __ip6_datagram_connect()
1097 * and intended to prevent the BPF program called below from accessing
1098 * bytes that are outside the bounds specified by the user in addr_len.
1099 */
1100 if (uaddr->sa_family == AF_INET) {
1101 if (__ipv6_only_sock(sk))
1102 return -EAFNOSUPPORT;
1103 return udp_pre_connect(sk, uaddr, addr_len);
1104 }
1105
1106 if (addr_len < SIN6_LEN_RFC2133)
1107 return -EINVAL;
1108
1109 return BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr);
1110}
1111
1112/**
1113 * udp6_hwcsum_outgoing - handle outgoing HW checksumming
1114 * @sk: socket we are sending on
1115 * @skb: sk_buff containing the filled-in UDP header
1116 * (checksum field must be zeroed out)
1117 * @saddr: source address
1118 * @daddr: destination address
1119 * @len: length of packet
1120 */
1121static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
1122 const struct in6_addr *saddr,
1123 const struct in6_addr *daddr, int len)
1124{
1125 unsigned int offset;
1126 struct udphdr *uh = udp_hdr(skb);
1127 struct sk_buff *frags = skb_shinfo(skb)->frag_list;
1128 __wsum csum = 0;
1129
1130 if (!frags) {
1131 /* Only one fragment on the socket. */
1132 skb->csum_start = skb_transport_header(skb) - skb->head;
1133 skb->csum_offset = offsetof(struct udphdr, check);
1134 uh->check = ~csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP, 0);
1135 } else {
1136 /*
1137 * HW checksumming won't work here: there are two or more
1138 * fragments on the socket, so the csums of all sk_buffs
1139 * have to be folded together
1140 */
1141 offset = skb_transport_offset(skb);
1142 skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
1143 csum = skb->csum;
1144
1145 skb->ip_summed = CHECKSUM_NONE;
1146
1147 do {
1148 csum = csum_add(csum, frags->csum);
1149 } while ((frags = frags->next));
1150
1151 uh->check = csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP,
1152 csum);
1153 if (uh->check == 0)
1154 uh->check = CSUM_MANGLED_0;
1155 }
1156}
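
/* The two branches above differ in who folds the payload: with a single
 * skb, the device checksums from csum_start and stores the result at
 * csum_offset, seeded by the inverted pseudo-header sum; with a frag_list,
 * the kernel sums every fragment itself and writes the final value,
 * substituting CSUM_MANGLED_0 (0xffff) for 0, since an all-zero checksum
 * field means no checksum was computed.
 */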
1157
1158/*
1159 * Sending
1160 */
1161
1162static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
1163 struct inet_cork *cork)
1164{
1165 struct sock *sk = skb->sk;
1166 struct udphdr *uh;
1167 int err = 0;
1168 int is_udplite = IS_UDPLITE(sk);
1169 __wsum csum = 0;
1170 int offset = skb_transport_offset(skb);
1171 int len = skb->len - offset;
1172 int datalen = len - sizeof(*uh);
1173
1174 /*
1175 * Create a UDP header
1176 */
1177 uh = udp_hdr(skb);
1178 uh->source = fl6->fl6_sport;
1179 uh->dest = fl6->fl6_dport;
1180 uh->len = htons(len);
1181 uh->check = 0;
1182
1183 if (cork->gso_size) {
1184 const int hlen = skb_network_header_len(skb) +
1185 sizeof(struct udphdr);
1186
1187 if (hlen + cork->gso_size > cork->fragsize) {
1188 kfree_skb(skb);
1189 return -EINVAL;
1190 }
1191 if (datalen > cork->gso_size * UDP_MAX_SEGMENTS) {
1192 kfree_skb(skb);
1193 return -EINVAL;
1194 }
1195 if (udp_sk(sk)->no_check6_tx) {
1196 kfree_skb(skb);
1197 return -EINVAL;
1198 }
1199 if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
1200 dst_xfrm(skb_dst(skb))) {
1201 kfree_skb(skb);
1202 return -EIO;
1203 }
1204
1205 if (datalen > cork->gso_size) {
1206 skb_shinfo(skb)->gso_size = cork->gso_size;
1207 skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
1208 skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen,
1209 cork->gso_size);
1210 }
1211 goto csum_partial;
1212 }
1213
1214 if (is_udplite)
1215 csum = udplite_csum(skb);
1216 else if (udp_sk(sk)->no_check6_tx) { /* UDP csum disabled */
1217 skb->ip_summed = CHECKSUM_NONE;
1218 goto send;
1219 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
1220csum_partial:
1221 udp6_hwcsum_outgoing(sk, skb, &fl6->saddr, &fl6->daddr, len);
1222 goto send;
1223 } else
1224 csum = udp_csum(skb);
1225
1226 /* add protocol-dependent pseudo-header */
1227 uh->check = csum_ipv6_magic(&fl6->saddr, &fl6->daddr,
1228 len, fl6->flowi6_proto, csum);
1229 if (uh->check == 0)
1230 uh->check = CSUM_MANGLED_0;
1231
1232send:
1233 err = ip6_send_skb(skb);
1234 if (err) {
1235 if (err == -ENOBUFS && !inet6_sk(sk)->recverr) {
1236 UDP6_INC_STATS(sock_net(sk),
1237 UDP_MIB_SNDBUFERRORS, is_udplite);
1238 err = 0;
1239 }
1240 } else {
1241 UDP6_INC_STATS(sock_net(sk),
1242 UDP_MIB_OUTDATAGRAMS, is_udplite);
1243 }
1244 return err;
1245}
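
/* A rough userspace sketch of the cork->gso_size path above (UDP GSO):
 * one large write is cut into gso-sized datagrams by the stack or NIC.
 * The segment size is hypothetical and must fit within the path MTU:
 *
 *	int gso = 1400;
 *
 *	setsockopt(fd, SOL_UDP, UDP_SEGMENT, &gso, sizeof(gso));
 *	send(fd, buf, 64 * 1024, 0);
 *
 * The same knob also exists per call as a cmsg (SOL_UDP, UDP_SEGMENT,
 * carrying a __u16 segment size).
 */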
1246
1247static int udp_v6_push_pending_frames(struct sock *sk)
1248{
1249 struct sk_buff *skb;
1250 struct udp_sock *up = udp_sk(sk);
1251 struct flowi6 fl6;
1252 int err = 0;
1253
1254 if (up->pending == AF_INET)
1255 return udp_push_pending_frames(sk);
1256
1257 /* ip6_finish_skb will release the cork, so make a copy of
1258 * fl6 here.
1259 */
1260 fl6 = inet_sk(sk)->cork.fl.u.ip6;
1261
1262 skb = ip6_finish_skb(sk);
1263 if (!skb)
1264 goto out;
1265
1266 err = udp_v6_send_skb(skb, &fl6, &inet_sk(sk)->cork.base);
1267
1268out:
1269 up->len = 0;
1270 up->pending = 0;
1271 return err;
1272}
1273
1274int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
1275{
1276 struct ipv6_txoptions opt_space;
1277 struct udp_sock *up = udp_sk(sk);
1278 struct inet_sock *inet = inet_sk(sk);
1279 struct ipv6_pinfo *np = inet6_sk(sk);
1280 DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
1281 struct in6_addr *daddr, *final_p, final;
1282 struct ipv6_txoptions *opt = NULL;
1283 struct ipv6_txoptions *opt_to_free = NULL;
1284 struct ip6_flowlabel *flowlabel = NULL;
1285 struct flowi6 fl6;
1286 struct dst_entry *dst;
1287 struct ipcm6_cookie ipc6;
1288 int addr_len = msg->msg_namelen;
1289 bool connected = false;
1290 int ulen = len;
1291 int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
1292 int err;
1293 int is_udplite = IS_UDPLITE(sk);
1294 int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
1295
1296 ipcm6_init(&ipc6);
1297 ipc6.gso_size = up->gso_size;
1298 ipc6.sockc.tsflags = sk->sk_tsflags;
1299 ipc6.sockc.mark = sk->sk_mark;
1300
1301 /* destination address check */
1302 if (sin6) {
1303 if (addr_len < offsetof(struct sockaddr, sa_data))
1304 return -EINVAL;
1305
1306 switch (sin6->sin6_family) {
1307 case AF_INET6:
1308 if (addr_len < SIN6_LEN_RFC2133)
1309 return -EINVAL;
1310 daddr = &sin6->sin6_addr;
1311 if (ipv6_addr_any(daddr) &&
1312 ipv6_addr_v4mapped(&np->saddr))
1313 ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
1314 daddr);
1315 break;
1316 case AF_INET:
1317 goto do_udp_sendmsg;
1318 case AF_UNSPEC:
1319 msg->msg_name = sin6 = NULL;
1320 msg->msg_namelen = addr_len = 0;
1321 daddr = NULL;
1322 break;
1323 default:
1324 return -EINVAL;
1325 }
1326 } else if (!up->pending) {
1327 if (sk->sk_state != TCP_ESTABLISHED)
1328 return -EDESTADDRREQ;
1329 daddr = &sk->sk_v6_daddr;
1330 } else
1331 daddr = NULL;
1332
1333 if (daddr) {
1334 if (ipv6_addr_v4mapped(daddr)) {
1335 struct sockaddr_in sin;
1336 sin.sin_family = AF_INET;
1337 sin.sin_port = sin6 ? sin6->sin6_port : inet->inet_dport;
1338 sin.sin_addr.s_addr = daddr->s6_addr32[3];
1339 msg->msg_name = &sin;
1340 msg->msg_namelen = sizeof(sin);
1341do_udp_sendmsg:
1342 if (__ipv6_only_sock(sk))
1343 return -ENETUNREACH;
1344 return udp_sendmsg(sk, msg, len);
1345 }
1346 }
1347
1348 if (up->pending == AF_INET)
1349 return udp_sendmsg(sk, msg, len);
1350
1351 /* Rough check on arithmetic overflow;
1352 a better check is made in ip6_append_data().
1353 */
1354 if (len > INT_MAX - sizeof(struct udphdr))
1355 return -EMSGSIZE;
1356
1357 getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
1358 if (up->pending) {
1359 /*
1360 * There are pending frames.
1361 * The socket lock must be held while it's corked.
1362 */
1363 lock_sock(sk);
1364 if (likely(up->pending)) {
1365 if (unlikely(up->pending != AF_INET6)) {
1366 release_sock(sk);
1367 return -EAFNOSUPPORT;
1368 }
1369 dst = NULL;
1370 goto do_append_data;
1371 }
1372 release_sock(sk);
1373 }
1374 ulen += sizeof(struct udphdr);
1375
1376 memset(&fl6, 0, sizeof(fl6));
1377
1378 if (sin6) {
1379 if (sin6->sin6_port == 0)
1380 return -EINVAL;
1381
1382 fl6.fl6_dport = sin6->sin6_port;
1383 daddr = &sin6->sin6_addr;
1384
1385 if (np->sndflow) {
1386 fl6.flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK;
1387 if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
1388 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
1389 if (IS_ERR(flowlabel))
1390 return -EINVAL;
1391 }
1392 }
1393
1394 /*
1395 * Otherwise it will be difficult to maintain
1396 * sk->sk_dst_cache.
1397 */
1398 if (sk->sk_state == TCP_ESTABLISHED &&
1399 ipv6_addr_equal(daddr, &sk->sk_v6_daddr))
1400 daddr = &sk->sk_v6_daddr;
1401
1402 if (addr_len >= sizeof(struct sockaddr_in6) &&
1403 sin6->sin6_scope_id &&
1404 __ipv6_addr_needs_scope_id(__ipv6_addr_type(daddr)))
1405 fl6.flowi6_oif = sin6->sin6_scope_id;
1406 } else {
1407 if (sk->sk_state != TCP_ESTABLISHED)
1408 return -EDESTADDRREQ;
1409
1410 fl6.fl6_dport = inet->inet_dport;
1411 daddr = &sk->sk_v6_daddr;
1412 fl6.flowlabel = np->flow_label;
1413 connected = true;
1414 }
1415
1416 if (!fl6.flowi6_oif)
1417 fl6.flowi6_oif = sk->sk_bound_dev_if;
1418
1419 if (!fl6.flowi6_oif)
1420 fl6.flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
1421
1422 fl6.flowi6_mark = ipc6.sockc.mark;
1423 fl6.flowi6_uid = sk->sk_uid;
1424
1425 if (msg->msg_controllen) {
1426 opt = &opt_space;
1427 memset(opt, 0, sizeof(struct ipv6_txoptions));
1428 opt->tot_len = sizeof(*opt);
1429 ipc6.opt = opt;
1430
1431 err = udp_cmsg_send(sk, msg, &ipc6.gso_size);
1432 if (err > 0)
1433 err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6,
1434 &ipc6);
1435 if (err < 0) {
1436 fl6_sock_release(flowlabel);
1437 return err;
1438 }
1439 if ((fl6.flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) {
1440 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
1441 if (IS_ERR(flowlabel))
1442 return -EINVAL;
1443 }
1444 if (!(opt->opt_nflen|opt->opt_flen))
1445 opt = NULL;
1446 connected = false;
1447 }
1448 if (!opt) {
1449 opt = txopt_get(np);
1450 opt_to_free = opt;
1451 }
1452 if (flowlabel)
1453 opt = fl6_merge_options(&opt_space, flowlabel, opt);
1454 opt = ipv6_fixup_options(&opt_space, opt);
1455 ipc6.opt = opt;
1456
1457 fl6.flowi6_proto = sk->sk_protocol;
1458 fl6.daddr = *daddr;
1459 if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr))
1460 fl6.saddr = np->saddr;
1461 fl6.fl6_sport = inet->inet_sport;
1462
1463 if (cgroup_bpf_enabled && !connected) {
1464 err = BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk,
1465 (struct sockaddr *)sin6, &fl6.saddr);
1466 if (err)
1467 goto out_no_dst;
1468 if (sin6) {
1469 if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
1470 /* The BPF program rewrote the IPv6 destination to an
1471 * IPv4-mapped IPv6 address, which is currently unsupported.
1472 */
1473 err = -ENOTSUPP;
1474 goto out_no_dst;
1475 }
1476 if (sin6->sin6_port == 0) {
1477 /* BPF program set invalid port. Reject it. */
1478 err = -EINVAL;
1479 goto out_no_dst;
1480 }
1481 fl6.fl6_dport = sin6->sin6_port;
1482 fl6.daddr = sin6->sin6_addr;
1483 }
1484 }
1485
1486 if (ipv6_addr_any(&fl6.daddr))
1487 fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
1488
1489 final_p = fl6_update_dst(&fl6, opt, &final);
1490 if (final_p)
1491 connected = false;
1492
1493 if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr)) {
1494 fl6.flowi6_oif = np->mcast_oif;
1495 connected = false;
1496 } else if (!fl6.flowi6_oif)
1497 fl6.flowi6_oif = np->ucast_oif;
1498
1499 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
1500
1501 if (ipc6.tclass < 0)
1502 ipc6.tclass = np->tclass;
1503
1504 fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel);
1505
1506 dst = ip6_sk_dst_lookup_flow(sk, &fl6, final_p, connected);
1507 if (IS_ERR(dst)) {
1508 err = PTR_ERR(dst);
1509 dst = NULL;
1510 goto out;
1511 }
1512
1513 if (ipc6.hlimit < 0)
1514 ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
1515
1516 if (msg->msg_flags&MSG_CONFIRM)
1517 goto do_confirm;
1518back_from_confirm:
1519
1520 /* Lockless fast path for the non-corking case */
1521 if (!corkreq) {
1522 struct inet_cork_full cork;
1523 struct sk_buff *skb;
1524
1525 skb = ip6_make_skb(sk, getfrag, msg, ulen,
1526 sizeof(struct udphdr), &ipc6,
1527 &fl6, (struct rt6_info *)dst,
1528 msg->msg_flags, &cork);
1529 err = PTR_ERR(skb);
1530 if (!IS_ERR_OR_NULL(skb))
1531 err = udp_v6_send_skb(skb, &fl6, &cork.base);
1532 goto out;
1533 }
1534
1535 lock_sock(sk);
1536 if (unlikely(up->pending)) {
1537 /* The socket is already corked while preparing it. */
1538 /* ... which is an evident application bug. --ANK */
1539 release_sock(sk);
1540
1541 net_dbg_ratelimited("udp cork app bug 2\n");
1542 err = -EINVAL;
1543 goto out;
1544 }
1545
1546 up->pending = AF_INET6;
1547
1548do_append_data:
1549 if (ipc6.dontfrag < 0)
1550 ipc6.dontfrag = np->dontfrag;
1551 up->len += ulen;
1552 err = ip6_append_data(sk, getfrag, msg, ulen, sizeof(struct udphdr),
1553 &ipc6, &fl6, (struct rt6_info *)dst,
1554 corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
1555 if (err)
1556 udp_v6_flush_pending_frames(sk);
1557 else if (!corkreq)
1558 err = udp_v6_push_pending_frames(sk);
1559 else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
1560 up->pending = 0;
1561
1562 if (err > 0)
1563 err = np->recverr ? net_xmit_errno(err) : 0;
1564 release_sock(sk);
1565
1566out:
1567 dst_release(dst);
1568out_no_dst:
1569 fl6_sock_release(flowlabel);
1570 txopt_put(opt_to_free);
1571 if (!err)
1572 return len;
1573 /*
1574 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space. Reporting
1575 * ENOBUFS might not be good (it's not tunable per se), but otherwise
1576 * we don't have a good statistic (IpOutDiscards but it can be too many
1577 * things). We could add another new stat but at least for now that
1578 * seems like overkill.
1579 */
1580 if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
1581 UDP6_INC_STATS(sock_net(sk),
1582 UDP_MIB_SNDBUFERRORS, is_udplite);
1583 }
1584 return err;
1585
1586do_confirm:
1587 if (msg->msg_flags & MSG_PROBE)
1588 dst_confirm_neigh(dst, &fl6.daddr);
1589 if (!(msg->msg_flags&MSG_PROBE) || len)
1590 goto back_from_confirm;
1591 err = 0;
1592 goto out;
1593}
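
/* A rough userspace sketch of the address dispatch at the top of
 * udpv6_sendmsg(): a send to an IPv4-mapped destination on a dual-stack
 * socket is redirected to udp_sendmsg(), i.e. plain IPv4 output:
 *
 *	struct sockaddr_in6 dst = { .sin6_family = AF_INET6,
 *				    .sin6_port = htons(53) };
 *
 *	inet_pton(AF_INET6, "::ffff:192.0.2.1", &dst.sin6_addr);
 *	sendto(fd, buf, len, 0, (struct sockaddr *)&dst, sizeof(dst));
 *
 * With IPV6_V6ONLY set, the same call fails with ENETUNREACH instead
 * (see the __ipv6_only_sock() check above).
 */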
1594
1595void udpv6_destroy_sock(struct sock *sk)
1596{
1597 struct udp_sock *up = udp_sk(sk);
1598 lock_sock(sk);
1599 udp_v6_flush_pending_frames(sk);
1600 release_sock(sk);
1601
1602 if (static_branch_unlikely(&udpv6_encap_needed_key)) {
1603 if (up->encap_type) {
1604 void (*encap_destroy)(struct sock *sk);
1605 encap_destroy = READ_ONCE(up->encap_destroy);
1606 if (encap_destroy)
1607 encap_destroy(sk);
1608 }
1609 if (up->encap_enabled)
1610 static_branch_dec(&udpv6_encap_needed_key);
1611 }
1612
1613 inet6_destroy_sock(sk);
1614}
1615
1616/*
1617 * Socket option code for UDP
1618 */
1619int udpv6_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
1620 unsigned int optlen)
1621{
1622 if (level == SOL_UDP || level == SOL_UDPLITE)
1623 return udp_lib_setsockopt(sk, level, optname,
1624 optval, optlen,
1625 udp_v6_push_pending_frames);
1626 return ipv6_setsockopt(sk, level, optname, optval, optlen);
1627}
1628
1629int udpv6_getsockopt(struct sock *sk, int level, int optname,
1630 char __user *optval, int __user *optlen)
1631{
1632 if (level == SOL_UDP || level == SOL_UDPLITE)
1633 return udp_lib_getsockopt(sk, level, optname, optval, optlen);
1634 return ipv6_getsockopt(sk, level, optname, optval, optlen);
1635}
1636
1637/* thinking of making this const? Don't.
1638 * early_demux can change based on sysctl.
1639 */
1640static struct inet6_protocol udpv6_protocol = {
1641 .early_demux = udp_v6_early_demux,
1642 .early_demux_handler = udp_v6_early_demux,
1643 .handler = udpv6_rcv,
1644 .err_handler = udpv6_err,
1645 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
1646};
1647
1648/* ------------------------------------------------------------------------ */
1649#ifdef CONFIG_PROC_FS
1650int udp6_seq_show(struct seq_file *seq, void *v)
1651{
1652 if (v == SEQ_START_TOKEN) {
1653 seq_puts(seq, IPV6_SEQ_DGRAM_HEADER);
1654 } else {
1655 int bucket = ((struct udp_iter_state *)seq->private)->bucket;
1656 struct inet_sock *inet = inet_sk(v);
1657 __u16 srcp = ntohs(inet->inet_sport);
1658 __u16 destp = ntohs(inet->inet_dport);
1659 __ip6_dgram_sock_seq_show(seq, v, srcp, destp,
1660 udp_rqueue_get(v), bucket);
1661 }
1662 return 0;
1663}
1664
1665const struct seq_operations udp6_seq_ops = {
1666 .start = udp_seq_start,
1667 .next = udp_seq_next,
1668 .stop = udp_seq_stop,
1669 .show = udp6_seq_show,
1670};
1671EXPORT_SYMBOL(udp6_seq_ops);
1672
1673static struct udp_seq_afinfo udp6_seq_afinfo = {
1674 .family = AF_INET6,
1675 .udp_table = &udp_table,
1676};
1677
1678int __net_init udp6_proc_init(struct net *net)
1679{
1680 if (!proc_create_net_data("udp6", 0444, net->proc_net, &udp6_seq_ops,
1681 sizeof(struct udp_iter_state), &udp6_seq_afinfo))
1682 return -ENOMEM;
1683 return 0;
1684}
1685
1686void udp6_proc_exit(struct net *net)
1687{
1688 remove_proc_entry("udp6", net->proc_net);
1689}
1690#endif /* CONFIG_PROC_FS */
1691
1692/* ------------------------------------------------------------------------ */
1693
1694struct proto udpv6_prot = {
1695 .name = "UDPv6",
1696 .owner = THIS_MODULE,
1697 .close = udp_lib_close,
1698 .pre_connect = udpv6_pre_connect,
1699 .connect = ip6_datagram_connect,
1700 .disconnect = udp_disconnect,
1701 .ioctl = udp_ioctl,
1702 .init = udp_init_sock,
1703 .destroy = udpv6_destroy_sock,
1704 .setsockopt = udpv6_setsockopt,
1705 .getsockopt = udpv6_getsockopt,
1706 .sendmsg = udpv6_sendmsg,
1707 .recvmsg = udpv6_recvmsg,
1708 .release_cb = ip6_datagram_release_cb,
1709 .hash = udp_lib_hash,
1710 .unhash = udp_lib_unhash,
1711 .rehash = udp_v6_rehash,
1712 .get_port = udp_v6_get_port,
1713 .memory_allocated = &udp_memory_allocated,
1714 .sysctl_mem = sysctl_udp_mem,
1715 .sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_udp_wmem_min),
1716 .sysctl_rmem_offset = offsetof(struct net, ipv4.sysctl_udp_rmem_min),
1717 .obj_size = sizeof(struct udp6_sock),
1718 .h.udp_table = &udp_table,
1719 .diag_destroy = udp_abort,
1720};
1721
1722static struct inet_protosw udpv6_protosw = {
1723 .type = SOCK_DGRAM,
1724 .protocol = IPPROTO_UDP,
1725 .prot = &udpv6_prot,
1726 .ops = &inet6_dgram_ops,
1727 .flags = INET_PROTOSW_PERMANENT,
1728};
1729
1730int __init udpv6_init(void)
1731{
1732 int ret;
1733
1734 ret = inet6_add_protocol(&udpv6_protocol, IPPROTO_UDP);
1735 if (ret)
1736 goto out;
1737
1738 ret = inet6_register_protosw(&udpv6_protosw);
1739 if (ret)
1740 goto out_udpv6_protocol;
1741out:
1742 return ret;
1743
1744out_udpv6_protocol:
1745 inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
1746 goto out;
1747}
1748
1749void udpv6_exit(void)
1750{
1751 inet6_unregister_protosw(&udpv6_protosw);
1752 inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
1753}