// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	UDP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on linux/ipv4/udp.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	Kazunori MIYAZAWA @USAGI:	change process style to use ip6_append_data
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/udp6 to seq_file.
 */
19
20#include <linux/bpf-cgroup.h>
21#include <linux/errno.h>
22#include <linux/types.h>
23#include <linux/socket.h>
24#include <linux/sockios.h>
25#include <linux/net.h>
26#include <linux/in6.h>
27#include <linux/netdevice.h>
28#include <linux/if_arp.h>
29#include <linux/ipv6.h>
30#include <linux/icmpv6.h>
31#include <linux/init.h>
32#include <linux/module.h>
33#include <linux/skbuff.h>
34#include <linux/slab.h>
35#include <linux/uaccess.h>
36#include <linux/indirect_call_wrapper.h>
37#include <trace/events/udp.h>
38
39#include <net/addrconf.h>
40#include <net/ndisc.h>
41#include <net/protocol.h>
42#include <net/transp_v6.h>
43#include <net/ip6_route.h>
44#include <net/raw.h>
45#include <net/seg6.h>
46#include <net/tcp_states.h>
47#include <net/ip6_checksum.h>
48#include <net/ip6_tunnel.h>
49#include <net/xfrm.h>
50#include <net/inet_hashtables.h>
51#include <net/inet6_hashtables.h>
52#include <net/busy_poll.h>
53#include <net/sock_reuseport.h>
54#include <net/gro.h>
55
56#include <linux/proc_fs.h>
57#include <linux/seq_file.h>
58#include <trace/events/skb.h>
59#include "udp_impl.h"
60
static void udpv6_destruct_sock(struct sock *sk)
{
	udp_destruct_common(sk);
	inet6_sock_destruct(sk);
}

int udpv6_init_sock(struct sock *sk)
{
	udp_lib_init_sock(sk);
	sk->sk_destruct = udpv6_destruct_sock;
	set_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags);
	return 0;
}

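/* Four-tuple hash for connected sockets. Only the low 32 bits of the
 * local address feed lhash; the foreign address is jhashed with a
 * boot-time secret, and net_hash_mix() keeps results distinct per
 * network namespace.
 */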
INDIRECT_CALLABLE_SCOPE
u32 udp6_ehashfn(const struct net *net,
		 const struct in6_addr *laddr,
		 const u16 lport,
		 const struct in6_addr *faddr,
		 const __be16 fport)
{
	u32 lhash, fhash;

	net_get_random_once(&udp6_ehash_secret,
			    sizeof(udp6_ehash_secret));
	net_get_random_once(&udp_ipv6_hash_secret,
			    sizeof(udp_ipv6_hash_secret));

	lhash = (__force u32)laddr->s6_addr32[3];
	fhash = __ipv6_addr_jhash(faddr, udp_ipv6_hash_secret);

	return __inet6_ehashfn(lhash, lport, fhash, fport,
			       udp6_ehash_secret + net_hash_mix(net));
}

int udp_v6_get_port(struct sock *sk, unsigned short snum)
{
	unsigned int hash2_nulladdr =
		ipv6_portaddr_hash(sock_net(sk), &in6addr_any, snum);
	unsigned int hash2_partial =
		ipv6_portaddr_hash(sock_net(sk), &sk->sk_v6_rcv_saddr, 0);

	/* precompute partial secondary hash */
	udp_sk(sk)->udp_portaddr_hash = hash2_partial;
	return udp_lib_get_port(sk, snum, hash2_nulladdr);
}

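/* Called when the locally bound address changes: recompute the
 * port+address (hash2) value and the four-tuple (hash4) value, using
 * the IPv4 flavour of the latter for v4-mapped sockets.
 */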
void udp_v6_rehash(struct sock *sk)
{
	u16 new_hash = ipv6_portaddr_hash(sock_net(sk),
					  &sk->sk_v6_rcv_saddr,
					  inet_sk(sk)->inet_num);
	u16 new_hash4;

	if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr)) {
		new_hash4 = udp_ehashfn(sock_net(sk),
					sk->sk_rcv_saddr, sk->sk_num,
					sk->sk_daddr, sk->sk_dport);
	} else {
		new_hash4 = udp6_ehashfn(sock_net(sk),
					 &sk->sk_v6_rcv_saddr, sk->sk_num,
					 &sk->sk_v6_daddr, sk->sk_dport);
	}

	udp_lib_rehash(sk, new_hash, new_hash4);
}

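/* Score @sk as a candidate for an incoming datagram: -1 means "cannot
 * match"; each connected field (destination port/address) and a
 * matching bound device add a point, so more specific sockets beat
 * wildcard ones.
 */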
static int compute_score(struct sock *sk, const struct net *net,
			 const struct in6_addr *saddr, __be16 sport,
			 const struct in6_addr *daddr, unsigned short hnum,
			 int dif, int sdif)
{
	int bound_dev_if, score;
	struct inet_sock *inet;
	bool dev_match;

	if (!net_eq(sock_net(sk), net) ||
	    udp_sk(sk)->udp_port_hash != hnum ||
	    sk->sk_family != PF_INET6)
		return -1;

	if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
		return -1;

	score = 0;
	inet = inet_sk(sk);

	if (inet->inet_dport) {
		if (inet->inet_dport != sport)
			return -1;
		score++;
	}

	if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
		if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr))
			return -1;
		score++;
	}

	bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
	dev_match = udp_sk_bound_dev_eq(net, bound_dev_if, dif, sdif);
	if (!dev_match)
		return -1;
	if (bound_dev_if)
		score++;

	if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
		score++;

	return score;
}

/**
 * udp6_lib_lookup1() - Simplified lookup using primary hash (destination port)
 * @net:	Network namespace
 * @saddr:	Source address, network order
 * @sport:	Source port, network order
 * @daddr:	Destination address, network order
 * @hnum:	Destination port, host order
 * @dif:	Destination interface index
 * @sdif:	Destination bridge port index, if relevant
 * @udptable:	Set of UDP hash tables
 *
 * Simplified lookup to be used as fallback if no sockets are found due to a
 * potential race between a (receive) address change and a lookup happening
 * before the rehash operation. This function ignores SO_REUSEPORT groups
 * while scoring result sockets, because if we have one, we don't need the
 * fallback at all.
 *
 * Called under rcu_read_lock().
 *
 * Return: socket with highest matching score if any, NULL if none
 */
static struct sock *udp6_lib_lookup1(const struct net *net,
				     const struct in6_addr *saddr, __be16 sport,
				     const struct in6_addr *daddr,
				     unsigned int hnum, int dif, int sdif,
				     const struct udp_table *udptable)
{
	unsigned int slot = udp_hashfn(net, hnum, udptable->mask);
	struct udp_hslot *hslot = &udptable->hash[slot];
	struct sock *sk, *result = NULL;
	int score, badness = 0;

	sk_for_each_rcu(sk, &hslot->head) {
		score = compute_score(sk, net,
				      saddr, sport, daddr, hnum, dif, sdif);
		if (score > badness) {
			result = sk;
			badness = score;
		}
	}

	return result;
}

/* called with rcu_read_lock() */
static struct sock *udp6_lib_lookup2(const struct net *net,
				     const struct in6_addr *saddr, __be16 sport,
				     const struct in6_addr *daddr, unsigned int hnum,
				     int dif, int sdif, struct udp_hslot *hslot2,
				     struct sk_buff *skb)
{
	struct sock *sk, *result;
	int score, badness;
	bool need_rescore;

	result = NULL;
	badness = -1;
	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		need_rescore = false;
rescore:
		score = compute_score(need_rescore ? result : sk, net, saddr,
				      sport, daddr, hnum, dif, sdif);
		if (score > badness) {
			badness = score;

			if (need_rescore)
				continue;

			if (sk->sk_state == TCP_ESTABLISHED) {
				result = sk;
				continue;
			}

			result = inet6_lookup_reuseport(net, sk, skb, sizeof(struct udphdr),
							saddr, sport, daddr, hnum, udp6_ehashfn);
			if (!result) {
				result = sk;
				continue;
			}

			/* Fall back to scoring if group has connections */
			if (!reuseport_has_conns(sk))
				return result;

			/* Reuseport logic returned an error, keep original score. */
			if (IS_ERR(result))
				continue;

			/* compute_score is too long a function to be
			 * inlined, and calling it again here yields
			 * measurable overhead for some workloads.
			 * Work around it by jumping backwards to
			 * rescore 'result'.
			 */
			need_rescore = true;
			goto rescore;
		}
	}
	return result;
}

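/* With CONFIG_BASE_SMALL the four-tuple (hash4) table is not built:
 * the lookup stub always returns NULL and hashing is a no-op, so
 * connected sockets fall back to the hash2 path above.
 */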
#if IS_ENABLED(CONFIG_BASE_SMALL)
static struct sock *udp6_lib_lookup4(const struct net *net,
				     const struct in6_addr *saddr, __be16 sport,
				     const struct in6_addr *daddr,
				     unsigned int hnum, int dif, int sdif,
				     struct udp_table *udptable)
{
	return NULL;
}

static void udp6_hash4(struct sock *sk)
{
}
#else /* !CONFIG_BASE_SMALL */
static struct sock *udp6_lib_lookup4(const struct net *net,
				     const struct in6_addr *saddr, __be16 sport,
				     const struct in6_addr *daddr,
				     unsigned int hnum, int dif, int sdif,
				     struct udp_table *udptable)
{
	const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
	const struct hlist_nulls_node *node;
	struct udp_hslot *hslot4;
	unsigned int hash4, slot;
	struct udp_sock *up;
	struct sock *sk;

	hash4 = udp6_ehashfn(net, daddr, hnum, saddr, sport);
	slot = hash4 & udptable->mask;
	hslot4 = &udptable->hash4[slot];

begin:
	udp_lrpa_for_each_entry_rcu(up, node, &hslot4->nulls_head) {
		sk = (struct sock *)up;
		if (inet6_match(net, sk, saddr, daddr, ports, dif, sdif))
			return sk;
	}

	/* if the nulls value we got at the end of this lookup is not the
	 * expected one, we must restart lookup. We probably met an item that
	 * was moved to another chain due to rehash.
	 */
	if (get_nulls_value(node) != slot)
		goto begin;

	return NULL;
}

static void udp6_hash4(struct sock *sk)
{
	struct net *net = sock_net(sk);
	unsigned int hash;

	if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr)) {
		udp4_hash4(sk);
		return;
	}

	if (sk_unhashed(sk) || ipv6_addr_any(&sk->sk_v6_rcv_saddr))
		return;

	hash = udp6_ehashfn(net, &sk->sk_v6_rcv_saddr, sk->sk_num,
			    &sk->sk_v6_daddr, sk->sk_dport);

	udp_lib_hash4(sk, hash);
}
#endif /* CONFIG_BASE_SMALL */

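/* Lookup order for an incoming datagram: the four-tuple (hash4) table
 * for connected sockets first, then the exact port+address (hash2)
 * slot, a BPF sk_lookup redirect if one is attached, the wildcard
 * hash2 slot, and finally the port-only (hash1) fallback that covers
 * the rehash race.
 */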
/* rcu_read_lock() must be held */
struct sock *__udp6_lib_lookup(const struct net *net,
			       const struct in6_addr *saddr, __be16 sport,
			       const struct in6_addr *daddr, __be16 dport,
			       int dif, int sdif, struct udp_table *udptable,
			       struct sk_buff *skb)
{
	unsigned short hnum = ntohs(dport);
	struct udp_hslot *hslot2;
	struct sock *result, *sk;
	unsigned int hash2;

	hash2 = ipv6_portaddr_hash(net, daddr, hnum);
	hslot2 = udp_hashslot2(udptable, hash2);

	if (udp_has_hash4(hslot2)) {
		result = udp6_lib_lookup4(net, saddr, sport, daddr, hnum,
					  dif, sdif, udptable);
		if (result) /* udp6_lib_lookup4 returns sk or NULL */
			return result;
	}

	/* Lookup connected or non-wildcard sockets */
	result = udp6_lib_lookup2(net, saddr, sport,
				  daddr, hnum, dif, sdif,
				  hslot2, skb);
	if (!IS_ERR_OR_NULL(result) && result->sk_state == TCP_ESTABLISHED)
		goto done;

	/* Lookup redirect from BPF */
	if (static_branch_unlikely(&bpf_sk_lookup_enabled) &&
	    udptable == net->ipv4.udp_table) {
		sk = inet6_lookup_run_sk_lookup(net, IPPROTO_UDP, skb, sizeof(struct udphdr),
						saddr, sport, daddr, hnum, dif,
						udp6_ehashfn);
		if (sk) {
			result = sk;
			goto done;
		}
	}

	/* Got non-wildcard socket or error on first lookup */
	if (result)
		goto done;

	/* Lookup wildcard sockets */
	hash2 = ipv6_portaddr_hash(net, &in6addr_any, hnum);
	hslot2 = udp_hashslot2(udptable, hash2);

	result = udp6_lib_lookup2(net, saddr, sport,
				  &in6addr_any, hnum, dif, sdif,
				  hslot2, skb);
	if (!IS_ERR_OR_NULL(result))
		goto done;

	/* Cover address change/lookup/rehash race: see __udp4_lib_lookup() */
	result = udp6_lib_lookup1(net, saddr, sport, daddr, hnum, dif, sdif,
				  udptable);

done:
	if (IS_ERR(result))
		return NULL;
	return result;
}
EXPORT_SYMBOL_GPL(__udp6_lib_lookup);

static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb,
					  __be16 sport, __be16 dport,
					  struct udp_table *udptable)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);

	return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
				 &iph->daddr, dport, inet6_iif(skb),
				 inet6_sdif(skb), udptable, skb);
}

struct sock *udp6_lib_lookup_skb(const struct sk_buff *skb,
				 __be16 sport, __be16 dport)
{
	const u16 offset = NAPI_GRO_CB(skb)->network_offsets[skb->encapsulation];
	const struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + offset);
	struct net *net = dev_net(skb->dev);
	int iif, sdif;

	inet6_get_iif_sdif(skb, &iif, &sdif);

	return __udp6_lib_lookup(net, &iph->saddr, sport,
				 &iph->daddr, dport, iif,
				 sdif, net->ipv4.udp_table, NULL);
}

/* Must be called under rcu_read_lock().
 * Does increment socket refcount.
 */
#if IS_ENABLED(CONFIG_NF_TPROXY_IPV6) || IS_ENABLED(CONFIG_NF_SOCKET_IPV6)
struct sock *udp6_lib_lookup(const struct net *net, const struct in6_addr *saddr, __be16 sport,
			     const struct in6_addr *daddr, __be16 dport, int dif)
{
	struct sock *sk;

	sk = __udp6_lib_lookup(net, saddr, sport, daddr, dport,
			       dif, 0, net->ipv4.udp_table, NULL);
	if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
		sk = NULL;
	return sk;
}
EXPORT_SYMBOL_GPL(udp6_lib_lookup);
#endif

/* do not use the scratch area len for jumbograms: their length exceeds the
 * scratch area space; note that the IP6CB flags are still in the first
 * cacheline, so checking for jumbograms is cheap
 */
static int udp6_skb_len(struct sk_buff *skb)
{
	return unlikely(inet6_is_jumbogram(skb)) ? skb->len : udp_skb_len(skb);
}

/*
 *	This should be easy: if there is something there,
 *	we return it; otherwise we block.
 */

int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
		  int flags, int *addr_len)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;
	unsigned int ulen, copied;
	int off, err, peeking = flags & MSG_PEEK;
	int is_udplite = IS_UDPLITE(sk);
	struct udp_mib __percpu *mib;
	bool checksum_valid = false;
	int is_udp4;

	if (flags & MSG_ERRQUEUE)
		return ipv6_recv_error(sk, msg, len, addr_len);

	if (np->rxpmtu && np->rxopt.bits.rxpmtu)
		return ipv6_recv_rxpmtu(sk, msg, len, addr_len);

try_again:
	off = sk_peek_offset(sk, flags);
	skb = __skb_recv_udp(sk, flags, &off, &err);
	if (!skb)
		return err;

	ulen = udp6_skb_len(skb);
	copied = len;
	if (copied > ulen - off)
		copied = ulen - off;
	else if (copied < ulen)
		msg->msg_flags |= MSG_TRUNC;

	is_udp4 = (skb->protocol == htons(ETH_P_IP));
	mib = __UDPX_MIB(sk, is_udp4);

	/*
	 * If checksum is needed at all, try to do it while copying the
	 * data. If the data is truncated, or if we only want a partial
	 * coverage checksum (UDP-Lite), do it before the copy.
	 */

	if (copied < ulen || peeking ||
	    (is_udplite && UDP_SKB_CB(skb)->partial_cov)) {
		checksum_valid = udp_skb_csum_unnecessary(skb) ||
				 !__udp_lib_checksum_complete(skb);
		if (!checksum_valid)
			goto csum_copy_err;
	}

	if (checksum_valid || udp_skb_csum_unnecessary(skb)) {
		if (udp_skb_is_linear(skb))
			err = copy_linear_skb(skb, copied, off, &msg->msg_iter);
		else
			err = skb_copy_datagram_msg(skb, off, msg, copied);
	} else {
		err = skb_copy_and_csum_datagram_msg(skb, off, msg);
		if (err == -EINVAL)
			goto csum_copy_err;
	}
	if (unlikely(err)) {
		if (!peeking) {
			atomic_inc(&sk->sk_drops);
			SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
		}
		kfree_skb(skb);
		return err;
	}
	if (!peeking)
		SNMP_INC_STATS(mib, UDP_MIB_INDATAGRAMS);

	sock_recv_cmsgs(msg, sk, skb);

	/* Copy the address. */
	if (msg->msg_name) {
		DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);

		sin6->sin6_family = AF_INET6;
		sin6->sin6_port = udp_hdr(skb)->source;
		sin6->sin6_flowinfo = 0;

		if (is_udp4) {
			ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
					       &sin6->sin6_addr);
			sin6->sin6_scope_id = 0;
		} else {
			sin6->sin6_addr = ipv6_hdr(skb)->saddr;
			sin6->sin6_scope_id =
				ipv6_iface_scope_id(&sin6->sin6_addr,
						    inet6_iif(skb));
		}
		*addr_len = sizeof(*sin6);

		BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk,
						      (struct sockaddr *)sin6,
						      addr_len);
	}

	if (udp_test_bit(GRO_ENABLED, sk))
		udp_cmsg_recv(msg, sk, skb);

	if (np->rxopt.all)
		ip6_datagram_recv_common_ctl(sk, msg, skb);

	if (is_udp4) {
		if (inet_cmsg_flags(inet))
			ip_cmsg_recv_offset(msg, sk, skb,
					    sizeof(struct udphdr), off);
	} else {
		if (np->rxopt.all)
			ip6_datagram_recv_specific_ctl(sk, msg, skb);
	}

	err = copied;
	if (flags & MSG_TRUNC)
		err = ulen;

	skb_consume_udp(sk, skb, peeking ? -err : err);
	return err;

csum_copy_err:
	if (!__sk_queue_drop_skb(sk, &udp_sk(sk)->reader_queue, skb, flags,
				 udp_skb_destructor)) {
		SNMP_INC_STATS(mib, UDP_MIB_CSUMERRORS);
		SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
	}
	kfree_skb(skb);

	/* starting over for a new packet, but check if we need to yield */
	cond_resched();
	msg->msg_flags &= ~MSG_TRUNC;
	goto try_again;
}

DECLARE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
void udpv6_encap_enable(void)
{
	static_branch_inc(&udpv6_encap_needed_key);
}
EXPORT_SYMBOL(udpv6_encap_enable);

/* Handler for tunnels with arbitrary destination ports: no socket lookup, go
 * through error handlers in encapsulations looking for a match.
 */
static int __udp6_lib_err_encap_no_sk(struct sk_buff *skb,
				      struct inet6_skb_parm *opt,
				      u8 type, u8 code, int offset, __be32 info)
{
	int i;

	for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) {
		int (*handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
			       u8 type, u8 code, int offset, __be32 info);
		const struct ip6_tnl_encap_ops *encap;

		encap = rcu_dereference(ip6tun_encaps[i]);
		if (!encap)
			continue;
		handler = encap->err_handler;
		if (handler && !handler(skb, opt, type, code, offset, info))
			return 0;
	}

	return -ENOENT;
}

/* Try to match ICMP errors to UDP tunnels by looking up a socket without
 * reversing source and destination port: this will match tunnels that force the
 * same destination port on both endpoints (e.g. VXLAN, GENEVE). Note that
 * lwtunnels might actually break this assumption by being configured with
 * different destination ports on endpoints, in this case we won't be able to
 * trace ICMP messages back to them.
 *
 * If this doesn't match any socket, probe tunnels with arbitrary destination
 * ports (e.g. FoU, GUE): there, the receiving socket is useless, as the port
 * we've sent packets to won't necessarily match the local destination port.
 *
 * Then ask the tunnel implementation to match the error against a valid
 * association.
 *
 * Return an error if we can't find a match, the socket if we need further
 * processing, zero otherwise.
 */
static struct sock *__udp6_lib_err_encap(struct net *net,
					 const struct ipv6hdr *hdr, int offset,
					 struct udphdr *uh,
					 struct udp_table *udptable,
					 struct sock *sk,
					 struct sk_buff *skb,
					 struct inet6_skb_parm *opt,
					 u8 type, u8 code, __be32 info)
{
	int (*lookup)(struct sock *sk, struct sk_buff *skb);
	int network_offset, transport_offset;
	struct udp_sock *up;

	network_offset = skb_network_offset(skb);
	transport_offset = skb_transport_offset(skb);

	/* Network header needs to point to the outer IPv6 header inside ICMP */
	skb_reset_network_header(skb);

	/* Transport header needs to point to the UDP header */
	skb_set_transport_header(skb, offset);

	if (sk) {
		up = udp_sk(sk);

		lookup = READ_ONCE(up->encap_err_lookup);
		if (lookup && lookup(sk, skb))
			sk = NULL;

		goto out;
	}

	sk = __udp6_lib_lookup(net, &hdr->daddr, uh->source,
			       &hdr->saddr, uh->dest,
			       inet6_iif(skb), 0, udptable, skb);
	if (sk) {
		up = udp_sk(sk);

		lookup = READ_ONCE(up->encap_err_lookup);
		if (!lookup || lookup(sk, skb))
			sk = NULL;
	}

out:
	if (!sk) {
		sk = ERR_PTR(__udp6_lib_err_encap_no_sk(skb, opt, type, code,
							offset, info));
	}

	skb_set_transport_header(skb, transport_offset);
	skb_set_network_header(skb, network_offset);

	return sk;
}

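/* ICMPv6 error handler for UDP: find the socket the error refers to
 * (probing tunnel encapsulations when no plain socket matches), handle
 * PKT_TOOBIG and redirects, and report the error to sockets that asked
 * for it via IPV6_RECVERR.
 */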
int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		   u8 type, u8 code, int offset, __be32 info,
		   struct udp_table *udptable)
{
	struct ipv6_pinfo *np;
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct in6_addr *saddr = &hdr->saddr;
	const struct in6_addr *daddr = seg6_get_daddr(skb, opt) ? : &hdr->daddr;
	struct udphdr *uh = (struct udphdr *)(skb->data + offset);
	bool tunnel = false;
	struct sock *sk;
	int harderr;
	int err;
	struct net *net = dev_net(skb->dev);

	sk = __udp6_lib_lookup(net, daddr, uh->dest, saddr, uh->source,
			       inet6_iif(skb), inet6_sdif(skb), udptable, NULL);

	if (!sk || READ_ONCE(udp_sk(sk)->encap_type)) {
		/* No socket for error: try tunnels before discarding */
		if (static_branch_unlikely(&udpv6_encap_needed_key)) {
			sk = __udp6_lib_err_encap(net, hdr, offset, uh,
						  udptable, sk, skb,
						  opt, type, code, info);
			if (!sk)
				return 0;
		} else
			sk = ERR_PTR(-ENOENT);

		if (IS_ERR(sk)) {
			__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
					  ICMP6_MIB_INERRORS);
			return PTR_ERR(sk);
		}

		tunnel = true;
	}

	harderr = icmpv6_err_convert(type, code, &err);
	np = inet6_sk(sk);

	if (type == ICMPV6_PKT_TOOBIG) {
		if (!ip6_sk_accept_pmtu(sk))
			goto out;
		ip6_sk_update_pmtu(skb, sk, info);
		if (READ_ONCE(np->pmtudisc) != IPV6_PMTUDISC_DONT)
			harderr = 1;
	}
	if (type == NDISC_REDIRECT) {
		if (tunnel) {
			ip6_redirect(skb, sock_net(sk), inet6_iif(skb),
				     READ_ONCE(sk->sk_mark), sk->sk_uid);
		} else {
			ip6_sk_redirect(skb, sk);
		}
		goto out;
	}

	/* Tunnels don't have an application socket: don't pass errors back */
	if (tunnel) {
		if (udp_sk(sk)->encap_err_rcv)
			udp_sk(sk)->encap_err_rcv(sk, skb, err, uh->dest,
						  ntohl(info), (u8 *)(uh + 1));
		goto out;
	}

	if (!inet6_test_bit(RECVERR6, sk)) {
		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
			goto out;
	} else {
		ipv6_icmp_error(sk, skb, err, uh->dest, ntohl(info), (u8 *)(uh + 1));
	}

	sk->sk_err = err;
	sk_error_report(sk);
out:
	return 0;
}

static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int rc;

	if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		sk_incoming_cpu_update(sk);
	} else {
		sk_mark_napi_id_once(sk, skb);
	}

	rc = __udp_enqueue_schedule_skb(sk, skb);
	if (rc < 0) {
		int is_udplite = IS_UDPLITE(sk);
		enum skb_drop_reason drop_reason;

		/* Note that an ENOMEM error is charged twice */
		if (rc == -ENOMEM) {
			UDP6_INC_STATS(sock_net(sk),
				       UDP_MIB_RCVBUFERRORS, is_udplite);
			drop_reason = SKB_DROP_REASON_SOCKET_RCVBUFF;
		} else {
			UDP6_INC_STATS(sock_net(sk),
				       UDP_MIB_MEMERRORS, is_udplite);
			drop_reason = SKB_DROP_REASON_PROTO_MEM;
		}
		UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
		trace_udp_fail_queue_rcv_skb(rc, sk, skb);
		sk_skb_reason_drop(sk, skb, drop_reason);
		return -1;
	}

	return 0;
}

static __inline__ int udpv6_err(struct sk_buff *skb,
				struct inet6_skb_parm *opt, u8 type,
				u8 code, int offset, __be32 info)
{
	return __udp6_lib_err(skb, opt, type, code, offset, info,
			      dev_net(skb->dev)->ipv4.udp_table);
}

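/* Validate and queue a single skb on @sk: run the XFRM input policy,
 * divert encapsulated packets to the socket's encap_rcv hook, enforce
 * UDP-Lite coverage rules, verify the checksum where required, then
 * enqueue via __udpv6_queue_rcv_skb().
 */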
static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
{
	enum skb_drop_reason drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;
	struct udp_sock *up = udp_sk(sk);
	int is_udplite = IS_UDPLITE(sk);

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
		drop_reason = SKB_DROP_REASON_XFRM_POLICY;
		goto drop;
	}
	nf_reset_ct(skb);

	if (static_branch_unlikely(&udpv6_encap_needed_key) &&
	    READ_ONCE(up->encap_type)) {
		int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);

		/*
		 * This is an encapsulation socket so pass the skb to
		 * the socket's udp_encap_rcv() hook. Otherwise, just
		 * fall through and pass this up the UDP socket.
		 * up->encap_rcv() returns the following value:
		 * =0 if skb was successfully passed to the encap
		 *    handler or was discarded by it.
		 * >0 if skb should be passed on to UDP.
		 * <0 if skb should be resubmitted as proto -N
		 */

		/* if we're overly short, let UDP handle it */
		encap_rcv = READ_ONCE(up->encap_rcv);
		if (encap_rcv) {
			int ret;

			/* Verify checksum before giving to encap */
			if (udp_lib_checksum_complete(skb))
				goto csum_error;

			ret = encap_rcv(sk, skb);
			if (ret <= 0) {
				__UDP6_INC_STATS(sock_net(sk),
						 UDP_MIB_INDATAGRAMS,
						 is_udplite);
				return -ret;
			}
		}

		/* FALLTHROUGH -- it's a UDP Packet */
	}

	/*
	 * UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c).
	 */
	if (udp_test_bit(UDPLITE_RECV_CC, sk) && UDP_SKB_CB(skb)->partial_cov) {
		u16 pcrlen = READ_ONCE(up->pcrlen);

		if (pcrlen == 0) {	/* full coverage was set */
			net_dbg_ratelimited("UDPLITE6: partial coverage %d while full coverage %d requested\n",
					    UDP_SKB_CB(skb)->cscov, skb->len);
			goto drop;
		}
		if (UDP_SKB_CB(skb)->cscov < pcrlen) {
			net_dbg_ratelimited("UDPLITE6: coverage %d too small, need min %d\n",
					    UDP_SKB_CB(skb)->cscov, pcrlen);
			goto drop;
		}
	}

	prefetch(&sk->sk_rmem_alloc);
	if (rcu_access_pointer(sk->sk_filter) &&
	    udp_lib_checksum_complete(skb))
		goto csum_error;

	if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr))) {
		drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
		goto drop;
	}

	udp_csum_pull_header(skb);

	skb_dst_drop(skb);

	return __udpv6_queue_rcv_skb(sk, skb);

csum_error:
	drop_reason = SKB_DROP_REASON_UDP_CSUM;
	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
drop:
	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
	atomic_inc(&sk->sk_drops);
	sk_skb_reason_drop(sk, skb, drop_reason);
	return -1;
}

static int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff *next, *segs;
	int ret;

	if (likely(!udp_unexpected_gso(sk, skb)))
		return udpv6_queue_rcv_one_skb(sk, skb);

	__skb_push(skb, -skb_mac_offset(skb));
	segs = udp_rcv_segment(sk, skb, false);
	skb_list_walk_safe(segs, skb, next) {
		__skb_pull(skb, skb_transport_offset(skb));

		udp_post_segment_fix_csum(skb);
		ret = udpv6_queue_rcv_one_skb(sk, skb);
		if (ret > 0)
			ip6_protocol_deliver_rcu(dev_net(skb->dev), skb, ret,
						 true);
	}
	return 0;
}

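/* Return true if @sk is bound in a way that makes it eligible to
 * receive the multicast datagram described by the remaining arguments:
 * same netns, matching port, addresses, device, and group membership.
 */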
static bool __udp_v6_is_mcast_sock(struct net *net, const struct sock *sk,
				   __be16 loc_port, const struct in6_addr *loc_addr,
				   __be16 rmt_port, const struct in6_addr *rmt_addr,
				   int dif, int sdif, unsigned short hnum)
{
	const struct inet_sock *inet = inet_sk(sk);

	if (!net_eq(sock_net(sk), net))
		return false;

	if (udp_sk(sk)->udp_port_hash != hnum ||
	    sk->sk_family != PF_INET6 ||
	    (inet->inet_dport && inet->inet_dport != rmt_port) ||
	    (!ipv6_addr_any(&sk->sk_v6_daddr) &&
	     !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr)) ||
	    !udp_sk_bound_dev_eq(net, READ_ONCE(sk->sk_bound_dev_if), dif, sdif) ||
	    (!ipv6_addr_any(&sk->sk_v6_rcv_saddr) &&
	     !ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr)))
		return false;
	if (!inet6_mc_check(sk, loc_addr, rmt_addr))
		return false;
	return true;
}

static void udp6_csum_zero_error(struct sk_buff *skb)
{
	/* RFC 2460 section 8.1 says that we SHOULD log
	 * this error. Well, it is reasonable.
	 */
	net_dbg_ratelimited("IPv6: udp checksum is 0 for [%pI6c]:%u->[%pI6c]:%u\n",
			    &ipv6_hdr(skb)->saddr, ntohs(udp_hdr(skb)->source),
			    &ipv6_hdr(skb)->daddr, ntohs(udp_hdr(skb)->dest));
}

/*
 * Note: called only from the BH handler context,
 * so we don't need to lock the hashes.
 */
static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
				    const struct in6_addr *saddr, const struct in6_addr *daddr,
				    struct udp_table *udptable, int proto)
{
	struct sock *sk, *first = NULL;
	const struct udphdr *uh = udp_hdr(skb);
	unsigned short hnum = ntohs(uh->dest);
	struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
	unsigned int offset = offsetof(typeof(*sk), sk_node);
	unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
	int dif = inet6_iif(skb);
	int sdif = inet6_sdif(skb);
	struct hlist_node *node;
	struct sk_buff *nskb;

	if (use_hash2) {
		hash2_any = ipv6_portaddr_hash(net, &in6addr_any, hnum) &
			    udptable->mask;
		hash2 = ipv6_portaddr_hash(net, daddr, hnum) & udptable->mask;
start_lookup:
		hslot = &udptable->hash2[hash2].hslot;
		offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
	}

	sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) {
		if (!__udp_v6_is_mcast_sock(net, sk, uh->dest, daddr,
					    uh->source, saddr, dif, sdif,
					    hnum))
			continue;
		/* If zero checksum and no_check is not on for
		 * the socket then skip it.
		 */
		if (!uh->check && !udp_get_no_check6_rx(sk))
			continue;
		if (!first) {
			first = sk;
			continue;
		}
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (unlikely(!nskb)) {
			atomic_inc(&sk->sk_drops);
			__UDP6_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
					 IS_UDPLITE(sk));
			__UDP6_INC_STATS(net, UDP_MIB_INERRORS,
					 IS_UDPLITE(sk));
			continue;
		}

		if (udpv6_queue_rcv_skb(sk, nskb) > 0)
			consume_skb(nskb);
	}

	/* Also lookup *:port if we are using hash2 and haven't done so yet. */
	if (use_hash2 && hash2 != hash2_any) {
		hash2 = hash2_any;
		goto start_lookup;
	}

	if (first) {
		if (udpv6_queue_rcv_skb(first, skb) > 0)
			consume_skb(skb);
	} else {
		kfree_skb(skb);
		__UDP6_INC_STATS(net, UDP_MIB_IGNOREDMULTI,
				 proto == IPPROTO_UDPLITE);
	}
	return 0;
}

static void udp6_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
{
	if (udp_sk_rx_dst_set(sk, dst))
		sk->sk_rx_dst_cookie = rt6_get_cookie(dst_rt6_info(dst));
}

/* wrapper for udp_queue_rcv_skb taking care of csum conversion and
 * return code conversion for ip layer consumption
 */
static int udp6_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
				struct udphdr *uh)
{
	int ret;

	if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
		skb_checksum_try_convert(skb, IPPROTO_UDP, ip6_compute_pseudo);

	ret = udpv6_queue_rcv_skb(sk, skb);

	/* a return value > 0 means to resubmit the input */
	if (ret > 0)
		return ret;
	return 0;
}

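/* Main receive path: validate the UDP length and checksum, then try an
 * early-demuxed socket, multicast delivery, or a full lookup; when no
 * socket matches, answer with an ICMPv6 port-unreachable.
 */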
int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
		   int proto)
{
	enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED;
	const struct in6_addr *saddr, *daddr;
	struct net *net = dev_net(skb->dev);
	struct sock *sk = NULL;
	struct udphdr *uh;
	bool refcounted;
	u32 ulen = 0;

	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
		goto discard;

	saddr = &ipv6_hdr(skb)->saddr;
	daddr = &ipv6_hdr(skb)->daddr;
	uh = udp_hdr(skb);

	ulen = ntohs(uh->len);
	if (ulen > skb->len)
		goto short_packet;

	if (proto == IPPROTO_UDP) {
		/* UDP validates ulen. */

		/* Check for jumbo payload */
		if (ulen == 0)
			ulen = skb->len;

		if (ulen < sizeof(*uh))
			goto short_packet;

		if (ulen < skb->len) {
			if (pskb_trim_rcsum(skb, ulen))
				goto short_packet;
			saddr = &ipv6_hdr(skb)->saddr;
			daddr = &ipv6_hdr(skb)->daddr;
			uh = udp_hdr(skb);
		}
	}

	if (udp6_csum_init(skb, uh, proto))
		goto csum_error;

	/* Check if the socket is already available, e.g. due to early demux */
	sk = inet6_steal_sock(net, skb, sizeof(struct udphdr), saddr, uh->source, daddr, uh->dest,
			      &refcounted, udp6_ehashfn);
	if (IS_ERR(sk))
		goto no_sk;

	if (sk) {
		struct dst_entry *dst = skb_dst(skb);
		int ret;

		if (unlikely(rcu_dereference(sk->sk_rx_dst) != dst))
			udp6_sk_rx_dst_set(sk, dst);

		if (!uh->check && !udp_get_no_check6_rx(sk)) {
			if (refcounted)
				sock_put(sk);
			goto report_csum_error;
		}

		ret = udp6_unicast_rcv_skb(sk, skb, uh);
		if (refcounted)
			sock_put(sk);
		return ret;
	}

	/*
	 * Multicast receive code
	 */
	if (ipv6_addr_is_multicast(daddr))
		return __udp6_lib_mcast_deliver(net, skb,
						saddr, daddr, udptable, proto);

	/* Unicast */
	sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
	if (sk) {
		if (!uh->check && !udp_get_no_check6_rx(sk))
			goto report_csum_error;
		return udp6_unicast_rcv_skb(sk, skb, uh);
	}
no_sk:
	reason = SKB_DROP_REASON_NO_SOCKET;

	if (!uh->check)
		goto report_csum_error;

	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard;
	nf_reset_ct(skb);

	if (udp_lib_checksum_complete(skb))
		goto csum_error;

	__UDP6_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);

	sk_skb_reason_drop(sk, skb, reason);
	return 0;

short_packet:
	if (reason == SKB_DROP_REASON_NOT_SPECIFIED)
		reason = SKB_DROP_REASON_PKT_TOO_SMALL;
	net_dbg_ratelimited("UDP%sv6: short packet: From [%pI6c]:%u %d/%d to [%pI6c]:%u\n",
			    proto == IPPROTO_UDPLITE ? "-Lite" : "",
			    saddr, ntohs(uh->source),
			    ulen, skb->len,
			    daddr, ntohs(uh->dest));
	goto discard;

report_csum_error:
	udp6_csum_zero_error(skb);
csum_error:
	if (reason == SKB_DROP_REASON_NOT_SPECIFIED)
		reason = SKB_DROP_REASON_UDP_CSUM;
	__UDP6_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
discard:
	__UDP6_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
	sk_skb_reason_drop(sk, skb, reason);
	return 0;
}

static struct sock *__udp6_lib_demux_lookup(struct net *net,
					    __be16 loc_port, const struct in6_addr *loc_addr,
					    __be16 rmt_port, const struct in6_addr *rmt_addr,
					    int dif, int sdif)
{
	struct udp_table *udptable = net->ipv4.udp_table;
	unsigned short hnum = ntohs(loc_port);
	struct udp_hslot *hslot2;
	unsigned int hash2;
	__portpair ports;
	struct sock *sk;

	hash2 = ipv6_portaddr_hash(net, loc_addr, hnum);
	hslot2 = udp_hashslot2(udptable, hash2);
	ports = INET_COMBINED_PORTS(rmt_port, hnum);

	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		if (sk->sk_state == TCP_ESTABLISHED &&
		    inet6_match(net, sk, rmt_addr, loc_addr, ports, dif, sdif))
			return sk;
		/* Only check first socket in chain */
		break;
	}
	return NULL;
}

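/* Early demux: before the routing decision, look for an established
 * socket matching the packet's four-tuple and attach it (and its
 * cached rx dst, when still valid) to the skb, so the receive path
 * can skip the full lookup and route resolution.
 */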
void udp_v6_early_demux(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	const struct udphdr *uh;
	struct sock *sk;
	struct dst_entry *dst;
	int dif = skb->dev->ifindex;
	int sdif = inet6_sdif(skb);

	if (!pskb_may_pull(skb, skb_transport_offset(skb) +
			   sizeof(struct udphdr)))
		return;

	uh = udp_hdr(skb);

	if (skb->pkt_type == PACKET_HOST)
		sk = __udp6_lib_demux_lookup(net, uh->dest,
					     &ipv6_hdr(skb)->daddr,
					     uh->source, &ipv6_hdr(skb)->saddr,
					     dif, sdif);
	else
		return;

	if (!sk)
		return;

	skb->sk = sk;
	DEBUG_NET_WARN_ON_ONCE(sk_is_refcounted(sk));
	skb->destructor = sock_pfree;
	dst = rcu_dereference(sk->sk_rx_dst);

	if (dst)
		dst = dst_check(dst, sk->sk_rx_dst_cookie);
	if (dst) {
		/* set noref for now.
		 * any place which wants to hold dst has to call
		 * dst_hold_safe()
		 */
		skb_dst_set_noref(skb, dst);
	}
}

INDIRECT_CALLABLE_SCOPE int udpv6_rcv(struct sk_buff *skb)
{
	return __udp6_lib_rcv(skb, dev_net(skb->dev)->ipv4.udp_table, IPPROTO_UDP);
}

/*
 *	Throw away all pending data and cancel the corking. Socket is locked.
 */
static void udp_v6_flush_pending_frames(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);

	if (up->pending == AF_INET)
		udp_flush_pending_frames(sk);
	else if (up->pending) {
		up->len = 0;
		WRITE_ONCE(up->pending, 0);
		ip6_flush_pending_frames(sk);
	}
}

static int udpv6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
			     int addr_len)
{
	if (addr_len < offsetofend(struct sockaddr, sa_family))
		return -EINVAL;
	/* The following checks are replicated from __ip6_datagram_connect()
	 * and intended to prevent BPF program called below from accessing
	 * bytes that are out of the bound specified by user in addr_len.
	 */
	if (uaddr->sa_family == AF_INET) {
		if (ipv6_only_sock(sk))
			return -EAFNOSUPPORT;
		return udp_pre_connect(sk, uaddr, addr_len);
	}

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	return BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr, &addr_len);
}

static int udpv6_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	int res;

	lock_sock(sk);
	res = __ip6_datagram_connect(sk, uaddr, addr_len);
	if (!res)
		udp6_hash4(sk);
	release_sock(sk);
	return res;
}

/**
 * udp6_hwcsum_outgoing  -  handle outgoing HW checksumming
 * @sk:	socket we are sending on
 * @skb: sk_buff containing the filled-in UDP header
 *	 (checksum field must be zeroed out)
 * @saddr: source address
 * @daddr: destination address
 * @len: length of packet
 */
static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
				 const struct in6_addr *saddr,
				 const struct in6_addr *daddr, int len)
{
	unsigned int offset;
	struct udphdr *uh = udp_hdr(skb);
	struct sk_buff *frags = skb_shinfo(skb)->frag_list;
	__wsum csum = 0;

	if (!frags) {
		/* Only one fragment on the socket. */
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP, 0);
	} else {
		/*
		 * HW checksumming won't work with two or more fragments
		 * on the socket: the checksums of all the sk_buffs have
		 * to be combined in software.
		 */
		offset = skb_transport_offset(skb);
		skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
		csum = skb->csum;

		skb->ip_summed = CHECKSUM_NONE;

		do {
			csum = csum_add(csum, frags->csum);
		} while ((frags = frags->next));

		uh->check = csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP,
					    csum);
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	}
}


/*
 *	Sending
 */

static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
			   struct inet_cork *cork)
{
	struct sock *sk = skb->sk;
	struct udphdr *uh;
	int err = 0;
	int is_udplite = IS_UDPLITE(sk);
	__wsum csum = 0;
	int offset = skb_transport_offset(skb);
	int len = skb->len - offset;
	int datalen = len - sizeof(*uh);

	/*
	 * Create a UDP header
	 */
	uh = udp_hdr(skb);
	uh->source = fl6->fl6_sport;
	uh->dest = fl6->fl6_dport;
	uh->len = htons(len);
	uh->check = 0;

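	/* UDP GSO requires working checksum offload and a plain route:
	 * reject cork sizes that cannot fit a single segment, payloads
	 * spanning more than UDP_MAX_SEGMENTS segments, sockets with tx
	 * checksums disabled, UDP-Lite, and xfrm-transformed dsts.
	 */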
	if (cork->gso_size) {
		const int hlen = skb_network_header_len(skb) +
				 sizeof(struct udphdr);

		if (hlen + min(datalen, cork->gso_size) > cork->fragsize) {
			kfree_skb(skb);
			return -EMSGSIZE;
		}
		if (datalen > cork->gso_size * UDP_MAX_SEGMENTS) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (udp_get_no_check6_tx(sk)) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (is_udplite || dst_xfrm(skb_dst(skb))) {
			kfree_skb(skb);
			return -EIO;
		}

		if (datalen > cork->gso_size) {
			skb_shinfo(skb)->gso_size = cork->gso_size;
			skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
			skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen,
								 cork->gso_size);

			/* Don't checksum the payload, skb will get segmented */
			goto csum_partial;
		}
	}

	if (is_udplite)
		csum = udplite_csum(skb);
	else if (udp_get_no_check6_tx(sk)) {	/* UDP csum disabled */
		skb->ip_summed = CHECKSUM_NONE;
		goto send;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {	/* UDP hardware csum */
csum_partial:
		udp6_hwcsum_outgoing(sk, skb, &fl6->saddr, &fl6->daddr, len);
		goto send;
	} else
		csum = udp_csum(skb);

	/* add protocol-dependent pseudo-header */
	uh->check = csum_ipv6_magic(&fl6->saddr, &fl6->daddr,
				    len, fl6->flowi6_proto, csum);
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;

send:
	err = ip6_send_skb(skb);
	if (err) {
		if (err == -ENOBUFS && !inet6_test_bit(RECVERR6, sk)) {
			UDP6_INC_STATS(sock_net(sk),
				       UDP_MIB_SNDBUFERRORS, is_udplite);
			err = 0;
		}
	} else {
		UDP6_INC_STATS(sock_net(sk),
			       UDP_MIB_OUTDATAGRAMS, is_udplite);
	}
	return err;
}

static int udp_v6_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb;
	struct udp_sock *up = udp_sk(sk);
	int err = 0;

	if (up->pending == AF_INET)
		return udp_push_pending_frames(sk);

	skb = ip6_finish_skb(sk);
	if (!skb)
		goto out;

	err = udp_v6_send_skb(skb, &inet_sk(sk)->cork.fl.u.ip6,
			      &inet_sk(sk)->cork.base);
out:
	up->len = 0;
	WRITE_ONCE(up->pending, 0);
	return err;
}

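/* sendmsg() for UDPv6: resolve the destination (falling back to
 * udp_sendmsg() for v4-mapped targets), build the flow and routing
 * info, then either transmit a single skb on the lockless fast path
 * or append data to the corked socket.
 */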
int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct ipv6_txoptions opt_space;
	struct udp_sock *up = udp_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
	struct in6_addr *daddr, *final_p, final;
	struct ipv6_txoptions *opt = NULL;
	struct ipv6_txoptions *opt_to_free = NULL;
	struct ip6_flowlabel *flowlabel = NULL;
	struct inet_cork_full cork;
	struct flowi6 *fl6 = &cork.fl.u.ip6;
	struct dst_entry *dst;
	struct ipcm6_cookie ipc6;
	int addr_len = msg->msg_namelen;
	bool connected = false;
	int ulen = len;
	int corkreq = udp_test_bit(CORK, sk) || msg->msg_flags & MSG_MORE;
	int err;
	int is_udplite = IS_UDPLITE(sk);
	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);

	ipcm6_init(&ipc6);
	ipc6.gso_size = READ_ONCE(up->gso_size);
	ipc6.sockc.tsflags = READ_ONCE(sk->sk_tsflags);
	ipc6.sockc.mark = READ_ONCE(sk->sk_mark);

	/* destination address check */
	if (sin6) {
		if (addr_len < offsetof(struct sockaddr, sa_data))
			return -EINVAL;

		switch (sin6->sin6_family) {
		case AF_INET6:
			if (addr_len < SIN6_LEN_RFC2133)
				return -EINVAL;
			daddr = &sin6->sin6_addr;
			if (ipv6_addr_any(daddr) &&
			    ipv6_addr_v4mapped(&np->saddr))
				ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
						       daddr);
			break;
		case AF_INET:
			goto do_udp_sendmsg;
		case AF_UNSPEC:
			msg->msg_name = sin6 = NULL;
			msg->msg_namelen = addr_len = 0;
			daddr = NULL;
			break;
		default:
			return -EINVAL;
		}
	} else if (!READ_ONCE(up->pending)) {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;
		daddr = &sk->sk_v6_daddr;
	} else
		daddr = NULL;

	if (daddr) {
		if (ipv6_addr_v4mapped(daddr)) {
			struct sockaddr_in sin;

			sin.sin_family = AF_INET;
			sin.sin_port = sin6 ? sin6->sin6_port : inet->inet_dport;
			sin.sin_addr.s_addr = daddr->s6_addr32[3];
			msg->msg_name = &sin;
			msg->msg_namelen = sizeof(sin);
do_udp_sendmsg:
			err = ipv6_only_sock(sk) ?
				-ENETUNREACH : udp_sendmsg(sk, msg, len);
			msg->msg_name = sin6;
			msg->msg_namelen = addr_len;
			return err;
		}
	}

	/* Rough check on arithmetic overflow;
	 * a better check is made in ip6_append_data().
	 */
	if (len > INT_MAX - sizeof(struct udphdr))
		return -EMSGSIZE;

	getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
	if (READ_ONCE(up->pending)) {
		if (READ_ONCE(up->pending) == AF_INET)
			return udp_sendmsg(sk, msg, len);
		/*
		 * There are pending frames.
		 * The socket lock must be held while it's corked.
		 */
		lock_sock(sk);
		if (likely(up->pending)) {
			if (unlikely(up->pending != AF_INET6)) {
				release_sock(sk);
				return -EAFNOSUPPORT;
			}
			dst = NULL;
			goto do_append_data;
		}
		release_sock(sk);
	}
	ulen += sizeof(struct udphdr);

	memset(fl6, 0, sizeof(*fl6));

	if (sin6) {
		if (sin6->sin6_port == 0)
			return -EINVAL;

		fl6->fl6_dport = sin6->sin6_port;
		daddr = &sin6->sin6_addr;

		if (inet6_test_bit(SNDFLOW, sk)) {
			fl6->flowlabel = sin6->sin6_flowinfo & IPV6_FLOWINFO_MASK;
			if (fl6->flowlabel & IPV6_FLOWLABEL_MASK) {
				flowlabel = fl6_sock_lookup(sk, fl6->flowlabel);
				if (IS_ERR(flowlabel))
					return -EINVAL;
			}
		}

		/*
		 * Otherwise it will be difficult to maintain
		 * sk->sk_dst_cache.
		 */
		if (sk->sk_state == TCP_ESTABLISHED &&
		    ipv6_addr_equal(daddr, &sk->sk_v6_daddr))
			daddr = &sk->sk_v6_daddr;

		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    sin6->sin6_scope_id &&
		    __ipv6_addr_needs_scope_id(__ipv6_addr_type(daddr)))
			fl6->flowi6_oif = sin6->sin6_scope_id;
	} else {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;

		fl6->fl6_dport = inet->inet_dport;
		daddr = &sk->sk_v6_daddr;
		fl6->flowlabel = np->flow_label;
		connected = true;
	}

	if (!fl6->flowi6_oif)
		fl6->flowi6_oif = READ_ONCE(sk->sk_bound_dev_if);

	if (!fl6->flowi6_oif)
		fl6->flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;

	fl6->flowi6_uid = sk->sk_uid;

	if (msg->msg_controllen) {
		opt = &opt_space;
		memset(opt, 0, sizeof(struct ipv6_txoptions));
		opt->tot_len = sizeof(*opt);
		ipc6.opt = opt;

		err = udp_cmsg_send(sk, msg, &ipc6.gso_size);
		if (err > 0) {
			err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, fl6,
						    &ipc6);
			connected = false;
		}
		if (err < 0) {
			fl6_sock_release(flowlabel);
			return err;
		}
		if ((fl6->flowlabel & IPV6_FLOWLABEL_MASK) && !flowlabel) {
			flowlabel = fl6_sock_lookup(sk, fl6->flowlabel);
			if (IS_ERR(flowlabel))
				return -EINVAL;
		}
		if (!(opt->opt_nflen | opt->opt_flen))
			opt = NULL;
	}
	if (!opt) {
		opt = txopt_get(np);
		opt_to_free = opt;
	}
	if (flowlabel)
		opt = fl6_merge_options(&opt_space, flowlabel, opt);
	opt = ipv6_fixup_options(&opt_space, opt);
	ipc6.opt = opt;

	fl6->flowi6_proto = sk->sk_protocol;
	fl6->flowi6_mark = ipc6.sockc.mark;
	fl6->daddr = *daddr;
	if (ipv6_addr_any(&fl6->saddr) && !ipv6_addr_any(&np->saddr))
		fl6->saddr = np->saddr;
	fl6->fl6_sport = inet->inet_sport;

	if (cgroup_bpf_enabled(CGROUP_UDP6_SENDMSG) && !connected) {
		err = BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk,
					   (struct sockaddr *)sin6,
					   &addr_len,
					   &fl6->saddr);
		if (err)
			goto out_no_dst;
		if (sin6) {
			if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
				/* BPF program rewrote the IPv6-only address
				 * into an IPv4-mapped one, which is currently
				 * unsupported.
				 */
				err = -ENOTSUPP;
				goto out_no_dst;
			}
			if (sin6->sin6_port == 0) {
				/* BPF program set invalid port. Reject it. */
				err = -EINVAL;
				goto out_no_dst;
			}
			fl6->fl6_dport = sin6->sin6_port;
			fl6->daddr = sin6->sin6_addr;
		}
	}

	if (ipv6_addr_any(&fl6->daddr))
		fl6->daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */

	final_p = fl6_update_dst(fl6, opt, &final);
	if (final_p)
		connected = false;

	if (!fl6->flowi6_oif && ipv6_addr_is_multicast(&fl6->daddr)) {
		fl6->flowi6_oif = READ_ONCE(np->mcast_oif);
		connected = false;
	} else if (!fl6->flowi6_oif)
		fl6->flowi6_oif = READ_ONCE(np->ucast_oif);

	security_sk_classify_flow(sk, flowi6_to_flowi_common(fl6));

	if (ipc6.tclass < 0)
		ipc6.tclass = np->tclass;

	fl6->flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6->flowlabel);

	dst = ip6_sk_dst_lookup_flow(sk, fl6, final_p, connected);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		dst = NULL;
		goto out;
	}

	if (ipc6.hlimit < 0)
		ipc6.hlimit = ip6_sk_dst_hoplimit(np, fl6, dst);

	if (msg->msg_flags & MSG_CONFIRM)
		goto do_confirm;
back_from_confirm:

	/* Lockless fast path for the non-corking case */
	if (!corkreq) {
		struct sk_buff *skb;

		skb = ip6_make_skb(sk, getfrag, msg, ulen,
				   sizeof(struct udphdr), &ipc6,
				   dst_rt6_info(dst),
				   msg->msg_flags, &cork);
		err = PTR_ERR(skb);
		if (!IS_ERR_OR_NULL(skb))
			err = udp_v6_send_skb(skb, fl6, &cork.base);
		/* ip6_make_skb steals dst reference */
		goto out_no_dst;
	}

	lock_sock(sk);
	if (unlikely(up->pending)) {
		/* The socket is already corked while preparing it. */
		/* ... which is an evident application bug. --ANK */
		release_sock(sk);

		net_dbg_ratelimited("udp cork app bug 2\n");
		err = -EINVAL;
		goto out;
	}

	WRITE_ONCE(up->pending, AF_INET6);

do_append_data:
	if (ipc6.dontfrag < 0)
		ipc6.dontfrag = inet6_test_bit(DONTFRAG, sk);
	up->len += ulen;
	err = ip6_append_data(sk, getfrag, msg, ulen, sizeof(struct udphdr),
			      &ipc6, fl6, dst_rt6_info(dst),
			      corkreq ? msg->msg_flags | MSG_MORE : msg->msg_flags);
	if (err)
		udp_v6_flush_pending_frames(sk);
	else if (!corkreq)
		err = udp_v6_push_pending_frames(sk);
	else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
		WRITE_ONCE(up->pending, 0);

	if (err > 0)
		err = inet6_test_bit(RECVERR6, sk) ? net_xmit_errno(err) : 0;
	release_sock(sk);

out:
	dst_release(dst);
out_no_dst:
	fl6_sock_release(flowlabel);
	txopt_put(opt_to_free);
	if (!err)
		return len;
	/*
	 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space.  Reporting
	 * ENOBUFS might not be good (it's not tunable per se), but otherwise
	 * we don't have a good statistic (IpOutDiscards but it can be too many
	 * things).  We could add another new stat but at least for now that
	 * seems like overkill.
	 */
	if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
		UDP6_INC_STATS(sock_net(sk),
			       UDP_MIB_SNDBUFERRORS, is_udplite);
	}
	return err;

do_confirm:
	if (msg->msg_flags & MSG_PROBE)
		dst_confirm_neigh(dst, &fl6->daddr);
	if (!(msg->msg_flags & MSG_PROBE) || len)
		goto back_from_confirm;
	err = 0;
	goto out;
}
EXPORT_SYMBOL(udpv6_sendmsg);

static void udpv6_splice_eof(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct udp_sock *up = udp_sk(sk);

	if (!READ_ONCE(up->pending) || udp_test_bit(CORK, sk))
		return;

	lock_sock(sk);
	if (up->pending && !udp_test_bit(CORK, sk))
		udp_v6_push_pending_frames(sk);
	release_sock(sk);
}

void udpv6_destroy_sock(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);

	lock_sock(sk);

	/* protects from races with udp_abort() */
	sock_set_flag(sk, SOCK_DEAD);
	udp_v6_flush_pending_frames(sk);
	release_sock(sk);

	if (static_branch_unlikely(&udpv6_encap_needed_key)) {
		if (up->encap_type) {
			void (*encap_destroy)(struct sock *sk);

			encap_destroy = READ_ONCE(up->encap_destroy);
			if (encap_destroy)
				encap_destroy(sk);
		}
		if (udp_test_bit(ENCAP_ENABLED, sk)) {
			static_branch_dec(&udpv6_encap_needed_key);
			udp_encap_disable();
		}
	}
}

/*
 *	Socket option code for UDP
 */
int udpv6_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
		     unsigned int optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE || level == SOL_SOCKET)
		return udp_lib_setsockopt(sk, level, optname,
					  optval, optlen,
					  udp_v6_push_pending_frames);
	return ipv6_setsockopt(sk, level, optname, optval, optlen);
}

int udpv6_getsockopt(struct sock *sk, int level, int optname,
		     char __user *optval, int __user *optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
	return ipv6_getsockopt(sk, level, optname, optval, optlen);
}

/* ------------------------------------------------------------------------ */
#ifdef CONFIG_PROC_FS
int udp6_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, IPV6_SEQ_DGRAM_HEADER);
	} else {
		int bucket = ((struct udp_iter_state *)seq->private)->bucket;
		const struct inet_sock *inet = inet_sk((const struct sock *)v);
		__u16 srcp = ntohs(inet->inet_sport);
		__u16 destp = ntohs(inet->inet_dport);

		__ip6_dgram_sock_seq_show(seq, v, srcp, destp,
					  udp_rqueue_get(v), bucket);
	}
	return 0;
}

const struct seq_operations udp6_seq_ops = {
	.start		= udp_seq_start,
	.next		= udp_seq_next,
	.stop		= udp_seq_stop,
	.show		= udp6_seq_show,
};
EXPORT_SYMBOL(udp6_seq_ops);

static struct udp_seq_afinfo udp6_seq_afinfo = {
	.family		= AF_INET6,
	.udp_table	= NULL,
};

int __net_init udp6_proc_init(struct net *net)
{
	if (!proc_create_net_data("udp6", 0444, net->proc_net, &udp6_seq_ops,
				  sizeof(struct udp_iter_state), &udp6_seq_afinfo))
		return -ENOMEM;
	return 0;
}

void udp6_proc_exit(struct net *net)
{
	remove_proc_entry("udp6", net->proc_net);
}
#endif /* CONFIG_PROC_FS */

/* ------------------------------------------------------------------------ */

struct proto udpv6_prot = {
	.name			= "UDPv6",
	.owner			= THIS_MODULE,
	.close			= udp_lib_close,
	.pre_connect		= udpv6_pre_connect,
	.connect		= udpv6_connect,
	.disconnect		= udp_disconnect,
	.ioctl			= udp_ioctl,
	.init			= udpv6_init_sock,
	.destroy		= udpv6_destroy_sock,
	.setsockopt		= udpv6_setsockopt,
	.getsockopt		= udpv6_getsockopt,
	.sendmsg		= udpv6_sendmsg,
	.recvmsg		= udpv6_recvmsg,
	.splice_eof		= udpv6_splice_eof,
	.release_cb		= ip6_datagram_release_cb,
	.hash			= udp_lib_hash,
	.unhash			= udp_lib_unhash,
	.rehash			= udp_v6_rehash,
	.get_port		= udp_v6_get_port,
	.put_port		= udp_lib_unhash,
#ifdef CONFIG_BPF_SYSCALL
	.psock_update_sk_prot	= udp_bpf_update_proto,
#endif

	.memory_allocated	= &udp_memory_allocated,
	.per_cpu_fw_alloc	= &udp_memory_per_cpu_fw_alloc,

	.sysctl_mem		= sysctl_udp_mem,
	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_udp_wmem_min),
	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_udp_rmem_min),
	.obj_size		= sizeof(struct udp6_sock),
	.ipv6_pinfo_offset	= offsetof(struct udp6_sock, inet6),
	.h.udp_table		= NULL,
	.diag_destroy		= udp_abort,
};

static struct inet_protosw udpv6_protosw = {
	.type		= SOCK_DGRAM,
	.protocol	= IPPROTO_UDP,
	.prot		= &udpv6_prot,
	.ops		= &inet6_dgram_ops,
	.flags		= INET_PROTOSW_PERMANENT,
};

int __init udpv6_init(void)
{
	int ret;

	net_hotdata.udpv6_protocol = (struct inet6_protocol) {
		.handler	= udpv6_rcv,
		.err_handler	= udpv6_err,
		.flags		= INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
	};
	ret = inet6_add_protocol(&net_hotdata.udpv6_protocol, IPPROTO_UDP);
	if (ret)
		goto out;

	ret = inet6_register_protosw(&udpv6_protosw);
	if (ret)
		goto out_udpv6_protocol;
out:
	return ret;

out_udpv6_protocol:
	inet6_del_protocol(&net_hotdata.udpv6_protocol, IPPROTO_UDP);
	goto out;
}

void udpv6_exit(void)
{
	inet6_unregister_protosw(&udpv6_protosw);
	inet6_del_protocol(&net_hotdata.udpv6_protocol, IPPROTO_UDP);
}