/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the UDP module.
 *
 * Version:	@(#)udp.h	1.0.2	05/07/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *
 * Fixes:
 *		Alan Cox	: Turned on udp checksums. I don't want to
 *				  chase 'memory corruption' bugs that aren't!
 */
#ifndef _UDP_H
#define _UDP_H

#include <linux/list.h>
#include <linux/bug.h>
#include <net/inet_sock.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/indirect_call_wrapper.h>

/**
 * struct udp_skb_cb - UDP(-Lite) private variables
 *
 * @header:      private variables used by IPv4/IPv6
 * @cscov:       checksum coverage length (UDP-Lite only)
 * @partial_cov: if set indicates partial csum coverage
 */
struct udp_skb_cb {
	union {
		struct inet_skb_parm h4;
#if IS_ENABLED(CONFIG_IPV6)
		struct inet6_skb_parm h6;
#endif
	} header;
	__u16 cscov;
	__u8 partial_cov;
};
#define UDP_SKB_CB(__skb)	((struct udp_skb_cb *)((__skb)->cb))

/**
 * struct udp_hslot - UDP hash slot
 *
 * @head:  head of list of sockets
 * @count: number of sockets in 'head' list
 * @lock:  spinlock protecting changes to head/count
 */
struct udp_hslot {
	struct hlist_head head;
	int count;
	spinlock_t lock;
} __attribute__((aligned(2 * sizeof(long))));

/**
 * struct udp_table - UDP table
 *
 * @hash:  hash table, sockets are hashed on (local port)
 * @hash2: hash table, sockets are hashed on (local port, local address)
 * @mask:  number of slots in hash tables, minus 1
 * @log:   log2(number of slots in hash table)
 */
struct udp_table {
	struct udp_hslot *hash;
	struct udp_hslot *hash2;
	unsigned int mask;
	unsigned int log;
};
extern struct udp_table udp_table;
void udp_table_init(struct udp_table *, const char *);
static inline struct udp_hslot *udp_hashslot(struct udp_table *table,
					     struct net *net, unsigned int num)
{
	return &table->hash[udp_hashfn(net, num, table->mask)];
}
/*
 * The secondary hash already has net_hash_mix() folded in before
 * udp_hashslot2() is called, hence the difference from udp_hashslot().
 */
static inline struct udp_hslot *udp_hashslot2(struct udp_table *table,
					      unsigned int hash)
{
	return &table->hash2[hash & table->mask];
}
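/*
 * A minimal secondary-hash lookup sketch, assuming ipv4_portaddr_hash()
 * from <net/ip.h> (which already folds in net_hash_mix(), as the comment
 * above requires):
 *
 *	u32 hash2 = ipv4_portaddr_hash(net, daddr, hnum);
 *	struct udp_hslot *hslot2 = udp_hashslot2(&udp_table, hash2);
 */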

extern struct proto udp_prot;

extern atomic_long_t udp_memory_allocated;

/* sysctl variables for udp */
extern long sysctl_udp_mem[3];
extern int sysctl_udp_rmem_min;
extern int sysctl_udp_wmem_min;

struct sk_buff;

/*
 * Generic checksumming routines for UDP(-Lite) v4 and v6
 */
static inline __sum16 __udp_lib_checksum_complete(struct sk_buff *skb)
{
	return (UDP_SKB_CB(skb)->cscov == skb->len ?
		__skb_checksum_complete(skb) :
		__skb_checksum_complete_head(skb, UDP_SKB_CB(skb)->cscov));
}

static inline int udp_lib_checksum_complete(struct sk_buff *skb)
{
	return !skb_csum_unnecessary(skb) &&
		__udp_lib_checksum_complete(skb);
}

/**
 * udp_csum_outgoing - compute UDPv4/v6 checksum over fragments
 * @sk:  socket we are writing to
 * @skb: sk_buff containing the filled-in UDP header
 *       (checksum field must be zeroed out)
 */
static inline __wsum udp_csum_outgoing(struct sock *sk, struct sk_buff *skb)
{
	__wsum csum = csum_partial(skb_transport_header(skb),
				   sizeof(struct udphdr), 0);
	skb_queue_walk(&sk->sk_write_queue, skb) {
		csum = csum_add(csum, skb->csum);
	}
	return csum;
}

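/* Like udp_csum_outgoing(), but for a packet whose fragments hang off
 * skb_shinfo(skb)->frag_list: fold the UDP header into the head skb's
 * csum, then add the checksum of every fragment on the list.
 */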
static inline __wsum udp_csum(struct sk_buff *skb)
{
	__wsum csum = csum_partial(skb_transport_header(skb),
				   sizeof(struct udphdr), skb->csum);

	for (skb = skb_shinfo(skb)->frag_list; skb; skb = skb->next) {
		csum = csum_add(csum, skb->csum);
	}
	return csum;
}

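/* Return the folded UDPv4 checksum for the given pseudo-header fields,
 * where @base is the partial checksum already computed over the UDP
 * header and payload.
 */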
static inline __sum16 udp_v4_check(int len, __be32 saddr,
				   __be32 daddr, __wsum base)
{
	return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_UDP, base);
}

void udp_set_csum(bool nocheck, struct sk_buff *skb,
		  __be32 saddr, __be32 daddr, int len);

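/* Pull the UDP header off @skb while keeping the checksum state usable:
 * if the packet still needs full verification (CHECKSUM_NONE and not yet
 * validated), fold the header bytes into skb->csum before pulling, and
 * shrink the UDP-Lite coverage length by the pulled header.
 */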
static inline void udp_csum_pull_header(struct sk_buff *skb)
{
	if (!skb->csum_valid && skb->ip_summed == CHECKSUM_NONE)
		skb->csum = csum_partial(skb->data, sizeof(struct udphdr),
					 skb->csum);
	skb_pull_rcsum(skb, sizeof(struct udphdr));
	UDP_SKB_CB(skb)->cscov -= sizeof(struct udphdr);
}

typedef struct sock *(*udp_lookup_t)(struct sk_buff *skb, __be16 sport,
				     __be16 dport);

INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp4_gro_receive(struct list_head *,
							   struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(int udp4_gro_complete(struct sk_buff *, int));
INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp6_gro_receive(struct list_head *,
							   struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(int udp6_gro_complete(struct sk_buff *, int));
struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
				struct udphdr *uh, struct sock *sk);
int udp_gro_complete(struct sk_buff *skb, int nhoff, udp_lookup_t lookup);

struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
				  netdev_features_t features);

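/* Return the UDP header at the current GRO offset, falling back to the
 * slow path when the fast path cannot yet see enough header bytes.
 */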
static inline struct udphdr *udp_gro_udphdr(struct sk_buff *skb)
{
	struct udphdr *uh;
	unsigned int hlen, off;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*uh);
	uh = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen))
		uh = skb_gro_header_slow(skb, hlen, off);

	return uh;
}

/* hash routines shared between UDPv4/6 and UDP-Litev4/6 */
static inline int udp_lib_hash(struct sock *sk)
{
	BUG();
	return 0;
}

void udp_lib_unhash(struct sock *sk);
void udp_lib_rehash(struct sock *sk, u16 new_hash);

static inline void udp_lib_close(struct sock *sk, long timeout)
{
	sk_common_release(sk);
}

int udp_lib_get_port(struct sock *sk, unsigned short snum,
		     unsigned int hash2_nulladdr);

u32 udp_flow_hashrnd(void);

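/*
 * A hedged usage note: UDP tunnel drivers (vxlan, for instance) call the
 * helper below to derive a flow-entropy-preserving source port; passing
 * min == max == 0 selects the default local port range:
 *
 *	__be16 sport = udp_flow_src_port(net, skb, 0, 0, true);
 */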
static inline __be16 udp_flow_src_port(struct net *net, struct sk_buff *skb,
				       int min, int max, bool use_eth)
{
	u32 hash;

	if (min >= max) {
		/* Use default range */
		inet_get_local_port_range(net, &min, &max);
	}

	hash = skb_get_hash(skb);
	if (unlikely(!hash)) {
		if (use_eth) {
			/* No flow hash is available, but the caller has
			 * indicated an Ethernet packet, so hash the MAC
			 * addresses instead.
			 */
			hash = jhash(skb->data, 2 * ETH_ALEN,
				     (__force u32) skb->protocol);
		} else {
			/* No hash of any sort can be derived from the
			 * packet; fall back to a consistent random value.
			 */
			hash = udp_flow_hashrnd();
		}
	}

	/* Since this is being sent on the wire, obfuscate the hash a bit to
	 * minimize the possibility of leaking useful information to an
	 * attacker. Only the upper 16 bits are relevant in the computation
	 * of the 16 bit port value.
	 */
	hash ^= hash << 16;

	return htons((((u64) hash * (max - min)) >> 32) + min);
}

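/* Receive queue backlog: memory charged to the socket minus the
 * forward_deficit that has not been reclaimed yet.
 */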
static inline int udp_rqueue_get(struct sock *sk)
{
	return sk_rmem_alloc_get(sk) - READ_ONCE(udp_sk(sk)->forward_deficit);
}

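/* Shared device-binding check for the UDPv4/v6 lookup paths: does a packet
 * received on @dif (or @sdif via an L3 master device) match a socket bound
 * to @bound_dev_if? The udp_l3mdev_accept sysctl is honoured only when
 * CONFIG_NET_L3_MASTER_DEV is enabled.
 */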
static inline bool udp_sk_bound_dev_eq(struct net *net, int bound_dev_if,
				       int dif, int sdif)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	return inet_bound_dev_eq(!!net->ipv4.sysctl_udp_l3mdev_accept,
				 bound_dev_if, dif, sdif);
#else
	return inet_bound_dev_eq(true, bound_dev_if, dif, sdif);
#endif
}

/* net/ipv4/udp.c */
void udp_destruct_sock(struct sock *sk);
void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len);
int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb);
void udp_skb_destructor(struct sock *sk, struct sk_buff *skb);
struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags,
			       int noblock, int *off, int *err);
static inline struct sk_buff *skb_recv_udp(struct sock *sk, unsigned int flags,
					   int noblock, int *err)
{
	int off = 0;

	return __skb_recv_udp(sk, flags, noblock, &off, err);
}

int udp_v4_early_demux(struct sk_buff *skb);
bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst);
int udp_get_port(struct sock *sk, unsigned short snum,
		 int (*saddr_cmp)(const struct sock *,
				  const struct sock *));
int udp_err(struct sk_buff *, u32);
int udp_abort(struct sock *sk, int err);
int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len);
int udp_push_pending_frames(struct sock *sk);
void udp_flush_pending_frames(struct sock *sk);
int udp_cmsg_send(struct sock *sk, struct msghdr *msg, u16 *gso_size);
void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst);
int udp_rcv(struct sk_buff *skb);
int udp_ioctl(struct sock *sk, int cmd, unsigned long arg);
int udp_init_sock(struct sock *sk);
int udp_pre_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
int __udp_disconnect(struct sock *sk, int flags);
int udp_disconnect(struct sock *sk, int flags);
__poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait);
struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
				       netdev_features_t features,
				       bool is_ipv6);
int udp_lib_getsockopt(struct sock *sk, int level, int optname,
		       char __user *optval, int __user *optlen);
int udp_lib_setsockopt(struct sock *sk, int level, int optname,
		       sockptr_t optval, unsigned int optlen,
		       int (*push_pending_frames)(struct sock *));
struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
			     __be32 daddr, __be16 dport, int dif);
struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
			       __be32 daddr, __be16 dport, int dif, int sdif,
			       struct udp_table *tbl, struct sk_buff *skb);
struct sock *udp4_lib_lookup_skb(struct sk_buff *skb,
				 __be16 sport, __be16 dport);
struct sock *udp6_lib_lookup(struct net *net,
			     const struct in6_addr *saddr, __be16 sport,
			     const struct in6_addr *daddr, __be16 dport,
			     int dif);
struct sock *__udp6_lib_lookup(struct net *net,
			       const struct in6_addr *saddr, __be16 sport,
			       const struct in6_addr *daddr, __be16 dport,
			       int dif, int sdif, struct udp_table *tbl,
			       struct sk_buff *skb);
struct sock *udp6_lib_lookup_skb(struct sk_buff *skb,
				 __be16 sport, __be16 dport);

/* UDP uses skb->dev_scratch to cache as much information as possible and
 * avoid multiple cache misses on dequeue().
 */
struct udp_dev_scratch {
	/* skb->truesize and the stateless bit are embedded in a single field;
	 * do not use a bitfield since the compiler emits better/smaller code
	 * this way
	 */
	u32 _tsize_state;

#if BITS_PER_LONG == 64
	/* len and the bit needed to compute skb_csum_unnecessary
	 * will be on cold cache lines at recvmsg time.
	 * skb->len can be stored in 16 bits since the UDP header has
	 * already been validated and pulled.
	 */
	u16 len;
	bool is_linear;
	bool csum_unnecessary;
#endif
};

static inline struct udp_dev_scratch *udp_skb_scratch(struct sk_buff *skb)
{
	return (struct udp_dev_scratch *)&skb->dev_scratch;
}

#if BITS_PER_LONG == 64
static inline unsigned int udp_skb_len(struct sk_buff *skb)
{
	return udp_skb_scratch(skb)->len;
}

static inline bool udp_skb_csum_unnecessary(struct sk_buff *skb)
{
	return udp_skb_scratch(skb)->csum_unnecessary;
}

static inline bool udp_skb_is_linear(struct sk_buff *skb)
{
	return udp_skb_scratch(skb)->is_linear;
}

#else
static inline unsigned int udp_skb_len(struct sk_buff *skb)
{
	return skb->len;
}

static inline bool udp_skb_csum_unnecessary(struct sk_buff *skb)
{
	return skb_csum_unnecessary(skb);
}

static inline bool udp_skb_is_linear(struct sk_buff *skb)
{
	return !skb_is_nonlinear(skb);
}
#endif

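/* Copy @len bytes at offset @off out of a linear skb into @to. On a short
 * copy, rewind the iterator so no partial data is accounted, and return
 * -EFAULT.
 */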
static inline int copy_linear_skb(struct sk_buff *skb, int len, int off,
				  struct iov_iter *to)
{
	int n;

	n = copy_to_iter(skb->data + off, len, to);
	if (n == len)
		return 0;

	iov_iter_revert(to, n);
	return -EFAULT;
}

/*
 * SNMP statistics for UDP and UDP-Lite
 */
#define UDP_INC_STATS(net, field, is_udplite)	do {			\
	if (is_udplite)							\
		SNMP_INC_STATS((net)->mib.udplite_statistics, field);	\
	else								\
		SNMP_INC_STATS((net)->mib.udp_statistics, field);	\
} while (0)
#define __UDP_INC_STATS(net, field, is_udplite)	do {			\
	if (is_udplite)							\
		__SNMP_INC_STATS((net)->mib.udplite_statistics, field);	\
	else								\
		__SNMP_INC_STATS((net)->mib.udp_statistics, field);	\
} while (0)

#define __UDP6_INC_STATS(net, field, is_udplite) do {			\
	if (is_udplite)							\
		__SNMP_INC_STATS((net)->mib.udplite_stats_in6, field);	\
	else								\
		__SNMP_INC_STATS((net)->mib.udp_stats_in6, field);	\
} while (0)
#define UDP6_INC_STATS(net, field, __lite) do {				\
	if (__lite)							\
		SNMP_INC_STATS((net)->mib.udplite_stats_in6, field);	\
	else								\
		SNMP_INC_STATS((net)->mib.udp_stats_in6, field);	\
} while (0)

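/* Pick the MIB block matching the socket: v4 vs v6 statistics, and UDP vs
 * UDP-Lite within each family.
 */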
#if IS_ENABLED(CONFIG_IPV6)
#define __UDPX_MIB(sk, ipv4)						\
({									\
	ipv4 ? (IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_statistics :	\
				 sock_net(sk)->mib.udp_statistics) :	\
	       (IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_stats_in6 :	\
				 sock_net(sk)->mib.udp_stats_in6);	\
})
#else
#define __UDPX_MIB(sk, ipv4)						\
({									\
	IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_statistics :		\
			 sock_net(sk)->mib.udp_statistics;		\
})
#endif

#define __UDPX_INC_STATS(sk, field) \
	__SNMP_INC_STATS(__UDPX_MIB(sk, (sk)->sk_family == AF_INET), field)

#ifdef CONFIG_PROC_FS
struct udp_seq_afinfo {
	sa_family_t family;
	struct udp_table *udp_table;
};

struct udp_iter_state {
	struct seq_net_private p;
	int bucket;
	struct udp_seq_afinfo *bpf_seq_afinfo;
};

void *udp_seq_start(struct seq_file *seq, loff_t *pos);
void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos);
void udp_seq_stop(struct seq_file *seq, void *v);

extern const struct seq_operations udp_seq_ops;
extern const struct seq_operations udp6_seq_ops;

int udp4_proc_init(void);
void udp4_proc_exit(void);
#endif /* CONFIG_PROC_FS */

int udpv4_offload_init(void);

void udp_init(void);

DECLARE_STATIC_KEY_FALSE(udp_encap_needed_key);
void udp_encap_enable(void);
#if IS_ENABLED(CONFIG_IPV6)
DECLARE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
void udpv6_encap_enable(void);
#endif

static inline struct sk_buff *udp_rcv_segment(struct sock *sk,
					      struct sk_buff *skb, bool ipv4)
{
	netdev_features_t features = NETIF_F_SG;
	struct sk_buff *segs;

	/* Avoid csum recalculation by skb_segment unless userspace explicitly
	 * asks for the final checksum values
	 */
	if (!inet_get_convert_csum(sk))
		features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	/* UDP segmentation expects packets of type CHECKSUM_PARTIAL or
	 * CHECKSUM_NONE in __udp_gso_segment. UDP GRO indeed builds partial
	 * packets in udp_gro_complete_segment, as does UDP GSO (verified by
	 * udp_send_skb). But when those packets are looped back in
	 * dev_loopback_xmit their ip_summed is set to CHECKSUM_UNNECESSARY.
	 * Reset it in this specific case, where PARTIAL is both correct and
	 * required.
	 */
	if (skb->pkt_type == PACKET_LOOPBACK)
		skb->ip_summed = CHECKSUM_PARTIAL;

	/* the GSO CB lies after the UDP one, so there is no need to save and
	 * restore any CB fragment
	 */
	segs = __skb_gso_segment(skb, features, false);
	if (IS_ERR_OR_NULL(segs)) {
		int segs_nr = skb_shinfo(skb)->gso_segs;

		atomic_add(segs_nr, &sk->sk_drops);
		SNMP_ADD_STATS(__UDPX_MIB(sk, ipv4), UDP_MIB_INERRORS, segs_nr);
		kfree_skb(skb);
		return NULL;
	}

	consume_skb(skb);
	return segs;
}
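/*
 * A hedged usage note: the per-socket receive path is expected to call
 * udp_rcv_segment() when a GSO/GRO-aggregated skb reaches a socket that has
 * not opted in to receiving aggregated datagrams, so each segment of the
 * returned list can be enqueued (and later read) as an individual datagram.
 */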

#ifdef CONFIG_BPF_STREAM_PARSER
struct sk_psock;
struct proto *udp_bpf_get_proto(struct sock *sk, struct sk_psock *psock);
#endif /* CONFIG_BPF_STREAM_PARSER */

#endif /* _UDP_H */