/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The User Datagram Protocol (UDP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *		Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() calls
 *		Alan Cox	: 	stopped close while in use off icmp
 *					messages. Not a fix but a botch that
 *					for udp at least is 'valid'.
 *		Alan Cox	:	Fixed icmp handling properly
 *		Alan Cox	: 	Correct error for oversized datagrams
 *		Alan Cox	:	Tidied select() semantics.
 *		Alan Cox	:	udp_err() fixed properly, also now
 *					select and read wake correctly on errors
 *		Alan Cox	:	udp_send verify_area moved to avoid mem leak
 *		Alan Cox	:	UDP can count its memory
 *		Alan Cox	:	send to an unknown connection causes
 *					an ECONNREFUSED off the icmp, but
 *					does NOT close.
 *		Alan Cox	:	Switched to new sk_buff handlers. No more backlog!
 *		Alan Cox	:	Using generic datagram code. Even smaller and the PEEK
 *					bug no longer crashes it.
 *		Fred Van Kempen	: 	Net2e support for sk->broadcast.
 *		Alan Cox	:	Uses skb_free_datagram
 *		Alan Cox	:	Added get/set sockopt support.
 *		Alan Cox	:	Broadcasting without option set returns EACCES.
 *		Alan Cox	:	No wakeup calls. Instead we now use the callbacks.
 *		Alan Cox	:	Use ip_tos and ip_ttl
 *		Alan Cox	:	SNMP Mibs
 *		Alan Cox	:	MSG_DONTROUTE, and 0.0.0.0 support.
 *		Matt Dillon	:	UDP length checks.
 *		Alan Cox	:	Smarter af_inet used properly.
 *		Alan Cox	:	Use new kernel side addressing.
 *		Alan Cox	:	Incorrect return on truncated datagram receive.
 *	Arnt Gulbrandsen 	:	New udp_send and stuff
 *		Alan Cox	:	Cache last socket
 *		Alan Cox	:	Route cache
 *		Jon Peatfield	:	Minor efficiency fix to sendto().
 *		Mike Shaver	:	RFC1122 checks.
 *		Alan Cox	:	Nonblocking error fix.
 *	Willy Konynenberg	:	Transparent proxying support.
 *		Mike McLagan	:	Routing by source
 *		David S. Miller	:	New socket lookup architecture.
 *					Last socket cache retained as it
 *					does have a high hit rate.
 *		Olaf Kirch	:	Don't linearise iovec on sendmsg.
 *		Andi Kleen	:	Some cleanups, cache destination entry
 *					for connect.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
 *		Melvin Smith	:	Check msg_name not msg_namelen in sendto(),
 *					return ENOTCONN for unconnected sockets (POSIX)
 *		Janos Farkas	:	don't deliver multi/broadcasts to a different
 *					bound-to-device socket
 *	Hirokazu Takahashi	:	HW checksumming for outgoing UDP
 *					datagrams.
 *	Hirokazu Takahashi	:	sendfile() on UDP works now.
 *		Arnaldo C. Melo :	convert /proc/net/udp to seq_file
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov:		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	Derek Atkins <derek@ihtfp.com>: Add Encapsulation Support
 *	James Chapman		:	Add L2TP encapsulation type.
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/igmp.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <net/tcp_states.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/route.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <trace/events/udp.h>
#include "udp_impl.h"

struct udp_table udp_table __read_mostly;
EXPORT_SYMBOL(udp_table);

long sysctl_udp_mem[3] __read_mostly;
EXPORT_SYMBOL(sysctl_udp_mem);

int sysctl_udp_rmem_min __read_mostly;
EXPORT_SYMBOL(sysctl_udp_rmem_min);

int sysctl_udp_wmem_min __read_mostly;
EXPORT_SYMBOL(sysctl_udp_wmem_min);

atomic_long_t udp_memory_allocated;
EXPORT_SYMBOL(udp_memory_allocated);

#define MAX_UDP_PORTS 65536
#define PORTS_PER_CHAIN (MAX_UDP_PORTS / UDP_HTABLE_SIZE_MIN)

static int udp_lib_lport_inuse(struct net *net, __u16 num,
			       const struct udp_hslot *hslot,
			       unsigned long *bitmap,
			       struct sock *sk,
			       int (*saddr_comp)(const struct sock *sk1,
						 const struct sock *sk2),
			       unsigned int log)
{
	struct sock *sk2;
	struct hlist_nulls_node *node;

	sk_nulls_for_each(sk2, node, &hslot->head)
		if (net_eq(sock_net(sk2), net) &&
		    sk2 != sk &&
		    (bitmap || udp_sk(sk2)->udp_port_hash == num) &&
		    (!sk2->sk_reuse || !sk->sk_reuse) &&
		    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
		     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
		    (*saddr_comp)(sk, sk2)) {
			if (bitmap)
				__set_bit(udp_sk(sk2)->udp_port_hash >> log,
					  bitmap);
			else
				return 1;
		}
	return 0;
}

/*
 * Note: we still hold spinlock of primary hash chain, so no other writer
 * can insert/delete a socket with local_port == num
 */
static int udp_lib_lport_inuse2(struct net *net, __u16 num,
			       struct udp_hslot *hslot2,
			       struct sock *sk,
			       int (*saddr_comp)(const struct sock *sk1,
						 const struct sock *sk2))
{
	struct sock *sk2;
	struct hlist_nulls_node *node;
	int res = 0;

	spin_lock(&hslot2->lock);
	udp_portaddr_for_each_entry(sk2, node, &hslot2->head)
		if (net_eq(sock_net(sk2), net) &&
		    sk2 != sk &&
		    (udp_sk(sk2)->udp_port_hash == num) &&
		    (!sk2->sk_reuse || !sk->sk_reuse) &&
		    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
		     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
		    (*saddr_comp)(sk, sk2)) {
			res = 1;
			break;
		}
	spin_unlock(&hslot2->lock);
	return res;
}

/**
 *  udp_lib_get_port  -  UDP/-Lite port lookup for IPv4 and IPv6
 *
 *  @sk:          socket struct in question
 *  @snum:        port number to look up
 *  @saddr_comp:  AF-dependent comparison of bound local IP addresses
 *  @hash2_nulladdr: AF-dependent hash value in secondary hash chains,
 *                   with NULL address
 */
int udp_lib_get_port(struct sock *sk, unsigned short snum,
		       int (*saddr_comp)(const struct sock *sk1,
					 const struct sock *sk2),
		     unsigned int hash2_nulladdr)
{
	struct udp_hslot *hslot, *hslot2;
	struct udp_table *udptable = sk->sk_prot->h.udp_table;
	int    error = 1;
	struct net *net = sock_net(sk);

	if (!snum) {
		int low, high, remaining;
		unsigned rand;
		unsigned short first, last;
		DECLARE_BITMAP(bitmap, PORTS_PER_CHAIN);

		inet_get_local_port_range(&low, &high);
		remaining = (high - low) + 1;

		rand = net_random();
		first = (((u64)rand * remaining) >> 32) + low;
		/*
		 * force rand to be an odd multiple of UDP_HTABLE_SIZE
		 */
		rand = (rand | 1) * (udptable->mask + 1);
		last = first + udptable->mask + 1;
		do {
			hslot = udp_hashslot(udptable, net, first);
			bitmap_zero(bitmap, PORTS_PER_CHAIN);
			spin_lock_bh(&hslot->lock);
			udp_lib_lport_inuse(net, snum, hslot, bitmap, sk,
					    saddr_comp, udptable->log);

			snum = first;
			/*
			 * Iterate on all possible values of snum for this hash.
			 * Using steps of an odd multiple of UDP_HTABLE_SIZE
			 * gives us randomization and full range coverage.
			 */
			do {
				if (low <= snum && snum <= high &&
				    !test_bit(snum >> udptable->log, bitmap) &&
				    !inet_is_reserved_local_port(snum))
					goto found;
				snum += rand;
			} while (snum != first);
			spin_unlock_bh(&hslot->lock);
		} while (++first != last);
		goto fail;
	} else {
		hslot = udp_hashslot(udptable, net, snum);
		spin_lock_bh(&hslot->lock);
		if (hslot->count > 10) {
			int exist;
			unsigned int slot2 = udp_sk(sk)->udp_portaddr_hash ^ snum;

			slot2          &= udptable->mask;
			hash2_nulladdr &= udptable->mask;

			hslot2 = udp_hashslot2(udptable, slot2);
			if (hslot->count < hslot2->count)
				goto scan_primary_hash;

			exist = udp_lib_lport_inuse2(net, snum, hslot2,
						     sk, saddr_comp);
			if (!exist && (hash2_nulladdr != slot2)) {
				hslot2 = udp_hashslot2(udptable, hash2_nulladdr);
				exist = udp_lib_lport_inuse2(net, snum, hslot2,
							     sk, saddr_comp);
			}
			if (exist)
				goto fail_unlock;
			else
				goto found;
		}
scan_primary_hash:
		if (udp_lib_lport_inuse(net, snum, hslot, NULL, sk,
					saddr_comp, 0))
			goto fail_unlock;
	}
found:
	inet_sk(sk)->inet_num = snum;
	udp_sk(sk)->udp_port_hash = snum;
	udp_sk(sk)->udp_portaddr_hash ^= snum;
	if (sk_unhashed(sk)) {
		sk_nulls_add_node_rcu(sk, &hslot->head);
		hslot->count++;
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);

		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
		spin_lock(&hslot2->lock);
		hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
					 &hslot2->head);
		hslot2->count++;
		spin_unlock(&hslot2->lock);
	}
	error = 0;
fail_unlock:
	spin_unlock_bh(&hslot->lock);
fail:
	return error;
}
EXPORT_SYMBOL(udp_lib_get_port);
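
/*
 * Editor's note: the sketch below is an illustration added for this
 * annotated listing, not part of the original file. It shows the
 * userspace view of the allocation above: binding to port 0 makes
 * udp_lib_get_port() pick a random free port from ip_local_port_range,
 * which getsockname() can then report back.
 */
#if 0	/* example only, never built */
#include <arpa/inet.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

static int bind_ephemeral(void)
{
	struct sockaddr_in addr;
	socklen_t alen = sizeof(addr);
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;
	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_addr.s_addr = htonl(INADDR_ANY);
	addr.sin_port = 0;	/* let the kernel choose */
	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
	    getsockname(fd, (struct sockaddr *)&addr, &alen) < 0) {
		close(fd);
		return -1;
	}
	printf("bound to port %u\n", ntohs(addr.sin_port));
	return fd;
}
#endif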

static int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2)
{
	struct inet_sock *inet1 = inet_sk(sk1), *inet2 = inet_sk(sk2);

	return (!ipv6_only_sock(sk2)  &&
		 (!inet1->inet_rcv_saddr || !inet2->inet_rcv_saddr ||
		   inet1->inet_rcv_saddr == inet2->inet_rcv_saddr));
}

static unsigned int udp4_portaddr_hash(struct net *net, __be32 saddr,
				       unsigned int port)
{
	return jhash_1word((__force u32)saddr, net_hash_mix(net)) ^ port;
}

int udp_v4_get_port(struct sock *sk, unsigned short snum)
{
	unsigned int hash2_nulladdr =
		udp4_portaddr_hash(sock_net(sk), htonl(INADDR_ANY), snum);
	unsigned int hash2_partial =
		udp4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, 0);

	/* precompute partial secondary hash */
	udp_sk(sk)->udp_portaddr_hash = hash2_partial;
	return udp_lib_get_port(sk, snum, ipv4_rcv_saddr_equal, hash2_nulladdr);
}

static inline int compute_score(struct sock *sk, struct net *net, __be32 saddr,
			 unsigned short hnum,
			 __be16 sport, __be32 daddr, __be16 dport, int dif)
{
	int score = -1;

	if (net_eq(sock_net(sk), net) && udp_sk(sk)->udp_port_hash == hnum &&
			!ipv6_only_sock(sk)) {
		struct inet_sock *inet = inet_sk(sk);

		score = (sk->sk_family == PF_INET ? 1 : 0);
		if (inet->inet_rcv_saddr) {
			if (inet->inet_rcv_saddr != daddr)
				return -1;
			score += 2;
		}
		if (inet->inet_daddr) {
			if (inet->inet_daddr != saddr)
				return -1;
			score += 2;
		}
		if (inet->inet_dport) {
			if (inet->inet_dport != sport)
				return -1;
			score += 2;
		}
		if (sk->sk_bound_dev_if) {
			if (sk->sk_bound_dev_if != dif)
				return -1;
			score += 2;
		}
	}
	return score;
}
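
/*
 * Editor's note (not in the original file): a worked example of the
 * scoring above. For a datagram to 10.0.0.1:53 arriving on ifindex 2,
 * an IPv4 socket bound to 0.0.0.0:53 scores 1 (family match only); one
 * bound to 10.0.0.1:53 scores 1 + 2 = 3; a socket that is additionally
 * connected to the sender and bound to the device scores the full
 * 1 + 2 + 2 + 2 + 2 = 9. The highest score, i.e. the most specific
 * match, wins the lookup.
 */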

/*
 * In this second variant, we check (daddr, dport) matches (inet_rcv_saddr, inet_num)
 */
#define SCORE2_MAX (1 + 2 + 2 + 2)
static inline int compute_score2(struct sock *sk, struct net *net,
				 __be32 saddr, __be16 sport,
				 __be32 daddr, unsigned int hnum, int dif)
{
	int score = -1;

	if (net_eq(sock_net(sk), net) && !ipv6_only_sock(sk)) {
		struct inet_sock *inet = inet_sk(sk);

		if (inet->inet_rcv_saddr != daddr)
			return -1;
		if (inet->inet_num != hnum)
			return -1;

		score = (sk->sk_family == PF_INET ? 1 : 0);
		if (inet->inet_daddr) {
			if (inet->inet_daddr != saddr)
				return -1;
			score += 2;
		}
		if (inet->inet_dport) {
			if (inet->inet_dport != sport)
				return -1;
			score += 2;
		}
		if (sk->sk_bound_dev_if) {
			if (sk->sk_bound_dev_if != dif)
				return -1;
			score += 2;
		}
	}
	return score;
}

/* called with rcu_read_lock() */
static struct sock *udp4_lib_lookup2(struct net *net,
		__be32 saddr, __be16 sport,
		__be32 daddr, unsigned int hnum, int dif,
		struct udp_hslot *hslot2, unsigned int slot2)
{
	struct sock *sk, *result;
	struct hlist_nulls_node *node;
	int score, badness;

begin:
	result = NULL;
	badness = -1;
	udp_portaddr_for_each_entry_rcu(sk, node, &hslot2->head) {
		score = compute_score2(sk, net, saddr, sport,
				      daddr, hnum, dif);
		if (score > badness) {
			result = sk;
			badness = score;
			if (score == SCORE2_MAX)
				goto exact_match;
		}
	}
	/*
	 * if the nulls value we got at the end of this lookup is
	 * not the expected one, we must restart lookup.
	 * We probably met an item that was moved to another chain.
	 */
	if (get_nulls_value(node) != slot2)
		goto begin;

	if (result) {
exact_match:
		if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
			result = NULL;
		else if (unlikely(compute_score2(result, net, saddr, sport,
				  daddr, hnum, dif) < badness)) {
			sock_put(result);
			goto begin;
		}
	}
	return result;
}

/* UDP is nearly always wildcarded out the wazoo; it makes no sense to try
 * harder than this. -DaveM
 */
static struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
		__be16 sport, __be32 daddr, __be16 dport,
		int dif, struct udp_table *udptable)
{
	struct sock *sk, *result;
	struct hlist_nulls_node *node;
	unsigned short hnum = ntohs(dport);
	unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask);
	struct udp_hslot *hslot2, *hslot = &udptable->hash[slot];
	int score, badness;

	rcu_read_lock();
	if (hslot->count > 10) {
		hash2 = udp4_portaddr_hash(net, daddr, hnum);
		slot2 = hash2 & udptable->mask;
		hslot2 = &udptable->hash2[slot2];
		if (hslot->count < hslot2->count)
			goto begin;

		result = udp4_lib_lookup2(net, saddr, sport,
					  daddr, hnum, dif,
					  hslot2, slot2);
		if (!result) {
			hash2 = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
			slot2 = hash2 & udptable->mask;
			hslot2 = &udptable->hash2[slot2];
			if (hslot->count < hslot2->count)
				goto begin;

			result = udp4_lib_lookup2(net, saddr, sport,
						  htonl(INADDR_ANY), hnum, dif,
						  hslot2, slot2);
		}
		rcu_read_unlock();
		return result;
	}
begin:
	result = NULL;
	badness = -1;
	sk_nulls_for_each_rcu(sk, node, &hslot->head) {
		score = compute_score(sk, net, saddr, hnum, sport,
				      daddr, dport, dif);
		if (score > badness) {
			result = sk;
			badness = score;
		}
	}
	/*
	 * if the nulls value we got at the end of this lookup is
	 * not the expected one, we must restart lookup.
	 * We probably met an item that was moved to another chain.
	 */
	if (get_nulls_value(node) != slot)
		goto begin;

	if (result) {
		if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
			result = NULL;
		else if (unlikely(compute_score(result, net, saddr, hnum, sport,
				  daddr, dport, dif) < badness)) {
			sock_put(result);
			goto begin;
		}
	}
	rcu_read_unlock();
	return result;
}

static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb,
						 __be16 sport, __be16 dport,
						 struct udp_table *udptable)
{
	struct sock *sk;
	const struct iphdr *iph = ip_hdr(skb);

	if (unlikely(sk = skb_steal_sock(skb)))
		return sk;
	else
		return __udp4_lib_lookup(dev_net(skb_dst(skb)->dev), iph->saddr, sport,
					 iph->daddr, dport, inet_iif(skb),
					 udptable);
}

struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
			     __be32 daddr, __be16 dport, int dif)
{
	return __udp4_lib_lookup(net, saddr, sport, daddr, dport, dif, &udp_table);
}
EXPORT_SYMBOL_GPL(udp4_lib_lookup);
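
/*
 * Editor's note: illustrative sketch only, not part of the original
 * file. udp4_lib_lookup() is exported for modules that must map a flow
 * back to its socket. On success the socket is returned with its
 * refcount raised (see atomic_inc_not_zero_hint above), so the caller
 * must drop it with sock_put(). The addresses below are made up.
 */
#if 0	/* example only, never built */
static void example_find_udp_sock(struct net *net)
{
	struct sock *sk = udp4_lib_lookup(net,
					  htonl(0x0a000001), htons(4500),
					  htonl(0x0a000002), htons(4500),
					  0 /* any device */);
	if (sk) {
		/* ... inspect udp_sk(sk) ... */
		sock_put(sk);	/* lookup took a reference */
	}
}
#endif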
 
static inline struct sock *udp_v4_mcast_next(struct net *net, struct sock *sk,
					     __be16 loc_port, __be32 loc_addr,
					     __be16 rmt_port, __be32 rmt_addr,
					     int dif)
{
	struct hlist_nulls_node *node;
	struct sock *s = sk;
	unsigned short hnum = ntohs(loc_port);

	sk_nulls_for_each_from(s, node) {
		struct inet_sock *inet = inet_sk(s);

		if (!net_eq(sock_net(s), net) ||
		    udp_sk(s)->udp_port_hash != hnum ||
		    (inet->inet_daddr && inet->inet_daddr != rmt_addr) ||
		    (inet->inet_dport != rmt_port && inet->inet_dport) ||
		    (inet->inet_rcv_saddr &&
		     inet->inet_rcv_saddr != loc_addr) ||
		    ipv6_only_sock(s) ||
		    (s->sk_bound_dev_if && s->sk_bound_dev_if != dif))
			continue;
		if (!ip_mc_sf_allow(s, loc_addr, rmt_addr, dif))
			continue;
		goto found;
	}
	s = NULL;
found:
	return s;
}

/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.
 * Header points to the ip header of the error packet. We move
 * on past this. Then (as it used to claim before adjustment)
 * header points to the first 8 bytes of the udp header.  We need
 * to find the appropriate port.
 */

void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
{
	struct inet_sock *inet;
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct udphdr *uh = (struct udphdr *)(skb->data+(iph->ihl<<2));
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct sock *sk;
	int harderr;
	int err;
	struct net *net = dev_net(skb->dev);

	sk = __udp4_lib_lookup(net, iph->daddr, uh->dest,
			iph->saddr, uh->source, skb->dev->ifindex, udptable);
	if (sk == NULL) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;	/* No socket for error */
	}

	err = 0;
	harderr = 0;
	inet = inet_sk(sk);

	switch (type) {
	default:
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	case ICMP_SOURCE_QUENCH:
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		harderr = 1;
		break;
	case ICMP_DEST_UNREACH:
		if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */
			if (inet->pmtudisc != IP_PMTUDISC_DONT) {
				err = EMSGSIZE;
				harderr = 1;
				break;
			}
			goto out;
		}
		err = EHOSTUNREACH;
		if (code <= NR_ICMP_UNREACH) {
			harderr = icmp_err_convert[code].fatal;
			err = icmp_err_convert[code].errno;
		}
		break;
	}

	/*
	 *      RFC1122: OK.  Passes ICMP errors back to application, as per
	 *	4.1.3.3.
	 */
	if (!inet->recverr) {
		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
			goto out;
	} else
		ip_icmp_error(sk, skb, err, uh->dest, info, (u8 *)(uh+1));

	sk->sk_err = err;
	sk->sk_error_report(sk);
out:
	sock_put(sk);
}

void udp_err(struct sk_buff *skb, u32 info)
{
	__udp4_lib_err(skb, info, &udp_table);
}
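
/*
 * Editor's note: illustrative sketch, not part of the original file.
 * Since inet->recverr gates the ip_icmp_error() call above, a sender
 * that wants per-datagram ICMP errors (e.g. port unreachable) enables
 * IP_RECVERR and then reads struct sock_extended_err records from the
 * error queue with recvmsg(MSG_ERRQUEUE).
 */
#if 0	/* example only, never built */
#include <netinet/in.h>
#include <sys/socket.h>

static void enable_recverr(int fd)
{
	int on = 1;

	setsockopt(fd, IPPROTO_IP, IP_RECVERR, &on, sizeof(on));
	/* later: recvmsg(fd, &msg, MSG_ERRQUEUE) returns the error */
}
#endif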

/*
 * Throw away all pending data and cancel the corking. Socket is locked.
 */
void udp_flush_pending_frames(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);

	if (up->pending) {
		up->len = 0;
		up->pending = 0;
		ip_flush_pending_frames(sk);
	}
}
EXPORT_SYMBOL(udp_flush_pending_frames);

/**
 * 	udp4_hwcsum  -  handle outgoing HW checksumming
 * 	@skb: 	sk_buff containing the filled-in UDP header
 * 	        (checksum field must be zeroed out)
 *	@src:	source IP address
 *	@dst:	destination IP address
 */
static void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst)
{
	struct udphdr *uh = udp_hdr(skb);
	struct sk_buff *frags = skb_shinfo(skb)->frag_list;
	int offset = skb_transport_offset(skb);
	int len = skb->len - offset;
	int hlen = len;
	__wsum csum = 0;

	if (!frags) {
		/*
		 * Only one fragment on the socket.
		 */
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~csum_tcpudp_magic(src, dst, len,
					       IPPROTO_UDP, 0);
	} else {
		/*
		 * HW checksum won't work, as there are two or more
		 * fragments on the socket, so the csums of all the
		 * sk_buffs must be combined in software.
		 */
		do {
			csum = csum_add(csum, frags->csum);
			hlen -= frags->len;
		} while ((frags = frags->next));

		csum = skb_checksum(skb, offset, hlen, csum);
		skb->ip_summed = CHECKSUM_NONE;

		uh->check = csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, csum);
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	}
}
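
/*
 * Editor's note: illustrative sketch, not part of the original file.
 * The csum_tcpudp_magic()/csum_fold() helpers used above implement the
 * RFC 1071 one's-complement sum; the folding step alone looks roughly
 * like this (example_csum_fold is a made-up name, not a kernel API):
 */
#if 0	/* example only, never built */
static unsigned short example_csum_fold(unsigned long sum)
{
	while (sum >> 16)			/* fold carries back in */
		sum = (sum & 0xffff) + (sum >> 16);
	return (unsigned short)~sum;		/* one's complement */
}
#endif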

static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4)
{
	struct sock *sk = skb->sk;
	struct inet_sock *inet = inet_sk(sk);
	struct udphdr *uh;
	int err = 0;
	int is_udplite = IS_UDPLITE(sk);
	int offset = skb_transport_offset(skb);
	int len = skb->len - offset;
	__wsum csum = 0;

	/*
	 * Create a UDP header
	 */
	uh = udp_hdr(skb);
	uh->source = inet->inet_sport;
	uh->dest = fl4->fl4_dport;
	uh->len = htons(len);
	uh->check = 0;

	if (is_udplite)  				 /*     UDP-Lite      */
		csum = udplite_csum(skb);

	else if (sk->sk_no_check == UDP_CSUM_NOXMIT) {   /* UDP csum disabled */

		skb->ip_summed = CHECKSUM_NONE;
		goto send;

	} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */

		udp4_hwcsum(skb, fl4->saddr, fl4->daddr);
		goto send;

	} else
		csum = udp_csum(skb);

	/* add protocol-dependent pseudo-header */
	uh->check = csum_tcpudp_magic(fl4->saddr, fl4->daddr, len,
				      sk->sk_protocol, csum);
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;

send:
	err = ip_send_skb(skb);
	if (err) {
		if (err == -ENOBUFS && !inet->recverr) {
			UDP_INC_STATS_USER(sock_net(sk),
					   UDP_MIB_SNDBUFERRORS, is_udplite);
			err = 0;
		}
	} else
		UDP_INC_STATS_USER(sock_net(sk),
				   UDP_MIB_OUTDATAGRAMS, is_udplite);
	return err;
}

/*
 * Push out all pending data as one UDP datagram. Socket is locked.
 */
static int udp_push_pending_frames(struct sock *sk)
{
	struct udp_sock  *up = udp_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct flowi4 *fl4 = &inet->cork.fl.u.ip4;
	struct sk_buff *skb;
	int err = 0;

	skb = ip_finish_skb(sk, fl4);
	if (!skb)
		goto out;

	err = udp_send_skb(skb, fl4);

out:
	up->len = 0;
	up->pending = 0;
	return err;
}
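
/*
 * Editor's note: illustrative sketch, not part of the original file.
 * From userspace the corking machinery above is driven with UDP_CORK
 * (or per-call MSG_MORE): several send() calls are merged into one
 * datagram, and clearing the option ends in udp_push_pending_frames().
 * The socket is assumed to be connected.
 */
#if 0	/* example only, never built */
#include <netinet/in.h>
#include <netinet/udp.h>
#include <sys/socket.h>

static void send_corked(int fd, const char *a, size_t alen,
			const char *b, size_t blen)
{
	int on = 1, off = 0;

	setsockopt(fd, IPPROTO_UDP, UDP_CORK, &on, sizeof(on));
	send(fd, a, alen, 0);	/* appended, not yet transmitted */
	send(fd, b, blen, 0);
	setsockopt(fd, IPPROTO_UDP, UDP_CORK, &off, sizeof(off));
}
#endif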

int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		size_t len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct udp_sock *up = udp_sk(sk);
	struct flowi4 fl4_stack;
	struct flowi4 *fl4;
	int ulen = len;
	struct ipcm_cookie ipc;
	struct rtable *rt = NULL;
	int free = 0;
	int connected = 0;
	__be32 daddr, faddr, saddr;
	__be16 dport;
	u8  tos;
	int err, is_udplite = IS_UDPLITE(sk);
	int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
	struct sk_buff *skb;
	struct ip_options_data opt_copy;

	if (len > 0xFFFF)
		return -EMSGSIZE;

	/*
	 *	Check the flags.
	 */

	if (msg->msg_flags & MSG_OOB) /* Mirror BSD error message compatibility */
		return -EOPNOTSUPP;

	ipc.opt = NULL;
	ipc.tx_flags = 0;

	getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;

	fl4 = &inet->cork.fl.u.ip4;
	if (up->pending) {
		/*
		 * There are pending frames.
		 * The socket lock must be held while it's corked.
		 */
		lock_sock(sk);
		if (likely(up->pending)) {
			if (unlikely(up->pending != AF_INET)) {
				release_sock(sk);
				return -EINVAL;
			}
			goto do_append_data;
		}
		release_sock(sk);
	}
	ulen += sizeof(struct udphdr);

	/*
	 *	Get and verify the address.
	 */
	if (msg->msg_name) {
		struct sockaddr_in * usin = (struct sockaddr_in *)msg->msg_name;
		if (msg->msg_namelen < sizeof(*usin))
			return -EINVAL;
		if (usin->sin_family != AF_INET) {
			if (usin->sin_family != AF_UNSPEC)
				return -EAFNOSUPPORT;
		}

		daddr = usin->sin_addr.s_addr;
		dport = usin->sin_port;
		if (dport == 0)
			return -EINVAL;
	} else {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;
		daddr = inet->inet_daddr;
		dport = inet->inet_dport;
		/* Open fast path for connected socket.
		   Route will not be used if at least one option is set.
		 */
		connected = 1;
	}
	ipc.addr = inet->inet_saddr;

	ipc.oif = sk->sk_bound_dev_if;
	err = sock_tx_timestamp(sk, &ipc.tx_flags);
	if (err)
		return err;
	if (msg->msg_controllen) {
		err = ip_cmsg_send(sock_net(sk), msg, &ipc);
		if (err)
			return err;
		if (ipc.opt)
			free = 1;
		connected = 0;
	}
	if (!ipc.opt) {
		struct ip_options_rcu *inet_opt;

		rcu_read_lock();
		inet_opt = rcu_dereference(inet->inet_opt);
		if (inet_opt) {
			memcpy(&opt_copy, inet_opt,
			       sizeof(*inet_opt) + inet_opt->opt.optlen);
			ipc.opt = &opt_copy.opt;
		}
		rcu_read_unlock();
	}

	saddr = ipc.addr;
	ipc.addr = faddr = daddr;

	if (ipc.opt && ipc.opt->opt.srr) {
		if (!daddr)
			return -EINVAL;
		faddr = ipc.opt->opt.faddr;
		connected = 0;
	}
	tos = RT_TOS(inet->tos);
	if (sock_flag(sk, SOCK_LOCALROUTE) ||
	    (msg->msg_flags & MSG_DONTROUTE) ||
	    (ipc.opt && ipc.opt->opt.is_strictroute)) {
		tos |= RTO_ONLINK;
		connected = 0;
	}

	if (ipv4_is_multicast(daddr)) {
		if (!ipc.oif)
			ipc.oif = inet->mc_index;
		if (!saddr)
			saddr = inet->mc_addr;
		connected = 0;
	}

	if (connected)
		rt = (struct rtable *)sk_dst_check(sk, 0);

	if (rt == NULL) {
		struct net *net = sock_net(sk);

		fl4 = &fl4_stack;
		flowi4_init_output(fl4, ipc.oif, sk->sk_mark, tos,
				   RT_SCOPE_UNIVERSE, sk->sk_protocol,
				   inet_sk_flowi_flags(sk)|FLOWI_FLAG_CAN_SLEEP,
				   faddr, saddr, dport, inet->inet_sport);

		security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
		rt = ip_route_output_flow(net, fl4, sk);
		if (IS_ERR(rt)) {
			err = PTR_ERR(rt);
			rt = NULL;
			if (err == -ENETUNREACH)
				IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
			goto out;
		}

		err = -EACCES;
		if ((rt->rt_flags & RTCF_BROADCAST) &&
		    !sock_flag(sk, SOCK_BROADCAST))
			goto out;
		if (connected)
			sk_dst_set(sk, dst_clone(&rt->dst));
	}

	if (msg->msg_flags&MSG_CONFIRM)
		goto do_confirm;
back_from_confirm:

	saddr = fl4->saddr;
	if (!ipc.addr)
		daddr = ipc.addr = fl4->daddr;

	/* Lockless fast path for the non-corking case. */
	if (!corkreq) {
		skb = ip_make_skb(sk, fl4, getfrag, msg->msg_iov, ulen,
				  sizeof(struct udphdr), &ipc, &rt,
				  msg->msg_flags);
		err = PTR_ERR(skb);
		if (skb && !IS_ERR(skb))
			err = udp_send_skb(skb, fl4);
		goto out;
	}

	lock_sock(sk);
	if (unlikely(up->pending)) {
		/* The socket is already corked while preparing it. */
		/* ... which is an evident application bug. --ANK */
		release_sock(sk);

		LIMIT_NETDEBUG(KERN_DEBUG "udp cork app bug 2\n");
		err = -EINVAL;
		goto out;
	}
	/*
	 *	Now cork the socket to pend data.
	 */
	fl4 = &inet->cork.fl.u.ip4;
	fl4->daddr = daddr;
	fl4->saddr = saddr;
	fl4->fl4_dport = dport;
	fl4->fl4_sport = inet->inet_sport;
	up->pending = AF_INET;

do_append_data:
	up->len += ulen;
	err = ip_append_data(sk, fl4, getfrag, msg->msg_iov, ulen,
			     sizeof(struct udphdr), &ipc, &rt,
			     corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
	if (err)
		udp_flush_pending_frames(sk);
	else if (!corkreq)
		err = udp_push_pending_frames(sk);
	else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
		up->pending = 0;
	release_sock(sk);

out:
	ip_rt_put(rt);
	if (free)
		kfree(ipc.opt);
	if (!err)
		return len;
	/*
	 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space.  Reporting
	 * ENOBUFS might not be good (it's not tunable per se), but otherwise
	 * we don't have a good statistic (IpOutDiscards but it can be too many
	 * things).  We could add another new stat but at least for now that
	 * seems like overkill.
	 */
	if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
		UDP_INC_STATS_USER(sock_net(sk),
				UDP_MIB_SNDBUFERRORS, is_udplite);
	}
	return err;

do_confirm:
	dst_confirm(&rt->dst);
	if (!(msg->msg_flags&MSG_PROBE) || len)
		goto back_from_confirm;
	err = 0;
	goto out;
}
EXPORT_SYMBOL(udp_sendmsg);
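
/*
 * Editor's note: illustrative sketch, not part of the original file.
 * ip_cmsg_send(), called from udp_sendmsg() above, parses ancillary
 * data such as IP_PKTINFO; this is how a sender pins the source
 * address of a single datagram without bind():
 */
#if 0	/* example only, never built */
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

static ssize_t sendto_from(int fd, const void *buf, size_t len,
			   const struct sockaddr_in *dst, struct in_addr src)
{
	char cbuf[CMSG_SPACE(sizeof(struct in_pktinfo))];
	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
	struct msghdr msg = {
		.msg_name	= (void *)dst,
		.msg_namelen	= sizeof(*dst),
		.msg_iov	= &iov,
		.msg_iovlen	= 1,
		.msg_control	= cbuf,
		.msg_controllen	= sizeof(cbuf),
	};
	struct cmsghdr *cm = CMSG_FIRSTHDR(&msg);
	struct in_pktinfo *pi;

	memset(cbuf, 0, sizeof(cbuf));
	cm->cmsg_level = IPPROTO_IP;
	cm->cmsg_type  = IP_PKTINFO;
	cm->cmsg_len   = CMSG_LEN(sizeof(*pi));
	pi = (struct in_pktinfo *)CMSG_DATA(cm);
	pi->ipi_spec_dst = src;	/* source address for this send */
	return sendmsg(fd, &msg, 0);
}
#endif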

int udp_sendpage(struct sock *sk, struct page *page, int offset,
		 size_t size, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct udp_sock *up = udp_sk(sk);
	int ret;

	if (!up->pending) {
		struct msghdr msg = {	.msg_flags = flags|MSG_MORE };

		/* Call udp_sendmsg to specify destination address which
		 * sendpage interface can't pass.
		 * This will succeed only when the socket is connected.
		 */
		ret = udp_sendmsg(NULL, sk, &msg, 0);
		if (ret < 0)
			return ret;
	}

	lock_sock(sk);

	if (unlikely(!up->pending)) {
		release_sock(sk);

		LIMIT_NETDEBUG(KERN_DEBUG "udp cork app bug 3\n");
		return -EINVAL;
	}

	ret = ip_append_page(sk, &inet->cork.fl.u.ip4,
			     page, offset, size, flags);
	if (ret == -EOPNOTSUPP) {
		release_sock(sk);
		return sock_no_sendpage(sk->sk_socket, page, offset,
					size, flags);
	}
	if (ret < 0) {
		udp_flush_pending_frames(sk);
		goto out;
	}

	up->len += size;
	if (!(up->corkflag || (flags&MSG_MORE)))
		ret = udp_push_pending_frames(sk);
	if (!ret)
		ret = size;
out:
	release_sock(sk);
	return ret;
}

/**
 *	first_packet_length	- return length of first packet in receive queue
 *	@sk: socket
 *
 *	Drops all frames with bad checksums until a valid one is found.
 *	Returns the length of the found skb, or 0 if none is found.
 */
static unsigned int first_packet_length(struct sock *sk)
{
	struct sk_buff_head list_kill, *rcvq = &sk->sk_receive_queue;
	struct sk_buff *skb;
	unsigned int res;

	__skb_queue_head_init(&list_kill);

	spin_lock_bh(&rcvq->lock);
	while ((skb = skb_peek(rcvq)) != NULL &&
		udp_lib_checksum_complete(skb)) {
		UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
				 IS_UDPLITE(sk));
		atomic_inc(&sk->sk_drops);
		__skb_unlink(skb, rcvq);
		__skb_queue_tail(&list_kill, skb);
	}
	res = skb ? skb->len : 0;
	spin_unlock_bh(&rcvq->lock);

	if (!skb_queue_empty(&list_kill)) {
		bool slow = lock_sock_fast(sk);

		__skb_queue_purge(&list_kill);
		sk_mem_reclaim_partial(sk);
		unlock_sock_fast(sk, slow);
	}
	return res;
}

/*
 *	IOCTL requests applicable to the UDP protocol
 */

int udp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	switch (cmd) {
	case SIOCOUTQ:
	{
		int amount = sk_wmem_alloc_get(sk);

		return put_user(amount, (int __user *)arg);
	}

	case SIOCINQ:
	{
		unsigned int amount = first_packet_length(sk);

		if (amount)
			/*
			 * We will only return the amount
			 * of this packet since that is all
			 * that will be read.
			 */
			amount -= sizeof(struct udphdr);

		return put_user(amount, (int __user *)arg);
	}

	default:
		return -ENOIOCTLCMD;
	}

	return 0;
}
EXPORT_SYMBOL(udp_ioctl);
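
/*
 * Editor's note: illustrative sketch, not part of the original file.
 * The two ioctls above are reached from userspace like this: SIOCINQ
 * reports the payload length of the next datagram (UDP header already
 * subtracted), SIOCOUTQ the bytes still queued for transmission.
 */
#if 0	/* example only, never built */
#include <sys/ioctl.h>
#include <linux/sockios.h>

static void example_queue_sizes(int fd)
{
	int inq = 0, outq = 0;

	ioctl(fd, SIOCINQ, &inq);
	ioctl(fd, SIOCOUTQ, &outq);
}
#endif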

/*
 * 	This should be easy: if there is something there, we
 * 	return it; otherwise we block.
 */

int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		size_t len, int noblock, int flags, int *addr_len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
	struct sk_buff *skb;
	unsigned int ulen;
	int peeked;
	int err;
	int is_udplite = IS_UDPLITE(sk);
	bool slow;

	/*
	 *	Check any passed addresses
	 */
	if (addr_len)
		*addr_len = sizeof(*sin);

	if (flags & MSG_ERRQUEUE)
		return ip_recv_error(sk, msg, len);

try_again:
	skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
				  &peeked, &err);
	if (!skb)
		goto out;

	ulen = skb->len - sizeof(struct udphdr);
	if (len > ulen)
		len = ulen;
	else if (len < ulen)
		msg->msg_flags |= MSG_TRUNC;

	/*
	 * If checksum is needed at all, try to do it while copying the
	 * data.  If the data is truncated, or if we only want a partial
	 * coverage checksum (UDP-Lite), do it before the copy.
	 */

	if (len < ulen || UDP_SKB_CB(skb)->partial_cov) {
		if (udp_lib_checksum_complete(skb))
			goto csum_copy_err;
	}

	if (skb_csum_unnecessary(skb))
		err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
					      msg->msg_iov, len);
	else {
		err = skb_copy_and_csum_datagram_iovec(skb,
						       sizeof(struct udphdr),
						       msg->msg_iov);

		if (err == -EINVAL)
			goto csum_copy_err;
	}

	if (err)
		goto out_free;

	if (!peeked)
		UDP_INC_STATS_USER(sock_net(sk),
				UDP_MIB_INDATAGRAMS, is_udplite);

	sock_recv_ts_and_drops(msg, sk, skb);

	/* Copy the address. */
	if (sin) {
		sin->sin_family = AF_INET;
		sin->sin_port = udp_hdr(skb)->source;
		sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
		memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
	}
	if (inet->cmsg_flags)
		ip_cmsg_recv(msg, skb);

	err = len;
	if (flags & MSG_TRUNC)
		err = ulen;

out_free:
	skb_free_datagram_locked(sk, skb);
out:
	return err;

csum_copy_err:
	slow = lock_sock_fast(sk);
	if (!skb_kill_datagram(sk, skb, flags))
		UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
	unlock_sock_fast(sk, slow);

	if (noblock)
		return -EAGAIN;

	/* starting over for a new packet */
	msg->msg_flags &= ~MSG_TRUNC;
	goto try_again;
}
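
/*
 * Editor's note: illustrative sketch, not part of the original file.
 * The MSG_TRUNC handling above lets a receiver learn the real size of
 * the next datagram: with MSG_TRUNC the return value is the full
 * length even when the buffer is smaller, and MSG_PEEK leaves the
 * datagram queued for a later read.
 */
#if 0	/* example only, never built */
#include <sys/socket.h>

static ssize_t next_datagram_len(int fd)
{
	char byte;

	return recv(fd, &byte, 1, MSG_PEEK | MSG_TRUNC);
}
#endif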

int udp_disconnect(struct sock *sk, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	/*
	 *	1003.1g - break association.
	 */

	sk->sk_state = TCP_CLOSE;
	inet->inet_daddr = 0;
	inet->inet_dport = 0;
	sock_rps_save_rxhash(sk, 0);
	sk->sk_bound_dev_if = 0;
	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
		inet_reset_saddr(sk);

	if (!(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) {
		sk->sk_prot->unhash(sk);
		inet->inet_sport = 0;
	}
	sk_dst_reset(sk);
	return 0;
}
EXPORT_SYMBOL(udp_disconnect);
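
/*
 * Editor's note: illustrative sketch, not part of the original file.
 * udp_disconnect() is reached from userspace by calling connect() with
 * an AF_UNSPEC address, which breaks the association made by an
 * earlier connect():
 */
#if 0	/* example only, never built */
#include <string.h>
#include <sys/socket.h>

static int udp_unconnect(int fd)
{
	struct sockaddr sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_family = AF_UNSPEC;
	return connect(fd, &sa, sizeof(sa));
}
#endif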

void udp_lib_unhash(struct sock *sk)
{
	if (sk_hashed(sk)) {
		struct udp_table *udptable = sk->sk_prot->h.udp_table;
		struct udp_hslot *hslot, *hslot2;

		hslot  = udp_hashslot(udptable, sock_net(sk),
				      udp_sk(sk)->udp_port_hash);
		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);

		spin_lock_bh(&hslot->lock);
		if (sk_nulls_del_node_init_rcu(sk)) {
			hslot->count--;
			inet_sk(sk)->inet_num = 0;
			sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);

			spin_lock(&hslot2->lock);
			hlist_nulls_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
			hslot2->count--;
			spin_unlock(&hslot2->lock);
		}
		spin_unlock_bh(&hslot->lock);
	}
}
EXPORT_SYMBOL(udp_lib_unhash);

/*
 * inet_rcv_saddr was changed; we must rehash the secondary hash
 */
void udp_lib_rehash(struct sock *sk, u16 newhash)
{
	if (sk_hashed(sk)) {
		struct udp_table *udptable = sk->sk_prot->h.udp_table;
		struct udp_hslot *hslot, *hslot2, *nhslot2;

		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
		nhslot2 = udp_hashslot2(udptable, newhash);
		udp_sk(sk)->udp_portaddr_hash = newhash;
		if (hslot2 != nhslot2) {
			hslot = udp_hashslot(udptable, sock_net(sk),
					     udp_sk(sk)->udp_port_hash);
			/* we must lock primary chain too */
			spin_lock_bh(&hslot->lock);

			spin_lock(&hslot2->lock);
			hlist_nulls_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
			hslot2->count--;
			spin_unlock(&hslot2->lock);

			spin_lock(&nhslot2->lock);
			hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
						 &nhslot2->head);
			nhslot2->count++;
			spin_unlock(&nhslot2->lock);

			spin_unlock_bh(&hslot->lock);
		}
	}
}
EXPORT_SYMBOL(udp_lib_rehash);

static void udp_v4_rehash(struct sock *sk)
{
	u16 new_hash = udp4_portaddr_hash(sock_net(sk),
					  inet_sk(sk)->inet_rcv_saddr,
					  inet_sk(sk)->inet_num);
	udp_lib_rehash(sk, new_hash);
}

static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int rc;

	if (inet_sk(sk)->inet_daddr)
		sock_rps_save_rxhash(sk, skb->rxhash);

	rc = ip_queue_rcv_skb(sk, skb);
	if (rc < 0) {
		int is_udplite = IS_UDPLITE(sk);

		/* Note that an ENOMEM error is charged twice */
		if (rc == -ENOMEM)
			UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
					 is_udplite);
		UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
		kfree_skb(skb);
		trace_udp_fail_queue_rcv_skb(rc, sk);
		return -1;
	}

	return 0;

}

/* returns:
 *  -1: error
 *   0: success
 *  >0: "udp encap" protocol resubmission
 *
 * Note that in the success and error cases, the skb is assumed to
 * have either been requeued or freed.
 */
int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	struct udp_sock *up = udp_sk(sk);
	int rc;
	int is_udplite = IS_UDPLITE(sk);

	/*
	 *	Charge it to the socket, dropping if the queue is full.
	 */
	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto drop;
	nf_reset(skb);

	if (up->encap_type) {
		/*
		 * This is an encapsulation socket so pass the skb to
		 * the socket's udp_encap_rcv() hook. Otherwise, just
		 * fall through and pass this up the UDP socket.
		 * up->encap_rcv() returns the following value:
		 * =0 if skb was successfully passed to the encap
		 *    handler or was discarded by it.
		 * >0 if skb should be passed on to UDP.
		 * <0 if skb should be resubmitted as proto -N
		 */

		/* if we're overly short, let UDP handle it */
		if (skb->len > sizeof(struct udphdr) &&
		    up->encap_rcv != NULL) {
			int ret;

			ret = (*up->encap_rcv)(sk, skb);
			if (ret <= 0) {
				UDP_INC_STATS_BH(sock_net(sk),
						 UDP_MIB_INDATAGRAMS,
						 is_udplite);
				return -ret;
			}
		}

		/* FALLTHROUGH -- it's a UDP Packet */
	}

	/*
	 * 	UDP-Lite specific tests, ignored on UDP sockets
	 */
	if ((is_udplite & UDPLITE_RECV_CC)  &&  UDP_SKB_CB(skb)->partial_cov) {

		/*
		 * MIB statistics other than incrementing the error count are
		 * disabled for the following two types of errors: these depend
		 * on the application settings, not on the functioning of the
		 * protocol stack as such.
		 *
		 * RFC 3828 here recommends (sec 3.3): "There should also be a
		 * way ... to ... at least let the receiving application block
		 * delivery of packets with coverage values less than a value
		 * provided by the application."
		 */
		if (up->pcrlen == 0) {          /* full coverage was set  */
			LIMIT_NETDEBUG(KERN_WARNING "UDPLITE: partial coverage "
				"%d while full coverage %d requested\n",
				UDP_SKB_CB(skb)->cscov, skb->len);
			goto drop;
		}
		/* The next case involves violating the min. coverage requested
		 * by the receiver. This is subtle: if receiver wants x and x is
		 * greater than the buffersize/MTU then receiver will complain
		 * that it wants x while sender emits packets of smaller size y.
		 * Therefore the above ...()->partial_cov statement is essential.
		 */
		if (UDP_SKB_CB(skb)->cscov  <  up->pcrlen) {
			LIMIT_NETDEBUG(KERN_WARNING
				"UDPLITE: coverage %d too small, need min %d\n",
				UDP_SKB_CB(skb)->cscov, up->pcrlen);
			goto drop;
		}
	}

	if (rcu_dereference_raw(sk->sk_filter)) {
		if (udp_lib_checksum_complete(skb))
			goto drop;
	}


	if (sk_rcvqueues_full(sk, skb))
		goto drop;

	rc = 0;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk))
		rc = __udp_queue_rcv_skb(sk, skb);
	else if (sk_add_backlog(sk, skb)) {
		bh_unlock_sock(sk);
		goto drop;
	}
	bh_unlock_sock(sk);

	return rc;

drop:
	UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
	atomic_inc(&sk->sk_drops);
	kfree_skb(skb);
	return -1;
}


static void flush_stack(struct sock **stack, unsigned int count,
			struct sk_buff *skb, unsigned int final)
{
	unsigned int i;
	struct sk_buff *skb1 = NULL;
	struct sock *sk;

	for (i = 0; i < count; i++) {
		sk = stack[i];
		if (likely(skb1 == NULL))
			skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);

		if (!skb1) {
			atomic_inc(&sk->sk_drops);
			UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
					 IS_UDPLITE(sk));
			UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
					 IS_UDPLITE(sk));
		}

		if (skb1 && udp_queue_rcv_skb(sk, skb1) <= 0)
			skb1 = NULL;
	}
	if (unlikely(skb1))
		kfree_skb(skb1);
}

/*
 *	Multicasts and broadcasts go to each listener.
 *
 *	Note: called only from the BH handler context.
 */
static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
				    struct udphdr  *uh,
				    __be32 saddr, __be32 daddr,
				    struct udp_table *udptable)
{
	struct sock *sk, *stack[256 / sizeof(struct sock *)];
	struct udp_hslot *hslot = udp_hashslot(udptable, net, ntohs(uh->dest));
	int dif;
	unsigned int i, count = 0;

	spin_lock(&hslot->lock);
	sk = sk_nulls_head(&hslot->head);
	dif = skb->dev->ifindex;
	sk = udp_v4_mcast_next(net, sk, uh->dest, daddr, uh->source, saddr, dif);
	while (sk) {
		stack[count++] = sk;
		sk = udp_v4_mcast_next(net, sk_nulls_next(sk), uh->dest,
				       daddr, uh->source, saddr, dif);
		if (unlikely(count == ARRAY_SIZE(stack))) {
			if (!sk)
				break;
			flush_stack(stack, count, skb, ~0);
			count = 0;
		}
	}
	/*
	 * before releasing chain lock, we must take a reference on sockets
	 */
	for (i = 0; i < count; i++)
		sock_hold(stack[i]);

	spin_unlock(&hslot->lock);

	/*
	 * do the slow work with no lock held
	 */
	if (count) {
		flush_stack(stack, count, skb, count - 1);

		for (i = 0; i < count; i++)
			sock_put(stack[i]);
	} else {
		kfree_skb(skb);
	}
	return 0;
}
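
/*
 * Editor's note: illustrative sketch, not part of the original file.
 * A socket takes part in the multicast delivery walk above once it is
 * bound to the group's port and has joined the group; the group
 * address below is a made-up example.
 */
#if 0	/* example only, never built */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>

static int join_group(int fd)
{
	struct ip_mreq mreq = {
		.imr_multiaddr.s_addr = inet_addr("239.1.1.1"),
		.imr_interface.s_addr = htonl(INADDR_ANY),
	};

	return setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP,
			  &mreq, sizeof(mreq));
}
#endif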

/* Initialize UDP checksum. If exited with zero value (success),
 * CHECKSUM_UNNECESSARY means that no more checks are required.
 * Otherwise, csum completion requires checksumming the packet body,
 * including the udp header, and folding it into skb->csum.
 */
static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
				 int proto)
{
	const struct iphdr *iph;
	int err;

	UDP_SKB_CB(skb)->partial_cov = 0;
	UDP_SKB_CB(skb)->cscov = skb->len;

	if (proto == IPPROTO_UDPLITE) {
		err = udplite_checksum_init(skb, uh);
		if (err)
			return err;
	}

	iph = ip_hdr(skb);
	if (uh->check == 0) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len,
				      proto, skb->csum))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
	if (!skb_csum_unnecessary(skb))
		skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
					       skb->len, proto, 0);
	/* Probably, we should checksum udp header (it should be in cache
	 * in any case) and data in tiny packets (< rx copybreak).
	 */

	return 0;
}

/*
 *	All we need to do is get the socket, and then do a checksum.
 */

int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
		   int proto)
{
	struct sock *sk;
	struct udphdr *uh;
	unsigned short ulen;
	struct rtable *rt = skb_rtable(skb);
	__be32 saddr, daddr;
	struct net *net = dev_net(skb->dev);

	/*
	 *  Validate the packet.
	 */
	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
		goto drop;		/* No space for header. */

	uh   = udp_hdr(skb);
	ulen = ntohs(uh->len);
	saddr = ip_hdr(skb)->saddr;
	daddr = ip_hdr(skb)->daddr;

	if (ulen > skb->len)
		goto short_packet;

	if (proto == IPPROTO_UDP) {
		/* UDP validates ulen. */
		if (ulen < sizeof(*uh) || pskb_trim_rcsum(skb, ulen))
			goto short_packet;
		uh = udp_hdr(skb);
	}

	if (udp4_csum_init(skb, uh, proto))
		goto csum_error;

	if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
		return __udp4_lib_mcast_deliver(net, skb, uh,
				saddr, daddr, udptable);

	sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable);

	if (sk != NULL) {
		int ret = udp_queue_rcv_skb(sk, skb);
		sock_put(sk);

		/* a return value > 0 means to resubmit the input, but
		 * it wants the return to be -protocol, or 0
		 */
		if (ret > 0)
			return -ret;
		return 0;
	}

	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto drop;
	nf_reset(skb);

	/* No socket. Drop packet silently, if checksum is wrong */
	if (udp_lib_checksum_complete(skb))
		goto csum_error;

	UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);

	/*
	 * Hmm.  We got a UDP packet to a port to which we
	 * don't wanna listen.  Ignore it.
	 */
	kfree_skb(skb);
	return 0;

short_packet:
	LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: short packet: From %pI4:%u %d/%d to %pI4:%u\n",
		       proto == IPPROTO_UDPLITE ? "-Lite" : "",
		       &saddr,
		       ntohs(uh->source),
		       ulen,
		       skb->len,
		       &daddr,
		       ntohs(uh->dest));
	goto drop;

csum_error:
	/*
	 * RFC1122: OK.  Discards the bad packet silently (as far as
	 * the network is concerned, anyway) as per 4.1.3.4 (MUST).
	 */
	LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: bad checksum. From %pI4:%u to %pI4:%u ulen %d\n",
		       proto == IPPROTO_UDPLITE ? "-Lite" : "",
		       &saddr,
		       ntohs(uh->source),
		       &daddr,
		       ntohs(uh->dest),
		       ulen);
drop:
	UDP_INC_STATS_BH(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
	kfree_skb(skb);
	return 0;
}

int udp_rcv(struct sk_buff *skb)
{
	return __udp4_lib_rcv(skb, &udp_table, IPPROTO_UDP);
}

void udp_destroy_sock(struct sock *sk)
{
	bool slow = lock_sock_fast(sk);
	udp_flush_pending_frames(sk);
	unlock_sock_fast(sk, slow);
}

/*
 *	Socket option code for UDP
 */
int udp_lib_setsockopt(struct sock *sk, int level, int optname,
		       char __user *optval, unsigned int optlen,
		       int (*push_pending_frames)(struct sock *))
{
	struct udp_sock *up = udp_sk(sk);
	int val;
	int err = 0;
	int is_udplite = IS_UDPLITE(sk);

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	switch (optname) {
	case UDP_CORK:
		if (val != 0) {
			up->corkflag = 1;
		} else {
			up->corkflag = 0;
			lock_sock(sk);
			(*push_pending_frames)(sk);
			release_sock(sk);
		}
		break;

	case UDP_ENCAP:
		switch (val) {
		case 0:
		case UDP_ENCAP_ESPINUDP:
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			up->encap_rcv = xfrm4_udp_encap_rcv;
			/* FALLTHROUGH */
		case UDP_ENCAP_L2TPINUDP:
			up->encap_type = val;
			break;
		default:
			err = -ENOPROTOOPT;
			break;
		}
		break;

	/*
	 * 	UDP-Lite's partial checksum coverage (RFC 3828).
	 */
	/* The sender sets actual checksum coverage length via this option.
	 * The case coverage > packet length is handled by send module. */
	case UDPLITE_SEND_CSCOV:
		if (!is_udplite)         /* Disable the option on UDP sockets */
			return -ENOPROTOOPT;
		if (val != 0 && val < 8) /* Illegal coverage: use default (8) */
			val = 8;
		else if (val > USHRT_MAX)
			val = USHRT_MAX;
		up->pcslen = val;
		up->pcflag |= UDPLITE_SEND_CC;
		break;

	/* The receiver specifies a minimum checksum coverage value. To make
	 * sense, this should be set to at least 8 (as done below). If zero is
	 * used, this again means full checksum coverage.                     */
	case UDPLITE_RECV_CSCOV:
		if (!is_udplite)         /* Disable the option on UDP sockets */
			return -ENOPROTOOPT;
		if (val != 0 && val < 8) /* Avoid silly minimal values.       */
			val = 8;
		else if (val > USHRT_MAX)
			val = USHRT_MAX;
		up->pcrlen = val;
		up->pcflag |= UDPLITE_RECV_CC;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	return err;
}
EXPORT_SYMBOL(udp_lib_setsockopt);
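
/*
 * Editor's note: illustrative sketch, not part of the original file.
 * The UDP-Lite coverage options above are used like this on a socket
 * created with IPPROTO_UDPLITE; a coverage of 20 checksums only the
 * first 20 bytes of each datagram (counted from the UDP-Lite header,
 * per RFC 3828) and rejects inbound datagrams covering less.
 */
#if 0	/* example only, never built */
#include <netinet/in.h>
#include <sys/socket.h>

#ifndef IPPROTO_UDPLITE
#define IPPROTO_UDPLITE		136
#endif
#ifndef UDPLITE_SEND_CSCOV
#define UDPLITE_SEND_CSCOV	10
#define UDPLITE_RECV_CSCOV	11
#endif

static void set_udplite_coverage(int fd)
{
	int cov = 20;

	setsockopt(fd, IPPROTO_UDPLITE, UDPLITE_SEND_CSCOV,
		   &cov, sizeof(cov));
	setsockopt(fd, IPPROTO_UDPLITE, UDPLITE_RECV_CSCOV,
		   &cov, sizeof(cov));
}
#endif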

int udp_setsockopt(struct sock *sk, int level, int optname,
		   char __user *optval, unsigned int optlen)
{
	if (level == SOL_UDP  ||  level == SOL_UDPLITE)
		return udp_lib_setsockopt(sk, level, optname, optval, optlen,
					  udp_push_pending_frames);
	return ip_setsockopt(sk, level, optname, optval, optlen);
}

#ifdef CONFIG_COMPAT
int compat_udp_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	if (level == SOL_UDP  ||  level == SOL_UDPLITE)
		return udp_lib_setsockopt(sk, level, optname, optval, optlen,
					  udp_push_pending_frames);
	return compat_ip_setsockopt(sk, level, optname, optval, optlen);
}
#endif

int udp_lib_getsockopt(struct sock *sk, int level, int optname,
		       char __user *optval, int __user *optlen)
{
	struct udp_sock *up = udp_sk(sk);
	int val, len;

	if (get_user(len, optlen))
		return -EFAULT;

	len = min_t(unsigned int, len, sizeof(int));

	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case UDP_CORK:
		val = up->corkflag;
		break;

	case UDP_ENCAP:
		val = up->encap_type;
		break;

	/* The following two cannot be changed on UDP sockets, the return is
	 * always 0 (which corresponds to the full checksum coverage of UDP). */
	case UDPLITE_SEND_CSCOV:
		val = up->pcslen;
		break;

	case UDPLITE_RECV_CSCOV:
		val = up->pcrlen;
		break;

	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL(udp_lib_getsockopt);
1875
1876int udp_getsockopt(struct sock *sk, int level, int optname,
1877		   char __user *optval, int __user *optlen)
1878{
1879	if (level == SOL_UDP  ||  level == SOL_UDPLITE)
1880		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
1881	return ip_getsockopt(sk, level, optname, optval, optlen);
1882}
1883
1884#ifdef CONFIG_COMPAT
1885int compat_udp_getsockopt(struct sock *sk, int level, int optname,
1886				 char __user *optval, int __user *optlen)
1887{
1888	if (level == SOL_UDP  ||  level == SOL_UDPLITE)
1889		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
1890	return compat_ip_getsockopt(sk, level, optname, optval, optlen);
1891}
1892#endif
1893/**
1894 * 	udp_poll - wait for a UDP event.
1895 *	@file: file struct
1896 *	@sock: socket
1897 *	@wait: poll table
1898 *
1899 *	This is the same as datagram poll, except for the special case of
1900 *	blocking sockets. If an application is using a blocking fd and a
1901 *	packet with a checksum error is in the queue, select() can report
1902 *	the socket readable even though the next read would block. Add
1903 *	special case code to work around these arguably broken
1904 *	applications.
1905 */
1906unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait)
1907{
1908	unsigned int mask = datagram_poll(file, sock, wait);
1909	struct sock *sk = sock->sk;
1910
1911	/* Check for false positives due to checksum errors */
1912	if ((mask & POLLRDNORM) && !(file->f_flags & O_NONBLOCK) &&
1913	    !(sk->sk_shutdown & RCV_SHUTDOWN) && !first_packet_length(sk))
1914		mask &= ~(POLLIN | POLLRDNORM);
1915
1916	return mask;
1917
1918}
1919EXPORT_SYMBOL(udp_poll);
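
The application-side counterpart of this workaround is to combine poll()/select() with a non-blocking socket, so that a wakeup for a datagram that is later dropped over a bad checksum ends in EAGAIN instead of a stalled read. A sketch (fd, buf and len assumed; error handling omitted):

	#include <errno.h>
	#include <fcntl.h>
	#include <poll.h>
	#include <sys/socket.h>

	void drain(int fd, char *buf, size_t len)
	{
		struct pollfd pfd = { .fd = fd, .events = POLLIN };

		fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_NONBLOCK);
		while (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
			ssize_t n = recv(fd, buf, len, 0);

			if (n < 0 && errno == EAGAIN)
				continue;	/* false-positive wakeup */
			/* ... consume n bytes ... */
		}
	}
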
1920
1921struct proto udp_prot = {
1922	.name		   = "UDP",
1923	.owner		   = THIS_MODULE,
1924	.close		   = udp_lib_close,
1925	.connect	   = ip4_datagram_connect,
1926	.disconnect	   = udp_disconnect,
1927	.ioctl		   = udp_ioctl,
1928	.destroy	   = udp_destroy_sock,
1929	.setsockopt	   = udp_setsockopt,
1930	.getsockopt	   = udp_getsockopt,
1931	.sendmsg	   = udp_sendmsg,
1932	.recvmsg	   = udp_recvmsg,
1933	.sendpage	   = udp_sendpage,
1934	.backlog_rcv	   = __udp_queue_rcv_skb,
1935	.hash		   = udp_lib_hash,
1936	.unhash		   = udp_lib_unhash,
1937	.rehash		   = udp_v4_rehash,
1938	.get_port	   = udp_v4_get_port,
1939	.memory_allocated  = &udp_memory_allocated,
1940	.sysctl_mem	   = sysctl_udp_mem,
1941	.sysctl_wmem	   = &sysctl_udp_wmem_min,
1942	.sysctl_rmem	   = &sysctl_udp_rmem_min,
1943	.obj_size	   = sizeof(struct udp_sock),
1944	.slab_flags	   = SLAB_DESTROY_BY_RCU,
1945	.h.udp_table	   = &udp_table,
1946#ifdef CONFIG_COMPAT
1947	.compat_setsockopt = compat_udp_setsockopt,
1948	.compat_getsockopt = compat_udp_getsockopt,
1949#endif
1950	.clear_sk	   = sk_prot_clear_portaddr_nulls,
1951};
1952EXPORT_SYMBOL(udp_prot);
1953
1954/* ------------------------------------------------------------------------ */
1955#ifdef CONFIG_PROC_FS
1956
1957static struct sock *udp_get_first(struct seq_file *seq, int start)
1958{
1959	struct sock *sk;
1960	struct udp_iter_state *state = seq->private;
1961	struct net *net = seq_file_net(seq);
1962
1963	for (state->bucket = start; state->bucket <= state->udp_table->mask;
1964	     ++state->bucket) {
1965		struct hlist_nulls_node *node;
1966		struct udp_hslot *hslot = &state->udp_table->hash[state->bucket];
1967
1968		if (hlist_nulls_empty(&hslot->head))
1969			continue;
1970
1971		spin_lock_bh(&hslot->lock);
1972		sk_nulls_for_each(sk, node, &hslot->head) {
1973			if (!net_eq(sock_net(sk), net))
1974				continue;
1975			if (sk->sk_family == state->family)
1976				goto found;
1977		}
1978		spin_unlock_bh(&hslot->lock);
1979	}
1980	sk = NULL;
1981found:
1982	return sk;
1983}
1984
1985static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk)
1986{
1987	struct udp_iter_state *state = seq->private;
1988	struct net *net = seq_file_net(seq);
1989
1990	do {
1991		sk = sk_nulls_next(sk);
1992	} while (sk && (!net_eq(sock_net(sk), net) || sk->sk_family != state->family));
1993
1994	if (!sk) {
1995		if (state->bucket <= state->udp_table->mask)
1996			spin_unlock_bh(&state->udp_table->hash[state->bucket].lock);
1997		return udp_get_first(seq, state->bucket + 1);
1998	}
1999	return sk;
2000}
2001
2002static struct sock *udp_get_idx(struct seq_file *seq, loff_t pos)
2003{
2004	struct sock *sk = udp_get_first(seq, 0);
2005
2006	if (sk)
2007		while (pos && (sk = udp_get_next(seq, sk)) != NULL)
2008			--pos;
2009	return pos ? NULL : sk;
2010}
2011
2012static void *udp_seq_start(struct seq_file *seq, loff_t *pos)
2013{
2014	struct udp_iter_state *state = seq->private;
2015	state->bucket = MAX_UDP_PORTS;
2016
2017	return *pos ? udp_get_idx(seq, *pos-1) : SEQ_START_TOKEN;
2018}
2019
2020static void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2021{
2022	struct sock *sk;
2023
2024	if (v == SEQ_START_TOKEN)
2025		sk = udp_get_idx(seq, 0);
2026	else
2027		sk = udp_get_next(seq, v);
2028
2029	++*pos;
2030	return sk;
2031}
2032
2033static void udp_seq_stop(struct seq_file *seq, void *v)
2034{
2035	struct udp_iter_state *state = seq->private;
2036
2037	if (state->bucket <= state->udp_table->mask)
2038		spin_unlock_bh(&state->udp_table->hash[state->bucket].lock);
2039}
2040
2041static int udp_seq_open(struct inode *inode, struct file *file)
2042{
2043	struct udp_seq_afinfo *afinfo = PDE(inode)->data;
2044	struct udp_iter_state *s;
2045	int err;
2046
2047	err = seq_open_net(inode, file, &afinfo->seq_ops,
2048			   sizeof(struct udp_iter_state));
2049	if (err < 0)
2050		return err;
2051
2052	s = ((struct seq_file *)file->private_data)->private;
2053	s->family		= afinfo->family;
2054	s->udp_table		= afinfo->udp_table;
2055	return err;
2056}
2057
2058/* ------------------------------------------------------------------------ */
2059int udp_proc_register(struct net *net, struct udp_seq_afinfo *afinfo)
2060{
2061	struct proc_dir_entry *p;
2062	int rc = 0;
2063
2064	afinfo->seq_fops.open		= udp_seq_open;
2065	afinfo->seq_fops.read		= seq_read;
2066	afinfo->seq_fops.llseek		= seq_lseek;
2067	afinfo->seq_fops.release	= seq_release_net;
2068
2069	afinfo->seq_ops.start		= udp_seq_start;
2070	afinfo->seq_ops.next		= udp_seq_next;
2071	afinfo->seq_ops.stop		= udp_seq_stop;
2072
2073	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
2074			     &afinfo->seq_fops, afinfo);
2075	if (!p)
2076		rc = -ENOMEM;
2077	return rc;
2078}
2079EXPORT_SYMBOL(udp_proc_register);
2080
2081void udp_proc_unregister(struct net *net, struct udp_seq_afinfo *afinfo)
2082{
2083	proc_net_remove(net, afinfo->name);
2084}
2085EXPORT_SYMBOL(udp_proc_unregister);
2086
2087/* ------------------------------------------------------------------------ */
2088static void udp4_format_sock(struct sock *sp, struct seq_file *f,
2089		int bucket, int *len)
2090{
2091	struct inet_sock *inet = inet_sk(sp);
2092	__be32 dest = inet->inet_daddr;
2093	__be32 src  = inet->inet_rcv_saddr;
2094	__u16 destp	  = ntohs(inet->inet_dport);
2095	__u16 srcp	  = ntohs(inet->inet_sport);
2096
2097	seq_printf(f, "%5d: %08X:%04X %08X:%04X"
2098		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %d%n",
2099		bucket, src, srcp, dest, destp, sp->sk_state,
2100		sk_wmem_alloc_get(sp),
2101		sk_rmem_alloc_get(sp),
2102		0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
2103		atomic_read(&sp->sk_refcnt), sp,
2104		atomic_read(&sp->sk_drops), len);
2105}
2106
2107int udp4_seq_show(struct seq_file *seq, void *v)
2108{
2109	if (v == SEQ_START_TOKEN)
2110		seq_printf(seq, "%-127s\n",
2111			   "  sl  local_address rem_address   st tx_queue "
2112			   "rx_queue tr tm->when retrnsmt   uid  timeout "
2113			   "inode ref pointer drops");
2114	else {
2115		struct udp_iter_state *state = seq->private;
2116		int len;
2117
2118		udp4_format_sock(v, seq, state->bucket, &len);
2119		seq_printf(seq, "%*s\n", 127 - len, "");
2120	}
2121	return 0;
2122}
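
Together, the header above and udp4_format_sock() produce one fixed-width line per socket in /proc/net/udp. An illustrative (made-up) entry for an unconnected socket bound to 0.0.0.0:68 could look like this, where st 07 is TCP_CLOSE, the state unconnected UDP sockets report:

	  sl  local_address rem_address   st tx_queue rx_queue tr tm->when retrnsmt   uid  timeout inode ref pointer drops
	  105: 00000000:0044 00000000:0000 07 00000000:00000000 00:00000000 00000000     0        0 12345 2 0000000000000000 0
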
2123
2124/* ------------------------------------------------------------------------ */
2125static struct udp_seq_afinfo udp4_seq_afinfo = {
2126	.name		= "udp",
2127	.family		= AF_INET,
2128	.udp_table	= &udp_table,
2129	.seq_fops	= {
2130		.owner	=	THIS_MODULE,
2131	},
2132	.seq_ops	= {
2133		.show		= udp4_seq_show,
2134	},
2135};
2136
2137static int __net_init udp4_proc_init_net(struct net *net)
2138{
2139	return udp_proc_register(net, &udp4_seq_afinfo);
2140}
2141
2142static void __net_exit udp4_proc_exit_net(struct net *net)
2143{
2144	udp_proc_unregister(net, &udp4_seq_afinfo);
2145}
2146
2147static struct pernet_operations udp4_net_ops = {
2148	.init = udp4_proc_init_net,
2149	.exit = udp4_proc_exit_net,
2150};
2151
2152int __init udp4_proc_init(void)
2153{
2154	return register_pernet_subsys(&udp4_net_ops);
2155}
2156
2157void udp4_proc_exit(void)
2158{
2159	unregister_pernet_subsys(&udp4_net_ops);
2160}
2161#endif /* CONFIG_PROC_FS */
2162
2163static __initdata unsigned long uhash_entries;
2164static int __init set_uhash_entries(char *str)
2165{
2166	if (!str)
2167		return 0;
2168	uhash_entries = simple_strtoul(str, &str, 0);
2169	if (uhash_entries && uhash_entries < UDP_HTABLE_SIZE_MIN)
2170		uhash_entries = UDP_HTABLE_SIZE_MIN;
2171	return 1;
2172}
2173__setup("uhash_entries=", set_uhash_entries);
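
uhash_entries is a boot-time parameter. For example (illustrative value), booting with:

	uhash_entries=1024

requests a 1024-slot primary hash table; the handler above rounds any smaller non-zero request up to UDP_HTABLE_SIZE_MIN, while 0 (or omitting the parameter) leaves the sizing to alloc_large_system_hash() below.
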
2174
2175void __init udp_table_init(struct udp_table *table, const char *name)
2176{
2177	unsigned int i;
2178
2179	if (!CONFIG_BASE_SMALL)
2180		table->hash = alloc_large_system_hash(name,
2181			2 * sizeof(struct udp_hslot),
2182			uhash_entries,
2183			21, /* one slot per 2 MB */
2184			0,
2185			&table->log,
2186			&table->mask,
2187			64 * 1024);
2188	/*
2189	 * Make sure hash table has the minimum size
2190	 */
2191	if (CONFIG_BASE_SMALL || table->mask < UDP_HTABLE_SIZE_MIN - 1) {
2192		table->hash = kmalloc(UDP_HTABLE_SIZE_MIN *
2193				      2 * sizeof(struct udp_hslot), GFP_KERNEL);
2194		if (!table->hash)
2195			panic(name);
2196		table->log = ilog2(UDP_HTABLE_SIZE_MIN);
2197		table->mask = UDP_HTABLE_SIZE_MIN - 1;
2198	}
2199	table->hash2 = table->hash + (table->mask + 1);
2200	for (i = 0; i <= table->mask; i++) {
2201		INIT_HLIST_NULLS_HEAD(&table->hash[i].head, i);
2202		table->hash[i].count = 0;
2203		spin_lock_init(&table->hash[i].lock);
2204	}
2205	for (i = 0; i <= table->mask; i++) {
2206		INIT_HLIST_NULLS_HEAD(&table->hash2[i].head, i);
2207		table->hash2[i].count = 0;
2208		spin_lock_init(&table->hash2[i].lock);
2209	}
2210}
2211
2212void __init udp_init(void)
2213{
2214	unsigned long limit;
2215
2216	udp_table_init(&udp_table, "UDP");
2217	limit = nr_free_buffer_pages() / 8;
2218	limit = max(limit, 128UL);
2219	sysctl_udp_mem[0] = limit / 4 * 3;
2220	sysctl_udp_mem[1] = limit;
2221	sysctl_udp_mem[2] = sysctl_udp_mem[0] * 2;
2222
2223	sysctl_udp_rmem_min = SK_MEM_QUANTUM;
2224	sysctl_udp_wmem_min = SK_MEM_QUANTUM;
2225}
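
As a worked example (numbers illustrative): if nr_free_buffer_pages() returns 1,000,000, then limit = 125,000 pages and the limits become sysctl_udp_mem = { 93750, 125000, 187500 }, i.e. the min/pressure/max triple in pages, while the per-socket rmem/wmem minima are one SK_MEM_QUANTUM (one page) each.
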
2226
2227int udp4_ufo_send_check(struct sk_buff *skb)
2228{
2229	const struct iphdr *iph;
2230	struct udphdr *uh;
2231
2232	if (!pskb_may_pull(skb, sizeof(*uh)))
2233		return -EINVAL;
2234
2235	iph = ip_hdr(skb);
2236	uh = udp_hdr(skb);
2237
2238	uh->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len,
2239				       IPPROTO_UDP, 0);
2240	skb->csum_start = skb_transport_header(skb) - skb->head;
2241	skb->csum_offset = offsetof(struct udphdr, check);
2242	skb->ip_summed = CHECKSUM_PARTIAL;
2243	return 0;
2244}
2245
2246struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, u32 features)
2247{
2248	struct sk_buff *segs = ERR_PTR(-EINVAL);
2249	unsigned int mss;
2250	int offset;
2251	__wsum csum;
2252
2253	mss = skb_shinfo(skb)->gso_size;
2254	if (unlikely(skb->len <= mss))
2255		goto out;
2256
2257	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
2258		/* Packet is from an untrusted source, reset gso_segs. */
2259		int type = skb_shinfo(skb)->gso_type;
2260
2261		if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY) ||
2262			     !(type & (SKB_GSO_UDP))))
2263			goto out;
2264
2265		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);
2266
2267		segs = NULL;
2268		goto out;
2269	}
2270
2271	/* Do software UFO. Complete and fill in the UDP checksum as HW cannot
2272	 * do checksum of UDP packets sent as multiple IP fragments.
2273	 */
2274	offset = skb_checksum_start_offset(skb);
2275	csum = skb_checksum(skb, offset, skb->len - offset, 0);
2276	offset += skb->csum_offset;
2277	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
2278	skb->ip_summed = CHECKSUM_NONE;
2279
2280	/* Fragment the skb. IP headers of the fragments are updated in
2281	 * inet_gso_segment()
2282	 */
2283	segs = skb_segment(skb, features);
2284out:
2285	return segs;
2286}
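
For example (illustrative numbers): a 12,000-byte UFO skb with gso_size = 1472 either stays with hardware segmentation, in which case the untrusted-source path above merely recomputes gso_segs = DIV_ROUND_UP(12000, 1472) = 9, or takes the software path, where the complete UDP checksum is folded into the header and skb_segment() splits the buffer into nine mss-sized pieces for IP fragmentation.
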
2287
v5.9
   1// SPDX-License-Identifier: GPL-2.0-or-later
  74
  75#define pr_fmt(fmt) "UDP: " fmt
  76
  77#include <linux/uaccess.h>
  78#include <asm/ioctls.h>
  79#include <linux/memblock.h>
  80#include <linux/highmem.h>
  81#include <linux/swap.h>
  82#include <linux/types.h>
  83#include <linux/fcntl.h>
  84#include <linux/module.h>
  85#include <linux/socket.h>
  86#include <linux/sockios.h>
  87#include <linux/igmp.h>
  88#include <linux/inetdevice.h>
  89#include <linux/in.h>
  90#include <linux/errno.h>
  91#include <linux/timer.h>
  92#include <linux/mm.h>
  93#include <linux/inet.h>
  94#include <linux/netdevice.h>
  95#include <linux/slab.h>
  96#include <net/tcp_states.h>
  97#include <linux/skbuff.h>
  98#include <linux/proc_fs.h>
  99#include <linux/seq_file.h>
 100#include <net/net_namespace.h>
 101#include <net/icmp.h>
 102#include <net/inet_hashtables.h>
 103#include <net/ip_tunnels.h>
 104#include <net/route.h>
 105#include <net/checksum.h>
 106#include <net/xfrm.h>
 107#include <trace/events/udp.h>
 108#include <linux/static_key.h>
 109#include <linux/btf_ids.h>
 110#include <trace/events/skb.h>
 111#include <net/busy_poll.h>
 112#include "udp_impl.h"
 113#include <net/sock_reuseport.h>
 114#include <net/addrconf.h>
 115#include <net/udp_tunnel.h>
 116#if IS_ENABLED(CONFIG_IPV6)
 117#include <net/ipv6_stubs.h>
 118#endif
 119
 120struct udp_table udp_table __read_mostly;
 121EXPORT_SYMBOL(udp_table);
 122
 123long sysctl_udp_mem[3] __read_mostly;
 124EXPORT_SYMBOL(sysctl_udp_mem);
 125
 126atomic_long_t udp_memory_allocated;
 127EXPORT_SYMBOL(udp_memory_allocated);
 128
 129#define MAX_UDP_PORTS 65536
 130#define PORTS_PER_CHAIN (MAX_UDP_PORTS / UDP_HTABLE_SIZE_MIN)
 131
 132static int udp_lib_lport_inuse(struct net *net, __u16 num,
 133			       const struct udp_hslot *hslot,
 134			       unsigned long *bitmap,
 135			       struct sock *sk, unsigned int log)
 136{
 137	struct sock *sk2;
 138	kuid_t uid = sock_i_uid(sk);
 139
 140	sk_for_each(sk2, &hslot->head) {
 141		if (net_eq(sock_net(sk2), net) &&
 142		    sk2 != sk &&
 143		    (bitmap || udp_sk(sk2)->udp_port_hash == num) &&
 144		    (!sk2->sk_reuse || !sk->sk_reuse) &&
 145		    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
 146		     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
 147		    inet_rcv_saddr_equal(sk, sk2, true)) {
 148			if (sk2->sk_reuseport && sk->sk_reuseport &&
 149			    !rcu_access_pointer(sk->sk_reuseport_cb) &&
 150			    uid_eq(uid, sock_i_uid(sk2))) {
 151				if (!bitmap)
 152					return 0;
 153			} else {
 154				if (!bitmap)
 155					return 1;
 156				__set_bit(udp_sk(sk2)->udp_port_hash >> log,
 157					  bitmap);
 158			}
 159		}
 160	}
 161	return 0;
 162}
 163
 164/*
 165 * Note: we still hold spinlock of primary hash chain, so no other writer
 166 * can insert/delete a socket with local_port == num
 167 */
 168static int udp_lib_lport_inuse2(struct net *net, __u16 num,
 169				struct udp_hslot *hslot2,
 170				struct sock *sk)
 171{
 172	struct sock *sk2;
 173	kuid_t uid = sock_i_uid(sk);
 174	int res = 0;
 175
 176	spin_lock(&hslot2->lock);
 177	udp_portaddr_for_each_entry(sk2, &hslot2->head) {
 178		if (net_eq(sock_net(sk2), net) &&
 179		    sk2 != sk &&
 180		    (udp_sk(sk2)->udp_port_hash == num) &&
 181		    (!sk2->sk_reuse || !sk->sk_reuse) &&
 182		    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
 183		     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
 184		    inet_rcv_saddr_equal(sk, sk2, true)) {
 185			if (sk2->sk_reuseport && sk->sk_reuseport &&
 186			    !rcu_access_pointer(sk->sk_reuseport_cb) &&
 187			    uid_eq(uid, sock_i_uid(sk2))) {
 188				res = 0;
 189			} else {
 190				res = 1;
 191			}
 192			break;
 193		}
 194	}
 195	spin_unlock(&hslot2->lock);
 196	return res;
 197}
 198
 199static int udp_reuseport_add_sock(struct sock *sk, struct udp_hslot *hslot)
 200{
 201	struct net *net = sock_net(sk);
 202	kuid_t uid = sock_i_uid(sk);
 203	struct sock *sk2;
 204
 205	sk_for_each(sk2, &hslot->head) {
 206		if (net_eq(sock_net(sk2), net) &&
 207		    sk2 != sk &&
 208		    sk2->sk_family == sk->sk_family &&
 209		    ipv6_only_sock(sk2) == ipv6_only_sock(sk) &&
 210		    (udp_sk(sk2)->udp_port_hash == udp_sk(sk)->udp_port_hash) &&
 211		    (sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
 212		    sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) &&
 213		    inet_rcv_saddr_equal(sk, sk2, false)) {
 214			return reuseport_add_sock(sk, sk2,
 215						  inet_rcv_saddr_any(sk));
 216		}
 217	}
 218
 219	return reuseport_alloc(sk, inet_rcv_saddr_any(sk));
 220}
 221
 222/**
 223 *  udp_lib_get_port  -  UDP/-Lite port lookup for IPv4 and IPv6
 224 *
 225 *  @sk:          socket struct in question
 226 *  @snum:        port number to look up
 227 *  @hash2_nulladdr: AF-dependent hash value in secondary hash chains,
 228 *                   with NULL address
 229 */
 230int udp_lib_get_port(struct sock *sk, unsigned short snum,
 231		     unsigned int hash2_nulladdr)
 232{
 233	struct udp_hslot *hslot, *hslot2;
 234	struct udp_table *udptable = sk->sk_prot->h.udp_table;
 235	int    error = 1;
 236	struct net *net = sock_net(sk);
 237
 238	if (!snum) {
 239		int low, high, remaining;
 240		unsigned int rand;
 241		unsigned short first, last;
 242		DECLARE_BITMAP(bitmap, PORTS_PER_CHAIN);
 243
 244		inet_get_local_port_range(net, &low, &high);
 245		remaining = (high - low) + 1;
 246
 247		rand = prandom_u32();
 248		first = reciprocal_scale(rand, remaining) + low;
 249		/*
 250		 * force rand to be an odd multiple of UDP_HTABLE_SIZE
 251		 */
 252		rand = (rand | 1) * (udptable->mask + 1);
 253		last = first + udptable->mask + 1;
 254		do {
 255			hslot = udp_hashslot(udptable, net, first);
 256			bitmap_zero(bitmap, PORTS_PER_CHAIN);
 257			spin_lock_bh(&hslot->lock);
 258			udp_lib_lport_inuse(net, snum, hslot, bitmap, sk,
 259					    udptable->log);
 260
 261			snum = first;
 262			/*
 263			 * Iterate on all possible values of snum for this hash.
 264			 * Using steps of an odd multiple of UDP_HTABLE_SIZE
 265			 * give us randomization and full range coverage.
 266			 */
 267			do {
 268				if (low <= snum && snum <= high &&
 269				    !test_bit(snum >> udptable->log, bitmap) &&
 270				    !inet_is_local_reserved_port(net, snum))
 271					goto found;
 272				snum += rand;
 273			} while (snum != first);
 274			spin_unlock_bh(&hslot->lock);
 275			cond_resched();
 276		} while (++first != last);
 277		goto fail;
 278	} else {
 279		hslot = udp_hashslot(udptable, net, snum);
 280		spin_lock_bh(&hslot->lock);
 281		if (hslot->count > 10) {
 282			int exist;
 283			unsigned int slot2 = udp_sk(sk)->udp_portaddr_hash ^ snum;
 284
 285			slot2          &= udptable->mask;
 286			hash2_nulladdr &= udptable->mask;
 287
 288			hslot2 = udp_hashslot2(udptable, slot2);
 289			if (hslot->count < hslot2->count)
 290				goto scan_primary_hash;
 291
 292			exist = udp_lib_lport_inuse2(net, snum, hslot2, sk);
 293			if (!exist && (hash2_nulladdr != slot2)) {
 294				hslot2 = udp_hashslot2(udptable, hash2_nulladdr);
 295				exist = udp_lib_lport_inuse2(net, snum, hslot2,
 296							     sk);
 297			}
 298			if (exist)
 299				goto fail_unlock;
 300			else
 301				goto found;
 302		}
 303scan_primary_hash:
 304		if (udp_lib_lport_inuse(net, snum, hslot, NULL, sk, 0))
 305			goto fail_unlock;
 306	}
 307found:
 308	inet_sk(sk)->inet_num = snum;
 309	udp_sk(sk)->udp_port_hash = snum;
 310	udp_sk(sk)->udp_portaddr_hash ^= snum;
 311	if (sk_unhashed(sk)) {
 312		if (sk->sk_reuseport &&
 313		    udp_reuseport_add_sock(sk, hslot)) {
 314			inet_sk(sk)->inet_num = 0;
 315			udp_sk(sk)->udp_port_hash = 0;
 316			udp_sk(sk)->udp_portaddr_hash ^= snum;
 317			goto fail_unlock;
 318		}
 319
 320		sk_add_node_rcu(sk, &hslot->head);
 321		hslot->count++;
 322		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
 323
 324		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
 325		spin_lock(&hslot2->lock);
 326		if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
 327		    sk->sk_family == AF_INET6)
 328			hlist_add_tail_rcu(&udp_sk(sk)->udp_portaddr_node,
 329					   &hslot2->head);
 330		else
 331			hlist_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
 332					   &hslot2->head);
 333		hslot2->count++;
 334		spin_unlock(&hslot2->lock);
 335	}
 336	sock_set_flag(sk, SOCK_RCU_FREE);
 337	error = 0;
 338fail_unlock:
 339	spin_unlock_bh(&hslot->lock);
 340fail:
 341	return error;
 342}
 343EXPORT_SYMBOL(udp_lib_get_port);
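
To see why the odd-multiple stepping above covers a whole chain, take illustrative numbers: with udptable->mask + 1 = 256 slots, each chain holds 65536/256 = 256 candidate ports. A step of rand = 3 * 256 = 768 is a multiple of 256, so snum += rand never leaves the chain that first hashed to, and because the factor 3 is odd (hence coprime to the power of two 256), the walk visits all 256 ports of the chain exactly once before snum wraps back to first.
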
 344
 345int udp_v4_get_port(struct sock *sk, unsigned short snum)
 346{
 347	unsigned int hash2_nulladdr =
 348		ipv4_portaddr_hash(sock_net(sk), htonl(INADDR_ANY), snum);
 349	unsigned int hash2_partial =
 350		ipv4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, 0);
 351
 352	/* precompute partial secondary hash */
 353	udp_sk(sk)->udp_portaddr_hash = hash2_partial;
 354	return udp_lib_get_port(sk, snum, hash2_nulladdr);
 355}
 356
 357static int compute_score(struct sock *sk, struct net *net,
 358			 __be32 saddr, __be16 sport,
 359			 __be32 daddr, unsigned short hnum,
 360			 int dif, int sdif)
 361{
 362	int score;
 363	struct inet_sock *inet;
 364	bool dev_match;
 365
 366	if (!net_eq(sock_net(sk), net) ||
 367	    udp_sk(sk)->udp_port_hash != hnum ||
 368	    ipv6_only_sock(sk))
 369		return -1;
 370
 371	if (sk->sk_rcv_saddr != daddr)
 372		return -1;
 373
 374	score = (sk->sk_family == PF_INET) ? 2 : 1;
 375
 376	inet = inet_sk(sk);
 377	if (inet->inet_daddr) {
 378		if (inet->inet_daddr != saddr)
 379			return -1;
 380		score += 4;
 381	}
 382
 383	if (inet->inet_dport) {
 384		if (inet->inet_dport != sport)
 385			return -1;
 386		score += 4;
 387	}
 388
 389	dev_match = udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if,
 390					dif, sdif);
 391	if (!dev_match)
 392		return -1;
 393	score += 4;
 394
 395	if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
 396		score++;
 397	return score;
 398}
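
As a worked example of the scoring: a connected AF_INET socket that also matches the bound device scores 2 + 4 (remote address) + 4 (remote port) + 4 (device) = 14, plus one more for the sk_incoming_cpu bonus, whereas an unconnected IPv4 socket on the same local address and port scores only 2 + 4 (device) = 6; udp4_lib_lookup2() below keeps the highest-scoring candidate.
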
 399
 400static u32 udp_ehashfn(const struct net *net, const __be32 laddr,
 401		       const __u16 lport, const __be32 faddr,
 402		       const __be16 fport)
 403{
 404	static u32 udp_ehash_secret __read_mostly;
 405
 406	net_get_random_once(&udp_ehash_secret, sizeof(udp_ehash_secret));
 407
 408	return __inet_ehashfn(laddr, lport, faddr, fport,
 409			      udp_ehash_secret + net_hash_mix(net));
 410}
 411
 412static struct sock *lookup_reuseport(struct net *net, struct sock *sk,
 413				     struct sk_buff *skb,
 414				     __be32 saddr, __be16 sport,
 415				     __be32 daddr, unsigned short hnum)
 416{
 417	struct sock *reuse_sk = NULL;
 418	u32 hash;
 419
 420	if (sk->sk_reuseport && sk->sk_state != TCP_ESTABLISHED) {
 421		hash = udp_ehashfn(net, daddr, hnum, saddr, sport);
 422		reuse_sk = reuseport_select_sock(sk, hash, skb,
 423						 sizeof(struct udphdr));
 424	}
 425	return reuse_sk;
 426}
 427
 428/* called with rcu_read_lock() */
 429static struct sock *udp4_lib_lookup2(struct net *net,
 430				     __be32 saddr, __be16 sport,
 431				     __be32 daddr, unsigned int hnum,
 432				     int dif, int sdif,
 433				     struct udp_hslot *hslot2,
 434				     struct sk_buff *skb)
 435{
 436	struct sock *sk, *result;
 437	int score, badness;
 438
 439	result = NULL;
 440	badness = 0;
 441	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
 442		score = compute_score(sk, net, saddr, sport,
 443				      daddr, hnum, dif, sdif);
 444		if (score > badness) {
 445			result = lookup_reuseport(net, sk, skb,
 446						  saddr, sport, daddr, hnum);
 447			/* Fall back to scoring if group has connections */
 448			if (result && !reuseport_has_conns(sk, false))
 449				return result;
 450
 451			result = result ? : sk;
 452			badness = score;
 453		}
 454	}
 455	return result;
 456}
 457
 458static struct sock *udp4_lookup_run_bpf(struct net *net,
 459					struct udp_table *udptable,
 460					struct sk_buff *skb,
 461					__be32 saddr, __be16 sport,
 462					__be32 daddr, u16 hnum)
 463{
 464	struct sock *sk, *reuse_sk;
 465	bool no_reuseport;
 466
 467	if (udptable != &udp_table)
 468		return NULL; /* only UDP is supported */
 469
 470	no_reuseport = bpf_sk_lookup_run_v4(net, IPPROTO_UDP,
 471					    saddr, sport, daddr, hnum, &sk);
 472	if (no_reuseport || IS_ERR_OR_NULL(sk))
 473		return sk;
 474
 475	reuse_sk = lookup_reuseport(net, sk, skb, saddr, sport, daddr, hnum);
 476	if (reuse_sk)
 477		sk = reuse_sk;
 478	return sk;
 479}
 480
 481/* UDP is nearly always wildcards out the wazoo, it makes no sense to try
 482 * harder than this. -DaveM
 483 */
 484struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
 485		__be16 sport, __be32 daddr, __be16 dport, int dif,
 486		int sdif, struct udp_table *udptable, struct sk_buff *skb)
 487{
 488	unsigned short hnum = ntohs(dport);
 489	unsigned int hash2, slot2;
 490	struct udp_hslot *hslot2;
 491	struct sock *result, *sk;
 492
 493	hash2 = ipv4_portaddr_hash(net, daddr, hnum);
 494	slot2 = hash2 & udptable->mask;
 495	hslot2 = &udptable->hash2[slot2];
 496
 497	/* Lookup connected or non-wildcard socket */
 498	result = udp4_lib_lookup2(net, saddr, sport,
 499				  daddr, hnum, dif, sdif,
 500				  hslot2, skb);
 501	if (!IS_ERR_OR_NULL(result) && result->sk_state == TCP_ESTABLISHED)
 502		goto done;
 503
 504	/* Lookup redirect from BPF */
 505	if (static_branch_unlikely(&bpf_sk_lookup_enabled)) {
 506		sk = udp4_lookup_run_bpf(net, udptable, skb,
 507					 saddr, sport, daddr, hnum);
 508		if (sk) {
 509			result = sk;
 510			goto done;
 511		}
 512	}
 513
 514	/* Got non-wildcard socket or error on first lookup */
 515	if (result)
 516		goto done;
 517
 518	/* Lookup wildcard sockets */
 519	hash2 = ipv4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
 520	slot2 = hash2 & udptable->mask;
 521	hslot2 = &udptable->hash2[slot2];
 522
 523	result = udp4_lib_lookup2(net, saddr, sport,
 524				  htonl(INADDR_ANY), hnum, dif, sdif,
 525				  hslot2, skb);
 526done:
 527	if (IS_ERR(result))
 528		return NULL;
 529	return result;
 530}
 531EXPORT_SYMBOL_GPL(__udp4_lib_lookup);
 532
 533static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb,
 534						 __be16 sport, __be16 dport,
 535						 struct udp_table *udptable)
 536{
 537	const struct iphdr *iph = ip_hdr(skb);
 538
 539	return __udp4_lib_lookup(dev_net(skb->dev), iph->saddr, sport,
 540				 iph->daddr, dport, inet_iif(skb),
 541				 inet_sdif(skb), udptable, skb);
 542}
 543
 544struct sock *udp4_lib_lookup_skb(struct sk_buff *skb,
 545				 __be16 sport, __be16 dport)
 546{
 547	const struct iphdr *iph = ip_hdr(skb);
 548
 549	return __udp4_lib_lookup(dev_net(skb->dev), iph->saddr, sport,
 550				 iph->daddr, dport, inet_iif(skb),
 551				 inet_sdif(skb), &udp_table, NULL);
 552}
 553EXPORT_SYMBOL_GPL(udp4_lib_lookup_skb);
 554
 555/* Must be called under rcu_read_lock().
 556 * Does increment socket refcount.
 557 */
 558#if IS_ENABLED(CONFIG_NF_TPROXY_IPV4) || IS_ENABLED(CONFIG_NF_SOCKET_IPV4)
 559struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
 560			     __be32 daddr, __be16 dport, int dif)
 561{
 562	struct sock *sk;
 563
 564	sk = __udp4_lib_lookup(net, saddr, sport, daddr, dport,
 565			       dif, 0, &udp_table, NULL);
 566	if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
 567		sk = NULL;
 568	return sk;
 569}
 570EXPORT_SYMBOL_GPL(udp4_lib_lookup);
 571#endif
 572
 573static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk,
 574				       __be16 loc_port, __be32 loc_addr,
 575				       __be16 rmt_port, __be32 rmt_addr,
 576				       int dif, int sdif, unsigned short hnum)
 577{
 578	struct inet_sock *inet = inet_sk(sk);
 579
 580	if (!net_eq(sock_net(sk), net) ||
 581	    udp_sk(sk)->udp_port_hash != hnum ||
 582	    (inet->inet_daddr && inet->inet_daddr != rmt_addr) ||
 583	    (inet->inet_dport != rmt_port && inet->inet_dport) ||
 584	    (inet->inet_rcv_saddr && inet->inet_rcv_saddr != loc_addr) ||
 585	    ipv6_only_sock(sk) ||
 586	    !udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif))
 587		return false;
 588	if (!ip_mc_sf_allow(sk, loc_addr, rmt_addr, dif, sdif))
 589		return false;
 590	return true;
 591}
 592
 593DEFINE_STATIC_KEY_FALSE(udp_encap_needed_key);
 594void udp_encap_enable(void)
 595{
 596	static_branch_inc(&udp_encap_needed_key);
 597}
 598EXPORT_SYMBOL(udp_encap_enable);
 599
 600/* Handler for tunnels with arbitrary destination ports: no socket lookup, go
 601 * through error handlers in encapsulations looking for a match.
 602 */
 603static int __udp4_lib_err_encap_no_sk(struct sk_buff *skb, u32 info)
 604{
 605	int i;
 606
 607	for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) {
 608		int (*handler)(struct sk_buff *skb, u32 info);
 609		const struct ip_tunnel_encap_ops *encap;
 610
 611		encap = rcu_dereference(iptun_encaps[i]);
 612		if (!encap)
 613			continue;
 614		handler = encap->err_handler;
 615		if (handler && !handler(skb, info))
 616			return 0;
 617	}
 618
 619	return -ENOENT;
 620}
 621
 622/* Try to match ICMP errors to UDP tunnels by looking up a socket without
 623 * reversing source and destination port: this will match tunnels that force the
 624 * same destination port on both endpoints (e.g. VXLAN, GENEVE). Note that
 625 * lwtunnels might actually break this assumption by being configured with
 626 * different destination ports on endpoints, in this case we won't be able to
 627 * trace ICMP messages back to them.
 628 *
 629 * If this doesn't match any socket, probe tunnels with arbitrary destination
 630 * ports (e.g. FoU, GUE): there, the receiving socket is useless, as the port
 631 * we've sent packets to won't necessarily match the local destination port.
 632 *
 633 * Then ask the tunnel implementation to match the error against a valid
 634 * association.
 635 *
 636 * Return an error if we can't find a match, the socket if we need further
 637 * processing, zero otherwise.
 638 */
 639static struct sock *__udp4_lib_err_encap(struct net *net,
 640					 const struct iphdr *iph,
 641					 struct udphdr *uh,
 642					 struct udp_table *udptable,
 643					 struct sk_buff *skb, u32 info)
 644{
 645	int network_offset, transport_offset;
 646	struct sock *sk;
 647
 648	network_offset = skb_network_offset(skb);
 649	transport_offset = skb_transport_offset(skb);
 650
 651	/* Network header needs to point to the outer IPv4 header inside ICMP */
 652	skb_reset_network_header(skb);
 653
 654	/* Transport header needs to point to the UDP header */
 655	skb_set_transport_header(skb, iph->ihl << 2);
 656
 657	sk = __udp4_lib_lookup(net, iph->daddr, uh->source,
 658			       iph->saddr, uh->dest, skb->dev->ifindex, 0,
 659			       udptable, NULL);
 660	if (sk) {
 661		int (*lookup)(struct sock *sk, struct sk_buff *skb);
 662		struct udp_sock *up = udp_sk(sk);
 663
 664		lookup = READ_ONCE(up->encap_err_lookup);
 665		if (!lookup || lookup(sk, skb))
 666			sk = NULL;
 667	}
 668
 669	if (!sk)
 670		sk = ERR_PTR(__udp4_lib_err_encap_no_sk(skb, info));
 671
 672	skb_set_transport_header(skb, transport_offset);
 673	skb_set_network_header(skb, network_offset);
 674
 675	return sk;
 676}
 677
 678/*
 679 * This routine is called by the ICMP module when it gets some
 680 * sort of error condition.  If err < 0 then the socket should
 681 * be closed and the error returned to the user.  If err > 0
 682 * it's just the icmp type << 8 | icmp code.
 683 * Header points to the ip header of the error packet. We move
 684 * on past this. Then (as it used to claim before adjustment)
 685 * header points to the first 8 bytes of the udp header.  We need
 686 * to find the appropriate port.
 687 */
 688
 689int __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
 690{
 691	struct inet_sock *inet;
 692	const struct iphdr *iph = (const struct iphdr *)skb->data;
 693	struct udphdr *uh = (struct udphdr *)(skb->data+(iph->ihl<<2));
 694	const int type = icmp_hdr(skb)->type;
 695	const int code = icmp_hdr(skb)->code;
 696	bool tunnel = false;
 697	struct sock *sk;
 698	int harderr;
 699	int err;
 700	struct net *net = dev_net(skb->dev);
 701
 702	sk = __udp4_lib_lookup(net, iph->daddr, uh->dest,
 703			       iph->saddr, uh->source, skb->dev->ifindex,
 704			       inet_sdif(skb), udptable, NULL);
 705	if (!sk) {
 706		/* No socket for error: try tunnels before discarding */
 707		sk = ERR_PTR(-ENOENT);
 708		if (static_branch_unlikely(&udp_encap_needed_key)) {
 709			sk = __udp4_lib_err_encap(net, iph, uh, udptable, skb,
 710						  info);
 711			if (!sk)
 712				return 0;
 713		}
 714
 715		if (IS_ERR(sk)) {
 716			__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
 717			return PTR_ERR(sk);
 718		}
 719
 720		tunnel = true;
 721	}
 722
 723	err = 0;
 724	harderr = 0;
 725	inet = inet_sk(sk);
 726
 727	switch (type) {
 728	default:
 729	case ICMP_TIME_EXCEEDED:
 730		err = EHOSTUNREACH;
 731		break;
 732	case ICMP_SOURCE_QUENCH:
 733		goto out;
 734	case ICMP_PARAMETERPROB:
 735		err = EPROTO;
 736		harderr = 1;
 737		break;
 738	case ICMP_DEST_UNREACH:
 739		if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */
 740			ipv4_sk_update_pmtu(skb, sk, info);
 741			if (inet->pmtudisc != IP_PMTUDISC_DONT) {
 742				err = EMSGSIZE;
 743				harderr = 1;
 744				break;
 745			}
 746			goto out;
 747		}
 748		err = EHOSTUNREACH;
 749		if (code <= NR_ICMP_UNREACH) {
 750			harderr = icmp_err_convert[code].fatal;
 751			err = icmp_err_convert[code].errno;
 752		}
 753		break;
 754	case ICMP_REDIRECT:
 755		ipv4_sk_redirect(skb, sk);
 756		goto out;
 757	}
 758
 759	/*
 760	 *      RFC1122: OK.  Passes ICMP errors back to application, as per
 761	 *	4.1.3.3.
 762	 */
 763	if (tunnel) {
 764		/* ...not for tunnels though: we don't have a sending socket */
 765		goto out;
 766	}
 767	if (!inet->recverr) {
 768		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
 769			goto out;
 770	} else
 771		ip_icmp_error(sk, skb, err, uh->dest, info, (u8 *)(uh+1));
 772
 773	sk->sk_err = err;
 774	sk->sk_error_report(sk);
 775out:
 776	return 0;
 777}
 778
 779int udp_err(struct sk_buff *skb, u32 info)
 780{
 781	return __udp4_lib_err(skb, info, &udp_table);
 782}
 783
 784/*
 785 * Throw away all pending data and cancel the corking. Socket is locked.
 786 */
 787void udp_flush_pending_frames(struct sock *sk)
 788{
 789	struct udp_sock *up = udp_sk(sk);
 790
 791	if (up->pending) {
 792		up->len = 0;
 793		up->pending = 0;
 794		ip_flush_pending_frames(sk);
 795	}
 796}
 797EXPORT_SYMBOL(udp_flush_pending_frames);
 798
 799/**
 800 * 	udp4_hwcsum  -  handle outgoing HW checksumming
 801 * 	@skb: 	sk_buff containing the filled-in UDP header
 802 * 	        (checksum field must be zeroed out)
 803 *	@src:	source IP address
 804 *	@dst:	destination IP address
 805 */
 806void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst)
 807{
 808	struct udphdr *uh = udp_hdr(skb);
 809	int offset = skb_transport_offset(skb);
 810	int len = skb->len - offset;
 811	int hlen = len;
 812	__wsum csum = 0;
 813
 814	if (!skb_has_frag_list(skb)) {
 815		/*
 816		 * Only one fragment on the socket.
 817		 */
 818		skb->csum_start = skb_transport_header(skb) - skb->head;
 819		skb->csum_offset = offsetof(struct udphdr, check);
 820		uh->check = ~csum_tcpudp_magic(src, dst, len,
 821					       IPPROTO_UDP, 0);
 822	} else {
 823		struct sk_buff *frags;
 824
 825		/*
 826		 * HW-checksum won't work as there are two or more
 827		 * fragments on the socket, so the csums of all the
 828		 * sk_buffs have to be combined into one checksum.
 829		 */
 830		skb_walk_frags(skb, frags) {
 831			csum = csum_add(csum, frags->csum);
 832			hlen -= frags->len;
 833		}
 834
 835		csum = skb_checksum(skb, offset, hlen, csum);
 836		skb->ip_summed = CHECKSUM_NONE;
 837
 838		uh->check = csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, csum);
 839		if (uh->check == 0)
 840			uh->check = CSUM_MANGLED_0;
 841	}
 842}
 843EXPORT_SYMBOL_GPL(udp4_hwcsum);
 844
 845/* Function to set UDP checksum for an IPv4 UDP packet. This is intended
 846 * for the simple case like when setting the checksum for a UDP tunnel.
 847 */
 848void udp_set_csum(bool nocheck, struct sk_buff *skb,
 849		  __be32 saddr, __be32 daddr, int len)
 850{
 851	struct udphdr *uh = udp_hdr(skb);
 852
 853	if (nocheck) {
 854		uh->check = 0;
 855	} else if (skb_is_gso(skb)) {
 856		uh->check = ~udp_v4_check(len, saddr, daddr, 0);
 857	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
 858		uh->check = 0;
 859		uh->check = udp_v4_check(len, saddr, daddr, lco_csum(skb));
 860		if (uh->check == 0)
 861			uh->check = CSUM_MANGLED_0;
 862	} else {
 863		skb->ip_summed = CHECKSUM_PARTIAL;
 864		skb->csum_start = skb_transport_header(skb) - skb->head;
 865		skb->csum_offset = offsetof(struct udphdr, check);
 866		uh->check = ~udp_v4_check(len, saddr, daddr, 0);
 867	}
 868}
 869EXPORT_SYMBOL(udp_set_csum);
 870
 871static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4,
 872			struct inet_cork *cork)
 873{
 874	struct sock *sk = skb->sk;
 875	struct inet_sock *inet = inet_sk(sk);
 876	struct udphdr *uh;
 877	int err = 0;
 878	int is_udplite = IS_UDPLITE(sk);
 879	int offset = skb_transport_offset(skb);
 880	int len = skb->len - offset;
 881	int datalen = len - sizeof(*uh);
 882	__wsum csum = 0;
 883
 884	/*
 885	 * Create a UDP header
 886	 */
 887	uh = udp_hdr(skb);
 888	uh->source = inet->inet_sport;
 889	uh->dest = fl4->fl4_dport;
 890	uh->len = htons(len);
 891	uh->check = 0;
 892
 893	if (cork->gso_size) {
 894		const int hlen = skb_network_header_len(skb) +
 895				 sizeof(struct udphdr);
 896
 897		if (hlen + cork->gso_size > cork->fragsize) {
 898			kfree_skb(skb);
 899			return -EINVAL;
 900		}
 901		if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) {
 902			kfree_skb(skb);
 903			return -EINVAL;
 904		}
 905		if (sk->sk_no_check_tx) {
 906			kfree_skb(skb);
 907			return -EINVAL;
 908		}
 909		if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
 910		    dst_xfrm(skb_dst(skb))) {
 911			kfree_skb(skb);
 912			return -EIO;
 913		}
 914
 915		if (datalen > cork->gso_size) {
 916			skb_shinfo(skb)->gso_size = cork->gso_size;
 917			skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
 918			skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen,
 919								 cork->gso_size);
 920		}
 921		goto csum_partial;
 922	}
 923
 924	if (is_udplite)  				 /*     UDP-Lite      */
 925		csum = udplite_csum(skb);
 926
 927	else if (sk->sk_no_check_tx) {			 /* UDP csum off */
 928
 929		skb->ip_summed = CHECKSUM_NONE;
 930		goto send;
 931
 932	} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
 933csum_partial:
 934
 935		udp4_hwcsum(skb, fl4->saddr, fl4->daddr);
 936		goto send;
 937
 938	} else
 939		csum = udp_csum(skb);
 940
 941	/* add protocol-dependent pseudo-header */
 942	uh->check = csum_tcpudp_magic(fl4->saddr, fl4->daddr, len,
 943				      sk->sk_protocol, csum);
 944	if (uh->check == 0)
 945		uh->check = CSUM_MANGLED_0;
 946
 947send:
 948	err = ip_send_skb(sock_net(sk), skb);
 949	if (err) {
 950		if (err == -ENOBUFS && !inet->recverr) {
 951			UDP_INC_STATS(sock_net(sk),
 952				      UDP_MIB_SNDBUFERRORS, is_udplite);
 953			err = 0;
 954		}
 955	} else
 956		UDP_INC_STATS(sock_net(sk),
 957			      UDP_MIB_OUTDATAGRAMS, is_udplite);
 958	return err;
 959}
 960
 961/*
 962 * Push out all pending data as one UDP datagram. Socket is locked.
 963 */
 964int udp_push_pending_frames(struct sock *sk)
 965{
 966	struct udp_sock  *up = udp_sk(sk);
 967	struct inet_sock *inet = inet_sk(sk);
 968	struct flowi4 *fl4 = &inet->cork.fl.u.ip4;
 969	struct sk_buff *skb;
 970	int err = 0;
 971
 972	skb = ip_finish_skb(sk, fl4);
 973	if (!skb)
 974		goto out;
 975
 976	err = udp_send_skb(skb, fl4, &inet->cork.base);
 977
 978out:
 979	up->len = 0;
 980	up->pending = 0;
 981	return err;
 982}
 983EXPORT_SYMBOL(udp_push_pending_frames);
 984
 985static int __udp_cmsg_send(struct cmsghdr *cmsg, u16 *gso_size)
 986{
 987	switch (cmsg->cmsg_type) {
 988	case UDP_SEGMENT:
 989		if (cmsg->cmsg_len != CMSG_LEN(sizeof(__u16)))
 990			return -EINVAL;
 991		*gso_size = *(__u16 *)CMSG_DATA(cmsg);
 992		return 0;
 993	default:
 994		return -EINVAL;
 995	}
 996}
 997
 998int udp_cmsg_send(struct sock *sk, struct msghdr *msg, u16 *gso_size)
 999{
1000	struct cmsghdr *cmsg;
1001	bool need_ip = false;
1002	int err;
1003
1004	for_each_cmsghdr(cmsg, msg) {
1005		if (!CMSG_OK(msg, cmsg))
1006			return -EINVAL;
1007
1008		if (cmsg->cmsg_level != SOL_UDP) {
1009			need_ip = true;
1010			continue;
1011		}
1012
1013		err = __udp_cmsg_send(cmsg, gso_size);
1014		if (err)
1015			return err;
1016	}
1017
1018	return need_ip;
1019}
1020EXPORT_SYMBOL_GPL(udp_cmsg_send);
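
The SOL_UDP/UDP_SEGMENT control message parsed above is how user space requests segmentation offload per call. A sketch, assuming a connected socket fd and a buffer larger than one MTU (glibc exposes UDP_SEGMENT in <netinet/udp.h>; if SOL_UDP is missing from libc headers it is numerically IPPROTO_UDP):

	#include <stdint.h>
	#include <string.h>
	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <netinet/udp.h>	/* UDP_SEGMENT */

	#ifndef SOL_UDP
	#define SOL_UDP IPPROTO_UDP	/* both are 17 */
	#endif

	ssize_t send_gso(int fd, void *buf, size_t len, uint16_t gso_size)
	{
		char ctl[CMSG_SPACE(sizeof(uint16_t))];
		struct iovec iov = { .iov_base = buf, .iov_len = len };
		struct msghdr msg = { 0 };
		struct cmsghdr *cm;

		memset(ctl, 0, sizeof(ctl));
		msg.msg_iov = &iov;
		msg.msg_iovlen = 1;
		msg.msg_control = ctl;
		msg.msg_controllen = sizeof(ctl);

		cm = CMSG_FIRSTHDR(&msg);
		cm->cmsg_level = SOL_UDP;
		cm->cmsg_type = UDP_SEGMENT;
		/* __udp_cmsg_send() insists on exactly CMSG_LEN(sizeof(__u16)) */
		cm->cmsg_len = CMSG_LEN(sizeof(uint16_t));
		memcpy(CMSG_DATA(cm), &gso_size, sizeof(gso_size));

		return sendmsg(fd, &msg, 0);	/* kernel emits gso_size chunks */
	}
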
1021
1022int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
1023{
1024	struct inet_sock *inet = inet_sk(sk);
1025	struct udp_sock *up = udp_sk(sk);
1026	DECLARE_SOCKADDR(struct sockaddr_in *, usin, msg->msg_name);
1027	struct flowi4 fl4_stack;
1028	struct flowi4 *fl4;
1029	int ulen = len;
1030	struct ipcm_cookie ipc;
1031	struct rtable *rt = NULL;
1032	int free = 0;
1033	int connected = 0;
1034	__be32 daddr, faddr, saddr;
1035	__be16 dport;
1036	u8  tos;
1037	int err, is_udplite = IS_UDPLITE(sk);
1038	int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
1039	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
1040	struct sk_buff *skb;
1041	struct ip_options_data opt_copy;
1042
1043	if (len > 0xFFFF)
1044		return -EMSGSIZE;
1045
1046	/*
1047	 *	Check the flags.
1048	 */
1049
1050	if (msg->msg_flags & MSG_OOB) /* Mirror BSD error message compatibility */
1051		return -EOPNOTSUPP;
1052
1053	getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
1054
1055	fl4 = &inet->cork.fl.u.ip4;
1056	if (up->pending) {
1057		/*
1058		 * There are pending frames.
1059		 * The socket lock must be held while it's corked.
1060		 */
1061		lock_sock(sk);
1062		if (likely(up->pending)) {
1063			if (unlikely(up->pending != AF_INET)) {
1064				release_sock(sk);
1065				return -EINVAL;
1066			}
1067			goto do_append_data;
1068		}
1069		release_sock(sk);
1070	}
1071	ulen += sizeof(struct udphdr);
1072
1073	/*
1074	 *	Get and verify the address.
1075	 */
1076	if (usin) {
1077		if (msg->msg_namelen < sizeof(*usin))
1078			return -EINVAL;
1079		if (usin->sin_family != AF_INET) {
1080			if (usin->sin_family != AF_UNSPEC)
1081				return -EAFNOSUPPORT;
1082		}
1083
1084		daddr = usin->sin_addr.s_addr;
1085		dport = usin->sin_port;
1086		if (dport == 0)
1087			return -EINVAL;
1088	} else {
1089		if (sk->sk_state != TCP_ESTABLISHED)
1090			return -EDESTADDRREQ;
1091		daddr = inet->inet_daddr;
1092		dport = inet->inet_dport;
1093		/* Open fast path for connected socket.
1094		   Route will not be used if at least one option is set.
1095		 */
1096		connected = 1;
1097	}
1098
1099	ipcm_init_sk(&ipc, inet);
1100	ipc.gso_size = up->gso_size;
1101
1102	if (msg->msg_controllen) {
1103		err = udp_cmsg_send(sk, msg, &ipc.gso_size);
1104		if (err > 0)
1105			err = ip_cmsg_send(sk, msg, &ipc,
1106					   sk->sk_family == AF_INET6);
1107		if (unlikely(err < 0)) {
1108			kfree(ipc.opt);
1109			return err;
1110		}
1111		if (ipc.opt)
1112			free = 1;
1113		connected = 0;
1114	}
1115	if (!ipc.opt) {
1116		struct ip_options_rcu *inet_opt;
1117
1118		rcu_read_lock();
1119		inet_opt = rcu_dereference(inet->inet_opt);
1120		if (inet_opt) {
1121			memcpy(&opt_copy, inet_opt,
1122			       sizeof(*inet_opt) + inet_opt->opt.optlen);
1123			ipc.opt = &opt_copy.opt;
1124		}
1125		rcu_read_unlock();
1126	}
1127
1128	if (cgroup_bpf_enabled && !connected) {
1129		err = BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk,
1130					    (struct sockaddr *)usin, &ipc.addr);
1131		if (err)
1132			goto out_free;
1133		if (usin) {
1134			if (usin->sin_port == 0) {
1135				/* BPF program set invalid port. Reject it. */
1136				err = -EINVAL;
1137				goto out_free;
1138			}
1139			daddr = usin->sin_addr.s_addr;
1140			dport = usin->sin_port;
1141		}
1142	}
1143
1144	saddr = ipc.addr;
1145	ipc.addr = faddr = daddr;
1146
1147	if (ipc.opt && ipc.opt->opt.srr) {
1148		if (!daddr) {
1149			err = -EINVAL;
1150			goto out_free;
1151		}
1152		faddr = ipc.opt->opt.faddr;
1153		connected = 0;
1154	}
1155	tos = get_rttos(&ipc, inet);
1156	if (sock_flag(sk, SOCK_LOCALROUTE) ||
1157	    (msg->msg_flags & MSG_DONTROUTE) ||
1158	    (ipc.opt && ipc.opt->opt.is_strictroute)) {
1159		tos |= RTO_ONLINK;
1160		connected = 0;
1161	}
1162
1163	if (ipv4_is_multicast(daddr)) {
1164		if (!ipc.oif || netif_index_is_l3_master(sock_net(sk), ipc.oif))
1165			ipc.oif = inet->mc_index;
1166		if (!saddr)
1167			saddr = inet->mc_addr;
1168		connected = 0;
1169	} else if (!ipc.oif) {
1170		ipc.oif = inet->uc_index;
1171	} else if (ipv4_is_lbcast(daddr) && inet->uc_index) {
1172		/* oif is set, packet is to local broadcast,
1173		 * and uc_index is set. oif is most likely set
1174		 * by sk_bound_dev_if. If uc_index != oif check if the
1175		 * oif is an L3 master and uc_index is an L3 slave.
1176		 * If so, we want to allow the send using the uc_index.
1177		 */
1178		if (ipc.oif != inet->uc_index &&
1179		    ipc.oif == l3mdev_master_ifindex_by_index(sock_net(sk),
1180							      inet->uc_index)) {
1181			ipc.oif = inet->uc_index;
1182		}
1183	}
1184
1185	if (connected)
1186		rt = (struct rtable *)sk_dst_check(sk, 0);
1187
1188	if (!rt) {
1189		struct net *net = sock_net(sk);
1190		__u8 flow_flags = inet_sk_flowi_flags(sk);
1191
1192		fl4 = &fl4_stack;
1193
1194		flowi4_init_output(fl4, ipc.oif, ipc.sockc.mark, tos,
1195				   RT_SCOPE_UNIVERSE, sk->sk_protocol,
1196				   flow_flags,
1197				   faddr, saddr, dport, inet->inet_sport,
1198				   sk->sk_uid);
1199
1200		security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
1201		rt = ip_route_output_flow(net, fl4, sk);
1202		if (IS_ERR(rt)) {
1203			err = PTR_ERR(rt);
1204			rt = NULL;
1205			if (err == -ENETUNREACH)
1206				IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
1207			goto out;
1208		}
1209
1210		err = -EACCES;
1211		if ((rt->rt_flags & RTCF_BROADCAST) &&
1212		    !sock_flag(sk, SOCK_BROADCAST))
1213			goto out;
1214		if (connected)
1215			sk_dst_set(sk, dst_clone(&rt->dst));
1216	}
1217
1218	if (msg->msg_flags&MSG_CONFIRM)
1219		goto do_confirm;
1220back_from_confirm:
1221
1222	saddr = fl4->saddr;
1223	if (!ipc.addr)
1224		daddr = ipc.addr = fl4->daddr;
1225
1226	/* Lockless fast path for the non-corking case. */
1227	if (!corkreq) {
1228		struct inet_cork cork;
1229
1230		skb = ip_make_skb(sk, fl4, getfrag, msg, ulen,
1231				  sizeof(struct udphdr), &ipc, &rt,
1232				  &cork, msg->msg_flags);
1233		err = PTR_ERR(skb);
1234		if (!IS_ERR_OR_NULL(skb))
1235			err = udp_send_skb(skb, fl4, &cork);
1236		goto out;
1237	}
1238
1239	lock_sock(sk);
1240	if (unlikely(up->pending)) {
1241		/* The socket is already corked while preparing it. */
1242		/* ... which is an evident application bug. --ANK */
1243		release_sock(sk);
1244
1245		net_dbg_ratelimited("socket already corked\n");
1246		err = -EINVAL;
1247		goto out;
1248	}
1249	/*
1250	 *	Now cork the socket to pend data.
1251	 */
1252	fl4 = &inet->cork.fl.u.ip4;
1253	fl4->daddr = daddr;
1254	fl4->saddr = saddr;
1255	fl4->fl4_dport = dport;
1256	fl4->fl4_sport = inet->inet_sport;
1257	up->pending = AF_INET;
1258
1259do_append_data:
1260	up->len += ulen;
1261	err = ip_append_data(sk, fl4, getfrag, msg, ulen,
1262			     sizeof(struct udphdr), &ipc, &rt,
1263			     corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
1264	if (err)
1265		udp_flush_pending_frames(sk);
1266	else if (!corkreq)
1267		err = udp_push_pending_frames(sk);
1268	else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
1269		up->pending = 0;
1270	release_sock(sk);
1271
1272out:
1273	ip_rt_put(rt);
1274out_free:
1275	if (free)
1276		kfree(ipc.opt);
1277	if (!err)
1278		return len;
1279	/*
1280	 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space.  Reporting
1281	 * ENOBUFS might not be good (it's not tunable per se), but otherwise
1282	 * we don't have a good statistic (IpOutDiscards but it can be too many
1283	 * things).  We could add another new stat but at least for now that
1284	 * seems like overkill.
1285	 */
1286	if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
1287		UDP_INC_STATS(sock_net(sk),
1288			      UDP_MIB_SNDBUFERRORS, is_udplite);
1289	}
1290	return err;
1291
1292do_confirm:
1293	if (msg->msg_flags & MSG_PROBE)
1294		dst_confirm_neigh(&rt->dst, &fl4->daddr);
1295	if (!(msg->msg_flags&MSG_PROBE) || len)
1296		goto back_from_confirm;
1297	err = 0;
1298	goto out;
1299}
1300EXPORT_SYMBOL(udp_sendmsg);
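
One practical consequence of the connected handling above: a socket that has been connect()ed keeps its route cached (the sk_dst_check() path), so repeated sends skip the full ip_route_output_flow() lookup. A sketch, with a destination from the documentation address range and fd assumed:

	#include <arpa/inet.h>
	#include <netinet/in.h>
	#include <sys/socket.h>

	void blast(int fd, const void *payload, size_t len)
	{
		struct sockaddr_in dst = {
			.sin_family = AF_INET,
			.sin_port   = htons(5000),
		};

		inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);
		connect(fd, (struct sockaddr *)&dst, sizeof(dst));

		/* every send() now reuses the cached route */
		for (int i = 0; i < 1000; i++)
			send(fd, payload, len, 0);
	}
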
1301
1302int udp_sendpage(struct sock *sk, struct page *page, int offset,
1303		 size_t size, int flags)
1304{
1305	struct inet_sock *inet = inet_sk(sk);
1306	struct udp_sock *up = udp_sk(sk);
1307	int ret;
1308
1309	if (flags & MSG_SENDPAGE_NOTLAST)
1310		flags |= MSG_MORE;
1311
1312	if (!up->pending) {
1313		struct msghdr msg = {	.msg_flags = flags|MSG_MORE };
1314
1315		/* Call udp_sendmsg to specify destination address which
1316		 * sendpage interface can't pass.
1317		 * This will succeed only when the socket is connected.
1318		 */
1319		ret = udp_sendmsg(sk, &msg, 0);
1320		if (ret < 0)
1321			return ret;
1322	}
1323
1324	lock_sock(sk);
1325
1326	if (unlikely(!up->pending)) {
1327		release_sock(sk);
1328
1329		net_dbg_ratelimited("cork failed\n");
1330		return -EINVAL;
1331	}
1332
1333	ret = ip_append_page(sk, &inet->cork.fl.u.ip4,
1334			     page, offset, size, flags);
1335	if (ret == -EOPNOTSUPP) {
1336		release_sock(sk);
1337		return sock_no_sendpage(sk->sk_socket, page, offset,
1338					size, flags);
1339	}
1340	if (ret < 0) {
1341		udp_flush_pending_frames(sk);
1342		goto out;
1343	}
1344
1345	up->len += size;
1346	if (!(up->corkflag || (flags&MSG_MORE)))
1347		ret = udp_push_pending_frames(sk);
1348	if (!ret)
1349		ret = size;
1350out:
1351	release_sock(sk);
1352	return ret;
1353}
1354
1355#define UDP_SKB_IS_STATELESS 0x80000000
1356
1357/* all head states (dst, sk, nf conntrack) except skb extensions are
1358 * cleared by udp_rcv().
1359 *
1360 * We need to preserve secpath, if present, to eventually process
1361 * IP_CMSG_PASSSEC at recvmsg() time.
1362 *
1363 * Other extensions can be cleared.
1364 */
1365static bool udp_try_make_stateless(struct sk_buff *skb)
1366{
1367	if (!skb_has_extensions(skb))
1368		return true;
1369
1370	if (!secpath_exists(skb)) {
1371		skb_ext_reset(skb);
1372		return true;
1373	}
1374
1375	return false;
1376}
1377
1378static void udp_set_dev_scratch(struct sk_buff *skb)
1379{
1380	struct udp_dev_scratch *scratch = udp_skb_scratch(skb);
1381
1382	BUILD_BUG_ON(sizeof(struct udp_dev_scratch) > sizeof(long));
1383	scratch->_tsize_state = skb->truesize;
1384#if BITS_PER_LONG == 64
1385	scratch->len = skb->len;
1386	scratch->csum_unnecessary = !!skb_csum_unnecessary(skb);
1387	scratch->is_linear = !skb_is_nonlinear(skb);
1388#endif
1389	if (udp_try_make_stateless(skb))
1390		scratch->_tsize_state |= UDP_SKB_IS_STATELESS;
1391}
1392
1393static void udp_skb_csum_unnecessary_set(struct sk_buff *skb)
1394{
1395	/* We come here after udp_lib_checksum_complete() returned 0.
1396	 * This means that __skb_checksum_complete() might have
1397	 * set skb->csum_valid to 1.
1398	 * On 64bit platforms, we can set csum_unnecessary
1399	 * to true, but only if the skb is not shared.
1400	 */
1401#if BITS_PER_LONG == 64
1402	if (!skb_shared(skb))
1403		udp_skb_scratch(skb)->csum_unnecessary = true;
1404#endif
1405}
1406
1407static int udp_skb_truesize(struct sk_buff *skb)
1408{
1409	return udp_skb_scratch(skb)->_tsize_state & ~UDP_SKB_IS_STATELESS;
1410}
1411
1412static bool udp_skb_has_head_state(struct sk_buff *skb)
1413{
1414	return !(udp_skb_scratch(skb)->_tsize_state & UDP_SKB_IS_STATELESS);
1415}
1416
1417/* fully reclaim rmem/fwd memory allocated for skb */
1418static void udp_rmem_release(struct sock *sk, int size, int partial,
1419			     bool rx_queue_lock_held)
1420{
1421	struct udp_sock *up = udp_sk(sk);
1422	struct sk_buff_head *sk_queue;
1423	int amt;
1424
1425	if (likely(partial)) {
1426		up->forward_deficit += size;
1427		size = up->forward_deficit;
1428		if (size < (sk->sk_rcvbuf >> 2) &&
1429		    !skb_queue_empty(&up->reader_queue))
1430			return;
1431	} else {
1432		size += up->forward_deficit;
1433	}
1434	up->forward_deficit = 0;
1435
1436	/* acquire the sk_receive_queue for fwd allocated memory scheduling,
1437	 * if the caller doesn't hold it already
1438	 */
1439	sk_queue = &sk->sk_receive_queue;
1440	if (!rx_queue_lock_held)
1441		spin_lock(&sk_queue->lock);
1442
1444	sk->sk_forward_alloc += size;
1445	amt = (sk->sk_forward_alloc - partial) & ~(SK_MEM_QUANTUM - 1);
1446	sk->sk_forward_alloc -= amt;
1447
1448	if (amt)
1449		__sk_mem_reduce_allocated(sk, amt >> SK_MEM_QUANTUM_SHIFT);
1450
1451	atomic_sub(size, &sk->sk_rmem_alloc);
1452
1453	/* this can save us from acquiring the rx queue lock on next receive */
1454	skb_queue_splice_tail_init(sk_queue, &up->reader_queue);
1455
1456	if (!rx_queue_lock_held)
1457		spin_unlock(&sk_queue->lock);
1458}
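
/* Worked example of the reclaim arithmetic above, with illustrative
 * figures only: assume SK_MEM_QUANTUM == 4096, partial == 1, and that
 * after the `+= size` above sk_forward_alloc is 9000 bytes. Then
 *
 *	amt = (9000 - 1) & ~(4096 - 1) = 8192
 *
 * so two whole quanta are returned to the memory accounting while the
 * remaining 808 bytes stay cached in sk_forward_alloc for the next
 * receive.
 */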
1459
1460/* Note: called with reader_queue.lock held.
1461 * Instead of using skb->truesize here, find a copy of it in skb->dev_scratch.
1462 * This avoids a cache line miss while receive_queue lock is held.
1463 * Look at __udp_enqueue_schedule_skb() to find where this copy is done.
1464 */
1465void udp_skb_destructor(struct sock *sk, struct sk_buff *skb)
1466{
1467	prefetch(&skb->data);
1468	udp_rmem_release(sk, udp_skb_truesize(skb), 1, false);
1469}
1470EXPORT_SYMBOL(udp_skb_destructor);
1471
1472/* as above, but the caller held the rx queue lock, too */
1473static void udp_skb_dtor_locked(struct sock *sk, struct sk_buff *skb)
1474{
1475	prefetch(&skb->data);
1476	udp_rmem_release(sk, udp_skb_truesize(skb), 1, true);
1477}
1478
1479/* The idea of busylocks is to let producers grab an extra spinlock
1480 * to relieve pressure on the receive_queue spinlock shared by the consumer.
1481 * Under flood, this means that only one producer can be in line
1482 * trying to acquire the receive_queue spinlock.
1483 * These busylocks can be allocated in a per-cpu manner, instead of a
1484 * per-socket one (that would consume a cache line per socket).
1485 */
1486static int udp_busylocks_log __read_mostly;
1487static spinlock_t *udp_busylocks __read_mostly;
1488
1489static spinlock_t *busylock_acquire(void *ptr)
1490{
1491	spinlock_t *busy;
1492
1493	busy = udp_busylocks + hash_ptr(ptr, udp_busylocks_log);
1494	spin_lock(busy);
1495	return busy;
1496}
1497
1498static void busylock_release(spinlock_t *busy)
1499{
1500	if (busy)
1501		spin_unlock(busy);
1502}
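
/* Minimal usage sketch for the pair above, as done by the enqueue path
 * below (illustrative only):
 *
 *	spinlock_t *busy = NULL;
 *
 *	if (receive queue is more than half full)
 *		busy = busylock_acquire(sk);	- hash_ptr(sk, log) picks a slot
 *	... contend on sk_receive_queue.lock ...
 *	busylock_release(busy);			- no-op when busy == NULL
 *
 * Unrelated sockets may hash to the same busylock; that is fine, the
 * lock only has to serialize flooding producers.
 */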
1503
1504int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
1505{
1506	struct sk_buff_head *list = &sk->sk_receive_queue;
1507	int rmem, delta, amt, err = -ENOMEM;
1508	spinlock_t *busy = NULL;
1509	int size;
1510
1511	/* try to avoid the costly atomic add/sub pair when the receive
1512	 * queue is full; always allow at least a packet
1513	 */
1514	rmem = atomic_read(&sk->sk_rmem_alloc);
1515	if (rmem > sk->sk_rcvbuf)
1516		goto drop;
1517
1518	/* Under memory pressure, it is helpful to give udp_recvmsg()
1519	 * linear skbs:
1520	 * - Reduce memory overhead and thus increase receive queue capacity
1521	 * - Less cache line misses at copyout() time
1522	 * - Less work at consume_skb() (less alien page frag freeing)
1523	 */
1524	if (rmem > (sk->sk_rcvbuf >> 1)) {
1525		skb_condense(skb);
1526
1527		busy = busylock_acquire(sk);
1528	}
1529	size = skb->truesize;
1530	udp_set_dev_scratch(skb);
1531
1532	/* we drop only if the receive buf is full and the receive
1533	 * queue contains some other skb
1534	 */
1535	rmem = atomic_add_return(size, &sk->sk_rmem_alloc);
1536	if (rmem > (size + (unsigned int)sk->sk_rcvbuf))
1537		goto uncharge_drop;
1538
1539	spin_lock(&list->lock);
1540	if (size >= sk->sk_forward_alloc) {
1541		amt = sk_mem_pages(size);
1542		delta = amt << SK_MEM_QUANTUM_SHIFT;
1543		if (!__sk_mem_raise_allocated(sk, delta, amt, SK_MEM_RECV)) {
1544			err = -ENOBUFS;
1545			spin_unlock(&list->lock);
1546			goto uncharge_drop;
1547		}
1548
1549		sk->sk_forward_alloc += delta;
1550	}
1551
1552	sk->sk_forward_alloc -= size;
1553
1554	/* no need to set up a destructor; we will explicitly release the
1555	 * forward allocated memory on dequeue
1556	 */
1557	sock_skb_set_dropcount(sk, skb);
1558
1559	__skb_queue_tail(list, skb);
1560	spin_unlock(&list->lock);
1561
1562	if (!sock_flag(sk, SOCK_DEAD))
1563		sk->sk_data_ready(sk);
1564
1565	busylock_release(busy);
1566	return 0;
1567
1568uncharge_drop:
1569	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
1570
1571drop:
1572	atomic_inc(&sk->sk_drops);
1573	busylock_release(busy);
1574	return err;
1575}
1576EXPORT_SYMBOL_GPL(__udp_enqueue_schedule_skb);
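
/* Worked example of the forward-alloc path above (illustrative figures,
 * assuming SK_MEM_QUANTUM == 4096): for size == 1500 and
 * sk_forward_alloc == 500, sk_mem_pages(1500) == 1, so delta == 4096 is
 * raised from the protocol memory accounting. sk_forward_alloc becomes
 * 4596, and the final `-= size` leaves 3096 bytes pre-charged for
 * subsequent packets.
 */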
1577
1578void udp_destruct_sock(struct sock *sk)
1579{
1580	/* reclaim completely the forward allocated memory */
1581	struct udp_sock *up = udp_sk(sk);
1582	unsigned int total = 0;
1583	struct sk_buff *skb;
1584
1585	skb_queue_splice_tail_init(&sk->sk_receive_queue, &up->reader_queue);
1586	while ((skb = __skb_dequeue(&up->reader_queue)) != NULL) {
1587		total += skb->truesize;
1588		kfree_skb(skb);
1589	}
1590	udp_rmem_release(sk, total, 0, true);
1591
1592	inet_sock_destruct(sk);
1593}
1594EXPORT_SYMBOL_GPL(udp_destruct_sock);
1595
1596int udp_init_sock(struct sock *sk)
1597{
1598	skb_queue_head_init(&udp_sk(sk)->reader_queue);
1599	sk->sk_destruct = udp_destruct_sock;
1600	return 0;
1601}
1602EXPORT_SYMBOL_GPL(udp_init_sock);
1603
1604void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len)
1605{
1606	if (unlikely(READ_ONCE(sk->sk_peek_off) >= 0)) {
1607		bool slow = lock_sock_fast(sk);
1608
1609		sk_peek_offset_bwd(sk, len);
1610		unlock_sock_fast(sk, slow);
1611	}
1612
1613	if (!skb_unref(skb))
1614		return;
1615
1616	/* In the more common cases we cleared the head states previously,
1617	 * see __udp_queue_rcv_skb().
1618	 */
1619	if (unlikely(udp_skb_has_head_state(skb)))
1620		skb_release_head_state(skb);
1621	__consume_stateless_skb(skb);
1622}
1623EXPORT_SYMBOL_GPL(skb_consume_udp);
1624
1625static struct sk_buff *__first_packet_length(struct sock *sk,
1626					     struct sk_buff_head *rcvq,
1627					     int *total)
1628{
1629	struct sk_buff *skb;
1630
1631	while ((skb = skb_peek(rcvq)) != NULL) {
1632		if (udp_lib_checksum_complete(skb)) {
1633			__UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS,
1634					IS_UDPLITE(sk));
1635			__UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS,
1636					IS_UDPLITE(sk));
1637			atomic_inc(&sk->sk_drops);
1638			__skb_unlink(skb, rcvq);
1639			*total += skb->truesize;
1640			kfree_skb(skb);
1641		} else {
1642			udp_skb_csum_unnecessary_set(skb);
1643			break;
1644		}
1645	}
1646	return skb;
1647}
1648
1649/**
1650 *	first_packet_length	- return length of first packet in receive queue
1651 *	@sk: socket
1652 *
1653 *	Drops all frames with bad checksums until a valid one is found.
1654 *	Returns the length of the found skb, or -1 if none is found.
1655 */
1656static int first_packet_length(struct sock *sk)
1657{
1658	struct sk_buff_head *rcvq = &udp_sk(sk)->reader_queue;
1659	struct sk_buff_head *sk_queue = &sk->sk_receive_queue;
1660	struct sk_buff *skb;
1661	int total = 0;
1662	int res;
1663
1664	spin_lock_bh(&rcvq->lock);
1665	skb = __first_packet_length(sk, rcvq, &total);
1666	if (!skb && !skb_queue_empty_lockless(sk_queue)) {
1667		spin_lock(&sk_queue->lock);
1668		skb_queue_splice_tail_init(sk_queue, rcvq);
1669		spin_unlock(&sk_queue->lock);
1670
1671		skb = __first_packet_length(sk, rcvq, &total);
1672	}
1673	res = skb ? skb->len : -1;
1674	if (total)
1675		udp_rmem_release(sk, total, 1, false);
1676	spin_unlock_bh(&rcvq->lock);
1677	return res;
1678}
1679
1680/*
1681 *	IOCTL requests applicable to the UDP protocol
1682 */
1683
1684int udp_ioctl(struct sock *sk, int cmd, unsigned long arg)
1685{
1686	switch (cmd) {
1687	case SIOCOUTQ:
1688	{
1689		int amount = sk_wmem_alloc_get(sk);
1690
1691		return put_user(amount, (int __user *)arg);
1692	}
1693
1694	case SIOCINQ:
1695	{
1696		int amount = max_t(int, 0, first_packet_length(sk));
1697
1698		return put_user(amount, (int __user *)arg);
1699	}
1700
1701	default:
1702		return -ENOIOCTLCMD;
1703	}
1704
1705	return 0;
1706}
1707EXPORT_SYMBOL(udp_ioctl);
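
/* Illustrative userspace usage of the ioctls above (fd is hypothetical):
 *
 *	int pending;
 *
 *	ioctl(fd, SIOCINQ, &pending);	- length of the first queued datagram
 *	ioctl(fd, SIOCOUTQ, &pending);	- bytes queued for transmit
 *
 * Note that SIOCINQ reports the first datagram's length (0 if the queue
 * is empty), not the total number of queued bytes.
 */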
1708
1709struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags,
1710			       int noblock, int *off, int *err)
1711{
1712	struct sk_buff_head *sk_queue = &sk->sk_receive_queue;
1713	struct sk_buff_head *queue;
1714	struct sk_buff *last;
1715	long timeo;
1716	int error;
1717
1718	queue = &udp_sk(sk)->reader_queue;
1719	flags |= noblock ? MSG_DONTWAIT : 0;
1720	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1721	do {
1722		struct sk_buff *skb;
1723
1724		error = sock_error(sk);
1725		if (error)
1726			break;
1727
1728		error = -EAGAIN;
1729		do {
1730			spin_lock_bh(&queue->lock);
1731			skb = __skb_try_recv_from_queue(sk, queue, flags, off,
1732							err, &last);
1733			if (skb) {
1734				if (!(flags & MSG_PEEK))
1735					udp_skb_destructor(sk, skb);
1736				spin_unlock_bh(&queue->lock);
1737				return skb;
1738			}
1739
1740			if (skb_queue_empty_lockless(sk_queue)) {
1741				spin_unlock_bh(&queue->lock);
1742				goto busy_check;
1743			}
1744
1745			/* refill the reader queue and walk it again;
1746			 * keep both queues locked to avoid re-acquiring
1747			 * the sk_receive_queue lock if fwd memory scheduling
1748			 * is needed.
1749			 */
1750			spin_lock(&sk_queue->lock);
1751			skb_queue_splice_tail_init(sk_queue, queue);
1752
1753			skb = __skb_try_recv_from_queue(sk, queue, flags, off,
1754							err, &last);
1755			if (skb && !(flags & MSG_PEEK))
1756				udp_skb_dtor_locked(sk, skb);
1757			spin_unlock(&sk_queue->lock);
1758			spin_unlock_bh(&queue->lock);
1759			if (skb)
1760				return skb;
1761
1762busy_check:
1763			if (!sk_can_busy_loop(sk))
1764				break;
1765
1766			sk_busy_loop(sk, flags & MSG_DONTWAIT);
1767		} while (!skb_queue_empty_lockless(sk_queue));
1768
1769		/* sk_queue is empty, reader_queue may contain peeked packets */
1770	} while (timeo &&
1771		 !__skb_wait_for_more_packets(sk, &sk->sk_receive_queue,
1772					      &error, &timeo,
1773					      (struct sk_buff *)sk_queue));
1774
1775	*err = error;
1776	return NULL;
1777}
1778EXPORT_SYMBOL(__skb_recv_udp);
1779
1780/*
1781 * 	This should be easy: if there is something there, we
1782 * 	return it; otherwise we block.
1783 */
1784
1785int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
1786		int flags, int *addr_len)
1787{
1788	struct inet_sock *inet = inet_sk(sk);
1789	DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
1790	struct sk_buff *skb;
1791	unsigned int ulen, copied;
1792	int off, err, peeking = flags & MSG_PEEK;
1793	int is_udplite = IS_UDPLITE(sk);
1794	bool checksum_valid = false;
1795
1796	if (flags & MSG_ERRQUEUE)
1797		return ip_recv_error(sk, msg, len, addr_len);
1798
1799try_again:
1800	off = sk_peek_offset(sk, flags);
1801	skb = __skb_recv_udp(sk, flags, noblock, &off, &err);
1802	if (!skb)
1803		return err;
1804
1805	ulen = udp_skb_len(skb);
1806	copied = len;
1807	if (copied > ulen - off)
1808		copied = ulen - off;
1809	else if (copied < ulen)
1810		msg->msg_flags |= MSG_TRUNC;
1811
1812	/*
1813	 * If checksum is needed at all, try to do it while copying the
1814	 * data.  If the data is truncated, or if we only want a partial
1815	 * coverage checksum (UDP-Lite), do it before the copy.
1816	 */
1817
1818	if (copied < ulen || peeking ||
1819	    (is_udplite && UDP_SKB_CB(skb)->partial_cov)) {
1820		checksum_valid = udp_skb_csum_unnecessary(skb) ||
1821				!__udp_lib_checksum_complete(skb);
1822		if (!checksum_valid)
1823			goto csum_copy_err;
1824	}
1825
1826	if (checksum_valid || udp_skb_csum_unnecessary(skb)) {
1827		if (udp_skb_is_linear(skb))
1828			err = copy_linear_skb(skb, copied, off, &msg->msg_iter);
1829		else
1830			err = skb_copy_datagram_msg(skb, off, msg, copied);
1831	} else {
1832		err = skb_copy_and_csum_datagram_msg(skb, off, msg);
1833
1834		if (err == -EINVAL)
1835			goto csum_copy_err;
1836	}
1837
1838	if (unlikely(err)) {
1839		if (!peeking) {
1840			atomic_inc(&sk->sk_drops);
1841			UDP_INC_STATS(sock_net(sk),
1842				      UDP_MIB_INERRORS, is_udplite);
1843		}
1844		kfree_skb(skb);
1845		return err;
1846	}
1847
1848	if (!peeking)
1849		UDP_INC_STATS(sock_net(sk),
1850			      UDP_MIB_INDATAGRAMS, is_udplite);
1851
1852	sock_recv_ts_and_drops(msg, sk, skb);
1853
1854	/* Copy the address. */
1855	if (sin) {
1856		sin->sin_family = AF_INET;
1857		sin->sin_port = udp_hdr(skb)->source;
1858		sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
1859		memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
1860		*addr_len = sizeof(*sin);
1861
1862		if (cgroup_bpf_enabled)
1863			BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk,
1864							(struct sockaddr *)sin);
1865	}
1866
1867	if (udp_sk(sk)->gro_enabled)
1868		udp_cmsg_recv(msg, sk, skb);
1869
1870	if (inet->cmsg_flags)
1871		ip_cmsg_recv_offset(msg, sk, skb, sizeof(struct udphdr), off);
1872
1873	err = copied;
1874	if (flags & MSG_TRUNC)
1875		err = ulen;
1876
1877	skb_consume_udp(sk, skb, peeking ? -err : err);
1878	return err;
1879
1880csum_copy_err:
1881	if (!__sk_queue_drop_skb(sk, &udp_sk(sk)->reader_queue, skb, flags,
1882				 udp_skb_destructor)) {
1883		UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
1884		UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
1885	}
1886	kfree_skb(skb);
1887
1888	/* starting over for a new packet, but check if we need to yield */
1889	cond_resched();
1890	msg->msg_flags &= ~MSG_TRUNC;
1891	goto try_again;
1892}
1893
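/* Illustrative userspace sketch of the peek/truncation semantics
 * implemented above (hypothetical fd and buffer size):
 *
 *	char buf[64];
 *	ssize_t n;
 *
 *	n = recv(fd, buf, sizeof(buf), MSG_PEEK | MSG_TRUNC);
 *	- n is the full datagram length; the skb stays queued (MSG_PEEK)
 *	n = recv(fd, buf, sizeof(buf), 0);
 *	- consumes the datagram; at most 64 bytes are copied out
 */
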
1894int udp_pre_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
1895{
1896	/* This check is replicated from __ip4_datagram_connect() and
1897	 * intended to prevent the BPF program called below from accessing
1898	 * bytes beyond the bound specified by the user in addr_len.
1899	 */
1900	if (addr_len < sizeof(struct sockaddr_in))
1901		return -EINVAL;
1902
1903	return BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr);
1904}
1905EXPORT_SYMBOL(udp_pre_connect);
1906
1907int __udp_disconnect(struct sock *sk, int flags)
1908{
1909	struct inet_sock *inet = inet_sk(sk);
1910	/*
1911	 *	1003.1g - break association.
1912	 */
1913
1914	sk->sk_state = TCP_CLOSE;
1915	inet->inet_daddr = 0;
1916	inet->inet_dport = 0;
1917	sock_rps_reset_rxhash(sk);
1918	sk->sk_bound_dev_if = 0;
1919	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) {
1920		inet_reset_saddr(sk);
1921		if (sk->sk_prot->rehash &&
1922		    (sk->sk_userlocks & SOCK_BINDPORT_LOCK))
1923			sk->sk_prot->rehash(sk);
1924	}
1925
1926	if (!(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) {
1927		sk->sk_prot->unhash(sk);
1928		inet->inet_sport = 0;
1929	}
1930	sk_dst_reset(sk);
1931	return 0;
1932}
1933EXPORT_SYMBOL(__udp_disconnect);
1934
1935int udp_disconnect(struct sock *sk, int flags)
1936{
1937	lock_sock(sk);
1938	__udp_disconnect(sk, flags);
1939	release_sock(sk);
1940	return 0;
1941}
1942EXPORT_SYMBOL(udp_disconnect);
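
/* A connected UDP socket reaches __udp_disconnect() via connect() with
 * an AF_UNSPEC address; a minimal userspace sketch (hypothetical fd):
 *
 *	struct sockaddr sa = { .sa_family = AF_UNSPEC };
 *
 *	connect(fd, &sa, sizeof(sa));	- breaks the association
 *
 * Afterwards the socket can again sendto() arbitrary destinations.
 */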
1943
1944void udp_lib_unhash(struct sock *sk)
1945{
1946	if (sk_hashed(sk)) {
1947		struct udp_table *udptable = sk->sk_prot->h.udp_table;
1948		struct udp_hslot *hslot, *hslot2;
1949
1950		hslot  = udp_hashslot(udptable, sock_net(sk),
1951				      udp_sk(sk)->udp_port_hash);
1952		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
1953
1954		spin_lock_bh(&hslot->lock);
1955		if (rcu_access_pointer(sk->sk_reuseport_cb))
1956			reuseport_detach_sock(sk);
1957		if (sk_del_node_init_rcu(sk)) {
1958			hslot->count--;
1959			inet_sk(sk)->inet_num = 0;
1960			sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
1961
1962			spin_lock(&hslot2->lock);
1963			hlist_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
1964			hslot2->count--;
1965			spin_unlock(&hslot2->lock);
1966		}
1967		spin_unlock_bh(&hslot->lock);
1968	}
1969}
1970EXPORT_SYMBOL(udp_lib_unhash);
1971
1972/*
1973 * inet_rcv_saddr was changed, we must rehash secondary hash
1974 */
1975void udp_lib_rehash(struct sock *sk, u16 newhash)
1976{
1977	if (sk_hashed(sk)) {
1978		struct udp_table *udptable = sk->sk_prot->h.udp_table;
1979		struct udp_hslot *hslot, *hslot2, *nhslot2;
1980
1981		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
1982		nhslot2 = udp_hashslot2(udptable, newhash);
1983		udp_sk(sk)->udp_portaddr_hash = newhash;
1984
1985		if (hslot2 != nhslot2 ||
1986		    rcu_access_pointer(sk->sk_reuseport_cb)) {
1987			hslot = udp_hashslot(udptable, sock_net(sk),
1988					     udp_sk(sk)->udp_port_hash);
1989			/* we must lock primary chain too */
1990			spin_lock_bh(&hslot->lock);
1991			if (rcu_access_pointer(sk->sk_reuseport_cb))
1992				reuseport_detach_sock(sk);
1993
1994			if (hslot2 != nhslot2) {
1995				spin_lock(&hslot2->lock);
1996				hlist_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
1997				hslot2->count--;
1998				spin_unlock(&hslot2->lock);
1999
2000				spin_lock(&nhslot2->lock);
2001				hlist_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
2002							 &nhslot2->head);
2003				nhslot2->count++;
2004				spin_unlock(&nhslot2->lock);
2005			}
2006
2007			spin_unlock_bh(&hslot->lock);
2008		}
2009	}
2010}
2011EXPORT_SYMBOL(udp_lib_rehash);
2012
2013void udp_v4_rehash(struct sock *sk)
2014{
2015	u16 new_hash = ipv4_portaddr_hash(sock_net(sk),
2016					  inet_sk(sk)->inet_rcv_saddr,
2017					  inet_sk(sk)->inet_num);
2018	udp_lib_rehash(sk, new_hash);
2019}
2020
2021static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
2022{
2023	int rc;
2024
2025	if (inet_sk(sk)->inet_daddr) {
2026		sock_rps_save_rxhash(sk, skb);
2027		sk_mark_napi_id(sk, skb);
2028		sk_incoming_cpu_update(sk);
2029	} else {
2030		sk_mark_napi_id_once(sk, skb);
2031	}
2032
2033	rc = __udp_enqueue_schedule_skb(sk, skb);
2034	if (rc < 0) {
2035		int is_udplite = IS_UDPLITE(sk);
2036
2037		/* Note that an ENOMEM error is charged twice */
2038		if (rc == -ENOMEM)
2039			UDP_INC_STATS(sock_net(sk), UDP_MIB_RCVBUFERRORS,
2040					is_udplite);
2041		UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
2042		kfree_skb(skb);
2043		trace_udp_fail_queue_rcv_skb(rc, sk);
2044		return -1;
2045	}
2046
2047	return 0;
2048}
2049
2050/* returns:
2051 *  -1: error
2052 *   0: success
2053 *  >0: "udp encap" protocol resubmission
2054 *
2055 * Note that in the success and error cases, the skb is assumed to
2056 * have either been requeued or freed.
2057 */
2058static int udp_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
2059{
2060	struct udp_sock *up = udp_sk(sk);
2061	int is_udplite = IS_UDPLITE(sk);
2062
2063	/*
2064	 *	Charge it to the socket, dropping if the queue is full.
2065	 */
2066	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
2067		goto drop;
2068	nf_reset_ct(skb);
2069
2070	if (static_branch_unlikely(&udp_encap_needed_key) && up->encap_type) {
2071		int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
2072
2073		/*
2074		 * This is an encapsulation socket so pass the skb to
2075		 * the socket's udp_encap_rcv() hook. Otherwise, just
2076		 * fall through and pass this up the UDP socket.
2077		 * up->encap_rcv() returns the following value:
2078		 * =0 if skb was successfully passed to the encap
2079		 *    handler or was discarded by it.
2080		 * >0 if skb should be passed on to UDP.
2081		 * <0 if skb should be resubmitted as proto -N
2082		 */
2083
2084		/* if we're overly short, let UDP handle it */
2085		encap_rcv = READ_ONCE(up->encap_rcv);
2086		if (encap_rcv) {
2087			int ret;
2088
2089			/* Verify checksum before giving to encap */
2090			if (udp_lib_checksum_complete(skb))
2091				goto csum_error;
2092
2093			ret = encap_rcv(sk, skb);
2094			if (ret <= 0) {
2095				__UDP_INC_STATS(sock_net(sk),
2096						UDP_MIB_INDATAGRAMS,
2097						is_udplite);
2098				return -ret;
2099			}
2100		}
2101
2102		/* FALLTHROUGH -- it's a UDP Packet */
2103	}
2104
2105	/*
2106	 * 	UDP-Lite specific tests, ignored on UDP sockets
2107	 */
2108	if ((up->pcflag & UDPLITE_RECV_CC)  &&  UDP_SKB_CB(skb)->partial_cov) {
2109
2110		/*
2111		 * MIB statistics other than incrementing the error count are
2112		 * disabled for the following two types of errors: these depend
2113		 * on the application settings, not on the functioning of the
2114		 * protocol stack as such.
2115		 *
2116		 * RFC 3828 here recommends (sec 3.3): "There should also be a
2117		 * way ... to ... at least let the receiving application block
2118		 * delivery of packets with coverage values less than a value
2119		 * provided by the application."
2120		 */
2121		if (up->pcrlen == 0) {          /* full coverage was set  */
2122			net_dbg_ratelimited("UDPLite: partial coverage %d while full coverage %d requested\n",
2123					    UDP_SKB_CB(skb)->cscov, skb->len);
2124			goto drop;
2125		}
2126		/* The next case involves violating the min. coverage requested
2127		 * by the receiver. This is subtle: if the receiver wants x and x is
2128		 * greater than the buffer size/MTU, then the receiver will complain
2129		 * that it wants x while the sender emits packets of smaller size y.
2130		 * Therefore the above ...()->partial_cov statement is essential.
2131		 */
2132		if (UDP_SKB_CB(skb)->cscov  <  up->pcrlen) {
2133			net_dbg_ratelimited("UDPLite: coverage %d too small, need min %d\n",
2134					    UDP_SKB_CB(skb)->cscov, up->pcrlen);
2135			goto drop;
2136		}
2137	}
2138
2139	prefetch(&sk->sk_rmem_alloc);
2140	if (rcu_access_pointer(sk->sk_filter) &&
2141	    udp_lib_checksum_complete(skb))
2142			goto csum_error;
2143
2144	if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr)))
2145		goto drop;
2146
2147	udp_csum_pull_header(skb);
2148
2149	ipv4_pktinfo_prepare(sk, skb);
2150	return __udp_queue_rcv_skb(sk, skb);
2151
2152csum_error:
2153	__UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
2154drop:
2155	__UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
2156	atomic_inc(&sk->sk_drops);
2157	kfree_skb(skb);
2158	return -1;
2159}
2160
2161static int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
2162{
2163	struct sk_buff *next, *segs;
2164	int ret;
2165
2166	if (likely(!udp_unexpected_gso(sk, skb)))
2167		return udp_queue_rcv_one_skb(sk, skb);
2168
2169	BUILD_BUG_ON(sizeof(struct udp_skb_cb) > SKB_GSO_CB_OFFSET);
2170	__skb_push(skb, -skb_mac_offset(skb));
2171	segs = udp_rcv_segment(sk, skb, true);
2172	skb_list_walk_safe(segs, skb, next) {
2173		__skb_pull(skb, skb_transport_offset(skb));
2174		ret = udp_queue_rcv_one_skb(sk, skb);
2175		if (ret > 0)
2176			ip_protocol_deliver_rcu(dev_net(skb->dev), skb, -ret);
2177	}
2178	return 0;
2179}
2180
2181/* For TCP sockets, sk_rx_dst is protected by the socket lock.
2182 * For UDP, we use xchg() to guard against concurrent changes.
2183 */
2184bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
2185{
2186	struct dst_entry *old;
2187
2188	if (dst_hold_safe(dst)) {
2189		old = xchg(&sk->sk_rx_dst, dst);
2190		dst_release(old);
2191		return old != dst;
2192	}
2193	return false;
2194}
2195EXPORT_SYMBOL(udp_sk_rx_dst_set);
2196
2197/*
2198 *	Multicasts and broadcasts go to each listener.
2199 *
2200 *	Note: called only from the BH handler context.
2201 */
2202static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
2203				    struct udphdr  *uh,
2204				    __be32 saddr, __be32 daddr,
2205				    struct udp_table *udptable,
2206				    int proto)
2207{
2208	struct sock *sk, *first = NULL;
2209	unsigned short hnum = ntohs(uh->dest);
2210	struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
2211	unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
2212	unsigned int offset = offsetof(typeof(*sk), sk_node);
2213	int dif = skb->dev->ifindex;
2214	int sdif = inet_sdif(skb);
2215	struct hlist_node *node;
2216	struct sk_buff *nskb;
2217
2218	if (use_hash2) {
2219		hash2_any = ipv4_portaddr_hash(net, htonl(INADDR_ANY), hnum) &
2220			    udptable->mask;
2221		hash2 = ipv4_portaddr_hash(net, daddr, hnum) & udptable->mask;
2222start_lookup:
2223		hslot = &udptable->hash2[hash2];
2224		offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
2225	}
2226
2227	sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) {
2228		if (!__udp_is_mcast_sock(net, sk, uh->dest, daddr,
2229					 uh->source, saddr, dif, sdif, hnum))
2230			continue;
2231
2232		if (!first) {
2233			first = sk;
2234			continue;
2235		}
2236		nskb = skb_clone(skb, GFP_ATOMIC);
2237
2238		if (unlikely(!nskb)) {
2239			atomic_inc(&sk->sk_drops);
2240			__UDP_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
2241					IS_UDPLITE(sk));
2242			__UDP_INC_STATS(net, UDP_MIB_INERRORS,
2243					IS_UDPLITE(sk));
2244			continue;
2245		}
2246		if (udp_queue_rcv_skb(sk, nskb) > 0)
2247			consume_skb(nskb);
2248	}
2249
2250	/* Also lookup *:port if we are using hash2 and haven't done so yet. */
2251	if (use_hash2 && hash2 != hash2_any) {
2252		hash2 = hash2_any;
2253		goto start_lookup;
2254	}
2255
2256	if (first) {
2257		if (udp_queue_rcv_skb(first, skb) > 0)
2258			consume_skb(skb);
2259	} else {
2260		kfree_skb(skb);
2261		__UDP_INC_STATS(net, UDP_MIB_IGNOREDMULTI,
2262				proto == IPPROTO_UDPLITE);
2263	}
2264	return 0;
2265}
2266
2267/* Initialize the UDP checksum. If this exits with a zero value (success),
2268 * CHECKSUM_UNNECESSARY means that no more checks are required.
2269 * Otherwise, csum completion requires checksumming the packet body,
2270 * including the udp header, and folding it into skb->csum.
2271 */
2272static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
2273				 int proto)
2274{
2275	int err;
2276
2277	UDP_SKB_CB(skb)->partial_cov = 0;
2278	UDP_SKB_CB(skb)->cscov = skb->len;
2279
2280	if (proto == IPPROTO_UDPLITE) {
2281		err = udplite_checksum_init(skb, uh);
2282		if (err)
2283			return err;
2284
2285		if (UDP_SKB_CB(skb)->partial_cov) {
2286			skb->csum = inet_compute_pseudo(skb, proto);
2287			return 0;
2288		}
2289	}
2290
2291	/* Note, we are only interested in != 0 or == 0, thus the
2292	 * force to int.
2293	 */
2294	err = (__force int)skb_checksum_init_zero_check(skb, proto, uh->check,
2295							inet_compute_pseudo);
2296	if (err)
2297		return err;
2298
2299	if (skb->ip_summed == CHECKSUM_COMPLETE && !skb->csum_valid) {
2300		/* If SW calculated the value, we know it's bad */
2301		if (skb->csum_complete_sw)
2302			return 1;
2303
2304		/* HW says the value is bad. Let's validate that.
2305		 * skb->csum is no longer the full packet checksum,
2306		 * so don't treat it as such.
2307		 */
2308		skb_checksum_complete_unset(skb);
2309	}
2310
2311	return 0;
2312}
2313
2314/* wrapper for udp_queue_rcv_skb() taking care of csum conversion and
2315 * return code conversion for ip layer consumption
2316 */
2317static int udp_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
2318			       struct udphdr *uh)
2319{
2320	int ret;
2321
2322	if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
2323		skb_checksum_try_convert(skb, IPPROTO_UDP, inet_compute_pseudo);
2324
2325	ret = udp_queue_rcv_skb(sk, skb);
2326
2327	/* a return value > 0 means to resubmit the input; the ip layer
2328	 * wants the return value to be -protocol or 0
2329	 */
2330	if (ret > 0)
2331		return -ret;
2332	return 0;
2333}
2334
2335/*
2336 *	All we need to do is get the socket, and then do a checksum.
2337 */
2338
2339int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
2340		   int proto)
2341{
2342	struct sock *sk;
2343	struct udphdr *uh;
2344	unsigned short ulen;
2345	struct rtable *rt = skb_rtable(skb);
2346	__be32 saddr, daddr;
2347	struct net *net = dev_net(skb->dev);
2348	bool refcounted;
2349
2350	/*
2351	 *  Validate the packet.
2352	 */
2353	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
2354		goto drop;		/* No space for header. */
2355
2356	uh   = udp_hdr(skb);
2357	ulen = ntohs(uh->len);
2358	saddr = ip_hdr(skb)->saddr;
2359	daddr = ip_hdr(skb)->daddr;
2360
2361	if (ulen > skb->len)
2362		goto short_packet;
2363
2364	if (proto == IPPROTO_UDP) {
2365		/* UDP validates ulen. */
2366		if (ulen < sizeof(*uh) || pskb_trim_rcsum(skb, ulen))
2367			goto short_packet;
2368		uh = udp_hdr(skb);
2369	}
2370
2371	if (udp4_csum_init(skb, uh, proto))
2372		goto csum_error;
2373
2374	sk = skb_steal_sock(skb, &refcounted);
2375	if (sk) {
2376		struct dst_entry *dst = skb_dst(skb);
2377		int ret;
2378
2379		if (unlikely(sk->sk_rx_dst != dst))
2380			udp_sk_rx_dst_set(sk, dst);
2381
2382		ret = udp_unicast_rcv_skb(sk, skb, uh);
2383		if (refcounted)
2384			sock_put(sk);
2385		return ret;
2386	}
2387
2388	if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
2389		return __udp4_lib_mcast_deliver(net, skb, uh,
2390						saddr, daddr, udptable, proto);
2391
2392	sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
2393	if (sk)
2394		return udp_unicast_rcv_skb(sk, skb, uh);
2395
2396	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
2397		goto drop;
2398	nf_reset_ct(skb);
2399
2400	/* No socket. Drop the packet silently if the checksum is wrong. */
2401	if (udp_lib_checksum_complete(skb))
2402		goto csum_error;
2403
2404	__UDP_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
2405	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
2406
2407	/*
2408	 * Hmm.  We got a UDP packet to a port on which we
2409	 * aren't listening.  Ignore it.
2410	 */
2411	kfree_skb(skb);
2412	return 0;
2413
2414short_packet:
2415	net_dbg_ratelimited("UDP%s: short packet: From %pI4:%u %d/%d to %pI4:%u\n",
2416			    proto == IPPROTO_UDPLITE ? "Lite" : "",
2417			    &saddr, ntohs(uh->source),
2418			    ulen, skb->len,
2419			    &daddr, ntohs(uh->dest));
2420	goto drop;
2421
2422csum_error:
2423	/*
2424	 * RFC1122: OK.  Discards the bad packet silently (as far as
2425	 * the network is concerned, anyway) as per 4.1.3.4 (MUST).
2426	 */
2427	net_dbg_ratelimited("UDP%s: bad checksum. From %pI4:%u to %pI4:%u ulen %d\n",
2428			    proto == IPPROTO_UDPLITE ? "Lite" : "",
2429			    &saddr, ntohs(uh->source), &daddr, ntohs(uh->dest),
2430			    ulen);
2431	__UDP_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
2432drop:
2433	__UDP_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
2434	kfree_skb(skb);
2435	return 0;
2436}
2437
2438/* We can only early demux multicast if there is a single matching socket.
2439 * If more than one socket is found, return NULL.
2440 */
2441static struct sock *__udp4_lib_mcast_demux_lookup(struct net *net,
2442						  __be16 loc_port, __be32 loc_addr,
2443						  __be16 rmt_port, __be32 rmt_addr,
2444						  int dif, int sdif)
2445{
2446	struct sock *sk, *result;
2447	unsigned short hnum = ntohs(loc_port);
2448	unsigned int slot = udp_hashfn(net, hnum, udp_table.mask);
2449	struct udp_hslot *hslot = &udp_table.hash[slot];
2450
2451	/* Do not bother scanning a too-long list */
2452	if (hslot->count > 10)
2453		return NULL;
2454
2455	result = NULL;
2456	sk_for_each_rcu(sk, &hslot->head) {
2457		if (__udp_is_mcast_sock(net, sk, loc_port, loc_addr,
2458					rmt_port, rmt_addr, dif, sdif, hnum)) {
2459			if (result)
2460				return NULL;
2461			result = sk;
2462		}
2463	}
2464
2465	return result;
2466}
2467
2468/* For unicast we should only early demux connected sockets or we can
2469 * break forwarding setups.  The chains here can be long so only check
2470 * if the first socket is an exact match and if not move on.
2471 */
2472static struct sock *__udp4_lib_demux_lookup(struct net *net,
2473					    __be16 loc_port, __be32 loc_addr,
2474					    __be16 rmt_port, __be32 rmt_addr,
2475					    int dif, int sdif)
2476{
2477	unsigned short hnum = ntohs(loc_port);
2478	unsigned int hash2 = ipv4_portaddr_hash(net, loc_addr, hnum);
2479	unsigned int slot2 = hash2 & udp_table.mask;
2480	struct udp_hslot *hslot2 = &udp_table.hash2[slot2];
2481	INET_ADDR_COOKIE(acookie, rmt_addr, loc_addr);
2482	const __portpair ports = INET_COMBINED_PORTS(rmt_port, hnum);
2483	struct sock *sk;
2484
2485	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
2486		if (INET_MATCH(sk, net, acookie, rmt_addr,
2487			       loc_addr, ports, dif, sdif))
2488			return sk;
2489		/* Only check first socket in chain */
2490		break;
2491	}
2492	return NULL;
2493}
2494
2495int udp_v4_early_demux(struct sk_buff *skb)
2496{
2497	struct net *net = dev_net(skb->dev);
2498	struct in_device *in_dev = NULL;
2499	const struct iphdr *iph;
2500	const struct udphdr *uh;
2501	struct sock *sk = NULL;
2502	struct dst_entry *dst;
2503	int dif = skb->dev->ifindex;
2504	int sdif = inet_sdif(skb);
2505	int ours;
2506
2507	/* validate the packet */
2508	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct udphdr)))
2509		return 0;
2510
2511	iph = ip_hdr(skb);
2512	uh = udp_hdr(skb);
2513
2514	if (skb->pkt_type == PACKET_MULTICAST) {
2515		in_dev = __in_dev_get_rcu(skb->dev);
2516
2517		if (!in_dev)
2518			return 0;
2519
2520		ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr,
2521				       iph->protocol);
2522		if (!ours)
2523			return 0;
2524
2525		sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr,
2526						   uh->source, iph->saddr,
2527						   dif, sdif);
2528	} else if (skb->pkt_type == PACKET_HOST) {
2529		sk = __udp4_lib_demux_lookup(net, uh->dest, iph->daddr,
2530					     uh->source, iph->saddr, dif, sdif);
2531	}
2532
2533	if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
2534		return 0;
2535
2536	skb->sk = sk;
2537	skb->destructor = sock_efree;
2538	dst = READ_ONCE(sk->sk_rx_dst);
2539
2540	if (dst)
2541		dst = dst_check(dst, 0);
2542	if (dst) {
2543		u32 itag = 0;
2544
2545		/* Set noref for now.
2546		 * Any place that wants to hold the dst has to call
2547		 * dst_hold_safe().
2548		 */
2549		skb_dst_set_noref(skb, dst);
2550
2551		/* for unconnected multicast sockets we need to validate
2552		 * the source on each packet
2553		 */
2554		if (!inet_sk(sk)->inet_daddr && in_dev)
2555			return ip_mc_validate_source(skb, iph->daddr,
2556						     iph->saddr, iph->tos,
2557						     skb->dev, in_dev, &itag);
2558	}
2559	return 0;
2560}
2561
2562int udp_rcv(struct sk_buff *skb)
2563{
2564	return __udp4_lib_rcv(skb, &udp_table, IPPROTO_UDP);
2565}
2566
2567void udp_destroy_sock(struct sock *sk)
2568{
2569	struct udp_sock *up = udp_sk(sk);
2570	bool slow = lock_sock_fast(sk);
2571	udp_flush_pending_frames(sk);
2572	unlock_sock_fast(sk, slow);
2573	if (static_branch_unlikely(&udp_encap_needed_key)) {
2574		if (up->encap_type) {
2575			void (*encap_destroy)(struct sock *sk);
2576			encap_destroy = READ_ONCE(up->encap_destroy);
2577			if (encap_destroy)
2578				encap_destroy(sk);
2579		}
2580		if (up->encap_enabled)
2581			static_branch_dec(&udp_encap_needed_key);
2582	}
2583}
2584
2585/*
2586 *	Socket option code for UDP
2587 */
2588int udp_lib_setsockopt(struct sock *sk, int level, int optname,
2589		       sockptr_t optval, unsigned int optlen,
2590		       int (*push_pending_frames)(struct sock *))
2591{
2592	struct udp_sock *up = udp_sk(sk);
2593	int val, valbool;
2594	int err = 0;
2595	int is_udplite = IS_UDPLITE(sk);
2596
2597	if (optlen < sizeof(int))
2598		return -EINVAL;
2599
2600	if (copy_from_sockptr(&val, optval, sizeof(val)))
2601		return -EFAULT;
2602
2603	valbool = val ? 1 : 0;
2604
2605	switch (optname) {
2606	case UDP_CORK:
2607		if (val != 0) {
2608			up->corkflag = 1;
2609		} else {
2610			up->corkflag = 0;
2611			lock_sock(sk);
2612			push_pending_frames(sk);
2613			release_sock(sk);
2614		}
2615		break;
2616
2617	case UDP_ENCAP:
2618		switch (val) {
2619		case 0:
2620#ifdef CONFIG_XFRM
2621		case UDP_ENCAP_ESPINUDP:
2622		case UDP_ENCAP_ESPINUDP_NON_IKE:
2623#if IS_ENABLED(CONFIG_IPV6)
2624			if (sk->sk_family == AF_INET6)
2625				up->encap_rcv = ipv6_stub->xfrm6_udp_encap_rcv;
2626			else
2627#endif
2628				up->encap_rcv = xfrm4_udp_encap_rcv;
2629#endif
2630			fallthrough;
2631		case UDP_ENCAP_L2TPINUDP:
2632			up->encap_type = val;
2633			lock_sock(sk);
2634			udp_tunnel_encap_enable(sk->sk_socket);
2635			release_sock(sk);
2636			break;
2637		default:
2638			err = -ENOPROTOOPT;
2639			break;
2640		}
2641		break;
2642
2643	case UDP_NO_CHECK6_TX:
2644		up->no_check6_tx = valbool;
2645		break;
2646
2647	case UDP_NO_CHECK6_RX:
2648		up->no_check6_rx = valbool;
2649		break;
2650
2651	case UDP_SEGMENT:
2652		if (val < 0 || val > USHRT_MAX)
2653			return -EINVAL;
2654		up->gso_size = val;
2655		break;
2656
2657	case UDP_GRO:
2658		lock_sock(sk);
2659		if (valbool)
2660			udp_tunnel_encap_enable(sk->sk_socket);
2661		up->gro_enabled = valbool;
2662		release_sock(sk);
2663		break;
2664
2665	/*
2666	 * 	UDP-Lite's partial checksum coverage (RFC 3828).
2667	 */
2668	/* The sender sets the actual checksum coverage length via this option.
2669	 * The case coverage > packet length is handled by the send module. */
2670	case UDPLITE_SEND_CSCOV:
2671		if (!is_udplite)         /* Disable the option on UDP sockets */
2672			return -ENOPROTOOPT;
2673		if (val != 0 && val < 8) /* Illegal coverage: use default (8) */
2674			val = 8;
2675		else if (val > USHRT_MAX)
2676			val = USHRT_MAX;
2677		up->pcslen = val;
2678		up->pcflag |= UDPLITE_SEND_CC;
2679		break;
2680
2681	/* The receiver specifies a minimum checksum coverage value. To make
2682	 * sense, this should be set to at least 8 (as done below). If zero is
2683	 * used, this again means full checksum coverage.                     */
2684	case UDPLITE_RECV_CSCOV:
2685		if (!is_udplite)         /* Disable the option on UDP sockets */
2686			return -ENOPROTOOPT;
2687		if (val != 0 && val < 8) /* Avoid silly minimal values.       */
2688			val = 8;
2689		else if (val > USHRT_MAX)
2690			val = USHRT_MAX;
2691		up->pcrlen = val;
2692		up->pcflag |= UDPLITE_RECV_CC;
2693		break;
2694
2695	default:
2696		err = -ENOPROTOOPT;
2697		break;
2698	}
2699
2700	return err;
2701}
2702EXPORT_SYMBOL(udp_lib_setsockopt);
2703
2704int udp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
2705		   unsigned int optlen)
2706{
2707	if (level == SOL_UDP  ||  level == SOL_UDPLITE)
2708		return udp_lib_setsockopt(sk, level, optname,
2709					  optval, optlen,
2710					  udp_push_pending_frames);
2711	return ip_setsockopt(sk, level, optname, optval, optlen);
2712}
2713
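/* Illustrative userspace usage of two of the options handled above
 * (hypothetical fd; UDP_SEGMENT enables UDP GSO on the tx path):
 *
 *	int gso = 1400, one = 1;
 *
 *	setsockopt(fd, SOL_UDP, UDP_SEGMENT, &gso, sizeof(gso));
 *	- a single large send() is now split into 1400-byte datagrams
 *	setsockopt(fd, SOL_UDP, UDP_GRO, &one, sizeof(one));
 *	- allow aggregated payloads to be delivered on rx
 */
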
2714int udp_lib_getsockopt(struct sock *sk, int level, int optname,
2715		       char __user *optval, int __user *optlen)
2716{
2717	struct udp_sock *up = udp_sk(sk);
2718	int val, len;
2719
2720	if (get_user(len, optlen))
2721		return -EFAULT;
2722
2723	len = min_t(unsigned int, len, sizeof(int));
2724
2725	if (len < 0)
2726		return -EINVAL;
2727
2728	switch (optname) {
2729	case UDP_CORK:
2730		val = up->corkflag;
2731		break;
2732
2733	case UDP_ENCAP:
2734		val = up->encap_type;
2735		break;
2736
2737	case UDP_NO_CHECK6_TX:
2738		val = up->no_check6_tx;
2739		break;
2740
2741	case UDP_NO_CHECK6_RX:
2742		val = up->no_check6_rx;
2743		break;
2744
2745	case UDP_SEGMENT:
2746		val = up->gso_size;
2747		break;
2748
2749	/* The following two cannot be changed on UDP sockets; the return is
2750	 * always 0 (which corresponds to the full checksum coverage of UDP). */
2751	case UDPLITE_SEND_CSCOV:
2752		val = up->pcslen;
2753		break;
2754
2755	case UDPLITE_RECV_CSCOV:
2756		val = up->pcrlen;
2757		break;
2758
2759	default:
2760		return -ENOPROTOOPT;
2761	}
2762
2763	if (put_user(len, optlen))
2764		return -EFAULT;
2765	if (copy_to_user(optval, &val, len))
2766		return -EFAULT;
2767	return 0;
2768}
2769EXPORT_SYMBOL(udp_lib_getsockopt);
2770
2771int udp_getsockopt(struct sock *sk, int level, int optname,
2772		   char __user *optval, int __user *optlen)
2773{
2774	if (level == SOL_UDP  ||  level == SOL_UDPLITE)
2775		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
2776	return ip_getsockopt(sk, level, optname, optval, optlen);
2777}
2778
2779/**
2780 * 	udp_poll - wait for a UDP event.
2781 *	@file: - file struct
2782 *	@sock: - socket
2783 *	@wait: - poll table
2784 *
2785 *	This is the same as datagram poll, except for the special case of
2786 *	blocking sockets. If an application is using a blocking fd
2787 *	and a packet with a checksum error is in the queue,
2788 *	then select could return indicating data is available,
2789 *	but the subsequent read would block. Add special-case code
2790 *	to work around these arguably broken applications.
2791 */
2792__poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait)
2793{
2794	__poll_t mask = datagram_poll(file, sock, wait);
2795	struct sock *sk = sock->sk;
2796
2797	if (!skb_queue_empty_lockless(&udp_sk(sk)->reader_queue))
2798		mask |= EPOLLIN | EPOLLRDNORM;
2799
2800	/* Check for false positives due to checksum errors */
2801	if ((mask & EPOLLRDNORM) && !(file->f_flags & O_NONBLOCK) &&
2802	    !(sk->sk_shutdown & RCV_SHUTDOWN) && first_packet_length(sk) == -1)
2803		mask &= ~(EPOLLIN | EPOLLRDNORM);
2804
2805	return mask;
2806
2807}
2808EXPORT_SYMBOL(udp_poll);
2809
2810int udp_abort(struct sock *sk, int err)
2811{
2812	lock_sock(sk);
2813
2814	sk->sk_err = err;
2815	sk->sk_error_report(sk);
2816	__udp_disconnect(sk, 0);
2817
2818	release_sock(sk);
2819
2820	return 0;
2821}
2822EXPORT_SYMBOL_GPL(udp_abort);
2823
2824struct proto udp_prot = {
2825	.name			= "UDP",
2826	.owner			= THIS_MODULE,
2827	.close			= udp_lib_close,
2828	.pre_connect		= udp_pre_connect,
2829	.connect		= ip4_datagram_connect,
2830	.disconnect		= udp_disconnect,
2831	.ioctl			= udp_ioctl,
2832	.init			= udp_init_sock,
2833	.destroy		= udp_destroy_sock,
2834	.setsockopt		= udp_setsockopt,
2835	.getsockopt		= udp_getsockopt,
2836	.sendmsg		= udp_sendmsg,
2837	.recvmsg		= udp_recvmsg,
2838	.sendpage		= udp_sendpage,
2839	.release_cb		= ip4_datagram_release_cb,
2840	.hash			= udp_lib_hash,
2841	.unhash			= udp_lib_unhash,
2842	.rehash			= udp_v4_rehash,
2843	.get_port		= udp_v4_get_port,
2844	.memory_allocated	= &udp_memory_allocated,
2845	.sysctl_mem		= sysctl_udp_mem,
2846	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_udp_wmem_min),
2847	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_udp_rmem_min),
2848	.obj_size		= sizeof(struct udp_sock),
2849	.h.udp_table		= &udp_table,
2850	.diag_destroy		= udp_abort,
2851};
2852EXPORT_SYMBOL(udp_prot);
2853
2854/* ------------------------------------------------------------------------ */
2855#ifdef CONFIG_PROC_FS
2856
2857static struct sock *udp_get_first(struct seq_file *seq, int start)
2858{
2859	struct sock *sk;
2860	struct udp_seq_afinfo *afinfo;
2861	struct udp_iter_state *state = seq->private;
2862	struct net *net = seq_file_net(seq);
2863
2864	if (state->bpf_seq_afinfo)
2865		afinfo = state->bpf_seq_afinfo;
2866	else
2867		afinfo = PDE_DATA(file_inode(seq->file));
2868
2869	for (state->bucket = start; state->bucket <= afinfo->udp_table->mask;
2870	     ++state->bucket) {
2871		struct udp_hslot *hslot = &afinfo->udp_table->hash[state->bucket];
2872
2873		if (hlist_empty(&hslot->head))
2874			continue;
2875
2876		spin_lock_bh(&hslot->lock);
2877		sk_for_each(sk, &hslot->head) {
2878			if (!net_eq(sock_net(sk), net))
2879				continue;
2880			if (afinfo->family == AF_UNSPEC ||
2881			    sk->sk_family == afinfo->family)
2882				goto found;
2883		}
2884		spin_unlock_bh(&hslot->lock);
2885	}
2886	sk = NULL;
2887found:
2888	return sk;
2889}
2890
2891static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk)
2892{
2893	struct udp_seq_afinfo *afinfo;
2894	struct udp_iter_state *state = seq->private;
2895	struct net *net = seq_file_net(seq);
2896
2897	if (state->bpf_seq_afinfo)
2898		afinfo = state->bpf_seq_afinfo;
2899	else
2900		afinfo = PDE_DATA(file_inode(seq->file));
2901
2902	do {
2903		sk = sk_next(sk);
2904	} while (sk && (!net_eq(sock_net(sk), net) ||
2905			(afinfo->family != AF_UNSPEC &&
2906			 sk->sk_family != afinfo->family)));
2907
2908	if (!sk) {
2909		if (state->bucket <= afinfo->udp_table->mask)
2910			spin_unlock_bh(&afinfo->udp_table->hash[state->bucket].lock);
2911		return udp_get_first(seq, state->bucket + 1);
2912	}
2913	return sk;
2914}
2915
2916static struct sock *udp_get_idx(struct seq_file *seq, loff_t pos)
2917{
2918	struct sock *sk = udp_get_first(seq, 0);
2919
2920	if (sk)
2921		while (pos && (sk = udp_get_next(seq, sk)) != NULL)
2922			--pos;
2923	return pos ? NULL : sk;
2924}
2925
2926void *udp_seq_start(struct seq_file *seq, loff_t *pos)
2927{
2928	struct udp_iter_state *state = seq->private;
2929	state->bucket = MAX_UDP_PORTS;
2930
2931	return *pos ? udp_get_idx(seq, *pos-1) : SEQ_START_TOKEN;
2932}
2933EXPORT_SYMBOL(udp_seq_start);
2934
2935void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2936{
2937	struct sock *sk;
2938
2939	if (v == SEQ_START_TOKEN)
2940		sk = udp_get_idx(seq, 0);
2941	else
2942		sk = udp_get_next(seq, v);
2943
2944	++*pos;
2945	return sk;
2946}
2947EXPORT_SYMBOL(udp_seq_next);
2948
2949void udp_seq_stop(struct seq_file *seq, void *v)
2950{
2951	struct udp_seq_afinfo *afinfo;
2952	struct udp_iter_state *state = seq->private;
2953
2954	if (state->bpf_seq_afinfo)
2955		afinfo = state->bpf_seq_afinfo;
2956	else
2957		afinfo = PDE_DATA(file_inode(seq->file));
2958
2959	if (state->bucket <= afinfo->udp_table->mask)
2960		spin_unlock_bh(&afinfo->udp_table->hash[state->bucket].lock);
2961}
2962EXPORT_SYMBOL(udp_seq_stop);
2963
2964/* ------------------------------------------------------------------------ */
2965static void udp4_format_sock(struct sock *sp, struct seq_file *f,
2966		int bucket)
2967{
2968	struct inet_sock *inet = inet_sk(sp);
2969	__be32 dest = inet->inet_daddr;
2970	__be32 src  = inet->inet_rcv_saddr;
2971	__u16 destp	  = ntohs(inet->inet_dport);
2972	__u16 srcp	  = ntohs(inet->inet_sport);
2973
2974	seq_printf(f, "%5d: %08X:%04X %08X:%04X"
2975		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %u",
2976		bucket, src, srcp, dest, destp, sp->sk_state,
2977		sk_wmem_alloc_get(sp),
2978		udp_rqueue_get(sp),
2979		0, 0L, 0,
2980		from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
2981		0, sock_i_ino(sp),
2982		refcount_read(&sp->sk_refcnt), sp,
2983		atomic_read(&sp->sk_drops));
2984}
2985
2986int udp4_seq_show(struct seq_file *seq, void *v)
2987{
2988	seq_setwidth(seq, 127);
2989	if (v == SEQ_START_TOKEN)
2990		seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
2991			   "rx_queue tr tm->when retrnsmt   uid  timeout "
2992			   "inode ref pointer drops");
2993	else {
2994		struct udp_iter_state *state = seq->private;
2995
2996		udp4_format_sock(v, seq, state->bucket);
2997	}
2998	seq_pad(seq, '\n');
2999	return 0;
3000}
3001
3002#ifdef CONFIG_BPF_SYSCALL
3003struct bpf_iter__udp {
3004	__bpf_md_ptr(struct bpf_iter_meta *, meta);
3005	__bpf_md_ptr(struct udp_sock *, udp_sk);
3006	uid_t uid __aligned(8);
3007	int bucket __aligned(8);
3008};
3009
3010static int udp_prog_seq_show(struct bpf_prog *prog, struct bpf_iter_meta *meta,
3011			     struct udp_sock *udp_sk, uid_t uid, int bucket)
3012{
3013	struct bpf_iter__udp ctx;
3014
3015	meta->seq_num--;  /* skip SEQ_START_TOKEN */
3016	ctx.meta = meta;
3017	ctx.udp_sk = udp_sk;
3018	ctx.uid = uid;
3019	ctx.bucket = bucket;
3020	return bpf_iter_run_prog(prog, &ctx);
3021}
3022
3023static int bpf_iter_udp_seq_show(struct seq_file *seq, void *v)
3024{
3025	struct udp_iter_state *state = seq->private;
3026	struct bpf_iter_meta meta;
3027	struct bpf_prog *prog;
3028	struct sock *sk = v;
3029	uid_t uid;
3030
3031	if (v == SEQ_START_TOKEN)
3032		return 0;
3033
3034	uid = from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk));
3035	meta.seq = seq;
3036	prog = bpf_iter_get_info(&meta, false);
3037	return udp_prog_seq_show(prog, &meta, v, uid, state->bucket);
3038}
3039
3040static void bpf_iter_udp_seq_stop(struct seq_file *seq, void *v)
3041{
3042	struct bpf_iter_meta meta;
3043	struct bpf_prog *prog;
3044
3045	if (!v) {
3046		meta.seq = seq;
3047		prog = bpf_iter_get_info(&meta, true);
3048		if (prog)
3049			(void)udp_prog_seq_show(prog, &meta, v, 0, 0);
3050	}
3051
3052	udp_seq_stop(seq, v);
3053}
3054
3055static const struct seq_operations bpf_iter_udp_seq_ops = {
3056	.start		= udp_seq_start,
3057	.next		= udp_seq_next,
3058	.stop		= bpf_iter_udp_seq_stop,
3059	.show		= bpf_iter_udp_seq_show,
3060};
3061#endif
3062
3063const struct seq_operations udp_seq_ops = {
3064	.start		= udp_seq_start,
3065	.next		= udp_seq_next,
3066	.stop		= udp_seq_stop,
3067	.show		= udp4_seq_show,
3068};
3069EXPORT_SYMBOL(udp_seq_ops);
3070
3071static struct udp_seq_afinfo udp4_seq_afinfo = {
3072	.family		= AF_INET,
3073	.udp_table	= &udp_table,
3074};
3075
3076static int __net_init udp4_proc_init_net(struct net *net)
3077{
3078	if (!proc_create_net_data("udp", 0444, net->proc_net, &udp_seq_ops,
3079			sizeof(struct udp_iter_state), &udp4_seq_afinfo))
3080		return -ENOMEM;
3081	return 0;
3082}
3083
3084static void __net_exit udp4_proc_exit_net(struct net *net)
3085{
3086	remove_proc_entry("udp", net->proc_net);
3087}
3088
3089static struct pernet_operations udp4_net_ops = {
3090	.init = udp4_proc_init_net,
3091	.exit = udp4_proc_exit_net,
3092};
3093
3094int __init udp4_proc_init(void)
3095{
3096	return register_pernet_subsys(&udp4_net_ops);
3097}
3098
3099void udp4_proc_exit(void)
3100{
3101	unregister_pernet_subsys(&udp4_net_ops);
3102}
3103#endif /* CONFIG_PROC_FS */
3104
3105static __initdata unsigned long uhash_entries;
3106static int __init set_uhash_entries(char *str)
3107{
3108	ssize_t ret;
3109
3110	if (!str)
3111		return 0;
3112
3113	ret = kstrtoul(str, 0, &uhash_entries);
3114	if (ret)
3115		return 0;
3116
3117	if (uhash_entries && uhash_entries < UDP_HTABLE_SIZE_MIN)
3118		uhash_entries = UDP_HTABLE_SIZE_MIN;
3119	return 1;
3120}
3121__setup("uhash_entries=", set_uhash_entries);
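
/* The table size can therefore be forced on the kernel command line,
 * e.g. booting with:
 *
 *	uhash_entries=65536
 *
 * Non-zero values below UDP_HTABLE_SIZE_MIN are rounded up by the
 * parser above; 0 (or a parse error) keeps the auto-sized default.
 */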
3122
3123void __init udp_table_init(struct udp_table *table, const char *name)
3124{
3125	unsigned int i;
3126
3127	table->hash = alloc_large_system_hash(name,
3128					      2 * sizeof(struct udp_hslot),
3129					      uhash_entries,
3130					      21, /* one slot per 2 MB */
3131					      0,
3132					      &table->log,
3133					      &table->mask,
3134					      UDP_HTABLE_SIZE_MIN,
3135					      64 * 1024);
3136
3137	table->hash2 = table->hash + (table->mask + 1);
3138	for (i = 0; i <= table->mask; i++) {
3139		INIT_HLIST_HEAD(&table->hash[i].head);
3140		table->hash[i].count = 0;
3141		spin_lock_init(&table->hash[i].lock);
3142	}
3143	for (i = 0; i <= table->mask; i++) {
3144		INIT_HLIST_HEAD(&table->hash2[i].head);
3145		table->hash2[i].count = 0;
3146		spin_lock_init(&table->hash2[i].lock);
3147	}
3148}
3149
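/* Both tables live in one allocation: hash2 (local addr + port) starts
 * right after hash (port only), which is why the call above asks for
 * 2 * sizeof(struct udp_hslot) per entry. A sketch for mask == 1023:
 *
 *	table->hash[0 .. 1023]		primary slots, keyed by port
 *	table->hash2[0 .. 1023]		secondary slots, keyed by addr+port
 */
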
3150u32 udp_flow_hashrnd(void)
3151{
3152	static u32 hashrnd __read_mostly;
3153
3154	net_get_random_once(&hashrnd, sizeof(hashrnd));
3155
3156	return hashrnd;
3157}
3158EXPORT_SYMBOL(udp_flow_hashrnd);
3159
3160static void __udp_sysctl_init(struct net *net)
3161{
3162	net->ipv4.sysctl_udp_rmem_min = SK_MEM_QUANTUM;
3163	net->ipv4.sysctl_udp_wmem_min = SK_MEM_QUANTUM;
3164
3165#ifdef CONFIG_NET_L3_MASTER_DEV
3166	net->ipv4.sysctl_udp_l3mdev_accept = 0;
3167#endif
3168}
3169
3170static int __net_init udp_sysctl_init(struct net *net)
3171{
3172	__udp_sysctl_init(net);
3173	return 0;
3174}
3175
3176static struct pernet_operations __net_initdata udp_sysctl_ops = {
3177	.init	= udp_sysctl_init,
3178};
3179
3180#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
3181DEFINE_BPF_ITER_FUNC(udp, struct bpf_iter_meta *meta,
3182		     struct udp_sock *udp_sk, uid_t uid, int bucket)
3183
3184static int bpf_iter_init_udp(void *priv_data, struct bpf_iter_aux_info *aux)
3185{
3186	struct udp_iter_state *st = priv_data;
3187	struct udp_seq_afinfo *afinfo;
3188	int ret;
3189
3190	afinfo = kmalloc(sizeof(*afinfo), GFP_USER | __GFP_NOWARN);
3191	if (!afinfo)
3192		return -ENOMEM;
3193
3194	afinfo->family = AF_UNSPEC;
3195	afinfo->udp_table = &udp_table;
3196	st->bpf_seq_afinfo = afinfo;
3197	ret = bpf_iter_init_seq_net(priv_data, aux);
3198	if (ret)
3199		kfree(afinfo);
3200	return ret;
3201}
3202
3203static void bpf_iter_fini_udp(void *priv_data)
3204{
3205	struct udp_iter_state *st = priv_data;
3206
3207	kfree(st->bpf_seq_afinfo);
3208	bpf_iter_fini_seq_net(priv_data);
3209}
3210
3211static const struct bpf_iter_seq_info udp_seq_info = {
3212	.seq_ops		= &bpf_iter_udp_seq_ops,
3213	.init_seq_private	= bpf_iter_init_udp,
3214	.fini_seq_private	= bpf_iter_fini_udp,
3215	.seq_priv_size		= sizeof(struct udp_iter_state),
3216};
3217
3218static struct bpf_iter_reg udp_reg_info = {
3219	.target			= "udp",
3220	.ctx_arg_info_size	= 1,
3221	.ctx_arg_info		= {
3222		{ offsetof(struct bpf_iter__udp, udp_sk),
3223		  PTR_TO_BTF_ID_OR_NULL },
3224	},
3225	.seq_info		= &udp_seq_info,
3226};
3227
3228static void __init bpf_iter_register(void)
3229{
3230	udp_reg_info.ctx_arg_info[0].btf_id = btf_sock_ids[BTF_SOCK_TYPE_UDP];
3231	if (bpf_iter_reg_target(&udp_reg_info))
3232		pr_warn("Warning: could not register bpf iterator udp\n");
3233}
3234#endif
3235
3236void __init udp_init(void)
3237{
3238	unsigned long limit;
3239	unsigned int i;
3240
3241	udp_table_init(&udp_table, "UDP");
3242	limit = nr_free_buffer_pages() / 8;
3243	limit = max(limit, 128UL);
3244	sysctl_udp_mem[0] = limit / 4 * 3;
3245	sysctl_udp_mem[1] = limit;
3246	sysctl_udp_mem[2] = sysctl_udp_mem[0] * 2;
3247
3248	__udp_sysctl_init(&init_net);
3249
3250	/* 16 spinlocks per cpu */
3251	udp_busylocks_log = ilog2(nr_cpu_ids) + 4;
3252	udp_busylocks = kmalloc(sizeof(spinlock_t) << udp_busylocks_log,
3253				GFP_KERNEL);
3254	if (!udp_busylocks)
3255		panic("UDP: failed to alloc udp_busylocks\n");
3256	for (i = 0; i < (1U << udp_busylocks_log); i++)
3257		spin_lock_init(udp_busylocks + i);
3258
3259	if (register_pernet_subsys(&udp_sysctl_ops))
3260		panic("UDP: failed to init sysctl parameters.\n");
3261
3262#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
3263	bpf_iter_register();
3264#endif
3265}