net/core/sock.c (Linux v5.14.15)
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * INET		An implementation of the TCP/IP protocol suite for the LINUX
   4 *		operating system.  INET is implemented using the  BSD Socket
   5 *		interface as the means of communication with the user level.
   6 *
   7 *		Generic socket support routines. Memory allocators, socket lock/release
   8 *		handler for protocols to use and generic option handler.
   9 *
  10 * Authors:	Ross Biro
  11 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  12 *		Florian La Roche, <flla@stud.uni-sb.de>
  13 *		Alan Cox, <A.Cox@swansea.ac.uk>
  14 *
  15 * Fixes:
  16 *		Alan Cox	: 	Numerous verify_area() problems
  17 *		Alan Cox	:	Connecting on a connecting socket
  18 *					now returns an error for tcp.
  19 *		Alan Cox	:	sock->protocol is set correctly.
  20 *					and is not sometimes left as 0.
  21 *		Alan Cox	:	connect handles icmp errors on a
  22 *					connect properly. Unfortunately there
  23 *					is a restart syscall nasty there. I
  24 *					can't match BSD without hacking the C
  25 *					library. Ideas urgently sought!
  26 *		Alan Cox	:	Disallow bind() to addresses that are
  27 *					not ours - especially broadcast ones!!
  28 *		Alan Cox	:	Socket 1024 _IS_ ok for users. (fencepost)
  29 *		Alan Cox	:	sock_wfree/sock_rfree don't destroy sockets,
  30 *					instead they leave that for the DESTROY timer.
  31 *		Alan Cox	:	Clean up error flag in accept
  32 *		Alan Cox	:	TCP ack handling is buggy, the DESTROY timer
  33 *					was buggy. Put a remove_sock() in the handler
  34 *					for memory when we hit 0. Also altered the timer
  35 *					code. The ACK stuff can wait and needs major
  36 *					TCP layer surgery.
  37 *		Alan Cox	:	Fixed TCP ack bug, removed remove sock
  38 *					and fixed timer/inet_bh race.
  39 *		Alan Cox	:	Added zapped flag for TCP
  40 *		Alan Cox	:	Move kfree_skb into skbuff.c and tidied up surplus code
  41 *		Alan Cox	:	for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
  42 *		Alan Cox	:	kfree_s calls now are kfree_skbmem so we can track skb resources
  43 *		Alan Cox	:	Supports socket option broadcast now as does udp. Packet and raw need fixing.
  44 *		Alan Cox	:	Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
  45 *		Rick Sladkey	:	Relaxed UDP rules for matching packets.
  46 *		C.E.Hawkins	:	IFF_PROMISC/SIOCGHWADDR support
  47 *	Pauline Middelink	:	identd support
  48 *		Alan Cox	:	Fixed connect() taking signals I think.
  49 *		Alan Cox	:	SO_LINGER supported
  50 *		Alan Cox	:	Error reporting fixes
  51 *		Anonymous	:	inet_create tidied up (sk->reuse setting)
  52 *		Alan Cox	:	inet sockets don't set sk->type!
  53 *		Alan Cox	:	Split socket option code
  54 *		Alan Cox	:	Callbacks
  55 *		Alan Cox	:	Nagle flag for Charles & Johannes stuff
  56 *		Alex		:	Removed restriction on inet fioctl
  57 *		Alan Cox	:	Splitting INET from NET core
  58 *		Alan Cox	:	Fixed bogus SO_TYPE handling in getsockopt()
  59 *		Adam Caldwell	:	Missing return in SO_DONTROUTE/SO_DEBUG code
  60 *		Alan Cox	:	Split IP from generic code
  61 *		Alan Cox	:	New kfree_skbmem()
  62 *		Alan Cox	:	Make SO_DEBUG superuser only.
  63 *		Alan Cox	:	Allow anyone to clear SO_DEBUG
  64 *					(compatibility fix)
  65 *		Alan Cox	:	Added optimistic memory grabbing for AF_UNIX throughput.
  66 *		Alan Cox	:	Allocator for a socket is settable.
  67 *		Alan Cox	:	SO_ERROR includes soft errors.
  68 *		Alan Cox	:	Allow NULL arguments on some SO_ opts
  69 *		Alan Cox	: 	Generic socket allocation to make hooks
  70 *					easier (suggested by Craig Metz).
  71 *		Michael Pall	:	SO_ERROR returns positive errno again
  72 *              Steve Whitehouse:       Added default destructor to free
  73 *                                      protocol private data.
  74 *              Steve Whitehouse:       Added various other default routines
  75 *                                      common to several socket families.
  76 *              Chris Evans     :       Call suser() check last on F_SETOWN
  77 *		Jay Schulist	:	Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
  78 *		Andi Kleen	:	Add sock_kmalloc()/sock_kfree_s()
  79 *		Andi Kleen	:	Fix write_space callback
  80 *		Chris Evans	:	Security fixes - signedness again
  81 *		Arnaldo C. Melo :       cleanups, use skb_queue_purge
  82 *
  83 * To Fix:
  84 */
  85
  86#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  87
  88#include <asm/unaligned.h>
  89#include <linux/capability.h>
  90#include <linux/errno.h>
  91#include <linux/errqueue.h>
  92#include <linux/types.h>
  93#include <linux/socket.h>
  94#include <linux/in.h>
  95#include <linux/kernel.h>
  96#include <linux/module.h>
  97#include <linux/proc_fs.h>
  98#include <linux/seq_file.h>
  99#include <linux/sched.h>
 100#include <linux/sched/mm.h>
 101#include <linux/timer.h>
 102#include <linux/string.h>
 103#include <linux/sockios.h>
 104#include <linux/net.h>
 105#include <linux/mm.h>
 106#include <linux/slab.h>
 107#include <linux/interrupt.h>
 108#include <linux/poll.h>
 109#include <linux/tcp.h>
 110#include <linux/init.h>
 111#include <linux/highmem.h>
 112#include <linux/user_namespace.h>
 113#include <linux/static_key.h>
 114#include <linux/memcontrol.h>
 115#include <linux/prefetch.h>
 116#include <linux/compat.h>
 117
 118#include <linux/uaccess.h>
 119
 120#include <linux/netdevice.h>
 121#include <net/protocol.h>
 122#include <linux/skbuff.h>
 123#include <net/net_namespace.h>
 124#include <net/request_sock.h>
 125#include <net/sock.h>
 126#include <linux/net_tstamp.h>
 127#include <net/xfrm.h>
 128#include <linux/ipsec.h>
 129#include <net/cls_cgroup.h>
 130#include <net/netprio_cgroup.h>
 131#include <linux/sock_diag.h>
 132
 133#include <linux/filter.h>
 134#include <net/sock_reuseport.h>
 135#include <net/bpf_sk_storage.h>
 136
 137#include <trace/events/sock.h>
 138
 139#include <net/tcp.h>
 140#include <net/busy_poll.h>
 141
 142#include <linux/ethtool.h>
 143
 144static DEFINE_MUTEX(proto_list_mutex);
 145static LIST_HEAD(proto_list);
 146
 147static void sock_inuse_add(struct net *net, int val);
 148
 149/**
 150 * sk_ns_capable - General socket capability test
 151 * @sk: Socket to use a capability on or through
 152 * @user_ns: The user namespace of the capability to use
 153 * @cap: The capability to use
 154 *
  155 * Test to see if the opener of the socket had the capability @cap when
  156 * the socket was created and the current process has the capability
  157 * @cap in the user namespace @user_ns.
 158 */
 159bool sk_ns_capable(const struct sock *sk,
 160		   struct user_namespace *user_ns, int cap)
 161{
 162	return file_ns_capable(sk->sk_socket->file, user_ns, cap) &&
 163		ns_capable(user_ns, cap);
 164}
 165EXPORT_SYMBOL(sk_ns_capable);
 166
 167/**
 168 * sk_capable - Socket global capability test
 169 * @sk: Socket to use a capability on or through
 170 * @cap: The global capability to use
 171 *
  172 * Test to see if the opener of the socket had the capability @cap when
  173 * the socket was created and the current process has the capability
  174 * @cap in all user namespaces.
 175 */
 176bool sk_capable(const struct sock *sk, int cap)
 177{
 178	return sk_ns_capable(sk, &init_user_ns, cap);
 179}
 180EXPORT_SYMBOL(sk_capable);
 181
 182/**
 183 * sk_net_capable - Network namespace socket capability test
 184 * @sk: Socket to use a capability on or through
 185 * @cap: The capability to use
 186 *
  187 * Test to see if the opener of the socket had the capability @cap when the
  188 * socket was created and the current process has the capability @cap over
  189 * the network namespace the socket is a member of.
 190 */
 191bool sk_net_capable(const struct sock *sk, int cap)
 192{
 193	return sk_ns_capable(sk, sock_net(sk)->user_ns, cap);
 194}
 195EXPORT_SYMBOL(sk_net_capable);
 196
 197/*
 198 * Each address family might have different locking rules, so we have
 199 * one slock key per address family and separate keys for internal and
 200 * userspace sockets.
 201 */
 202static struct lock_class_key af_family_keys[AF_MAX];
 203static struct lock_class_key af_family_kern_keys[AF_MAX];
 204static struct lock_class_key af_family_slock_keys[AF_MAX];
 205static struct lock_class_key af_family_kern_slock_keys[AF_MAX];
 206
 207/*
 208 * Make lock validator output more readable. (we pre-construct these
 209 * strings build-time, so that runtime initialization of socket
 210 * locks is fast):
 211 */
 212
 213#define _sock_locks(x)						  \
 214  x "AF_UNSPEC",	x "AF_UNIX"     ,	x "AF_INET"     , \
 215  x "AF_AX25"  ,	x "AF_IPX"      ,	x "AF_APPLETALK", \
 216  x "AF_NETROM",	x "AF_BRIDGE"   ,	x "AF_ATMPVC"   , \
 217  x "AF_X25"   ,	x "AF_INET6"    ,	x "AF_ROSE"     , \
 218  x "AF_DECnet",	x "AF_NETBEUI"  ,	x "AF_SECURITY" , \
 219  x "AF_KEY"   ,	x "AF_NETLINK"  ,	x "AF_PACKET"   , \
 220  x "AF_ASH"   ,	x "AF_ECONET"   ,	x "AF_ATMSVC"   , \
 221  x "AF_RDS"   ,	x "AF_SNA"      ,	x "AF_IRDA"     , \
 222  x "AF_PPPOX" ,	x "AF_WANPIPE"  ,	x "AF_LLC"      , \
 223  x "27"       ,	x "28"          ,	x "AF_CAN"      , \
 224  x "AF_TIPC"  ,	x "AF_BLUETOOTH",	x "IUCV"        , \
 225  x "AF_RXRPC" ,	x "AF_ISDN"     ,	x "AF_PHONET"   , \
 226  x "AF_IEEE802154",	x "AF_CAIF"	,	x "AF_ALG"      , \
 227  x "AF_NFC"   ,	x "AF_VSOCK"    ,	x "AF_KCM"      , \
 228  x "AF_QIPCRTR",	x "AF_SMC"	,	x "AF_XDP"	, \
 229  x "AF_MAX"
 230
 231static const char *const af_family_key_strings[AF_MAX+1] = {
 232	_sock_locks("sk_lock-")
 233};
 234static const char *const af_family_slock_key_strings[AF_MAX+1] = {
 235	_sock_locks("slock-")
 236};
 237static const char *const af_family_clock_key_strings[AF_MAX+1] = {
 238	_sock_locks("clock-")
 239};
 240
 241static const char *const af_family_kern_key_strings[AF_MAX+1] = {
 242	_sock_locks("k-sk_lock-")
 243};
 244static const char *const af_family_kern_slock_key_strings[AF_MAX+1] = {
 245	_sock_locks("k-slock-")
 246};
 247static const char *const af_family_kern_clock_key_strings[AF_MAX+1] = {
 248	_sock_locks("k-clock-")
 249};
 250static const char *const af_family_rlock_key_strings[AF_MAX+1] = {
 251	_sock_locks("rlock-")
 252};
 253static const char *const af_family_wlock_key_strings[AF_MAX+1] = {
 254	_sock_locks("wlock-")
 255};
 256static const char *const af_family_elock_key_strings[AF_MAX+1] = {
 257	_sock_locks("elock-")
 258};
 259
 260/*
 261 * sk_callback_lock and sk queues locking rules are per-address-family,
 262 * so split the lock classes by using a per-AF key:
 263 */
 264static struct lock_class_key af_callback_keys[AF_MAX];
 265static struct lock_class_key af_rlock_keys[AF_MAX];
 266static struct lock_class_key af_wlock_keys[AF_MAX];
 267static struct lock_class_key af_elock_keys[AF_MAX];
 268static struct lock_class_key af_kern_callback_keys[AF_MAX];
 269
 270/* Run time adjustable parameters. */
 271__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
 272EXPORT_SYMBOL(sysctl_wmem_max);
 273__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
 274EXPORT_SYMBOL(sysctl_rmem_max);
 275__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
 276__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;
 277
 278/* Maximal space eaten by iovec or ancillary data plus some space */
 279int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
 280EXPORT_SYMBOL(sysctl_optmem_max);
 281
 282int sysctl_tstamp_allow_data __read_mostly = 1;
 283
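
The limits above (sysctl_wmem_max, sysctl_rmem_max, their defaults and sysctl_optmem_max) are runtime tunables exposed under /proc/sys/net/core/. As a rough illustration (editor's sketch, not part of this file), a small userspace program can read one of them back; the procfs path is the standard location, everything else here is illustrative.

#include <stdio.h>

/* Sketch: print net.core.rmem_max, the userspace-visible counterpart
 * of sysctl_rmem_max above. */
int main(void)
{
	FILE *f = fopen("/proc/sys/net/core/rmem_max", "r");
	long rmem_max;

	if (!f) {
		perror("rmem_max");
		return 1;
	}
	if (fscanf(f, "%ld", &rmem_max) == 1)
		printf("net.core.rmem_max = %ld bytes\n", rmem_max);
	fclose(f);
	return 0;
}
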
 284DEFINE_STATIC_KEY_FALSE(memalloc_socks_key);
 285EXPORT_SYMBOL_GPL(memalloc_socks_key);
 286
 287/**
 288 * sk_set_memalloc - sets %SOCK_MEMALLOC
 289 * @sk: socket to set it on
 290 *
 291 * Set %SOCK_MEMALLOC on a socket for access to emergency reserves.
 292 * It's the responsibility of the admin to adjust min_free_kbytes
 293 * to meet the requirements
 294 */
 295void sk_set_memalloc(struct sock *sk)
 296{
 297	sock_set_flag(sk, SOCK_MEMALLOC);
 298	sk->sk_allocation |= __GFP_MEMALLOC;
 299	static_branch_inc(&memalloc_socks_key);
 300}
 301EXPORT_SYMBOL_GPL(sk_set_memalloc);
 302
 303void sk_clear_memalloc(struct sock *sk)
 304{
 305	sock_reset_flag(sk, SOCK_MEMALLOC);
 306	sk->sk_allocation &= ~__GFP_MEMALLOC;
 307	static_branch_dec(&memalloc_socks_key);
 308
 309	/*
 310	 * SOCK_MEMALLOC is allowed to ignore rmem limits to ensure forward
 311	 * progress of swapping. SOCK_MEMALLOC may be cleared while
 312	 * it has rmem allocations due to the last swapfile being deactivated
 313	 * but there is a risk that the socket is unusable due to exceeding
 314	 * the rmem limits. Reclaim the reserves and obey rmem limits again.
 315	 */
 316	sk_mem_reclaim(sk);
 317}
 318EXPORT_SYMBOL_GPL(sk_clear_memalloc);
 319
 320int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 321{
 322	int ret;
 323	unsigned int noreclaim_flag;
 324
 325	/* these should have been dropped before queueing */
 326	BUG_ON(!sock_flag(sk, SOCK_MEMALLOC));
 327
 328	noreclaim_flag = memalloc_noreclaim_save();
 329	ret = sk->sk_backlog_rcv(sk, skb);
 330	memalloc_noreclaim_restore(noreclaim_flag);
 331
 332	return ret;
 333}
 334EXPORT_SYMBOL(__sk_backlog_rcv);
 335
 336void sk_error_report(struct sock *sk)
 337{
 338	sk->sk_error_report(sk);
 339
 340	switch (sk->sk_family) {
 341	case AF_INET:
 342		fallthrough;
 343	case AF_INET6:
 344		trace_inet_sk_error_report(sk);
 345		break;
 346	default:
 347		break;
 348	}
 349}
 350EXPORT_SYMBOL(sk_error_report);
 351
 352static int sock_get_timeout(long timeo, void *optval, bool old_timeval)
 353{
 354	struct __kernel_sock_timeval tv;
 355
 356	if (timeo == MAX_SCHEDULE_TIMEOUT) {
 357		tv.tv_sec = 0;
 358		tv.tv_usec = 0;
 359	} else {
 360		tv.tv_sec = timeo / HZ;
 361		tv.tv_usec = ((timeo % HZ) * USEC_PER_SEC) / HZ;
 362	}
 363
 364	if (old_timeval && in_compat_syscall() && !COMPAT_USE_64BIT_TIME) {
 365		struct old_timeval32 tv32 = { tv.tv_sec, tv.tv_usec };
 366		*(struct old_timeval32 *)optval = tv32;
 367		return sizeof(tv32);
 368	}
 369
 370	if (old_timeval) {
 371		struct __kernel_old_timeval old_tv;
 372		old_tv.tv_sec = tv.tv_sec;
 373		old_tv.tv_usec = tv.tv_usec;
 374		*(struct __kernel_old_timeval *)optval = old_tv;
 375		return sizeof(old_tv);
 376	}
 377
 378	*(struct __kernel_sock_timeval *)optval = tv;
 379	return sizeof(tv);
 380}
 381
 382static int sock_set_timeout(long *timeo_p, sockptr_t optval, int optlen,
 383			    bool old_timeval)
 384{
 385	struct __kernel_sock_timeval tv;
 386
 387	if (old_timeval && in_compat_syscall() && !COMPAT_USE_64BIT_TIME) {
 388		struct old_timeval32 tv32;
 389
 390		if (optlen < sizeof(tv32))
 391			return -EINVAL;
 392
 393		if (copy_from_sockptr(&tv32, optval, sizeof(tv32)))
 394			return -EFAULT;
 395		tv.tv_sec = tv32.tv_sec;
 396		tv.tv_usec = tv32.tv_usec;
 397	} else if (old_timeval) {
 398		struct __kernel_old_timeval old_tv;
 399
 400		if (optlen < sizeof(old_tv))
 401			return -EINVAL;
 402		if (copy_from_sockptr(&old_tv, optval, sizeof(old_tv)))
 403			return -EFAULT;
 404		tv.tv_sec = old_tv.tv_sec;
 405		tv.tv_usec = old_tv.tv_usec;
 406	} else {
 407		if (optlen < sizeof(tv))
 408			return -EINVAL;
 409		if (copy_from_sockptr(&tv, optval, sizeof(tv)))
 410			return -EFAULT;
 411	}
 412	if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
 413		return -EDOM;
 414
 415	if (tv.tv_sec < 0) {
 416		static int warned __read_mostly;
 417
 418		*timeo_p = 0;
 419		if (warned < 10 && net_ratelimit()) {
 420			warned++;
 421			pr_info("%s: `%s' (pid %d) tries to set negative timeout\n",
 422				__func__, current->comm, task_pid_nr(current));
 423		}
 424		return 0;
 425	}
 426	*timeo_p = MAX_SCHEDULE_TIMEOUT;
 427	if (tv.tv_sec == 0 && tv.tv_usec == 0)
 428		return 0;
 429	if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT / HZ - 1))
 430		*timeo_p = tv.tv_sec * HZ + DIV_ROUND_UP((unsigned long)tv.tv_usec, USEC_PER_SEC / HZ);
 431	return 0;
 432}
 433
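
For context, sock_set_timeout() above is what ultimately services SO_RCVTIMEO/SO_SNDTIMEO coming in through setsockopt(). A minimal userspace sketch (editor's illustration, not kernel code); the 2.5 second value and the helper name are arbitrary.

#include <stdio.h>
#include <sys/socket.h>
#include <sys/time.h>

/* Sketch: request a 2.5 second receive timeout. The kernel converts the
 * timeval into jiffies and stores it in sk->sk_rcvtimeo; a zero timeval
 * means "wait forever" (MAX_SCHEDULE_TIMEOUT above). */
static int set_rcv_timeout(int fd)
{
	struct timeval tv = { .tv_sec = 2, .tv_usec = 500000 };

	if (setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)) < 0) {
		perror("setsockopt(SO_RCVTIMEO)");
		return -1;
	}
	return 0;
}
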
 434static bool sock_needs_netstamp(const struct sock *sk)
 435{
 436	switch (sk->sk_family) {
 437	case AF_UNSPEC:
 438	case AF_UNIX:
 439		return false;
 440	default:
 441		return true;
 442	}
 443}
 444
 445static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
 446{
 447	if (sk->sk_flags & flags) {
 448		sk->sk_flags &= ~flags;
 449		if (sock_needs_netstamp(sk) &&
 450		    !(sk->sk_flags & SK_FLAGS_TIMESTAMP))
 451			net_disable_timestamp();
 452	}
 453}
 454
 455
 456int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 457{
 458	unsigned long flags;
 459	struct sk_buff_head *list = &sk->sk_receive_queue;
 460
 461	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
 462		atomic_inc(&sk->sk_drops);
 463		trace_sock_rcvqueue_full(sk, skb);
 464		return -ENOMEM;
 465	}
 466
 467	if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
 468		atomic_inc(&sk->sk_drops);
 469		return -ENOBUFS;
 470	}
 471
 472	skb->dev = NULL;
 473	skb_set_owner_r(skb, sk);
 474
  475	/* We escape from the RCU-protected region, so make sure we
  476	 * don't leak an unrefcounted dst.
  477	 */
 478	skb_dst_force(skb);
 479
 480	spin_lock_irqsave(&list->lock, flags);
 481	sock_skb_set_dropcount(sk, skb);
 482	__skb_queue_tail(list, skb);
 483	spin_unlock_irqrestore(&list->lock, flags);
 484
 485	if (!sock_flag(sk, SOCK_DEAD))
 486		sk->sk_data_ready(sk);
 487	return 0;
 488}
 489EXPORT_SYMBOL(__sock_queue_rcv_skb);
 490
 491int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 492{
 493	int err;
 494
 495	err = sk_filter(sk, skb);
 496	if (err)
 497		return err;
 498
 499	return __sock_queue_rcv_skb(sk, skb);
 500}
 501EXPORT_SYMBOL(sock_queue_rcv_skb);
 502
 503int __sk_receive_skb(struct sock *sk, struct sk_buff *skb,
 504		     const int nested, unsigned int trim_cap, bool refcounted)
 505{
 506	int rc = NET_RX_SUCCESS;
 507
 508	if (sk_filter_trim_cap(sk, skb, trim_cap))
 509		goto discard_and_relse;
 510
 511	skb->dev = NULL;
 512
 513	if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
 514		atomic_inc(&sk->sk_drops);
 515		goto discard_and_relse;
 516	}
 517	if (nested)
 518		bh_lock_sock_nested(sk);
 519	else
 520		bh_lock_sock(sk);
 521	if (!sock_owned_by_user(sk)) {
 522		/*
 523		 * trylock + unlock semantics:
 524		 */
 525		mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);
 526
 527		rc = sk_backlog_rcv(sk, skb);
 528
 529		mutex_release(&sk->sk_lock.dep_map, _RET_IP_);
 530	} else if (sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf))) {
 531		bh_unlock_sock(sk);
 532		atomic_inc(&sk->sk_drops);
 533		goto discard_and_relse;
 534	}
 535
 536	bh_unlock_sock(sk);
 537out:
 538	if (refcounted)
 539		sock_put(sk);
 540	return rc;
 541discard_and_relse:
 542	kfree_skb(skb);
 543	goto out;
 544}
 545EXPORT_SYMBOL(__sk_receive_skb);
 546
 547INDIRECT_CALLABLE_DECLARE(struct dst_entry *ip6_dst_check(struct dst_entry *,
 548							  u32));
 549INDIRECT_CALLABLE_DECLARE(struct dst_entry *ipv4_dst_check(struct dst_entry *,
 550							   u32));
 551struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
 552{
 553	struct dst_entry *dst = __sk_dst_get(sk);
 554
 555	if (dst && dst->obsolete &&
 556	    INDIRECT_CALL_INET(dst->ops->check, ip6_dst_check, ipv4_dst_check,
 557			       dst, cookie) == NULL) {
 558		sk_tx_queue_clear(sk);
 559		sk->sk_dst_pending_confirm = 0;
 560		RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
 561		dst_release(dst);
 562		return NULL;
 563	}
 564
 565	return dst;
 566}
 567EXPORT_SYMBOL(__sk_dst_check);
 568
 569struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
 570{
 571	struct dst_entry *dst = sk_dst_get(sk);
 572
 573	if (dst && dst->obsolete &&
 574	    INDIRECT_CALL_INET(dst->ops->check, ip6_dst_check, ipv4_dst_check,
 575			       dst, cookie) == NULL) {
 576		sk_dst_reset(sk);
 577		dst_release(dst);
 578		return NULL;
 579	}
 580
 581	return dst;
 582}
 583EXPORT_SYMBOL(sk_dst_check);
 584
 585static int sock_bindtoindex_locked(struct sock *sk, int ifindex)
 586{
 587	int ret = -ENOPROTOOPT;
 588#ifdef CONFIG_NETDEVICES
 589	struct net *net = sock_net(sk);
 590
 591	/* Sorry... */
 592	ret = -EPERM;
 593	if (sk->sk_bound_dev_if && !ns_capable(net->user_ns, CAP_NET_RAW))
 594		goto out;
 595
 596	ret = -EINVAL;
 597	if (ifindex < 0)
 598		goto out;
 599
 600	sk->sk_bound_dev_if = ifindex;
 601	if (sk->sk_prot->rehash)
 602		sk->sk_prot->rehash(sk);
 603	sk_dst_reset(sk);
 604
 605	ret = 0;
 606
 607out:
 608#endif
 609
 610	return ret;
 611}
 612
 613int sock_bindtoindex(struct sock *sk, int ifindex, bool lock_sk)
 614{
 615	int ret;
 616
 617	if (lock_sk)
 618		lock_sock(sk);
 619	ret = sock_bindtoindex_locked(sk, ifindex);
 620	if (lock_sk)
 621		release_sock(sk);
 622
 623	return ret;
 624}
 625EXPORT_SYMBOL(sock_bindtoindex);
 626
 627static int sock_setbindtodevice(struct sock *sk, sockptr_t optval, int optlen)
 628{
 629	int ret = -ENOPROTOOPT;
 630#ifdef CONFIG_NETDEVICES
 631	struct net *net = sock_net(sk);
 632	char devname[IFNAMSIZ];
 633	int index;
 634
 635	ret = -EINVAL;
 636	if (optlen < 0)
 637		goto out;
 638
 639	/* Bind this socket to a particular device like "eth0",
 640	 * as specified in the passed interface name. If the
 641	 * name is "" or the option length is zero the socket
 642	 * is not bound.
 643	 */
 644	if (optlen > IFNAMSIZ - 1)
 645		optlen = IFNAMSIZ - 1;
 646	memset(devname, 0, sizeof(devname));
 647
 648	ret = -EFAULT;
 649	if (copy_from_sockptr(devname, optval, optlen))
 650		goto out;
 651
 652	index = 0;
 653	if (devname[0] != '\0') {
 654		struct net_device *dev;
 655
 656		rcu_read_lock();
 657		dev = dev_get_by_name_rcu(net, devname);
 658		if (dev)
 659			index = dev->ifindex;
 660		rcu_read_unlock();
 661		ret = -ENODEV;
 662		if (!dev)
 663			goto out;
 664	}
 665
 666	return sock_bindtoindex(sk, index, true);
 667out:
 668#endif
 669
 670	return ret;
 671}
 672
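
A hedged userspace sketch of the option handled above: SO_BINDTODEVICE takes an interface name, and the kernel resolves it to an ifindex as in sock_setbindtodevice(). The interface name and helper name are placeholders; changing an existing binding may require CAP_NET_RAW.

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

/* Sketch: restrict a socket's traffic to one interface by name.
 * Passing a zero-length name would remove the binding. */
static int bind_to_device(int fd, const char *ifname)
{
	if (setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE,
		       ifname, strlen(ifname) + 1) < 0) {
		perror("setsockopt(SO_BINDTODEVICE)");
		return -1;
	}
	return 0;
}
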
 673static int sock_getbindtodevice(struct sock *sk, char __user *optval,
 674				int __user *optlen, int len)
 675{
 676	int ret = -ENOPROTOOPT;
 677#ifdef CONFIG_NETDEVICES
 678	struct net *net = sock_net(sk);
 679	char devname[IFNAMSIZ];
 680
 681	if (sk->sk_bound_dev_if == 0) {
 682		len = 0;
 683		goto zero;
 684	}
 685
 686	ret = -EINVAL;
 687	if (len < IFNAMSIZ)
 688		goto out;
 689
 690	ret = netdev_get_name(net, devname, sk->sk_bound_dev_if);
 691	if (ret)
 692		goto out;
 693
 694	len = strlen(devname) + 1;
 695
 696	ret = -EFAULT;
 697	if (copy_to_user(optval, devname, len))
 698		goto out;
 699
 700zero:
 701	ret = -EFAULT;
 702	if (put_user(len, optlen))
 703		goto out;
 704
 705	ret = 0;
 706
 707out:
 708#endif
 709
 710	return ret;
 711}
 712
 713bool sk_mc_loop(struct sock *sk)
 714{
 715	if (dev_recursion_level())
 716		return false;
 717	if (!sk)
 718		return true;
 719	switch (sk->sk_family) {
 720	case AF_INET:
 721		return inet_sk(sk)->mc_loop;
 722#if IS_ENABLED(CONFIG_IPV6)
 723	case AF_INET6:
 724		return inet6_sk(sk)->mc_loop;
 725#endif
 726	}
 727	WARN_ON_ONCE(1);
 728	return true;
 729}
 730EXPORT_SYMBOL(sk_mc_loop);
 731
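
sk_mc_loop() above reads the per-socket multicast-loopback flag. From userspace that flag is normally toggled with IP_MULTICAST_LOOP (or IPV6_MULTICAST_LOOP for AF_INET6); a rough sketch follows, with the helper name invented for illustration.

#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>

/* Sketch: stop our own multicast transmissions from being looped back
 * to local listeners; this is the knob behind inet_sk(sk)->mc_loop. */
static int disable_mcast_loop(int udp_fd)
{
	int loop = 0;

	if (setsockopt(udp_fd, IPPROTO_IP, IP_MULTICAST_LOOP,
		       &loop, sizeof(loop)) < 0) {
		perror("setsockopt(IP_MULTICAST_LOOP)");
		return -1;
	}
	return 0;
}
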
 732void sock_set_reuseaddr(struct sock *sk)
 733{
 734	lock_sock(sk);
 735	sk->sk_reuse = SK_CAN_REUSE;
 736	release_sock(sk);
 737}
 738EXPORT_SYMBOL(sock_set_reuseaddr);
 739
 740void sock_set_reuseport(struct sock *sk)
 741{
 742	lock_sock(sk);
 743	sk->sk_reuseport = true;
 744	release_sock(sk);
 745}
 746EXPORT_SYMBOL(sock_set_reuseport);
 747
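
sock_set_reuseaddr() and sock_set_reuseport() above are in-kernel conveniences; applications reach the same state through plain setsockopt(). A minimal sketch, assuming a libc that exposes SO_REUSEPORT (Linux 3.9+); the helper name is illustrative.

#include <stdio.h>
#include <sys/socket.h>

/* Sketch: allow address reuse and load-balanced binds on one port.
 * Both options are typically set before bind(). */
static int enable_reuse(int fd)
{
	int one = 1;

	if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one)) < 0 ||
	    setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one)) < 0) {
		perror("setsockopt(SO_REUSEADDR/SO_REUSEPORT)");
		return -1;
	}
	return 0;
}
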
 748void sock_no_linger(struct sock *sk)
 749{
 750	lock_sock(sk);
 751	sk->sk_lingertime = 0;
 752	sock_set_flag(sk, SOCK_LINGER);
 753	release_sock(sk);
 754}
 755EXPORT_SYMBOL(sock_no_linger);
 756
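
sock_no_linger() above is the kernel-internal equivalent of enabling SO_LINGER with a zero timeout, which turns close() into an abortive close. A minimal userspace sketch (the helper name is illustrative):

#include <stdio.h>
#include <sys/socket.h>

/* Sketch: linger enabled with l_linger == 0, so close() discards any
 * unsent data and, for TCP, resets the connection instead of waiting. */
static int set_abortive_close(int fd)
{
	struct linger lg = { .l_onoff = 1, .l_linger = 0 };

	if (setsockopt(fd, SOL_SOCKET, SO_LINGER, &lg, sizeof(lg)) < 0) {
		perror("setsockopt(SO_LINGER)");
		return -1;
	}
	return 0;
}
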
 757void sock_set_priority(struct sock *sk, u32 priority)
 758{
 759	lock_sock(sk);
 760	sk->sk_priority = priority;
 761	release_sock(sk);
 762}
 763EXPORT_SYMBOL(sock_set_priority);
 764
 765void sock_set_sndtimeo(struct sock *sk, s64 secs)
 766{
 767	lock_sock(sk);
 768	if (secs && secs < MAX_SCHEDULE_TIMEOUT / HZ - 1)
 769		sk->sk_sndtimeo = secs * HZ;
 770	else
 771		sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
 772	release_sock(sk);
 773}
 774EXPORT_SYMBOL(sock_set_sndtimeo);
 775
 776static void __sock_set_timestamps(struct sock *sk, bool val, bool new, bool ns)
 777{
 778	if (val)  {
 779		sock_valbool_flag(sk, SOCK_TSTAMP_NEW, new);
 780		sock_valbool_flag(sk, SOCK_RCVTSTAMPNS, ns);
 781		sock_set_flag(sk, SOCK_RCVTSTAMP);
 782		sock_enable_timestamp(sk, SOCK_TIMESTAMP);
 783	} else {
 784		sock_reset_flag(sk, SOCK_RCVTSTAMP);
 785		sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
 786	}
 787}
 788
 789void sock_enable_timestamps(struct sock *sk)
 790{
 791	lock_sock(sk);
 792	__sock_set_timestamps(sk, true, false, true);
 793	release_sock(sk);
 794}
 795EXPORT_SYMBOL(sock_enable_timestamps);
 796
 797void sock_set_timestamp(struct sock *sk, int optname, bool valbool)
 798{
 799	switch (optname) {
 800	case SO_TIMESTAMP_OLD:
 801		__sock_set_timestamps(sk, valbool, false, false);
 802		break;
 803	case SO_TIMESTAMP_NEW:
 804		__sock_set_timestamps(sk, valbool, true, false);
 805		break;
 806	case SO_TIMESTAMPNS_OLD:
 807		__sock_set_timestamps(sk, valbool, false, true);
 808		break;
 809	case SO_TIMESTAMPNS_NEW:
 810		__sock_set_timestamps(sk, valbool, true, true);
 811		break;
 812	}
 813}
 814
 815static int sock_timestamping_bind_phc(struct sock *sk, int phc_index)
 816{
 817	struct net *net = sock_net(sk);
 818	struct net_device *dev = NULL;
 819	bool match = false;
 820	int *vclock_index;
 821	int i, num;
 822
 823	if (sk->sk_bound_dev_if)
 824		dev = dev_get_by_index(net, sk->sk_bound_dev_if);
 825
 826	if (!dev) {
 827		pr_err("%s: sock not bind to device\n", __func__);
 828		return -EOPNOTSUPP;
 829	}
 830
 831	num = ethtool_get_phc_vclocks(dev, &vclock_index);
 832	for (i = 0; i < num; i++) {
 833		if (*(vclock_index + i) == phc_index) {
 834			match = true;
 835			break;
 836		}
 837	}
 838
 839	if (num > 0)
 840		kfree(vclock_index);
 841
 842	if (!match)
 843		return -EINVAL;
 844
 845	sk->sk_bind_phc = phc_index;
 846
 847	return 0;
 848}
 849
 850int sock_set_timestamping(struct sock *sk, int optname,
 851			  struct so_timestamping timestamping)
 852{
 853	int val = timestamping.flags;
 854	int ret;
 855
 856	if (val & ~SOF_TIMESTAMPING_MASK)
 857		return -EINVAL;
 858
 859	if (val & SOF_TIMESTAMPING_OPT_ID &&
 860	    !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) {
 861		if (sk->sk_protocol == IPPROTO_TCP &&
 862		    sk->sk_type == SOCK_STREAM) {
 863			if ((1 << sk->sk_state) &
 864			    (TCPF_CLOSE | TCPF_LISTEN))
 865				return -EINVAL;
 866			sk->sk_tskey = tcp_sk(sk)->snd_una;
 867		} else {
 868			sk->sk_tskey = 0;
 869		}
 870	}
 871
 872	if (val & SOF_TIMESTAMPING_OPT_STATS &&
 873	    !(val & SOF_TIMESTAMPING_OPT_TSONLY))
 874		return -EINVAL;
 875
 876	if (val & SOF_TIMESTAMPING_BIND_PHC) {
 877		ret = sock_timestamping_bind_phc(sk, timestamping.bind_phc);
 878		if (ret)
 879			return ret;
 880	}
 881
 882	sk->sk_tsflags = val;
 883	sock_valbool_flag(sk, SOCK_TSTAMP_NEW, optname == SO_TIMESTAMPING_NEW);
 884
 885	if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
 886		sock_enable_timestamp(sk,
 887				      SOCK_TIMESTAMPING_RX_SOFTWARE);
 888	else
 889		sock_disable_timestamp(sk,
 890				       (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
 891	return 0;
 892}
 893
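
sock_set_timestamping() above validates the SOF_TIMESTAMPING_* flag word handed in through SO_TIMESTAMPING. A hedged userspace sketch, assuming headers recent enough to provide SO_TIMESTAMPING and linux/net_tstamp.h; the flag combination shown is just one common choice.

#include <linux/net_tstamp.h>
#include <stdio.h>
#include <sys/socket.h>

/* Sketch: generate software RX/TX timestamps and report them via
 * control messages / the error queue (SOF_TIMESTAMPING_SOFTWARE). */
static int enable_sw_timestamps(int fd)
{
	int flags = SOF_TIMESTAMPING_RX_SOFTWARE |
		    SOF_TIMESTAMPING_TX_SOFTWARE |
		    SOF_TIMESTAMPING_SOFTWARE;

	if (setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING,
		       &flags, sizeof(flags)) < 0) {
		perror("setsockopt(SO_TIMESTAMPING)");
		return -1;
	}
	return 0;
}
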
 894void sock_set_keepalive(struct sock *sk)
 895{
 896	lock_sock(sk);
 897	if (sk->sk_prot->keepalive)
 898		sk->sk_prot->keepalive(sk, true);
 899	sock_valbool_flag(sk, SOCK_KEEPOPEN, true);
 900	release_sock(sk);
 901}
 902EXPORT_SYMBOL(sock_set_keepalive);
 903
 904static void __sock_set_rcvbuf(struct sock *sk, int val)
 905{
 906	/* Ensure val * 2 fits into an int, to prevent max_t() from treating it
 907	 * as a negative value.
 908	 */
 909	val = min_t(int, val, INT_MAX / 2);
 910	sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
 911
 912	/* We double it on the way in to account for "struct sk_buff" etc.
 913	 * overhead.   Applications assume that the SO_RCVBUF setting they make
 914	 * will allow that much actual data to be received on that socket.
 915	 *
 916	 * Applications are unaware that "struct sk_buff" and other overheads
 917	 * allocate from the receive buffer during socket buffer allocation.
 918	 *
 919	 * And after considering the possible alternatives, returning the value
 920	 * we actually used in getsockopt is the most desirable behavior.
 921	 */
 922	WRITE_ONCE(sk->sk_rcvbuf, max_t(int, val * 2, SOCK_MIN_RCVBUF));
 923}
 924
 925void sock_set_rcvbuf(struct sock *sk, int val)
 926{
 927	lock_sock(sk);
 928	__sock_set_rcvbuf(sk, val);
 929	release_sock(sk);
 930}
 931EXPORT_SYMBOL(sock_set_rcvbuf);
 932
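
Because __sock_set_rcvbuf() doubles the requested size to cover struct sk_buff overhead, getsockopt(SO_RCVBUF) reports roughly twice what the application asked for (clamped by net.core.rmem_max unless SO_RCVBUFFORCE is used). A small sketch of that round trip; the sizes and helper name are arbitrary.

#include <stdio.h>
#include <sys/socket.h>

/* Sketch: ask for 128 KiB of receive buffer and print what the kernel
 * actually granted; expect roughly double the request. */
static int size_rcvbuf(int fd)
{
	int req = 128 * 1024, got = 0;
	socklen_t len = sizeof(got);

	if (setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &req, sizeof(req)) < 0 ||
	    getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &got, &len) < 0) {
		perror("SO_RCVBUF");
		return -1;
	}
	printf("requested %d bytes, kernel reports %d\n", req, got);
	return 0;
}
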
 933static void __sock_set_mark(struct sock *sk, u32 val)
 934{
 935	if (val != sk->sk_mark) {
 936		sk->sk_mark = val;
 937		sk_dst_reset(sk);
 938	}
 939}
 940
 941void sock_set_mark(struct sock *sk, u32 val)
 942{
 943	lock_sock(sk);
 944	__sock_set_mark(sk, val);
 945	release_sock(sk);
 946}
 947EXPORT_SYMBOL(sock_set_mark);
 948
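
SO_MARK, served by sock_set_mark() above and by the corresponding case further down in sock_setsockopt(), tags the socket's packets with an fwmark for policy routing and netfilter; setting it requires CAP_NET_ADMIN in the socket's network namespace. A sketch with an arbitrary mark value and an invented helper name:

#include <stdio.h>
#include <sys/socket.h>

/* Sketch: set fwmark 0x2a on every packet this socket sends.
 * Fails with EPERM without CAP_NET_ADMIN. */
static int set_fwmark(int fd)
{
	unsigned int mark = 0x2a;

	if (setsockopt(fd, SOL_SOCKET, SO_MARK, &mark, sizeof(mark)) < 0) {
		perror("setsockopt(SO_MARK)");
		return -1;
	}
	return 0;
}
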
 949/*
 950 *	This is meant for all protocols to use and covers goings on
 951 *	at the socket level. Everything here is generic.
 952 */
 953
 954int sock_setsockopt(struct socket *sock, int level, int optname,
 955		    sockptr_t optval, unsigned int optlen)
 956{
 957	struct so_timestamping timestamping;
 958	struct sock_txtime sk_txtime;
 959	struct sock *sk = sock->sk;
 960	int val;
 961	int valbool;
 962	struct linger ling;
 963	int ret = 0;
 964
 965	/*
 966	 *	Options without arguments
 967	 */
 968
 969	if (optname == SO_BINDTODEVICE)
 970		return sock_setbindtodevice(sk, optval, optlen);
 971
 972	if (optlen < sizeof(int))
 973		return -EINVAL;
 974
 975	if (copy_from_sockptr(&val, optval, sizeof(val)))
 976		return -EFAULT;
 977
 978	valbool = val ? 1 : 0;
 979
 980	lock_sock(sk);
 981
 982	switch (optname) {
 983	case SO_DEBUG:
 984		if (val && !capable(CAP_NET_ADMIN))
 985			ret = -EACCES;
 986		else
 987			sock_valbool_flag(sk, SOCK_DBG, valbool);
 988		break;
 989	case SO_REUSEADDR:
 990		sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
 991		break;
 992	case SO_REUSEPORT:
 993		sk->sk_reuseport = valbool;
 994		break;
 995	case SO_TYPE:
 996	case SO_PROTOCOL:
 997	case SO_DOMAIN:
 998	case SO_ERROR:
 999		ret = -ENOPROTOOPT;
1000		break;
1001	case SO_DONTROUTE:
1002		sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
1003		sk_dst_reset(sk);
1004		break;
1005	case SO_BROADCAST:
1006		sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
1007		break;
1008	case SO_SNDBUF:
 1009		/* Don't error on this; BSD doesn't, and if you think
 1010		 * about it this is right. Otherwise apps have to
 1011		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
 1012		 * are treated in BSD as hints.
 1013		 */
1014		val = min_t(u32, val, sysctl_wmem_max);
1015set_sndbuf:
1016		/* Ensure val * 2 fits into an int, to prevent max_t()
1017		 * from treating it as a negative value.
1018		 */
1019		val = min_t(int, val, INT_MAX / 2);
1020		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
1021		WRITE_ONCE(sk->sk_sndbuf,
1022			   max_t(int, val * 2, SOCK_MIN_SNDBUF));
1023		/* Wake up sending tasks if we upped the value. */
1024		sk->sk_write_space(sk);
1025		break;
1026
1027	case SO_SNDBUFFORCE:
1028		if (!capable(CAP_NET_ADMIN)) {
1029			ret = -EPERM;
1030			break;
1031		}
1032
1033		/* No negative values (to prevent underflow, as val will be
1034		 * multiplied by 2).
1035		 */
1036		if (val < 0)
1037			val = 0;
1038		goto set_sndbuf;
1039
1040	case SO_RCVBUF:
 1041		/* Don't error on this; BSD doesn't, and if you think
 1042		 * about it this is right. Otherwise apps have to
 1043		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
 1044		 * are treated in BSD as hints.
 1045		 */
1046		__sock_set_rcvbuf(sk, min_t(u32, val, sysctl_rmem_max));
1047		break;
1048
1049	case SO_RCVBUFFORCE:
1050		if (!capable(CAP_NET_ADMIN)) {
1051			ret = -EPERM;
1052			break;
1053		}
1054
1055		/* No negative values (to prevent underflow, as val will be
1056		 * multiplied by 2).
1057		 */
1058		__sock_set_rcvbuf(sk, max(val, 0));
1059		break;
1060
1061	case SO_KEEPALIVE:
1062		if (sk->sk_prot->keepalive)
1063			sk->sk_prot->keepalive(sk, valbool);
1064		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
1065		break;
1066
1067	case SO_OOBINLINE:
1068		sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
1069		break;
1070
1071	case SO_NO_CHECK:
1072		sk->sk_no_check_tx = valbool;
1073		break;
1074
1075	case SO_PRIORITY:
1076		if ((val >= 0 && val <= 6) ||
1077		    ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1078			sk->sk_priority = val;
1079		else
1080			ret = -EPERM;
1081		break;
1082
1083	case SO_LINGER:
1084		if (optlen < sizeof(ling)) {
1085			ret = -EINVAL;	/* 1003.1g */
1086			break;
1087		}
1088		if (copy_from_sockptr(&ling, optval, sizeof(ling))) {
1089			ret = -EFAULT;
1090			break;
1091		}
1092		if (!ling.l_onoff)
1093			sock_reset_flag(sk, SOCK_LINGER);
1094		else {
1095#if (BITS_PER_LONG == 32)
1096			if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
1097				sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
1098			else
1099#endif
1100				sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
1101			sock_set_flag(sk, SOCK_LINGER);
1102		}
1103		break;
1104
1105	case SO_BSDCOMPAT:
1106		break;
1107
1108	case SO_PASSCRED:
1109		if (valbool)
1110			set_bit(SOCK_PASSCRED, &sock->flags);
1111		else
1112			clear_bit(SOCK_PASSCRED, &sock->flags);
1113		break;
1114
1115	case SO_TIMESTAMP_OLD:
1116	case SO_TIMESTAMP_NEW:
1117	case SO_TIMESTAMPNS_OLD:
1118	case SO_TIMESTAMPNS_NEW:
1119		sock_set_timestamp(sk, optname, valbool);
1120		break;
1121
1122	case SO_TIMESTAMPING_NEW:
1123	case SO_TIMESTAMPING_OLD:
1124		if (optlen == sizeof(timestamping)) {
1125			if (copy_from_sockptr(&timestamping, optval,
1126					      sizeof(timestamping))) {
1127				ret = -EFAULT;
1128				break;
1129			}
1130		} else {
1131			memset(&timestamping, 0, sizeof(timestamping));
1132			timestamping.flags = val;
1133		}
1134		ret = sock_set_timestamping(sk, optname, timestamping);
1135		break;
1136
1137	case SO_RCVLOWAT:
1138		if (val < 0)
1139			val = INT_MAX;
1140		if (sock->ops->set_rcvlowat)
1141			ret = sock->ops->set_rcvlowat(sk, val);
1142		else
1143			WRITE_ONCE(sk->sk_rcvlowat, val ? : 1);
1144		break;
1145
1146	case SO_RCVTIMEO_OLD:
1147	case SO_RCVTIMEO_NEW:
1148		ret = sock_set_timeout(&sk->sk_rcvtimeo, optval,
1149				       optlen, optname == SO_RCVTIMEO_OLD);
1150		break;
1151
1152	case SO_SNDTIMEO_OLD:
1153	case SO_SNDTIMEO_NEW:
1154		ret = sock_set_timeout(&sk->sk_sndtimeo, optval,
1155				       optlen, optname == SO_SNDTIMEO_OLD);
1156		break;
1157
1158	case SO_ATTACH_FILTER: {
1159		struct sock_fprog fprog;
1160
1161		ret = copy_bpf_fprog_from_user(&fprog, optval, optlen);
1162		if (!ret)
1163			ret = sk_attach_filter(&fprog, sk);
1164		break;
1165	}
1166	case SO_ATTACH_BPF:
1167		ret = -EINVAL;
1168		if (optlen == sizeof(u32)) {
1169			u32 ufd;
1170
1171			ret = -EFAULT;
1172			if (copy_from_sockptr(&ufd, optval, sizeof(ufd)))
1173				break;
1174
1175			ret = sk_attach_bpf(ufd, sk);
1176		}
1177		break;
1178
1179	case SO_ATTACH_REUSEPORT_CBPF: {
1180		struct sock_fprog fprog;
1181
1182		ret = copy_bpf_fprog_from_user(&fprog, optval, optlen);
1183		if (!ret)
1184			ret = sk_reuseport_attach_filter(&fprog, sk);
1185		break;
1186	}
1187	case SO_ATTACH_REUSEPORT_EBPF:
1188		ret = -EINVAL;
1189		if (optlen == sizeof(u32)) {
1190			u32 ufd;
1191
1192			ret = -EFAULT;
1193			if (copy_from_sockptr(&ufd, optval, sizeof(ufd)))
1194				break;
1195
1196			ret = sk_reuseport_attach_bpf(ufd, sk);
1197		}
1198		break;
1199
1200	case SO_DETACH_REUSEPORT_BPF:
1201		ret = reuseport_detach_prog(sk);
1202		break;
1203
1204	case SO_DETACH_FILTER:
1205		ret = sk_detach_filter(sk);
1206		break;
1207
1208	case SO_LOCK_FILTER:
1209		if (sock_flag(sk, SOCK_FILTER_LOCKED) && !valbool)
1210			ret = -EPERM;
1211		else
1212			sock_valbool_flag(sk, SOCK_FILTER_LOCKED, valbool);
1213		break;
1214
1215	case SO_PASSSEC:
1216		if (valbool)
1217			set_bit(SOCK_PASSSEC, &sock->flags);
1218		else
1219			clear_bit(SOCK_PASSSEC, &sock->flags);
1220		break;
1221	case SO_MARK:
1222		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
1223			ret = -EPERM;
1224			break;
1225		}
1226
1227		__sock_set_mark(sk, val);
1228		break;
1229
1230	case SO_RXQ_OVFL:
1231		sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
1232		break;
1233
1234	case SO_WIFI_STATUS:
1235		sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
1236		break;
1237
1238	case SO_PEEK_OFF:
1239		if (sock->ops->set_peek_off)
1240			ret = sock->ops->set_peek_off(sk, val);
1241		else
1242			ret = -EOPNOTSUPP;
1243		break;
1244
1245	case SO_NOFCS:
1246		sock_valbool_flag(sk, SOCK_NOFCS, valbool);
1247		break;
1248
1249	case SO_SELECT_ERR_QUEUE:
1250		sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool);
1251		break;
1252
1253#ifdef CONFIG_NET_RX_BUSY_POLL
1254	case SO_BUSY_POLL:
1255		/* allow unprivileged users to decrease the value */
1256		if ((val > sk->sk_ll_usec) && !capable(CAP_NET_ADMIN))
1257			ret = -EPERM;
1258		else {
1259			if (val < 0)
1260				ret = -EINVAL;
1261			else
1262				WRITE_ONCE(sk->sk_ll_usec, val);
1263		}
1264		break;
1265	case SO_PREFER_BUSY_POLL:
1266		if (valbool && !capable(CAP_NET_ADMIN))
1267			ret = -EPERM;
1268		else
1269			WRITE_ONCE(sk->sk_prefer_busy_poll, valbool);
1270		break;
1271	case SO_BUSY_POLL_BUDGET:
1272		if (val > READ_ONCE(sk->sk_busy_poll_budget) && !capable(CAP_NET_ADMIN)) {
1273			ret = -EPERM;
1274		} else {
1275			if (val < 0 || val > U16_MAX)
1276				ret = -EINVAL;
1277			else
1278				WRITE_ONCE(sk->sk_busy_poll_budget, val);
1279		}
1280		break;
1281#endif
1282
1283	case SO_MAX_PACING_RATE:
1284		{
1285		unsigned long ulval = (val == ~0U) ? ~0UL : (unsigned int)val;
1286
1287		if (sizeof(ulval) != sizeof(val) &&
1288		    optlen >= sizeof(ulval) &&
1289		    copy_from_sockptr(&ulval, optval, sizeof(ulval))) {
1290			ret = -EFAULT;
1291			break;
1292		}
1293		if (ulval != ~0UL)
1294			cmpxchg(&sk->sk_pacing_status,
1295				SK_PACING_NONE,
1296				SK_PACING_NEEDED);
1297		sk->sk_max_pacing_rate = ulval;
1298		sk->sk_pacing_rate = min(sk->sk_pacing_rate, ulval);
1299		break;
1300		}
1301	case SO_INCOMING_CPU:
1302		WRITE_ONCE(sk->sk_incoming_cpu, val);
1303		break;
1304
1305	case SO_CNX_ADVICE:
1306		if (val == 1)
1307			dst_negative_advice(sk);
1308		break;
1309
1310	case SO_ZEROCOPY:
1311		if (sk->sk_family == PF_INET || sk->sk_family == PF_INET6) {
1312			if (!((sk->sk_type == SOCK_STREAM &&
1313			       sk->sk_protocol == IPPROTO_TCP) ||
1314			      (sk->sk_type == SOCK_DGRAM &&
1315			       sk->sk_protocol == IPPROTO_UDP)))
1316				ret = -ENOTSUPP;
1317		} else if (sk->sk_family != PF_RDS) {
1318			ret = -ENOTSUPP;
1319		}
1320		if (!ret) {
1321			if (val < 0 || val > 1)
1322				ret = -EINVAL;
1323			else
1324				sock_valbool_flag(sk, SOCK_ZEROCOPY, valbool);
1325		}
1326		break;
1327
1328	case SO_TXTIME:
1329		if (optlen != sizeof(struct sock_txtime)) {
1330			ret = -EINVAL;
1331			break;
1332		} else if (copy_from_sockptr(&sk_txtime, optval,
1333			   sizeof(struct sock_txtime))) {
1334			ret = -EFAULT;
1335			break;
1336		} else if (sk_txtime.flags & ~SOF_TXTIME_FLAGS_MASK) {
1337			ret = -EINVAL;
1338			break;
1339		}
1340		/* CLOCK_MONOTONIC is only used by sch_fq, and this packet
 1341		 * scheduler has enough safeguards.
1342		 */
1343		if (sk_txtime.clockid != CLOCK_MONOTONIC &&
1344		    !ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
1345			ret = -EPERM;
1346			break;
1347		}
1348		sock_valbool_flag(sk, SOCK_TXTIME, true);
1349		sk->sk_clockid = sk_txtime.clockid;
1350		sk->sk_txtime_deadline_mode =
1351			!!(sk_txtime.flags & SOF_TXTIME_DEADLINE_MODE);
1352		sk->sk_txtime_report_errors =
1353			!!(sk_txtime.flags & SOF_TXTIME_REPORT_ERRORS);
1354		break;
1355
1356	case SO_BINDTOIFINDEX:
1357		ret = sock_bindtoindex_locked(sk, val);
1358		break;
1359
1360	default:
1361		ret = -ENOPROTOOPT;
1362		break;
1363	}
1364	release_sock(sk);
1365	return ret;
1366}
1367EXPORT_SYMBOL(sock_setsockopt);
1368
1369static const struct cred *sk_get_peer_cred(struct sock *sk)
1370{
1371	const struct cred *cred;
1372
1373	spin_lock(&sk->sk_peer_lock);
1374	cred = get_cred(sk->sk_peer_cred);
1375	spin_unlock(&sk->sk_peer_lock);
1376
1377	return cred;
1378}
1379
1380static void cred_to_ucred(struct pid *pid, const struct cred *cred,
1381			  struct ucred *ucred)
1382{
1383	ucred->pid = pid_vnr(pid);
1384	ucred->uid = ucred->gid = -1;
1385	if (cred) {
1386		struct user_namespace *current_ns = current_user_ns();
1387
1388		ucred->uid = from_kuid_munged(current_ns, cred->euid);
1389		ucred->gid = from_kgid_munged(current_ns, cred->egid);
1390	}
1391}
1392
1393static int groups_to_user(gid_t __user *dst, const struct group_info *src)
1394{
1395	struct user_namespace *user_ns = current_user_ns();
1396	int i;
1397
1398	for (i = 0; i < src->ngroups; i++)
1399		if (put_user(from_kgid_munged(user_ns, src->gid[i]), dst + i))
1400			return -EFAULT;
1401
1402	return 0;
1403}
1404
1405int sock_getsockopt(struct socket *sock, int level, int optname,
1406		    char __user *optval, int __user *optlen)
1407{
1408	struct sock *sk = sock->sk;
1409
1410	union {
1411		int val;
1412		u64 val64;
1413		unsigned long ulval;
1414		struct linger ling;
1415		struct old_timeval32 tm32;
1416		struct __kernel_old_timeval tm;
1417		struct  __kernel_sock_timeval stm;
1418		struct sock_txtime txtime;
1419		struct so_timestamping timestamping;
1420	} v;
1421
1422	int lv = sizeof(int);
1423	int len;
1424
1425	if (get_user(len, optlen))
1426		return -EFAULT;
1427	if (len < 0)
1428		return -EINVAL;
1429
1430	memset(&v, 0, sizeof(v));
1431
1432	switch (optname) {
1433	case SO_DEBUG:
1434		v.val = sock_flag(sk, SOCK_DBG);
1435		break;
1436
1437	case SO_DONTROUTE:
1438		v.val = sock_flag(sk, SOCK_LOCALROUTE);
1439		break;
1440
1441	case SO_BROADCAST:
1442		v.val = sock_flag(sk, SOCK_BROADCAST);
1443		break;
1444
1445	case SO_SNDBUF:
1446		v.val = sk->sk_sndbuf;
1447		break;
1448
1449	case SO_RCVBUF:
1450		v.val = sk->sk_rcvbuf;
1451		break;
1452
1453	case SO_REUSEADDR:
1454		v.val = sk->sk_reuse;
1455		break;
1456
1457	case SO_REUSEPORT:
1458		v.val = sk->sk_reuseport;
1459		break;
1460
1461	case SO_KEEPALIVE:
1462		v.val = sock_flag(sk, SOCK_KEEPOPEN);
1463		break;
1464
1465	case SO_TYPE:
1466		v.val = sk->sk_type;
1467		break;
1468
1469	case SO_PROTOCOL:
1470		v.val = sk->sk_protocol;
1471		break;
1472
1473	case SO_DOMAIN:
1474		v.val = sk->sk_family;
1475		break;
1476
1477	case SO_ERROR:
1478		v.val = -sock_error(sk);
1479		if (v.val == 0)
1480			v.val = xchg(&sk->sk_err_soft, 0);
1481		break;
1482
1483	case SO_OOBINLINE:
1484		v.val = sock_flag(sk, SOCK_URGINLINE);
1485		break;
1486
1487	case SO_NO_CHECK:
1488		v.val = sk->sk_no_check_tx;
1489		break;
1490
1491	case SO_PRIORITY:
1492		v.val = sk->sk_priority;
1493		break;
1494
1495	case SO_LINGER:
1496		lv		= sizeof(v.ling);
1497		v.ling.l_onoff	= sock_flag(sk, SOCK_LINGER);
1498		v.ling.l_linger	= sk->sk_lingertime / HZ;
1499		break;
1500
1501	case SO_BSDCOMPAT:
1502		break;
1503
1504	case SO_TIMESTAMP_OLD:
1505		v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
1506				!sock_flag(sk, SOCK_TSTAMP_NEW) &&
1507				!sock_flag(sk, SOCK_RCVTSTAMPNS);
1508		break;
1509
1510	case SO_TIMESTAMPNS_OLD:
1511		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS) && !sock_flag(sk, SOCK_TSTAMP_NEW);
1512		break;
1513
1514	case SO_TIMESTAMP_NEW:
1515		v.val = sock_flag(sk, SOCK_RCVTSTAMP) && sock_flag(sk, SOCK_TSTAMP_NEW);
1516		break;
1517
1518	case SO_TIMESTAMPNS_NEW:
1519		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS) && sock_flag(sk, SOCK_TSTAMP_NEW);
1520		break;
1521
1522	case SO_TIMESTAMPING_OLD:
1523		lv = sizeof(v.timestamping);
1524		v.timestamping.flags = sk->sk_tsflags;
1525		v.timestamping.bind_phc = sk->sk_bind_phc;
1526		break;
1527
1528	case SO_RCVTIMEO_OLD:
1529	case SO_RCVTIMEO_NEW:
1530		lv = sock_get_timeout(sk->sk_rcvtimeo, &v, SO_RCVTIMEO_OLD == optname);
1531		break;
1532
1533	case SO_SNDTIMEO_OLD:
1534	case SO_SNDTIMEO_NEW:
1535		lv = sock_get_timeout(sk->sk_sndtimeo, &v, SO_SNDTIMEO_OLD == optname);
1536		break;
1537
1538	case SO_RCVLOWAT:
1539		v.val = sk->sk_rcvlowat;
1540		break;
1541
1542	case SO_SNDLOWAT:
1543		v.val = 1;
1544		break;
1545
1546	case SO_PASSCRED:
1547		v.val = !!test_bit(SOCK_PASSCRED, &sock->flags);
1548		break;
1549
1550	case SO_PEERCRED:
1551	{
1552		struct ucred peercred;
1553		if (len > sizeof(peercred))
1554			len = sizeof(peercred);
1555
1556		spin_lock(&sk->sk_peer_lock);
1557		cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
1558		spin_unlock(&sk->sk_peer_lock);
1559
1560		if (copy_to_user(optval, &peercred, len))
1561			return -EFAULT;
1562		goto lenout;
1563	}
1564
1565	case SO_PEERGROUPS:
1566	{
1567		const struct cred *cred;
1568		int ret, n;
1569
1570		cred = sk_get_peer_cred(sk);
1571		if (!cred)
1572			return -ENODATA;
1573
1574		n = cred->group_info->ngroups;
1575		if (len < n * sizeof(gid_t)) {
1576			len = n * sizeof(gid_t);
1577			put_cred(cred);
1578			return put_user(len, optlen) ? -EFAULT : -ERANGE;
1579		}
1580		len = n * sizeof(gid_t);
1581
1582		ret = groups_to_user((gid_t __user *)optval, cred->group_info);
1583		put_cred(cred);
1584		if (ret)
1585			return ret;
1586		goto lenout;
1587	}
1588
1589	case SO_PEERNAME:
1590	{
1591		char address[128];
1592
1593		lv = sock->ops->getname(sock, (struct sockaddr *)address, 2);
1594		if (lv < 0)
1595			return -ENOTCONN;
1596		if (lv < len)
1597			return -EINVAL;
1598		if (copy_to_user(optval, address, len))
1599			return -EFAULT;
1600		goto lenout;
1601	}
1602
1603	/* Dubious BSD thing... Probably nobody even uses it, but
1604	 * the UNIX standard wants it for whatever reason... -DaveM
1605	 */
1606	case SO_ACCEPTCONN:
1607		v.val = sk->sk_state == TCP_LISTEN;
1608		break;
1609
1610	case SO_PASSSEC:
1611		v.val = !!test_bit(SOCK_PASSSEC, &sock->flags);
1612		break;
1613
1614	case SO_PEERSEC:
1615		return security_socket_getpeersec_stream(sock, optval, optlen, len);
1616
1617	case SO_MARK:
1618		v.val = sk->sk_mark;
1619		break;
1620
1621	case SO_RXQ_OVFL:
1622		v.val = sock_flag(sk, SOCK_RXQ_OVFL);
1623		break;
1624
1625	case SO_WIFI_STATUS:
1626		v.val = sock_flag(sk, SOCK_WIFI_STATUS);
1627		break;
1628
1629	case SO_PEEK_OFF:
1630		if (!sock->ops->set_peek_off)
1631			return -EOPNOTSUPP;
1632
1633		v.val = sk->sk_peek_off;
1634		break;
1635	case SO_NOFCS:
1636		v.val = sock_flag(sk, SOCK_NOFCS);
1637		break;
1638
1639	case SO_BINDTODEVICE:
1640		return sock_getbindtodevice(sk, optval, optlen, len);
1641
1642	case SO_GET_FILTER:
1643		len = sk_get_filter(sk, (struct sock_filter __user *)optval, len);
1644		if (len < 0)
1645			return len;
1646
1647		goto lenout;
1648
1649	case SO_LOCK_FILTER:
1650		v.val = sock_flag(sk, SOCK_FILTER_LOCKED);
1651		break;
1652
1653	case SO_BPF_EXTENSIONS:
1654		v.val = bpf_tell_extensions();
1655		break;
1656
1657	case SO_SELECT_ERR_QUEUE:
1658		v.val = sock_flag(sk, SOCK_SELECT_ERR_QUEUE);
1659		break;
1660
1661#ifdef CONFIG_NET_RX_BUSY_POLL
1662	case SO_BUSY_POLL:
1663		v.val = sk->sk_ll_usec;
1664		break;
1665	case SO_PREFER_BUSY_POLL:
1666		v.val = READ_ONCE(sk->sk_prefer_busy_poll);
1667		break;
1668#endif
1669
1670	case SO_MAX_PACING_RATE:
1671		if (sizeof(v.ulval) != sizeof(v.val) && len >= sizeof(v.ulval)) {
1672			lv = sizeof(v.ulval);
1673			v.ulval = sk->sk_max_pacing_rate;
1674		} else {
1675			/* 32bit version */
1676			v.val = min_t(unsigned long, sk->sk_max_pacing_rate, ~0U);
1677		}
1678		break;
1679
1680	case SO_INCOMING_CPU:
1681		v.val = READ_ONCE(sk->sk_incoming_cpu);
1682		break;
1683
1684	case SO_MEMINFO:
1685	{
1686		u32 meminfo[SK_MEMINFO_VARS];
1687
1688		sk_get_meminfo(sk, meminfo);
1689
1690		len = min_t(unsigned int, len, sizeof(meminfo));
1691		if (copy_to_user(optval, &meminfo, len))
1692			return -EFAULT;
1693
1694		goto lenout;
1695	}
1696
1697#ifdef CONFIG_NET_RX_BUSY_POLL
1698	case SO_INCOMING_NAPI_ID:
1699		v.val = READ_ONCE(sk->sk_napi_id);
1700
1701		/* aggregate non-NAPI IDs down to 0 */
1702		if (v.val < MIN_NAPI_ID)
1703			v.val = 0;
1704
1705		break;
1706#endif
1707
1708	case SO_COOKIE:
1709		lv = sizeof(u64);
1710		if (len < lv)
1711			return -EINVAL;
1712		v.val64 = sock_gen_cookie(sk);
1713		break;
1714
1715	case SO_ZEROCOPY:
1716		v.val = sock_flag(sk, SOCK_ZEROCOPY);
1717		break;
1718
1719	case SO_TXTIME:
1720		lv = sizeof(v.txtime);
1721		v.txtime.clockid = sk->sk_clockid;
1722		v.txtime.flags |= sk->sk_txtime_deadline_mode ?
1723				  SOF_TXTIME_DEADLINE_MODE : 0;
1724		v.txtime.flags |= sk->sk_txtime_report_errors ?
1725				  SOF_TXTIME_REPORT_ERRORS : 0;
1726		break;
1727
1728	case SO_BINDTOIFINDEX:
1729		v.val = sk->sk_bound_dev_if;
1730		break;
1731
1732	case SO_NETNS_COOKIE:
1733		lv = sizeof(u64);
1734		if (len != lv)
1735			return -EINVAL;
1736		v.val64 = sock_net(sk)->net_cookie;
1737		break;
1738
1739	default:
1740		/* We implement the SO_SNDLOWAT etc to not be settable
1741		 * (1003.1g 7).
1742		 */
1743		return -ENOPROTOOPT;
1744	}
1745
1746	if (len > lv)
1747		len = lv;
1748	if (copy_to_user(optval, &v, len))
1749		return -EFAULT;
1750lenout:
1751	if (put_user(len, optlen))
1752		return -EFAULT;
1753	return 0;
1754}
1755
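
As one concrete consumer of the getsockopt() path above, SO_PEERCRED returns the struct ucred filled in by cred_to_ucred(). A userspace sketch for a connected AF_UNIX socket; _GNU_SOURCE is needed for glibc's struct ucred, and the helper name is illustrative.

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/socket.h>

/* Sketch: print the pid/uid/gid of the peer on a connected AF_UNIX
 * socket, as reported by the SO_PEERCRED case above. */
static int print_peer_cred(int unix_fd)
{
	struct ucred peer;
	socklen_t len = sizeof(peer);

	if (getsockopt(unix_fd, SOL_SOCKET, SO_PEERCRED, &peer, &len) < 0) {
		perror("getsockopt(SO_PEERCRED)");
		return -1;
	}
	printf("peer pid=%d uid=%u gid=%u\n",
	       (int)peer.pid, (unsigned)peer.uid, (unsigned)peer.gid);
	return 0;
}
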
1756/*
1757 * Initialize an sk_lock.
1758 *
1759 * (We also register the sk_lock with the lock validator.)
1760 */
1761static inline void sock_lock_init(struct sock *sk)
1762{
1763	if (sk->sk_kern_sock)
1764		sock_lock_init_class_and_name(
1765			sk,
1766			af_family_kern_slock_key_strings[sk->sk_family],
1767			af_family_kern_slock_keys + sk->sk_family,
1768			af_family_kern_key_strings[sk->sk_family],
1769			af_family_kern_keys + sk->sk_family);
1770	else
1771		sock_lock_init_class_and_name(
1772			sk,
1773			af_family_slock_key_strings[sk->sk_family],
1774			af_family_slock_keys + sk->sk_family,
1775			af_family_key_strings[sk->sk_family],
1776			af_family_keys + sk->sk_family);
1777}
1778
1779/*
1780 * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
 1781 * even temporarily, because of RCU lookups. sk_node should also be left as-is.
 1782 * We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end.
1783 */
1784static void sock_copy(struct sock *nsk, const struct sock *osk)
1785{
1786	const struct proto *prot = READ_ONCE(osk->sk_prot);
1787#ifdef CONFIG_SECURITY_NETWORK
1788	void *sptr = nsk->sk_security;
1789#endif
1790
1791	/* If we move sk_tx_queue_mapping out of the private section,
1792	 * we must check if sk_tx_queue_clear() is called after
1793	 * sock_copy() in sk_clone_lock().
1794	 */
1795	BUILD_BUG_ON(offsetof(struct sock, sk_tx_queue_mapping) <
1796		     offsetof(struct sock, sk_dontcopy_begin) ||
1797		     offsetof(struct sock, sk_tx_queue_mapping) >=
1798		     offsetof(struct sock, sk_dontcopy_end));
1799
1800	memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));
1801
1802	memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
1803	       prot->obj_size - offsetof(struct sock, sk_dontcopy_end));
1804
1805#ifdef CONFIG_SECURITY_NETWORK
1806	nsk->sk_security = sptr;
1807	security_sk_clone(osk, nsk);
1808#endif
1809}
1810
1811static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
1812		int family)
1813{
1814	struct sock *sk;
1815	struct kmem_cache *slab;
1816
1817	slab = prot->slab;
1818	if (slab != NULL) {
1819		sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
1820		if (!sk)
1821			return sk;
1822		if (want_init_on_alloc(priority))
1823			sk_prot_clear_nulls(sk, prot->obj_size);
1824	} else
1825		sk = kmalloc(prot->obj_size, priority);
1826
1827	if (sk != NULL) {
1828		if (security_sk_alloc(sk, family, priority))
1829			goto out_free;
1830
1831		if (!try_module_get(prot->owner))
1832			goto out_free_sec;
1833	}
1834
1835	return sk;
1836
1837out_free_sec:
1838	security_sk_free(sk);
1839out_free:
1840	if (slab != NULL)
1841		kmem_cache_free(slab, sk);
1842	else
1843		kfree(sk);
1844	return NULL;
1845}
1846
1847static void sk_prot_free(struct proto *prot, struct sock *sk)
1848{
1849	struct kmem_cache *slab;
1850	struct module *owner;
1851
1852	owner = prot->owner;
1853	slab = prot->slab;
1854
1855	cgroup_sk_free(&sk->sk_cgrp_data);
1856	mem_cgroup_sk_free(sk);
1857	security_sk_free(sk);
1858	if (slab != NULL)
1859		kmem_cache_free(slab, sk);
1860	else
1861		kfree(sk);
1862	module_put(owner);
1863}
1864
1865/**
1866 *	sk_alloc - All socket objects are allocated here
1867 *	@net: the applicable net namespace
1868 *	@family: protocol family
1869 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1870 *	@prot: struct proto associated with this new sock instance
1871 *	@kern: is this to be a kernel socket?
1872 */
1873struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
1874		      struct proto *prot, int kern)
1875{
1876	struct sock *sk;
1877
1878	sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
1879	if (sk) {
1880		sk->sk_family = family;
1881		/*
1882		 * See comment in struct sock definition to understand
1883		 * why we need sk_prot_creator -acme
1884		 */
1885		sk->sk_prot = sk->sk_prot_creator = prot;
1886		sk->sk_kern_sock = kern;
1887		sock_lock_init(sk);
1888		sk->sk_net_refcnt = kern ? 0 : 1;
1889		if (likely(sk->sk_net_refcnt)) {
1890			get_net(net);
1891			sock_inuse_add(net, 1);
1892		}
1893
1894		sock_net_set(sk, net);
1895		refcount_set(&sk->sk_wmem_alloc, 1);
1896
1897		mem_cgroup_sk_alloc(sk);
1898		cgroup_sk_alloc(&sk->sk_cgrp_data);
1899		sock_update_classid(&sk->sk_cgrp_data);
1900		sock_update_netprioidx(&sk->sk_cgrp_data);
1901		sk_tx_queue_clear(sk);
1902	}
1903
1904	return sk;
1905}
1906EXPORT_SYMBOL(sk_alloc);
1907
1908/* Sockets having SOCK_RCU_FREE will call this function after one RCU
1909 * grace period. This is the case for UDP sockets and TCP listeners.
1910 */
1911static void __sk_destruct(struct rcu_head *head)
1912{
1913	struct sock *sk = container_of(head, struct sock, sk_rcu);
1914	struct sk_filter *filter;
1915
1916	if (sk->sk_destruct)
1917		sk->sk_destruct(sk);
1918
1919	filter = rcu_dereference_check(sk->sk_filter,
1920				       refcount_read(&sk->sk_wmem_alloc) == 0);
1921	if (filter) {
1922		sk_filter_uncharge(sk, filter);
1923		RCU_INIT_POINTER(sk->sk_filter, NULL);
1924	}
1925
1926	sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);
1927
1928#ifdef CONFIG_BPF_SYSCALL
1929	bpf_sk_storage_free(sk);
1930#endif
1931
1932	if (atomic_read(&sk->sk_omem_alloc))
1933		pr_debug("%s: optmem leakage (%d bytes) detected\n",
1934			 __func__, atomic_read(&sk->sk_omem_alloc));
1935
1936	if (sk->sk_frag.page) {
1937		put_page(sk->sk_frag.page);
1938		sk->sk_frag.page = NULL;
1939	}
1940
1941	/* We do not need to acquire sk->sk_peer_lock, we are the last user. */
1942	put_cred(sk->sk_peer_cred);
1943	put_pid(sk->sk_peer_pid);
1944
1945	if (likely(sk->sk_net_refcnt))
1946		put_net(sock_net(sk));
1947	sk_prot_free(sk->sk_prot_creator, sk);
1948}
1949
1950void sk_destruct(struct sock *sk)
1951{
1952	bool use_call_rcu = sock_flag(sk, SOCK_RCU_FREE);
1953
1954	if (rcu_access_pointer(sk->sk_reuseport_cb)) {
1955		reuseport_detach_sock(sk);
1956		use_call_rcu = true;
1957	}
1958
1959	if (use_call_rcu)
1960		call_rcu(&sk->sk_rcu, __sk_destruct);
1961	else
1962		__sk_destruct(&sk->sk_rcu);
1963}
1964
1965static void __sk_free(struct sock *sk)
1966{
1967	if (likely(sk->sk_net_refcnt))
1968		sock_inuse_add(sock_net(sk), -1);
1969
1970	if (unlikely(sk->sk_net_refcnt && sock_diag_has_destroy_listeners(sk)))
1971		sock_diag_broadcast_destroy(sk);
1972	else
1973		sk_destruct(sk);
1974}
1975
1976void sk_free(struct sock *sk)
1977{
1978	/*
1979	 * We subtract one from sk_wmem_alloc and can know if
1980	 * some packets are still in some tx queue.
1981	 * If not null, sock_wfree() will call __sk_free(sk) later
1982	 */
1983	if (refcount_dec_and_test(&sk->sk_wmem_alloc))
1984		__sk_free(sk);
1985}
1986EXPORT_SYMBOL(sk_free);
1987
1988static void sk_init_common(struct sock *sk)
1989{
1990	skb_queue_head_init(&sk->sk_receive_queue);
1991	skb_queue_head_init(&sk->sk_write_queue);
1992	skb_queue_head_init(&sk->sk_error_queue);
1993
1994	rwlock_init(&sk->sk_callback_lock);
1995	lockdep_set_class_and_name(&sk->sk_receive_queue.lock,
1996			af_rlock_keys + sk->sk_family,
1997			af_family_rlock_key_strings[sk->sk_family]);
1998	lockdep_set_class_and_name(&sk->sk_write_queue.lock,
1999			af_wlock_keys + sk->sk_family,
2000			af_family_wlock_key_strings[sk->sk_family]);
2001	lockdep_set_class_and_name(&sk->sk_error_queue.lock,
2002			af_elock_keys + sk->sk_family,
2003			af_family_elock_key_strings[sk->sk_family]);
2004	lockdep_set_class_and_name(&sk->sk_callback_lock,
2005			af_callback_keys + sk->sk_family,
2006			af_family_clock_key_strings[sk->sk_family]);
2007}
2008
2009/**
2010 *	sk_clone_lock - clone a socket, and lock its clone
2011 *	@sk: the socket to clone
2012 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
2013 *
2014 *	Caller must unlock socket even in error path (bh_unlock_sock(newsk))
2015 */
2016struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
2017{
2018	struct proto *prot = READ_ONCE(sk->sk_prot);
2019	struct sk_filter *filter;
2020	bool is_charged = true;
2021	struct sock *newsk;
2022
2023	newsk = sk_prot_alloc(prot, priority, sk->sk_family);
2024	if (!newsk)
2025		goto out;
2026
2027	sock_copy(newsk, sk);
2028
2029	newsk->sk_prot_creator = prot;
2030
2031	/* SANITY */
2032	if (likely(newsk->sk_net_refcnt))
2033		get_net(sock_net(newsk));
2034	sk_node_init(&newsk->sk_node);
2035	sock_lock_init(newsk);
2036	bh_lock_sock(newsk);
2037	newsk->sk_backlog.head	= newsk->sk_backlog.tail = NULL;
2038	newsk->sk_backlog.len = 0;
2039
2040	atomic_set(&newsk->sk_rmem_alloc, 0);
2041
2042	/* sk_wmem_alloc set to one (see sk_free() and sock_wfree()) */
2043	refcount_set(&newsk->sk_wmem_alloc, 1);
2044
2045	atomic_set(&newsk->sk_omem_alloc, 0);
2046	sk_init_common(newsk);
2047
2048	newsk->sk_dst_cache	= NULL;
2049	newsk->sk_dst_pending_confirm = 0;
2050	newsk->sk_wmem_queued	= 0;
2051	newsk->sk_forward_alloc = 0;
2052	atomic_set(&newsk->sk_drops, 0);
2053	newsk->sk_send_head	= NULL;
2054	newsk->sk_userlocks	= sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
2055	atomic_set(&newsk->sk_zckey, 0);
2056
2057	sock_reset_flag(newsk, SOCK_DONE);
2058
2059	/* sk->sk_memcg will be populated at accept() time */
2060	newsk->sk_memcg = NULL;
2061
2062	cgroup_sk_clone(&newsk->sk_cgrp_data);
2063
2064	rcu_read_lock();
2065	filter = rcu_dereference(sk->sk_filter);
2066	if (filter != NULL)
2067		/* though it's an empty new sock, the charging may fail
2068		 * if sysctl_optmem_max was changed between creation of
2069		 * original socket and cloning
2070		 */
2071		is_charged = sk_filter_charge(newsk, filter);
2072	RCU_INIT_POINTER(newsk->sk_filter, filter);
2073	rcu_read_unlock();
2074
2075	if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) {
2076		/* We need to make sure that we don't uncharge the new
2077		 * socket if we couldn't charge it in the first place
2078		 * as otherwise we uncharge the parent's filter.
2079		 */
2080		if (!is_charged)
2081			RCU_INIT_POINTER(newsk->sk_filter, NULL);
2082		sk_free_unlock_clone(newsk);
2083		newsk = NULL;
2084		goto out;
2085	}
2086	RCU_INIT_POINTER(newsk->sk_reuseport_cb, NULL);
2087
2088	if (bpf_sk_storage_clone(sk, newsk)) {
2089		sk_free_unlock_clone(newsk);
2090		newsk = NULL;
2091		goto out;
2092	}
2093
2094	/* Clear sk_user_data if parent had the pointer tagged
2095	 * as not suitable for copying when cloning.
2096	 */
2097	if (sk_user_data_is_nocopy(newsk))
2098		newsk->sk_user_data = NULL;
2099
2100	newsk->sk_err	   = 0;
2101	newsk->sk_err_soft = 0;
2102	newsk->sk_priority = 0;
2103	newsk->sk_incoming_cpu = raw_smp_processor_id();
2104	if (likely(newsk->sk_net_refcnt))
2105		sock_inuse_add(sock_net(newsk), 1);
2106
2107	/* Before updating sk_refcnt, we must commit prior changes to memory
2108	 * (Documentation/RCU/rculist_nulls.rst for details)
2109	 */
2110	smp_wmb();
2111	refcount_set(&newsk->sk_refcnt, 2);
2112
2113	/* Increment the counter in the same struct proto as the master
2114	 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
2115	 * is the same as sk->sk_prot->socks, as this field was copied
2116	 * with memcpy).
2117	 *
2118	 * This _changes_ the previous behaviour, where
2119	 * tcp_create_openreq_child was always incrementing the
2120	 * equivalent of tcp_prot->socks (inet_sock_nr), so this has
2121	 * to be taken into account in all callers. -acme
2122	 */
2123	sk_refcnt_debug_inc(newsk);
2124	sk_set_socket(newsk, NULL);
2125	sk_tx_queue_clear(newsk);
2126	RCU_INIT_POINTER(newsk->sk_wq, NULL);
2127
2128	if (newsk->sk_prot->sockets_allocated)
2129		sk_sockets_allocated_inc(newsk);
2130
2131	if (sock_needs_netstamp(sk) && newsk->sk_flags & SK_FLAGS_TIMESTAMP)
2132		net_enable_timestamp();
2133out:
2134	return newsk;
2135}
2136EXPORT_SYMBOL_GPL(sk_clone_lock);
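/*
 * Editor's illustrative sketch (not part of the original file): a typical
 * caller of sk_clone_lock(), e.g. when a passive connection is accepted in
 * softirq context. On success the clone is returned bh-locked and must be
 * unlocked once its protocol-private state has been set up (fix-ups go
 * between the clone and the unlock); on failure NULL is returned and the
 * clone has already been freed and unlocked.
 *
 *	struct sock *newsk = sk_clone_lock(sk, GFP_ATOMIC);
 *
 *	if (!newsk)
 *		return NULL;
 *	bh_unlock_sock(newsk);
 *	return newsk;
 */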
2137
2138void sk_free_unlock_clone(struct sock *sk)
2139{
2140	/* It is still a raw copy of the parent, so invalidate
2141	 * the destructor and do a plain sk_free() */
2142	sk->sk_destruct = NULL;
2143	bh_unlock_sock(sk);
2144	sk_free(sk);
2145}
2146EXPORT_SYMBOL_GPL(sk_free_unlock_clone);
2147
2148void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
2149{
2150	u32 max_segs = 1;
2151
2152	sk_dst_set(sk, dst);
2153	sk->sk_route_caps = dst->dev->features | sk->sk_route_forced_caps;
2154	if (sk->sk_route_caps & NETIF_F_GSO)
2155		sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
2156	sk->sk_route_caps &= ~sk->sk_route_nocaps;
2157	if (sk_can_gso(sk)) {
2158		if (dst->header_len && !xfrm_dst_offload_ok(dst)) {
2159			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
2160		} else {
2161			sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
2162			sk->sk_gso_max_size = dst->dev->gso_max_size;
2163			max_segs = max_t(u32, dst->dev->gso_max_segs, 1);
2164		}
2165	}
2166	sk->sk_gso_max_segs = max_segs;
2167}
2168EXPORT_SYMBOL_GPL(sk_setup_caps);
2169
2170/*
2171 *	Simple resource managers for sockets.
2172 */
2173
2174
2175/*
2176 * Write buffer destructor automatically called from kfree_skb.
2177 */
2178void sock_wfree(struct sk_buff *skb)
2179{
2180	struct sock *sk = skb->sk;
2181	unsigned int len = skb->truesize;
2182
2183	if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
2184		/*
2185		 * Keep a reference on sk_wmem_alloc, this will be released
2186		 * after sk_write_space() call
2187		 */
2188		WARN_ON(refcount_sub_and_test(len - 1, &sk->sk_wmem_alloc));
2189		sk->sk_write_space(sk);
2190		len = 1;
2191	}
2192	/*
2193	 * if sk_wmem_alloc reaches 0, we must finish what sk_free()
2194	 * could not do because of in-flight packets
2195	 */
2196	if (refcount_sub_and_test(len, &sk->sk_wmem_alloc))
2197		__sk_free(sk);
2198}
2199EXPORT_SYMBOL(sock_wfree);
2200
2201/* This variant of sock_wfree() is used by TCP,
2202 * since it sets SOCK_USE_WRITE_QUEUE.
2203 */
2204void __sock_wfree(struct sk_buff *skb)
2205{
2206	struct sock *sk = skb->sk;
2207
2208	if (refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc))
2209		__sk_free(sk);
2210}
2211
2212void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
2213{
2214	skb_orphan(skb);
2215	skb->sk = sk;
2216#ifdef CONFIG_INET
2217	if (unlikely(!sk_fullsock(sk))) {
2218		skb->destructor = sock_edemux;
2219		sock_hold(sk);
2220		return;
2221	}
2222#endif
2223	skb->destructor = sock_wfree;
2224	skb_set_hash_from_sk(skb, sk);
2225	/*
2226	 * We used to take a refcount on sk, but the following operation
2227	 * is enough to guarantee sk_free() won't free this sock until
2228	 * all in-flight packets are completed
2229	 */
2230	refcount_add(skb->truesize, &sk->sk_wmem_alloc);
2231}
2232EXPORT_SYMBOL(skb_set_owner_w);
2233
2234static bool can_skb_orphan_partial(const struct sk_buff *skb)
2235{
2236#ifdef CONFIG_TLS_DEVICE
2237	/* Drivers depend on in-order delivery for crypto offload,
2238	 * partial orphan breaks out-of-order-OK logic.
2239	 */
2240	if (skb->decrypted)
2241		return false;
2242#endif
2243	return (skb->destructor == sock_wfree ||
2244		(IS_ENABLED(CONFIG_INET) && skb->destructor == tcp_wfree));
2245}
2246
2247/* This helper is used by netem, as it can hold packets in its
2248 * delay queue. We want to allow the owner socket to send more
2249 * packets, as if they were already TX completed by a typical driver.
2250 * But we also want to keep skb->sk set because some packet schedulers
2251 * rely on it (sch_fq for example).
2252 */
2253void skb_orphan_partial(struct sk_buff *skb)
2254{
2255	if (skb_is_tcp_pure_ack(skb))
2256		return;
2257
2258	if (can_skb_orphan_partial(skb) && skb_set_owner_sk_safe(skb, skb->sk))
2259		return;
2260
2261	skb_orphan(skb);
2262}
2263EXPORT_SYMBOL(skb_orphan_partial);
2264
2265/*
2266 * Read buffer destructor automatically called from kfree_skb.
2267 */
2268void sock_rfree(struct sk_buff *skb)
2269{
2270	struct sock *sk = skb->sk;
2271	unsigned int len = skb->truesize;
2272
2273	atomic_sub(len, &sk->sk_rmem_alloc);
2274	sk_mem_uncharge(sk, len);
2275}
2276EXPORT_SYMBOL(sock_rfree);
2277
2278/*
2279 * Buffer destructor for skbs that are not used directly in read or write
2280 * path, e.g. for error handler skbs. Automatically called from kfree_skb.
2281 */
2282void sock_efree(struct sk_buff *skb)
2283{
2284	sock_put(skb->sk);
2285}
2286EXPORT_SYMBOL(sock_efree);
2287
2288/* Buffer destructor for prefetch/receive path where reference count may
2289 * not be held, e.g. for listen sockets.
2290 */
2291#ifdef CONFIG_INET
2292void sock_pfree(struct sk_buff *skb)
2293{
2294	if (sk_is_refcounted(skb->sk))
2295		sock_gen_put(skb->sk);
2296}
2297EXPORT_SYMBOL(sock_pfree);
2298#endif /* CONFIG_INET */
2299
2300kuid_t sock_i_uid(struct sock *sk)
2301{
2302	kuid_t uid;
2303
2304	read_lock_bh(&sk->sk_callback_lock);
2305	uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : GLOBAL_ROOT_UID;
2306	read_unlock_bh(&sk->sk_callback_lock);
2307	return uid;
2308}
2309EXPORT_SYMBOL(sock_i_uid);
2310
2311unsigned long sock_i_ino(struct sock *sk)
2312{
2313	unsigned long ino;
2314
2315	read_lock_bh(&sk->sk_callback_lock);
2316	ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
2317	read_unlock_bh(&sk->sk_callback_lock);
2318	return ino;
2319}
2320EXPORT_SYMBOL(sock_i_ino);
2321
2322/*
2323 * Allocate a skb from the socket's send buffer.
2324 */
2325struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
2326			     gfp_t priority)
2327{
2328	if (force ||
2329	    refcount_read(&sk->sk_wmem_alloc) < READ_ONCE(sk->sk_sndbuf)) {
2330		struct sk_buff *skb = alloc_skb(size, priority);
2331
2332		if (skb) {
2333			skb_set_owner_w(skb, sk);
2334			return skb;
2335		}
2336	}
2337	return NULL;
2338}
2339EXPORT_SYMBOL(sock_wmalloc);
2340
2341static void sock_ofree(struct sk_buff *skb)
2342{
2343	struct sock *sk = skb->sk;
2344
2345	atomic_sub(skb->truesize, &sk->sk_omem_alloc);
2346}
2347
2348struct sk_buff *sock_omalloc(struct sock *sk, unsigned long size,
2349			     gfp_t priority)
2350{
2351	struct sk_buff *skb;
2352
2353	/* small safe race: SKB_TRUESIZE may differ from final skb->truesize */
2354	if (atomic_read(&sk->sk_omem_alloc) + SKB_TRUESIZE(size) >
2355	    sysctl_optmem_max)
2356		return NULL;
2357
2358	skb = alloc_skb(size, priority);
2359	if (!skb)
2360		return NULL;
2361
2362	atomic_add(skb->truesize, &sk->sk_omem_alloc);
2363	skb->sk = sk;
2364	skb->destructor = sock_ofree;
2365	return skb;
2366}
2367
2368/*
2369 * Allocate a memory block from the socket's option memory buffer.
2370 */
2371void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
2372{
2373	if ((unsigned int)size <= sysctl_optmem_max &&
2374	    atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
2375		void *mem;
2376		/* First do the add, to avoid the race if kmalloc
2377		 * might sleep.
2378		 */
2379		atomic_add(size, &sk->sk_omem_alloc);
2380		mem = kmalloc(size, priority);
2381		if (mem)
2382			return mem;
2383		atomic_sub(size, &sk->sk_omem_alloc);
2384	}
2385	return NULL;
2386}
2387EXPORT_SYMBOL(sock_kmalloc);
2388
2389/* Free an option memory block. Note, we actually want the inline
2390 * here as this allows gcc to detect the nullify and fold away the
2391 * condition entirely.
2392 */
2393static inline void __sock_kfree_s(struct sock *sk, void *mem, int size,
2394				  const bool nullify)
2395{
2396	if (WARN_ON_ONCE(!mem))
2397		return;
2398	if (nullify)
2399		kfree_sensitive(mem);
2400	else
2401		kfree(mem);
2402	atomic_sub(size, &sk->sk_omem_alloc);
2403}
2404
2405void sock_kfree_s(struct sock *sk, void *mem, int size)
2406{
2407	__sock_kfree_s(sk, mem, size, false);
2408}
2409EXPORT_SYMBOL(sock_kfree_s);
2410
2411void sock_kzfree_s(struct sock *sk, void *mem, int size)
2412{
2413	__sock_kfree_s(sk, mem, size, true);
2414}
2415EXPORT_SYMBOL(sock_kzfree_s);
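/*
 * Editor's illustrative sketch (not part of the original file): the usual
 * pairing of sock_kmalloc() with sock_kfree_s() for option memory charged
 * to sk->sk_omem_alloc. The size passed to sock_kfree_s() must match the
 * size originally requested. "struct my_opt" is a hypothetical type.
 *
 *	struct my_opt *opt;
 *
 *	opt = sock_kmalloc(sk, sizeof(*opt), GFP_KERNEL);
 *	if (!opt)
 *		return -ENOBUFS;
 *	memset(opt, 0, sizeof(*opt));
 *	sock_kfree_s(sk, opt, sizeof(*opt));
 */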
2416
2417/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
2418   I think, these locks should be removed for datagram sockets.
2419 */
2420static long sock_wait_for_wmem(struct sock *sk, long timeo)
2421{
2422	DEFINE_WAIT(wait);
2423
2424	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
2425	for (;;) {
2426		if (!timeo)
2427			break;
2428		if (signal_pending(current))
2429			break;
2430		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
2431		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
2432		if (refcount_read(&sk->sk_wmem_alloc) < READ_ONCE(sk->sk_sndbuf))
2433			break;
2434		if (sk->sk_shutdown & SEND_SHUTDOWN)
2435			break;
2436		if (sk->sk_err)
2437			break;
2438		timeo = schedule_timeout(timeo);
2439	}
2440	finish_wait(sk_sleep(sk), &wait);
2441	return timeo;
2442}
2443
2444
2445/*
2446 *	Generic send/receive buffer handlers
2447 */
2448
2449struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
2450				     unsigned long data_len, int noblock,
2451				     int *errcode, int max_page_order)
2452{
2453	struct sk_buff *skb;
2454	long timeo;
2455	int err;
2456
2457	timeo = sock_sndtimeo(sk, noblock);
2458	for (;;) {
2459		err = sock_error(sk);
2460		if (err != 0)
2461			goto failure;
2462
2463		err = -EPIPE;
2464		if (sk->sk_shutdown & SEND_SHUTDOWN)
2465			goto failure;
2466
2467		if (sk_wmem_alloc_get(sk) < READ_ONCE(sk->sk_sndbuf))
2468			break;
2469
2470		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
2471		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
2472		err = -EAGAIN;
2473		if (!timeo)
2474			goto failure;
2475		if (signal_pending(current))
2476			goto interrupted;
2477		timeo = sock_wait_for_wmem(sk, timeo);
2478	}
2479	skb = alloc_skb_with_frags(header_len, data_len, max_page_order,
2480				   errcode, sk->sk_allocation);
2481	if (skb)
2482		skb_set_owner_w(skb, sk);
2483	return skb;
2484
2485interrupted:
2486	err = sock_intr_errno(timeo);
2487failure:
2488	*errcode = err;
2489	return NULL;
2490}
2491EXPORT_SYMBOL(sock_alloc_send_pskb);
2492
2493struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
2494				    int noblock, int *errcode)
2495{
2496	return sock_alloc_send_pskb(sk, size, 0, noblock, errcode, 0);
2497}
2498EXPORT_SYMBOL(sock_alloc_send_skb);
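/*
 * Editor's illustrative sketch (not part of the original file): how a
 * datagram sendmsg() implementation typically obtains a write-buffer
 * charged skb. The returned skb already has its owner set through
 * skb_set_owner_w(), so a later kfree_skb() uncharges sk_wmem_alloc via
 * sock_wfree(). "hlen" and "len" are hypothetical locals.
 *
 *	struct sk_buff *skb;
 *	int err;
 *
 *	skb = sock_alloc_send_skb(sk, hlen + len,
 *				  msg->msg_flags & MSG_DONTWAIT, &err);
 *	if (!skb)
 *		return err;
 *	skb_reserve(skb, hlen);
 */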
2499
2500int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
2501		     struct sockcm_cookie *sockc)
2502{
2503	u32 tsflags;
2504
2505	switch (cmsg->cmsg_type) {
2506	case SO_MARK:
2507		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
2508			return -EPERM;
2509		if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32)))
2510			return -EINVAL;
2511		sockc->mark = *(u32 *)CMSG_DATA(cmsg);
2512		break;
2513	case SO_TIMESTAMPING_OLD:
2514		if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32)))
2515			return -EINVAL;
2516
2517		tsflags = *(u32 *)CMSG_DATA(cmsg);
2518		if (tsflags & ~SOF_TIMESTAMPING_TX_RECORD_MASK)
2519			return -EINVAL;
2520
2521		sockc->tsflags &= ~SOF_TIMESTAMPING_TX_RECORD_MASK;
2522		sockc->tsflags |= tsflags;
2523		break;
2524	case SCM_TXTIME:
2525		if (!sock_flag(sk, SOCK_TXTIME))
2526			return -EINVAL;
2527		if (cmsg->cmsg_len != CMSG_LEN(sizeof(u64)))
2528			return -EINVAL;
2529		sockc->transmit_time = get_unaligned((u64 *)CMSG_DATA(cmsg));
2530		break;
2531	/* SCM_RIGHTS and SCM_CREDENTIALS are semantically in SOL_UNIX. */
2532	case SCM_RIGHTS:
2533	case SCM_CREDENTIALS:
2534		break;
2535	default:
2536		return -EINVAL;
2537	}
2538	return 0;
2539}
2540EXPORT_SYMBOL(__sock_cmsg_send);
2541
2542int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
2543		   struct sockcm_cookie *sockc)
2544{
2545	struct cmsghdr *cmsg;
2546	int ret;
2547
2548	for_each_cmsghdr(cmsg, msg) {
2549		if (!CMSG_OK(msg, cmsg))
2550			return -EINVAL;
2551		if (cmsg->cmsg_level != SOL_SOCKET)
2552			continue;
2553		ret = __sock_cmsg_send(sk, msg, cmsg, sockc);
2554		if (ret)
2555			return ret;
2556	}
2557	return 0;
2558}
2559EXPORT_SYMBOL(sock_cmsg_send);
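/*
 * Editor's illustrative sketch (not part of the original file): a sendmsg()
 * path consuming SOL_SOCKET control messages before building packets.
 * sockcm_init() seeds the cookie from the socket's defaults and
 * sock_cmsg_send() then overrides it from the supplied cmsgs.
 *
 *	struct sockcm_cookie sockc;
 *	int err;
 *
 *	sockcm_init(&sockc, sk);
 *	if (msg->msg_controllen) {
 *		err = sock_cmsg_send(sk, msg, &sockc);
 *		if (err)
 *			return err;
 *	}
 */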
2560
2561static void sk_enter_memory_pressure(struct sock *sk)
2562{
2563	if (!sk->sk_prot->enter_memory_pressure)
2564		return;
2565
2566	sk->sk_prot->enter_memory_pressure(sk);
2567}
2568
2569static void sk_leave_memory_pressure(struct sock *sk)
2570{
2571	if (sk->sk_prot->leave_memory_pressure) {
2572		sk->sk_prot->leave_memory_pressure(sk);
2573	} else {
2574		unsigned long *memory_pressure = sk->sk_prot->memory_pressure;
2575
2576		if (memory_pressure && READ_ONCE(*memory_pressure))
2577			WRITE_ONCE(*memory_pressure, 0);
2578	}
2579}
2580
2581#define SKB_FRAG_PAGE_ORDER	get_order(32768)
2582DEFINE_STATIC_KEY_FALSE(net_high_order_alloc_disable_key);
2583
2584/**
2585 * skb_page_frag_refill - check that a page_frag contains enough room
2586 * @sz: minimum size of the fragment we want to get
2587 * @pfrag: pointer to page_frag
2588 * @gfp: priority for memory allocation
2589 *
2590 * Note: While this allocator tries to use high order pages, there is
2591 * no guarantee that allocations succeed. Therefore, @sz MUST be
2592 * less than or equal to PAGE_SIZE.
2593 */
2594bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t gfp)
2595{
2596	if (pfrag->page) {
2597		if (page_ref_count(pfrag->page) == 1) {
2598			pfrag->offset = 0;
2599			return true;
2600		}
2601		if (pfrag->offset + sz <= pfrag->size)
2602			return true;
2603		put_page(pfrag->page);
2604	}
2605
2606	pfrag->offset = 0;
2607	if (SKB_FRAG_PAGE_ORDER &&
2608	    !static_branch_unlikely(&net_high_order_alloc_disable_key)) {
2609		/* Avoid direct reclaim but allow kswapd to wake */
2610		pfrag->page = alloc_pages((gfp & ~__GFP_DIRECT_RECLAIM) |
2611					  __GFP_COMP | __GFP_NOWARN |
2612					  __GFP_NORETRY,
2613					  SKB_FRAG_PAGE_ORDER);
2614		if (likely(pfrag->page)) {
2615			pfrag->size = PAGE_SIZE << SKB_FRAG_PAGE_ORDER;
2616			return true;
2617		}
2618	}
2619	pfrag->page = alloc_page(gfp);
2620	if (likely(pfrag->page)) {
2621		pfrag->size = PAGE_SIZE;
2622		return true;
2623	}
2624	return false;
2625}
2626EXPORT_SYMBOL(skb_page_frag_refill);
2627
2628bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
2629{
2630	if (likely(skb_page_frag_refill(32U, pfrag, sk->sk_allocation)))
2631		return true;
2632
2633	sk_enter_memory_pressure(sk);
2634	sk_stream_moderate_sndbuf(sk);
2635	return false;
2636}
2637EXPORT_SYMBOL(sk_page_frag_refill);
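/*
 * Editor's illustrative sketch (not part of the original file): the common
 * consumer pattern for sk_page_frag_refill() when appending user data to
 * page fragments. Callers clamp their copy length to the remaining room and
 * advance pfrag->offset themselves; a real stream protocol would usually
 * wait for memory rather than fail. "data" and "copy" are hypothetical.
 *
 *	struct page_frag *pfrag = sk_page_frag(sk);
 *
 *	if (!sk_page_frag_refill(sk, pfrag))
 *		return -EAGAIN;
 *	copy = min_t(int, copy, pfrag->size - pfrag->offset);
 *	memcpy(page_address(pfrag->page) + pfrag->offset, data, copy);
 *	pfrag->offset += copy;
 */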
2638
2639void __lock_sock(struct sock *sk)
2640	__releases(&sk->sk_lock.slock)
2641	__acquires(&sk->sk_lock.slock)
2642{
2643	DEFINE_WAIT(wait);
2644
2645	for (;;) {
2646		prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
2647					TASK_UNINTERRUPTIBLE);
2648		spin_unlock_bh(&sk->sk_lock.slock);
2649		schedule();
2650		spin_lock_bh(&sk->sk_lock.slock);
2651		if (!sock_owned_by_user(sk))
2652			break;
2653	}
2654	finish_wait(&sk->sk_lock.wq, &wait);
2655}
2656
2657void __release_sock(struct sock *sk)
2658	__releases(&sk->sk_lock.slock)
2659	__acquires(&sk->sk_lock.slock)
2660{
2661	struct sk_buff *skb, *next;
2662
2663	while ((skb = sk->sk_backlog.head) != NULL) {
2664		sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
2665
2666		spin_unlock_bh(&sk->sk_lock.slock);
2667
2668		do {
2669			next = skb->next;
2670			prefetch(next);
2671			WARN_ON_ONCE(skb_dst_is_noref(skb));
2672			skb_mark_not_on_list(skb);
2673			sk_backlog_rcv(sk, skb);
2674
2675			cond_resched();
2676
2677			skb = next;
2678		} while (skb != NULL);
2679
2680		spin_lock_bh(&sk->sk_lock.slock);
2681	}
2682
2683	/*
2684	 * Doing the zeroing here guarantees we cannot loop forever
2685	 * while a wild producer attempts to flood us.
2686	 */
2687	sk->sk_backlog.len = 0;
2688}
2689
2690void __sk_flush_backlog(struct sock *sk)
2691{
2692	spin_lock_bh(&sk->sk_lock.slock);
2693	__release_sock(sk);
2694	spin_unlock_bh(&sk->sk_lock.slock);
2695}
2696
2697/**
2698 * sk_wait_data - wait for data to arrive at sk_receive_queue
2699 * @sk:    sock to wait on
2700 * @timeo: for how long
2701 * @skb:   last skb seen on sk_receive_queue
2702 *
2703 * Socket state, including sk->sk_err, is now changed only under the socket
2704 * lock, hence we may omit checks after joining the wait queue.
2705 * We check the receive queue before schedule() only as an optimization;
2706 * it is very likely that release_sock() added new data.
2707 */
2708int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb)
2709{
2710	DEFINE_WAIT_FUNC(wait, woken_wake_function);
2711	int rc;
2712
2713	add_wait_queue(sk_sleep(sk), &wait);
2714	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2715	rc = sk_wait_event(sk, timeo, skb_peek_tail(&sk->sk_receive_queue) != skb, &wait);
2716	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2717	remove_wait_queue(sk_sleep(sk), &wait);
2718	return rc;
2719}
2720EXPORT_SYMBOL(sk_wait_data);
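/*
 * Editor's illustrative sketch (not part of the original file): a blocking
 * recvmsg() loop built on sk_wait_data(). The socket lock must be held;
 * sk_wait_data() drops it while sleeping and re-acquires it before
 * returning. A full implementation would also check for pending signals
 * and sk->sk_err.
 *
 *	long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 *	struct sk_buff *skb;
 *	int err = 0;
 *
 *	lock_sock(sk);
 *	while (!(skb = skb_peek(&sk->sk_receive_queue))) {
 *		if (!timeo) {
 *			err = -EAGAIN;
 *			break;
 *		}
 *		sk_wait_data(sk, &timeo, NULL);
 *	}
 *	release_sock(sk);
 */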
2721
2722/**
2723 *	__sk_mem_raise_allocated - increase memory_allocated
2724 *	@sk: socket
2725 *	@size: memory size to allocate
2726 *	@amt: pages to allocate
2727 *	@kind: allocation type
2728 *
2729 *	Similar to __sk_mem_schedule(), but does not update sk_forward_alloc
2730 */
2731int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind)
2732{
2733	struct proto *prot = sk->sk_prot;
2734	long allocated = sk_memory_allocated_add(sk, amt);
2735	bool charged = true;
2736
2737	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
2738	    !(charged = mem_cgroup_charge_skmem(sk->sk_memcg, amt)))
2739		goto suppress_allocation;
2740
2741	/* Under limit. */
2742	if (allocated <= sk_prot_mem_limits(sk, 0)) {
2743		sk_leave_memory_pressure(sk);
2744		return 1;
2745	}
2746
2747	/* Under pressure. */
2748	if (allocated > sk_prot_mem_limits(sk, 1))
2749		sk_enter_memory_pressure(sk);
2750
2751	/* Over hard limit. */
2752	if (allocated > sk_prot_mem_limits(sk, 2))
2753		goto suppress_allocation;
2754
2755	/* guarantee minimum buffer size under pressure */
2756	if (kind == SK_MEM_RECV) {
2757		if (atomic_read(&sk->sk_rmem_alloc) < sk_get_rmem0(sk, prot))
2758			return 1;
2759
2760	} else { /* SK_MEM_SEND */
2761		int wmem0 = sk_get_wmem0(sk, prot);
2762
2763		if (sk->sk_type == SOCK_STREAM) {
2764			if (sk->sk_wmem_queued < wmem0)
2765				return 1;
2766		} else if (refcount_read(&sk->sk_wmem_alloc) < wmem0) {
2767				return 1;
2768		}
2769	}
2770
2771	if (sk_has_memory_pressure(sk)) {
2772		u64 alloc;
2773
2774		if (!sk_under_memory_pressure(sk))
2775			return 1;
2776		alloc = sk_sockets_allocated_read_positive(sk);
2777		if (sk_prot_mem_limits(sk, 2) > alloc *
2778		    sk_mem_pages(sk->sk_wmem_queued +
2779				 atomic_read(&sk->sk_rmem_alloc) +
2780				 sk->sk_forward_alloc))
2781			return 1;
2782	}
2783
2784suppress_allocation:
2785
2786	if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
2787		sk_stream_moderate_sndbuf(sk);
2788
2789		/* Fail only if socket is _under_ its sndbuf.
2790		 * In this case we cannot block, so that we have to fail.
2791		 */
2792		if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
2793			return 1;
2794	}
2795
2796	if (kind == SK_MEM_SEND || (kind == SK_MEM_RECV && charged))
2797		trace_sock_exceed_buf_limit(sk, prot, allocated, kind);
2798
2799	sk_memory_allocated_sub(sk, amt);
2800
2801	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
2802		mem_cgroup_uncharge_skmem(sk->sk_memcg, amt);
2803
2804	return 0;
2805}
2806EXPORT_SYMBOL(__sk_mem_raise_allocated);
2807
2808/**
2809 *	__sk_mem_schedule - increase sk_forward_alloc and memory_allocated
2810 *	@sk: socket
2811 *	@size: memory size to allocate
2812 *	@kind: allocation type
2813 *
2814 *	If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
2815 *	rmem allocation. This function assumes that protocols which have
2816 *	memory_pressure use sk_wmem_queued as write buffer accounting.
2817 */
2818int __sk_mem_schedule(struct sock *sk, int size, int kind)
2819{
2820	int ret, amt = sk_mem_pages(size);
2821
2822	sk->sk_forward_alloc += amt << SK_MEM_QUANTUM_SHIFT;
2823	ret = __sk_mem_raise_allocated(sk, size, amt, kind);
2824	if (!ret)
2825		sk->sk_forward_alloc -= amt << SK_MEM_QUANTUM_SHIFT;
2826	return ret;
2827}
2828EXPORT_SYMBOL(__sk_mem_schedule);
2829
2830/**
2831 *	__sk_mem_reduce_allocated - reclaim memory_allocated
2832 *	@sk: socket
2833 *	@amount: number of quanta
2834 *
2835 *	Similar to __sk_mem_reclaim(), but does not update sk_forward_alloc
2836 */
2837void __sk_mem_reduce_allocated(struct sock *sk, int amount)
2838{
2839	sk_memory_allocated_sub(sk, amount);
2840
2841	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
2842		mem_cgroup_uncharge_skmem(sk->sk_memcg, amount);
2843
2844	if (sk_under_memory_pressure(sk) &&
2845	    (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
2846		sk_leave_memory_pressure(sk);
2847}
2848EXPORT_SYMBOL(__sk_mem_reduce_allocated);
2849
2850/**
2851 *	__sk_mem_reclaim - reclaim sk_forward_alloc and memory_allocated
2852 *	@sk: socket
2853 *	@amount: number of bytes (rounded down to a SK_MEM_QUANTUM multiple)
2854 */
2855void __sk_mem_reclaim(struct sock *sk, int amount)
2856{
2857	amount >>= SK_MEM_QUANTUM_SHIFT;
2858	sk->sk_forward_alloc -= amount << SK_MEM_QUANTUM_SHIFT;
2859	__sk_mem_reduce_allocated(sk, amount);
2860}
2861EXPORT_SYMBOL(__sk_mem_reclaim);
2862
2863int sk_set_peek_off(struct sock *sk, int val)
2864{
2865	sk->sk_peek_off = val;
2866	return 0;
2867}
2868EXPORT_SYMBOL_GPL(sk_set_peek_off);
2869
2870/*
2871 * Set of default routines for initialising struct proto_ops when
2872 * the protocol does not support a particular function. In certain
2873 * cases where it makes no sense for a protocol to have a "do nothing"
2874 * function, some default processing is provided.
2875 */
2876
2877int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
2878{
2879	return -EOPNOTSUPP;
2880}
2881EXPORT_SYMBOL(sock_no_bind);
2882
2883int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
2884		    int len, int flags)
2885{
2886	return -EOPNOTSUPP;
2887}
2888EXPORT_SYMBOL(sock_no_connect);
2889
2890int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
2891{
2892	return -EOPNOTSUPP;
2893}
2894EXPORT_SYMBOL(sock_no_socketpair);
2895
2896int sock_no_accept(struct socket *sock, struct socket *newsock, int flags,
2897		   bool kern)
2898{
2899	return -EOPNOTSUPP;
2900}
2901EXPORT_SYMBOL(sock_no_accept);
2902
2903int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
2904		    int peer)
2905{
2906	return -EOPNOTSUPP;
2907}
2908EXPORT_SYMBOL(sock_no_getname);
2909
2910int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
2911{
2912	return -EOPNOTSUPP;
2913}
2914EXPORT_SYMBOL(sock_no_ioctl);
2915
2916int sock_no_listen(struct socket *sock, int backlog)
2917{
2918	return -EOPNOTSUPP;
2919}
2920EXPORT_SYMBOL(sock_no_listen);
2921
2922int sock_no_shutdown(struct socket *sock, int how)
2923{
2924	return -EOPNOTSUPP;
2925}
2926EXPORT_SYMBOL(sock_no_shutdown);
2927
2928int sock_no_sendmsg(struct socket *sock, struct msghdr *m, size_t len)
2929{
2930	return -EOPNOTSUPP;
2931}
2932EXPORT_SYMBOL(sock_no_sendmsg);
2933
2934int sock_no_sendmsg_locked(struct sock *sk, struct msghdr *m, size_t len)
2935{
2936	return -EOPNOTSUPP;
2937}
2938EXPORT_SYMBOL(sock_no_sendmsg_locked);
2939
2940int sock_no_recvmsg(struct socket *sock, struct msghdr *m, size_t len,
2941		    int flags)
2942{
2943	return -EOPNOTSUPP;
2944}
2945EXPORT_SYMBOL(sock_no_recvmsg);
2946
2947int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
2948{
2949	/* Mirror missing mmap method error code */
2950	return -ENODEV;
2951}
2952EXPORT_SYMBOL(sock_no_mmap);
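/*
 * Editor's illustrative sketch (not part of the original file): a protocol
 * that does not support some operations can point the corresponding
 * proto_ops entries at the sock_no_*() stubs above instead of leaving them
 * NULL. The struct below is hypothetical and heavily abridged.
 *
 *	static const struct proto_ops my_dgram_ops = {
 *		.family		= PF_INET,
 *		.owner		= THIS_MODULE,
 *		.bind		= my_bind,
 *		.sendmsg	= my_sendmsg,
 *		.recvmsg	= my_recvmsg,
 *		.connect	= sock_no_connect,
 *		.socketpair	= sock_no_socketpair,
 *		.accept		= sock_no_accept,
 *		.listen		= sock_no_listen,
 *		.mmap		= sock_no_mmap,
 *	};
 */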
2953
2954/*
2955 * When a file is received (via SCM_RIGHTS, etc), we must bump the
2956 * various sock-based usage counts.
2957 */
2958void __receive_sock(struct file *file)
2959{
2960	struct socket *sock;
2961
2962	sock = sock_from_file(file);
2963	if (sock) {
2964		sock_update_netprioidx(&sock->sk->sk_cgrp_data);
2965		sock_update_classid(&sock->sk->sk_cgrp_data);
2966	}
2967}
2968
2969ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
2970{
2971	ssize_t res;
2972	struct msghdr msg = {.msg_flags = flags};
2973	struct kvec iov;
2974	char *kaddr = kmap(page);
2975	iov.iov_base = kaddr + offset;
2976	iov.iov_len = size;
2977	res = kernel_sendmsg(sock, &msg, &iov, 1, size);
2978	kunmap(page);
2979	return res;
2980}
2981EXPORT_SYMBOL(sock_no_sendpage);
2982
2983ssize_t sock_no_sendpage_locked(struct sock *sk, struct page *page,
2984				int offset, size_t size, int flags)
2985{
2986	ssize_t res;
2987	struct msghdr msg = {.msg_flags = flags};
2988	struct kvec iov;
2989	char *kaddr = kmap(page);
2990
2991	iov.iov_base = kaddr + offset;
2992	iov.iov_len = size;
2993	res = kernel_sendmsg_locked(sk, &msg, &iov, 1, size);
2994	kunmap(page);
2995	return res;
2996}
2997EXPORT_SYMBOL(sock_no_sendpage_locked);
2998
2999/*
3000 *	Default Socket Callbacks
3001 */
3002
3003static void sock_def_wakeup(struct sock *sk)
3004{
3005	struct socket_wq *wq;
3006
3007	rcu_read_lock();
3008	wq = rcu_dereference(sk->sk_wq);
3009	if (skwq_has_sleeper(wq))
3010		wake_up_interruptible_all(&wq->wait);
3011	rcu_read_unlock();
3012}
3013
3014static void sock_def_error_report(struct sock *sk)
3015{
3016	struct socket_wq *wq;
3017
3018	rcu_read_lock();
3019	wq = rcu_dereference(sk->sk_wq);
3020	if (skwq_has_sleeper(wq))
3021		wake_up_interruptible_poll(&wq->wait, EPOLLERR);
3022	sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
3023	rcu_read_unlock();
3024}
3025
3026void sock_def_readable(struct sock *sk)
3027{
3028	struct socket_wq *wq;
3029
3030	rcu_read_lock();
3031	wq = rcu_dereference(sk->sk_wq);
3032	if (skwq_has_sleeper(wq))
3033		wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN | EPOLLPRI |
3034						EPOLLRDNORM | EPOLLRDBAND);
3035	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
3036	rcu_read_unlock();
3037}
3038
3039static void sock_def_write_space(struct sock *sk)
3040{
3041	struct socket_wq *wq;
3042
3043	rcu_read_lock();
3044
3045	/* Do not wake up a writer until he can make "significant"
3046	 * progress.  --DaveM
3047	 */
3048	if ((refcount_read(&sk->sk_wmem_alloc) << 1) <= READ_ONCE(sk->sk_sndbuf)) {
3049		wq = rcu_dereference(sk->sk_wq);
3050		if (skwq_has_sleeper(wq))
3051			wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
3052						EPOLLWRNORM | EPOLLWRBAND);
3053
3054		/* Should agree with poll, otherwise some programs break */
3055		if (sock_writeable(sk))
3056			sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
3057	}
3058
3059	rcu_read_unlock();
3060}
3061
3062static void sock_def_destruct(struct sock *sk)
3063{
3064}
3065
3066void sk_send_sigurg(struct sock *sk)
3067{
3068	if (sk->sk_socket && sk->sk_socket->file)
3069		if (send_sigurg(&sk->sk_socket->file->f_owner))
3070			sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
3071}
3072EXPORT_SYMBOL(sk_send_sigurg);
3073
3074void sk_reset_timer(struct sock *sk, struct timer_list* timer,
3075		    unsigned long expires)
3076{
3077	if (!mod_timer(timer, expires))
3078		sock_hold(sk);
3079}
3080EXPORT_SYMBOL(sk_reset_timer);
3081
3082void sk_stop_timer(struct sock *sk, struct timer_list* timer)
3083{
3084	if (del_timer(timer))
3085		__sock_put(sk);
3086}
3087EXPORT_SYMBOL(sk_stop_timer);
3088
3089void sk_stop_timer_sync(struct sock *sk, struct timer_list *timer)
3090{
3091	if (del_timer_sync(timer))
3092		__sock_put(sk);
3093}
3094EXPORT_SYMBOL(sk_stop_timer_sync);
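/*
 * Editor's illustrative sketch (not part of the original file): protocol
 * timers are armed with sk_reset_timer(), which takes a reference on the
 * sock unless the timer was already pending, and cancelled with
 * sk_stop_timer(), which drops that reference if the timer was still
 * queued. Handlers such as the TCP timers end with sock_put() themselves.
 * "my_timer" is a hypothetical struct timer_list embedded in the sock.
 *
 *	sk_reset_timer(sk, &my_timer, jiffies + HZ);
 *	sk_stop_timer(sk, &my_timer);
 */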
3095
3096void sock_init_data(struct socket *sock, struct sock *sk)
3097{
3098	sk_init_common(sk);
3099	sk->sk_send_head	=	NULL;
3100
3101	timer_setup(&sk->sk_timer, NULL, 0);
3102
3103	sk->sk_allocation	=	GFP_KERNEL;
3104	sk->sk_rcvbuf		=	sysctl_rmem_default;
3105	sk->sk_sndbuf		=	sysctl_wmem_default;
3106	sk->sk_state		=	TCP_CLOSE;
3107	sk_set_socket(sk, sock);
3108
3109	sock_set_flag(sk, SOCK_ZAPPED);
3110
3111	if (sock) {
3112		sk->sk_type	=	sock->type;
3113		RCU_INIT_POINTER(sk->sk_wq, &sock->wq);
3114		sock->sk	=	sk;
3115		sk->sk_uid	=	SOCK_INODE(sock)->i_uid;
3116	} else {
3117		RCU_INIT_POINTER(sk->sk_wq, NULL);
3118		sk->sk_uid	=	make_kuid(sock_net(sk)->user_ns, 0);
3119	}
3120
3121	rwlock_init(&sk->sk_callback_lock);
3122	if (sk->sk_kern_sock)
3123		lockdep_set_class_and_name(
3124			&sk->sk_callback_lock,
3125			af_kern_callback_keys + sk->sk_family,
3126			af_family_kern_clock_key_strings[sk->sk_family]);
3127	else
3128		lockdep_set_class_and_name(
3129			&sk->sk_callback_lock,
3130			af_callback_keys + sk->sk_family,
3131			af_family_clock_key_strings[sk->sk_family]);
3132
3133	sk->sk_state_change	=	sock_def_wakeup;
3134	sk->sk_data_ready	=	sock_def_readable;
3135	sk->sk_write_space	=	sock_def_write_space;
3136	sk->sk_error_report	=	sock_def_error_report;
3137	sk->sk_destruct		=	sock_def_destruct;
3138
3139	sk->sk_frag.page	=	NULL;
3140	sk->sk_frag.offset	=	0;
3141	sk->sk_peek_off		=	-1;
3142
3143	sk->sk_peer_pid 	=	NULL;
3144	sk->sk_peer_cred	=	NULL;
3145	spin_lock_init(&sk->sk_peer_lock);
3146
3147	sk->sk_write_pending	=	0;
3148	sk->sk_rcvlowat		=	1;
3149	sk->sk_rcvtimeo		=	MAX_SCHEDULE_TIMEOUT;
3150	sk->sk_sndtimeo		=	MAX_SCHEDULE_TIMEOUT;
3151
3152	sk->sk_stamp = SK_DEFAULT_STAMP;
3153#if BITS_PER_LONG==32
3154	seqlock_init(&sk->sk_stamp_seq);
3155#endif
3156	atomic_set(&sk->sk_zckey, 0);
3157
3158#ifdef CONFIG_NET_RX_BUSY_POLL
3159	sk->sk_napi_id		=	0;
3160	sk->sk_ll_usec		=	sysctl_net_busy_read;
3161#endif
3162
3163	sk->sk_max_pacing_rate = ~0UL;
3164	sk->sk_pacing_rate = ~0UL;
3165	WRITE_ONCE(sk->sk_pacing_shift, 10);
3166	sk->sk_incoming_cpu = -1;
3167
3168	sk_rx_queue_clear(sk);
3169	/*
3170	 * Before updating sk_refcnt, we must commit prior changes to memory
3171	 * (Documentation/RCU/rculist_nulls.rst for details)
3172	 */
3173	smp_wmb();
3174	refcount_set(&sk->sk_refcnt, 1);
3175	atomic_set(&sk->sk_drops, 0);
3176}
3177EXPORT_SYMBOL(sock_init_data);
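/*
 * Editor's illustrative sketch (not part of the original file): after
 * sock_init_data() has installed the default callbacks above
 * (sock_def_readable() and friends), a protocol's init path typically
 * overrides the ones it cares about. "my_data_ready" and "my_state_change"
 * are hypothetical.
 *
 *	sock_init_data(sock, sk);
 *	sk->sk_data_ready   = my_data_ready;
 *	sk->sk_state_change = my_state_change;
 *	sk->sk_allocation   = GFP_ATOMIC;
 */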
3178
3179void lock_sock_nested(struct sock *sk, int subclass)
3180{
3181	might_sleep();
3182	spin_lock_bh(&sk->sk_lock.slock);
3183	if (sk->sk_lock.owned)
3184		__lock_sock(sk);
3185	sk->sk_lock.owned = 1;
3186	spin_unlock(&sk->sk_lock.slock);
3187	/*
3188	 * The sk_lock has mutex_lock() semantics here:
3189	 */
3190	mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
3191	local_bh_enable();
3192}
3193EXPORT_SYMBOL(lock_sock_nested);
3194
3195void release_sock(struct sock *sk)
3196{
3197	spin_lock_bh(&sk->sk_lock.slock);
3198	if (sk->sk_backlog.tail)
3199		__release_sock(sk);
3200
3201	/* Warning : release_cb() might need to release sk ownership,
3202	 * ie call sock_release_ownership(sk) before us.
3203	 */
3204	if (sk->sk_prot->release_cb)
3205		sk->sk_prot->release_cb(sk);
3206
3207	sock_release_ownership(sk);
3208	if (waitqueue_active(&sk->sk_lock.wq))
3209		wake_up(&sk->sk_lock.wq);
3210	spin_unlock_bh(&sk->sk_lock.slock);
3211}
3212EXPORT_SYMBOL(release_sock);
3213
3214/**
3215 * lock_sock_fast - fast version of lock_sock
3216 * @sk: socket
3217 *
3218 * This version should be used for very small sections, where the process won't block.
3219 * Return false if the fast path is taken:
3220 *
3221 *   sk_lock.slock locked, owned = 0, BH disabled
3222 *
3223 * Return true if the slow path is taken:
3224 *
3225 *   sk_lock.slock unlocked, owned = 1, BH enabled
3226 */
3227bool lock_sock_fast(struct sock *sk) __acquires(&sk->sk_lock.slock)
3228{
3229	might_sleep();
3230	spin_lock_bh(&sk->sk_lock.slock);
3231
3232	if (!sk->sk_lock.owned)
3233		/*
3234		 * Note : We must disable BH
3235		 */
3236		return false;
3237
3238	__lock_sock(sk);
3239	sk->sk_lock.owned = 1;
3240	spin_unlock(&sk->sk_lock.slock);
3241	/*
3242	 * The sk_lock has mutex_lock() semantics here:
3243	 */
3244	mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
3245	__acquire(&sk->sk_lock.slock);
3246	local_bh_enable();
3247	return true;
3248}
3249EXPORT_SYMBOL(lock_sock_fast);
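/*
 * Editor's illustrative sketch (not part of the original file):
 * lock_sock_fast() must always be paired with unlock_sock_fast(), passing
 * back the returned flag so the matching unlock (release_sock() on the
 * slow path, spin_unlock_bh() on the fast path) is performed. The critical
 * section should be short and must not block.
 *
 *	bool slow = lock_sock_fast(sk);
 *
 *	sk_mem_reclaim(sk);
 *	unlock_sock_fast(sk, slow);
 */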
3250
3251int sock_gettstamp(struct socket *sock, void __user *userstamp,
3252		   bool timeval, bool time32)
3253{
3254	struct sock *sk = sock->sk;
3255	struct timespec64 ts;
3256
3257	sock_enable_timestamp(sk, SOCK_TIMESTAMP);
3258	ts = ktime_to_timespec64(sock_read_timestamp(sk));
3259	if (ts.tv_sec == -1)
3260		return -ENOENT;
3261	if (ts.tv_sec == 0) {
3262		ktime_t kt = ktime_get_real();
3263		sock_write_timestamp(sk, kt);
3264		ts = ktime_to_timespec64(kt);
3265	}
3266
3267	if (timeval)
3268		ts.tv_nsec /= 1000;
3269
3270#ifdef CONFIG_COMPAT_32BIT_TIME
3271	if (time32)
3272		return put_old_timespec32(&ts, userstamp);
3273#endif
3274#ifdef CONFIG_SPARC64
3275	/* beware of padding in sparc64 timeval */
3276	if (timeval && !in_compat_syscall()) {
3277		struct __kernel_old_timeval __user tv = {
3278			.tv_sec = ts.tv_sec,
3279			.tv_usec = ts.tv_nsec,
3280		};
3281		if (copy_to_user(userstamp, &tv, sizeof(tv)))
3282			return -EFAULT;
3283		return 0;
3284	}
3285#endif
3286	return put_timespec64(&ts, userstamp);
3287}
3288EXPORT_SYMBOL(sock_gettstamp);
3289
3290void sock_enable_timestamp(struct sock *sk, enum sock_flags flag)
3291{
3292	if (!sock_flag(sk, flag)) {
3293		unsigned long previous_flags = sk->sk_flags;
3294
3295		sock_set_flag(sk, flag);
3296		/*
3297		 * we just set one of the two flags which require net
3298		 * time stamping, but time stamping might have been on
3299		 * already because of the other one
3300		 */
3301		if (sock_needs_netstamp(sk) &&
3302		    !(previous_flags & SK_FLAGS_TIMESTAMP))
3303			net_enable_timestamp();
3304	}
3305}
3306
3307int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
3308		       int level, int type)
3309{
3310	struct sock_exterr_skb *serr;
3311	struct sk_buff *skb;
3312	int copied, err;
3313
3314	err = -EAGAIN;
3315	skb = sock_dequeue_err_skb(sk);
3316	if (skb == NULL)
3317		goto out;
3318
3319	copied = skb->len;
3320	if (copied > len) {
3321		msg->msg_flags |= MSG_TRUNC;
3322		copied = len;
3323	}
3324	err = skb_copy_datagram_msg(skb, 0, msg, copied);
3325	if (err)
3326		goto out_free_skb;
3327
3328	sock_recv_timestamp(msg, sk, skb);
3329
3330	serr = SKB_EXT_ERR(skb);
3331	put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);
3332
3333	msg->msg_flags |= MSG_ERRQUEUE;
3334	err = copied;
3335
3336out_free_skb:
3337	kfree_skb(skb);
3338out:
3339	return err;
3340}
3341EXPORT_SYMBOL(sock_recv_errqueue);
3342
3343/*
3344 *	Get a socket option on a socket.
3345 *
3346 *	FIX: POSIX 1003.1g is very ambiguous here. It states that
3347 *	asynchronous errors should be reported by getsockopt. We assume
3348 *	this means if you specify SO_ERROR (otherwise what's the point of it).
3349 */
3350int sock_common_getsockopt(struct socket *sock, int level, int optname,
3351			   char __user *optval, int __user *optlen)
3352{
3353	struct sock *sk = sock->sk;
3354
3355	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
3356}
3357EXPORT_SYMBOL(sock_common_getsockopt);
3358
3359int sock_common_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
3360			int flags)
3361{
3362	struct sock *sk = sock->sk;
3363	int addr_len = 0;
3364	int err;
3365
3366	err = sk->sk_prot->recvmsg(sk, msg, size, flags & MSG_DONTWAIT,
3367				   flags & ~MSG_DONTWAIT, &addr_len);
3368	if (err >= 0)
3369		msg->msg_namelen = addr_len;
3370	return err;
3371}
3372EXPORT_SYMBOL(sock_common_recvmsg);
3373
3374/*
3375 *	Set socket options on an inet socket.
3376 */
3377int sock_common_setsockopt(struct socket *sock, int level, int optname,
3378			   sockptr_t optval, unsigned int optlen)
3379{
3380	struct sock *sk = sock->sk;
3381
3382	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
3383}
3384EXPORT_SYMBOL(sock_common_setsockopt);
3385
3386void sk_common_release(struct sock *sk)
3387{
3388	if (sk->sk_prot->destroy)
3389		sk->sk_prot->destroy(sk);
3390
3391	/*
3392	 * Observation: when sk_common_release is called, processes have
3393	 * no access to the socket, but the network stack still does.
3394	 * Step one, detach it from networking:
3395	 *
3396	 * A. Remove from hash tables.
3397	 */
3398
3399	sk->sk_prot->unhash(sk);
3400
3401	/*
3402	 * At this point the socket cannot receive new packets, but it is possible
3403	 * that some packets are in flight because some CPU ran the receiver and
3404	 * did the hash table lookup before we unhashed the socket. They will reach
3405	 * the receive queue and be purged by the socket destructor.
3406	 *
3407	 * Also, we still have packets pending on the receive queue and probably
3408	 * our own packets waiting in device queues. sock_destroy will drain the
3409	 * receive queue, but transmitted packets will delay socket destruction
3410	 * until the last reference is released.
3411	 */
3412
3413	sock_orphan(sk);
3414
3415	xfrm_sk_free_policy(sk);
3416
3417	sk_refcnt_debug_release(sk);
3418
3419	sock_put(sk);
3420}
3421EXPORT_SYMBOL(sk_common_release);
3422
3423void sk_get_meminfo(const struct sock *sk, u32 *mem)
3424{
3425	memset(mem, 0, sizeof(*mem) * SK_MEMINFO_VARS);
3426
3427	mem[SK_MEMINFO_RMEM_ALLOC] = sk_rmem_alloc_get(sk);
3428	mem[SK_MEMINFO_RCVBUF] = READ_ONCE(sk->sk_rcvbuf);
3429	mem[SK_MEMINFO_WMEM_ALLOC] = sk_wmem_alloc_get(sk);
3430	mem[SK_MEMINFO_SNDBUF] = READ_ONCE(sk->sk_sndbuf);
3431	mem[SK_MEMINFO_FWD_ALLOC] = sk->sk_forward_alloc;
3432	mem[SK_MEMINFO_WMEM_QUEUED] = READ_ONCE(sk->sk_wmem_queued);
3433	mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);
3434	mem[SK_MEMINFO_BACKLOG] = READ_ONCE(sk->sk_backlog.len);
3435	mem[SK_MEMINFO_DROPS] = atomic_read(&sk->sk_drops);
3436}
3437
3438#ifdef CONFIG_PROC_FS
3439#define PROTO_INUSE_NR	64	/* should be enough for the first time */
3440struct prot_inuse {
3441	int val[PROTO_INUSE_NR];
3442};
3443
3444static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);
3445
3446void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
3447{
3448	__this_cpu_add(net->core.prot_inuse->val[prot->inuse_idx], val);
3449}
3450EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
3451
3452int sock_prot_inuse_get(struct net *net, struct proto *prot)
3453{
3454	int cpu, idx = prot->inuse_idx;
3455	int res = 0;
3456
3457	for_each_possible_cpu(cpu)
3458		res += per_cpu_ptr(net->core.prot_inuse, cpu)->val[idx];
3459
3460	return res >= 0 ? res : 0;
3461}
3462EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
3463
3464static void sock_inuse_add(struct net *net, int val)
3465{
3466	this_cpu_add(*net->core.sock_inuse, val);
3467}
3468
3469int sock_inuse_get(struct net *net)
3470{
3471	int cpu, res = 0;
3472
3473	for_each_possible_cpu(cpu)
3474		res += *per_cpu_ptr(net->core.sock_inuse, cpu);
3475
3476	return res;
3477}
3478
3479EXPORT_SYMBOL_GPL(sock_inuse_get);
3480
3481static int __net_init sock_inuse_init_net(struct net *net)
3482{
3483	net->core.prot_inuse = alloc_percpu(struct prot_inuse);
3484	if (net->core.prot_inuse == NULL)
3485		return -ENOMEM;
3486
3487	net->core.sock_inuse = alloc_percpu(int);
3488	if (net->core.sock_inuse == NULL)
3489		goto out;
3490
3491	return 0;
3492
3493out:
3494	free_percpu(net->core.prot_inuse);
3495	return -ENOMEM;
3496}
3497
3498static void __net_exit sock_inuse_exit_net(struct net *net)
3499{
3500	free_percpu(net->core.prot_inuse);
3501	free_percpu(net->core.sock_inuse);
3502}
3503
3504static struct pernet_operations net_inuse_ops = {
3505	.init = sock_inuse_init_net,
3506	.exit = sock_inuse_exit_net,
3507};
3508
3509static __init int net_inuse_init(void)
3510{
3511	if (register_pernet_subsys(&net_inuse_ops))
3512		panic("Cannot initialize net inuse counters");
3513
3514	return 0;
3515}
3516
3517core_initcall(net_inuse_init);
3518
3519static int assign_proto_idx(struct proto *prot)
3520{
3521	prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);
3522
3523	if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
3524		pr_err("PROTO_INUSE_NR exhausted\n");
3525		return -ENOSPC;
3526	}
3527
3528	set_bit(prot->inuse_idx, proto_inuse_idx);
3529	return 0;
3530}
3531
3532static void release_proto_idx(struct proto *prot)
3533{
3534	if (prot->inuse_idx != PROTO_INUSE_NR - 1)
3535		clear_bit(prot->inuse_idx, proto_inuse_idx);
3536}
3537#else
3538static inline int assign_proto_idx(struct proto *prot)
3539{
3540	return 0;
3541}
3542
3543static inline void release_proto_idx(struct proto *prot)
3544{
3545}
3546
3547static void sock_inuse_add(struct net *net, int val)
3548{
3549}
3550#endif
3551
3552static void tw_prot_cleanup(struct timewait_sock_ops *twsk_prot)
3553{
3554	if (!twsk_prot)
3555		return;
3556	kfree(twsk_prot->twsk_slab_name);
3557	twsk_prot->twsk_slab_name = NULL;
3558	kmem_cache_destroy(twsk_prot->twsk_slab);
3559	twsk_prot->twsk_slab = NULL;
3560}
3561
3562static int tw_prot_init(const struct proto *prot)
3563{
3564	struct timewait_sock_ops *twsk_prot = prot->twsk_prot;
3565
3566	if (!twsk_prot)
3567		return 0;
3568
3569	twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s",
3570					      prot->name);
3571	if (!twsk_prot->twsk_slab_name)
3572		return -ENOMEM;
3573
3574	twsk_prot->twsk_slab =
3575		kmem_cache_create(twsk_prot->twsk_slab_name,
3576				  twsk_prot->twsk_obj_size, 0,
3577				  SLAB_ACCOUNT | prot->slab_flags,
3578				  NULL);
3579	if (!twsk_prot->twsk_slab) {
3580		pr_crit("%s: Can't create timewait sock SLAB cache!\n",
3581			prot->name);
3582		return -ENOMEM;
3583	}
3584
3585	return 0;
3586}
3587
3588static void req_prot_cleanup(struct request_sock_ops *rsk_prot)
3589{
3590	if (!rsk_prot)
3591		return;
3592	kfree(rsk_prot->slab_name);
3593	rsk_prot->slab_name = NULL;
3594	kmem_cache_destroy(rsk_prot->slab);
3595	rsk_prot->slab = NULL;
3596}
3597
3598static int req_prot_init(const struct proto *prot)
3599{
3600	struct request_sock_ops *rsk_prot = prot->rsk_prot;
3601
3602	if (!rsk_prot)
3603		return 0;
3604
3605	rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s",
3606					prot->name);
3607	if (!rsk_prot->slab_name)
3608		return -ENOMEM;
3609
3610	rsk_prot->slab = kmem_cache_create(rsk_prot->slab_name,
3611					   rsk_prot->obj_size, 0,
3612					   SLAB_ACCOUNT | prot->slab_flags,
3613					   NULL);
3614
3615	if (!rsk_prot->slab) {
3616		pr_crit("%s: Can't create request sock SLAB cache!\n",
3617			prot->name);
3618		return -ENOMEM;
3619	}
3620	return 0;
3621}
3622
3623int proto_register(struct proto *prot, int alloc_slab)
3624{
3625	int ret = -ENOBUFS;
3626
3627	if (alloc_slab) {
3628		prot->slab = kmem_cache_create_usercopy(prot->name,
3629					prot->obj_size, 0,
3630					SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT |
3631					prot->slab_flags,
3632					prot->useroffset, prot->usersize,
3633					NULL);
3634
3635		if (prot->slab == NULL) {
3636			pr_crit("%s: Can't create sock SLAB cache!\n",
3637				prot->name);
3638			goto out;
3639		}
3640
3641		if (req_prot_init(prot))
3642			goto out_free_request_sock_slab;
3643
3644		if (tw_prot_init(prot))
3645			goto out_free_timewait_sock_slab;
3646	}
3647
3648	mutex_lock(&proto_list_mutex);
3649	ret = assign_proto_idx(prot);
3650	if (ret) {
3651		mutex_unlock(&proto_list_mutex);
3652		goto out_free_timewait_sock_slab;
3653	}
3654	list_add(&prot->node, &proto_list);
3655	mutex_unlock(&proto_list_mutex);
3656	return ret;
3657
3658out_free_timewait_sock_slab:
3659	if (alloc_slab)
3660		tw_prot_cleanup(prot->twsk_prot);
3661out_free_request_sock_slab:
3662	if (alloc_slab) {
3663		req_prot_cleanup(prot->rsk_prot);
3664
3665		kmem_cache_destroy(prot->slab);
3666		prot->slab = NULL;
3667	}
3668out:
3669	return ret;
3670}
3671EXPORT_SYMBOL(proto_register);
3672
3673void proto_unregister(struct proto *prot)
3674{
3675	mutex_lock(&proto_list_mutex);
3676	release_proto_idx(prot);
3677	list_del(&prot->node);
3678	mutex_unlock(&proto_list_mutex);
3679
3680	kmem_cache_destroy(prot->slab);
3681	prot->slab = NULL;
3682
3683	req_prot_cleanup(prot->rsk_prot);
3684	tw_prot_cleanup(prot->twsk_prot);
3685}
3686EXPORT_SYMBOL(proto_unregister);
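/*
 * Editor's illustrative sketch (not part of the original file): protocols
 * register their struct proto at module init and unregister it on exit;
 * alloc_slab == 1 asks proto_register() to create the per-protocol sock
 * slab (plus the request/timewait caches when provided). "my_proto" is a
 * hypothetical struct proto.
 *
 *	static int __init my_proto_init(void)
 *	{
 *		return proto_register(&my_proto, 1);
 *	}
 *
 *	static void __exit my_proto_exit(void)
 *	{
 *		proto_unregister(&my_proto);
 *	}
 */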
3687
3688int sock_load_diag_module(int family, int protocol)
3689{
3690	if (!protocol) {
3691		if (!sock_is_registered(family))
3692			return -ENOENT;
3693
3694		return request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
3695				      NETLINK_SOCK_DIAG, family);
3696	}
3697
3698#ifdef CONFIG_INET
3699	if (family == AF_INET &&
3700	    protocol != IPPROTO_RAW &&
3701	    protocol < MAX_INET_PROTOS &&
3702	    !rcu_access_pointer(inet_protos[protocol]))
3703		return -ENOENT;
3704#endif
3705
3706	return request_module("net-pf-%d-proto-%d-type-%d-%d", PF_NETLINK,
3707			      NETLINK_SOCK_DIAG, family, protocol);
3708}
3709EXPORT_SYMBOL(sock_load_diag_module);
3710
3711#ifdef CONFIG_PROC_FS
3712static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
3713	__acquires(proto_list_mutex)
3714{
3715	mutex_lock(&proto_list_mutex);
3716	return seq_list_start_head(&proto_list, *pos);
3717}
3718
3719static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3720{
3721	return seq_list_next(v, &proto_list, pos);
3722}
3723
3724static void proto_seq_stop(struct seq_file *seq, void *v)
3725	__releases(proto_list_mutex)
3726{
3727	mutex_unlock(&proto_list_mutex);
3728}
3729
3730static char proto_method_implemented(const void *method)
3731{
3732	return method == NULL ? 'n' : 'y';
3733}
3734static long sock_prot_memory_allocated(struct proto *proto)
3735{
3736	return proto->memory_allocated != NULL ? proto_memory_allocated(proto) : -1L;
3737}
3738
3739static const char *sock_prot_memory_pressure(struct proto *proto)
3740{
3741	return proto->memory_pressure != NULL ?
3742	proto_memory_pressure(proto) ? "yes" : "no" : "NI";
3743}
3744
3745static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
3746{
3747
3748	seq_printf(seq, "%-9s %4u %6d  %6ld   %-3s %6u   %-3s  %-10s "
3749			"%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
3750		   proto->name,
3751		   proto->obj_size,
3752		   sock_prot_inuse_get(seq_file_net(seq), proto),
3753		   sock_prot_memory_allocated(proto),
3754		   sock_prot_memory_pressure(proto),
3755		   proto->max_header,
3756		   proto->slab == NULL ? "no" : "yes",
3757		   module_name(proto->owner),
3758		   proto_method_implemented(proto->close),
3759		   proto_method_implemented(proto->connect),
3760		   proto_method_implemented(proto->disconnect),
3761		   proto_method_implemented(proto->accept),
3762		   proto_method_implemented(proto->ioctl),
3763		   proto_method_implemented(proto->init),
3764		   proto_method_implemented(proto->destroy),
3765		   proto_method_implemented(proto->shutdown),
3766		   proto_method_implemented(proto->setsockopt),
3767		   proto_method_implemented(proto->getsockopt),
3768		   proto_method_implemented(proto->sendmsg),
3769		   proto_method_implemented(proto->recvmsg),
3770		   proto_method_implemented(proto->sendpage),
3771		   proto_method_implemented(proto->bind),
3772		   proto_method_implemented(proto->backlog_rcv),
3773		   proto_method_implemented(proto->hash),
3774		   proto_method_implemented(proto->unhash),
3775		   proto_method_implemented(proto->get_port),
3776		   proto_method_implemented(proto->enter_memory_pressure));
3777}
3778
3779static int proto_seq_show(struct seq_file *seq, void *v)
3780{
3781	if (v == &proto_list)
3782		seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
3783			   "protocol",
3784			   "size",
3785			   "sockets",
3786			   "memory",
3787			   "press",
3788			   "maxhdr",
3789			   "slab",
3790			   "module",
3791			   "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
3792	else
3793		proto_seq_printf(seq, list_entry(v, struct proto, node));
3794	return 0;
3795}
3796
3797static const struct seq_operations proto_seq_ops = {
3798	.start  = proto_seq_start,
3799	.next   = proto_seq_next,
3800	.stop   = proto_seq_stop,
3801	.show   = proto_seq_show,
3802};
3803
3804static __net_init int proto_init_net(struct net *net)
3805{
3806	if (!proc_create_net("protocols", 0444, net->proc_net, &proto_seq_ops,
3807			sizeof(struct seq_net_private)))
3808		return -ENOMEM;
3809
3810	return 0;
3811}
3812
3813static __net_exit void proto_exit_net(struct net *net)
3814{
3815	remove_proc_entry("protocols", net->proc_net);
3816}
3817
3818
3819static __net_initdata struct pernet_operations proto_net_ops = {
3820	.init = proto_init_net,
3821	.exit = proto_exit_net,
3822};
3823
3824static int __init proto_init(void)
3825{
3826	return register_pernet_subsys(&proto_net_ops);
3827}
3828
3829subsys_initcall(proto_init);
3830
3831#endif /* PROC_FS */
3832
3833#ifdef CONFIG_NET_RX_BUSY_POLL
3834bool sk_busy_loop_end(void *p, unsigned long start_time)
3835{
3836	struct sock *sk = p;
3837
3838	return !skb_queue_empty_lockless(&sk->sk_receive_queue) ||
3839	       sk_busy_loop_timeout(sk, start_time);
3840}
3841EXPORT_SYMBOL(sk_busy_loop_end);
3842#endif /* CONFIG_NET_RX_BUSY_POLL */
3843
3844int sock_bind_add(struct sock *sk, struct sockaddr *addr, int addr_len)
3845{
3846	if (!sk->sk_prot->bind_add)
3847		return -EOPNOTSUPP;
3848	return sk->sk_prot->bind_add(sk, addr, addr_len);
3849}
3850EXPORT_SYMBOL(sock_bind_add);
v6.8
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * INET		An implementation of the TCP/IP protocol suite for the LINUX
   4 *		operating system.  INET is implemented using the  BSD Socket
   5 *		interface as the means of communication with the user level.
   6 *
   7 *		Generic socket support routines. Memory allocators, socket lock/release
   8 *		handler for protocols to use and generic option handler.
   9 *
  10 * Authors:	Ross Biro
  11 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  12 *		Florian La Roche, <flla@stud.uni-sb.de>
  13 *		Alan Cox, <A.Cox@swansea.ac.uk>
  14 *
  15 * Fixes:
  16 *		Alan Cox	: 	Numerous verify_area() problems
  17 *		Alan Cox	:	Connecting on a connecting socket
  18 *					now returns an error for tcp.
  19 *		Alan Cox	:	sock->protocol is set correctly.
  20 *					and is not sometimes left as 0.
  21 *		Alan Cox	:	connect handles icmp errors on a
  22 *					connect properly. Unfortunately there
  23 *					is a restart syscall nasty there. I
  24 *					can't match BSD without hacking the C
  25 *					library. Ideas urgently sought!
  26 *		Alan Cox	:	Disallow bind() to addresses that are
  27 *					not ours - especially broadcast ones!!
  28 *		Alan Cox	:	Socket 1024 _IS_ ok for users. (fencepost)
  29 *		Alan Cox	:	sock_wfree/sock_rfree don't destroy sockets,
  30 *					instead they leave that for the DESTROY timer.
  31 *		Alan Cox	:	Clean up error flag in accept
  32 *		Alan Cox	:	TCP ack handling is buggy, the DESTROY timer
  33 *					was buggy. Put a remove_sock() in the handler
  34 *					for memory when we hit 0. Also altered the timer
  35 *					code. The ACK stuff can wait and needs major
  36 *					TCP layer surgery.
  37 *		Alan Cox	:	Fixed TCP ack bug, removed remove sock
  38 *					and fixed timer/inet_bh race.
  39 *		Alan Cox	:	Added zapped flag for TCP
  40 *		Alan Cox	:	Move kfree_skb into skbuff.c and tidied up surplus code
  41 *		Alan Cox	:	for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
  42 *		Alan Cox	:	kfree_s calls now are kfree_skbmem so we can track skb resources
  43 *		Alan Cox	:	Supports socket option broadcast now as does udp. Packet and raw need fixing.
  44 *		Alan Cox	:	Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
  45 *		Rick Sladkey	:	Relaxed UDP rules for matching packets.
  46 *		C.E.Hawkins	:	IFF_PROMISC/SIOCGHWADDR support
  47 *	Pauline Middelink	:	identd support
  48 *		Alan Cox	:	Fixed connect() taking signals I think.
  49 *		Alan Cox	:	SO_LINGER supported
  50 *		Alan Cox	:	Error reporting fixes
  51 *		Anonymous	:	inet_create tidied up (sk->reuse setting)
  52 *		Alan Cox	:	inet sockets don't set sk->type!
  53 *		Alan Cox	:	Split socket option code
  54 *		Alan Cox	:	Callbacks
  55 *		Alan Cox	:	Nagle flag for Charles & Johannes stuff
  56 *		Alex		:	Removed restriction on inet fioctl
  57 *		Alan Cox	:	Splitting INET from NET core
  58 *		Alan Cox	:	Fixed bogus SO_TYPE handling in getsockopt()
  59 *		Adam Caldwell	:	Missing return in SO_DONTROUTE/SO_DEBUG code
  60 *		Alan Cox	:	Split IP from generic code
  61 *		Alan Cox	:	New kfree_skbmem()
  62 *		Alan Cox	:	Make SO_DEBUG superuser only.
  63 *		Alan Cox	:	Allow anyone to clear SO_DEBUG
  64 *					(compatibility fix)
  65 *		Alan Cox	:	Added optimistic memory grabbing for AF_UNIX throughput.
  66 *		Alan Cox	:	Allocator for a socket is settable.
  67 *		Alan Cox	:	SO_ERROR includes soft errors.
  68 *		Alan Cox	:	Allow NULL arguments on some SO_ opts
  69 *		Alan Cox	: 	Generic socket allocation to make hooks
  70 *					easier (suggested by Craig Metz).
  71 *		Michael Pall	:	SO_ERROR returns positive errno again
  72 *              Steve Whitehouse:       Added default destructor to free
  73 *                                      protocol private data.
  74 *              Steve Whitehouse:       Added various other default routines
  75 *                                      common to several socket families.
  76 *              Chris Evans     :       Call suser() check last on F_SETOWN
  77 *		Jay Schulist	:	Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
  78 *		Andi Kleen	:	Add sock_kmalloc()/sock_kfree_s()
  79 *		Andi Kleen	:	Fix write_space callback
  80 *		Chris Evans	:	Security fixes - signedness again
  81 *		Arnaldo C. Melo :       cleanups, use skb_queue_purge
  82 *
  83 * To Fix:
  84 */
  85
  86#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  87
  88#include <asm/unaligned.h>
  89#include <linux/capability.h>
  90#include <linux/errno.h>
  91#include <linux/errqueue.h>
  92#include <linux/types.h>
  93#include <linux/socket.h>
  94#include <linux/in.h>
  95#include <linux/kernel.h>
  96#include <linux/module.h>
  97#include <linux/proc_fs.h>
  98#include <linux/seq_file.h>
  99#include <linux/sched.h>
 100#include <linux/sched/mm.h>
 101#include <linux/timer.h>
 102#include <linux/string.h>
 103#include <linux/sockios.h>
 104#include <linux/net.h>
 105#include <linux/mm.h>
 106#include <linux/slab.h>
 107#include <linux/interrupt.h>
 108#include <linux/poll.h>
 109#include <linux/tcp.h>
 110#include <linux/udp.h>
 111#include <linux/init.h>
 112#include <linux/highmem.h>
 113#include <linux/user_namespace.h>
 114#include <linux/static_key.h>
 115#include <linux/memcontrol.h>
 116#include <linux/prefetch.h>
 117#include <linux/compat.h>
 118#include <linux/mroute.h>
 119#include <linux/mroute6.h>
 120#include <linux/icmpv6.h>
 121
 122#include <linux/uaccess.h>
 123
 124#include <linux/netdevice.h>
 125#include <net/protocol.h>
 126#include <linux/skbuff.h>
 127#include <net/net_namespace.h>
 128#include <net/request_sock.h>
 129#include <net/sock.h>
 130#include <linux/net_tstamp.h>
 131#include <net/xfrm.h>
 132#include <linux/ipsec.h>
 133#include <net/cls_cgroup.h>
 134#include <net/netprio_cgroup.h>
 135#include <linux/sock_diag.h>
 136
 137#include <linux/filter.h>
 138#include <net/sock_reuseport.h>
 139#include <net/bpf_sk_storage.h>
 140
 141#include <trace/events/sock.h>
 142
 143#include <net/tcp.h>
 144#include <net/busy_poll.h>
 145#include <net/phonet/phonet.h>
 146
 147#include <linux/ethtool.h>
 148
 149#include "dev.h"
 150
 151static DEFINE_MUTEX(proto_list_mutex);
 152static LIST_HEAD(proto_list);
 153
 154static void sock_def_write_space_wfree(struct sock *sk);
 155static void sock_def_write_space(struct sock *sk);
 156
 157/**
 158 * sk_ns_capable - General socket capability test
 159 * @sk: Socket to use a capability on or through
 160 * @user_ns: The user namespace of the capability to use
 161 * @cap: The capability to use
 162 *
 163 * Test to see if the opener of the socket had the capability @cap when
 164 * the socket was created and if the current process has the capability
 165 * @cap in the user namespace @user_ns.
 166 */
 167bool sk_ns_capable(const struct sock *sk,
 168		   struct user_namespace *user_ns, int cap)
 169{
 170	return file_ns_capable(sk->sk_socket->file, user_ns, cap) &&
 171		ns_capable(user_ns, cap);
 172}
 173EXPORT_SYMBOL(sk_ns_capable);
 174
 175/**
 176 * sk_capable - Socket global capability test
 177 * @sk: Socket to use a capability on or through
 178 * @cap: The global capability to use
 179 *
 180 * Test to see if the opener of the socket had the capability @cap when
 181 * the socket was created and if the current process has the capability
 182 * @cap in all user namespaces.
 183 */
 184bool sk_capable(const struct sock *sk, int cap)
 185{
 186	return sk_ns_capable(sk, &init_user_ns, cap);
 187}
 188EXPORT_SYMBOL(sk_capable);
 189
 190/**
 191 * sk_net_capable - Network namespace socket capability test
 192 * @sk: Socket to use a capability on or through
 193 * @cap: The capability to use
 194 *
 195 * Test to see if the opener of the socket had the capability @cap when the
 196 * socket was created and if the current process has the capability @cap over
 197 * the network namespace the socket is a member of.
 198 */
 199bool sk_net_capable(const struct sock *sk, int cap)
 200{
 201	return sk_ns_capable(sk, sock_net(sk)->user_ns, cap);
 202}
 203EXPORT_SYMBOL(sk_net_capable);
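/*
 * Illustrative sketch (not taken from this file): a protocol handler could
 * gate a privileged per-socket option on the capability helpers above. The
 * function name and the option semantics below are hypothetical.
 *
 *	static int hypothetical_set_priv_opt(struct sock *sk, int val)
 *	{
 *		if (!sk_net_capable(sk, CAP_NET_ADMIN))
 *			return -EPERM;
 *		WRITE_ONCE(sk->sk_priority, val);
 *		return 0;
 *	}
 */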
 204
 205/*
 206 * Each address family might have different locking rules, so we have
 207 * one slock key per address family and separate keys for internal and
 208 * userspace sockets.
 209 */
 210static struct lock_class_key af_family_keys[AF_MAX];
 211static struct lock_class_key af_family_kern_keys[AF_MAX];
 212static struct lock_class_key af_family_slock_keys[AF_MAX];
 213static struct lock_class_key af_family_kern_slock_keys[AF_MAX];
 214
 215/*
 216 * Make lock validator output more readable. (we pre-construct these
 217 * strings at build time, so that runtime initialization of socket
 218 * locks is fast):
 219 */
 220
 221#define _sock_locks(x)						  \
 222  x "AF_UNSPEC",	x "AF_UNIX"     ,	x "AF_INET"     , \
 223  x "AF_AX25"  ,	x "AF_IPX"      ,	x "AF_APPLETALK", \
 224  x "AF_NETROM",	x "AF_BRIDGE"   ,	x "AF_ATMPVC"   , \
 225  x "AF_X25"   ,	x "AF_INET6"    ,	x "AF_ROSE"     , \
 226  x "AF_DECnet",	x "AF_NETBEUI"  ,	x "AF_SECURITY" , \
 227  x "AF_KEY"   ,	x "AF_NETLINK"  ,	x "AF_PACKET"   , \
 228  x "AF_ASH"   ,	x "AF_ECONET"   ,	x "AF_ATMSVC"   , \
 229  x "AF_RDS"   ,	x "AF_SNA"      ,	x "AF_IRDA"     , \
 230  x "AF_PPPOX" ,	x "AF_WANPIPE"  ,	x "AF_LLC"      , \
 231  x "27"       ,	x "28"          ,	x "AF_CAN"      , \
 232  x "AF_TIPC"  ,	x "AF_BLUETOOTH",	x "IUCV"        , \
 233  x "AF_RXRPC" ,	x "AF_ISDN"     ,	x "AF_PHONET"   , \
 234  x "AF_IEEE802154",	x "AF_CAIF"	,	x "AF_ALG"      , \
 235  x "AF_NFC"   ,	x "AF_VSOCK"    ,	x "AF_KCM"      , \
 236  x "AF_QIPCRTR",	x "AF_SMC"	,	x "AF_XDP"	, \
 237  x "AF_MCTP"  , \
 238  x "AF_MAX"
 239
 240static const char *const af_family_key_strings[AF_MAX+1] = {
 241	_sock_locks("sk_lock-")
 242};
 243static const char *const af_family_slock_key_strings[AF_MAX+1] = {
 244	_sock_locks("slock-")
 245};
 246static const char *const af_family_clock_key_strings[AF_MAX+1] = {
 247	_sock_locks("clock-")
 248};
 249
 250static const char *const af_family_kern_key_strings[AF_MAX+1] = {
 251	_sock_locks("k-sk_lock-")
 252};
 253static const char *const af_family_kern_slock_key_strings[AF_MAX+1] = {
 254	_sock_locks("k-slock-")
 255};
 256static const char *const af_family_kern_clock_key_strings[AF_MAX+1] = {
 257	_sock_locks("k-clock-")
 258};
 259static const char *const af_family_rlock_key_strings[AF_MAX+1] = {
 260	_sock_locks("rlock-")
 261};
 262static const char *const af_family_wlock_key_strings[AF_MAX+1] = {
 263	_sock_locks("wlock-")
 264};
 265static const char *const af_family_elock_key_strings[AF_MAX+1] = {
 266	_sock_locks("elock-")
 267};
 268
 269/*
 270 * sk_callback_lock and sk queues locking rules are per-address-family,
 271 * so split the lock classes by using a per-AF key:
 272 */
 273static struct lock_class_key af_callback_keys[AF_MAX];
 274static struct lock_class_key af_rlock_keys[AF_MAX];
 275static struct lock_class_key af_wlock_keys[AF_MAX];
 276static struct lock_class_key af_elock_keys[AF_MAX];
 277static struct lock_class_key af_kern_callback_keys[AF_MAX];
 278
 279/* Run time adjustable parameters. */
 280__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
 281EXPORT_SYMBOL(sysctl_wmem_max);
 282__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
 283EXPORT_SYMBOL(sysctl_rmem_max);
 284__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
 285__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;
 286
 287int sysctl_tstamp_allow_data __read_mostly = 1;
 288
 289DEFINE_STATIC_KEY_FALSE(memalloc_socks_key);
 290EXPORT_SYMBOL_GPL(memalloc_socks_key);
 291
 292/**
 293 * sk_set_memalloc - sets %SOCK_MEMALLOC
 294 * @sk: socket to set it on
 295 *
 296 * Set %SOCK_MEMALLOC on a socket for access to emergency reserves.
 297 * It's the responsibility of the admin to adjust min_free_kbytes
 298 * to meet the requirements.
 299 */
 300void sk_set_memalloc(struct sock *sk)
 301{
 302	sock_set_flag(sk, SOCK_MEMALLOC);
 303	sk->sk_allocation |= __GFP_MEMALLOC;
 304	static_branch_inc(&memalloc_socks_key);
 305}
 306EXPORT_SYMBOL_GPL(sk_set_memalloc);
 307
 308void sk_clear_memalloc(struct sock *sk)
 309{
 310	sock_reset_flag(sk, SOCK_MEMALLOC);
 311	sk->sk_allocation &= ~__GFP_MEMALLOC;
 312	static_branch_dec(&memalloc_socks_key);
 313
 314	/*
 315	 * SOCK_MEMALLOC is allowed to ignore rmem limits to ensure forward
 316	 * progress of swapping. SOCK_MEMALLOC may be cleared while
 317	 * it has rmem allocations due to the last swapfile being deactivated
 318	 * but there is a risk that the socket is unusable due to exceeding
 319	 * the rmem limits. Reclaim the reserves and obey rmem limits again.
 320	 */
 321	sk_mem_reclaim(sk);
 322}
 323EXPORT_SYMBOL_GPL(sk_clear_memalloc);
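/*
 * Illustrative sketch (not taken from this file): the memalloc pair above is
 * intended for sockets that carry swap traffic, e.g. a hypothetical network
 * block device backing a swapfile. Such a driver would set the flag while
 * the device backs swap and clear it again after swapoff:
 *
 *	sk_set_memalloc(swap_sock->sk);
 *	sk_clear_memalloc(swap_sock->sk);
 */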
 324
 325int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 326{
 327	int ret;
 328	unsigned int noreclaim_flag;
 329
 330	/* these should have been dropped before queueing */
 331	BUG_ON(!sock_flag(sk, SOCK_MEMALLOC));
 332
 333	noreclaim_flag = memalloc_noreclaim_save();
 334	ret = INDIRECT_CALL_INET(sk->sk_backlog_rcv,
 335				 tcp_v6_do_rcv,
 336				 tcp_v4_do_rcv,
 337				 sk, skb);
 338	memalloc_noreclaim_restore(noreclaim_flag);
 339
 340	return ret;
 341}
 342EXPORT_SYMBOL(__sk_backlog_rcv);
 343
 344void sk_error_report(struct sock *sk)
 345{
 346	sk->sk_error_report(sk);
 347
 348	switch (sk->sk_family) {
 349	case AF_INET:
 350		fallthrough;
 351	case AF_INET6:
 352		trace_inet_sk_error_report(sk);
 353		break;
 354	default:
 355		break;
 356	}
 357}
 358EXPORT_SYMBOL(sk_error_report);
 359
 360int sock_get_timeout(long timeo, void *optval, bool old_timeval)
 361{
 362	struct __kernel_sock_timeval tv;
 363
 364	if (timeo == MAX_SCHEDULE_TIMEOUT) {
 365		tv.tv_sec = 0;
 366		tv.tv_usec = 0;
 367	} else {
 368		tv.tv_sec = timeo / HZ;
 369		tv.tv_usec = ((timeo % HZ) * USEC_PER_SEC) / HZ;
 370	}
 371
 372	if (old_timeval && in_compat_syscall() && !COMPAT_USE_64BIT_TIME) {
 373		struct old_timeval32 tv32 = { tv.tv_sec, tv.tv_usec };
 374		*(struct old_timeval32 *)optval = tv32;
 375		return sizeof(tv32);
 376	}
 377
 378	if (old_timeval) {
 379		struct __kernel_old_timeval old_tv;
 380		old_tv.tv_sec = tv.tv_sec;
 381		old_tv.tv_usec = tv.tv_usec;
 382		*(struct __kernel_old_timeval *)optval = old_tv;
 383		return sizeof(old_tv);
 384	}
 385
 386	*(struct __kernel_sock_timeval *)optval = tv;
 387	return sizeof(tv);
 388}
 389EXPORT_SYMBOL(sock_get_timeout);
 390
 391int sock_copy_user_timeval(struct __kernel_sock_timeval *tv,
 392			   sockptr_t optval, int optlen, bool old_timeval)
 393{
 394	if (old_timeval && in_compat_syscall() && !COMPAT_USE_64BIT_TIME) {
 395		struct old_timeval32 tv32;
 396
 397		if (optlen < sizeof(tv32))
 398			return -EINVAL;
 399
 400		if (copy_from_sockptr(&tv32, optval, sizeof(tv32)))
 401			return -EFAULT;
 402		tv->tv_sec = tv32.tv_sec;
 403		tv->tv_usec = tv32.tv_usec;
 404	} else if (old_timeval) {
 405		struct __kernel_old_timeval old_tv;
 406
 407		if (optlen < sizeof(old_tv))
 408			return -EINVAL;
 409		if (copy_from_sockptr(&old_tv, optval, sizeof(old_tv)))
 410			return -EFAULT;
 411		tv->tv_sec = old_tv.tv_sec;
 412		tv->tv_usec = old_tv.tv_usec;
 413	} else {
 414		if (optlen < sizeof(*tv))
 415			return -EINVAL;
 416		if (copy_from_sockptr(tv, optval, sizeof(*tv)))
 417			return -EFAULT;
 418	}
 419
 420	return 0;
 421}
 422EXPORT_SYMBOL(sock_copy_user_timeval);
 423
 424static int sock_set_timeout(long *timeo_p, sockptr_t optval, int optlen,
 425			    bool old_timeval)
 426{
 427	struct __kernel_sock_timeval tv;
 428	int err = sock_copy_user_timeval(&tv, optval, optlen, old_timeval);
 429	long val;
 430
 431	if (err)
 432		return err;
 433
 434	if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
 435		return -EDOM;
 436
 437	if (tv.tv_sec < 0) {
 438		static int warned __read_mostly;
 439
 440		WRITE_ONCE(*timeo_p, 0);
 441		if (warned < 10 && net_ratelimit()) {
 442			warned++;
 443			pr_info("%s: `%s' (pid %d) tries to set negative timeout\n",
 444				__func__, current->comm, task_pid_nr(current));
 445		}
 446		return 0;
 447	}
 448	val = MAX_SCHEDULE_TIMEOUT;
 449	if ((tv.tv_sec || tv.tv_usec) &&
 450	    (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT / HZ - 1)))
 451		val = tv.tv_sec * HZ + DIV_ROUND_UP((unsigned long)tv.tv_usec,
 452						    USEC_PER_SEC / HZ);
 453	WRITE_ONCE(*timeo_p, val);
 454	return 0;
 455}
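/*
 * Worked example of the conversion above (an illustrative note, assuming
 * HZ == 1000): a request of { tv_sec = 2, tv_usec = 500000 } gives
 * val = 2 * HZ + DIV_ROUND_UP(500000, USEC_PER_SEC / HZ) = 2000 + 500 = 2500
 * jiffies, while { 0, 0 } leaves the timeout at MAX_SCHEDULE_TIMEOUT,
 * i.e. "block indefinitely".
 */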
 456
 457static bool sock_needs_netstamp(const struct sock *sk)
 458{
 459	switch (sk->sk_family) {
 460	case AF_UNSPEC:
 461	case AF_UNIX:
 462		return false;
 463	default:
 464		return true;
 465	}
 466}
 467
 468static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
 469{
 470	if (sk->sk_flags & flags) {
 471		sk->sk_flags &= ~flags;
 472		if (sock_needs_netstamp(sk) &&
 473		    !(sk->sk_flags & SK_FLAGS_TIMESTAMP))
 474			net_disable_timestamp();
 475	}
 476}
 477
 478
 479int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 480{
 481	unsigned long flags;
 482	struct sk_buff_head *list = &sk->sk_receive_queue;
 483
 484	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
 485		atomic_inc(&sk->sk_drops);
 486		trace_sock_rcvqueue_full(sk, skb);
 487		return -ENOMEM;
 488	}
 489
 490	if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
 491		atomic_inc(&sk->sk_drops);
 492		return -ENOBUFS;
 493	}
 494
 495	skb->dev = NULL;
 496	skb_set_owner_r(skb, sk);
 497
 498	/* we escape from the RCU-protected region, make sure we don't leak
 499	 * a non-refcounted dst
 500	 */
 501	skb_dst_force(skb);
 502
 503	spin_lock_irqsave(&list->lock, flags);
 504	sock_skb_set_dropcount(sk, skb);
 505	__skb_queue_tail(list, skb);
 506	spin_unlock_irqrestore(&list->lock, flags);
 507
 508	if (!sock_flag(sk, SOCK_DEAD))
 509		sk->sk_data_ready(sk);
 510	return 0;
 511}
 512EXPORT_SYMBOL(__sock_queue_rcv_skb);
 513
 514int sock_queue_rcv_skb_reason(struct sock *sk, struct sk_buff *skb,
 515			      enum skb_drop_reason *reason)
 516{
 517	enum skb_drop_reason drop_reason;
 518	int err;
 519
 520	err = sk_filter(sk, skb);
 521	if (err) {
 522		drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
 523		goto out;
 524	}
 525	err = __sock_queue_rcv_skb(sk, skb);
 526	switch (err) {
 527	case -ENOMEM:
 528		drop_reason = SKB_DROP_REASON_SOCKET_RCVBUFF;
 529		break;
 530	case -ENOBUFS:
 531		drop_reason = SKB_DROP_REASON_PROTO_MEM;
 532		break;
 533	default:
 534		drop_reason = SKB_NOT_DROPPED_YET;
 535		break;
 536	}
 537out:
 538	if (reason)
 539		*reason = drop_reason;
 540	return err;
 541}
 542EXPORT_SYMBOL(sock_queue_rcv_skb_reason);
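/*
 * Illustrative sketch (not taken from this file): a datagram protocol's
 * receive path could feed the drop reason returned above straight into
 * kfree_skb_reason(); the caller name is hypothetical.
 *
 *	static int hypothetical_proto_rcv(struct sock *sk, struct sk_buff *skb)
 *	{
 *		enum skb_drop_reason reason;
 *		int err;
 *
 *		err = sock_queue_rcv_skb_reason(sk, skb, &reason);
 *		if (err < 0)
 *			kfree_skb_reason(skb, reason);
 *		return err;
 *	}
 */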
 543
 544int __sk_receive_skb(struct sock *sk, struct sk_buff *skb,
 545		     const int nested, unsigned int trim_cap, bool refcounted)
 546{
 547	int rc = NET_RX_SUCCESS;
 548
 549	if (sk_filter_trim_cap(sk, skb, trim_cap))
 550		goto discard_and_relse;
 551
 552	skb->dev = NULL;
 553
 554	if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
 555		atomic_inc(&sk->sk_drops);
 556		goto discard_and_relse;
 557	}
 558	if (nested)
 559		bh_lock_sock_nested(sk);
 560	else
 561		bh_lock_sock(sk);
 562	if (!sock_owned_by_user(sk)) {
 563		/*
 564		 * trylock + unlock semantics:
 565		 */
 566		mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);
 567
 568		rc = sk_backlog_rcv(sk, skb);
 569
 570		mutex_release(&sk->sk_lock.dep_map, _RET_IP_);
 571	} else if (sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf))) {
 572		bh_unlock_sock(sk);
 573		atomic_inc(&sk->sk_drops);
 574		goto discard_and_relse;
 575	}
 576
 577	bh_unlock_sock(sk);
 578out:
 579	if (refcounted)
 580		sock_put(sk);
 581	return rc;
 582discard_and_relse:
 583	kfree_skb(skb);
 584	goto out;
 585}
 586EXPORT_SYMBOL(__sk_receive_skb);
 587
 588INDIRECT_CALLABLE_DECLARE(struct dst_entry *ip6_dst_check(struct dst_entry *,
 589							  u32));
 590INDIRECT_CALLABLE_DECLARE(struct dst_entry *ipv4_dst_check(struct dst_entry *,
 591							   u32));
 592struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
 593{
 594	struct dst_entry *dst = __sk_dst_get(sk);
 595
 596	if (dst && dst->obsolete &&
 597	    INDIRECT_CALL_INET(dst->ops->check, ip6_dst_check, ipv4_dst_check,
 598			       dst, cookie) == NULL) {
 599		sk_tx_queue_clear(sk);
 600		WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
 601		RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
 602		dst_release(dst);
 603		return NULL;
 604	}
 605
 606	return dst;
 607}
 608EXPORT_SYMBOL(__sk_dst_check);
 609
 610struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
 611{
 612	struct dst_entry *dst = sk_dst_get(sk);
 613
 614	if (dst && dst->obsolete &&
 615	    INDIRECT_CALL_INET(dst->ops->check, ip6_dst_check, ipv4_dst_check,
 616			       dst, cookie) == NULL) {
 617		sk_dst_reset(sk);
 618		dst_release(dst);
 619		return NULL;
 620	}
 621
 622	return dst;
 623}
 624EXPORT_SYMBOL(sk_dst_check);
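/*
 * Illustrative sketch (not taken from this file): a transmit path typically
 * revalidates its cached route with sk_dst_check() and falls back to a fresh
 * lookup when the dst has become obsolete; "cookie" and the lookup helper
 * below are placeholders.
 *
 *	dst = sk_dst_check(sk, cookie);
 *	if (!dst) {
 *		dst = hypothetical_route_lookup(sk);
 *		if (!IS_ERR(dst))
 *			sk_dst_set(sk, dst);
 *	}
 */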
 625
 626static int sock_bindtoindex_locked(struct sock *sk, int ifindex)
 627{
 628	int ret = -ENOPROTOOPT;
 629#ifdef CONFIG_NETDEVICES
 630	struct net *net = sock_net(sk);
 631
 632	/* Sorry... */
 633	ret = -EPERM;
 634	if (sk->sk_bound_dev_if && !ns_capable(net->user_ns, CAP_NET_RAW))
 635		goto out;
 636
 637	ret = -EINVAL;
 638	if (ifindex < 0)
 639		goto out;
 640
 641	/* Paired with all READ_ONCE() done locklessly. */
 642	WRITE_ONCE(sk->sk_bound_dev_if, ifindex);
 643
 644	if (sk->sk_prot->rehash)
 645		sk->sk_prot->rehash(sk);
 646	sk_dst_reset(sk);
 647
 648	ret = 0;
 649
 650out:
 651#endif
 652
 653	return ret;
 654}
 655
 656int sock_bindtoindex(struct sock *sk, int ifindex, bool lock_sk)
 657{
 658	int ret;
 659
 660	if (lock_sk)
 661		lock_sock(sk);
 662	ret = sock_bindtoindex_locked(sk, ifindex);
 663	if (lock_sk)
 664		release_sock(sk);
 665
 666	return ret;
 667}
 668EXPORT_SYMBOL(sock_bindtoindex);
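/*
 * Illustrative sketch (not taken from this file): an in-kernel socket user
 * can pin its transport to one interface with the exported wrapper above;
 * the surrounding function is hypothetical.
 *
 *	static int hypothetical_bind_to_dev(struct socket *sock, int ifindex)
 *	{
 *		return sock_bindtoindex(sock->sk, ifindex, true);
 *	}
 */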
 669
 670static int sock_setbindtodevice(struct sock *sk, sockptr_t optval, int optlen)
 671{
 672	int ret = -ENOPROTOOPT;
 673#ifdef CONFIG_NETDEVICES
 674	struct net *net = sock_net(sk);
 675	char devname[IFNAMSIZ];
 676	int index;
 677
 678	ret = -EINVAL;
 679	if (optlen < 0)
 680		goto out;
 681
 682	/* Bind this socket to a particular device like "eth0",
 683	 * as specified in the passed interface name. If the
 684	 * name is "" or the option length is zero the socket
 685	 * is not bound.
 686	 */
 687	if (optlen > IFNAMSIZ - 1)
 688		optlen = IFNAMSIZ - 1;
 689	memset(devname, 0, sizeof(devname));
 690
 691	ret = -EFAULT;
 692	if (copy_from_sockptr(devname, optval, optlen))
 693		goto out;
 694
 695	index = 0;
 696	if (devname[0] != '\0') {
 697		struct net_device *dev;
 698
 699		rcu_read_lock();
 700		dev = dev_get_by_name_rcu(net, devname);
 701		if (dev)
 702			index = dev->ifindex;
 703		rcu_read_unlock();
 704		ret = -ENODEV;
 705		if (!dev)
 706			goto out;
 707	}
 708
 709	sockopt_lock_sock(sk);
 710	ret = sock_bindtoindex_locked(sk, index);
 711	sockopt_release_sock(sk);
 712out:
 713#endif
 714
 715	return ret;
 716}
 717
 718static int sock_getbindtodevice(struct sock *sk, sockptr_t optval,
 719				sockptr_t optlen, int len)
 720{
 721	int ret = -ENOPROTOOPT;
 722#ifdef CONFIG_NETDEVICES
 723	int bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
 724	struct net *net = sock_net(sk);
 725	char devname[IFNAMSIZ];
 726
 727	if (bound_dev_if == 0) {
 728		len = 0;
 729		goto zero;
 730	}
 731
 732	ret = -EINVAL;
 733	if (len < IFNAMSIZ)
 734		goto out;
 735
 736	ret = netdev_get_name(net, devname, bound_dev_if);
 737	if (ret)
 738		goto out;
 739
 740	len = strlen(devname) + 1;
 741
 742	ret = -EFAULT;
 743	if (copy_to_sockptr(optval, devname, len))
 744		goto out;
 745
 746zero:
 747	ret = -EFAULT;
 748	if (copy_to_sockptr(optlen, &len, sizeof(int)))
 749		goto out;
 750
 751	ret = 0;
 752
 753out:
 754#endif
 755
 756	return ret;
 757}
 758
 759bool sk_mc_loop(const struct sock *sk)
 760{
 761	if (dev_recursion_level())
 762		return false;
 763	if (!sk)
 764		return true;
 765	/* IPV6_ADDRFORM can change sk->sk_family under us. */
 766	switch (READ_ONCE(sk->sk_family)) {
 767	case AF_INET:
 768		return inet_test_bit(MC_LOOP, sk);
 769#if IS_ENABLED(CONFIG_IPV6)
 770	case AF_INET6:
 771		return inet6_test_bit(MC6_LOOP, sk);
 772#endif
 773	}
 774	WARN_ON_ONCE(1);
 775	return true;
 776}
 777EXPORT_SYMBOL(sk_mc_loop);
 778
 779void sock_set_reuseaddr(struct sock *sk)
 780{
 781	lock_sock(sk);
 782	sk->sk_reuse = SK_CAN_REUSE;
 783	release_sock(sk);
 784}
 785EXPORT_SYMBOL(sock_set_reuseaddr);
 786
 787void sock_set_reuseport(struct sock *sk)
 788{
 789	lock_sock(sk);
 790	sk->sk_reuseport = true;
 791	release_sock(sk);
 792}
 793EXPORT_SYMBOL(sock_set_reuseport);
 794
 795void sock_no_linger(struct sock *sk)
 796{
 797	lock_sock(sk);
 798	WRITE_ONCE(sk->sk_lingertime, 0);
 799	sock_set_flag(sk, SOCK_LINGER);
 800	release_sock(sk);
 801}
 802EXPORT_SYMBOL(sock_no_linger);
 803
 804void sock_set_priority(struct sock *sk, u32 priority)
 805{
 806	WRITE_ONCE(sk->sk_priority, priority);
 807}
 808EXPORT_SYMBOL(sock_set_priority);
 809
 810void sock_set_sndtimeo(struct sock *sk, s64 secs)
 811{
 812	lock_sock(sk);
 813	if (secs && secs < MAX_SCHEDULE_TIMEOUT / HZ - 1)
 814		WRITE_ONCE(sk->sk_sndtimeo, secs * HZ);
 815	else
 816		WRITE_ONCE(sk->sk_sndtimeo, MAX_SCHEDULE_TIMEOUT);
 817	release_sock(sk);
 818}
 819EXPORT_SYMBOL(sock_set_sndtimeo);
 820
 821static void __sock_set_timestamps(struct sock *sk, bool val, bool new, bool ns)
 822{
 823	if (val)  {
 824		sock_valbool_flag(sk, SOCK_TSTAMP_NEW, new);
 825		sock_valbool_flag(sk, SOCK_RCVTSTAMPNS, ns);
 826		sock_set_flag(sk, SOCK_RCVTSTAMP);
 827		sock_enable_timestamp(sk, SOCK_TIMESTAMP);
 828	} else {
 829		sock_reset_flag(sk, SOCK_RCVTSTAMP);
 830		sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
 831	}
 832}
 833
 834void sock_enable_timestamps(struct sock *sk)
 835{
 836	lock_sock(sk);
 837	__sock_set_timestamps(sk, true, false, true);
 838	release_sock(sk);
 839}
 840EXPORT_SYMBOL(sock_enable_timestamps);
 841
 842void sock_set_timestamp(struct sock *sk, int optname, bool valbool)
 843{
 844	switch (optname) {
 845	case SO_TIMESTAMP_OLD:
 846		__sock_set_timestamps(sk, valbool, false, false);
 847		break;
 848	case SO_TIMESTAMP_NEW:
 849		__sock_set_timestamps(sk, valbool, true, false);
 850		break;
 851	case SO_TIMESTAMPNS_OLD:
 852		__sock_set_timestamps(sk, valbool, false, true);
 853		break;
 854	case SO_TIMESTAMPNS_NEW:
 855		__sock_set_timestamps(sk, valbool, true, true);
 856		break;
 857	}
 858}
 859
 860static int sock_timestamping_bind_phc(struct sock *sk, int phc_index)
 861{
 862	struct net *net = sock_net(sk);
 863	struct net_device *dev = NULL;
 864	bool match = false;
 865	int *vclock_index;
 866	int i, num;
 867
 868	if (sk->sk_bound_dev_if)
 869		dev = dev_get_by_index(net, sk->sk_bound_dev_if);
 870
 871	if (!dev) {
 872		pr_err("%s: sock not bound to a device\n", __func__);
 873		return -EOPNOTSUPP;
 874	}
 875
 876	num = ethtool_get_phc_vclocks(dev, &vclock_index);
 877	dev_put(dev);
 878
 879	for (i = 0; i < num; i++) {
 880		if (*(vclock_index + i) == phc_index) {
 881			match = true;
 882			break;
 883		}
 884	}
 885
 886	if (num > 0)
 887		kfree(vclock_index);
 888
 889	if (!match)
 890		return -EINVAL;
 891
 892	WRITE_ONCE(sk->sk_bind_phc, phc_index);
 893
 894	return 0;
 895}
 896
 897int sock_set_timestamping(struct sock *sk, int optname,
 898			  struct so_timestamping timestamping)
 899{
 900	int val = timestamping.flags;
 901	int ret;
 902
 903	if (val & ~SOF_TIMESTAMPING_MASK)
 904		return -EINVAL;
 905
 906	if (val & SOF_TIMESTAMPING_OPT_ID_TCP &&
 907	    !(val & SOF_TIMESTAMPING_OPT_ID))
 908		return -EINVAL;
 909
 910	if (val & SOF_TIMESTAMPING_OPT_ID &&
 911	    !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) {
 912		if (sk_is_tcp(sk)) {
 913			if ((1 << sk->sk_state) &
 914			    (TCPF_CLOSE | TCPF_LISTEN))
 915				return -EINVAL;
 916			if (val & SOF_TIMESTAMPING_OPT_ID_TCP)
 917				atomic_set(&sk->sk_tskey, tcp_sk(sk)->write_seq);
 918			else
 919				atomic_set(&sk->sk_tskey, tcp_sk(sk)->snd_una);
 920		} else {
 921			atomic_set(&sk->sk_tskey, 0);
 922		}
 923	}
 924
 925	if (val & SOF_TIMESTAMPING_OPT_STATS &&
 926	    !(val & SOF_TIMESTAMPING_OPT_TSONLY))
 927		return -EINVAL;
 928
 929	if (val & SOF_TIMESTAMPING_BIND_PHC) {
 930		ret = sock_timestamping_bind_phc(sk, timestamping.bind_phc);
 931		if (ret)
 932			return ret;
 933	}
 934
 935	WRITE_ONCE(sk->sk_tsflags, val);
 936	sock_valbool_flag(sk, SOCK_TSTAMP_NEW, optname == SO_TIMESTAMPING_NEW);
 937
 938	if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
 939		sock_enable_timestamp(sk,
 940				      SOCK_TIMESTAMPING_RX_SOFTWARE);
 941	else
 942		sock_disable_timestamp(sk,
 943				       (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
 944	return 0;
 945}
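/*
 * Illustrative sketch (not taken from this file): user space reaches the
 * checks above via setsockopt(SOL_SOCKET, SO_TIMESTAMPING, ...). A minimal
 * request for software RX/TX timestamps could look like this (error handling
 * trimmed to a single perror()):
 *
 *	unsigned int flags = SOF_TIMESTAMPING_RX_SOFTWARE |
 *			     SOF_TIMESTAMPING_TX_SOFTWARE |
 *			     SOF_TIMESTAMPING_SOFTWARE;
 *
 *	if (setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &flags, sizeof(flags)))
 *		perror("SO_TIMESTAMPING");
 */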
 946
 947void sock_set_keepalive(struct sock *sk)
 948{
 949	lock_sock(sk);
 950	if (sk->sk_prot->keepalive)
 951		sk->sk_prot->keepalive(sk, true);
 952	sock_valbool_flag(sk, SOCK_KEEPOPEN, true);
 953	release_sock(sk);
 954}
 955EXPORT_SYMBOL(sock_set_keepalive);
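/*
 * Illustrative sketch (not taken from this file): the sock_set_*() helpers
 * above let in-kernel socket users tune a freshly created kernel socket
 * without going through setsockopt(); the consumer below is hypothetical.
 *
 *	static void hypothetical_tune_ksock(struct socket *sock)
 *	{
 *		sock_set_reuseaddr(sock->sk);
 *		sock_no_linger(sock->sk);
 *		sock_set_keepalive(sock->sk);
 *	}
 */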
 956
 957static void __sock_set_rcvbuf(struct sock *sk, int val)
 958{
 959	/* Ensure val * 2 fits into an int, to prevent max_t() from treating it
 960	 * as a negative value.
 961	 */
 962	val = min_t(int, val, INT_MAX / 2);
 963	sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
 964
 965	/* We double it on the way in to account for "struct sk_buff" etc.
 966	 * overhead.   Applications assume that the SO_RCVBUF setting they make
 967	 * will allow that much actual data to be received on that socket.
 968	 *
 969	 * Applications are unaware that "struct sk_buff" and other overheads
 970	 * allocate from the receive buffer during socket buffer allocation.
 971	 *
 972	 * And after considering the possible alternatives, returning the value
 973	 * we actually used in getsockopt is the most desirable behavior.
 974	 */
 975	WRITE_ONCE(sk->sk_rcvbuf, max_t(int, val * 2, SOCK_MIN_RCVBUF));
 976}
 977
 978void sock_set_rcvbuf(struct sock *sk, int val)
 979{
 980	lock_sock(sk);
 981	__sock_set_rcvbuf(sk, val);
 982	release_sock(sk);
 983}
 984EXPORT_SYMBOL(sock_set_rcvbuf);
 985
 986static void __sock_set_mark(struct sock *sk, u32 val)
 987{
 988	if (val != sk->sk_mark) {
 989		WRITE_ONCE(sk->sk_mark, val);
 990		sk_dst_reset(sk);
 991	}
 992}
 993
 994void sock_set_mark(struct sock *sk, u32 val)
 995{
 996	lock_sock(sk);
 997	__sock_set_mark(sk, val);
 998	release_sock(sk);
 999}
1000EXPORT_SYMBOL(sock_set_mark);
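/*
 * Illustrative sketch (not taken from this file): a tunnel driver could tag
 * its underlying socket with sock_set_mark() so that policy routing can
 * match the encapsulated traffic; the names below are hypothetical.
 *
 *	sock_set_mark(tunnel->sock->sk, tunnel->cfg.fwmark);
 */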
1001
1002static void sock_release_reserved_memory(struct sock *sk, int bytes)
1003{
1004	/* Round down bytes to multiple of pages */
1005	bytes = round_down(bytes, PAGE_SIZE);
1006
1007	WARN_ON(bytes > sk->sk_reserved_mem);
1008	WRITE_ONCE(sk->sk_reserved_mem, sk->sk_reserved_mem - bytes);
1009	sk_mem_reclaim(sk);
1010}
1011
1012static int sock_reserve_memory(struct sock *sk, int bytes)
1013{
1014	long allocated;
1015	bool charged;
1016	int pages;
1017
1018	if (!mem_cgroup_sockets_enabled || !sk->sk_memcg || !sk_has_account(sk))
1019		return -EOPNOTSUPP;
1020
1021	if (!bytes)
1022		return 0;
1023
1024	pages = sk_mem_pages(bytes);
1025
1026	/* pre-charge to memcg */
1027	charged = mem_cgroup_charge_skmem(sk->sk_memcg, pages,
1028					  GFP_KERNEL | __GFP_RETRY_MAYFAIL);
1029	if (!charged)
1030		return -ENOMEM;
1031
1032	/* pre-charge to forward_alloc */
1033	sk_memory_allocated_add(sk, pages);
1034	allocated = sk_memory_allocated(sk);
1035	/* If the system goes into memory pressure with this
1036	 * precharge, give up and return error.
1037	 */
1038	if (allocated > sk_prot_mem_limits(sk, 1)) {
1039		sk_memory_allocated_sub(sk, pages);
1040		mem_cgroup_uncharge_skmem(sk->sk_memcg, pages);
1041		return -ENOMEM;
1042	}
1043	sk_forward_alloc_add(sk, pages << PAGE_SHIFT);
1044
1045	WRITE_ONCE(sk->sk_reserved_mem,
1046		   sk->sk_reserved_mem + (pages << PAGE_SHIFT));
1047
1048	return 0;
1049}
1050
1051void sockopt_lock_sock(struct sock *sk)
1052{
1053	/* When current->bpf_ctx is set, the setsockopt is called from
1054	 * a bpf prog.  bpf has ensured the sk lock has been
1055	 * acquired before calling setsockopt().
1056	 */
1057	if (has_current_bpf_ctx())
1058		return;
1059
1060	lock_sock(sk);
1061}
1062EXPORT_SYMBOL(sockopt_lock_sock);
1063
1064void sockopt_release_sock(struct sock *sk)
1065{
1066	if (has_current_bpf_ctx())
1067		return;
1068
1069	release_sock(sk);
1070}
1071EXPORT_SYMBOL(sockopt_release_sock);
1072
1073bool sockopt_ns_capable(struct user_namespace *ns, int cap)
1074{
1075	return has_current_bpf_ctx() || ns_capable(ns, cap);
1076}
1077EXPORT_SYMBOL(sockopt_ns_capable);
1078
1079bool sockopt_capable(int cap)
1080{
1081	return has_current_bpf_ctx() || capable(cap);
1082}
1083EXPORT_SYMBOL(sockopt_capable);
1084
1085/*
1086 *	This is meant for all protocols to use and covers goings on
1087 *	at the socket level. Everything here is generic.
1088 */
1089
1090int sk_setsockopt(struct sock *sk, int level, int optname,
1091		  sockptr_t optval, unsigned int optlen)
1092{
1093	struct so_timestamping timestamping;
1094	struct socket *sock = sk->sk_socket;
1095	struct sock_txtime sk_txtime;
1096	int val;
1097	int valbool;
1098	struct linger ling;
1099	int ret = 0;
1100
1101	/*
1102	 *	Options without arguments
1103	 */
1104
1105	if (optname == SO_BINDTODEVICE)
1106		return sock_setbindtodevice(sk, optval, optlen);
1107
1108	if (optlen < sizeof(int))
1109		return -EINVAL;
1110
1111	if (copy_from_sockptr(&val, optval, sizeof(val)))
1112		return -EFAULT;
1113
1114	valbool = val ? 1 : 0;
1115
1116	/* handle options which do not require locking the socket. */
1117	switch (optname) {
1118	case SO_PRIORITY:
1119		if ((val >= 0 && val <= 6) ||
1120		    sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) ||
1121		    sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
1122			sock_set_priority(sk, val);
1123			return 0;
1124		}
1125		return -EPERM;
1126	case SO_PASSSEC:
1127		assign_bit(SOCK_PASSSEC, &sock->flags, valbool);
1128		return 0;
1129	case SO_PASSCRED:
1130		assign_bit(SOCK_PASSCRED, &sock->flags, valbool);
1131		return 0;
1132	case SO_PASSPIDFD:
1133		assign_bit(SOCK_PASSPIDFD, &sock->flags, valbool);
1134		return 0;
1135	case SO_TYPE:
1136	case SO_PROTOCOL:
1137	case SO_DOMAIN:
1138	case SO_ERROR:
1139		return -ENOPROTOOPT;
1140#ifdef CONFIG_NET_RX_BUSY_POLL
1141	case SO_BUSY_POLL:
1142		if (val < 0)
1143			return -EINVAL;
1144		WRITE_ONCE(sk->sk_ll_usec, val);
1145		return 0;
1146	case SO_PREFER_BUSY_POLL:
1147		if (valbool && !sockopt_capable(CAP_NET_ADMIN))
1148			return -EPERM;
1149		WRITE_ONCE(sk->sk_prefer_busy_poll, valbool);
1150		return 0;
1151	case SO_BUSY_POLL_BUDGET:
1152		if (val > READ_ONCE(sk->sk_busy_poll_budget) &&
1153		    !sockopt_capable(CAP_NET_ADMIN))
1154			return -EPERM;
1155		if (val < 0 || val > U16_MAX)
1156			return -EINVAL;
1157		WRITE_ONCE(sk->sk_busy_poll_budget, val);
1158		return 0;
1159#endif
1160	case SO_MAX_PACING_RATE:
1161		{
1162		unsigned long ulval = (val == ~0U) ? ~0UL : (unsigned int)val;
1163		unsigned long pacing_rate;
1164
1165		if (sizeof(ulval) != sizeof(val) &&
1166		    optlen >= sizeof(ulval) &&
1167		    copy_from_sockptr(&ulval, optval, sizeof(ulval))) {
1168			return -EFAULT;
1169		}
1170		if (ulval != ~0UL)
1171			cmpxchg(&sk->sk_pacing_status,
1172				SK_PACING_NONE,
1173				SK_PACING_NEEDED);
1174		/* Pairs with READ_ONCE() from sk_getsockopt() */
1175		WRITE_ONCE(sk->sk_max_pacing_rate, ulval);
1176		pacing_rate = READ_ONCE(sk->sk_pacing_rate);
1177		if (ulval < pacing_rate)
1178			WRITE_ONCE(sk->sk_pacing_rate, ulval);
1179		return 0;
1180		}
1181	case SO_TXREHASH:
1182		if (val < -1 || val > 1)
1183			return -EINVAL;
1184		if ((u8)val == SOCK_TXREHASH_DEFAULT)
1185			val = READ_ONCE(sock_net(sk)->core.sysctl_txrehash);
1186		/* Paired with READ_ONCE() in tcp_rtx_synack()
1187		 * and sk_getsockopt().
1188		 */
1189		WRITE_ONCE(sk->sk_txrehash, (u8)val);
1190		return 0;
1191	case SO_PEEK_OFF:
1192		{
1193		int (*set_peek_off)(struct sock *sk, int val);
1194
1195		set_peek_off = READ_ONCE(sock->ops)->set_peek_off;
1196		if (set_peek_off)
1197			ret = set_peek_off(sk, val);
1198		else
1199			ret = -EOPNOTSUPP;
1200		return ret;
1201		}
1202	}
1203
1204	sockopt_lock_sock(sk);
1205
1206	switch (optname) {
1207	case SO_DEBUG:
1208		if (val && !sockopt_capable(CAP_NET_ADMIN))
1209			ret = -EACCES;
1210		else
1211			sock_valbool_flag(sk, SOCK_DBG, valbool);
1212		break;
1213	case SO_REUSEADDR:
1214		sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
1215		break;
1216	case SO_REUSEPORT:
1217		sk->sk_reuseport = valbool;
1218		break;
1219	case SO_DONTROUTE:
1220		sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
1221		sk_dst_reset(sk);
1222		break;
1223	case SO_BROADCAST:
1224		sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
1225		break;
1226	case SO_SNDBUF:
1227		/* Don't error on this; BSD doesn't, and if you think
1228		 * about it this is right. Otherwise apps have to
1229		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
1230		 * are treated in BSD as hints
1231		 */
1232		val = min_t(u32, val, READ_ONCE(sysctl_wmem_max));
1233set_sndbuf:
1234		/* Ensure val * 2 fits into an int, to prevent max_t()
1235		 * from treating it as a negative value.
1236		 */
1237		val = min_t(int, val, INT_MAX / 2);
1238		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
1239		WRITE_ONCE(sk->sk_sndbuf,
1240			   max_t(int, val * 2, SOCK_MIN_SNDBUF));
1241		/* Wake up sending tasks if we upped the value. */
1242		sk->sk_write_space(sk);
1243		break;
1244
1245	case SO_SNDBUFFORCE:
1246		if (!sockopt_capable(CAP_NET_ADMIN)) {
1247			ret = -EPERM;
1248			break;
1249		}
1250
1251		/* No negative values (to prevent underflow, as val will be
1252		 * multiplied by 2).
1253		 */
1254		if (val < 0)
1255			val = 0;
1256		goto set_sndbuf;
1257
1258	case SO_RCVBUF:
1259		/* Don't error on this; BSD doesn't, and if you think
1260		 * about it this is right. Otherwise apps have to
1261		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
1262		 * are treated in BSD as hints
1263		 */
1264		__sock_set_rcvbuf(sk, min_t(u32, val, READ_ONCE(sysctl_rmem_max)));
1265		break;
1266
1267	case SO_RCVBUFFORCE:
1268		if (!sockopt_capable(CAP_NET_ADMIN)) {
1269			ret = -EPERM;
1270			break;
1271		}
1272
1273		/* No negative values (to prevent underflow, as val will be
1274		 * multiplied by 2).
1275		 */
1276		__sock_set_rcvbuf(sk, max(val, 0));
1277		break;
1278
1279	case SO_KEEPALIVE:
1280		if (sk->sk_prot->keepalive)
1281			sk->sk_prot->keepalive(sk, valbool);
1282		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
1283		break;
1284
1285	case SO_OOBINLINE:
1286		sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
1287		break;
1288
1289	case SO_NO_CHECK:
1290		sk->sk_no_check_tx = valbool;
1291		break;
1292
1293	case SO_LINGER:
1294		if (optlen < sizeof(ling)) {
1295			ret = -EINVAL;	/* 1003.1g */
1296			break;
1297		}
1298		if (copy_from_sockptr(&ling, optval, sizeof(ling))) {
1299			ret = -EFAULT;
1300			break;
1301		}
1302		if (!ling.l_onoff) {
1303			sock_reset_flag(sk, SOCK_LINGER);
1304		} else {
1305			unsigned long t_sec = ling.l_linger;
1306
1307			if (t_sec >= MAX_SCHEDULE_TIMEOUT / HZ)
1308				WRITE_ONCE(sk->sk_lingertime, MAX_SCHEDULE_TIMEOUT);
1309			else
1310				WRITE_ONCE(sk->sk_lingertime, t_sec * HZ);
1311			sock_set_flag(sk, SOCK_LINGER);
1312		}
1313		break;
1314
1315	case SO_BSDCOMPAT:
1316		break;
1317
1318	case SO_TIMESTAMP_OLD:
1319	case SO_TIMESTAMP_NEW:
1320	case SO_TIMESTAMPNS_OLD:
1321	case SO_TIMESTAMPNS_NEW:
1322		sock_set_timestamp(sk, optname, valbool);
1323		break;
1324
1325	case SO_TIMESTAMPING_NEW:
1326	case SO_TIMESTAMPING_OLD:
1327		if (optlen == sizeof(timestamping)) {
1328			if (copy_from_sockptr(&timestamping, optval,
1329					      sizeof(timestamping))) {
1330				ret = -EFAULT;
1331				break;
1332			}
1333		} else {
1334			memset(&timestamping, 0, sizeof(timestamping));
1335			timestamping.flags = val;
1336		}
1337		ret = sock_set_timestamping(sk, optname, timestamping);
1338		break;
1339
1340	case SO_RCVLOWAT:
1341		{
1342		int (*set_rcvlowat)(struct sock *sk, int val) = NULL;
1343
1344		if (val < 0)
1345			val = INT_MAX;
1346		if (sock)
1347			set_rcvlowat = READ_ONCE(sock->ops)->set_rcvlowat;
1348		if (set_rcvlowat)
1349			ret = set_rcvlowat(sk, val);
1350		else
1351			WRITE_ONCE(sk->sk_rcvlowat, val ? : 1);
1352		break;
1353		}
1354	case SO_RCVTIMEO_OLD:
1355	case SO_RCVTIMEO_NEW:
1356		ret = sock_set_timeout(&sk->sk_rcvtimeo, optval,
1357				       optlen, optname == SO_RCVTIMEO_OLD);
1358		break;
1359
1360	case SO_SNDTIMEO_OLD:
1361	case SO_SNDTIMEO_NEW:
1362		ret = sock_set_timeout(&sk->sk_sndtimeo, optval,
1363				       optlen, optname == SO_SNDTIMEO_OLD);
1364		break;
1365
1366	case SO_ATTACH_FILTER: {
1367		struct sock_fprog fprog;
1368
1369		ret = copy_bpf_fprog_from_user(&fprog, optval, optlen);
1370		if (!ret)
1371			ret = sk_attach_filter(&fprog, sk);
1372		break;
1373	}
1374	case SO_ATTACH_BPF:
1375		ret = -EINVAL;
1376		if (optlen == sizeof(u32)) {
1377			u32 ufd;
1378
1379			ret = -EFAULT;
1380			if (copy_from_sockptr(&ufd, optval, sizeof(ufd)))
1381				break;
1382
1383			ret = sk_attach_bpf(ufd, sk);
1384		}
1385		break;
1386
1387	case SO_ATTACH_REUSEPORT_CBPF: {
1388		struct sock_fprog fprog;
1389
1390		ret = copy_bpf_fprog_from_user(&fprog, optval, optlen);
1391		if (!ret)
1392			ret = sk_reuseport_attach_filter(&fprog, sk);
1393		break;
1394	}
1395	case SO_ATTACH_REUSEPORT_EBPF:
1396		ret = -EINVAL;
1397		if (optlen == sizeof(u32)) {
1398			u32 ufd;
1399
1400			ret = -EFAULT;
1401			if (copy_from_sockptr(&ufd, optval, sizeof(ufd)))
1402				break;
1403
1404			ret = sk_reuseport_attach_bpf(ufd, sk);
1405		}
1406		break;
1407
1408	case SO_DETACH_REUSEPORT_BPF:
1409		ret = reuseport_detach_prog(sk);
1410		break;
1411
1412	case SO_DETACH_FILTER:
1413		ret = sk_detach_filter(sk);
1414		break;
1415
1416	case SO_LOCK_FILTER:
1417		if (sock_flag(sk, SOCK_FILTER_LOCKED) && !valbool)
1418			ret = -EPERM;
1419		else
1420			sock_valbool_flag(sk, SOCK_FILTER_LOCKED, valbool);
1421		break;
1422
1423	case SO_MARK:
1424		if (!sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) &&
1425		    !sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
1426			ret = -EPERM;
1427			break;
1428		}
1429
1430		__sock_set_mark(sk, val);
1431		break;
1432	case SO_RCVMARK:
1433		sock_valbool_flag(sk, SOCK_RCVMARK, valbool);
1434		break;
1435
1436	case SO_RXQ_OVFL:
1437		sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
1438		break;
1439
1440	case SO_WIFI_STATUS:
1441		sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
1442		break;
1443
1444	case SO_NOFCS:
1445		sock_valbool_flag(sk, SOCK_NOFCS, valbool);
1446		break;
1447
1448	case SO_SELECT_ERR_QUEUE:
1449		sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool);
1450		break;
1451
1452
1453	case SO_INCOMING_CPU:
1454		reuseport_update_incoming_cpu(sk, val);
1455		break;
1456
1457	case SO_CNX_ADVICE:
1458		if (val == 1)
1459			dst_negative_advice(sk);
1460		break;
1461
1462	case SO_ZEROCOPY:
1463		if (sk->sk_family == PF_INET || sk->sk_family == PF_INET6) {
1464			if (!(sk_is_tcp(sk) ||
1465			      (sk->sk_type == SOCK_DGRAM &&
1466			       sk->sk_protocol == IPPROTO_UDP)))
1467				ret = -EOPNOTSUPP;
1468		} else if (sk->sk_family != PF_RDS) {
1469			ret = -EOPNOTSUPP;
1470		}
1471		if (!ret) {
1472			if (val < 0 || val > 1)
1473				ret = -EINVAL;
1474			else
1475				sock_valbool_flag(sk, SOCK_ZEROCOPY, valbool);
1476		}
1477		break;
1478
1479	case SO_TXTIME:
1480		if (optlen != sizeof(struct sock_txtime)) {
1481			ret = -EINVAL;
1482			break;
1483		} else if (copy_from_sockptr(&sk_txtime, optval,
1484			   sizeof(struct sock_txtime))) {
1485			ret = -EFAULT;
1486			break;
1487		} else if (sk_txtime.flags & ~SOF_TXTIME_FLAGS_MASK) {
1488			ret = -EINVAL;
1489			break;
1490		}
1491		/* CLOCK_MONOTONIC is only used by sch_fq, and this packet
1492		 * scheduler has enough safeguards.
1493		 */
1494		if (sk_txtime.clockid != CLOCK_MONOTONIC &&
1495		    !sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
1496			ret = -EPERM;
1497			break;
1498		}
1499		sock_valbool_flag(sk, SOCK_TXTIME, true);
1500		sk->sk_clockid = sk_txtime.clockid;
1501		sk->sk_txtime_deadline_mode =
1502			!!(sk_txtime.flags & SOF_TXTIME_DEADLINE_MODE);
1503		sk->sk_txtime_report_errors =
1504			!!(sk_txtime.flags & SOF_TXTIME_REPORT_ERRORS);
1505		break;
1506
1507	case SO_BINDTOIFINDEX:
1508		ret = sock_bindtoindex_locked(sk, val);
1509		break;
1510
1511	case SO_BUF_LOCK:
1512		if (val & ~SOCK_BUF_LOCK_MASK) {
1513			ret = -EINVAL;
1514			break;
1515		}
1516		sk->sk_userlocks = val | (sk->sk_userlocks &
1517					  ~SOCK_BUF_LOCK_MASK);
1518		break;
1519
1520	case SO_RESERVE_MEM:
1521	{
1522		int delta;
1523
1524		if (val < 0) {
1525			ret = -EINVAL;
1526			break;
1527		}
1528
1529		delta = val - sk->sk_reserved_mem;
1530		if (delta < 0)
1531			sock_release_reserved_memory(sk, -delta);
1532		else
1533			ret = sock_reserve_memory(sk, delta);
1534		break;
1535	}
1536
1537	default:
1538		ret = -ENOPROTOOPT;
1539		break;
1540	}
1541	sockopt_release_sock(sk);
1542	return ret;
1543}
1544
1545int sock_setsockopt(struct socket *sock, int level, int optname,
1546		    sockptr_t optval, unsigned int optlen)
1547{
1548	return sk_setsockopt(sock->sk, level, optname,
1549			     optval, optlen);
1550}
1551EXPORT_SYMBOL(sock_setsockopt);
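/*
 * Illustrative sketch (not taken from this file): the SO_SNDBUF/SO_RCVBUF
 * doubling above is visible from user space, where getsockopt() reports
 * roughly twice the requested value (subject to the sysctl limits):
 *
 *	int req = 65536, got;
 *	socklen_t len = sizeof(got);
 *
 *	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &req, sizeof(req));
 *	getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &got, &len);
 *
 * got is then typically 131072, i.e. 2 * req.
 */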
1552
1553static const struct cred *sk_get_peer_cred(struct sock *sk)
1554{
1555	const struct cred *cred;
1556
1557	spin_lock(&sk->sk_peer_lock);
1558	cred = get_cred(sk->sk_peer_cred);
1559	spin_unlock(&sk->sk_peer_lock);
1560
1561	return cred;
1562}
1563
1564static void cred_to_ucred(struct pid *pid, const struct cred *cred,
1565			  struct ucred *ucred)
1566{
1567	ucred->pid = pid_vnr(pid);
1568	ucred->uid = ucred->gid = -1;
1569	if (cred) {
1570		struct user_namespace *current_ns = current_user_ns();
1571
1572		ucred->uid = from_kuid_munged(current_ns, cred->euid);
1573		ucred->gid = from_kgid_munged(current_ns, cred->egid);
1574	}
1575}
1576
1577static int groups_to_user(sockptr_t dst, const struct group_info *src)
1578{
1579	struct user_namespace *user_ns = current_user_ns();
1580	int i;
1581
1582	for (i = 0; i < src->ngroups; i++) {
1583		gid_t gid = from_kgid_munged(user_ns, src->gid[i]);
1584
1585		if (copy_to_sockptr_offset(dst, i * sizeof(gid), &gid, sizeof(gid)))
1586			return -EFAULT;
1587	}
1588
1589	return 0;
1590}
1591
1592int sk_getsockopt(struct sock *sk, int level, int optname,
1593		  sockptr_t optval, sockptr_t optlen)
1594{
1595	struct socket *sock = sk->sk_socket;
1596
1597	union {
1598		int val;
1599		u64 val64;
1600		unsigned long ulval;
1601		struct linger ling;
1602		struct old_timeval32 tm32;
1603		struct __kernel_old_timeval tm;
1604		struct  __kernel_sock_timeval stm;
1605		struct sock_txtime txtime;
1606		struct so_timestamping timestamping;
1607	} v;
1608
1609	int lv = sizeof(int);
1610	int len;
1611
1612	if (copy_from_sockptr(&len, optlen, sizeof(int)))
1613		return -EFAULT;
1614	if (len < 0)
1615		return -EINVAL;
1616
1617	memset(&v, 0, sizeof(v));
1618
1619	switch (optname) {
1620	case SO_DEBUG:
1621		v.val = sock_flag(sk, SOCK_DBG);
1622		break;
1623
1624	case SO_DONTROUTE:
1625		v.val = sock_flag(sk, SOCK_LOCALROUTE);
1626		break;
1627
1628	case SO_BROADCAST:
1629		v.val = sock_flag(sk, SOCK_BROADCAST);
1630		break;
1631
1632	case SO_SNDBUF:
1633		v.val = READ_ONCE(sk->sk_sndbuf);
1634		break;
1635
1636	case SO_RCVBUF:
1637		v.val = READ_ONCE(sk->sk_rcvbuf);
1638		break;
1639
1640	case SO_REUSEADDR:
1641		v.val = sk->sk_reuse;
1642		break;
1643
1644	case SO_REUSEPORT:
1645		v.val = sk->sk_reuseport;
1646		break;
1647
1648	case SO_KEEPALIVE:
1649		v.val = sock_flag(sk, SOCK_KEEPOPEN);
1650		break;
1651
1652	case SO_TYPE:
1653		v.val = sk->sk_type;
1654		break;
1655
1656	case SO_PROTOCOL:
1657		v.val = sk->sk_protocol;
1658		break;
1659
1660	case SO_DOMAIN:
1661		v.val = sk->sk_family;
1662		break;
1663
1664	case SO_ERROR:
1665		v.val = -sock_error(sk);
1666		if (v.val == 0)
1667			v.val = xchg(&sk->sk_err_soft, 0);
1668		break;
1669
1670	case SO_OOBINLINE:
1671		v.val = sock_flag(sk, SOCK_URGINLINE);
1672		break;
1673
1674	case SO_NO_CHECK:
1675		v.val = sk->sk_no_check_tx;
1676		break;
1677
1678	case SO_PRIORITY:
1679		v.val = READ_ONCE(sk->sk_priority);
1680		break;
1681
1682	case SO_LINGER:
1683		lv		= sizeof(v.ling);
1684		v.ling.l_onoff	= sock_flag(sk, SOCK_LINGER);
1685		v.ling.l_linger	= READ_ONCE(sk->sk_lingertime) / HZ;
1686		break;
1687
1688	case SO_BSDCOMPAT:
1689		break;
1690
1691	case SO_TIMESTAMP_OLD:
1692		v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
1693				!sock_flag(sk, SOCK_TSTAMP_NEW) &&
1694				!sock_flag(sk, SOCK_RCVTSTAMPNS);
1695		break;
1696
1697	case SO_TIMESTAMPNS_OLD:
1698		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS) && !sock_flag(sk, SOCK_TSTAMP_NEW);
1699		break;
1700
1701	case SO_TIMESTAMP_NEW:
1702		v.val = sock_flag(sk, SOCK_RCVTSTAMP) && sock_flag(sk, SOCK_TSTAMP_NEW);
1703		break;
1704
1705	case SO_TIMESTAMPNS_NEW:
1706		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS) && sock_flag(sk, SOCK_TSTAMP_NEW);
1707		break;
1708
1709	case SO_TIMESTAMPING_OLD:
1710	case SO_TIMESTAMPING_NEW:
1711		lv = sizeof(v.timestamping);
1712		/* For the later-added case SO_TIMESTAMPING_NEW: Be strict about only
1713		 * returning the flags when they were set through the same option.
1714		 * Don't change the behaviour for the old case SO_TIMESTAMPING_OLD.
1715		 */
1716		if (optname == SO_TIMESTAMPING_OLD || sock_flag(sk, SOCK_TSTAMP_NEW)) {
1717			v.timestamping.flags = READ_ONCE(sk->sk_tsflags);
1718			v.timestamping.bind_phc = READ_ONCE(sk->sk_bind_phc);
1719		}
1720		break;
1721
1722	case SO_RCVTIMEO_OLD:
1723	case SO_RCVTIMEO_NEW:
1724		lv = sock_get_timeout(READ_ONCE(sk->sk_rcvtimeo), &v,
1725				      SO_RCVTIMEO_OLD == optname);
1726		break;
1727
1728	case SO_SNDTIMEO_OLD:
1729	case SO_SNDTIMEO_NEW:
1730		lv = sock_get_timeout(READ_ONCE(sk->sk_sndtimeo), &v,
1731				      SO_SNDTIMEO_OLD == optname);
1732		break;
1733
1734	case SO_RCVLOWAT:
1735		v.val = READ_ONCE(sk->sk_rcvlowat);
1736		break;
1737
1738	case SO_SNDLOWAT:
1739		v.val = 1;
1740		break;
1741
1742	case SO_PASSCRED:
1743		v.val = !!test_bit(SOCK_PASSCRED, &sock->flags);
1744		break;
1745
1746	case SO_PASSPIDFD:
1747		v.val = !!test_bit(SOCK_PASSPIDFD, &sock->flags);
1748		break;
1749
1750	case SO_PEERCRED:
1751	{
1752		struct ucred peercred;
1753		if (len > sizeof(peercred))
1754			len = sizeof(peercred);
1755
1756		spin_lock(&sk->sk_peer_lock);
1757		cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
1758		spin_unlock(&sk->sk_peer_lock);
1759
1760		if (copy_to_sockptr(optval, &peercred, len))
1761			return -EFAULT;
1762		goto lenout;
1763	}
1764
1765	case SO_PEERPIDFD:
1766	{
1767		struct pid *peer_pid;
1768		struct file *pidfd_file = NULL;
1769		int pidfd;
1770
1771		if (len > sizeof(pidfd))
1772			len = sizeof(pidfd);
1773
1774		spin_lock(&sk->sk_peer_lock);
1775		peer_pid = get_pid(sk->sk_peer_pid);
1776		spin_unlock(&sk->sk_peer_lock);
1777
1778		if (!peer_pid)
1779			return -ENODATA;
1780
1781		pidfd = pidfd_prepare(peer_pid, 0, &pidfd_file);
1782		put_pid(peer_pid);
1783		if (pidfd < 0)
1784			return pidfd;
1785
1786		if (copy_to_sockptr(optval, &pidfd, len) ||
1787		    copy_to_sockptr(optlen, &len, sizeof(int))) {
1788			put_unused_fd(pidfd);
1789			fput(pidfd_file);
1790
1791			return -EFAULT;
1792		}
1793
1794		fd_install(pidfd, pidfd_file);
1795		return 0;
1796	}
1797
1798	case SO_PEERGROUPS:
1799	{
1800		const struct cred *cred;
1801		int ret, n;
1802
1803		cred = sk_get_peer_cred(sk);
1804		if (!cred)
1805			return -ENODATA;
1806
1807		n = cred->group_info->ngroups;
1808		if (len < n * sizeof(gid_t)) {
1809			len = n * sizeof(gid_t);
1810			put_cred(cred);
1811			return copy_to_sockptr(optlen, &len, sizeof(int)) ? -EFAULT : -ERANGE;
1812		}
1813		len = n * sizeof(gid_t);
1814
1815		ret = groups_to_user(optval, cred->group_info);
1816		put_cred(cred);
1817		if (ret)
1818			return ret;
1819		goto lenout;
1820	}
1821
1822	case SO_PEERNAME:
1823	{
1824		struct sockaddr_storage address;
1825
1826		lv = READ_ONCE(sock->ops)->getname(sock, (struct sockaddr *)&address, 2);
1827		if (lv < 0)
1828			return -ENOTCONN;
1829		if (lv < len)
1830			return -EINVAL;
1831		if (copy_to_sockptr(optval, &address, len))
1832			return -EFAULT;
1833		goto lenout;
1834	}
1835
1836	/* Dubious BSD thing... Probably nobody even uses it, but
1837	 * the UNIX standard wants it for whatever reason... -DaveM
1838	 */
1839	case SO_ACCEPTCONN:
1840		v.val = sk->sk_state == TCP_LISTEN;
1841		break;
1842
1843	case SO_PASSSEC:
1844		v.val = !!test_bit(SOCK_PASSSEC, &sock->flags);
1845		break;
1846
1847	case SO_PEERSEC:
1848		return security_socket_getpeersec_stream(sock,
1849							 optval, optlen, len);
1850
1851	case SO_MARK:
1852		v.val = READ_ONCE(sk->sk_mark);
1853		break;
1854
1855	case SO_RCVMARK:
1856		v.val = sock_flag(sk, SOCK_RCVMARK);
1857		break;
1858
1859	case SO_RXQ_OVFL:
1860		v.val = sock_flag(sk, SOCK_RXQ_OVFL);
1861		break;
1862
1863	case SO_WIFI_STATUS:
1864		v.val = sock_flag(sk, SOCK_WIFI_STATUS);
1865		break;
1866
1867	case SO_PEEK_OFF:
1868		if (!READ_ONCE(sock->ops)->set_peek_off)
1869			return -EOPNOTSUPP;
1870
1871		v.val = READ_ONCE(sk->sk_peek_off);
1872		break;
1873	case SO_NOFCS:
1874		v.val = sock_flag(sk, SOCK_NOFCS);
1875		break;
1876
1877	case SO_BINDTODEVICE:
1878		return sock_getbindtodevice(sk, optval, optlen, len);
1879
1880	case SO_GET_FILTER:
1881		len = sk_get_filter(sk, optval, len);
1882		if (len < 0)
1883			return len;
1884
1885		goto lenout;
1886
1887	case SO_LOCK_FILTER:
1888		v.val = sock_flag(sk, SOCK_FILTER_LOCKED);
1889		break;
1890
1891	case SO_BPF_EXTENSIONS:
1892		v.val = bpf_tell_extensions();
1893		break;
1894
1895	case SO_SELECT_ERR_QUEUE:
1896		v.val = sock_flag(sk, SOCK_SELECT_ERR_QUEUE);
1897		break;
1898
1899#ifdef CONFIG_NET_RX_BUSY_POLL
1900	case SO_BUSY_POLL:
1901		v.val = READ_ONCE(sk->sk_ll_usec);
1902		break;
1903	case SO_PREFER_BUSY_POLL:
1904		v.val = READ_ONCE(sk->sk_prefer_busy_poll);
1905		break;
1906#endif
1907
1908	case SO_MAX_PACING_RATE:
1909		/* The READ_ONCE() pair with the WRITE_ONCE() in sk_setsockopt() */
1910		if (sizeof(v.ulval) != sizeof(v.val) && len >= sizeof(v.ulval)) {
1911			lv = sizeof(v.ulval);
1912			v.ulval = READ_ONCE(sk->sk_max_pacing_rate);
1913		} else {
1914			/* 32bit version */
1915			v.val = min_t(unsigned long, ~0U,
1916				      READ_ONCE(sk->sk_max_pacing_rate));
1917		}
1918		break;
1919
1920	case SO_INCOMING_CPU:
1921		v.val = READ_ONCE(sk->sk_incoming_cpu);
1922		break;
1923
1924	case SO_MEMINFO:
1925	{
1926		u32 meminfo[SK_MEMINFO_VARS];
1927
1928		sk_get_meminfo(sk, meminfo);
1929
1930		len = min_t(unsigned int, len, sizeof(meminfo));
1931		if (copy_to_sockptr(optval, &meminfo, len))
1932			return -EFAULT;
1933
1934		goto lenout;
1935	}
1936
1937#ifdef CONFIG_NET_RX_BUSY_POLL
1938	case SO_INCOMING_NAPI_ID:
1939		v.val = READ_ONCE(sk->sk_napi_id);
1940
1941		/* aggregate non-NAPI IDs down to 0 */
1942		if (v.val < MIN_NAPI_ID)
1943			v.val = 0;
1944
1945		break;
1946#endif
1947
1948	case SO_COOKIE:
1949		lv = sizeof(u64);
1950		if (len < lv)
1951			return -EINVAL;
1952		v.val64 = sock_gen_cookie(sk);
1953		break;
1954
1955	case SO_ZEROCOPY:
1956		v.val = sock_flag(sk, SOCK_ZEROCOPY);
1957		break;
1958
1959	case SO_TXTIME:
1960		lv = sizeof(v.txtime);
1961		v.txtime.clockid = sk->sk_clockid;
1962		v.txtime.flags |= sk->sk_txtime_deadline_mode ?
1963				  SOF_TXTIME_DEADLINE_MODE : 0;
1964		v.txtime.flags |= sk->sk_txtime_report_errors ?
1965				  SOF_TXTIME_REPORT_ERRORS : 0;
1966		break;
1967
1968	case SO_BINDTOIFINDEX:
1969		v.val = READ_ONCE(sk->sk_bound_dev_if);
1970		break;
1971
1972	case SO_NETNS_COOKIE:
1973		lv = sizeof(u64);
1974		if (len != lv)
1975			return -EINVAL;
1976		v.val64 = sock_net(sk)->net_cookie;
1977		break;
1978
1979	case SO_BUF_LOCK:
1980		v.val = sk->sk_userlocks & SOCK_BUF_LOCK_MASK;
1981		break;
1982
1983	case SO_RESERVE_MEM:
1984		v.val = READ_ONCE(sk->sk_reserved_mem);
1985		break;
1986
1987	case SO_TXREHASH:
1988		/* Paired with WRITE_ONCE() in sk_setsockopt() */
1989		v.val = READ_ONCE(sk->sk_txrehash);
1990		break;
1991
1992	default:
1993		/* We implement the SO_SNDLOWAT etc to not be settable
1994		 * (1003.1g 7).
1995		 */
1996		return -ENOPROTOOPT;
1997	}
1998
1999	if (len > lv)
2000		len = lv;
2001	if (copy_to_sockptr(optval, &v, len))
2002		return -EFAULT;
2003lenout:
2004	if (copy_to_sockptr(optlen, &len, sizeof(int)))
2005		return -EFAULT;
2006	return 0;
2007}
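/*
 * Illustrative sketch (not taken from this file): the user-space counterpart
 * of the SO_PEERCRED branch above, on a connected AF_UNIX socket (struct
 * ucred needs _GNU_SOURCE with glibc):
 *
 *	struct ucred peer;
 *	socklen_t len = sizeof(peer);
 *
 *	if (getsockopt(fd, SOL_SOCKET, SO_PEERCRED, &peer, &len) == 0)
 *		printf("pid=%d uid=%u gid=%u\n", peer.pid, peer.uid, peer.gid);
 */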
2008
2009/*
2010 * Initialize an sk_lock.
2011 *
2012 * (We also register the sk_lock with the lock validator.)
2013 */
2014static inline void sock_lock_init(struct sock *sk)
2015{
2016	if (sk->sk_kern_sock)
2017		sock_lock_init_class_and_name(
2018			sk,
2019			af_family_kern_slock_key_strings[sk->sk_family],
2020			af_family_kern_slock_keys + sk->sk_family,
2021			af_family_kern_key_strings[sk->sk_family],
2022			af_family_kern_keys + sk->sk_family);
2023	else
2024		sock_lock_init_class_and_name(
2025			sk,
2026			af_family_slock_key_strings[sk->sk_family],
2027			af_family_slock_keys + sk->sk_family,
2028			af_family_key_strings[sk->sk_family],
2029			af_family_keys + sk->sk_family);
2030}
2031
2032/*
2033 * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
2034 * even temporarily, because of RCU lookups. sk_node should also be left as is.
2035 * We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end
2036 */
2037static void sock_copy(struct sock *nsk, const struct sock *osk)
2038{
2039	const struct proto *prot = READ_ONCE(osk->sk_prot);
2040#ifdef CONFIG_SECURITY_NETWORK
2041	void *sptr = nsk->sk_security;
2042#endif
2043
2044	/* If we move sk_tx_queue_mapping out of the private section,
2045	 * we must check if sk_tx_queue_clear() is called after
2046	 * sock_copy() in sk_clone_lock().
2047	 */
2048	BUILD_BUG_ON(offsetof(struct sock, sk_tx_queue_mapping) <
2049		     offsetof(struct sock, sk_dontcopy_begin) ||
2050		     offsetof(struct sock, sk_tx_queue_mapping) >=
2051		     offsetof(struct sock, sk_dontcopy_end));
2052
2053	memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));
2054
2055	memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
2056	       prot->obj_size - offsetof(struct sock, sk_dontcopy_end));
2057
2058#ifdef CONFIG_SECURITY_NETWORK
2059	nsk->sk_security = sptr;
2060	security_sk_clone(osk, nsk);
2061#endif
2062}
2063
2064static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
2065		int family)
2066{
2067	struct sock *sk;
2068	struct kmem_cache *slab;
2069
2070	slab = prot->slab;
2071	if (slab != NULL) {
2072		sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
2073		if (!sk)
2074			return sk;
2075		if (want_init_on_alloc(priority))
2076			sk_prot_clear_nulls(sk, prot->obj_size);
2077	} else
2078		sk = kmalloc(prot->obj_size, priority);
2079
2080	if (sk != NULL) {
2081		if (security_sk_alloc(sk, family, priority))
2082			goto out_free;
2083
2084		if (!try_module_get(prot->owner))
2085			goto out_free_sec;
2086	}
2087
2088	return sk;
2089
2090out_free_sec:
2091	security_sk_free(sk);
2092out_free:
2093	if (slab != NULL)
2094		kmem_cache_free(slab, sk);
2095	else
2096		kfree(sk);
2097	return NULL;
2098}
2099
2100static void sk_prot_free(struct proto *prot, struct sock *sk)
2101{
2102	struct kmem_cache *slab;
2103	struct module *owner;
2104
2105	owner = prot->owner;
2106	slab = prot->slab;
2107
2108	cgroup_sk_free(&sk->sk_cgrp_data);
2109	mem_cgroup_sk_free(sk);
2110	security_sk_free(sk);
2111	if (slab != NULL)
2112		kmem_cache_free(slab, sk);
2113	else
2114		kfree(sk);
2115	module_put(owner);
2116}
2117
2118/**
2119 *	sk_alloc - All socket objects are allocated here
2120 *	@net: the applicable net namespace
2121 *	@family: protocol family
2122 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
2123 *	@prot: struct proto associated with this new sock instance
2124 *	@kern: is this to be a kernel socket?
2125 */
2126struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
2127		      struct proto *prot, int kern)
2128{
2129	struct sock *sk;
2130
2131	sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
2132	if (sk) {
2133		sk->sk_family = family;
2134		/*
2135		 * See comment in struct sock definition to understand
2136		 * why we need sk_prot_creator -acme
2137		 */
2138		sk->sk_prot = sk->sk_prot_creator = prot;
2139		sk->sk_kern_sock = kern;
2140		sock_lock_init(sk);
2141		sk->sk_net_refcnt = kern ? 0 : 1;
2142		if (likely(sk->sk_net_refcnt)) {
2143			get_net_track(net, &sk->ns_tracker, priority);
2144			sock_inuse_add(net, 1);
2145		} else {
2146			__netns_tracker_alloc(net, &sk->ns_tracker,
2147					      false, priority);
2148		}
2149
2150		sock_net_set(sk, net);
2151		refcount_set(&sk->sk_wmem_alloc, 1);
2152
2153		mem_cgroup_sk_alloc(sk);
2154		cgroup_sk_alloc(&sk->sk_cgrp_data);
2155		sock_update_classid(&sk->sk_cgrp_data);
2156		sock_update_netprioidx(&sk->sk_cgrp_data);
2157		sk_tx_queue_clear(sk);
2158	}
2159
2160	return sk;
2161}
2162EXPORT_SYMBOL(sk_alloc);
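/*
 * Illustrative sketch (not lifted from a particular caller): a protocol
 * family's create() handler is the typical user of sk_alloc().  The proto
 * and the error code below are assumptions for the example only.
 *
 *	struct sock *sk;
 *
 *	sk = sk_alloc(net, PF_INET, GFP_KERNEL, &tcp_prot, kern);
 *	if (!sk)
 *		return -ENOBUFS;
 *	sock_init_data(sock, sk);
 *
 * sk_alloc() zeroes the object (__GFP_ZERO is ORed in), takes a netns
 * reference for user sockets, and leaves sk_refcnt to be set later by
 * sock_init_data().
 */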
2163
2164/* Sockets having SOCK_RCU_FREE will call this function after one RCU
2165 * grace period. This is the case for UDP sockets and TCP listeners.
2166 */
2167static void __sk_destruct(struct rcu_head *head)
2168{
2169	struct sock *sk = container_of(head, struct sock, sk_rcu);
2170	struct sk_filter *filter;
2171
2172	if (sk->sk_destruct)
2173		sk->sk_destruct(sk);
2174
2175	filter = rcu_dereference_check(sk->sk_filter,
2176				       refcount_read(&sk->sk_wmem_alloc) == 0);
2177	if (filter) {
2178		sk_filter_uncharge(sk, filter);
2179		RCU_INIT_POINTER(sk->sk_filter, NULL);
2180	}
2181
2182	sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);
2183
2184#ifdef CONFIG_BPF_SYSCALL
2185	bpf_sk_storage_free(sk);
2186#endif
2187
2188	if (atomic_read(&sk->sk_omem_alloc))
2189		pr_debug("%s: optmem leakage (%d bytes) detected\n",
2190			 __func__, atomic_read(&sk->sk_omem_alloc));
2191
2192	if (sk->sk_frag.page) {
2193		put_page(sk->sk_frag.page);
2194		sk->sk_frag.page = NULL;
2195	}
2196
2197	/* We do not need to acquire sk->sk_peer_lock, we are the last user. */
2198	put_cred(sk->sk_peer_cred);
2199	put_pid(sk->sk_peer_pid);
2200
2201	if (likely(sk->sk_net_refcnt))
2202		put_net_track(sock_net(sk), &sk->ns_tracker);
2203	else
2204		__netns_tracker_free(sock_net(sk), &sk->ns_tracker, false);
2205
2206	sk_prot_free(sk->sk_prot_creator, sk);
2207}
2208
2209void sk_destruct(struct sock *sk)
2210{
2211	bool use_call_rcu = sock_flag(sk, SOCK_RCU_FREE);
2212
2213	if (rcu_access_pointer(sk->sk_reuseport_cb)) {
2214		reuseport_detach_sock(sk);
2215		use_call_rcu = true;
2216	}
2217
2218	if (use_call_rcu)
2219		call_rcu(&sk->sk_rcu, __sk_destruct);
2220	else
2221		__sk_destruct(&sk->sk_rcu);
2222}
2223
2224static void __sk_free(struct sock *sk)
2225{
2226	if (likely(sk->sk_net_refcnt))
2227		sock_inuse_add(sock_net(sk), -1);
2228
2229	if (unlikely(sk->sk_net_refcnt && sock_diag_has_destroy_listeners(sk)))
2230		sock_diag_broadcast_destroy(sk);
2231	else
2232		sk_destruct(sk);
2233}
2234
2235void sk_free(struct sock *sk)
2236{
2237	/*
2238	 * We subtract one from sk_wmem_alloc so we can tell whether
2239	 * some packets are still in a tx queue.
2240	 * If the result is not zero, sock_wfree() will call __sk_free(sk) later.
2241	 */
2242	if (refcount_dec_and_test(&sk->sk_wmem_alloc))
2243		__sk_free(sk);
2244}
2245EXPORT_SYMBOL(sk_free);
2246
2247static void sk_init_common(struct sock *sk)
2248{
2249	skb_queue_head_init(&sk->sk_receive_queue);
2250	skb_queue_head_init(&sk->sk_write_queue);
2251	skb_queue_head_init(&sk->sk_error_queue);
2252
2253	rwlock_init(&sk->sk_callback_lock);
2254	lockdep_set_class_and_name(&sk->sk_receive_queue.lock,
2255			af_rlock_keys + sk->sk_family,
2256			af_family_rlock_key_strings[sk->sk_family]);
2257	lockdep_set_class_and_name(&sk->sk_write_queue.lock,
2258			af_wlock_keys + sk->sk_family,
2259			af_family_wlock_key_strings[sk->sk_family]);
2260	lockdep_set_class_and_name(&sk->sk_error_queue.lock,
2261			af_elock_keys + sk->sk_family,
2262			af_family_elock_key_strings[sk->sk_family]);
2263	lockdep_set_class_and_name(&sk->sk_callback_lock,
2264			af_callback_keys + sk->sk_family,
2265			af_family_clock_key_strings[sk->sk_family]);
2266}
2267
2268/**
2269 *	sk_clone_lock - clone a socket, and lock its clone
2270 *	@sk: the socket to clone
2271 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
2272 *
2273 *	Caller must unlock socket even in error path (bh_unlock_sock(newsk))
2274 */
2275struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
2276{
2277	struct proto *prot = READ_ONCE(sk->sk_prot);
2278	struct sk_filter *filter;
2279	bool is_charged = true;
2280	struct sock *newsk;
2281
2282	newsk = sk_prot_alloc(prot, priority, sk->sk_family);
2283	if (!newsk)
2284		goto out;
2285
2286	sock_copy(newsk, sk);
2287
2288	newsk->sk_prot_creator = prot;
2289
2290	/* SANITY */
2291	if (likely(newsk->sk_net_refcnt)) {
2292		get_net_track(sock_net(newsk), &newsk->ns_tracker, priority);
2293		sock_inuse_add(sock_net(newsk), 1);
2294	} else {
2295		/* Kernel sockets do not elevate the struct net refcount.
2296		 * Instead, use a tracker to more easily detect if a layer
2297		 * is not properly dismantling its kernel sockets at netns
2298		 * destroy time.
2299		 */
2300		__netns_tracker_alloc(sock_net(newsk), &newsk->ns_tracker,
2301				      false, priority);
2302	}
2303	sk_node_init(&newsk->sk_node);
2304	sock_lock_init(newsk);
2305	bh_lock_sock(newsk);
2306	newsk->sk_backlog.head	= newsk->sk_backlog.tail = NULL;
2307	newsk->sk_backlog.len = 0;
2308
2309	atomic_set(&newsk->sk_rmem_alloc, 0);
2310
2311	/* sk_wmem_alloc set to one (see sk_free() and sock_wfree()) */
2312	refcount_set(&newsk->sk_wmem_alloc, 1);
2313
2314	atomic_set(&newsk->sk_omem_alloc, 0);
2315	sk_init_common(newsk);
2316
2317	newsk->sk_dst_cache	= NULL;
2318	newsk->sk_dst_pending_confirm = 0;
2319	newsk->sk_wmem_queued	= 0;
2320	newsk->sk_forward_alloc = 0;
2321	newsk->sk_reserved_mem  = 0;
2322	atomic_set(&newsk->sk_drops, 0);
2323	newsk->sk_send_head	= NULL;
2324	newsk->sk_userlocks	= sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
2325	atomic_set(&newsk->sk_zckey, 0);
2326
2327	sock_reset_flag(newsk, SOCK_DONE);
2328
2329	/* sk->sk_memcg will be populated at accept() time */
2330	newsk->sk_memcg = NULL;
2331
2332	cgroup_sk_clone(&newsk->sk_cgrp_data);
2333
2334	rcu_read_lock();
2335	filter = rcu_dereference(sk->sk_filter);
2336	if (filter != NULL)
2337		/* though it's an empty new sock, the charging may fail
2338		 * if sysctl_optmem_max was changed between creation of
2339		 * original socket and cloning
2340		 */
2341		is_charged = sk_filter_charge(newsk, filter);
2342	RCU_INIT_POINTER(newsk->sk_filter, filter);
2343	rcu_read_unlock();
2344
2345	if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) {
2346		/* We need to make sure that we don't uncharge the new
2347		 * socket if we couldn't charge it in the first place
2348		 * as otherwise we uncharge the parent's filter.
2349		 */
2350		if (!is_charged)
2351			RCU_INIT_POINTER(newsk->sk_filter, NULL);
2352		sk_free_unlock_clone(newsk);
2353		newsk = NULL;
2354		goto out;
2355	}
2356	RCU_INIT_POINTER(newsk->sk_reuseport_cb, NULL);
2357
2358	if (bpf_sk_storage_clone(sk, newsk)) {
2359		sk_free_unlock_clone(newsk);
2360		newsk = NULL;
2361		goto out;
2362	}
2363
2364	/* Clear sk_user_data if parent had the pointer tagged
2365	 * as not suitable for copying when cloning.
2366	 */
2367	if (sk_user_data_is_nocopy(newsk))
2368		newsk->sk_user_data = NULL;
2369
2370	newsk->sk_err	   = 0;
2371	newsk->sk_err_soft = 0;
2372	newsk->sk_priority = 0;
2373	newsk->sk_incoming_cpu = raw_smp_processor_id();
2374
2375	/* Before updating sk_refcnt, we must commit prior changes to memory
2376	 * (Documentation/RCU/rculist_nulls.rst for details)
2377	 */
2378	smp_wmb();
2379	refcount_set(&newsk->sk_refcnt, 2);
2380
2381	sk_set_socket(newsk, NULL);
2382	sk_tx_queue_clear(newsk);
2383	RCU_INIT_POINTER(newsk->sk_wq, NULL);
2384
2385	if (newsk->sk_prot->sockets_allocated)
2386		sk_sockets_allocated_inc(newsk);
2387
2388	if (sock_needs_netstamp(sk) && newsk->sk_flags & SK_FLAGS_TIMESTAMP)
2389		net_enable_timestamp();
2390out:
2391	return newsk;
2392}
2393EXPORT_SYMBOL_GPL(sk_clone_lock);
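/*
 * A hedged usage sketch: the clone is returned with bh_lock_sock() held and
 * sk_refcnt set to 2, so the caller must always drop the lock itself, as the
 * kernel-doc above notes.
 *
 *	newsk = sk_clone_lock(sk, GFP_ATOMIC);
 *	if (newsk) {
 *		... protocol specific setup on newsk ...
 *		bh_unlock_sock(newsk);
 *	}
 */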
2394
2395void sk_free_unlock_clone(struct sock *sk)
2396{
2397	/* It is still a raw copy of the parent, so invalidate
2398	 * the destructor and do a plain sk_free(). */
2399	sk->sk_destruct = NULL;
2400	bh_unlock_sock(sk);
2401	sk_free(sk);
2402}
2403EXPORT_SYMBOL_GPL(sk_free_unlock_clone);
2404
2405static u32 sk_dst_gso_max_size(struct sock *sk, struct dst_entry *dst)
2406{
2407	bool is_ipv6 = false;
2408	u32 max_size;
2409
2410#if IS_ENABLED(CONFIG_IPV6)
2411	is_ipv6 = (sk->sk_family == AF_INET6 &&
2412		   !ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr));
2413#endif
2414	/* pairs with the WRITE_ONCE() in netif_set_gso(_ipv4)_max_size() */
2415	max_size = is_ipv6 ? READ_ONCE(dst->dev->gso_max_size) :
2416			READ_ONCE(dst->dev->gso_ipv4_max_size);
2417	if (max_size > GSO_LEGACY_MAX_SIZE && !sk_is_tcp(sk))
2418		max_size = GSO_LEGACY_MAX_SIZE;
2419
2420	return max_size - (MAX_TCP_HEADER + 1);
2421}
2422
2423void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
2424{
2425	u32 max_segs = 1;
2426
2427	sk->sk_route_caps = dst->dev->features;
2428	if (sk_is_tcp(sk))
2429		sk->sk_route_caps |= NETIF_F_GSO;
2430	if (sk->sk_route_caps & NETIF_F_GSO)
2431		sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
2432	if (unlikely(sk->sk_gso_disabled))
2433		sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
2434	if (sk_can_gso(sk)) {
2435		if (dst->header_len && !xfrm_dst_offload_ok(dst)) {
2436			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
2437		} else {
2438			sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
2439			sk->sk_gso_max_size = sk_dst_gso_max_size(sk, dst);
2440			/* pairs with the WRITE_ONCE() in netif_set_gso_max_segs() */
2441			max_segs = max_t(u32, READ_ONCE(dst->dev->gso_max_segs), 1);
2442		}
2443	}
2444	sk->sk_gso_max_segs = max_segs;
2445	sk_dst_set(sk, dst);
2446}
2447EXPORT_SYMBOL_GPL(sk_setup_caps);
2448
2449/*
2450 *	Simple resource managers for sockets.
2451 */
2452
2453
2454/*
2455 * Write buffer destructor automatically called from kfree_skb.
2456 */
2457void sock_wfree(struct sk_buff *skb)
2458{
2459	struct sock *sk = skb->sk;
2460	unsigned int len = skb->truesize;
2461	bool free;
2462
2463	if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
2464		if (sock_flag(sk, SOCK_RCU_FREE) &&
2465		    sk->sk_write_space == sock_def_write_space) {
2466			rcu_read_lock();
2467			free = refcount_sub_and_test(len, &sk->sk_wmem_alloc);
2468			sock_def_write_space_wfree(sk);
2469			rcu_read_unlock();
2470			if (unlikely(free))
2471				__sk_free(sk);
2472			return;
2473		}
2474
2475		/*
2476		 * Keep a reference on sk_wmem_alloc; it will be released
2477		 * after the sk_write_space() call.
2478		 */
2479		WARN_ON(refcount_sub_and_test(len - 1, &sk->sk_wmem_alloc));
2480		sk->sk_write_space(sk);
2481		len = 1;
2482	}
2483	/*
2484	 * if sk_wmem_alloc reaches 0, we must finish what sk_free()
2485	 * could not do because of in-flight packets
2486	 */
2487	if (refcount_sub_and_test(len, &sk->sk_wmem_alloc))
2488		__sk_free(sk);
2489}
2490EXPORT_SYMBOL(sock_wfree);
2491
2492/* This variant of sock_wfree() is used by TCP,
2493 * since it sets SOCK_USE_WRITE_QUEUE.
2494 */
2495void __sock_wfree(struct sk_buff *skb)
2496{
2497	struct sock *sk = skb->sk;
2498
2499	if (refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc))
2500		__sk_free(sk);
2501}
2502
2503void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
2504{
2505	skb_orphan(skb);
2506	skb->sk = sk;
2507#ifdef CONFIG_INET
2508	if (unlikely(!sk_fullsock(sk))) {
2509		skb->destructor = sock_edemux;
2510		sock_hold(sk);
2511		return;
2512	}
2513#endif
2514	skb->destructor = sock_wfree;
2515	skb_set_hash_from_sk(skb, sk);
2516	/*
2517	 * We used to take a refcount on sk, but the following operation
2518	 * is enough to guarantee sk_free() won't free this sock until
2519	 * all in-flight packets are completed.
2520	 */
2521	refcount_add(skb->truesize, &sk->sk_wmem_alloc);
2522}
2523EXPORT_SYMBOL(skb_set_owner_w);
2524
2525static bool can_skb_orphan_partial(const struct sk_buff *skb)
2526{
2527#ifdef CONFIG_TLS_DEVICE
2528	/* Drivers depend on in-order delivery for crypto offload,
2529	 * partial orphan breaks out-of-order-OK logic.
2530	 */
2531	if (skb->decrypted)
2532		return false;
2533#endif
2534	return (skb->destructor == sock_wfree ||
2535		(IS_ENABLED(CONFIG_INET) && skb->destructor == tcp_wfree));
2536}
2537
2538/* This helper is used by netem, as it can hold packets in its
2539 * delay queue. We want to allow the owner socket to send more
2540 * packets, as if they were already TX completed by a typical driver.
2541 * But we also want to keep skb->sk set because some packet schedulers
2542 * rely on it (sch_fq for example).
2543 */
2544void skb_orphan_partial(struct sk_buff *skb)
2545{
2546	if (skb_is_tcp_pure_ack(skb))
2547		return;
2548
2549	if (can_skb_orphan_partial(skb) && skb_set_owner_sk_safe(skb, skb->sk))
2550		return;
2551
2552	skb_orphan(skb);
2553}
2554EXPORT_SYMBOL(skb_orphan_partial);
2555
2556/*
2557 * Read buffer destructor automatically called from kfree_skb.
2558 */
2559void sock_rfree(struct sk_buff *skb)
2560{
2561	struct sock *sk = skb->sk;
2562	unsigned int len = skb->truesize;
2563
2564	atomic_sub(len, &sk->sk_rmem_alloc);
2565	sk_mem_uncharge(sk, len);
2566}
2567EXPORT_SYMBOL(sock_rfree);
2568
2569/*
2570 * Buffer destructor for skbs that are not used directly in read or write
2571 * path, e.g. for error handler skbs. Automatically called from kfree_skb.
2572 */
2573void sock_efree(struct sk_buff *skb)
2574{
2575	sock_put(skb->sk);
2576}
2577EXPORT_SYMBOL(sock_efree);
2578
2579/* Buffer destructor for prefetch/receive path where reference count may
2580 * not be held, e.g. for listen sockets.
2581 */
2582#ifdef CONFIG_INET
2583void sock_pfree(struct sk_buff *skb)
2584{
2585	if (sk_is_refcounted(skb->sk))
2586		sock_gen_put(skb->sk);
2587}
2588EXPORT_SYMBOL(sock_pfree);
2589#endif /* CONFIG_INET */
2590
2591kuid_t sock_i_uid(struct sock *sk)
2592{
2593	kuid_t uid;
2594
2595	read_lock_bh(&sk->sk_callback_lock);
2596	uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : GLOBAL_ROOT_UID;
2597	read_unlock_bh(&sk->sk_callback_lock);
2598	return uid;
2599}
2600EXPORT_SYMBOL(sock_i_uid);
2601
2602unsigned long __sock_i_ino(struct sock *sk)
2603{
2604	unsigned long ino;
2605
2606	read_lock(&sk->sk_callback_lock);
2607	ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
2608	read_unlock(&sk->sk_callback_lock);
2609	return ino;
2610}
2611EXPORT_SYMBOL(__sock_i_ino);
2612
2613unsigned long sock_i_ino(struct sock *sk)
2614{
2615	unsigned long ino;
2616
2617	local_bh_disable();
2618	ino = __sock_i_ino(sk);
2619	local_bh_enable();
2620	return ino;
2621}
2622EXPORT_SYMBOL(sock_i_ino);
2623
2624/*
2625 * Allocate a skb from the socket's send buffer.
2626 */
2627struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
2628			     gfp_t priority)
2629{
2630	if (force ||
2631	    refcount_read(&sk->sk_wmem_alloc) < READ_ONCE(sk->sk_sndbuf)) {
2632		struct sk_buff *skb = alloc_skb(size, priority);
2633
2634		if (skb) {
2635			skb_set_owner_w(skb, sk);
2636			return skb;
2637		}
2638	}
2639	return NULL;
2640}
2641EXPORT_SYMBOL(sock_wmalloc);
2642
2643static void sock_ofree(struct sk_buff *skb)
2644{
2645	struct sock *sk = skb->sk;
2646
2647	atomic_sub(skb->truesize, &sk->sk_omem_alloc);
2648}
2649
2650struct sk_buff *sock_omalloc(struct sock *sk, unsigned long size,
2651			     gfp_t priority)
2652{
2653	struct sk_buff *skb;
2654
2655	/* small safe race: SKB_TRUESIZE may differ from final skb->truesize */
2656	if (atomic_read(&sk->sk_omem_alloc) + SKB_TRUESIZE(size) >
2657	    READ_ONCE(sock_net(sk)->core.sysctl_optmem_max))
2658		return NULL;
2659
2660	skb = alloc_skb(size, priority);
2661	if (!skb)
2662		return NULL;
2663
2664	atomic_add(skb->truesize, &sk->sk_omem_alloc);
2665	skb->sk = sk;
2666	skb->destructor = sock_ofree;
2667	return skb;
2668}
2669
2670/*
2671 * Allocate a memory block from the socket's option memory buffer.
2672 */
2673void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
2674{
2675	int optmem_max = READ_ONCE(sock_net(sk)->core.sysctl_optmem_max);
2676
2677	if ((unsigned int)size <= optmem_max &&
2678	    atomic_read(&sk->sk_omem_alloc) + size < optmem_max) {
2679		void *mem;
2680		/* First do the add, to avoid the race if kmalloc
2681		 * might sleep.
2682		 */
2683		atomic_add(size, &sk->sk_omem_alloc);
2684		mem = kmalloc(size, priority);
2685		if (mem)
2686			return mem;
2687		atomic_sub(size, &sk->sk_omem_alloc);
2688	}
2689	return NULL;
2690}
2691EXPORT_SYMBOL(sock_kmalloc);
2692
2693/* Free an option memory block. Note, we actually want the inline
2694 * here as this allows gcc to detect the nullify and fold away the
2695 * condition entirely.
2696 */
2697static inline void __sock_kfree_s(struct sock *sk, void *mem, int size,
2698				  const bool nullify)
2699{
2700	if (WARN_ON_ONCE(!mem))
2701		return;
2702	if (nullify)
2703		kfree_sensitive(mem);
2704	else
2705		kfree(mem);
2706	atomic_sub(size, &sk->sk_omem_alloc);
2707}
2708
2709void sock_kfree_s(struct sock *sk, void *mem, int size)
2710{
2711	__sock_kfree_s(sk, mem, size, false);
2712}
2713EXPORT_SYMBOL(sock_kfree_s);
2714
2715void sock_kzfree_s(struct sock *sk, void *mem, int size)
2716{
2717	__sock_kfree_s(sk, mem, size, true);
2718}
2719EXPORT_SYMBOL(sock_kzfree_s);
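/*
 * Sketch of the expected pairing for the option-memory helpers above
 * (illustrative only; optlen and the error code are assumptions):
 * allocations are charged to sk_omem_alloc and must be released with the
 * same size that was charged.
 *
 *	void *opt;
 *
 *	opt = sock_kmalloc(sk, optlen, GFP_KERNEL);
 *	if (!opt)
 *		return -ENOBUFS;
 *	...
 *	sock_kfree_s(sk, opt, optlen);    (or sock_kzfree_s() for key material)
 */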
2720
2721/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
2722   I think these locks should be removed for datagram sockets.
2723 */
2724static long sock_wait_for_wmem(struct sock *sk, long timeo)
2725{
2726	DEFINE_WAIT(wait);
2727
2728	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
2729	for (;;) {
2730		if (!timeo)
2731			break;
2732		if (signal_pending(current))
2733			break;
2734		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
2735		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
2736		if (refcount_read(&sk->sk_wmem_alloc) < READ_ONCE(sk->sk_sndbuf))
2737			break;
2738		if (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN)
2739			break;
2740		if (READ_ONCE(sk->sk_err))
2741			break;
2742		timeo = schedule_timeout(timeo);
2743	}
2744	finish_wait(sk_sleep(sk), &wait);
2745	return timeo;
2746}
2747
2748
2749/*
2750 *	Generic send/receive buffer handlers
2751 */
2752
2753struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
2754				     unsigned long data_len, int noblock,
2755				     int *errcode, int max_page_order)
2756{
2757	struct sk_buff *skb;
2758	long timeo;
2759	int err;
2760
2761	timeo = sock_sndtimeo(sk, noblock);
2762	for (;;) {
2763		err = sock_error(sk);
2764		if (err != 0)
2765			goto failure;
2766
2767		err = -EPIPE;
2768		if (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN)
2769			goto failure;
2770
2771		if (sk_wmem_alloc_get(sk) < READ_ONCE(sk->sk_sndbuf))
2772			break;
2773
2774		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
2775		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
2776		err = -EAGAIN;
2777		if (!timeo)
2778			goto failure;
2779		if (signal_pending(current))
2780			goto interrupted;
2781		timeo = sock_wait_for_wmem(sk, timeo);
2782	}
2783	skb = alloc_skb_with_frags(header_len, data_len, max_page_order,
2784				   errcode, sk->sk_allocation);
2785	if (skb)
2786		skb_set_owner_w(skb, sk);
2787	return skb;
2788
2789interrupted:
2790	err = sock_intr_errno(timeo);
2791failure:
2792	*errcode = err;
2793	return NULL;
2794}
2795EXPORT_SYMBOL(sock_alloc_send_pskb);
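/*
 * Rough sketch of a datagram sendmsg path using the helper above; real
 * callers (e.g. UDP, AF_UNIX) add their own header reservation and length
 * checks, so treat hlen/dlen as placeholders:
 *
 *	skb = sock_alloc_send_pskb(sk, hlen, dlen,
 *				   msg->msg_flags & MSG_DONTWAIT,
 *				   &err, 0);
 *	if (!skb)
 *		return err;
 *	skb_reserve(skb, hlen);
 */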
2796
2797int __sock_cmsg_send(struct sock *sk, struct cmsghdr *cmsg,
2798		     struct sockcm_cookie *sockc)
2799{
2800	u32 tsflags;
2801
2802	switch (cmsg->cmsg_type) {
2803	case SO_MARK:
2804		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) &&
2805		    !ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
2806			return -EPERM;
2807		if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32)))
2808			return -EINVAL;
2809		sockc->mark = *(u32 *)CMSG_DATA(cmsg);
2810		break;
2811	case SO_TIMESTAMPING_OLD:
2812	case SO_TIMESTAMPING_NEW:
2813		if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32)))
2814			return -EINVAL;
2815
2816		tsflags = *(u32 *)CMSG_DATA(cmsg);
2817		if (tsflags & ~SOF_TIMESTAMPING_TX_RECORD_MASK)
2818			return -EINVAL;
2819
2820		sockc->tsflags &= ~SOF_TIMESTAMPING_TX_RECORD_MASK;
2821		sockc->tsflags |= tsflags;
2822		break;
2823	case SCM_TXTIME:
2824		if (!sock_flag(sk, SOCK_TXTIME))
2825			return -EINVAL;
2826		if (cmsg->cmsg_len != CMSG_LEN(sizeof(u64)))
2827			return -EINVAL;
2828		sockc->transmit_time = get_unaligned((u64 *)CMSG_DATA(cmsg));
2829		break;
2830	/* SCM_RIGHTS and SCM_CREDENTIALS are semantically in SOL_UNIX. */
2831	case SCM_RIGHTS:
2832	case SCM_CREDENTIALS:
2833		break;
2834	default:
2835		return -EINVAL;
2836	}
2837	return 0;
2838}
2839EXPORT_SYMBOL(__sock_cmsg_send);
2840
2841int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
2842		   struct sockcm_cookie *sockc)
2843{
2844	struct cmsghdr *cmsg;
2845	int ret;
2846
2847	for_each_cmsghdr(cmsg, msg) {
2848		if (!CMSG_OK(msg, cmsg))
2849			return -EINVAL;
2850		if (cmsg->cmsg_level != SOL_SOCKET)
2851			continue;
2852		ret = __sock_cmsg_send(sk, cmsg, sockc);
2853		if (ret)
2854			return ret;
2855	}
2856	return 0;
2857}
2858EXPORT_SYMBOL(sock_cmsg_send);
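/*
 * From userspace, the SOL_SOCKET control messages parsed above arrive as
 * sendmsg() ancillary data.  A hedged example (userspace C, per-packet mark;
 * the value 42 is arbitrary, and CAP_NET_ADMIN or CAP_NET_RAW is required):
 *
 *	uint32_t mark = 42;
 *	char cbuf[CMSG_SPACE(sizeof(mark))] = {0};
 *	struct msghdr msg = { .msg_control = cbuf,
 *			      .msg_controllen = sizeof(cbuf) };
 *	struct cmsghdr *cm = CMSG_FIRSTHDR(&msg);
 *
 *	cm->cmsg_level = SOL_SOCKET;
 *	cm->cmsg_type  = SO_MARK;
 *	cm->cmsg_len   = CMSG_LEN(sizeof(mark));
 *	memcpy(CMSG_DATA(cm), &mark, sizeof(mark));
 *	(iov and destination setup omitted before calling sendmsg(fd, &msg, 0))
 */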
2859
2860static void sk_enter_memory_pressure(struct sock *sk)
2861{
2862	if (!sk->sk_prot->enter_memory_pressure)
2863		return;
2864
2865	sk->sk_prot->enter_memory_pressure(sk);
2866}
2867
2868static void sk_leave_memory_pressure(struct sock *sk)
2869{
2870	if (sk->sk_prot->leave_memory_pressure) {
2871		INDIRECT_CALL_INET_1(sk->sk_prot->leave_memory_pressure,
2872				     tcp_leave_memory_pressure, sk);
2873	} else {
2874		unsigned long *memory_pressure = sk->sk_prot->memory_pressure;
2875
2876		if (memory_pressure && READ_ONCE(*memory_pressure))
2877			WRITE_ONCE(*memory_pressure, 0);
2878	}
2879}
2880
2881DEFINE_STATIC_KEY_FALSE(net_high_order_alloc_disable_key);
2882
2883/**
2884 * skb_page_frag_refill - check that a page_frag contains enough room
2885 * @sz: minimum size of the fragment we want to get
2886 * @pfrag: pointer to page_frag
2887 * @gfp: priority for memory allocation
2888 *
2889 * Note: While this allocator tries to use high order pages, there is
2890 * no guarantee that allocations succeed. Therefore, @sz MUST be
2891 * less than or equal to PAGE_SIZE.
2892 */
2893bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t gfp)
2894{
2895	if (pfrag->page) {
2896		if (page_ref_count(pfrag->page) == 1) {
2897			pfrag->offset = 0;
2898			return true;
2899		}
2900		if (pfrag->offset + sz <= pfrag->size)
2901			return true;
2902		put_page(pfrag->page);
2903	}
2904
2905	pfrag->offset = 0;
2906	if (SKB_FRAG_PAGE_ORDER &&
2907	    !static_branch_unlikely(&net_high_order_alloc_disable_key)) {
2908		/* Avoid direct reclaim but allow kswapd to wake */
2909		pfrag->page = alloc_pages((gfp & ~__GFP_DIRECT_RECLAIM) |
2910					  __GFP_COMP | __GFP_NOWARN |
2911					  __GFP_NORETRY,
2912					  SKB_FRAG_PAGE_ORDER);
2913		if (likely(pfrag->page)) {
2914			pfrag->size = PAGE_SIZE << SKB_FRAG_PAGE_ORDER;
2915			return true;
2916		}
2917	}
2918	pfrag->page = alloc_page(gfp);
2919	if (likely(pfrag->page)) {
2920		pfrag->size = PAGE_SIZE;
2921		return true;
2922	}
2923	return false;
2924}
2925EXPORT_SYMBOL(skb_page_frag_refill);
2926
2927bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
2928{
2929	if (likely(skb_page_frag_refill(32U, pfrag, sk->sk_allocation)))
2930		return true;
2931
2932	sk_enter_memory_pressure(sk);
2933	sk_stream_moderate_sndbuf(sk);
2934	return false;
2935}
2936EXPORT_SYMBOL(sk_page_frag_refill);
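/*
 * Typical copy loop around the refill helper above, loosely in the spirit of
 * the TCP sendmsg path (a sketch only; memory scheduling and skb frag
 * bookkeeping are omitted):
 *
 *	struct page_frag *pfrag = sk_page_frag(sk);
 *
 *	if (!sk_page_frag_refill(sk, pfrag))
 *		goto wait_for_memory;
 *	copy = min_t(int, copy, pfrag->size - pfrag->offset);
 *	if (copy_from_iter(page_address(pfrag->page) + pfrag->offset,
 *			   copy, &msg->msg_iter) != copy)
 *		return -EFAULT;
 *	pfrag->offset += copy;
 */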
2937
2938void __lock_sock(struct sock *sk)
2939	__releases(&sk->sk_lock.slock)
2940	__acquires(&sk->sk_lock.slock)
2941{
2942	DEFINE_WAIT(wait);
2943
2944	for (;;) {
2945		prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
2946					TASK_UNINTERRUPTIBLE);
2947		spin_unlock_bh(&sk->sk_lock.slock);
2948		schedule();
2949		spin_lock_bh(&sk->sk_lock.slock);
2950		if (!sock_owned_by_user(sk))
2951			break;
2952	}
2953	finish_wait(&sk->sk_lock.wq, &wait);
2954}
2955
2956void __release_sock(struct sock *sk)
2957	__releases(&sk->sk_lock.slock)
2958	__acquires(&sk->sk_lock.slock)
2959{
2960	struct sk_buff *skb, *next;
2961
2962	while ((skb = sk->sk_backlog.head) != NULL) {
2963		sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
2964
2965		spin_unlock_bh(&sk->sk_lock.slock);
2966
2967		do {
2968			next = skb->next;
2969			prefetch(next);
2970			DEBUG_NET_WARN_ON_ONCE(skb_dst_is_noref(skb));
2971			skb_mark_not_on_list(skb);
2972			sk_backlog_rcv(sk, skb);
2973
2974			cond_resched();
2975
2976			skb = next;
2977		} while (skb != NULL);
2978
2979		spin_lock_bh(&sk->sk_lock.slock);
2980	}
2981
2982	/*
2983	 * Doing the zeroing here guarantees we cannot loop forever
2984	 * while a wild producer attempts to flood us.
2985	 */
2986	sk->sk_backlog.len = 0;
2987}
2988
2989void __sk_flush_backlog(struct sock *sk)
2990{
2991	spin_lock_bh(&sk->sk_lock.slock);
2992	__release_sock(sk);
2993
2994	if (sk->sk_prot->release_cb)
2995		INDIRECT_CALL_INET_1(sk->sk_prot->release_cb,
2996				     tcp_release_cb, sk);
2997
2998	spin_unlock_bh(&sk->sk_lock.slock);
2999}
3000EXPORT_SYMBOL_GPL(__sk_flush_backlog);
3001
3002/**
3003 * sk_wait_data - wait for data to arrive at sk_receive_queue
3004 * @sk:    sock to wait on
3005 * @timeo: for how long
3006 * @skb:   last skb seen on sk_receive_queue
3007 *
3008 * Now the socket state, including sk->sk_err, is changed only under the lock,
3009 * hence we may omit checks after joining the wait queue.
3010 * We check the receive queue before schedule() only as an optimization;
3011 * it is very likely that release_sock() added new data.
3012 */
3013int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb)
3014{
3015	DEFINE_WAIT_FUNC(wait, woken_wake_function);
3016	int rc;
3017
3018	add_wait_queue(sk_sleep(sk), &wait);
3019	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
3020	rc = sk_wait_event(sk, timeo, skb_peek_tail(&sk->sk_receive_queue) != skb, &wait);
3021	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
3022	remove_wait_queue(sk_sleep(sk), &wait);
3023	return rc;
3024}
3025EXPORT_SYMBOL(sk_wait_data);
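/*
 * Sketch of how a blocking recvmsg path typically drives sk_wait_data()
 * while holding the socket lock (illustrative only; a real caller also
 * re-checks sk_err and shutdown after waking):
 *
 *	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 *	while (!(skb = skb_peek(&sk->sk_receive_queue))) {
 *		if (!timeo)
 *			return -EAGAIN;
 *		if (signal_pending(current))
 *			return sock_intr_errno(timeo);
 *		sk_wait_data(sk, &timeo, NULL);
 *	}
 */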
3026
3027/**
3028 *	__sk_mem_raise_allocated - increase memory_allocated
3029 *	@sk: socket
3030 *	@size: memory size to allocate
3031 *	@amt: pages to allocate
3032 *	@kind: allocation type
3033 *
3034 *	Similar to __sk_mem_schedule(), but does not update sk_forward_alloc.
3035 *
3036 *	Unlike the globally shared limits among sockets under the same protocol,
3037 *	consuming the budget of a memcg won't have a direct effect on other ones.
3038 *	So be optimistic about memcg's tolerance, and leave the callers to decide
3039 *	whether or not to raise allocated through sk_under_memory_pressure() or
3040 *	its variants.
3041 */
3042int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind)
3043{
3044	struct mem_cgroup *memcg = mem_cgroup_sockets_enabled ? sk->sk_memcg : NULL;
3045	struct proto *prot = sk->sk_prot;
3046	bool charged = false;
3047	long allocated;
3048
3049	sk_memory_allocated_add(sk, amt);
3050	allocated = sk_memory_allocated(sk);
3051
3052	if (memcg) {
3053		if (!mem_cgroup_charge_skmem(memcg, amt, gfp_memcg_charge()))
3054			goto suppress_allocation;
3055		charged = true;
3056	}
3057
3058	/* Under limit. */
3059	if (allocated <= sk_prot_mem_limits(sk, 0)) {
3060		sk_leave_memory_pressure(sk);
3061		return 1;
3062	}
3063
3064	/* Under pressure. */
3065	if (allocated > sk_prot_mem_limits(sk, 1))
3066		sk_enter_memory_pressure(sk);
3067
3068	/* Over hard limit. */
3069	if (allocated > sk_prot_mem_limits(sk, 2))
3070		goto suppress_allocation;
3071
3072	/* Guarantee minimum buffer size under pressure (either global
3073	 * or memcg) to make sure features described in RFC 7323 (TCP
3074	 * Extensions for High Performance) work properly.
3075	 *
3076	 * This rule does NOT stand when the usage exceeds the global or
3077	 * memcg hard limit, or else a DoS attack could take place by spawning
3078	 * lots of sockets whose usage is under the minimum buffer size.
3079	 */
3080	if (kind == SK_MEM_RECV) {
3081		if (atomic_read(&sk->sk_rmem_alloc) < sk_get_rmem0(sk, prot))
3082			return 1;
3083
3084	} else { /* SK_MEM_SEND */
3085		int wmem0 = sk_get_wmem0(sk, prot);
3086
3087		if (sk->sk_type == SOCK_STREAM) {
3088			if (sk->sk_wmem_queued < wmem0)
3089				return 1;
3090		} else if (refcount_read(&sk->sk_wmem_alloc) < wmem0) {
3091				return 1;
3092		}
3093	}
3094
3095	if (sk_has_memory_pressure(sk)) {
3096		u64 alloc;
3097
3098		/* The following 'average' heuristic is within the
3099		 * scope of global accounting, so it only makes
3100		 * sense for global memory pressure.
3101		 */
3102		if (!sk_under_global_memory_pressure(sk))
3103			return 1;
3104
3105		/* Try to be fair among all the sockets under global
3106		 * pressure by allowing the ones that are below average
3107		 * usage to raise.
3108		 */
3109		alloc = sk_sockets_allocated_read_positive(sk);
3110		if (sk_prot_mem_limits(sk, 2) > alloc *
3111		    sk_mem_pages(sk->sk_wmem_queued +
3112				 atomic_read(&sk->sk_rmem_alloc) +
3113				 sk->sk_forward_alloc))
3114			return 1;
3115	}
3116
3117suppress_allocation:
3118
3119	if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
3120		sk_stream_moderate_sndbuf(sk);
3121
3122		/* Fail only if socket is _under_ its sndbuf.
3123		 * In this case we cannot block, so that we have to fail.
3124		 */
3125		if (sk->sk_wmem_queued + size >= sk->sk_sndbuf) {
3126			/* Force charge with __GFP_NOFAIL */
3127			if (memcg && !charged) {
3128				mem_cgroup_charge_skmem(memcg, amt,
3129					gfp_memcg_charge() | __GFP_NOFAIL);
3130			}
3131			return 1;
3132		}
3133	}
3134
3135	if (kind == SK_MEM_SEND || (kind == SK_MEM_RECV && charged))
3136		trace_sock_exceed_buf_limit(sk, prot, allocated, kind);
3137
3138	sk_memory_allocated_sub(sk, amt);
3139
3140	if (charged)
3141		mem_cgroup_uncharge_skmem(memcg, amt);
3142
3143	return 0;
3144}
3145
3146/**
3147 *	__sk_mem_schedule - increase sk_forward_alloc and memory_allocated
3148 *	@sk: socket
3149 *	@size: memory size to allocate
3150 *	@kind: allocation type
3151 *
3152 *	If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
3153 *	rmem allocation. This function assumes that protocols which have
3154 *	memory_pressure use sk_wmem_queued as write buffer accounting.
3155 */
3156int __sk_mem_schedule(struct sock *sk, int size, int kind)
3157{
3158	int ret, amt = sk_mem_pages(size);
3159
3160	sk_forward_alloc_add(sk, amt << PAGE_SHIFT);
3161	ret = __sk_mem_raise_allocated(sk, size, amt, kind);
3162	if (!ret)
3163		sk_forward_alloc_add(sk, -(amt << PAGE_SHIFT));
3164	return ret;
3165}
3166EXPORT_SYMBOL(__sk_mem_schedule);
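/*
 * Protocols normally reach __sk_mem_schedule() through the sk_rmem_schedule()
 * and sk_wmem_schedule() wrappers in net/sock.h.  A rough receive-side charge
 * sketch (the error code is illustrative):
 *
 *	if (!sk_rmem_schedule(sk, skb, skb->truesize))
 *		return -ENOBUFS;
 *	skb_set_owner_r(skb, sk);
 */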
3167
3168/**
3169 *	__sk_mem_reduce_allocated - reclaim memory_allocated
3170 *	@sk: socket
3171 *	@amount: number of quanta
3172 *
3173 *	Similar to __sk_mem_reclaim(), but does not update sk_forward_alloc
3174 */
3175void __sk_mem_reduce_allocated(struct sock *sk, int amount)
3176{
3177	sk_memory_allocated_sub(sk, amount);
3178
3179	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
3180		mem_cgroup_uncharge_skmem(sk->sk_memcg, amount);
3181
3182	if (sk_under_global_memory_pressure(sk) &&
3183	    (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
3184		sk_leave_memory_pressure(sk);
3185}
3186
3187/**
3188 *	__sk_mem_reclaim - reclaim sk_forward_alloc and memory_allocated
3189 *	@sk: socket
3190 *	@amount: number of bytes (rounded down to a PAGE_SIZE multiple)
3191 */
3192void __sk_mem_reclaim(struct sock *sk, int amount)
3193{
3194	amount >>= PAGE_SHIFT;
3195	sk_forward_alloc_add(sk, -(amount << PAGE_SHIFT));
3196	__sk_mem_reduce_allocated(sk, amount);
3197}
3198EXPORT_SYMBOL(__sk_mem_reclaim);
3199
3200int sk_set_peek_off(struct sock *sk, int val)
3201{
3202	WRITE_ONCE(sk->sk_peek_off, val);
3203	return 0;
3204}
3205EXPORT_SYMBOL_GPL(sk_set_peek_off);
3206
3207/*
3208 * Set of default routines for initialising struct proto_ops when
3209 * the protocol does not support a particular function. In certain
3210 * cases where it makes no sense for a protocol to have a "do nothing"
3211 * function, some default processing is provided.
3212 */
3213
3214int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
3215{
3216	return -EOPNOTSUPP;
3217}
3218EXPORT_SYMBOL(sock_no_bind);
3219
3220int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
3221		    int len, int flags)
3222{
3223	return -EOPNOTSUPP;
3224}
3225EXPORT_SYMBOL(sock_no_connect);
3226
3227int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
3228{
3229	return -EOPNOTSUPP;
3230}
3231EXPORT_SYMBOL(sock_no_socketpair);
3232
3233int sock_no_accept(struct socket *sock, struct socket *newsock, int flags,
3234		   bool kern)
3235{
3236	return -EOPNOTSUPP;
3237}
3238EXPORT_SYMBOL(sock_no_accept);
3239
3240int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
3241		    int peer)
3242{
3243	return -EOPNOTSUPP;
3244}
3245EXPORT_SYMBOL(sock_no_getname);
3246
3247int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
3248{
3249	return -EOPNOTSUPP;
3250}
3251EXPORT_SYMBOL(sock_no_ioctl);
3252
3253int sock_no_listen(struct socket *sock, int backlog)
3254{
3255	return -EOPNOTSUPP;
3256}
3257EXPORT_SYMBOL(sock_no_listen);
3258
3259int sock_no_shutdown(struct socket *sock, int how)
3260{
3261	return -EOPNOTSUPP;
3262}
3263EXPORT_SYMBOL(sock_no_shutdown);
3264
3265int sock_no_sendmsg(struct socket *sock, struct msghdr *m, size_t len)
3266{
3267	return -EOPNOTSUPP;
3268}
3269EXPORT_SYMBOL(sock_no_sendmsg);
3270
3271int sock_no_sendmsg_locked(struct sock *sk, struct msghdr *m, size_t len)
3272{
3273	return -EOPNOTSUPP;
3274}
3275EXPORT_SYMBOL(sock_no_sendmsg_locked);
3276
3277int sock_no_recvmsg(struct socket *sock, struct msghdr *m, size_t len,
3278		    int flags)
3279{
3280	return -EOPNOTSUPP;
3281}
3282EXPORT_SYMBOL(sock_no_recvmsg);
3283
3284int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
3285{
3286	/* Mirror missing mmap method error code */
3287	return -ENODEV;
3288}
3289EXPORT_SYMBOL(sock_no_mmap);
3290
3291/*
3292 * When a file is received (via SCM_RIGHTS, etc), we must bump the
3293 * various sock-based usage counts.
3294 */
3295void __receive_sock(struct file *file)
3296{
3297	struct socket *sock;
3298
3299	sock = sock_from_file(file);
3300	if (sock) {
3301		sock_update_netprioidx(&sock->sk->sk_cgrp_data);
3302		sock_update_classid(&sock->sk->sk_cgrp_data);
3303	}
3304}
3305
3306/*
3307 *	Default Socket Callbacks
3308 */
3309
3310static void sock_def_wakeup(struct sock *sk)
3311{
3312	struct socket_wq *wq;
3313
3314	rcu_read_lock();
3315	wq = rcu_dereference(sk->sk_wq);
3316	if (skwq_has_sleeper(wq))
3317		wake_up_interruptible_all(&wq->wait);
3318	rcu_read_unlock();
3319}
3320
3321static void sock_def_error_report(struct sock *sk)
3322{
3323	struct socket_wq *wq;
3324
3325	rcu_read_lock();
3326	wq = rcu_dereference(sk->sk_wq);
3327	if (skwq_has_sleeper(wq))
3328		wake_up_interruptible_poll(&wq->wait, EPOLLERR);
3329	sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
3330	rcu_read_unlock();
3331}
3332
3333void sock_def_readable(struct sock *sk)
3334{
3335	struct socket_wq *wq;
3336
3337	trace_sk_data_ready(sk);
3338
3339	rcu_read_lock();
3340	wq = rcu_dereference(sk->sk_wq);
3341	if (skwq_has_sleeper(wq))
3342		wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN | EPOLLPRI |
3343						EPOLLRDNORM | EPOLLRDBAND);
3344	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
3345	rcu_read_unlock();
3346}
3347
3348static void sock_def_write_space(struct sock *sk)
3349{
3350	struct socket_wq *wq;
3351
3352	rcu_read_lock();
3353
3354	/* Do not wake up a writer until he can make "significant"
3355	 * progress.  --DaveM
3356	 */
3357	if (sock_writeable(sk)) {
3358		wq = rcu_dereference(sk->sk_wq);
3359		if (skwq_has_sleeper(wq))
3360			wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
3361						EPOLLWRNORM | EPOLLWRBAND);
3362
3363		/* Should agree with poll, otherwise some programs break */
3364		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
3365	}
3366
3367	rcu_read_unlock();
3368}
3369
3370/* An optimised version of sock_def_write_space(), should only be called
3371 * for SOCK_RCU_FREE sockets under RCU read section and after putting
3372 * ->sk_wmem_alloc.
3373 */
3374static void sock_def_write_space_wfree(struct sock *sk)
3375{
3376	/* Do not wake up a writer until he can make "significant"
3377	 * progress.  --DaveM
3378	 */
3379	if (sock_writeable(sk)) {
3380		struct socket_wq *wq = rcu_dereference(sk->sk_wq);
3381
3382		/* rely on refcount_sub from sock_wfree() */
3383		smp_mb__after_atomic();
3384		if (wq && waitqueue_active(&wq->wait))
3385			wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
3386						EPOLLWRNORM | EPOLLWRBAND);
3387
3388		/* Should agree with poll, otherwise some programs break */
3389		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
3390	}
3391}
3392
3393static void sock_def_destruct(struct sock *sk)
3394{
3395}
3396
3397void sk_send_sigurg(struct sock *sk)
3398{
3399	if (sk->sk_socket && sk->sk_socket->file)
3400		if (send_sigurg(&sk->sk_socket->file->f_owner))
3401			sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
3402}
3403EXPORT_SYMBOL(sk_send_sigurg);
3404
3405void sk_reset_timer(struct sock *sk, struct timer_list* timer,
3406		    unsigned long expires)
3407{
3408	if (!mod_timer(timer, expires))
3409		sock_hold(sk);
3410}
3411EXPORT_SYMBOL(sk_reset_timer);
3412
3413void sk_stop_timer(struct sock *sk, struct timer_list* timer)
3414{
3415	if (del_timer(timer))
3416		__sock_put(sk);
3417}
3418EXPORT_SYMBOL(sk_stop_timer);
3419
3420void sk_stop_timer_sync(struct sock *sk, struct timer_list *timer)
3421{
3422	if (del_timer_sync(timer))
3423		__sock_put(sk);
3424}
3425EXPORT_SYMBOL(sk_stop_timer_sync);
3426
3427void sock_init_data_uid(struct socket *sock, struct sock *sk, kuid_t uid)
3428{
3429	sk_init_common(sk);
3430	sk->sk_send_head	=	NULL;
3431
3432	timer_setup(&sk->sk_timer, NULL, 0);
3433
3434	sk->sk_allocation	=	GFP_KERNEL;
3435	sk->sk_rcvbuf		=	READ_ONCE(sysctl_rmem_default);
3436	sk->sk_sndbuf		=	READ_ONCE(sysctl_wmem_default);
3437	sk->sk_state		=	TCP_CLOSE;
3438	sk->sk_use_task_frag	=	true;
3439	sk_set_socket(sk, sock);
3440
3441	sock_set_flag(sk, SOCK_ZAPPED);
3442
3443	if (sock) {
3444		sk->sk_type	=	sock->type;
3445		RCU_INIT_POINTER(sk->sk_wq, &sock->wq);
3446		sock->sk	=	sk;
3447	} else {
3448		RCU_INIT_POINTER(sk->sk_wq, NULL);
3449	}
3450	sk->sk_uid	=	uid;
3451
3452	rwlock_init(&sk->sk_callback_lock);
3453	if (sk->sk_kern_sock)
3454		lockdep_set_class_and_name(
3455			&sk->sk_callback_lock,
3456			af_kern_callback_keys + sk->sk_family,
3457			af_family_kern_clock_key_strings[sk->sk_family]);
3458	else
3459		lockdep_set_class_and_name(
3460			&sk->sk_callback_lock,
3461			af_callback_keys + sk->sk_family,
3462			af_family_clock_key_strings[sk->sk_family]);
3463
3464	sk->sk_state_change	=	sock_def_wakeup;
3465	sk->sk_data_ready	=	sock_def_readable;
3466	sk->sk_write_space	=	sock_def_write_space;
3467	sk->sk_error_report	=	sock_def_error_report;
3468	sk->sk_destruct		=	sock_def_destruct;
3469
3470	sk->sk_frag.page	=	NULL;
3471	sk->sk_frag.offset	=	0;
3472	sk->sk_peek_off		=	-1;
3473
3474	sk->sk_peer_pid 	=	NULL;
3475	sk->sk_peer_cred	=	NULL;
3476	spin_lock_init(&sk->sk_peer_lock);
3477
3478	sk->sk_write_pending	=	0;
3479	sk->sk_rcvlowat		=	1;
3480	sk->sk_rcvtimeo		=	MAX_SCHEDULE_TIMEOUT;
3481	sk->sk_sndtimeo		=	MAX_SCHEDULE_TIMEOUT;
3482
3483	sk->sk_stamp = SK_DEFAULT_STAMP;
3484#if BITS_PER_LONG==32
3485	seqlock_init(&sk->sk_stamp_seq);
3486#endif
3487	atomic_set(&sk->sk_zckey, 0);
3488
3489#ifdef CONFIG_NET_RX_BUSY_POLL
3490	sk->sk_napi_id		=	0;
3491	sk->sk_ll_usec		=	READ_ONCE(sysctl_net_busy_read);
3492#endif
3493
3494	sk->sk_max_pacing_rate = ~0UL;
3495	sk->sk_pacing_rate = ~0UL;
3496	WRITE_ONCE(sk->sk_pacing_shift, 10);
3497	sk->sk_incoming_cpu = -1;
3498
3499	sk_rx_queue_clear(sk);
3500	/*
3501	 * Before updating sk_refcnt, we must commit prior changes to memory
3502	 * (Documentation/RCU/rculist_nulls.rst for details)
3503	 */
3504	smp_wmb();
3505	refcount_set(&sk->sk_refcnt, 1);
3506	atomic_set(&sk->sk_drops, 0);
3507}
3508EXPORT_SYMBOL(sock_init_data_uid);
3509
3510void sock_init_data(struct socket *sock, struct sock *sk)
3511{
3512	kuid_t uid = sock ?
3513		SOCK_INODE(sock)->i_uid :
3514		make_kuid(sock_net(sk)->user_ns, 0);
3515
3516	sock_init_data_uid(sock, sk, uid);
3517}
3518EXPORT_SYMBOL(sock_init_data);
3519
3520void lock_sock_nested(struct sock *sk, int subclass)
3521{
3522	/* The sk_lock has mutex_lock() semantics here. */
3523	mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
3524
3525	might_sleep();
3526	spin_lock_bh(&sk->sk_lock.slock);
3527	if (sock_owned_by_user_nocheck(sk))
3528		__lock_sock(sk);
3529	sk->sk_lock.owned = 1;
3530	spin_unlock_bh(&sk->sk_lock.slock);
3531}
3532EXPORT_SYMBOL(lock_sock_nested);
3533
3534void release_sock(struct sock *sk)
3535{
3536	spin_lock_bh(&sk->sk_lock.slock);
3537	if (sk->sk_backlog.tail)
3538		__release_sock(sk);
3539
3540	if (sk->sk_prot->release_cb)
3541		INDIRECT_CALL_INET_1(sk->sk_prot->release_cb,
3542				     tcp_release_cb, sk);
3543
3544	sock_release_ownership(sk);
3545	if (waitqueue_active(&sk->sk_lock.wq))
3546		wake_up(&sk->sk_lock.wq);
3547	spin_unlock_bh(&sk->sk_lock.slock);
3548}
3549EXPORT_SYMBOL(release_sock);
3550
3551bool __lock_sock_fast(struct sock *sk) __acquires(&sk->sk_lock.slock)
3552{
3553	might_sleep();
3554	spin_lock_bh(&sk->sk_lock.slock);
3555
3556	if (!sock_owned_by_user_nocheck(sk)) {
3557		/*
3558		 * Fast path return with bottom halves disabled and
3559		 * sock::sk_lock.slock held.
3560		 *
3561		 * The 'mutex' is not contended and holding
3562		 * sock::sk_lock.slock prevents all other lockers from
3563		 * proceeding, so the corresponding unlock_sock_fast() can
3564		 * avoid the slow path of release_sock() completely and
3565		 * just release slock.
3566		 *
3567		 * From a semantic POV this is equivalent to 'acquiring'
3568		 * the 'mutex', hence the corresponding lockdep
3569		 * mutex_release() has to happen in the fast path of
3570		 * unlock_sock_fast().
3571		 */
3572		return false;
3573	}
3574
3575	__lock_sock(sk);
3576	sk->sk_lock.owned = 1;
3577	__acquire(&sk->sk_lock.slock);
3578	spin_unlock_bh(&sk->sk_lock.slock);
3579	return true;
3580}
3581EXPORT_SYMBOL(__lock_sock_fast);
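/*
 * Callers use this through the lock_sock_fast()/unlock_sock_fast() wrappers
 * in net/sock.h; a minimal usage sketch:
 *
 *	bool slow = lock_sock_fast(sk);
 *
 *	... short, non-sleeping critical section ...
 *	unlock_sock_fast(sk, slow);
 */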
3582
3583int sock_gettstamp(struct socket *sock, void __user *userstamp,
3584		   bool timeval, bool time32)
3585{
3586	struct sock *sk = sock->sk;
3587	struct timespec64 ts;
3588
3589	sock_enable_timestamp(sk, SOCK_TIMESTAMP);
3590	ts = ktime_to_timespec64(sock_read_timestamp(sk));
3591	if (ts.tv_sec == -1)
3592		return -ENOENT;
3593	if (ts.tv_sec == 0) {
3594		ktime_t kt = ktime_get_real();
3595		sock_write_timestamp(sk, kt);
3596		ts = ktime_to_timespec64(kt);
3597	}
3598
3599	if (timeval)
3600		ts.tv_nsec /= 1000;
3601
3602#ifdef CONFIG_COMPAT_32BIT_TIME
3603	if (time32)
3604		return put_old_timespec32(&ts, userstamp);
3605#endif
3606#ifdef CONFIG_SPARC64
3607	/* beware of padding in sparc64 timeval */
3608	if (timeval && !in_compat_syscall()) {
3609		struct __kernel_old_timeval __user tv = {
3610			.tv_sec = ts.tv_sec,
3611			.tv_usec = ts.tv_nsec,
3612		};
3613		if (copy_to_user(userstamp, &tv, sizeof(tv)))
3614			return -EFAULT;
3615		return 0;
3616	}
3617#endif
3618	return put_timespec64(&ts, userstamp);
3619}
3620EXPORT_SYMBOL(sock_gettstamp);
3621
3622void sock_enable_timestamp(struct sock *sk, enum sock_flags flag)
3623{
3624	if (!sock_flag(sk, flag)) {
3625		unsigned long previous_flags = sk->sk_flags;
3626
3627		sock_set_flag(sk, flag);
3628		/*
3629		 * we just set one of the two flags which require net
3630		 * time stamping, but time stamping might have been on
3631		 * already because of the other one
3632		 */
3633		if (sock_needs_netstamp(sk) &&
3634		    !(previous_flags & SK_FLAGS_TIMESTAMP))
3635			net_enable_timestamp();
3636	}
3637}
3638
3639int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
3640		       int level, int type)
3641{
3642	struct sock_exterr_skb *serr;
3643	struct sk_buff *skb;
3644	int copied, err;
3645
3646	err = -EAGAIN;
3647	skb = sock_dequeue_err_skb(sk);
3648	if (skb == NULL)
3649		goto out;
3650
3651	copied = skb->len;
3652	if (copied > len) {
3653		msg->msg_flags |= MSG_TRUNC;
3654		copied = len;
3655	}
3656	err = skb_copy_datagram_msg(skb, 0, msg, copied);
3657	if (err)
3658		goto out_free_skb;
3659
3660	sock_recv_timestamp(msg, sk, skb);
3661
3662	serr = SKB_EXT_ERR(skb);
3663	put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);
3664
3665	msg->msg_flags |= MSG_ERRQUEUE;
3666	err = copied;
3667
3668out_free_skb:
3669	kfree_skb(skb);
3670out:
3671	return err;
3672}
3673EXPORT_SYMBOL(sock_recv_errqueue);
3674
3675/*
3676 *	Get a socket option on a socket.
3677 *
3678 *	FIX: POSIX 1003.1g is very ambiguous here. It states that
3679 *	asynchronous errors should be reported by getsockopt. We assume
3680 *	this means if you specify SO_ERROR (otherwise what's the point of it).
3681 */
3682int sock_common_getsockopt(struct socket *sock, int level, int optname,
3683			   char __user *optval, int __user *optlen)
3684{
3685	struct sock *sk = sock->sk;
3686
3687	/* IPV6_ADDRFORM can change sk->sk_prot under us. */
3688	return READ_ONCE(sk->sk_prot)->getsockopt(sk, level, optname, optval, optlen);
3689}
3690EXPORT_SYMBOL(sock_common_getsockopt);
3691
3692int sock_common_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
3693			int flags)
3694{
3695	struct sock *sk = sock->sk;
3696	int addr_len = 0;
3697	int err;
3698
3699	err = sk->sk_prot->recvmsg(sk, msg, size, flags, &addr_len);
3700	if (err >= 0)
3701		msg->msg_namelen = addr_len;
3702	return err;
3703}
3704EXPORT_SYMBOL(sock_common_recvmsg);
3705
3706/*
3707 *	Set socket options on an inet socket.
3708 */
3709int sock_common_setsockopt(struct socket *sock, int level, int optname,
3710			   sockptr_t optval, unsigned int optlen)
3711{
3712	struct sock *sk = sock->sk;
3713
3714	/* IPV6_ADDRFORM can change sk->sk_prot under us. */
3715	return READ_ONCE(sk->sk_prot)->setsockopt(sk, level, optname, optval, optlen);
3716}
3717EXPORT_SYMBOL(sock_common_setsockopt);
3718
3719void sk_common_release(struct sock *sk)
3720{
3721	if (sk->sk_prot->destroy)
3722		sk->sk_prot->destroy(sk);
3723
3724	/*
3725	 * Observation: when sk_common_release is called, processes have
3726	 * no access to the socket, but the network stack still does.
3727	 * Step one, detach it from networking:
3728	 *
3729	 * A. Remove from hash tables.
3730	 */
3731
3732	sk->sk_prot->unhash(sk);
3733
3734	/*
3735	 * At this point the socket cannot receive new packets, but it is possible
3736	 * that some packets are in flight because some CPU is running the receiver
3737	 * and did a hash table lookup before we unhashed the socket. They will reach
3738	 * the receive queue and will be purged by the socket destructor.
3739	 *
3740	 * Also we still have packets pending on the receive queue and probably
3741	 * our own packets waiting in device queues. sock_destroy will drain the
3742	 * receive queue, but transmitted packets will delay socket destruction
3743	 * until the last reference is released.
3744	 */
3745
3746	sock_orphan(sk);
3747
3748	xfrm_sk_free_policy(sk);
3749
3750	sock_put(sk);
3751}
3752EXPORT_SYMBOL(sk_common_release);
3753
3754void sk_get_meminfo(const struct sock *sk, u32 *mem)
3755{
3756	memset(mem, 0, sizeof(*mem) * SK_MEMINFO_VARS);
3757
3758	mem[SK_MEMINFO_RMEM_ALLOC] = sk_rmem_alloc_get(sk);
3759	mem[SK_MEMINFO_RCVBUF] = READ_ONCE(sk->sk_rcvbuf);
3760	mem[SK_MEMINFO_WMEM_ALLOC] = sk_wmem_alloc_get(sk);
3761	mem[SK_MEMINFO_SNDBUF] = READ_ONCE(sk->sk_sndbuf);
3762	mem[SK_MEMINFO_FWD_ALLOC] = sk_forward_alloc_get(sk);
3763	mem[SK_MEMINFO_WMEM_QUEUED] = READ_ONCE(sk->sk_wmem_queued);
3764	mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);
3765	mem[SK_MEMINFO_BACKLOG] = READ_ONCE(sk->sk_backlog.len);
3766	mem[SK_MEMINFO_DROPS] = atomic_read(&sk->sk_drops);
3767}
3768
3769#ifdef CONFIG_PROC_FS
3770static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);
3771
3772int sock_prot_inuse_get(struct net *net, struct proto *prot)
3773{
3774	int cpu, idx = prot->inuse_idx;
3775	int res = 0;
3776
3777	for_each_possible_cpu(cpu)
3778		res += per_cpu_ptr(net->core.prot_inuse, cpu)->val[idx];
3779
3780	return res >= 0 ? res : 0;
3781}
3782EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
3783
3784int sock_inuse_get(struct net *net)
3785{
3786	int cpu, res = 0;
3787
3788	for_each_possible_cpu(cpu)
3789		res += per_cpu_ptr(net->core.prot_inuse, cpu)->all;
3790
3791	return res;
3792}
3793
3794EXPORT_SYMBOL_GPL(sock_inuse_get);
3795
3796static int __net_init sock_inuse_init_net(struct net *net)
3797{
3798	net->core.prot_inuse = alloc_percpu(struct prot_inuse);
3799	if (net->core.prot_inuse == NULL)
3800		return -ENOMEM;
3801	return 0;
3802}
3803
3804static void __net_exit sock_inuse_exit_net(struct net *net)
3805{
3806	free_percpu(net->core.prot_inuse);
3807}
3808
3809static struct pernet_operations net_inuse_ops = {
3810	.init = sock_inuse_init_net,
3811	.exit = sock_inuse_exit_net,
3812};
3813
3814static __init int net_inuse_init(void)
3815{
3816	if (register_pernet_subsys(&net_inuse_ops))
3817		panic("Cannot initialize net inuse counters");
3818
3819	return 0;
3820}
3821
3822core_initcall(net_inuse_init);
3823
3824static int assign_proto_idx(struct proto *prot)
3825{
3826	prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);
3827
3828	if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
3829		pr_err("PROTO_INUSE_NR exhausted\n");
3830		return -ENOSPC;
3831	}
3832
3833	set_bit(prot->inuse_idx, proto_inuse_idx);
3834	return 0;
3835}
3836
3837static void release_proto_idx(struct proto *prot)
3838{
3839	if (prot->inuse_idx != PROTO_INUSE_NR - 1)
3840		clear_bit(prot->inuse_idx, proto_inuse_idx);
3841}
3842#else
3843static inline int assign_proto_idx(struct proto *prot)
3844{
3845	return 0;
3846}
3847
3848static inline void release_proto_idx(struct proto *prot)
3849{
3850}
3851
3852#endif
3853
3854static void tw_prot_cleanup(struct timewait_sock_ops *twsk_prot)
3855{
3856	if (!twsk_prot)
3857		return;
3858	kfree(twsk_prot->twsk_slab_name);
3859	twsk_prot->twsk_slab_name = NULL;
3860	kmem_cache_destroy(twsk_prot->twsk_slab);
3861	twsk_prot->twsk_slab = NULL;
3862}
3863
3864static int tw_prot_init(const struct proto *prot)
3865{
3866	struct timewait_sock_ops *twsk_prot = prot->twsk_prot;
3867
3868	if (!twsk_prot)
3869		return 0;
3870
3871	twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s",
3872					      prot->name);
3873	if (!twsk_prot->twsk_slab_name)
3874		return -ENOMEM;
3875
3876	twsk_prot->twsk_slab =
3877		kmem_cache_create(twsk_prot->twsk_slab_name,
3878				  twsk_prot->twsk_obj_size, 0,
3879				  SLAB_ACCOUNT | prot->slab_flags,
3880				  NULL);
3881	if (!twsk_prot->twsk_slab) {
3882		pr_crit("%s: Can't create timewait sock SLAB cache!\n",
3883			prot->name);
3884		return -ENOMEM;
3885	}
3886
3887	return 0;
3888}
3889
3890static void req_prot_cleanup(struct request_sock_ops *rsk_prot)
3891{
3892	if (!rsk_prot)
3893		return;
3894	kfree(rsk_prot->slab_name);
3895	rsk_prot->slab_name = NULL;
3896	kmem_cache_destroy(rsk_prot->slab);
3897	rsk_prot->slab = NULL;
3898}
3899
3900static int req_prot_init(const struct proto *prot)
3901{
3902	struct request_sock_ops *rsk_prot = prot->rsk_prot;
3903
3904	if (!rsk_prot)
3905		return 0;
3906
3907	rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s",
3908					prot->name);
3909	if (!rsk_prot->slab_name)
3910		return -ENOMEM;
3911
3912	rsk_prot->slab = kmem_cache_create(rsk_prot->slab_name,
3913					   rsk_prot->obj_size, 0,
3914					   SLAB_ACCOUNT | prot->slab_flags,
3915					   NULL);
3916
3917	if (!rsk_prot->slab) {
3918		pr_crit("%s: Can't create request sock SLAB cache!\n",
3919			prot->name);
3920		return -ENOMEM;
3921	}
3922	return 0;
3923}
3924
3925int proto_register(struct proto *prot, int alloc_slab)
3926{
3927	int ret = -ENOBUFS;
3928
3929	if (prot->memory_allocated && !prot->sysctl_mem) {
3930		pr_err("%s: missing sysctl_mem\n", prot->name);
3931		return -EINVAL;
3932	}
3933	if (prot->memory_allocated && !prot->per_cpu_fw_alloc) {
3934		pr_err("%s: missing per_cpu_fw_alloc\n", prot->name);
3935		return -EINVAL;
3936	}
3937	if (alloc_slab) {
3938		prot->slab = kmem_cache_create_usercopy(prot->name,
3939					prot->obj_size, 0,
3940					SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT |
3941					prot->slab_flags,
3942					prot->useroffset, prot->usersize,
3943					NULL);
3944
3945		if (prot->slab == NULL) {
3946			pr_crit("%s: Can't create sock SLAB cache!\n",
3947				prot->name);
3948			goto out;
3949		}
3950
3951		if (req_prot_init(prot))
3952			goto out_free_request_sock_slab;
3953
3954		if (tw_prot_init(prot))
3955			goto out_free_timewait_sock_slab;
3956	}
3957
3958	mutex_lock(&proto_list_mutex);
3959	ret = assign_proto_idx(prot);
3960	if (ret) {
3961		mutex_unlock(&proto_list_mutex);
3962		goto out_free_timewait_sock_slab;
3963	}
3964	list_add(&prot->node, &proto_list);
3965	mutex_unlock(&proto_list_mutex);
3966	return ret;
3967
3968out_free_timewait_sock_slab:
3969	if (alloc_slab)
3970		tw_prot_cleanup(prot->twsk_prot);
3971out_free_request_sock_slab:
3972	if (alloc_slab) {
3973		req_prot_cleanup(prot->rsk_prot);
3974
3975		kmem_cache_destroy(prot->slab);
3976		prot->slab = NULL;
3977	}
3978out:
3979	return ret;
3980}
3981EXPORT_SYMBOL(proto_register);
3982
3983void proto_unregister(struct proto *prot)
3984{
3985	mutex_lock(&proto_list_mutex);
3986	release_proto_idx(prot);
3987	list_del(&prot->node);
3988	mutex_unlock(&proto_list_mutex);
3989
3990	kmem_cache_destroy(prot->slab);
3991	prot->slab = NULL;
3992
3993	req_prot_cleanup(prot->rsk_prot);
3994	tw_prot_cleanup(prot->twsk_prot);
3995}
3996EXPORT_SYMBOL(proto_unregister);
3997
3998int sock_load_diag_module(int family, int protocol)
3999{
4000	if (!protocol) {
4001		if (!sock_is_registered(family))
4002			return -ENOENT;
4003
4004		return request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
4005				      NETLINK_SOCK_DIAG, family);
4006	}
4007
4008#ifdef CONFIG_INET
4009	if (family == AF_INET &&
4010	    protocol != IPPROTO_RAW &&
4011	    protocol < MAX_INET_PROTOS &&
4012	    !rcu_access_pointer(inet_protos[protocol]))
4013		return -ENOENT;
4014#endif
4015
4016	return request_module("net-pf-%d-proto-%d-type-%d-%d", PF_NETLINK,
4017			      NETLINK_SOCK_DIAG, family, protocol);
4018}
4019EXPORT_SYMBOL(sock_load_diag_module);
4020
4021#ifdef CONFIG_PROC_FS
4022static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
4023	__acquires(proto_list_mutex)
4024{
4025	mutex_lock(&proto_list_mutex);
4026	return seq_list_start_head(&proto_list, *pos);
4027}
4028
4029static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4030{
4031	return seq_list_next(v, &proto_list, pos);
4032}
4033
4034static void proto_seq_stop(struct seq_file *seq, void *v)
4035	__releases(proto_list_mutex)
4036{
4037	mutex_unlock(&proto_list_mutex);
4038}
4039
4040static char proto_method_implemented(const void *method)
4041{
4042	return method == NULL ? 'n' : 'y';
4043}
4044static long sock_prot_memory_allocated(struct proto *proto)
4045{
4046	return proto->memory_allocated != NULL ? proto_memory_allocated(proto) : -1L;
4047}
4048
4049static const char *sock_prot_memory_pressure(struct proto *proto)
4050{
4051	return proto->memory_pressure != NULL ?
4052	proto_memory_pressure(proto) ? "yes" : "no" : "NI";
4053}
4054
4055static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
4056{
4057
4058	seq_printf(seq, "%-9s %4u %6d  %6ld   %-3s %6u   %-3s  %-10s "
4059			"%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
4060		   proto->name,
4061		   proto->obj_size,
4062		   sock_prot_inuse_get(seq_file_net(seq), proto),
4063		   sock_prot_memory_allocated(proto),
4064		   sock_prot_memory_pressure(proto),
4065		   proto->max_header,
4066		   proto->slab == NULL ? "no" : "yes",
4067		   module_name(proto->owner),
4068		   proto_method_implemented(proto->close),
4069		   proto_method_implemented(proto->connect),
4070		   proto_method_implemented(proto->disconnect),
4071		   proto_method_implemented(proto->accept),
4072		   proto_method_implemented(proto->ioctl),
4073		   proto_method_implemented(proto->init),
4074		   proto_method_implemented(proto->destroy),
4075		   proto_method_implemented(proto->shutdown),
4076		   proto_method_implemented(proto->setsockopt),
4077		   proto_method_implemented(proto->getsockopt),
4078		   proto_method_implemented(proto->sendmsg),
4079		   proto_method_implemented(proto->recvmsg),
4080		   proto_method_implemented(proto->bind),
4081		   proto_method_implemented(proto->backlog_rcv),
4082		   proto_method_implemented(proto->hash),
4083		   proto_method_implemented(proto->unhash),
4084		   proto_method_implemented(proto->get_port),
4085		   proto_method_implemented(proto->enter_memory_pressure));
4086}
4087
4088static int proto_seq_show(struct seq_file *seq, void *v)
4089{
4090	if (v == &proto_list)
4091		seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
4092			   "protocol",
4093			   "size",
4094			   "sockets",
4095			   "memory",
4096			   "press",
4097			   "maxhdr",
4098			   "slab",
4099			   "module",
4100			   "cl co di ac io in de sh ss gs se re bi br ha uh gp em\n");
4101	else
4102		proto_seq_printf(seq, list_entry(v, struct proto, node));
4103	return 0;
4104}
4105
4106static const struct seq_operations proto_seq_ops = {
4107	.start  = proto_seq_start,
4108	.next   = proto_seq_next,
4109	.stop   = proto_seq_stop,
4110	.show   = proto_seq_show,
4111};
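
/*
 * Editorial note (not in the original source): the seq_file above implements
 * /proc/net/protocols.  The two-letter columns in the header printed by
 * proto_seq_show() map, in order, to the proto callbacks tested by
 * proto_seq_printf():
 *
 *	cl=close      co=connect     di=disconnect  ac=accept
 *	io=ioctl      in=init        de=destroy     sh=shutdown
 *	ss=setsockopt gs=getsockopt  se=sendmsg     re=recvmsg
 *	bi=bind       br=backlog_rcv ha=hash        uh=unhash
 *	gp=get_port   em=enter_memory_pressure
 *
 * each cell showing 'y' if the protocol implements the callback, 'n' if not.
 */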
4112
4113static __net_init int proto_init_net(struct net *net)
4114{
4115	if (!proc_create_net("protocols", 0444, net->proc_net, &proto_seq_ops,
4116			sizeof(struct seq_net_private)))
4117		return -ENOMEM;
4118
4119	return 0;
4120}
4121
4122static __net_exit void proto_exit_net(struct net *net)
4123{
4124	remove_proc_entry("protocols", net->proc_net);
4125}
4126
4127
4128static __net_initdata struct pernet_operations proto_net_ops = {
4129	.init = proto_init_net,
4130	.exit = proto_exit_net,
4131};
4132
4133static int __init proto_init(void)
4134{
4135	return register_pernet_subsys(&proto_net_ops);
4136}
4137
4138subsys_initcall(proto_init);
4139
4140#endif /* PROC_FS */
4141
4142#ifdef CONFIG_NET_RX_BUSY_POLL
4143bool sk_busy_loop_end(void *p, unsigned long start_time)
4144{
4145	struct sock *sk = p;
4146
4147	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
4148		return true;
4149
4150	if (sk_is_udp(sk) &&
4151	    !skb_queue_empty_lockless(&udp_sk(sk)->reader_queue))
4152		return true;
4153
4154	return sk_busy_loop_timeout(sk, start_time);
4155}
4156EXPORT_SYMBOL(sk_busy_loop_end);
4157#endif /* CONFIG_NET_RX_BUSY_POLL */
4158
4159int sock_bind_add(struct sock *sk, struct sockaddr *addr, int addr_len)
4160{
4161	if (!sk->sk_prot->bind_add)
4162		return -EOPNOTSUPP;
4163	return sk->sk_prot->bind_add(sk, addr, addr_len);
4164}
4165EXPORT_SYMBOL(sock_bind_add);
4166
4167/* Copy 'size' bytes from userspace and return 'size' back to userspace */
4168int sock_ioctl_inout(struct sock *sk, unsigned int cmd,
4169		     void __user *arg, void *karg, size_t size)
4170{
4171	int ret;
4172
4173	if (copy_from_user(karg, arg, size))
4174		return -EFAULT;
4175
4176	ret = READ_ONCE(sk->sk_prot)->ioctl(sk, cmd, karg);
4177	if (ret)
4178		return ret;
4179
4180	if (copy_to_user(arg, karg, size))
4181		return -EFAULT;
4182
4183	return 0;
4184}
4185EXPORT_SYMBOL(sock_ioctl_inout);
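
/*
 * Editorial note (not in the original source): a minimal sketch of wrapping a
 * protocol ioctl with sock_ioctl_inout().  The struct and the SIOCFOO command
 * below are hypothetical, purely for illustration:
 *
 *	struct foo_req { u32 id; u32 value; };
 *	...
 *	case SIOCFOO: {
 *		struct foo_req req;
 *
 *		return sock_ioctl_inout(sk, cmd, (void __user *)arg,
 *					&req, sizeof(req));
 *	}
 *
 * The protocol's ->ioctl() then works on the kernel copy in 'req', and the
 * (possibly updated) structure is copied back to userspace on success.
 */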
4186
4187/* This is the most common ioctl prep function, where the result (4 bytes) is
4188 * copied back to userspace if the ioctl() returns successfully. No input
4189 * argument is copied from userspace.
4190 */
4191static int sock_ioctl_out(struct sock *sk, unsigned int cmd, void __user *arg)
4192{
4193	int ret, karg = 0;
4194
4195	ret = READ_ONCE(sk->sk_prot)->ioctl(sk, cmd, &karg);
4196	if (ret)
4197		return ret;
4198
4199	return put_user(karg, (int __user *)arg);
4200}
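
/*
 * Editorial note (not in the original source): with this scheme a protocol's
 * ->ioctl() callback fills in kernel memory instead of calling put_user()
 * itself.  A hypothetical handler (not taken from a real protocol) might be:
 *
 *	static int foo_ioctl(struct sock *sk, int cmd, int *karg)
 *	{
 *		switch (cmd) {
 *		case SIOCINQ:
 *			*karg = foo_queued_bytes(sk);	/* hypothetical helper */
 *			return 0;
 *		default:
 *			return -ENOIOCTLCMD;
 *		}
 *	}
 *
 * sock_ioctl_out() above then performs the single put_user() that hands the
 * 4-byte result back to the caller.
 */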
4201
4202/* A wrapper around sock ioctls, which copies the data from userspace
4203 * (depending on the protocol/ioctl), and copies back the result to userspace.
4204 * The main motivation for this function is to pass kernel memory to the
4205 * protocol ioctl callbacks, instead of userspace memory.
4206 */
4207int sk_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
4208{
4209	int rc = 1;
4210
4211	if (sk->sk_type == SOCK_RAW && sk->sk_family == AF_INET)
4212		rc = ipmr_sk_ioctl(sk, cmd, arg);
4213	else if (sk->sk_type == SOCK_RAW && sk->sk_family == AF_INET6)
4214		rc = ip6mr_sk_ioctl(sk, cmd, arg);
4215	else if (sk_is_phonet(sk))
4216		rc = phonet_sk_ioctl(sk, cmd, arg);
4217
4218	/* If the ioctl was processed, return its value */
4219	if (rc <= 0)
4220		return rc;
4221
4222	/* Otherwise call the default handler */
4223	return sock_ioctl_out(sk, cmd, arg);
4224}
4225EXPORT_SYMBOL(sk_ioctl);
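
/*
 * Editorial note (not in the original source): from userspace the path above
 * is reached through a plain ioctl() on the socket fd, e.g. (illustrative
 * only):
 *
 *	int queued;
 *
 *	if (ioctl(fd, SIOCINQ, &queued) == 0)
 *		printf("%d bytes queued for reading\n", queued);
 *
 * For an AF_INET TCP socket this typically reaches sk_ioctl(), which falls
 * through to sock_ioctl_out() and the protocol's ->ioctl() callback; the
 * multicast routing and phonet helpers above only claim the commands they
 * own and return a positive value otherwise.
 */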