   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * INET		An implementation of the TCP/IP protocol suite for the LINUX
   4 *		operating system.  INET is implemented using the  BSD Socket
   5 *		interface as the means of communication with the user level.
   6 *
   7 *		Generic socket support routines. Memory allocators, socket lock/release
   8 *		handler for protocols to use and generic option handler.
   9 *
  10 * Authors:	Ross Biro
  11 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  12 *		Florian La Roche, <flla@stud.uni-sb.de>
  13 *		Alan Cox, <A.Cox@swansea.ac.uk>
  14 *
  15 * Fixes:
  16 *		Alan Cox	: 	Numerous verify_area() problems
  17 *		Alan Cox	:	Connecting on a connecting socket
  18 *					now returns an error for tcp.
  19 *		Alan Cox	:	sock->protocol is set correctly.
  20 *					and is not sometimes left as 0.
  21 *		Alan Cox	:	connect handles icmp errors on a
  22 *					connect properly. Unfortunately there
  23 *					is a restart syscall nasty there. I
  24 *					can't match BSD without hacking the C
  25 *					library. Ideas urgently sought!
  26 *		Alan Cox	:	Disallow bind() to addresses that are
  27 *					not ours - especially broadcast ones!!
  28 *		Alan Cox	:	Socket 1024 _IS_ ok for users. (fencepost)
  29 *		Alan Cox	:	sock_wfree/sock_rfree don't destroy sockets,
  30 *					instead they leave that for the DESTROY timer.
  31 *		Alan Cox	:	Clean up error flag in accept
  32 *		Alan Cox	:	TCP ack handling is buggy, the DESTROY timer
  33 *					was buggy. Put a remove_sock() in the handler
  34 *					for memory when we hit 0. Also altered the timer
  35 *					code. The ACK stuff can wait and needs major
  36 *					TCP layer surgery.
  37 *		Alan Cox	:	Fixed TCP ack bug, removed remove sock
  38 *					and fixed timer/inet_bh race.
  39 *		Alan Cox	:	Added zapped flag for TCP
  40 *		Alan Cox	:	Move kfree_skb into skbuff.c and tidied up surplus code
  41 *		Alan Cox	:	for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
  42 *		Alan Cox	:	kfree_s calls now are kfree_skbmem so we can track skb resources
  43 *		Alan Cox	:	Supports socket option broadcast now as does udp. Packet and raw need fixing.
  44 *		Alan Cox	:	Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
  45 *		Rick Sladkey	:	Relaxed UDP rules for matching packets.
  46 *		C.E.Hawkins	:	IFF_PROMISC/SIOCGHWADDR support
  47 *	Pauline Middelink	:	identd support
  48 *		Alan Cox	:	Fixed connect() taking signals I think.
  49 *		Alan Cox	:	SO_LINGER supported
  50 *		Alan Cox	:	Error reporting fixes
  51 *		Anonymous	:	inet_create tidied up (sk->reuse setting)
  52 *		Alan Cox	:	inet sockets don't set sk->type!
  53 *		Alan Cox	:	Split socket option code
  54 *		Alan Cox	:	Callbacks
  55 *		Alan Cox	:	Nagle flag for Charles & Johannes stuff
  56 *		Alex		:	Removed restriction on inet fioctl
  57 *		Alan Cox	:	Splitting INET from NET core
  58 *		Alan Cox	:	Fixed bogus SO_TYPE handling in getsockopt()
  59 *		Adam Caldwell	:	Missing return in SO_DONTROUTE/SO_DEBUG code
  60 *		Alan Cox	:	Split IP from generic code
  61 *		Alan Cox	:	New kfree_skbmem()
  62 *		Alan Cox	:	Make SO_DEBUG superuser only.
  63 *		Alan Cox	:	Allow anyone to clear SO_DEBUG
  64 *					(compatibility fix)
  65 *		Alan Cox	:	Added optimistic memory grabbing for AF_UNIX throughput.
  66 *		Alan Cox	:	Allocator for a socket is settable.
  67 *		Alan Cox	:	SO_ERROR includes soft errors.
  68 *		Alan Cox	:	Allow NULL arguments on some SO_ opts
  69 *		Alan Cox	: 	Generic socket allocation to make hooks
  70 *					easier (suggested by Craig Metz).
  71 *		Michael Pall	:	SO_ERROR returns positive errno again
  72 *              Steve Whitehouse:       Added default destructor to free
  73 *                                      protocol private data.
  74 *              Steve Whitehouse:       Added various other default routines
  75 *                                      common to several socket families.
  76 *              Chris Evans     :       Call suser() check last on F_SETOWN
  77 *		Jay Schulist	:	Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
  78 *		Andi Kleen	:	Add sock_kmalloc()/sock_kfree_s()
  79 *		Andi Kleen	:	Fix write_space callback
  80 *		Chris Evans	:	Security fixes - signedness again
  81 *		Arnaldo C. Melo :       cleanups, use skb_queue_purge
  82 *
  83 * To Fix:
  84 */
  85
  86#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  87
  88#include <asm/unaligned.h>
  89#include <linux/capability.h>
  90#include <linux/errno.h>
  91#include <linux/errqueue.h>
  92#include <linux/types.h>
  93#include <linux/socket.h>
  94#include <linux/in.h>
  95#include <linux/kernel.h>
  96#include <linux/module.h>
  97#include <linux/proc_fs.h>
  98#include <linux/seq_file.h>
  99#include <linux/sched.h>
 100#include <linux/sched/mm.h>
 101#include <linux/timer.h>
 102#include <linux/string.h>
 103#include <linux/sockios.h>
 104#include <linux/net.h>
 105#include <linux/mm.h>
 106#include <linux/slab.h>
 107#include <linux/interrupt.h>
 108#include <linux/poll.h>
 109#include <linux/tcp.h>
 110#include <linux/init.h>
 111#include <linux/highmem.h>
 112#include <linux/user_namespace.h>
 113#include <linux/static_key.h>
 114#include <linux/memcontrol.h>
 115#include <linux/prefetch.h>
 116#include <linux/compat.h>
 117
 118#include <linux/uaccess.h>
 119
 120#include <linux/netdevice.h>
 121#include <net/protocol.h>
 122#include <linux/skbuff.h>
 123#include <net/net_namespace.h>
 124#include <net/request_sock.h>
 125#include <net/sock.h>
 126#include <linux/net_tstamp.h>
 127#include <net/xfrm.h>
 128#include <linux/ipsec.h>
 129#include <net/cls_cgroup.h>
 130#include <net/netprio_cgroup.h>
 131#include <linux/sock_diag.h>
 132
 133#include <linux/filter.h>
 134#include <net/sock_reuseport.h>
 135#include <net/bpf_sk_storage.h>
 136
 137#include <trace/events/sock.h>
 138
 139#include <net/tcp.h>
 140#include <net/busy_poll.h>
 141
 142#include <linux/ethtool.h>
 143
 144static DEFINE_MUTEX(proto_list_mutex);
 145static LIST_HEAD(proto_list);
 146
 147static void sock_inuse_add(struct net *net, int val);
 148
 149/**
 150 * sk_ns_capable - General socket capability test
 151 * @sk: Socket to use a capability on or through
 152 * @user_ns: The user namespace of the capability to use
 153 * @cap: The capability to use
 154 *
  155 * Test to see if the opener of the socket had the capability @cap
  156 * when the socket was created, and if the current process has it in
  157 * the user namespace @user_ns.
 158 */
 159bool sk_ns_capable(const struct sock *sk,
 160		   struct user_namespace *user_ns, int cap)
 161{
 162	return file_ns_capable(sk->sk_socket->file, user_ns, cap) &&
 163		ns_capable(user_ns, cap);
 164}
 165EXPORT_SYMBOL(sk_ns_capable);
 166
 167/**
 168 * sk_capable - Socket global capability test
 169 * @sk: Socket to use a capability on or through
 170 * @cap: The global capability to use
 171 *
  172 * Test to see if the opener of the socket had the capability @cap
  173 * when the socket was created, and if the current process has it in
  174 * all user namespaces.
 175 */
 176bool sk_capable(const struct sock *sk, int cap)
 177{
 178	return sk_ns_capable(sk, &init_user_ns, cap);
 179}
 180EXPORT_SYMBOL(sk_capable);
 181
 182/**
 183 * sk_net_capable - Network namespace socket capability test
 184 * @sk: Socket to use a capability on or through
 185 * @cap: The capability to use
 186 *
  187 * Test to see if the opener of the socket had the capability @cap when
  188 * the socket was created, and if the current process has it over the
  189 * network namespace the socket is a member of.
 190 */
 191bool sk_net_capable(const struct sock *sk, int cap)
 192{
 193	return sk_ns_capable(sk, sock_net(sk)->user_ns, cap);
 194}
 195EXPORT_SYMBOL(sk_net_capable);
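/*
 * Example (illustrative sketch, not part of this file): a protocol's
 * option handler might gate a privileged setting on the helpers above:
 *
 *	if (!sk_net_capable(sk, CAP_NET_ADMIN))
 *		return -EPERM;
 */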
 196
 197/*
 198 * Each address family might have different locking rules, so we have
 199 * one slock key per address family and separate keys for internal and
 200 * userspace sockets.
 201 */
 202static struct lock_class_key af_family_keys[AF_MAX];
 203static struct lock_class_key af_family_kern_keys[AF_MAX];
 204static struct lock_class_key af_family_slock_keys[AF_MAX];
 205static struct lock_class_key af_family_kern_slock_keys[AF_MAX];
 206
 207/*
 208 * Make lock validator output more readable. (we pre-construct these
 209 * strings build-time, so that runtime initialization of socket
 210 * locks is fast):
 211 */
 212
 213#define _sock_locks(x)						  \
 214  x "AF_UNSPEC",	x "AF_UNIX"     ,	x "AF_INET"     , \
 215  x "AF_AX25"  ,	x "AF_IPX"      ,	x "AF_APPLETALK", \
 216  x "AF_NETROM",	x "AF_BRIDGE"   ,	x "AF_ATMPVC"   , \
 217  x "AF_X25"   ,	x "AF_INET6"    ,	x "AF_ROSE"     , \
 218  x "AF_DECnet",	x "AF_NETBEUI"  ,	x "AF_SECURITY" , \
 219  x "AF_KEY"   ,	x "AF_NETLINK"  ,	x "AF_PACKET"   , \
 220  x "AF_ASH"   ,	x "AF_ECONET"   ,	x "AF_ATMSVC"   , \
 221  x "AF_RDS"   ,	x "AF_SNA"      ,	x "AF_IRDA"     , \
 222  x "AF_PPPOX" ,	x "AF_WANPIPE"  ,	x "AF_LLC"      , \
 223  x "27"       ,	x "28"          ,	x "AF_CAN"      , \
 224  x "AF_TIPC"  ,	x "AF_BLUETOOTH",	x "IUCV"        , \
 225  x "AF_RXRPC" ,	x "AF_ISDN"     ,	x "AF_PHONET"   , \
 226  x "AF_IEEE802154",	x "AF_CAIF"	,	x "AF_ALG"      , \
 227  x "AF_NFC"   ,	x "AF_VSOCK"    ,	x "AF_KCM"      , \
 228  x "AF_QIPCRTR",	x "AF_SMC"	,	x "AF_XDP"	, \
 229  x "AF_MAX"
 230
 231static const char *const af_family_key_strings[AF_MAX+1] = {
 232	_sock_locks("sk_lock-")
 233};
 234static const char *const af_family_slock_key_strings[AF_MAX+1] = {
 235	_sock_locks("slock-")
 236};
 237static const char *const af_family_clock_key_strings[AF_MAX+1] = {
 238	_sock_locks("clock-")
 239};
 240
 241static const char *const af_family_kern_key_strings[AF_MAX+1] = {
 242	_sock_locks("k-sk_lock-")
 243};
 244static const char *const af_family_kern_slock_key_strings[AF_MAX+1] = {
 245	_sock_locks("k-slock-")
 246};
 247static const char *const af_family_kern_clock_key_strings[AF_MAX+1] = {
 248	_sock_locks("k-clock-")
 249};
 250static const char *const af_family_rlock_key_strings[AF_MAX+1] = {
 251	_sock_locks("rlock-")
 252};
 253static const char *const af_family_wlock_key_strings[AF_MAX+1] = {
 254	_sock_locks("wlock-")
 255};
 256static const char *const af_family_elock_key_strings[AF_MAX+1] = {
 257	_sock_locks("elock-")
 258};
 259
 260/*
 261 * sk_callback_lock and sk queues locking rules are per-address-family,
 262 * so split the lock classes by using a per-AF key:
 263 */
 264static struct lock_class_key af_callback_keys[AF_MAX];
 265static struct lock_class_key af_rlock_keys[AF_MAX];
 266static struct lock_class_key af_wlock_keys[AF_MAX];
 267static struct lock_class_key af_elock_keys[AF_MAX];
 268static struct lock_class_key af_kern_callback_keys[AF_MAX];
 269
 270/* Run time adjustable parameters. */
 271__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
 272EXPORT_SYMBOL(sysctl_wmem_max);
 273__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
 274EXPORT_SYMBOL(sysctl_rmem_max);
 275__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
 276__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;
 277
 278/* Maximal space eaten by iovec or ancillary data plus some space */
 279int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
 280EXPORT_SYMBOL(sysctl_optmem_max);
 281
 282int sysctl_tstamp_allow_data __read_mostly = 1;
 283
 284DEFINE_STATIC_KEY_FALSE(memalloc_socks_key);
 285EXPORT_SYMBOL_GPL(memalloc_socks_key);
 286
 287/**
 288 * sk_set_memalloc - sets %SOCK_MEMALLOC
 289 * @sk: socket to set it on
 290 *
 291 * Set %SOCK_MEMALLOC on a socket for access to emergency reserves.
 292 * It's the responsibility of the admin to adjust min_free_kbytes
  293 * to meet the requirements.
 294 */
 295void sk_set_memalloc(struct sock *sk)
 296{
 297	sock_set_flag(sk, SOCK_MEMALLOC);
 298	sk->sk_allocation |= __GFP_MEMALLOC;
 299	static_branch_inc(&memalloc_socks_key);
 300}
 301EXPORT_SYMBOL_GPL(sk_set_memalloc);
 302
 303void sk_clear_memalloc(struct sock *sk)
 304{
 305	sock_reset_flag(sk, SOCK_MEMALLOC);
 306	sk->sk_allocation &= ~__GFP_MEMALLOC;
 307	static_branch_dec(&memalloc_socks_key);
 308
 309	/*
 310	 * SOCK_MEMALLOC is allowed to ignore rmem limits to ensure forward
 311	 * progress of swapping. SOCK_MEMALLOC may be cleared while
 312	 * it has rmem allocations due to the last swapfile being deactivated
 313	 * but there is a risk that the socket is unusable due to exceeding
 314	 * the rmem limits. Reclaim the reserves and obey rmem limits again.
 315	 */
 316	sk_mem_reclaim(sk);
 317}
 318EXPORT_SYMBOL_GPL(sk_clear_memalloc);
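/*
 * Example (illustrative sketch): a swap-over-network transport marks
 * its kernel socket after creating it, so its allocations may dip into
 * the emergency reserves, and clears the flag when the backing swap
 * device goes away (xprt is a hypothetical transport object holding
 * the socket):
 *
 *	sk_set_memalloc(xprt->sock->sk);
 *	...
 *	sk_clear_memalloc(xprt->sock->sk);
 */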
 319
 320int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 321{
 322	int ret;
 323	unsigned int noreclaim_flag;
 324
 325	/* these should have been dropped before queueing */
 326	BUG_ON(!sock_flag(sk, SOCK_MEMALLOC));
 327
 328	noreclaim_flag = memalloc_noreclaim_save();
 329	ret = sk->sk_backlog_rcv(sk, skb);
 330	memalloc_noreclaim_restore(noreclaim_flag);
 331
 332	return ret;
 333}
 334EXPORT_SYMBOL(__sk_backlog_rcv);
 335
 336void sk_error_report(struct sock *sk)
 337{
 338	sk->sk_error_report(sk);
 339
 340	switch (sk->sk_family) {
 341	case AF_INET:
 342		fallthrough;
 343	case AF_INET6:
 344		trace_inet_sk_error_report(sk);
 345		break;
 346	default:
 347		break;
 348	}
 349}
 350EXPORT_SYMBOL(sk_error_report);
 351
 352static int sock_get_timeout(long timeo, void *optval, bool old_timeval)
 353{
 354	struct __kernel_sock_timeval tv;
 355
 356	if (timeo == MAX_SCHEDULE_TIMEOUT) {
 357		tv.tv_sec = 0;
 358		tv.tv_usec = 0;
 359	} else {
 360		tv.tv_sec = timeo / HZ;
 361		tv.tv_usec = ((timeo % HZ) * USEC_PER_SEC) / HZ;
 362	}
 363
 364	if (old_timeval && in_compat_syscall() && !COMPAT_USE_64BIT_TIME) {
 365		struct old_timeval32 tv32 = { tv.tv_sec, tv.tv_usec };
 366		*(struct old_timeval32 *)optval = tv32;
 367		return sizeof(tv32);
 368	}
 369
 370	if (old_timeval) {
 371		struct __kernel_old_timeval old_tv;
 372		old_tv.tv_sec = tv.tv_sec;
 373		old_tv.tv_usec = tv.tv_usec;
 374		*(struct __kernel_old_timeval *)optval = old_tv;
 375		return sizeof(old_tv);
 376	}
 377
 378	*(struct __kernel_sock_timeval *)optval = tv;
 379	return sizeof(tv);
 380}
 381
 382static int sock_set_timeout(long *timeo_p, sockptr_t optval, int optlen,
 383			    bool old_timeval)
 384{
 385	struct __kernel_sock_timeval tv;
 386
 387	if (old_timeval && in_compat_syscall() && !COMPAT_USE_64BIT_TIME) {
 388		struct old_timeval32 tv32;
 389
 390		if (optlen < sizeof(tv32))
 391			return -EINVAL;
 392
 393		if (copy_from_sockptr(&tv32, optval, sizeof(tv32)))
 394			return -EFAULT;
 395		tv.tv_sec = tv32.tv_sec;
 396		tv.tv_usec = tv32.tv_usec;
 397	} else if (old_timeval) {
 398		struct __kernel_old_timeval old_tv;
 399
 400		if (optlen < sizeof(old_tv))
 401			return -EINVAL;
 402		if (copy_from_sockptr(&old_tv, optval, sizeof(old_tv)))
 403			return -EFAULT;
 404		tv.tv_sec = old_tv.tv_sec;
 405		tv.tv_usec = old_tv.tv_usec;
 406	} else {
 407		if (optlen < sizeof(tv))
 408			return -EINVAL;
 409		if (copy_from_sockptr(&tv, optval, sizeof(tv)))
 410			return -EFAULT;
 411	}
 412	if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
 413		return -EDOM;
 414
 415	if (tv.tv_sec < 0) {
 416		static int warned __read_mostly;
 417
 418		*timeo_p = 0;
 419		if (warned < 10 && net_ratelimit()) {
 420			warned++;
 421			pr_info("%s: `%s' (pid %d) tries to set negative timeout\n",
 422				__func__, current->comm, task_pid_nr(current));
 423		}
 424		return 0;
 425	}
 426	*timeo_p = MAX_SCHEDULE_TIMEOUT;
 427	if (tv.tv_sec == 0 && tv.tv_usec == 0)
 428		return 0;
 429	if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT / HZ - 1))
 430		*timeo_p = tv.tv_sec * HZ + DIV_ROUND_UP((unsigned long)tv.tv_usec, USEC_PER_SEC / HZ);
 431	return 0;
 432}
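/*
 * Example (illustrative, userspace): the classic encoding parsed by the
 * old_timeval path of sock_set_timeout() above:
 *
 *	struct timeval tv = { .tv_sec = 2, .tv_usec = 500000 };
 *	setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
 *
 * A tv_usec outside [0, USEC_PER_SEC) fails with -EDOM; a negative
 * tv_sec is treated as an immediate timeout and logged (rate-limited).
 */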
 433
 434static bool sock_needs_netstamp(const struct sock *sk)
 435{
 436	switch (sk->sk_family) {
 437	case AF_UNSPEC:
 438	case AF_UNIX:
 439		return false;
 440	default:
 441		return true;
 442	}
 443}
 444
 445static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
 446{
 447	if (sk->sk_flags & flags) {
 448		sk->sk_flags &= ~flags;
 449		if (sock_needs_netstamp(sk) &&
 450		    !(sk->sk_flags & SK_FLAGS_TIMESTAMP))
 451			net_disable_timestamp();
 452	}
 453}
 454
 455
 456int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 457{
 458	unsigned long flags;
 459	struct sk_buff_head *list = &sk->sk_receive_queue;
 460
 461	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
 462		atomic_inc(&sk->sk_drops);
 463		trace_sock_rcvqueue_full(sk, skb);
 464		return -ENOMEM;
 465	}
 466
 467	if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
 468		atomic_inc(&sk->sk_drops);
 469		return -ENOBUFS;
 470	}
 471
 472	skb->dev = NULL;
 473	skb_set_owner_r(skb, sk);
 474
  475	/* We escape from the RCU-protected region; make sure we don't
  476	 * leak a non-refcounted dst.
  477	 */
 478	skb_dst_force(skb);
 479
 480	spin_lock_irqsave(&list->lock, flags);
 481	sock_skb_set_dropcount(sk, skb);
 482	__skb_queue_tail(list, skb);
 483	spin_unlock_irqrestore(&list->lock, flags);
 484
 485	if (!sock_flag(sk, SOCK_DEAD))
 486		sk->sk_data_ready(sk);
 487	return 0;
 488}
 489EXPORT_SYMBOL(__sock_queue_rcv_skb);
 490
 491int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 492{
 493	int err;
 494
 495	err = sk_filter(sk, skb);
 496	if (err)
 497		return err;
 498
 499	return __sock_queue_rcv_skb(sk, skb);
 500}
 501EXPORT_SYMBOL(sock_queue_rcv_skb);
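/*
 * Example (illustrative sketch): a datagram protocol's delivery path
 * hands a matched skb to the owning socket and frees it on failure:
 *
 *	if (sock_queue_rcv_skb(sk, skb) < 0)
 *		kfree_skb(skb);
 *
 * A negative return means the socket filter dropped the skb, or the
 * receive queue/rmem limits were hit (-ENOMEM/-ENOBUFS above).
 */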
 502
 503int __sk_receive_skb(struct sock *sk, struct sk_buff *skb,
 504		     const int nested, unsigned int trim_cap, bool refcounted)
 505{
 506	int rc = NET_RX_SUCCESS;
 507
 508	if (sk_filter_trim_cap(sk, skb, trim_cap))
 509		goto discard_and_relse;
 510
 511	skb->dev = NULL;
 512
 513	if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
 514		atomic_inc(&sk->sk_drops);
 515		goto discard_and_relse;
 516	}
 517	if (nested)
 518		bh_lock_sock_nested(sk);
 519	else
 520		bh_lock_sock(sk);
 521	if (!sock_owned_by_user(sk)) {
 522		/*
 523		 * trylock + unlock semantics:
 524		 */
 525		mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);
 526
 527		rc = sk_backlog_rcv(sk, skb);
 528
 529		mutex_release(&sk->sk_lock.dep_map, _RET_IP_);
 530	} else if (sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf))) {
 531		bh_unlock_sock(sk);
 532		atomic_inc(&sk->sk_drops);
 533		goto discard_and_relse;
 534	}
 535
 536	bh_unlock_sock(sk);
 537out:
 538	if (refcounted)
 539		sock_put(sk);
 540	return rc;
 541discard_and_relse:
 542	kfree_skb(skb);
 543	goto out;
 544}
 545EXPORT_SYMBOL(__sk_receive_skb);
 546
 547INDIRECT_CALLABLE_DECLARE(struct dst_entry *ip6_dst_check(struct dst_entry *,
 548							  u32));
 549INDIRECT_CALLABLE_DECLARE(struct dst_entry *ipv4_dst_check(struct dst_entry *,
 550							   u32));
 551struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
 552{
 553	struct dst_entry *dst = __sk_dst_get(sk);
 554
 555	if (dst && dst->obsolete &&
 556	    INDIRECT_CALL_INET(dst->ops->check, ip6_dst_check, ipv4_dst_check,
 557			       dst, cookie) == NULL) {
 558		sk_tx_queue_clear(sk);
 559		sk->sk_dst_pending_confirm = 0;
 560		RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
 561		dst_release(dst);
 562		return NULL;
 563	}
 564
 565	return dst;
 566}
 567EXPORT_SYMBOL(__sk_dst_check);
 568
 569struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
 570{
 571	struct dst_entry *dst = sk_dst_get(sk);
 572
 573	if (dst && dst->obsolete &&
 574	    INDIRECT_CALL_INET(dst->ops->check, ip6_dst_check, ipv4_dst_check,
 575			       dst, cookie) == NULL) {
 576		sk_dst_reset(sk);
 577		dst_release(dst);
 578		return NULL;
 579	}
 580
 581	return dst;
 582}
 583EXPORT_SYMBOL(sk_dst_check);
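/*
 * Example (illustrative sketch): a transmit path revalidates its cached
 * route before each use and re-routes when the check above drops it
 * (do_fresh_route_lookup is a hypothetical helper):
 *
 *	dst = sk_dst_check(sk, 0);
 *	if (!dst)
 *		dst = do_fresh_route_lookup(sk);
 */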
 584
 585static int sock_bindtoindex_locked(struct sock *sk, int ifindex)
 586{
 587	int ret = -ENOPROTOOPT;
 588#ifdef CONFIG_NETDEVICES
 589	struct net *net = sock_net(sk);
 590
 591	/* Sorry... */
 592	ret = -EPERM;
 593	if (sk->sk_bound_dev_if && !ns_capable(net->user_ns, CAP_NET_RAW))
 594		goto out;
 595
 596	ret = -EINVAL;
 597	if (ifindex < 0)
 598		goto out;
 599
 600	sk->sk_bound_dev_if = ifindex;
 601	if (sk->sk_prot->rehash)
 602		sk->sk_prot->rehash(sk);
 603	sk_dst_reset(sk);
 604
 605	ret = 0;
 606
 607out:
 608#endif
 609
 610	return ret;
 611}
 612
 613int sock_bindtoindex(struct sock *sk, int ifindex, bool lock_sk)
 614{
 615	int ret;
 616
 617	if (lock_sk)
 618		lock_sock(sk);
 619	ret = sock_bindtoindex_locked(sk, ifindex);
 620	if (lock_sk)
 621		release_sock(sk);
 622
 623	return ret;
 624}
 625EXPORT_SYMBOL(sock_bindtoindex);
 626
 627static int sock_setbindtodevice(struct sock *sk, sockptr_t optval, int optlen)
 628{
 629	int ret = -ENOPROTOOPT;
 630#ifdef CONFIG_NETDEVICES
 631	struct net *net = sock_net(sk);
 632	char devname[IFNAMSIZ];
 633	int index;
 634
 635	ret = -EINVAL;
 636	if (optlen < 0)
 637		goto out;
 638
 639	/* Bind this socket to a particular device like "eth0",
 640	 * as specified in the passed interface name. If the
 641	 * name is "" or the option length is zero the socket
 642	 * is not bound.
 643	 */
 644	if (optlen > IFNAMSIZ - 1)
 645		optlen = IFNAMSIZ - 1;
 646	memset(devname, 0, sizeof(devname));
 647
 648	ret = -EFAULT;
 649	if (copy_from_sockptr(devname, optval, optlen))
 650		goto out;
 651
 652	index = 0;
 653	if (devname[0] != '\0') {
 654		struct net_device *dev;
 655
 656		rcu_read_lock();
 657		dev = dev_get_by_name_rcu(net, devname);
 658		if (dev)
 659			index = dev->ifindex;
 660		rcu_read_unlock();
 661		ret = -ENODEV;
 662		if (!dev)
 663			goto out;
 664	}
 665
 666	return sock_bindtoindex(sk, index, true);
 667out:
 668#endif
 669
 670	return ret;
 671}
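/*
 * Example (illustrative, userspace): the two ways into the binding code
 * above; an empty name (or zero-length option) unbinds the socket:
 *
 *	setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, "eth0", sizeof("eth0"));
 *	setsockopt(fd, SOL_SOCKET, SO_BINDTOIFINDEX, &ifindex, sizeof(ifindex));
 */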
 672
 673static int sock_getbindtodevice(struct sock *sk, char __user *optval,
 674				int __user *optlen, int len)
 675{
 676	int ret = -ENOPROTOOPT;
 677#ifdef CONFIG_NETDEVICES
 678	struct net *net = sock_net(sk);
 679	char devname[IFNAMSIZ];
 680
 681	if (sk->sk_bound_dev_if == 0) {
 682		len = 0;
 683		goto zero;
 684	}
 685
 686	ret = -EINVAL;
 687	if (len < IFNAMSIZ)
 688		goto out;
 689
 690	ret = netdev_get_name(net, devname, sk->sk_bound_dev_if);
 691	if (ret)
 692		goto out;
 693
 694	len = strlen(devname) + 1;
 695
 696	ret = -EFAULT;
 697	if (copy_to_user(optval, devname, len))
 698		goto out;
 699
 700zero:
 701	ret = -EFAULT;
 702	if (put_user(len, optlen))
 703		goto out;
 704
 705	ret = 0;
 706
 707out:
 708#endif
 709
 710	return ret;
 711}
 712
 713bool sk_mc_loop(struct sock *sk)
 714{
 715	if (dev_recursion_level())
 716		return false;
 717	if (!sk)
 718		return true;
 719	switch (sk->sk_family) {
 720	case AF_INET:
 721		return inet_sk(sk)->mc_loop;
 722#if IS_ENABLED(CONFIG_IPV6)
 723	case AF_INET6:
 724		return inet6_sk(sk)->mc_loop;
 725#endif
 726	}
 727	WARN_ON_ONCE(1);
 728	return true;
 729}
 730EXPORT_SYMBOL(sk_mc_loop);
 731
 732void sock_set_reuseaddr(struct sock *sk)
 733{
 734	lock_sock(sk);
 735	sk->sk_reuse = SK_CAN_REUSE;
 736	release_sock(sk);
 737}
 738EXPORT_SYMBOL(sock_set_reuseaddr);
 739
 740void sock_set_reuseport(struct sock *sk)
 741{
 742	lock_sock(sk);
 743	sk->sk_reuseport = true;
 744	release_sock(sk);
 745}
 746EXPORT_SYMBOL(sock_set_reuseport);
 747
 748void sock_no_linger(struct sock *sk)
 749{
 750	lock_sock(sk);
 751	sk->sk_lingertime = 0;
 752	sock_set_flag(sk, SOCK_LINGER);
 753	release_sock(sk);
 754}
 755EXPORT_SYMBOL(sock_no_linger);
 756
 757void sock_set_priority(struct sock *sk, u32 priority)
 758{
 759	lock_sock(sk);
 760	sk->sk_priority = priority;
 761	release_sock(sk);
 762}
 763EXPORT_SYMBOL(sock_set_priority);
 764
 765void sock_set_sndtimeo(struct sock *sk, s64 secs)
 766{
 767	lock_sock(sk);
 768	if (secs && secs < MAX_SCHEDULE_TIMEOUT / HZ - 1)
 769		sk->sk_sndtimeo = secs * HZ;
 770	else
 771		sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
 772	release_sock(sk);
 773}
 774EXPORT_SYMBOL(sock_set_sndtimeo);
 775
 776static void __sock_set_timestamps(struct sock *sk, bool val, bool new, bool ns)
 777{
 778	if (val)  {
 779		sock_valbool_flag(sk, SOCK_TSTAMP_NEW, new);
 780		sock_valbool_flag(sk, SOCK_RCVTSTAMPNS, ns);
 781		sock_set_flag(sk, SOCK_RCVTSTAMP);
 782		sock_enable_timestamp(sk, SOCK_TIMESTAMP);
 783	} else {
 784		sock_reset_flag(sk, SOCK_RCVTSTAMP);
 785		sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
 786	}
 787}
 788
 789void sock_enable_timestamps(struct sock *sk)
 790{
 791	lock_sock(sk);
 792	__sock_set_timestamps(sk, true, false, true);
 793	release_sock(sk);
 794}
 795EXPORT_SYMBOL(sock_enable_timestamps);
 796
 797void sock_set_timestamp(struct sock *sk, int optname, bool valbool)
 798{
 799	switch (optname) {
 800	case SO_TIMESTAMP_OLD:
 801		__sock_set_timestamps(sk, valbool, false, false);
 802		break;
 803	case SO_TIMESTAMP_NEW:
 804		__sock_set_timestamps(sk, valbool, true, false);
 805		break;
 806	case SO_TIMESTAMPNS_OLD:
 807		__sock_set_timestamps(sk, valbool, false, true);
 808		break;
 809	case SO_TIMESTAMPNS_NEW:
 810		__sock_set_timestamps(sk, valbool, true, true);
 811		break;
 812	}
 813}
 814
 815static int sock_timestamping_bind_phc(struct sock *sk, int phc_index)
 816{
 817	struct net *net = sock_net(sk);
 818	struct net_device *dev = NULL;
 819	bool match = false;
 820	int *vclock_index;
 821	int i, num;
 822
 823	if (sk->sk_bound_dev_if)
 824		dev = dev_get_by_index(net, sk->sk_bound_dev_if);
 825
 826	if (!dev) {
  827		pr_err("%s: socket not bound to a device\n", __func__);
 828		return -EOPNOTSUPP;
 829	}
 830
 831	num = ethtool_get_phc_vclocks(dev, &vclock_index);
 832	for (i = 0; i < num; i++) {
 833		if (*(vclock_index + i) == phc_index) {
 834			match = true;
 835			break;
 836		}
 837	}
 838
 839	if (num > 0)
 840		kfree(vclock_index);
 841
 842	if (!match)
 843		return -EINVAL;
 844
 845	sk->sk_bind_phc = phc_index;
 846
 847	return 0;
 848}
 849
 850int sock_set_timestamping(struct sock *sk, int optname,
 851			  struct so_timestamping timestamping)
 852{
 853	int val = timestamping.flags;
 854	int ret;
 855
 856	if (val & ~SOF_TIMESTAMPING_MASK)
 857		return -EINVAL;
 858
 859	if (val & SOF_TIMESTAMPING_OPT_ID &&
 860	    !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) {
 861		if (sk->sk_protocol == IPPROTO_TCP &&
 862		    sk->sk_type == SOCK_STREAM) {
 863			if ((1 << sk->sk_state) &
 864			    (TCPF_CLOSE | TCPF_LISTEN))
 865				return -EINVAL;
 866			sk->sk_tskey = tcp_sk(sk)->snd_una;
 867		} else {
 868			sk->sk_tskey = 0;
 869		}
 870	}
 871
 872	if (val & SOF_TIMESTAMPING_OPT_STATS &&
 873	    !(val & SOF_TIMESTAMPING_OPT_TSONLY))
 874		return -EINVAL;
 875
 876	if (val & SOF_TIMESTAMPING_BIND_PHC) {
 877		ret = sock_timestamping_bind_phc(sk, timestamping.bind_phc);
 878		if (ret)
 879			return ret;
 880	}
 881
 882	sk->sk_tsflags = val;
 883	sock_valbool_flag(sk, SOCK_TSTAMP_NEW, optname == SO_TIMESTAMPING_NEW);
 884
 885	if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
 886		sock_enable_timestamp(sk,
 887				      SOCK_TIMESTAMPING_RX_SOFTWARE);
 888	else
 889		sock_disable_timestamp(sk,
 890				       (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
 891	return 0;
 892}
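/*
 * Example (illustrative, userspace): requesting software timestamps via
 * the flags that sock_set_timestamping() validates above:
 *
 *	int val = SOF_TIMESTAMPING_RX_SOFTWARE |
 *		  SOF_TIMESTAMPING_TX_SOFTWARE |
 *		  SOF_TIMESTAMPING_SOFTWARE;
 *	setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &val, sizeof(val));
 *
 * Flags outside SOF_TIMESTAMPING_MASK are rejected with -EINVAL.
 */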
 893
 894void sock_set_keepalive(struct sock *sk)
 895{
 896	lock_sock(sk);
 897	if (sk->sk_prot->keepalive)
 898		sk->sk_prot->keepalive(sk, true);
 899	sock_valbool_flag(sk, SOCK_KEEPOPEN, true);
 900	release_sock(sk);
 901}
 902EXPORT_SYMBOL(sock_set_keepalive);
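/*
 * Example (illustrative, userspace): the setsockopt() equivalent of
 * sock_set_keepalive() above:
 *
 *	int on = 1;
 *	setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on));
 */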
 903
 904static void __sock_set_rcvbuf(struct sock *sk, int val)
 905{
 906	/* Ensure val * 2 fits into an int, to prevent max_t() from treating it
 907	 * as a negative value.
 908	 */
 909	val = min_t(int, val, INT_MAX / 2);
 910	sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
 911
 912	/* We double it on the way in to account for "struct sk_buff" etc.
 913	 * overhead.   Applications assume that the SO_RCVBUF setting they make
 914	 * will allow that much actual data to be received on that socket.
 915	 *
 916	 * Applications are unaware that "struct sk_buff" and other overheads
 917	 * allocate from the receive buffer during socket buffer allocation.
 918	 *
 919	 * And after considering the possible alternatives, returning the value
 920	 * we actually used in getsockopt is the most desirable behavior.
 921	 */
 922	WRITE_ONCE(sk->sk_rcvbuf, max_t(int, val * 2, SOCK_MIN_RCVBUF));
 923}
 924
 925void sock_set_rcvbuf(struct sock *sk, int val)
 926{
 927	lock_sock(sk);
 928	__sock_set_rcvbuf(sk, val);
 929	release_sock(sk);
 930}
 931EXPORT_SYMBOL(sock_set_rcvbuf);
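/*
 * Example (illustrative, userspace): because of the doubling above, the
 * value read back is twice the one requested (assuming it was not
 * clamped by net.core.rmem_max):
 *
 *	int val = 65536, out;
 *	socklen_t len = sizeof(out);
 *	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, sizeof(val));
 *	getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &out, &len);	out == 131072
 */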
 932
 933static void __sock_set_mark(struct sock *sk, u32 val)
 934{
 935	if (val != sk->sk_mark) {
 936		sk->sk_mark = val;
 937		sk_dst_reset(sk);
 938	}
 939}
 940
 941void sock_set_mark(struct sock *sk, u32 val)
 942{
 943	lock_sock(sk);
 944	__sock_set_mark(sk, val);
 945	release_sock(sk);
 946}
 947EXPORT_SYMBOL(sock_set_mark);
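/*
 * Example (illustrative, userspace): setting a mark for policy routing
 * or netfilter matching; the SO_MARK case below requires CAP_NET_ADMIN:
 *
 *	unsigned int mark = 0x2a;
 *	setsockopt(fd, SOL_SOCKET, SO_MARK, &mark, sizeof(mark));
 */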
 948
 949/*
 950 *	This is meant for all protocols to use and covers goings on
 951 *	at the socket level. Everything here is generic.
 952 */
 953
 954int sock_setsockopt(struct socket *sock, int level, int optname,
 955		    sockptr_t optval, unsigned int optlen)
 956{
 957	struct so_timestamping timestamping;
 958	struct sock_txtime sk_txtime;
 959	struct sock *sk = sock->sk;
 960	int val;
 961	int valbool;
 962	struct linger ling;
 963	int ret = 0;
 964
 965	/*
 966	 *	Options without arguments
 967	 */
 968
 969	if (optname == SO_BINDTODEVICE)
 970		return sock_setbindtodevice(sk, optval, optlen);
 971
 972	if (optlen < sizeof(int))
 973		return -EINVAL;
 974
 975	if (copy_from_sockptr(&val, optval, sizeof(val)))
 976		return -EFAULT;
 977
 978	valbool = val ? 1 : 0;
 979
 980	lock_sock(sk);
 981
 982	switch (optname) {
 983	case SO_DEBUG:
 984		if (val && !capable(CAP_NET_ADMIN))
 985			ret = -EACCES;
 986		else
 987			sock_valbool_flag(sk, SOCK_DBG, valbool);
 988		break;
 989	case SO_REUSEADDR:
 990		sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
 991		break;
 992	case SO_REUSEPORT:
 993		sk->sk_reuseport = valbool;
 994		break;
 995	case SO_TYPE:
 996	case SO_PROTOCOL:
 997	case SO_DOMAIN:
 998	case SO_ERROR:
 999		ret = -ENOPROTOOPT;
1000		break;
1001	case SO_DONTROUTE:
1002		sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
1003		sk_dst_reset(sk);
1004		break;
1005	case SO_BROADCAST:
1006		sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
1007		break;
1008	case SO_SNDBUF:
 1009		/* Don't error on this; BSD doesn't, and if you think
 1010		 * about it, this is right. Otherwise apps have to
 1011		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
 1012		 * are treated in BSD as hints.
 1013		 */
1014		val = min_t(u32, val, sysctl_wmem_max);
1015set_sndbuf:
1016		/* Ensure val * 2 fits into an int, to prevent max_t()
1017		 * from treating it as a negative value.
1018		 */
1019		val = min_t(int, val, INT_MAX / 2);
1020		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
1021		WRITE_ONCE(sk->sk_sndbuf,
1022			   max_t(int, val * 2, SOCK_MIN_SNDBUF));
1023		/* Wake up sending tasks if we upped the value. */
1024		sk->sk_write_space(sk);
1025		break;
1026
1027	case SO_SNDBUFFORCE:
1028		if (!capable(CAP_NET_ADMIN)) {
1029			ret = -EPERM;
1030			break;
1031		}
1032
1033		/* No negative values (to prevent underflow, as val will be
1034		 * multiplied by 2).
1035		 */
1036		if (val < 0)
1037			val = 0;
1038		goto set_sndbuf;
1039
1040	case SO_RCVBUF:
 1041		/* Don't error on this; BSD doesn't, and if you think
 1042		 * about it, this is right. Otherwise apps have to
 1043		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
 1044		 * are treated in BSD as hints.
1045		 */
1046		__sock_set_rcvbuf(sk, min_t(u32, val, sysctl_rmem_max));
1047		break;
1048
1049	case SO_RCVBUFFORCE:
1050		if (!capable(CAP_NET_ADMIN)) {
1051			ret = -EPERM;
1052			break;
1053		}
1054
1055		/* No negative values (to prevent underflow, as val will be
1056		 * multiplied by 2).
1057		 */
1058		__sock_set_rcvbuf(sk, max(val, 0));
1059		break;
1060
1061	case SO_KEEPALIVE:
1062		if (sk->sk_prot->keepalive)
1063			sk->sk_prot->keepalive(sk, valbool);
1064		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
1065		break;
1066
1067	case SO_OOBINLINE:
1068		sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
1069		break;
1070
1071	case SO_NO_CHECK:
1072		sk->sk_no_check_tx = valbool;
1073		break;
1074
1075	case SO_PRIORITY:
1076		if ((val >= 0 && val <= 6) ||
1077		    ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1078			sk->sk_priority = val;
1079		else
1080			ret = -EPERM;
1081		break;
1082
1083	case SO_LINGER:
1084		if (optlen < sizeof(ling)) {
1085			ret = -EINVAL;	/* 1003.1g */
1086			break;
1087		}
1088		if (copy_from_sockptr(&ling, optval, sizeof(ling))) {
1089			ret = -EFAULT;
1090			break;
1091		}
1092		if (!ling.l_onoff)
1093			sock_reset_flag(sk, SOCK_LINGER);
1094		else {
1095#if (BITS_PER_LONG == 32)
1096			if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
1097				sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
1098			else
1099#endif
1100				sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
1101			sock_set_flag(sk, SOCK_LINGER);
1102		}
1103		break;
1104
1105	case SO_BSDCOMPAT:
1106		break;
1107
1108	case SO_PASSCRED:
1109		if (valbool)
1110			set_bit(SOCK_PASSCRED, &sock->flags);
1111		else
1112			clear_bit(SOCK_PASSCRED, &sock->flags);
1113		break;
1114
1115	case SO_TIMESTAMP_OLD:
1116	case SO_TIMESTAMP_NEW:
1117	case SO_TIMESTAMPNS_OLD:
1118	case SO_TIMESTAMPNS_NEW:
1119		sock_set_timestamp(sk, optname, valbool);
1120		break;
1121
1122	case SO_TIMESTAMPING_NEW:
1123	case SO_TIMESTAMPING_OLD:
1124		if (optlen == sizeof(timestamping)) {
1125			if (copy_from_sockptr(&timestamping, optval,
1126					      sizeof(timestamping))) {
1127				ret = -EFAULT;
1128				break;
1129			}
1130		} else {
1131			memset(&timestamping, 0, sizeof(timestamping));
1132			timestamping.flags = val;
1133		}
1134		ret = sock_set_timestamping(sk, optname, timestamping);
1135		break;
1136
1137	case SO_RCVLOWAT:
1138		if (val < 0)
1139			val = INT_MAX;
1140		if (sock->ops->set_rcvlowat)
1141			ret = sock->ops->set_rcvlowat(sk, val);
1142		else
1143			WRITE_ONCE(sk->sk_rcvlowat, val ? : 1);
1144		break;
1145
1146	case SO_RCVTIMEO_OLD:
1147	case SO_RCVTIMEO_NEW:
1148		ret = sock_set_timeout(&sk->sk_rcvtimeo, optval,
1149				       optlen, optname == SO_RCVTIMEO_OLD);
1150		break;
1151
1152	case SO_SNDTIMEO_OLD:
1153	case SO_SNDTIMEO_NEW:
1154		ret = sock_set_timeout(&sk->sk_sndtimeo, optval,
1155				       optlen, optname == SO_SNDTIMEO_OLD);
1156		break;
1157
1158	case SO_ATTACH_FILTER: {
1159		struct sock_fprog fprog;
1160
1161		ret = copy_bpf_fprog_from_user(&fprog, optval, optlen);
1162		if (!ret)
1163			ret = sk_attach_filter(&fprog, sk);
1164		break;
1165	}
1166	case SO_ATTACH_BPF:
1167		ret = -EINVAL;
1168		if (optlen == sizeof(u32)) {
1169			u32 ufd;
1170
1171			ret = -EFAULT;
1172			if (copy_from_sockptr(&ufd, optval, sizeof(ufd)))
1173				break;
1174
1175			ret = sk_attach_bpf(ufd, sk);
1176		}
1177		break;
1178
1179	case SO_ATTACH_REUSEPORT_CBPF: {
1180		struct sock_fprog fprog;
1181
1182		ret = copy_bpf_fprog_from_user(&fprog, optval, optlen);
1183		if (!ret)
1184			ret = sk_reuseport_attach_filter(&fprog, sk);
1185		break;
1186	}
1187	case SO_ATTACH_REUSEPORT_EBPF:
1188		ret = -EINVAL;
1189		if (optlen == sizeof(u32)) {
1190			u32 ufd;
1191
1192			ret = -EFAULT;
1193			if (copy_from_sockptr(&ufd, optval, sizeof(ufd)))
1194				break;
1195
1196			ret = sk_reuseport_attach_bpf(ufd, sk);
1197		}
1198		break;
1199
1200	case SO_DETACH_REUSEPORT_BPF:
1201		ret = reuseport_detach_prog(sk);
1202		break;
1203
1204	case SO_DETACH_FILTER:
1205		ret = sk_detach_filter(sk);
1206		break;
1207
1208	case SO_LOCK_FILTER:
1209		if (sock_flag(sk, SOCK_FILTER_LOCKED) && !valbool)
1210			ret = -EPERM;
1211		else
1212			sock_valbool_flag(sk, SOCK_FILTER_LOCKED, valbool);
1213		break;
1214
1215	case SO_PASSSEC:
1216		if (valbool)
1217			set_bit(SOCK_PASSSEC, &sock->flags);
1218		else
1219			clear_bit(SOCK_PASSSEC, &sock->flags);
1220		break;
1221	case SO_MARK:
1222		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
1223			ret = -EPERM;
1224			break;
1225		}
1226
1227		__sock_set_mark(sk, val);
1228		break;
1229
1230	case SO_RXQ_OVFL:
1231		sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
1232		break;
1233
1234	case SO_WIFI_STATUS:
1235		sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
1236		break;
1237
1238	case SO_PEEK_OFF:
1239		if (sock->ops->set_peek_off)
1240			ret = sock->ops->set_peek_off(sk, val);
1241		else
1242			ret = -EOPNOTSUPP;
1243		break;
1244
1245	case SO_NOFCS:
1246		sock_valbool_flag(sk, SOCK_NOFCS, valbool);
1247		break;
1248
1249	case SO_SELECT_ERR_QUEUE:
1250		sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool);
1251		break;
1252
1253#ifdef CONFIG_NET_RX_BUSY_POLL
1254	case SO_BUSY_POLL:
1255		/* allow unprivileged users to decrease the value */
1256		if ((val > sk->sk_ll_usec) && !capable(CAP_NET_ADMIN))
1257			ret = -EPERM;
1258		else {
1259			if (val < 0)
1260				ret = -EINVAL;
1261			else
1262				WRITE_ONCE(sk->sk_ll_usec, val);
1263		}
1264		break;
1265	case SO_PREFER_BUSY_POLL:
1266		if (valbool && !capable(CAP_NET_ADMIN))
1267			ret = -EPERM;
1268		else
1269			WRITE_ONCE(sk->sk_prefer_busy_poll, valbool);
1270		break;
1271	case SO_BUSY_POLL_BUDGET:
1272		if (val > READ_ONCE(sk->sk_busy_poll_budget) && !capable(CAP_NET_ADMIN)) {
1273			ret = -EPERM;
1274		} else {
1275			if (val < 0 || val > U16_MAX)
1276				ret = -EINVAL;
1277			else
1278				WRITE_ONCE(sk->sk_busy_poll_budget, val);
1279		}
1280		break;
1281#endif
1282
1283	case SO_MAX_PACING_RATE:
1284		{
1285		unsigned long ulval = (val == ~0U) ? ~0UL : (unsigned int)val;
1286
1287		if (sizeof(ulval) != sizeof(val) &&
1288		    optlen >= sizeof(ulval) &&
1289		    copy_from_sockptr(&ulval, optval, sizeof(ulval))) {
1290			ret = -EFAULT;
1291			break;
1292		}
1293		if (ulval != ~0UL)
1294			cmpxchg(&sk->sk_pacing_status,
1295				SK_PACING_NONE,
1296				SK_PACING_NEEDED);
1297		sk->sk_max_pacing_rate = ulval;
1298		sk->sk_pacing_rate = min(sk->sk_pacing_rate, ulval);
1299		break;
1300		}
1301	case SO_INCOMING_CPU:
1302		WRITE_ONCE(sk->sk_incoming_cpu, val);
1303		break;
1304
1305	case SO_CNX_ADVICE:
1306		if (val == 1)
1307			dst_negative_advice(sk);
1308		break;
1309
1310	case SO_ZEROCOPY:
1311		if (sk->sk_family == PF_INET || sk->sk_family == PF_INET6) {
1312			if (!((sk->sk_type == SOCK_STREAM &&
1313			       sk->sk_protocol == IPPROTO_TCP) ||
1314			      (sk->sk_type == SOCK_DGRAM &&
1315			       sk->sk_protocol == IPPROTO_UDP)))
1316				ret = -ENOTSUPP;
1317		} else if (sk->sk_family != PF_RDS) {
1318			ret = -ENOTSUPP;
1319		}
1320		if (!ret) {
1321			if (val < 0 || val > 1)
1322				ret = -EINVAL;
1323			else
1324				sock_valbool_flag(sk, SOCK_ZEROCOPY, valbool);
1325		}
1326		break;
1327
1328	case SO_TXTIME:
1329		if (optlen != sizeof(struct sock_txtime)) {
1330			ret = -EINVAL;
1331			break;
1332		} else if (copy_from_sockptr(&sk_txtime, optval,
1333			   sizeof(struct sock_txtime))) {
1334			ret = -EFAULT;
1335			break;
1336		} else if (sk_txtime.flags & ~SOF_TXTIME_FLAGS_MASK) {
1337			ret = -EINVAL;
1338			break;
1339		}
1340		/* CLOCK_MONOTONIC is only used by sch_fq, and this packet
 1341		 * scheduler has enough safeguards.
1342		 */
1343		if (sk_txtime.clockid != CLOCK_MONOTONIC &&
1344		    !ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
1345			ret = -EPERM;
1346			break;
1347		}
1348		sock_valbool_flag(sk, SOCK_TXTIME, true);
1349		sk->sk_clockid = sk_txtime.clockid;
1350		sk->sk_txtime_deadline_mode =
1351			!!(sk_txtime.flags & SOF_TXTIME_DEADLINE_MODE);
1352		sk->sk_txtime_report_errors =
1353			!!(sk_txtime.flags & SOF_TXTIME_REPORT_ERRORS);
1354		break;
1355
1356	case SO_BINDTOIFINDEX:
1357		ret = sock_bindtoindex_locked(sk, val);
1358		break;
1359
1360	default:
1361		ret = -ENOPROTOOPT;
1362		break;
1363	}
1364	release_sock(sk);
1365	return ret;
1366}
1367EXPORT_SYMBOL(sock_setsockopt);
1368
1369static const struct cred *sk_get_peer_cred(struct sock *sk)
1370{
1371	const struct cred *cred;
1372
1373	spin_lock(&sk->sk_peer_lock);
1374	cred = get_cred(sk->sk_peer_cred);
1375	spin_unlock(&sk->sk_peer_lock);
1376
1377	return cred;
1378}
1379
1380static void cred_to_ucred(struct pid *pid, const struct cred *cred,
1381			  struct ucred *ucred)
1382{
1383	ucred->pid = pid_vnr(pid);
1384	ucred->uid = ucred->gid = -1;
1385	if (cred) {
1386		struct user_namespace *current_ns = current_user_ns();
1387
1388		ucred->uid = from_kuid_munged(current_ns, cred->euid);
1389		ucred->gid = from_kgid_munged(current_ns, cred->egid);
1390	}
1391}
1392
1393static int groups_to_user(gid_t __user *dst, const struct group_info *src)
1394{
1395	struct user_namespace *user_ns = current_user_ns();
1396	int i;
1397
1398	for (i = 0; i < src->ngroups; i++)
1399		if (put_user(from_kgid_munged(user_ns, src->gid[i]), dst + i))
1400			return -EFAULT;
1401
1402	return 0;
1403}
1404
1405int sock_getsockopt(struct socket *sock, int level, int optname,
1406		    char __user *optval, int __user *optlen)
1407{
1408	struct sock *sk = sock->sk;
1409
1410	union {
1411		int val;
1412		u64 val64;
1413		unsigned long ulval;
1414		struct linger ling;
1415		struct old_timeval32 tm32;
1416		struct __kernel_old_timeval tm;
1417		struct  __kernel_sock_timeval stm;
1418		struct sock_txtime txtime;
1419		struct so_timestamping timestamping;
1420	} v;
1421
1422	int lv = sizeof(int);
1423	int len;
1424
1425	if (get_user(len, optlen))
1426		return -EFAULT;
1427	if (len < 0)
1428		return -EINVAL;
1429
1430	memset(&v, 0, sizeof(v));
1431
1432	switch (optname) {
1433	case SO_DEBUG:
1434		v.val = sock_flag(sk, SOCK_DBG);
1435		break;
1436
1437	case SO_DONTROUTE:
1438		v.val = sock_flag(sk, SOCK_LOCALROUTE);
1439		break;
1440
1441	case SO_BROADCAST:
1442		v.val = sock_flag(sk, SOCK_BROADCAST);
1443		break;
1444
1445	case SO_SNDBUF:
1446		v.val = sk->sk_sndbuf;
1447		break;
1448
1449	case SO_RCVBUF:
1450		v.val = sk->sk_rcvbuf;
1451		break;
1452
1453	case SO_REUSEADDR:
1454		v.val = sk->sk_reuse;
1455		break;
1456
1457	case SO_REUSEPORT:
1458		v.val = sk->sk_reuseport;
1459		break;
1460
1461	case SO_KEEPALIVE:
1462		v.val = sock_flag(sk, SOCK_KEEPOPEN);
1463		break;
1464
1465	case SO_TYPE:
1466		v.val = sk->sk_type;
1467		break;
1468
1469	case SO_PROTOCOL:
1470		v.val = sk->sk_protocol;
1471		break;
1472
1473	case SO_DOMAIN:
1474		v.val = sk->sk_family;
1475		break;
1476
1477	case SO_ERROR:
1478		v.val = -sock_error(sk);
1479		if (v.val == 0)
1480			v.val = xchg(&sk->sk_err_soft, 0);
1481		break;
1482
1483	case SO_OOBINLINE:
1484		v.val = sock_flag(sk, SOCK_URGINLINE);
1485		break;
1486
1487	case SO_NO_CHECK:
1488		v.val = sk->sk_no_check_tx;
1489		break;
1490
1491	case SO_PRIORITY:
1492		v.val = sk->sk_priority;
1493		break;
1494
1495	case SO_LINGER:
1496		lv		= sizeof(v.ling);
1497		v.ling.l_onoff	= sock_flag(sk, SOCK_LINGER);
1498		v.ling.l_linger	= sk->sk_lingertime / HZ;
1499		break;
1500
1501	case SO_BSDCOMPAT:
1502		break;
1503
1504	case SO_TIMESTAMP_OLD:
1505		v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
1506				!sock_flag(sk, SOCK_TSTAMP_NEW) &&
1507				!sock_flag(sk, SOCK_RCVTSTAMPNS);
1508		break;
1509
1510	case SO_TIMESTAMPNS_OLD:
1511		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS) && !sock_flag(sk, SOCK_TSTAMP_NEW);
1512		break;
1513
1514	case SO_TIMESTAMP_NEW:
1515		v.val = sock_flag(sk, SOCK_RCVTSTAMP) && sock_flag(sk, SOCK_TSTAMP_NEW);
1516		break;
1517
1518	case SO_TIMESTAMPNS_NEW:
1519		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS) && sock_flag(sk, SOCK_TSTAMP_NEW);
1520		break;
1521
1522	case SO_TIMESTAMPING_OLD:
1523		lv = sizeof(v.timestamping);
1524		v.timestamping.flags = sk->sk_tsflags;
1525		v.timestamping.bind_phc = sk->sk_bind_phc;
1526		break;
1527
1528	case SO_RCVTIMEO_OLD:
1529	case SO_RCVTIMEO_NEW:
1530		lv = sock_get_timeout(sk->sk_rcvtimeo, &v, SO_RCVTIMEO_OLD == optname);
1531		break;
1532
1533	case SO_SNDTIMEO_OLD:
1534	case SO_SNDTIMEO_NEW:
1535		lv = sock_get_timeout(sk->sk_sndtimeo, &v, SO_SNDTIMEO_OLD == optname);
1536		break;
1537
1538	case SO_RCVLOWAT:
1539		v.val = sk->sk_rcvlowat;
1540		break;
1541
1542	case SO_SNDLOWAT:
1543		v.val = 1;
1544		break;
1545
1546	case SO_PASSCRED:
1547		v.val = !!test_bit(SOCK_PASSCRED, &sock->flags);
1548		break;
1549
1550	case SO_PEERCRED:
1551	{
1552		struct ucred peercred;
1553		if (len > sizeof(peercred))
1554			len = sizeof(peercred);
1555
1556		spin_lock(&sk->sk_peer_lock);
1557		cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
1558		spin_unlock(&sk->sk_peer_lock);
1559
1560		if (copy_to_user(optval, &peercred, len))
1561			return -EFAULT;
1562		goto lenout;
1563	}
1564
1565	case SO_PEERGROUPS:
1566	{
1567		const struct cred *cred;
1568		int ret, n;
1569
1570		cred = sk_get_peer_cred(sk);
1571		if (!cred)
1572			return -ENODATA;
1573
1574		n = cred->group_info->ngroups;
1575		if (len < n * sizeof(gid_t)) {
1576			len = n * sizeof(gid_t);
1577			put_cred(cred);
1578			return put_user(len, optlen) ? -EFAULT : -ERANGE;
1579		}
1580		len = n * sizeof(gid_t);
1581
1582		ret = groups_to_user((gid_t __user *)optval, cred->group_info);
1583		put_cred(cred);
1584		if (ret)
1585			return ret;
1586		goto lenout;
1587	}
1588
1589	case SO_PEERNAME:
1590	{
1591		char address[128];
1592
1593		lv = sock->ops->getname(sock, (struct sockaddr *)address, 2);
1594		if (lv < 0)
1595			return -ENOTCONN;
1596		if (lv < len)
1597			return -EINVAL;
1598		if (copy_to_user(optval, address, len))
1599			return -EFAULT;
1600		goto lenout;
1601	}
1602
1603	/* Dubious BSD thing... Probably nobody even uses it, but
1604	 * the UNIX standard wants it for whatever reason... -DaveM
1605	 */
1606	case SO_ACCEPTCONN:
1607		v.val = sk->sk_state == TCP_LISTEN;
1608		break;
1609
1610	case SO_PASSSEC:
1611		v.val = !!test_bit(SOCK_PASSSEC, &sock->flags);
1612		break;
1613
1614	case SO_PEERSEC:
1615		return security_socket_getpeersec_stream(sock, optval, optlen, len);
1616
1617	case SO_MARK:
1618		v.val = sk->sk_mark;
1619		break;
1620
1621	case SO_RXQ_OVFL:
1622		v.val = sock_flag(sk, SOCK_RXQ_OVFL);
1623		break;
1624
1625	case SO_WIFI_STATUS:
1626		v.val = sock_flag(sk, SOCK_WIFI_STATUS);
1627		break;
1628
1629	case SO_PEEK_OFF:
1630		if (!sock->ops->set_peek_off)
1631			return -EOPNOTSUPP;
1632
1633		v.val = sk->sk_peek_off;
1634		break;
1635	case SO_NOFCS:
1636		v.val = sock_flag(sk, SOCK_NOFCS);
1637		break;
1638
1639	case SO_BINDTODEVICE:
1640		return sock_getbindtodevice(sk, optval, optlen, len);
1641
1642	case SO_GET_FILTER:
1643		len = sk_get_filter(sk, (struct sock_filter __user *)optval, len);
1644		if (len < 0)
1645			return len;
1646
1647		goto lenout;
1648
1649	case SO_LOCK_FILTER:
1650		v.val = sock_flag(sk, SOCK_FILTER_LOCKED);
1651		break;
1652
1653	case SO_BPF_EXTENSIONS:
1654		v.val = bpf_tell_extensions();
1655		break;
1656
1657	case SO_SELECT_ERR_QUEUE:
1658		v.val = sock_flag(sk, SOCK_SELECT_ERR_QUEUE);
1659		break;
1660
1661#ifdef CONFIG_NET_RX_BUSY_POLL
1662	case SO_BUSY_POLL:
1663		v.val = sk->sk_ll_usec;
1664		break;
1665	case SO_PREFER_BUSY_POLL:
1666		v.val = READ_ONCE(sk->sk_prefer_busy_poll);
1667		break;
1668#endif
1669
1670	case SO_MAX_PACING_RATE:
1671		if (sizeof(v.ulval) != sizeof(v.val) && len >= sizeof(v.ulval)) {
1672			lv = sizeof(v.ulval);
1673			v.ulval = sk->sk_max_pacing_rate;
1674		} else {
1675			/* 32bit version */
1676			v.val = min_t(unsigned long, sk->sk_max_pacing_rate, ~0U);
1677		}
1678		break;
1679
1680	case SO_INCOMING_CPU:
1681		v.val = READ_ONCE(sk->sk_incoming_cpu);
1682		break;
1683
1684	case SO_MEMINFO:
1685	{
1686		u32 meminfo[SK_MEMINFO_VARS];
1687
1688		sk_get_meminfo(sk, meminfo);
1689
1690		len = min_t(unsigned int, len, sizeof(meminfo));
1691		if (copy_to_user(optval, &meminfo, len))
1692			return -EFAULT;
1693
1694		goto lenout;
1695	}
1696
1697#ifdef CONFIG_NET_RX_BUSY_POLL
1698	case SO_INCOMING_NAPI_ID:
1699		v.val = READ_ONCE(sk->sk_napi_id);
1700
1701		/* aggregate non-NAPI IDs down to 0 */
1702		if (v.val < MIN_NAPI_ID)
1703			v.val = 0;
1704
1705		break;
1706#endif
1707
1708	case SO_COOKIE:
1709		lv = sizeof(u64);
1710		if (len < lv)
1711			return -EINVAL;
1712		v.val64 = sock_gen_cookie(sk);
1713		break;
1714
1715	case SO_ZEROCOPY:
1716		v.val = sock_flag(sk, SOCK_ZEROCOPY);
1717		break;
1718
1719	case SO_TXTIME:
1720		lv = sizeof(v.txtime);
1721		v.txtime.clockid = sk->sk_clockid;
1722		v.txtime.flags |= sk->sk_txtime_deadline_mode ?
1723				  SOF_TXTIME_DEADLINE_MODE : 0;
1724		v.txtime.flags |= sk->sk_txtime_report_errors ?
1725				  SOF_TXTIME_REPORT_ERRORS : 0;
1726		break;
1727
1728	case SO_BINDTOIFINDEX:
1729		v.val = sk->sk_bound_dev_if;
1730		break;
1731
1732	case SO_NETNS_COOKIE:
1733		lv = sizeof(u64);
1734		if (len != lv)
1735			return -EINVAL;
1736		v.val64 = sock_net(sk)->net_cookie;
1737		break;
1738
1739	default:
1740		/* We implement the SO_SNDLOWAT etc to not be settable
1741		 * (1003.1g 7).
1742		 */
1743		return -ENOPROTOOPT;
1744	}
1745
1746	if (len > lv)
1747		len = lv;
1748	if (copy_to_user(optval, &v, len))
1749		return -EFAULT;
1750lenout:
1751	if (put_user(len, optlen))
1752		return -EFAULT;
1753	return 0;
1754}
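/*
 * Example (illustrative, userspace): reading peer credentials on a
 * connected AF_UNIX socket through the SO_PEERCRED case above:
 *
 *	struct ucred uc;
 *	socklen_t len = sizeof(uc);
 *	if (!getsockopt(fd, SOL_SOCKET, SO_PEERCRED, &uc, &len))
 *		printf("pid=%d uid=%u gid=%u\n", uc.pid, uc.uid, uc.gid);
 */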
1755
1756/*
1757 * Initialize an sk_lock.
1758 *
1759 * (We also register the sk_lock with the lock validator.)
1760 */
1761static inline void sock_lock_init(struct sock *sk)
1762{
1763	if (sk->sk_kern_sock)
1764		sock_lock_init_class_and_name(
1765			sk,
1766			af_family_kern_slock_key_strings[sk->sk_family],
1767			af_family_kern_slock_keys + sk->sk_family,
1768			af_family_kern_key_strings[sk->sk_family],
1769			af_family_kern_keys + sk->sk_family);
1770	else
1771		sock_lock_init_class_and_name(
1772			sk,
1773			af_family_slock_key_strings[sk->sk_family],
1774			af_family_slock_keys + sk->sk_family,
1775			af_family_key_strings[sk->sk_family],
1776			af_family_keys + sk->sk_family);
1777}
1778
1779/*
1780 * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
 1781 * even temporarily, because of RCU lookups. sk_node should also be left as is.
 1782 * We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end.
1783 */
1784static void sock_copy(struct sock *nsk, const struct sock *osk)
1785{
1786	const struct proto *prot = READ_ONCE(osk->sk_prot);
1787#ifdef CONFIG_SECURITY_NETWORK
1788	void *sptr = nsk->sk_security;
1789#endif
1790
1791	/* If we move sk_tx_queue_mapping out of the private section,
1792	 * we must check if sk_tx_queue_clear() is called after
1793	 * sock_copy() in sk_clone_lock().
1794	 */
1795	BUILD_BUG_ON(offsetof(struct sock, sk_tx_queue_mapping) <
1796		     offsetof(struct sock, sk_dontcopy_begin) ||
1797		     offsetof(struct sock, sk_tx_queue_mapping) >=
1798		     offsetof(struct sock, sk_dontcopy_end));
1799
1800	memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));
1801
1802	memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
1803	       prot->obj_size - offsetof(struct sock, sk_dontcopy_end));
1804
1805#ifdef CONFIG_SECURITY_NETWORK
1806	nsk->sk_security = sptr;
1807	security_sk_clone(osk, nsk);
1808#endif
1809}
1810
1811static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
1812		int family)
1813{
1814	struct sock *sk;
1815	struct kmem_cache *slab;
1816
1817	slab = prot->slab;
1818	if (slab != NULL) {
1819		sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
1820		if (!sk)
1821			return sk;
1822		if (want_init_on_alloc(priority))
1823			sk_prot_clear_nulls(sk, prot->obj_size);
1824	} else
1825		sk = kmalloc(prot->obj_size, priority);
1826
1827	if (sk != NULL) {
1828		if (security_sk_alloc(sk, family, priority))
1829			goto out_free;
1830
1831		if (!try_module_get(prot->owner))
1832			goto out_free_sec;
1833	}
1834
1835	return sk;
1836
1837out_free_sec:
1838	security_sk_free(sk);
1839out_free:
1840	if (slab != NULL)
1841		kmem_cache_free(slab, sk);
1842	else
1843		kfree(sk);
1844	return NULL;
1845}
1846
1847static void sk_prot_free(struct proto *prot, struct sock *sk)
1848{
1849	struct kmem_cache *slab;
1850	struct module *owner;
1851
1852	owner = prot->owner;
1853	slab = prot->slab;
1854
1855	cgroup_sk_free(&sk->sk_cgrp_data);
1856	mem_cgroup_sk_free(sk);
1857	security_sk_free(sk);
1858	if (slab != NULL)
1859		kmem_cache_free(slab, sk);
1860	else
1861		kfree(sk);
1862	module_put(owner);
1863}
1864
1865/**
1866 *	sk_alloc - All socket objects are allocated here
1867 *	@net: the applicable net namespace
1868 *	@family: protocol family
1869 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1870 *	@prot: struct proto associated with this new sock instance
1871 *	@kern: is this to be a kernel socket?
1872 */
1873struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
1874		      struct proto *prot, int kern)
1875{
1876	struct sock *sk;
1877
1878	sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
1879	if (sk) {
1880		sk->sk_family = family;
1881		/*
1882		 * See comment in struct sock definition to understand
1883		 * why we need sk_prot_creator -acme
1884		 */
1885		sk->sk_prot = sk->sk_prot_creator = prot;
1886		sk->sk_kern_sock = kern;
1887		sock_lock_init(sk);
1888		sk->sk_net_refcnt = kern ? 0 : 1;
1889		if (likely(sk->sk_net_refcnt)) {
1890			get_net(net);
1891			sock_inuse_add(net, 1);
1892		}
1893
1894		sock_net_set(sk, net);
1895		refcount_set(&sk->sk_wmem_alloc, 1);
1896
1897		mem_cgroup_sk_alloc(sk);
1898		cgroup_sk_alloc(&sk->sk_cgrp_data);
1899		sock_update_classid(&sk->sk_cgrp_data);
1900		sock_update_netprioidx(&sk->sk_cgrp_data);
1901		sk_tx_queue_clear(sk);
1902	}
1903
1904	return sk;
1905}
1906EXPORT_SYMBOL(sk_alloc);
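/*
 * Example (illustrative sketch): a protocol family's ->create() hook
 * typically pairs sk_alloc() with sock_init_data() (foo_proto is a
 * hypothetical struct proto):
 *
 *	sk = sk_alloc(net, PF_INET, GFP_KERNEL, &foo_proto, kern);
 *	if (!sk)
 *		return -ENOBUFS;
 *	sock_init_data(sock, sk);
 */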
1907
1908/* Sockets having SOCK_RCU_FREE will call this function after one RCU
1909 * grace period. This is the case for UDP sockets and TCP listeners.
1910 */
1911static void __sk_destruct(struct rcu_head *head)
1912{
1913	struct sock *sk = container_of(head, struct sock, sk_rcu);
1914	struct sk_filter *filter;
1915
1916	if (sk->sk_destruct)
1917		sk->sk_destruct(sk);
1918
1919	filter = rcu_dereference_check(sk->sk_filter,
1920				       refcount_read(&sk->sk_wmem_alloc) == 0);
1921	if (filter) {
1922		sk_filter_uncharge(sk, filter);
1923		RCU_INIT_POINTER(sk->sk_filter, NULL);
1924	}
1925
1926	sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);
1927
1928#ifdef CONFIG_BPF_SYSCALL
1929	bpf_sk_storage_free(sk);
1930#endif
1931
1932	if (atomic_read(&sk->sk_omem_alloc))
1933		pr_debug("%s: optmem leakage (%d bytes) detected\n",
1934			 __func__, atomic_read(&sk->sk_omem_alloc));
1935
1936	if (sk->sk_frag.page) {
1937		put_page(sk->sk_frag.page);
1938		sk->sk_frag.page = NULL;
1939	}
1940
1941	/* We do not need to acquire sk->sk_peer_lock, we are the last user. */
1942	put_cred(sk->sk_peer_cred);
1943	put_pid(sk->sk_peer_pid);
1944
1945	if (likely(sk->sk_net_refcnt))
1946		put_net(sock_net(sk));
1947	sk_prot_free(sk->sk_prot_creator, sk);
1948}
1949
1950void sk_destruct(struct sock *sk)
1951{
1952	bool use_call_rcu = sock_flag(sk, SOCK_RCU_FREE);
1953
1954	if (rcu_access_pointer(sk->sk_reuseport_cb)) {
1955		reuseport_detach_sock(sk);
1956		use_call_rcu = true;
1957	}
1958
1959	if (use_call_rcu)
1960		call_rcu(&sk->sk_rcu, __sk_destruct);
1961	else
1962		__sk_destruct(&sk->sk_rcu);
1963}
1964
1965static void __sk_free(struct sock *sk)
1966{
1967	if (likely(sk->sk_net_refcnt))
1968		sock_inuse_add(sock_net(sk), -1);
1969
1970	if (unlikely(sk->sk_net_refcnt && sock_diag_has_destroy_listeners(sk)))
1971		sock_diag_broadcast_destroy(sk);
1972	else
1973		sk_destruct(sk);
1974}
1975
1976void sk_free(struct sock *sk)
1977{
1978	/*
1979	 * We subtract one from sk_wmem_alloc and can know if
1980	 * some packets are still in some tx queue.
1981	 * If not null, sock_wfree() will call __sk_free(sk) later
1982	 */
1983	if (refcount_dec_and_test(&sk->sk_wmem_alloc))
1984		__sk_free(sk);
1985}
1986EXPORT_SYMBOL(sk_free);
1987
1988static void sk_init_common(struct sock *sk)
1989{
1990	skb_queue_head_init(&sk->sk_receive_queue);
1991	skb_queue_head_init(&sk->sk_write_queue);
1992	skb_queue_head_init(&sk->sk_error_queue);
1993
1994	rwlock_init(&sk->sk_callback_lock);
1995	lockdep_set_class_and_name(&sk->sk_receive_queue.lock,
1996			af_rlock_keys + sk->sk_family,
1997			af_family_rlock_key_strings[sk->sk_family]);
1998	lockdep_set_class_and_name(&sk->sk_write_queue.lock,
1999			af_wlock_keys + sk->sk_family,
2000			af_family_wlock_key_strings[sk->sk_family]);
2001	lockdep_set_class_and_name(&sk->sk_error_queue.lock,
2002			af_elock_keys + sk->sk_family,
2003			af_family_elock_key_strings[sk->sk_family]);
2004	lockdep_set_class_and_name(&sk->sk_callback_lock,
2005			af_callback_keys + sk->sk_family,
2006			af_family_clock_key_strings[sk->sk_family]);
2007}
2008
2009/**
2010 *	sk_clone_lock - clone a socket, and lock its clone
2011 *	@sk: the socket to clone
2012 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
2013 *
2014 *	Caller must unlock socket even in error path (bh_unlock_sock(newsk))
2015 */
2016struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
2017{
2018	struct proto *prot = READ_ONCE(sk->sk_prot);
2019	struct sk_filter *filter;
2020	bool is_charged = true;
2021	struct sock *newsk;
2022
2023	newsk = sk_prot_alloc(prot, priority, sk->sk_family);
2024	if (!newsk)
2025		goto out;
2026
2027	sock_copy(newsk, sk);
2028
2029	newsk->sk_prot_creator = prot;
2030
2031	/* SANITY */
2032	if (likely(newsk->sk_net_refcnt))
2033		get_net(sock_net(newsk));
2034	sk_node_init(&newsk->sk_node);
2035	sock_lock_init(newsk);
2036	bh_lock_sock(newsk);
2037	newsk->sk_backlog.head	= newsk->sk_backlog.tail = NULL;
2038	newsk->sk_backlog.len = 0;
2039
2040	atomic_set(&newsk->sk_rmem_alloc, 0);
2041
2042	/* sk_wmem_alloc set to one (see sk_free() and sock_wfree()) */
2043	refcount_set(&newsk->sk_wmem_alloc, 1);
2044
2045	atomic_set(&newsk->sk_omem_alloc, 0);
2046	sk_init_common(newsk);
2047
2048	newsk->sk_dst_cache	= NULL;
2049	newsk->sk_dst_pending_confirm = 0;
2050	newsk->sk_wmem_queued	= 0;
2051	newsk->sk_forward_alloc = 0;
2052	atomic_set(&newsk->sk_drops, 0);
2053	newsk->sk_send_head	= NULL;
2054	newsk->sk_userlocks	= sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
2055	atomic_set(&newsk->sk_zckey, 0);
2056
2057	sock_reset_flag(newsk, SOCK_DONE);
2058
2059	/* sk->sk_memcg will be populated at accept() time */
2060	newsk->sk_memcg = NULL;
2061
2062	cgroup_sk_clone(&newsk->sk_cgrp_data);
2063
2064	rcu_read_lock();
2065	filter = rcu_dereference(sk->sk_filter);
2066	if (filter != NULL)
2067		/* Though it's an empty new sock, the charging may fail
2068		 * if sysctl_optmem_max was changed between creation of
2069		 * the original socket and the cloning.
2070		 */
2071		is_charged = sk_filter_charge(newsk, filter);
2072	RCU_INIT_POINTER(newsk->sk_filter, filter);
2073	rcu_read_unlock();
2074
2075	if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) {
2076	/* We need to make sure that we don't uncharge the new
2077	 * socket if we couldn't charge it in the first place,
2078	 * as otherwise we would uncharge the parent's filter.
2079	 */
2080		if (!is_charged)
2081			RCU_INIT_POINTER(newsk->sk_filter, NULL);
2082		sk_free_unlock_clone(newsk);
2083		newsk = NULL;
2084		goto out;
2085	}
2086	RCU_INIT_POINTER(newsk->sk_reuseport_cb, NULL);
2087
2088	if (bpf_sk_storage_clone(sk, newsk)) {
2089		sk_free_unlock_clone(newsk);
2090		newsk = NULL;
2091		goto out;
2092	}
2093
2094	/* Clear sk_user_data if parent had the pointer tagged
2095	 * as not suitable for copying when cloning.
2096	 */
2097	if (sk_user_data_is_nocopy(newsk))
2098		newsk->sk_user_data = NULL;
2099
2100	newsk->sk_err	   = 0;
2101	newsk->sk_err_soft = 0;
2102	newsk->sk_priority = 0;
2103	newsk->sk_incoming_cpu = raw_smp_processor_id();
2104	if (likely(newsk->sk_net_refcnt))
2105		sock_inuse_add(sock_net(newsk), 1);
2106
2107	/* Before updating sk_refcnt, we must commit prior changes to memory
2108	 * (Documentation/RCU/rculist_nulls.rst for details)
2109	 */
2110	smp_wmb();
2111	refcount_set(&newsk->sk_refcnt, 2);
2112
2113	/* Increment the counter in the same struct proto as the master
2114	 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
2115	 * is the same as sk->sk_prot->socks, as this field was copied
2116	 * with memcpy).
2117	 *
2118	 * This _changes_ the previous behaviour, where
2119	 * tcp_create_openreq_child was always incrementing the
2120	 * equivalent of tcp_prot->socks (inet_sock_nr), so this has
2121	 * to be taken into account in all callers. -acme
2122	 */
2123	sk_refcnt_debug_inc(newsk);
2124	sk_set_socket(newsk, NULL);
2125	sk_tx_queue_clear(newsk);
2126	RCU_INIT_POINTER(newsk->sk_wq, NULL);
2127
2128	if (newsk->sk_prot->sockets_allocated)
2129		sk_sockets_allocated_inc(newsk);
2130
2131	if (sock_needs_netstamp(sk) && newsk->sk_flags & SK_FLAGS_TIMESTAMP)
2132		net_enable_timestamp();
2133out:
2134	return newsk;
2135}
2136EXPORT_SYMBOL_GPL(sk_clone_lock);
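
/*
 * Editor's sketch (not part of this file) of the locking contract
 * documented above: the clone comes back bh-locked and the caller must
 * unlock it. my_clone() is a hypothetical caller.
 */
static struct sock *my_clone(const struct sock *parent)
{
	struct sock *newsk = sk_clone_lock(parent, GFP_ATOMIC);

	if (!newsk)		/* allocation failed: nothing is locked */
		return NULL;

	/* ... protocol-private initialisation of newsk ... */

	bh_unlock_sock(newsk);	/* caller owns the unlock */
	return newsk;
}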
2137
2138void sk_free_unlock_clone(struct sock *sk)
2139{
2140	/* It is still a raw copy of the parent, so invalidate
2141	 * the destructor and do a plain sk_free(). */
2142	sk->sk_destruct = NULL;
2143	bh_unlock_sock(sk);
2144	sk_free(sk);
2145}
2146EXPORT_SYMBOL_GPL(sk_free_unlock_clone);
2147
2148void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
2149{
2150	u32 max_segs = 1;
2151
2152	sk_dst_set(sk, dst);
2153	sk->sk_route_caps = dst->dev->features | sk->sk_route_forced_caps;
2154	if (sk->sk_route_caps & NETIF_F_GSO)
2155		sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
2156	sk->sk_route_caps &= ~sk->sk_route_nocaps;
2157	if (sk_can_gso(sk)) {
2158		if (dst->header_len && !xfrm_dst_offload_ok(dst)) {
2159			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
2160		} else {
2161			sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
2162			sk->sk_gso_max_size = dst->dev->gso_max_size;
2163			max_segs = max_t(u32, dst->dev->gso_max_segs, 1);
2164		}
2165	}
2166	sk->sk_gso_max_segs = max_segs;
2167}
2168EXPORT_SYMBOL_GPL(sk_setup_caps);
2169
2170/*
2171 *	Simple resource managers for sockets.
2172 */
2173
2174
2175/*
2176 * Write buffer destructor automatically called from kfree_skb.
2177 */
2178void sock_wfree(struct sk_buff *skb)
2179{
2180	struct sock *sk = skb->sk;
2181	unsigned int len = skb->truesize;
2182
2183	if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
2184		/*
2185		 * Keep a reference on sk_wmem_alloc; it will be released
2186		 * after the sk_write_space() call.
2187		 */
2188		WARN_ON(refcount_sub_and_test(len - 1, &sk->sk_wmem_alloc));
2189		sk->sk_write_space(sk);
2190		len = 1;
2191	}
2192	/*
2193	 * if sk_wmem_alloc reaches 0, we must finish what sk_free()
2194	 * could not do because of in-flight packets
2195	 */
2196	if (refcount_sub_and_test(len, &sk->sk_wmem_alloc))
2197		__sk_free(sk);
2198}
2199EXPORT_SYMBOL(sock_wfree);
2200
2201/* This variant of sock_wfree() is used by TCP,
2202 * since it sets SOCK_USE_WRITE_QUEUE.
2203 */
2204void __sock_wfree(struct sk_buff *skb)
2205{
2206	struct sock *sk = skb->sk;
2207
2208	if (refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc))
2209		__sk_free(sk);
2210}
2211
2212void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
2213{
2214	skb_orphan(skb);
2215	skb->sk = sk;
2216#ifdef CONFIG_INET
2217	if (unlikely(!sk_fullsock(sk))) {
2218		skb->destructor = sock_edemux;
2219		sock_hold(sk);
2220		return;
2221	}
2222#endif
2223	skb->destructor = sock_wfree;
2224	skb_set_hash_from_sk(skb, sk);
2225	/*
2226	 * We used to take a refcount on sk, but the following operation
2227	 * is enough to guarantee sk_free() won't free this sock until
2228	 * all in-flight packets are completed.
2229	 */
2230	refcount_add(skb->truesize, &sk->sk_wmem_alloc);
2231}
2232EXPORT_SYMBOL(skb_set_owner_w);
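
/*
 * Editor's sketch (not part of this file): pairing alloc_skb() with
 * skb_set_owner_w() so that skb->truesize is charged to sk_wmem_alloc
 * and released by sock_wfree() when the skb is freed. my_alloc_tx_skb()
 * is a hypothetical helper.
 */
static struct sk_buff *my_alloc_tx_skb(struct sock *sk, unsigned int len)
{
	struct sk_buff *skb = alloc_skb(len, sk->sk_allocation);

	if (skb)
		skb_set_owner_w(skb, sk);	/* charges skb->truesize */
	return skb;
}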
2233
2234static bool can_skb_orphan_partial(const struct sk_buff *skb)
2235{
2236#ifdef CONFIG_TLS_DEVICE
2237	/* Drivers depend on in-order delivery for crypto offload,
2238	 * partial orphan breaks out-of-order-OK logic.
2239	 */
2240	if (skb->decrypted)
2241		return false;
2242#endif
2243	return (skb->destructor == sock_wfree ||
2244		(IS_ENABLED(CONFIG_INET) && skb->destructor == tcp_wfree));
2245}
2246
2247/* This helper is used by netem, as it can hold packets in its
2248 * delay queue. We want to allow the owner socket to send more
2249 * packets, as if they were already TX completed by a typical driver.
2250 * But we also want to keep skb->sk set because some packet schedulers
2251 * rely on it (sch_fq for example).
2252 */
2253void skb_orphan_partial(struct sk_buff *skb)
2254{
2255	if (skb_is_tcp_pure_ack(skb))
2256		return;
2257
2258	if (can_skb_orphan_partial(skb) && skb_set_owner_sk_safe(skb, skb->sk))
2259		return;
2260
2261	skb_orphan(skb);
2262}
2263EXPORT_SYMBOL(skb_orphan_partial);
2264
2265/*
2266 * Read buffer destructor automatically called from kfree_skb.
2267 */
2268void sock_rfree(struct sk_buff *skb)
2269{
2270	struct sock *sk = skb->sk;
2271	unsigned int len = skb->truesize;
2272
2273	atomic_sub(len, &sk->sk_rmem_alloc);
2274	sk_mem_uncharge(sk, len);
2275}
2276EXPORT_SYMBOL(sock_rfree);
2277
2278/*
2279 * Buffer destructor for skbs that are not used directly in read or write
2280 * path, e.g. for error handler skbs. Automatically called from kfree_skb.
2281 */
2282void sock_efree(struct sk_buff *skb)
2283{
2284	sock_put(skb->sk);
2285}
2286EXPORT_SYMBOL(sock_efree);
2287
2288/* Buffer destructor for prefetch/receive path where reference count may
2289 * not be held, e.g. for listen sockets.
2290 */
2291#ifdef CONFIG_INET
2292void sock_pfree(struct sk_buff *skb)
2293{
2294	if (sk_is_refcounted(skb->sk))
2295		sock_gen_put(skb->sk);
2296}
2297EXPORT_SYMBOL(sock_pfree);
2298#endif /* CONFIG_INET */
2299
2300kuid_t sock_i_uid(struct sock *sk)
2301{
2302	kuid_t uid;
2303
2304	read_lock_bh(&sk->sk_callback_lock);
2305	uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : GLOBAL_ROOT_UID;
2306	read_unlock_bh(&sk->sk_callback_lock);
2307	return uid;
2308}
2309EXPORT_SYMBOL(sock_i_uid);
2310
2311unsigned long sock_i_ino(struct sock *sk)
2312{
2313	unsigned long ino;
2314
2315	read_lock_bh(&sk->sk_callback_lock);
2316	ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
2317	read_unlock_bh(&sk->sk_callback_lock);
2318	return ino;
2319}
2320EXPORT_SYMBOL(sock_i_ino);
2321
2322/*
2323 * Allocate a skb from the socket's send buffer.
2324 */
2325struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
2326			     gfp_t priority)
2327{
2328	if (force ||
2329	    refcount_read(&sk->sk_wmem_alloc) < READ_ONCE(sk->sk_sndbuf)) {
2330		struct sk_buff *skb = alloc_skb(size, priority);
2331
2332		if (skb) {
2333			skb_set_owner_w(skb, sk);
2334			return skb;
2335		}
2336	}
2337	return NULL;
2338}
2339EXPORT_SYMBOL(sock_wmalloc);
2340
2341static void sock_ofree(struct sk_buff *skb)
2342{
2343	struct sock *sk = skb->sk;
2344
2345	atomic_sub(skb->truesize, &sk->sk_omem_alloc);
2346}
2347
2348struct sk_buff *sock_omalloc(struct sock *sk, unsigned long size,
2349			     gfp_t priority)
2350{
2351	struct sk_buff *skb;
2352
2353	/* small safe race: SKB_TRUESIZE may differ from final skb->truesize */
2354	if (atomic_read(&sk->sk_omem_alloc) + SKB_TRUESIZE(size) >
2355	    sysctl_optmem_max)
2356		return NULL;
2357
2358	skb = alloc_skb(size, priority);
2359	if (!skb)
2360		return NULL;
2361
2362	atomic_add(skb->truesize, &sk->sk_omem_alloc);
2363	skb->sk = sk;
2364	skb->destructor = sock_ofree;
2365	return skb;
2366}
2367
2368/*
2369 * Allocate a memory block from the socket's option memory buffer.
2370 */
2371void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
2372{
2373	if ((unsigned int)size <= sysctl_optmem_max &&
2374	    atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
2375		void *mem;
2376		/* First do the add, to avoid the race if kmalloc
2377		 * might sleep.
2378		 */
2379		atomic_add(size, &sk->sk_omem_alloc);
2380		mem = kmalloc(size, priority);
2381		if (mem)
2382			return mem;
2383		atomic_sub(size, &sk->sk_omem_alloc);
2384	}
2385	return NULL;
2386}
2387EXPORT_SYMBOL(sock_kmalloc);
2388
2389/* Free an option memory block. Note, we actually want the inline
2390 * here as this allows gcc to detect the nullify and fold away the
2391 * condition entirely.
2392 */
2393static inline void __sock_kfree_s(struct sock *sk, void *mem, int size,
2394				  const bool nullify)
2395{
2396	if (WARN_ON_ONCE(!mem))
2397		return;
2398	if (nullify)
2399		kfree_sensitive(mem);
2400	else
2401		kfree(mem);
2402	atomic_sub(size, &sk->sk_omem_alloc);
2403}
2404
2405void sock_kfree_s(struct sock *sk, void *mem, int size)
2406{
2407	__sock_kfree_s(sk, mem, size, false);
2408}
2409EXPORT_SYMBOL(sock_kfree_s);
2410
2411void sock_kzfree_s(struct sock *sk, void *mem, int size)
2412{
2413	__sock_kfree_s(sk, mem, size, true);
2414}
2415EXPORT_SYMBOL(sock_kzfree_s);
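
/*
 * Editor's sketch (not part of this file) of the sock_kmalloc() /
 * sock_kfree_s() pairing: the size passed to the free must match the
 * size charged at allocation, since sk_omem_alloc is adjusted by
 * exactly that amount. my_set_option() is a hypothetical helper.
 */
static int my_set_option(struct sock *sk, int len)
{
	void *opt = sock_kmalloc(sk, len, GFP_KERNEL);

	if (!opt)
		return -ENOMEM;
	/* ... fill in and use the option data ... */
	sock_kfree_s(sk, opt, len);	/* uncharges the same len */
	return 0;
}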
2416
2417/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
2418   I think these locks should be removed for datagram sockets.
2419 */
2420static long sock_wait_for_wmem(struct sock *sk, long timeo)
2421{
2422	DEFINE_WAIT(wait);
2423
2424	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
2425	for (;;) {
2426		if (!timeo)
2427			break;
2428		if (signal_pending(current))
2429			break;
2430		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
2431		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
2432		if (refcount_read(&sk->sk_wmem_alloc) < READ_ONCE(sk->sk_sndbuf))
2433			break;
2434		if (sk->sk_shutdown & SEND_SHUTDOWN)
2435			break;
2436		if (sk->sk_err)
2437			break;
2438		timeo = schedule_timeout(timeo);
2439	}
2440	finish_wait(sk_sleep(sk), &wait);
2441	return timeo;
2442}
2443
2444
2445/*
2446 *	Generic send/receive buffer handlers
2447 */
2448
2449struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
2450				     unsigned long data_len, int noblock,
2451				     int *errcode, int max_page_order)
2452{
2453	struct sk_buff *skb;
2454	long timeo;
2455	int err;
2456
2457	timeo = sock_sndtimeo(sk, noblock);
2458	for (;;) {
2459		err = sock_error(sk);
2460		if (err != 0)
2461			goto failure;
2462
2463		err = -EPIPE;
2464		if (sk->sk_shutdown & SEND_SHUTDOWN)
2465			goto failure;
2466
2467		if (sk_wmem_alloc_get(sk) < READ_ONCE(sk->sk_sndbuf))
2468			break;
2469
2470		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
2471		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
2472		err = -EAGAIN;
2473		if (!timeo)
2474			goto failure;
2475		if (signal_pending(current))
2476			goto interrupted;
2477		timeo = sock_wait_for_wmem(sk, timeo);
2478	}
2479	skb = alloc_skb_with_frags(header_len, data_len, max_page_order,
2480				   errcode, sk->sk_allocation);
2481	if (skb)
2482		skb_set_owner_w(skb, sk);
2483	return skb;
2484
2485interrupted:
2486	err = sock_intr_errno(timeo);
2487failure:
2488	*errcode = err;
2489	return NULL;
2490}
2491EXPORT_SYMBOL(sock_alloc_send_pskb);
2492
2493struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
2494				    int noblock, int *errcode)
2495{
2496	return sock_alloc_send_pskb(sk, size, 0, noblock, errcode, 0);
2497}
2498EXPORT_SYMBOL(sock_alloc_send_skb);
2499
2500int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
2501		     struct sockcm_cookie *sockc)
2502{
2503	u32 tsflags;
2504
2505	switch (cmsg->cmsg_type) {
2506	case SO_MARK:
2507		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
2508			return -EPERM;
2509		if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32)))
2510			return -EINVAL;
2511		sockc->mark = *(u32 *)CMSG_DATA(cmsg);
2512		break;
2513	case SO_TIMESTAMPING_OLD:
2514		if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32)))
2515			return -EINVAL;
2516
2517		tsflags = *(u32 *)CMSG_DATA(cmsg);
2518		if (tsflags & ~SOF_TIMESTAMPING_TX_RECORD_MASK)
2519			return -EINVAL;
2520
2521		sockc->tsflags &= ~SOF_TIMESTAMPING_TX_RECORD_MASK;
2522		sockc->tsflags |= tsflags;
2523		break;
2524	case SCM_TXTIME:
2525		if (!sock_flag(sk, SOCK_TXTIME))
2526			return -EINVAL;
2527		if (cmsg->cmsg_len != CMSG_LEN(sizeof(u64)))
2528			return -EINVAL;
2529		sockc->transmit_time = get_unaligned((u64 *)CMSG_DATA(cmsg));
2530		break;
2531	/* SCM_RIGHTS and SCM_CREDENTIALS are semantically in SOL_UNIX. */
2532	case SCM_RIGHTS:
2533	case SCM_CREDENTIALS:
2534		break;
2535	default:
2536		return -EINVAL;
2537	}
2538	return 0;
2539}
2540EXPORT_SYMBOL(__sock_cmsg_send);
2541
2542int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
2543		   struct sockcm_cookie *sockc)
2544{
2545	struct cmsghdr *cmsg;
2546	int ret;
2547
2548	for_each_cmsghdr(cmsg, msg) {
2549		if (!CMSG_OK(msg, cmsg))
2550			return -EINVAL;
2551		if (cmsg->cmsg_level != SOL_SOCKET)
2552			continue;
2553		ret = __sock_cmsg_send(sk, msg, cmsg, sockc);
2554		if (ret)
2555			return ret;
2556	}
2557	return 0;
2558}
2559EXPORT_SYMBOL(sock_cmsg_send);
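
/*
 * Editor's sketch (not part of this file): the userspace side of the
 * SOL_SOCKET control messages parsed by __sock_cmsg_send() above, here
 * attaching an SO_MARK cmsg (requires CAP_NET_ADMIN) to a sendmsg()
 * call. send_with_mark() is a hypothetical helper.
 */
#include <stdint.h>
#include <string.h>
#include <sys/socket.h>

static ssize_t send_with_mark(int fd, const void *buf, size_t len,
			      uint32_t mark)
{
	char control[CMSG_SPACE(sizeof(uint32_t))] = { 0 };
	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = control, .msg_controllen = sizeof(control),
	};
	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SO_MARK;
	cmsg->cmsg_len = CMSG_LEN(sizeof(uint32_t));
	memcpy(CMSG_DATA(cmsg), &mark, sizeof(mark));

	return sendmsg(fd, &msg, 0);
}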
2560
2561static void sk_enter_memory_pressure(struct sock *sk)
2562{
2563	if (!sk->sk_prot->enter_memory_pressure)
2564		return;
2565
2566	sk->sk_prot->enter_memory_pressure(sk);
2567}
2568
2569static void sk_leave_memory_pressure(struct sock *sk)
2570{
2571	if (sk->sk_prot->leave_memory_pressure) {
2572		sk->sk_prot->leave_memory_pressure(sk);
2573	} else {
2574		unsigned long *memory_pressure = sk->sk_prot->memory_pressure;
2575
2576		if (memory_pressure && READ_ONCE(*memory_pressure))
2577			WRITE_ONCE(*memory_pressure, 0);
2578	}
2579}
2580
2581#define SKB_FRAG_PAGE_ORDER	get_order(32768)
2582DEFINE_STATIC_KEY_FALSE(net_high_order_alloc_disable_key);
2583
2584/**
2585 * skb_page_frag_refill - check that a page_frag contains enough room
2586 * @sz: minimum size of the fragment we want to get
2587 * @pfrag: pointer to page_frag
2588 * @gfp: priority for memory allocation
2589 *
2590 * Note: While this allocator tries to use high order pages, there is
2591 * no guarantee that allocations succeed. Therefore, @sz MUST be
2592 * less than or equal to PAGE_SIZE.
2593 */
2594bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t gfp)
2595{
2596	if (pfrag->page) {
2597		if (page_ref_count(pfrag->page) == 1) {
2598			pfrag->offset = 0;
2599			return true;
2600		}
2601		if (pfrag->offset + sz <= pfrag->size)
2602			return true;
2603		put_page(pfrag->page);
2604	}
2605
2606	pfrag->offset = 0;
2607	if (SKB_FRAG_PAGE_ORDER &&
2608	    !static_branch_unlikely(&net_high_order_alloc_disable_key)) {
2609		/* Avoid direct reclaim but allow kswapd to wake */
2610		pfrag->page = alloc_pages((gfp & ~__GFP_DIRECT_RECLAIM) |
2611					  __GFP_COMP | __GFP_NOWARN |
2612					  __GFP_NORETRY,
2613					  SKB_FRAG_PAGE_ORDER);
2614		if (likely(pfrag->page)) {
2615			pfrag->size = PAGE_SIZE << SKB_FRAG_PAGE_ORDER;
2616			return true;
2617		}
2618	}
2619	pfrag->page = alloc_page(gfp);
2620	if (likely(pfrag->page)) {
2621		pfrag->size = PAGE_SIZE;
2622		return true;
2623	}
2624	return false;
2625}
2626EXPORT_SYMBOL(skb_page_frag_refill);
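
/*
 * Editor's sketch (not part of this file) of the intended calling
 * pattern: on success the caller owns [pfrag->offset, pfrag->offset + sz)
 * of pfrag->page and advances the offset. my_copy_to_frag() is a
 * hypothetical helper and assumes a lowmem-capable @gfp.
 */
static bool my_copy_to_frag(struct page_frag *pfrag, const void *data,
			    unsigned int sz, gfp_t gfp)
{
	if (sz > PAGE_SIZE || !skb_page_frag_refill(sz, pfrag, gfp))
		return false;

	memcpy(page_address(pfrag->page) + pfrag->offset, data, sz);
	pfrag->offset += sz;	/* consume the reserved room */
	return true;
}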
2627
2628bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
2629{
2630	if (likely(skb_page_frag_refill(32U, pfrag, sk->sk_allocation)))
2631		return true;
2632
2633	sk_enter_memory_pressure(sk);
2634	sk_stream_moderate_sndbuf(sk);
2635	return false;
2636}
2637EXPORT_SYMBOL(sk_page_frag_refill);
2638
2639void __lock_sock(struct sock *sk)
2640	__releases(&sk->sk_lock.slock)
2641	__acquires(&sk->sk_lock.slock)
2642{
2643	DEFINE_WAIT(wait);
2644
2645	for (;;) {
2646		prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
2647					TASK_UNINTERRUPTIBLE);
2648		spin_unlock_bh(&sk->sk_lock.slock);
2649		schedule();
2650		spin_lock_bh(&sk->sk_lock.slock);
2651		if (!sock_owned_by_user(sk))
2652			break;
2653	}
2654	finish_wait(&sk->sk_lock.wq, &wait);
2655}
2656
2657void __release_sock(struct sock *sk)
2658	__releases(&sk->sk_lock.slock)
2659	__acquires(&sk->sk_lock.slock)
2660{
2661	struct sk_buff *skb, *next;
2662
2663	while ((skb = sk->sk_backlog.head) != NULL) {
2664		sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
2665
2666		spin_unlock_bh(&sk->sk_lock.slock);
2667
2668		do {
2669			next = skb->next;
2670			prefetch(next);
2671			WARN_ON_ONCE(skb_dst_is_noref(skb));
2672			skb_mark_not_on_list(skb);
2673			sk_backlog_rcv(sk, skb);
2674
2675			cond_resched();
2676
2677			skb = next;
2678		} while (skb != NULL);
2679
2680		spin_lock_bh(&sk->sk_lock.slock);
2681	}
2682
2683	/*
2684	 * Doing the zeroing here guarantees we cannot loop forever
2685	 * while a wild producer attempts to flood us.
2686	 */
2687	sk->sk_backlog.len = 0;
2688}
2689
2690void __sk_flush_backlog(struct sock *sk)
2691{
2692	spin_lock_bh(&sk->sk_lock.slock);
2693	__release_sock(sk);
2694	spin_unlock_bh(&sk->sk_lock.slock);
2695}
2696
2697/**
2698 * sk_wait_data - wait for data to arrive at sk_receive_queue
2699 * @sk:    sock to wait on
2700 * @timeo: for how long
2701 * @skb:   last skb seen on sk_receive_queue
2702 *
2703 * Now socket state including sk->sk_err is changed only under the lock,
2704 * hence we may omit checks after joining the wait queue.
2705 * We check the receive queue before schedule() only as an optimization;
2706 * it is very likely that release_sock() added new data.
2707 */
2708int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb)
2709{
2710	DEFINE_WAIT_FUNC(wait, woken_wake_function);
2711	int rc;
2712
2713	add_wait_queue(sk_sleep(sk), &wait);
2714	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2715	rc = sk_wait_event(sk, timeo, skb_peek_tail(&sk->sk_receive_queue) != skb, &wait);
2716	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2717	remove_wait_queue(sk_sleep(sk), &wait);
2718	return rc;
2719}
2720EXPORT_SYMBOL(sk_wait_data);
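
/*
 * Editor's sketch (not part of this file) of the canonical receive loop
 * around sk_wait_data(); the caller holds the socket lock, which
 * sk_wait_event() drops and retakes across schedule(). my_wait_for_skb()
 * is a hypothetical helper.
 */
static struct sk_buff *my_wait_for_skb(struct sock *sk, long *timeo)
{
	struct sk_buff *skb;

	while (!(skb = skb_peek(&sk->sk_receive_queue))) {
		if (!*timeo || signal_pending(current))
			return NULL;
		sk_wait_data(sk, timeo, NULL);
	}
	return skb;
}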
2721
2722/**
2723 *	__sk_mem_raise_allocated - increase memory_allocated
2724 *	@sk: socket
2725 *	@size: memory size to allocate
2726 *	@amt: pages to allocate
2727 *	@kind: allocation type
2728 *
2729 *	Similar to __sk_mem_schedule(), but does not update sk_forward_alloc
2730 */
2731int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind)
2732{
2733	struct proto *prot = sk->sk_prot;
2734	long allocated = sk_memory_allocated_add(sk, amt);
2735	bool charged = true;
2736
2737	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
2738	    !(charged = mem_cgroup_charge_skmem(sk->sk_memcg, amt)))
2739		goto suppress_allocation;
2740
2741	/* Under limit. */
2742	if (allocated <= sk_prot_mem_limits(sk, 0)) {
2743		sk_leave_memory_pressure(sk);
2744		return 1;
2745	}
2746
2747	/* Under pressure. */
2748	if (allocated > sk_prot_mem_limits(sk, 1))
2749		sk_enter_memory_pressure(sk);
2750
2751	/* Over hard limit. */
2752	if (allocated > sk_prot_mem_limits(sk, 2))
2753		goto suppress_allocation;
2754
2755	/* guarantee minimum buffer size under pressure */
2756	if (kind == SK_MEM_RECV) {
2757		if (atomic_read(&sk->sk_rmem_alloc) < sk_get_rmem0(sk, prot))
2758			return 1;
2759
2760	} else { /* SK_MEM_SEND */
2761		int wmem0 = sk_get_wmem0(sk, prot);
2762
2763		if (sk->sk_type == SOCK_STREAM) {
2764			if (sk->sk_wmem_queued < wmem0)
2765				return 1;
2766		} else if (refcount_read(&sk->sk_wmem_alloc) < wmem0) {
2767			return 1;
2768		}
2769	}
2770
2771	if (sk_has_memory_pressure(sk)) {
2772		u64 alloc;
2773
2774		if (!sk_under_memory_pressure(sk))
2775			return 1;
2776		alloc = sk_sockets_allocated_read_positive(sk);
2777		if (sk_prot_mem_limits(sk, 2) > alloc *
2778		    sk_mem_pages(sk->sk_wmem_queued +
2779				 atomic_read(&sk->sk_rmem_alloc) +
2780				 sk->sk_forward_alloc))
2781			return 1;
2782	}
2783
2784suppress_allocation:
2785
2786	if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
2787		sk_stream_moderate_sndbuf(sk);
2788
2789		/* Fail only if socket is _under_ its sndbuf.
2790		 * In this case we cannot block, so that we have to fail.
2791		 */
2792		if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
2793			return 1;
2794	}
2795
2796	if (kind == SK_MEM_SEND || (kind == SK_MEM_RECV && charged))
2797		trace_sock_exceed_buf_limit(sk, prot, allocated, kind);
2798
2799	sk_memory_allocated_sub(sk, amt);
2800
2801	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
2802		mem_cgroup_uncharge_skmem(sk->sk_memcg, amt);
2803
2804	return 0;
2805}
2806EXPORT_SYMBOL(__sk_mem_raise_allocated);
2807
2808/**
2809 *	__sk_mem_schedule - increase sk_forward_alloc and memory_allocated
2810 *	@sk: socket
2811 *	@size: memory size to allocate
2812 *	@kind: allocation type
2813 *
2814 *	If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
2815 *	rmem allocation. This function assumes that protocols which have
2816 *	memory_pressure use sk_wmem_queued as write buffer accounting.
2817 */
2818int __sk_mem_schedule(struct sock *sk, int size, int kind)
2819{
2820	int ret, amt = sk_mem_pages(size);
2821
2822	sk->sk_forward_alloc += amt << SK_MEM_QUANTUM_SHIFT;
2823	ret = __sk_mem_raise_allocated(sk, size, amt, kind);
2824	if (!ret)
2825		sk->sk_forward_alloc -= amt << SK_MEM_QUANTUM_SHIFT;
2826	return ret;
2827}
2828EXPORT_SYMBOL(__sk_mem_schedule);
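
/*
 * Editor's sketch (not part of this file): charging receive memory
 * before queueing, via the sk_rmem_schedule() wrapper around
 * __sk_mem_schedule(). my_queue_rcv() is a hypothetical helper and
 * assumes the caller holds the socket lock.
 */
static int my_queue_rcv(struct sock *sk, struct sk_buff *skb)
{
	if (!sk_rmem_schedule(sk, skb, skb->truesize))
		return -ENOBUFS;	/* over limit, allocation suppressed */

	skb_set_owner_r(skb, sk);	/* charges sk_rmem_alloc */
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	sk->sk_data_ready(sk);
	return 0;
}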
2829
2830/**
2831 *	__sk_mem_reduce_allocated - reclaim memory_allocated
2832 *	@sk: socket
2833 *	@amount: number of quanta
2834 *
2835 *	Similar to __sk_mem_reclaim(), but does not update sk_forward_alloc
2836 */
2837void __sk_mem_reduce_allocated(struct sock *sk, int amount)
2838{
2839	sk_memory_allocated_sub(sk, amount);
2840
2841	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
2842		mem_cgroup_uncharge_skmem(sk->sk_memcg, amount);
2843
2844	if (sk_under_memory_pressure(sk) &&
2845	    (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
2846		sk_leave_memory_pressure(sk);
2847}
2848EXPORT_SYMBOL(__sk_mem_reduce_allocated);
2849
2850/**
2851 *	__sk_mem_reclaim - reclaim sk_forward_alloc and memory_allocated
2852 *	@sk: socket
2853 *	@amount: number of bytes (rounded down to a SK_MEM_QUANTUM multiple)
2854 */
2855void __sk_mem_reclaim(struct sock *sk, int amount)
2856{
2857	amount >>= SK_MEM_QUANTUM_SHIFT;
2858	sk->sk_forward_alloc -= amount << SK_MEM_QUANTUM_SHIFT;
2859	__sk_mem_reduce_allocated(sk, amount);
2860}
2861EXPORT_SYMBOL(__sk_mem_reclaim);
2862
2863int sk_set_peek_off(struct sock *sk, int val)
2864{
2865	sk->sk_peek_off = val;
2866	return 0;
2867}
2868EXPORT_SYMBOL_GPL(sk_set_peek_off);
2869
2870/*
2871 * Set of default routines for initialising struct proto_ops when
2872 * the protocol does not support a particular function. In certain
2873 * cases where it makes no sense for a protocol to have a "do nothing"
2874 * function, some default processing is provided.
2875 */
2876
2877int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
2878{
2879	return -EOPNOTSUPP;
2880}
2881EXPORT_SYMBOL(sock_no_bind);
2882
2883int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
2884		    int len, int flags)
2885{
2886	return -EOPNOTSUPP;
2887}
2888EXPORT_SYMBOL(sock_no_connect);
2889
2890int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
2891{
2892	return -EOPNOTSUPP;
2893}
2894EXPORT_SYMBOL(sock_no_socketpair);
2895
2896int sock_no_accept(struct socket *sock, struct socket *newsock, int flags,
2897		   bool kern)
2898{
2899	return -EOPNOTSUPP;
2900}
2901EXPORT_SYMBOL(sock_no_accept);
2902
2903int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
2904		    int peer)
2905{
2906	return -EOPNOTSUPP;
2907}
2908EXPORT_SYMBOL(sock_no_getname);
2909
2910int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
2911{
2912	return -EOPNOTSUPP;
2913}
2914EXPORT_SYMBOL(sock_no_ioctl);
2915
2916int sock_no_listen(struct socket *sock, int backlog)
2917{
2918	return -EOPNOTSUPP;
2919}
2920EXPORT_SYMBOL(sock_no_listen);
2921
2922int sock_no_shutdown(struct socket *sock, int how)
2923{
2924	return -EOPNOTSUPP;
2925}
2926EXPORT_SYMBOL(sock_no_shutdown);
2927
2928int sock_no_sendmsg(struct socket *sock, struct msghdr *m, size_t len)
2929{
2930	return -EOPNOTSUPP;
2931}
2932EXPORT_SYMBOL(sock_no_sendmsg);
2933
2934int sock_no_sendmsg_locked(struct sock *sk, struct msghdr *m, size_t len)
2935{
2936	return -EOPNOTSUPP;
2937}
2938EXPORT_SYMBOL(sock_no_sendmsg_locked);
2939
2940int sock_no_recvmsg(struct socket *sock, struct msghdr *m, size_t len,
2941		    int flags)
2942{
2943	return -EOPNOTSUPP;
2944}
2945EXPORT_SYMBOL(sock_no_recvmsg);
2946
2947int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
2948{
2949	/* Mirror missing mmap method error code */
2950	return -ENODEV;
2951}
2952EXPORT_SYMBOL(sock_no_mmap);
2953
2954/*
2955 * When a file is received (via SCM_RIGHTS, etc), we must bump the
2956 * various sock-based usage counts.
2957 */
2958void __receive_sock(struct file *file)
2959{
2960	struct socket *sock;
2961
2962	sock = sock_from_file(file);
2963	if (sock) {
2964		sock_update_netprioidx(&sock->sk->sk_cgrp_data);
2965		sock_update_classid(&sock->sk->sk_cgrp_data);
2966	}
2967}
2968
2969ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
2970{
2971	ssize_t res;
2972	struct msghdr msg = {.msg_flags = flags};
2973	struct kvec iov;
2974	char *kaddr = kmap(page);
2975	iov.iov_base = kaddr + offset;
2976	iov.iov_len = size;
2977	res = kernel_sendmsg(sock, &msg, &iov, 1, size);
2978	kunmap(page);
2979	return res;
2980}
2981EXPORT_SYMBOL(sock_no_sendpage);
2982
2983ssize_t sock_no_sendpage_locked(struct sock *sk, struct page *page,
2984				int offset, size_t size, int flags)
2985{
2986	ssize_t res;
2987	struct msghdr msg = {.msg_flags = flags};
2988	struct kvec iov;
2989	char *kaddr = kmap(page);
2990
2991	iov.iov_base = kaddr + offset;
2992	iov.iov_len = size;
2993	res = kernel_sendmsg_locked(sk, &msg, &iov, 1, size);
2994	kunmap(page);
2995	return res;
2996}
2997EXPORT_SYMBOL(sock_no_sendpage_locked);
2998
2999/*
3000 *	Default Socket Callbacks
3001 */
3002
3003static void sock_def_wakeup(struct sock *sk)
3004{
3005	struct socket_wq *wq;
3006
3007	rcu_read_lock();
3008	wq = rcu_dereference(sk->sk_wq);
3009	if (skwq_has_sleeper(wq))
3010		wake_up_interruptible_all(&wq->wait);
3011	rcu_read_unlock();
3012}
3013
3014static void sock_def_error_report(struct sock *sk)
3015{
3016	struct socket_wq *wq;
3017
3018	rcu_read_lock();
3019	wq = rcu_dereference(sk->sk_wq);
3020	if (skwq_has_sleeper(wq))
3021		wake_up_interruptible_poll(&wq->wait, EPOLLERR);
3022	sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
3023	rcu_read_unlock();
3024}
3025
3026void sock_def_readable(struct sock *sk)
3027{
3028	struct socket_wq *wq;
3029
3030	rcu_read_lock();
3031	wq = rcu_dereference(sk->sk_wq);
3032	if (skwq_has_sleeper(wq))
3033		wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN | EPOLLPRI |
3034						EPOLLRDNORM | EPOLLRDBAND);
3035	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
3036	rcu_read_unlock();
3037}
3038
3039static void sock_def_write_space(struct sock *sk)
3040{
3041	struct socket_wq *wq;
3042
3043	rcu_read_lock();
3044
3045	/* Do not wake up a writer until he can make "significant"
3046	 * progress.  --DaveM
3047	 */
3048	if ((refcount_read(&sk->sk_wmem_alloc) << 1) <= READ_ONCE(sk->sk_sndbuf)) {
3049		wq = rcu_dereference(sk->sk_wq);
3050		if (skwq_has_sleeper(wq))
3051			wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
3052						EPOLLWRNORM | EPOLLWRBAND);
3053
3054		/* Should agree with poll, otherwise some programs break */
3055		if (sock_writeable(sk))
3056			sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
3057	}
3058
3059	rcu_read_unlock();
3060}
3061
3062static void sock_def_destruct(struct sock *sk)
3063{
3064}
3065
3066void sk_send_sigurg(struct sock *sk)
3067{
3068	if (sk->sk_socket && sk->sk_socket->file)
3069		if (send_sigurg(&sk->sk_socket->file->f_owner))
3070			sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
3071}
3072EXPORT_SYMBOL(sk_send_sigurg);
3073
3074void sk_reset_timer(struct sock *sk, struct timer_list* timer,
3075		    unsigned long expires)
3076{
3077	if (!mod_timer(timer, expires))
3078		sock_hold(sk);
3079}
3080EXPORT_SYMBOL(sk_reset_timer);
3081
3082void sk_stop_timer(struct sock *sk, struct timer_list* timer)
3083{
3084	if (del_timer(timer))
3085		__sock_put(sk);
3086}
3087EXPORT_SYMBOL(sk_stop_timer);
3088
3089void sk_stop_timer_sync(struct sock *sk, struct timer_list *timer)
3090{
3091	if (del_timer_sync(timer))
3092		__sock_put(sk);
3093}
3094EXPORT_SYMBOL(sk_stop_timer_sync);
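
/*
 * Editor's sketch (not part of this file) of the refcounting contract
 * above: arming an idle timer takes a reference on the sock, deleting a
 * pending timer drops it, and an expired handler must sock_put() itself.
 * The my_* helpers are hypothetical.
 */
static void my_arm_timer(struct sock *sk, unsigned long delay)
{
	sk_reset_timer(sk, &sk->sk_timer, jiffies + delay);	/* holds sk */
}

static void my_disarm_timer(struct sock *sk)
{
	sk_stop_timer(sk, &sk->sk_timer);	/* puts sk if it was pending */
}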
3095
3096void sock_init_data(struct socket *sock, struct sock *sk)
3097{
3098	sk_init_common(sk);
3099	sk->sk_send_head	=	NULL;
3100
3101	timer_setup(&sk->sk_timer, NULL, 0);
3102
3103	sk->sk_allocation	=	GFP_KERNEL;
3104	sk->sk_rcvbuf		=	sysctl_rmem_default;
3105	sk->sk_sndbuf		=	sysctl_wmem_default;
3106	sk->sk_state		=	TCP_CLOSE;
3107	sk_set_socket(sk, sock);
3108
3109	sock_set_flag(sk, SOCK_ZAPPED);
3110
3111	if (sock) {
3112		sk->sk_type	=	sock->type;
3113		RCU_INIT_POINTER(sk->sk_wq, &sock->wq);
3114		sock->sk	=	sk;
3115		sk->sk_uid	=	SOCK_INODE(sock)->i_uid;
3116	} else {
3117		RCU_INIT_POINTER(sk->sk_wq, NULL);
3118		sk->sk_uid	=	make_kuid(sock_net(sk)->user_ns, 0);
3119	}
3120
3121	rwlock_init(&sk->sk_callback_lock);
3122	if (sk->sk_kern_sock)
3123		lockdep_set_class_and_name(
3124			&sk->sk_callback_lock,
3125			af_kern_callback_keys + sk->sk_family,
3126			af_family_kern_clock_key_strings[sk->sk_family]);
3127	else
3128		lockdep_set_class_and_name(
3129			&sk->sk_callback_lock,
3130			af_callback_keys + sk->sk_family,
3131			af_family_clock_key_strings[sk->sk_family]);
3132
3133	sk->sk_state_change	=	sock_def_wakeup;
3134	sk->sk_data_ready	=	sock_def_readable;
3135	sk->sk_write_space	=	sock_def_write_space;
3136	sk->sk_error_report	=	sock_def_error_report;
3137	sk->sk_destruct		=	sock_def_destruct;
3138
3139	sk->sk_frag.page	=	NULL;
3140	sk->sk_frag.offset	=	0;
3141	sk->sk_peek_off		=	-1;
3142
3143	sk->sk_peer_pid 	=	NULL;
3144	sk->sk_peer_cred	=	NULL;
3145	spin_lock_init(&sk->sk_peer_lock);
3146
3147	sk->sk_write_pending	=	0;
3148	sk->sk_rcvlowat		=	1;
3149	sk->sk_rcvtimeo		=	MAX_SCHEDULE_TIMEOUT;
3150	sk->sk_sndtimeo		=	MAX_SCHEDULE_TIMEOUT;
3151
3152	sk->sk_stamp = SK_DEFAULT_STAMP;
3153#if BITS_PER_LONG==32
3154	seqlock_init(&sk->sk_stamp_seq);
3155#endif
3156	atomic_set(&sk->sk_zckey, 0);
3157
3158#ifdef CONFIG_NET_RX_BUSY_POLL
3159	sk->sk_napi_id		=	0;
3160	sk->sk_ll_usec		=	sysctl_net_busy_read;
3161#endif
3162
3163	sk->sk_max_pacing_rate = ~0UL;
3164	sk->sk_pacing_rate = ~0UL;
3165	WRITE_ONCE(sk->sk_pacing_shift, 10);
3166	sk->sk_incoming_cpu = -1;
3167
3168	sk_rx_queue_clear(sk);
3169	/*
3170	 * Before updating sk_refcnt, we must commit prior changes to memory
3171	 * (Documentation/RCU/rculist_nulls.rst for details)
3172	 */
3173	smp_wmb();
3174	refcount_set(&sk->sk_refcnt, 1);
3175	atomic_set(&sk->sk_drops, 0);
3176}
3177EXPORT_SYMBOL(sock_init_data);
3178
3179void lock_sock_nested(struct sock *sk, int subclass)
3180{
3181	might_sleep();
3182	spin_lock_bh(&sk->sk_lock.slock);
3183	if (sk->sk_lock.owned)
3184		__lock_sock(sk);
3185	sk->sk_lock.owned = 1;
3186	spin_unlock(&sk->sk_lock.slock);
3187	/*
3188	 * The sk_lock has mutex_lock() semantics here:
3189	 */
3190	mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
3191	local_bh_enable();
3192}
3193EXPORT_SYMBOL(lock_sock_nested);
3194
3195void release_sock(struct sock *sk)
3196{
3197	spin_lock_bh(&sk->sk_lock.slock);
3198	if (sk->sk_backlog.tail)
3199		__release_sock(sk);
3200
2201	/* Warning: release_cb() might need to release sk ownership,
2202	 * i.e. call sock_release_ownership(sk) before us.
2203	 */
3204	if (sk->sk_prot->release_cb)
3205		sk->sk_prot->release_cb(sk);
3206
3207	sock_release_ownership(sk);
3208	if (waitqueue_active(&sk->sk_lock.wq))
3209		wake_up(&sk->sk_lock.wq);
3210	spin_unlock_bh(&sk->sk_lock.slock);
3211}
3212EXPORT_SYMBOL(release_sock);
3213
3214/**
3215 * lock_sock_fast - fast version of lock_sock
3216 * @sk: socket
3217 *
3218 * This version should be used for very small sections, where the process
3219 * won't block. Returns false if the fast path is taken:
3220 *
3221 *   sk_lock.slock locked, owned = 0, BH disabled
3222 *
3223 * Returns true if the slow path is taken:
3224 *
3225 *   sk_lock.slock unlocked, owned = 1, BH enabled
3226 */
3227bool lock_sock_fast(struct sock *sk) __acquires(&sk->sk_lock.slock)
3228{
3229	might_sleep();
3230	spin_lock_bh(&sk->sk_lock.slock);
3231
3232	if (!sk->sk_lock.owned)
3233		/*
3234		 * Note: we must keep BH disabled here.
3235		 */
3236		return false;
3237
3238	__lock_sock(sk);
3239	sk->sk_lock.owned = 1;
3240	spin_unlock(&sk->sk_lock.slock);
3241	/*
3242	 * The sk_lock has mutex_lock() semantics here:
3243	 */
3244	mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
3245	__acquire(&sk->sk_lock.slock);
3246	local_bh_enable();
3247	return true;
3248}
3249EXPORT_SYMBOL(lock_sock_fast);
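
/*
 * Editor's sketch (not part of this file) of the documented pairing:
 * unlock_sock_fast() undoes whichever path lock_sock_fast() took.
 * my_peek_state() is a hypothetical helper.
 */
static int my_peek_state(struct sock *sk)
{
	bool slow = lock_sock_fast(sk);
	int state = sk->sk_state;

	unlock_sock_fast(sk, slow);
	return state;
}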
3250
3251int sock_gettstamp(struct socket *sock, void __user *userstamp,
3252		   bool timeval, bool time32)
3253{
3254	struct sock *sk = sock->sk;
3255	struct timespec64 ts;
3256
3257	sock_enable_timestamp(sk, SOCK_TIMESTAMP);
3258	ts = ktime_to_timespec64(sock_read_timestamp(sk));
3259	if (ts.tv_sec == -1)
3260		return -ENOENT;
3261	if (ts.tv_sec == 0) {
3262		ktime_t kt = ktime_get_real();
3263		sock_write_timestamp(sk, kt);
3264		ts = ktime_to_timespec64(kt);
3265	}
3266
3267	if (timeval)
3268		ts.tv_nsec /= 1000;
3269
3270#ifdef CONFIG_COMPAT_32BIT_TIME
3271	if (time32)
3272		return put_old_timespec32(&ts, userstamp);
3273#endif
3274#ifdef CONFIG_SPARC64
3275	/* beware of padding in sparc64 timeval */
3276	if (timeval && !in_compat_syscall()) {
3277		struct __kernel_old_timeval __user tv = {
3278			.tv_sec = ts.tv_sec,
3279			.tv_usec = ts.tv_nsec,
3280		};
3281		if (copy_to_user(userstamp, &tv, sizeof(tv)))
3282			return -EFAULT;
3283		return 0;
3284	}
3285#endif
3286	return put_timespec64(&ts, userstamp);
3287}
3288EXPORT_SYMBOL(sock_gettstamp);
3289
3290void sock_enable_timestamp(struct sock *sk, enum sock_flags flag)
3291{
3292	if (!sock_flag(sk, flag)) {
3293		unsigned long previous_flags = sk->sk_flags;
3294
3295		sock_set_flag(sk, flag);
3296		/*
3297		 * we just set one of the two flags which require net
3298		 * time stamping, but time stamping might have been on
3299		 * already because of the other one
3300		 */
3301		if (sock_needs_netstamp(sk) &&
3302		    !(previous_flags & SK_FLAGS_TIMESTAMP))
3303			net_enable_timestamp();
3304	}
3305}
3306
3307int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
3308		       int level, int type)
3309{
3310	struct sock_exterr_skb *serr;
3311	struct sk_buff *skb;
3312	int copied, err;
3313
3314	err = -EAGAIN;
3315	skb = sock_dequeue_err_skb(sk);
3316	if (skb == NULL)
3317		goto out;
3318
3319	copied = skb->len;
3320	if (copied > len) {
3321		msg->msg_flags |= MSG_TRUNC;
3322		copied = len;
3323	}
3324	err = skb_copy_datagram_msg(skb, 0, msg, copied);
3325	if (err)
3326		goto out_free_skb;
3327
3328	sock_recv_timestamp(msg, sk, skb);
3329
3330	serr = SKB_EXT_ERR(skb);
3331	put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);
3332
3333	msg->msg_flags |= MSG_ERRQUEUE;
3334	err = copied;
3335
3336out_free_skb:
3337	kfree_skb(skb);
3338out:
3339	return err;
3340}
3341EXPORT_SYMBOL(sock_recv_errqueue);
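
/*
 * Editor's sketch (not part of this file): the userspace counterpart of
 * sock_recv_errqueue(), draining the error queue with MSG_ERRQUEUE; the
 * struct sock_extended_err arrives as ancillary data at the protocol's
 * (level, type), e.g. IPPROTO_IP/IP_RECVERR. drain_errqueue() is a
 * hypothetical helper.
 */
#include <sys/socket.h>

static ssize_t drain_errqueue(int fd)
{
	char data[256], control[512];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = control, .msg_controllen = sizeof(control),
	};

	return recvmsg(fd, &msg, MSG_ERRQUEUE | MSG_DONTWAIT);
}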
3342
3343/*
3344 *	Get a socket option on a socket.
3345 *
3346 *	FIX: POSIX 1003.1g is very ambiguous here. It states that
3347 *	asynchronous errors should be reported by getsockopt. We assume
3348 *	this means if you specify SO_ERROR (otherwise what's the point of it?).
3349 */
3350int sock_common_getsockopt(struct socket *sock, int level, int optname,
3351			   char __user *optval, int __user *optlen)
3352{
3353	struct sock *sk = sock->sk;
3354
3355	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
3356}
3357EXPORT_SYMBOL(sock_common_getsockopt);
3358
3359int sock_common_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
3360			int flags)
3361{
3362	struct sock *sk = sock->sk;
3363	int addr_len = 0;
3364	int err;
3365
3366	err = sk->sk_prot->recvmsg(sk, msg, size, flags & MSG_DONTWAIT,
3367				   flags & ~MSG_DONTWAIT, &addr_len);
3368	if (err >= 0)
3369		msg->msg_namelen = addr_len;
3370	return err;
3371}
3372EXPORT_SYMBOL(sock_common_recvmsg);
3373
3374/*
3375 *	Set socket options on an inet socket.
3376 */
3377int sock_common_setsockopt(struct socket *sock, int level, int optname,
3378			   sockptr_t optval, unsigned int optlen)
3379{
3380	struct sock *sk = sock->sk;
3381
3382	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
3383}
3384EXPORT_SYMBOL(sock_common_setsockopt);
3385
3386void sk_common_release(struct sock *sk)
3387{
3388	if (sk->sk_prot->destroy)
3389		sk->sk_prot->destroy(sk);
3390
3391	/*
3392	 * Observation: when sk_common_release is called, processes have
3393	 * no access to the socket, but the network stack still does.
3394	 * Step one, detach it from networking:
3395	 *
3396	 * A. Remove it from the hash tables.
3397	 */
3398
3399	sk->sk_prot->unhash(sk);
3400
3401	/*
3402	 * At this point the socket cannot receive new packets, but some may
3403	 * be in flight because some CPU runs the receiver and did the hash
3404	 * table lookup before we unhashed the socket. They will reach the
3405	 * receive queue and be purged by the socket destructor.
3406	 *
3407	 * Also we still have packets pending on the receive queue, and
3408	 * probably our own packets waiting in device queues. sock_destroy
3409	 * will drain the receive queue, but transmitted packets will delay
3410	 * socket destruction until the last reference is released.
3411	 */
3412
3413	sock_orphan(sk);
3414
3415	xfrm_sk_free_policy(sk);
3416
3417	sk_refcnt_debug_release(sk);
3418
3419	sock_put(sk);
3420}
3421EXPORT_SYMBOL(sk_common_release);
3422
3423void sk_get_meminfo(const struct sock *sk, u32 *mem)
3424{
3425	memset(mem, 0, sizeof(*mem) * SK_MEMINFO_VARS);
3426
3427	mem[SK_MEMINFO_RMEM_ALLOC] = sk_rmem_alloc_get(sk);
3428	mem[SK_MEMINFO_RCVBUF] = READ_ONCE(sk->sk_rcvbuf);
3429	mem[SK_MEMINFO_WMEM_ALLOC] = sk_wmem_alloc_get(sk);
3430	mem[SK_MEMINFO_SNDBUF] = READ_ONCE(sk->sk_sndbuf);
3431	mem[SK_MEMINFO_FWD_ALLOC] = sk->sk_forward_alloc;
3432	mem[SK_MEMINFO_WMEM_QUEUED] = READ_ONCE(sk->sk_wmem_queued);
3433	mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);
3434	mem[SK_MEMINFO_BACKLOG] = READ_ONCE(sk->sk_backlog.len);
3435	mem[SK_MEMINFO_DROPS] = atomic_read(&sk->sk_drops);
3436}
3437
3438#ifdef CONFIG_PROC_FS
3439#define PROTO_INUSE_NR	64	/* should be enough for the first time */
3440struct prot_inuse {
3441	int val[PROTO_INUSE_NR];
3442};
3443
3444static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);
3445
3446void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
3447{
3448	__this_cpu_add(net->core.prot_inuse->val[prot->inuse_idx], val);
3449}
3450EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
3451
3452int sock_prot_inuse_get(struct net *net, struct proto *prot)
3453{
3454	int cpu, idx = prot->inuse_idx;
3455	int res = 0;
3456
3457	for_each_possible_cpu(cpu)
3458		res += per_cpu_ptr(net->core.prot_inuse, cpu)->val[idx];
3459
3460	return res >= 0 ? res : 0;
3461}
3462EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
3463
3464static void sock_inuse_add(struct net *net, int val)
3465{
3466	this_cpu_add(*net->core.sock_inuse, val);
3467}
3468
3469int sock_inuse_get(struct net *net)
3470{
3471	int cpu, res = 0;
3472
3473	for_each_possible_cpu(cpu)
3474		res += *per_cpu_ptr(net->core.sock_inuse, cpu);
3475
3476	return res;
3477}
3478
3479EXPORT_SYMBOL_GPL(sock_inuse_get);
3480
3481static int __net_init sock_inuse_init_net(struct net *net)
3482{
3483	net->core.prot_inuse = alloc_percpu(struct prot_inuse);
3484	if (net->core.prot_inuse == NULL)
3485		return -ENOMEM;
3486
3487	net->core.sock_inuse = alloc_percpu(int);
3488	if (net->core.sock_inuse == NULL)
3489		goto out;
3490
3491	return 0;
3492
3493out:
3494	free_percpu(net->core.prot_inuse);
3495	return -ENOMEM;
3496}
3497
3498static void __net_exit sock_inuse_exit_net(struct net *net)
3499{
3500	free_percpu(net->core.prot_inuse);
3501	free_percpu(net->core.sock_inuse);
3502}
3503
3504static struct pernet_operations net_inuse_ops = {
3505	.init = sock_inuse_init_net,
3506	.exit = sock_inuse_exit_net,
3507};
3508
3509static __init int net_inuse_init(void)
3510{
3511	if (register_pernet_subsys(&net_inuse_ops))
3512		panic("Cannot initialize net inuse counters");
3513
3514	return 0;
3515}
3516
3517core_initcall(net_inuse_init);
3518
3519static int assign_proto_idx(struct proto *prot)
3520{
3521	prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);
3522
3523	if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
3524		pr_err("PROTO_INUSE_NR exhausted\n");
3525		return -ENOSPC;
3526	}
3527
3528	set_bit(prot->inuse_idx, proto_inuse_idx);
3529	return 0;
3530}
3531
3532static void release_proto_idx(struct proto *prot)
3533{
3534	if (prot->inuse_idx != PROTO_INUSE_NR - 1)
3535		clear_bit(prot->inuse_idx, proto_inuse_idx);
3536}
3537#else
3538static inline int assign_proto_idx(struct proto *prot)
3539{
3540	return 0;
3541}
3542
3543static inline void release_proto_idx(struct proto *prot)
3544{
3545}
3546
3547static void sock_inuse_add(struct net *net, int val)
3548{
3549}
3550#endif
3551
3552static void tw_prot_cleanup(struct timewait_sock_ops *twsk_prot)
3553{
3554	if (!twsk_prot)
3555		return;
3556	kfree(twsk_prot->twsk_slab_name);
3557	twsk_prot->twsk_slab_name = NULL;
3558	kmem_cache_destroy(twsk_prot->twsk_slab);
3559	twsk_prot->twsk_slab = NULL;
3560}
3561
3562static int tw_prot_init(const struct proto *prot)
3563{
3564	struct timewait_sock_ops *twsk_prot = prot->twsk_prot;
3565
3566	if (!twsk_prot)
3567		return 0;
3568
3569	twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s",
3570					      prot->name);
3571	if (!twsk_prot->twsk_slab_name)
3572		return -ENOMEM;
3573
3574	twsk_prot->twsk_slab =
3575		kmem_cache_create(twsk_prot->twsk_slab_name,
3576				  twsk_prot->twsk_obj_size, 0,
3577				  SLAB_ACCOUNT | prot->slab_flags,
3578				  NULL);
3579	if (!twsk_prot->twsk_slab) {
3580		pr_crit("%s: Can't create timewait sock SLAB cache!\n",
3581			prot->name);
3582		return -ENOMEM;
3583	}
3584
3585	return 0;
3586}
3587
3588static void req_prot_cleanup(struct request_sock_ops *rsk_prot)
3589{
3590	if (!rsk_prot)
3591		return;
3592	kfree(rsk_prot->slab_name);
3593	rsk_prot->slab_name = NULL;
3594	kmem_cache_destroy(rsk_prot->slab);
3595	rsk_prot->slab = NULL;
3596}
3597
3598static int req_prot_init(const struct proto *prot)
3599{
3600	struct request_sock_ops *rsk_prot = prot->rsk_prot;
3601
3602	if (!rsk_prot)
3603		return 0;
3604
3605	rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s",
3606					prot->name);
3607	if (!rsk_prot->slab_name)
3608		return -ENOMEM;
3609
3610	rsk_prot->slab = kmem_cache_create(rsk_prot->slab_name,
3611					   rsk_prot->obj_size, 0,
3612					   SLAB_ACCOUNT | prot->slab_flags,
3613					   NULL);
3614
3615	if (!rsk_prot->slab) {
3616		pr_crit("%s: Can't create request sock SLAB cache!\n",
3617			prot->name);
3618		return -ENOMEM;
3619	}
3620	return 0;
3621}
3622
3623int proto_register(struct proto *prot, int alloc_slab)
3624{
3625	int ret = -ENOBUFS;
3626
3627	if (alloc_slab) {
3628		prot->slab = kmem_cache_create_usercopy(prot->name,
3629					prot->obj_size, 0,
3630					SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT |
3631					prot->slab_flags,
3632					prot->useroffset, prot->usersize,
3633					NULL);
3634
3635		if (prot->slab == NULL) {
3636			pr_crit("%s: Can't create sock SLAB cache!\n",
3637				prot->name);
3638			goto out;
3639		}
3640
3641		if (req_prot_init(prot))
3642			goto out_free_request_sock_slab;
3643
3644		if (tw_prot_init(prot))
3645			goto out_free_timewait_sock_slab;
3646	}
3647
3648	mutex_lock(&proto_list_mutex);
3649	ret = assign_proto_idx(prot);
3650	if (ret) {
3651		mutex_unlock(&proto_list_mutex);
3652		goto out_free_timewait_sock_slab;
3653	}
3654	list_add(&prot->node, &proto_list);
3655	mutex_unlock(&proto_list_mutex);
3656	return ret;
3657
3658out_free_timewait_sock_slab:
3659	if (alloc_slab)
3660		tw_prot_cleanup(prot->twsk_prot);
3661out_free_request_sock_slab:
3662	if (alloc_slab) {
3663		req_prot_cleanup(prot->rsk_prot);
3664
3665		kmem_cache_destroy(prot->slab);
3666		prot->slab = NULL;
3667	}
3668out:
3669	return ret;
3670}
3671EXPORT_SYMBOL(proto_register);
3672
3673void proto_unregister(struct proto *prot)
3674{
3675	mutex_lock(&proto_list_mutex);
3676	release_proto_idx(prot);
3677	list_del(&prot->node);
3678	mutex_unlock(&proto_list_mutex);
3679
3680	kmem_cache_destroy(prot->slab);
3681	prot->slab = NULL;
3682
3683	req_prot_cleanup(prot->rsk_prot);
3684	tw_prot_cleanup(prot->twsk_prot);
3685}
3686EXPORT_SYMBOL(proto_unregister);
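
/*
 * Editor's sketch (not part of this file): minimal registration of a
 * protocol with proto_register()/proto_unregister(). my_proto and its
 * obj_size are assumptions for illustration; alloc_slab=1 asks the core
 * to create the kmem cache used by sk_prot_alloc().
 */
static struct proto my_proto = {
	.name		= "MYPROTO",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct sock),	/* or a larger protocol sock */
};

static int __init my_proto_init(void)
{
	return proto_register(&my_proto, 1);
}

static void __exit my_proto_exit(void)
{
	proto_unregister(&my_proto);
}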
3687
3688int sock_load_diag_module(int family, int protocol)
3689{
3690	if (!protocol) {
3691		if (!sock_is_registered(family))
3692			return -ENOENT;
3693
3694		return request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
3695				      NETLINK_SOCK_DIAG, family);
3696	}
3697
3698#ifdef CONFIG_INET
3699	if (family == AF_INET &&
3700	    protocol != IPPROTO_RAW &&
3701	    protocol < MAX_INET_PROTOS &&
3702	    !rcu_access_pointer(inet_protos[protocol]))
3703		return -ENOENT;
3704#endif
3705
3706	return request_module("net-pf-%d-proto-%d-type-%d-%d", PF_NETLINK,
3707			      NETLINK_SOCK_DIAG, family, protocol);
3708}
3709EXPORT_SYMBOL(sock_load_diag_module);
3710
3711#ifdef CONFIG_PROC_FS
3712static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
3713	__acquires(proto_list_mutex)
3714{
3715	mutex_lock(&proto_list_mutex);
3716	return seq_list_start_head(&proto_list, *pos);
3717}
3718
3719static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3720{
3721	return seq_list_next(v, &proto_list, pos);
3722}
3723
3724static void proto_seq_stop(struct seq_file *seq, void *v)
3725	__releases(proto_list_mutex)
3726{
3727	mutex_unlock(&proto_list_mutex);
3728}
3729
3730static char proto_method_implemented(const void *method)
3731{
3732	return method == NULL ? 'n' : 'y';
3733}
3734static long sock_prot_memory_allocated(struct proto *proto)
3735{
3736	return proto->memory_allocated != NULL ? proto_memory_allocated(proto) : -1L;
3737}
3738
3739static const char *sock_prot_memory_pressure(struct proto *proto)
3740{
3741	return proto->memory_pressure != NULL ?
3742	proto_memory_pressure(proto) ? "yes" : "no" : "NI";
3743}
3744
3745static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
3746{
3747
3748	seq_printf(seq, "%-9s %4u %6d  %6ld   %-3s %6u   %-3s  %-10s "
3749			"%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
3750		   proto->name,
3751		   proto->obj_size,
3752		   sock_prot_inuse_get(seq_file_net(seq), proto),
3753		   sock_prot_memory_allocated(proto),
3754		   sock_prot_memory_pressure(proto),
3755		   proto->max_header,
3756		   proto->slab == NULL ? "no" : "yes",
3757		   module_name(proto->owner),
3758		   proto_method_implemented(proto->close),
3759		   proto_method_implemented(proto->connect),
3760		   proto_method_implemented(proto->disconnect),
3761		   proto_method_implemented(proto->accept),
3762		   proto_method_implemented(proto->ioctl),
3763		   proto_method_implemented(proto->init),
3764		   proto_method_implemented(proto->destroy),
3765		   proto_method_implemented(proto->shutdown),
3766		   proto_method_implemented(proto->setsockopt),
3767		   proto_method_implemented(proto->getsockopt),
3768		   proto_method_implemented(proto->sendmsg),
3769		   proto_method_implemented(proto->recvmsg),
3770		   proto_method_implemented(proto->sendpage),
3771		   proto_method_implemented(proto->bind),
3772		   proto_method_implemented(proto->backlog_rcv),
3773		   proto_method_implemented(proto->hash),
3774		   proto_method_implemented(proto->unhash),
3775		   proto_method_implemented(proto->get_port),
3776		   proto_method_implemented(proto->enter_memory_pressure));
3777}
3778
3779static int proto_seq_show(struct seq_file *seq, void *v)
3780{
3781	if (v == &proto_list)
3782		seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
3783			   "protocol",
3784			   "size",
3785			   "sockets",
3786			   "memory",
3787			   "press",
3788			   "maxhdr",
3789			   "slab",
3790			   "module",
3791			   "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
3792	else
3793		proto_seq_printf(seq, list_entry(v, struct proto, node));
3794	return 0;
3795}
3796
3797static const struct seq_operations proto_seq_ops = {
3798	.start  = proto_seq_start,
3799	.next   = proto_seq_next,
3800	.stop   = proto_seq_stop,
3801	.show   = proto_seq_show,
3802};
3803
3804static __net_init int proto_init_net(struct net *net)
3805{
3806	if (!proc_create_net("protocols", 0444, net->proc_net, &proto_seq_ops,
3807			sizeof(struct seq_net_private)))
3808		return -ENOMEM;
3809
3810	return 0;
3811}
3812
3813static __net_exit void proto_exit_net(struct net *net)
3814{
3815	remove_proc_entry("protocols", net->proc_net);
3816}
3817
3818
3819static __net_initdata struct pernet_operations proto_net_ops = {
3820	.init = proto_init_net,
3821	.exit = proto_exit_net,
3822};
3823
3824static int __init proto_init(void)
3825{
3826	return register_pernet_subsys(&proto_net_ops);
3827}
3828
3829subsys_initcall(proto_init);
3830
3831#endif /* PROC_FS */
3832
3833#ifdef CONFIG_NET_RX_BUSY_POLL
3834bool sk_busy_loop_end(void *p, unsigned long start_time)
3835{
3836	struct sock *sk = p;
3837
3838	return !skb_queue_empty_lockless(&sk->sk_receive_queue) ||
3839	       sk_busy_loop_timeout(sk, start_time);
3840}
3841EXPORT_SYMBOL(sk_busy_loop_end);
3842#endif /* CONFIG_NET_RX_BUSY_POLL */
3843
3844int sock_bind_add(struct sock *sk, struct sockaddr *addr, int addr_len)
3845{
3846	if (!sk->sk_prot->bind_add)
3847		return -EOPNOTSUPP;
3848	return sk->sk_prot->bind_add(sk, addr, addr_len);
3849}
3850EXPORT_SYMBOL(sock_bind_add);
v3.1
 
   1/*
   2 * INET		An implementation of the TCP/IP protocol suite for the LINUX
   3 *		operating system.  INET is implemented using the  BSD Socket
   4 *		interface as the means of communication with the user level.
   5 *
   6 *		Generic socket support routines. Memory allocators, socket lock/release
   7 *		handler for protocols to use and generic option handler.
   8 *
   9 *
  10 * Authors:	Ross Biro
  11 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  12 *		Florian La Roche, <flla@stud.uni-sb.de>
  13 *		Alan Cox, <A.Cox@swansea.ac.uk>
  14 *
  15 * Fixes:
  16 *		Alan Cox	: 	Numerous verify_area() problems
  17 *		Alan Cox	:	Connecting on a connecting socket
  18 *					now returns an error for tcp.
  19 *		Alan Cox	:	sock->protocol is set correctly.
  20 *					and is not sometimes left as 0.
  21 *		Alan Cox	:	connect handles icmp errors on a
  22 *					connect properly. Unfortunately there
  23 *					is a restart syscall nasty there. I
  24 *					can't match BSD without hacking the C
  25 *					library. Ideas urgently sought!
  26 *		Alan Cox	:	Disallow bind() to addresses that are
  27 *					not ours - especially broadcast ones!!
  28 *		Alan Cox	:	Socket 1024 _IS_ ok for users. (fencepost)
  29 *		Alan Cox	:	sock_wfree/sock_rfree don't destroy sockets,
  30 *					instead they leave that for the DESTROY timer.
  31 *		Alan Cox	:	Clean up error flag in accept
  32 *		Alan Cox	:	TCP ack handling is buggy, the DESTROY timer
  33 *					was buggy. Put a remove_sock() in the handler
  34 *					for memory when we hit 0. Also altered the timer
  35 *					code. The ACK stuff can wait and needs major
  36 *					TCP layer surgery.
  37 *		Alan Cox	:	Fixed TCP ack bug, removed remove sock
  38 *					and fixed timer/inet_bh race.
  39 *		Alan Cox	:	Added zapped flag for TCP
  40 *		Alan Cox	:	Move kfree_skb into skbuff.c and tidied up surplus code
  41 *		Alan Cox	:	for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
  42 *		Alan Cox	:	kfree_s calls now are kfree_skbmem so we can track skb resources
  43 *		Alan Cox	:	Supports socket option broadcast now as does udp. Packet and raw need fixing.
  44 *		Alan Cox	:	Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
  45 *		Rick Sladkey	:	Relaxed UDP rules for matching packets.
  46 *		C.E.Hawkins	:	IFF_PROMISC/SIOCGHWADDR support
  47 *	Pauline Middelink	:	identd support
  48 *		Alan Cox	:	Fixed connect() taking signals I think.
  49 *		Alan Cox	:	SO_LINGER supported
  50 *		Alan Cox	:	Error reporting fixes
  51 *		Anonymous	:	inet_create tidied up (sk->reuse setting)
  52 *		Alan Cox	:	inet sockets don't set sk->type!
  53 *		Alan Cox	:	Split socket option code
  54 *		Alan Cox	:	Callbacks
  55 *		Alan Cox	:	Nagle flag for Charles & Johannes stuff
  56 *		Alex		:	Removed restriction on inet fioctl
  57 *		Alan Cox	:	Splitting INET from NET core
  58 *		Alan Cox	:	Fixed bogus SO_TYPE handling in getsockopt()
  59 *		Adam Caldwell	:	Missing return in SO_DONTROUTE/SO_DEBUG code
  60 *		Alan Cox	:	Split IP from generic code
  61 *		Alan Cox	:	New kfree_skbmem()
  62 *		Alan Cox	:	Make SO_DEBUG superuser only.
  63 *		Alan Cox	:	Allow anyone to clear SO_DEBUG
  64 *					(compatibility fix)
  65 *		Alan Cox	:	Added optimistic memory grabbing for AF_UNIX throughput.
  66 *		Alan Cox	:	Allocator for a socket is settable.
  67 *		Alan Cox	:	SO_ERROR includes soft errors.
  68 *		Alan Cox	:	Allow NULL arguments on some SO_ opts
  69 *		Alan Cox	: 	Generic socket allocation to make hooks
  70 *					easier (suggested by Craig Metz).
  71 *		Michael Pall	:	SO_ERROR returns positive errno again
  72 *              Steve Whitehouse:       Added default destructor to free
  73 *                                      protocol private data.
  74 *              Steve Whitehouse:       Added various other default routines
  75 *                                      common to several socket families.
  76 *              Chris Evans     :       Call suser() check last on F_SETOWN
  77 *		Jay Schulist	:	Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
  78 *		Andi Kleen	:	Add sock_kmalloc()/sock_kfree_s()
  79 *		Andi Kleen	:	Fix write_space callback
  80 *		Chris Evans	:	Security fixes - signedness again
  81 *		Arnaldo C. Melo :       cleanups, use skb_queue_purge
  82 *
  83 * To Fix:
  84 *
  85 *
  86 *		This program is free software; you can redistribute it and/or
  87 *		modify it under the terms of the GNU General Public License
  88 *		as published by the Free Software Foundation; either version
  89 *		2 of the License, or (at your option) any later version.
  90 */
  91
  92#include <linux/capability.h>
  93#include <linux/errno.h>
  94#include <linux/types.h>
  95#include <linux/socket.h>
  96#include <linux/in.h>
  97#include <linux/kernel.h>
  98#include <linux/module.h>
  99#include <linux/proc_fs.h>
 100#include <linux/seq_file.h>
 101#include <linux/sched.h>
 102#include <linux/timer.h>
 103#include <linux/string.h>
 104#include <linux/sockios.h>
 105#include <linux/net.h>
 106#include <linux/mm.h>
 107#include <linux/slab.h>
 108#include <linux/interrupt.h>
 109#include <linux/poll.h>
 110#include <linux/tcp.h>
 111#include <linux/init.h>
 112#include <linux/highmem.h>
 113#include <linux/user_namespace.h>
 114
 115#include <asm/uaccess.h>
 116#include <asm/system.h>
 117
 118#include <linux/netdevice.h>
 119#include <net/protocol.h>
 120#include <linux/skbuff.h>
 121#include <net/net_namespace.h>
 122#include <net/request_sock.h>
 123#include <net/sock.h>
 124#include <linux/net_tstamp.h>
 125#include <net/xfrm.h>
 126#include <linux/ipsec.h>
 127#include <net/cls_cgroup.h>
 128
 129#include <linux/filter.h>
 130
 131#include <trace/events/sock.h>
 132
 133#ifdef CONFIG_INET
 134#include <net/tcp.h>
 135#endif
 136
 137/*
 138 * Each address family might have different locking rules, so we have
 139 * one slock key per address family:
 140 */
 141static struct lock_class_key af_family_keys[AF_MAX];
 142static struct lock_class_key af_family_slock_keys[AF_MAX];
 143
 144/*
 145 * Make lock validator output more readable. (we pre-construct these
 146 * strings build-time, so that runtime initialization of socket
 147 * locks is fast):
 148 */
 149static const char *const af_family_key_strings[AF_MAX+1] = {
 150  "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX"     , "sk_lock-AF_INET"     ,
 151  "sk_lock-AF_AX25"  , "sk_lock-AF_IPX"      , "sk_lock-AF_APPLETALK",
 152  "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE"   , "sk_lock-AF_ATMPVC"   ,
 153  "sk_lock-AF_X25"   , "sk_lock-AF_INET6"    , "sk_lock-AF_ROSE"     ,
 154  "sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI"  , "sk_lock-AF_SECURITY" ,
 155  "sk_lock-AF_KEY"   , "sk_lock-AF_NETLINK"  , "sk_lock-AF_PACKET"   ,
 156  "sk_lock-AF_ASH"   , "sk_lock-AF_ECONET"   , "sk_lock-AF_ATMSVC"   ,
 157  "sk_lock-AF_RDS"   , "sk_lock-AF_SNA"      , "sk_lock-AF_IRDA"     ,
 158  "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE"  , "sk_lock-AF_LLC"      ,
 159  "sk_lock-27"       , "sk_lock-28"          , "sk_lock-AF_CAN"      ,
 160  "sk_lock-AF_TIPC"  , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV"        ,
 161  "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN"     , "sk_lock-AF_PHONET"   ,
 162  "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG"      ,
 163  "sk_lock-AF_NFC"   , "sk_lock-AF_MAX"
 164};
 165static const char *const af_family_slock_key_strings[AF_MAX+1] = {
 166  "slock-AF_UNSPEC", "slock-AF_UNIX"     , "slock-AF_INET"     ,
 167  "slock-AF_AX25"  , "slock-AF_IPX"      , "slock-AF_APPLETALK",
 168  "slock-AF_NETROM", "slock-AF_BRIDGE"   , "slock-AF_ATMPVC"   ,
 169  "slock-AF_X25"   , "slock-AF_INET6"    , "slock-AF_ROSE"     ,
 170  "slock-AF_DECnet", "slock-AF_NETBEUI"  , "slock-AF_SECURITY" ,
 171  "slock-AF_KEY"   , "slock-AF_NETLINK"  , "slock-AF_PACKET"   ,
 172  "slock-AF_ASH"   , "slock-AF_ECONET"   , "slock-AF_ATMSVC"   ,
 173  "slock-AF_RDS"   , "slock-AF_SNA"      , "slock-AF_IRDA"     ,
 174  "slock-AF_PPPOX" , "slock-AF_WANPIPE"  , "slock-AF_LLC"      ,
 175  "slock-27"       , "slock-28"          , "slock-AF_CAN"      ,
 176  "slock-AF_TIPC"  , "slock-AF_BLUETOOTH", "slock-AF_IUCV"     ,
 177  "slock-AF_RXRPC" , "slock-AF_ISDN"     , "slock-AF_PHONET"   ,
 178  "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG"      ,
 179  "slock-AF_NFC"   , "slock-AF_MAX"
 180};
 181static const char *const af_family_clock_key_strings[AF_MAX+1] = {
 182  "clock-AF_UNSPEC", "clock-AF_UNIX"     , "clock-AF_INET"     ,
 183  "clock-AF_AX25"  , "clock-AF_IPX"      , "clock-AF_APPLETALK",
 184  "clock-AF_NETROM", "clock-AF_BRIDGE"   , "clock-AF_ATMPVC"   ,
 185  "clock-AF_X25"   , "clock-AF_INET6"    , "clock-AF_ROSE"     ,
 186  "clock-AF_DECnet", "clock-AF_NETBEUI"  , "clock-AF_SECURITY" ,
 187  "clock-AF_KEY"   , "clock-AF_NETLINK"  , "clock-AF_PACKET"   ,
 188  "clock-AF_ASH"   , "clock-AF_ECONET"   , "clock-AF_ATMSVC"   ,
 189  "clock-AF_RDS"   , "clock-AF_SNA"      , "clock-AF_IRDA"     ,
 190  "clock-AF_PPPOX" , "clock-AF_WANPIPE"  , "clock-AF_LLC"      ,
 191  "clock-27"       , "clock-28"          , "clock-AF_CAN"      ,
 192  "clock-AF_TIPC"  , "clock-AF_BLUETOOTH", "clock-AF_IUCV"     ,
 193  "clock-AF_RXRPC" , "clock-AF_ISDN"     , "clock-AF_PHONET"   ,
 194  "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG"      ,
 195  "clock-AF_NFC"   , "clock-AF_MAX"
 196};
 197
 198/*
 199 * sk_callback_lock locking rules are per-address-family,
 200 * so split the lock classes by using a per-AF key:
 201 */
 202static struct lock_class_key af_callback_keys[AF_MAX];
 203
 204/* Take into consideration the size of the struct sk_buff overhead in the
 205 * determination of these values, since that is non-constant across
 206 * platforms.  This makes socket queueing behavior and performance
 207 * not depend upon such differences.
 208 */
 209#define _SK_MEM_PACKETS		256
 210#define _SK_MEM_OVERHEAD	(sizeof(struct sk_buff) + 256)
 211#define SK_WMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
 212#define SK_RMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
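/*
 * Worked example (illustrative only; sizeof(struct sk_buff) differs by
 * architecture and config): assuming a 240-byte struct sk_buff for
 * illustration, _SK_MEM_OVERHEAD is 496 bytes, so SK_WMEM_MAX and
 * SK_RMEM_MAX come out to 496 * 256 = 126976 bytes, roughly 124 KiB
 * per direction.
 */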
 213
 214/* Run time adjustable parameters. */
 215__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
 216__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
 217__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
 218__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;
 219
 220/* Maximal space eaten by iovec or ancillary data plus some space */
 221int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
 222EXPORT_SYMBOL(sysctl_optmem_max);
 223
 224#if defined(CONFIG_CGROUPS) && !defined(CONFIG_NET_CLS_CGROUP)
 225int net_cls_subsys_id = -1;
 226EXPORT_SYMBOL_GPL(net_cls_subsys_id);
 227#endif
 228
 229static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
 230{
 231	struct timeval tv;
 232
 233	if (optlen < sizeof(tv))
 234		return -EINVAL;
 235	if (copy_from_user(&tv, optval, sizeof(tv)))
 236		return -EFAULT;
 237	if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
 238		return -EDOM;
 239
 240	if (tv.tv_sec < 0) {
 241		static int warned __read_mostly;
 242
 243		*timeo_p = 0;
 244		if (warned < 10 && net_ratelimit()) {
 245			warned++;
 246			printk(KERN_INFO "sock_set_timeout: `%s' (pid %d) "
 247			       "tries to set negative timeout\n",
 248				current->comm, task_pid_nr(current));
 249		}
 250		return 0;
 251	}
 252	*timeo_p = MAX_SCHEDULE_TIMEOUT;
 253	if (tv.tv_sec == 0 && tv.tv_usec == 0)
 254		return 0;
 255	if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
 256		*timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
 257	return 0;
 258}
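/*
 * Usage sketch (illustrative, userspace side): SO_RCVTIMEO and SO_SNDTIMEO
 * are parsed by sock_set_timeout() above. Assuming an already-created
 * socket fd:
 *
 *	struct timeval tv = { .tv_sec = 5, .tv_usec = 0 };
 *	setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
 *
 * A zero timeval selects MAX_SCHEDULE_TIMEOUT ("wait forever"), a negative
 * tv_sec is clamped to a zero timeout with a rate-limited warning, and a
 * tv_usec outside [0, USEC_PER_SEC) yields -EDOM.
 */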
 259
 260static void sock_warn_obsolete_bsdism(const char *name)
 261{
 262	static int warned;
 263	static char warncomm[TASK_COMM_LEN];
 264	if (strcmp(warncomm, current->comm) && warned < 5) {
 265		strcpy(warncomm,  current->comm);
 266		printk(KERN_WARNING "process `%s' is using obsolete "
 267		       "%s SO_BSDCOMPAT\n", warncomm, name);
 268		warned++;
 269	}
 270}
 271
 272static void sock_disable_timestamp(struct sock *sk, int flag)
 273{
 274	if (sock_flag(sk, flag)) {
 275		sock_reset_flag(sk, flag);
 276		if (!sock_flag(sk, SOCK_TIMESTAMP) &&
 277		    !sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE)) {
 278			net_disable_timestamp();
 279		}
 280	}
 281}
 282
 283
 284int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 285{
 286	int err;
 287	int skb_len;
 288	unsigned long flags;
 289	struct sk_buff_head *list = &sk->sk_receive_queue;
 290
 291	/* Cast sk->rcvbuf to unsigned... It's pointless, but reduces
 292	   the number of warnings when compiling with -W --ANK
 293	 */
 294	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
 295	    (unsigned)sk->sk_rcvbuf) {
 296		atomic_inc(&sk->sk_drops);
 297		trace_sock_rcvqueue_full(sk, skb);
 298		return -ENOMEM;
 299	}
 300
 301	err = sk_filter(sk, skb);
 302	if (err)
 303		return err;
 304
 305	if (!sk_rmem_schedule(sk, skb->truesize)) {
 306		atomic_inc(&sk->sk_drops);
 307		return -ENOBUFS;
 308	}
 309
 310	skb->dev = NULL;
 311	skb_set_owner_r(skb, sk);
 312
 313	/* Cache the SKB length before we tack it onto the receive
 314	 * queue.  Once it is added it no longer belongs to us and
 315	 * may be freed by other threads of control pulling packets
 316	 * from the queue.
 317	 */
 318	skb_len = skb->len;
 319
 320	/* We escape from the RCU-protected region, so make sure we don't
 321	 * leak a non-refcounted dst
 322	 */
 323	skb_dst_force(skb);
 324
 325	spin_lock_irqsave(&list->lock, flags);
 326	skb->dropcount = atomic_read(&sk->sk_drops);
 327	__skb_queue_tail(list, skb);
 328	spin_unlock_irqrestore(&list->lock, flags);
 329
 330	if (!sock_flag(sk, SOCK_DEAD))
 331		sk->sk_data_ready(sk, skb_len);
 332	return 0;
 333}
 334EXPORT_SYMBOL(sock_queue_rcv_skb);
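/*
 * Usage sketch (illustrative, for a hypothetical protocol's receive path):
 *
 *	err = sock_queue_rcv_skb(sk, skb);
 *	if (err < 0)
 *		kfree_skb(skb);		// drop on filter/rmem/buffer error
 *
 * On success the skb belongs to sk->sk_receive_queue and must not be
 * touched again; sk->sk_data_ready() has already fired unless the socket
 * is marked SOCK_DEAD.
 */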
 335
 336int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
 337{
 338	int rc = NET_RX_SUCCESS;
 339
 340	if (sk_filter(sk, skb))
 341		goto discard_and_relse;
 342
 343	skb->dev = NULL;
 344
 345	if (sk_rcvqueues_full(sk, skb)) {
 346		atomic_inc(&sk->sk_drops);
 347		goto discard_and_relse;
 348	}
 349	if (nested)
 350		bh_lock_sock_nested(sk);
 351	else
 352		bh_lock_sock(sk);
 353	if (!sock_owned_by_user(sk)) {
 354		/*
 355		 * trylock + unlock semantics:
 356		 */
 357		mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);
 358
 359		rc = sk_backlog_rcv(sk, skb);
 360
 361		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
 362	} else if (sk_add_backlog(sk, skb)) {
 363		bh_unlock_sock(sk);
 364		atomic_inc(&sk->sk_drops);
 365		goto discard_and_relse;
 366	}
 367
 368	bh_unlock_sock(sk);
 369out:
 370	sock_put(sk);
 371	return rc;
 372discard_and_relse:
 373	kfree_skb(skb);
 374	goto out;
 375}
 376EXPORT_SYMBOL(sk_receive_skb);
 377
 378void sk_reset_txq(struct sock *sk)
 379{
 380	sk_tx_queue_clear(sk);
 381}
 382EXPORT_SYMBOL(sk_reset_txq);
 383
 384struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
 385{
 386	struct dst_entry *dst = __sk_dst_get(sk);
 387
 388	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
 389		sk_tx_queue_clear(sk);
 390		rcu_assign_pointer(sk->sk_dst_cache, NULL);
 391		dst_release(dst);
 392		return NULL;
 393	}
 394
 395	return dst;
 396}
 397EXPORT_SYMBOL(__sk_dst_check);
 398
 399struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
 400{
 401	struct dst_entry *dst = sk_dst_get(sk);
 402
 403	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
 404		sk_dst_reset(sk);
 405		dst_release(dst);
 406		return NULL;
 407	}
 408
 409	return dst;
 410}
 411EXPORT_SYMBOL(sk_dst_check);
 412
 413static int sock_bindtodevice(struct sock *sk, char __user *optval, int optlen)
 414{
 415	int ret = -ENOPROTOOPT;
 416#ifdef CONFIG_NETDEVICES
 417	struct net *net = sock_net(sk);
 418	char devname[IFNAMSIZ];
 419	int index;
 420
 421	/* Sorry... */
 422	ret = -EPERM;
 423	if (!capable(CAP_NET_RAW))
 424		goto out;
 425
 426	ret = -EINVAL;
 427	if (optlen < 0)
 428		goto out;
 429
 430	/* Bind this socket to a particular device like "eth0",
 431	 * as specified in the passed interface name. If the
 432	 * name is "" or the option length is zero the socket
 433	 * is not bound.
 434	 */
 435	if (optlen > IFNAMSIZ - 1)
 436		optlen = IFNAMSIZ - 1;
 437	memset(devname, 0, sizeof(devname));
 438
 439	ret = -EFAULT;
 440	if (copy_from_user(devname, optval, optlen))
 441		goto out;
 442
 443	index = 0;
 444	if (devname[0] != '\0') {
 445		struct net_device *dev;
 446
 447		rcu_read_lock();
 448		dev = dev_get_by_name_rcu(net, devname);
 449		if (dev)
 450			index = dev->ifindex;
 451		rcu_read_unlock();
 452		ret = -ENODEV;
 453		if (!dev)
 454			goto out;
 455	}
 456
 457	lock_sock(sk);
 458	sk->sk_bound_dev_if = index;
 459	sk_dst_reset(sk);
 460	release_sock(sk);
 461
 462	ret = 0;
 463
 464out:
 465#endif
 466
 467	return ret;
 468}
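/*
 * Usage sketch (illustrative, userspace side; requires CAP_NET_RAW):
 *
 *	setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, "eth0", strlen("eth0"));
 *
 * An empty name (or a zero optlen) clears the binding again, since
 * sk_bound_dev_if is then reset to 0.
 */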
 469
 470static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
 471{
 472	if (valbool)
 473		sock_set_flag(sk, bit);
 474	else
 475		sock_reset_flag(sk, bit);
 476}
 477
 478/*
 479 *	This is meant for all protocols to use and covers goings on
 480 *	at the socket level. Everything here is generic.
 481 */
 482
 483int sock_setsockopt(struct socket *sock, int level, int optname,
 484		    char __user *optval, unsigned int optlen)
 485{
 486	struct sock *sk = sock->sk;
 487	int val;
 488	int valbool;
 489	struct linger ling;
 490	int ret = 0;
 491
 492	/*
 493	 *	Options whose argument is not a plain int
 494	 */
 495
 496	if (optname == SO_BINDTODEVICE)
 497		return sock_bindtodevice(sk, optval, optlen);
 498
 499	if (optlen < sizeof(int))
 500		return -EINVAL;
 501
 502	if (get_user(val, (int __user *)optval))
 503		return -EFAULT;
 504
 505	valbool = val ? 1 : 0;
 506
 507	lock_sock(sk);
 508
 509	switch (optname) {
 510	case SO_DEBUG:
 511		if (val && !capable(CAP_NET_ADMIN))
 512			ret = -EACCES;
 513		else
 514			sock_valbool_flag(sk, SOCK_DBG, valbool);
 515		break;
 516	case SO_REUSEADDR:
 517		sk->sk_reuse = valbool;
 518		break;
 519	case SO_TYPE:
 520	case SO_PROTOCOL:
 521	case SO_DOMAIN:
 522	case SO_ERROR:
 523		ret = -ENOPROTOOPT;
 524		break;
 525	case SO_DONTROUTE:
 526		sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
 527		break;
 528	case SO_BROADCAST:
 529		sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
 530		break;
 531	case SO_SNDBUF:
 532		/* Don't return an error on this; BSD doesn't, and if you
 533		   think about it, this is right. Otherwise apps would have
 534		   to play 'guess the biggest size' games. RCVBUF/SNDBUF
 535		   are treated in BSD as hints */
 536
 537		if (val > sysctl_wmem_max)
 538			val = sysctl_wmem_max;
 539set_sndbuf:
 540		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
 541		if ((val * 2) < SOCK_MIN_SNDBUF)
 542			sk->sk_sndbuf = SOCK_MIN_SNDBUF;
 543		else
 544			sk->sk_sndbuf = val * 2;
 545
 546		/*
 547		 *	Wake up sending tasks if we
 548		 *	upped the value.
 549		 */
 550		sk->sk_write_space(sk);
 551		break;
 552
 553	case SO_SNDBUFFORCE:
 554		if (!capable(CAP_NET_ADMIN)) {
 555			ret = -EPERM;
 556			break;
 557		}
 558		goto set_sndbuf;
 559
 560	case SO_RCVBUF:
 561		/* Don't return an error on this; BSD doesn't, and if you
 562		   think about it, this is right. Otherwise apps would have
 563		   to play 'guess the biggest size' games. RCVBUF/SNDBUF
 564		   are treated in BSD as hints */
 565
 566		if (val > sysctl_rmem_max)
 567			val = sysctl_rmem_max;
 568set_rcvbuf:
 569		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
 570		/*
 571		 * We double it on the way in to account for
 572		 * "struct sk_buff" etc. overhead.   Applications
 573		 * assume that the SO_RCVBUF setting they make will
 574		 * allow that much actual data to be received on that
 575		 * socket.
 576		 *
 577		 * Applications are unaware that "struct sk_buff" and
 578		 * other overheads allocate from the receive buffer
 579		 * during socket buffer allocation.
 580		 *
 581		 * And after considering the possible alternatives,
 582		 * returning the value we actually used in getsockopt
 583		 * is the most desirable behavior.
 584		 */
 585		if ((val * 2) < SOCK_MIN_RCVBUF)
 586			sk->sk_rcvbuf = SOCK_MIN_RCVBUF;
 587		else
 588			sk->sk_rcvbuf = val * 2;
 589		break;
 590
 591	case SO_RCVBUFFORCE:
 592		if (!capable(CAP_NET_ADMIN)) {
 593			ret = -EPERM;
 594			break;
 595		}
 596		goto set_rcvbuf;
 597
 598	case SO_KEEPALIVE:
 599#ifdef CONFIG_INET
 600		if (sk->sk_protocol == IPPROTO_TCP)
 601			tcp_set_keepalive(sk, valbool);
 602#endif
 603		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
 604		break;
 605
 606	case SO_OOBINLINE:
 607		sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
 608		break;
 609
 610	case SO_NO_CHECK:
 611		sk->sk_no_check = valbool;
 612		break;
 613
 614	case SO_PRIORITY:
 615		if ((val >= 0 && val <= 6) || capable(CAP_NET_ADMIN))
 616			sk->sk_priority = val;
 617		else
 618			ret = -EPERM;
 619		break;
 620
 621	case SO_LINGER:
 622		if (optlen < sizeof(ling)) {
 623			ret = -EINVAL;	/* 1003.1g */
 624			break;
 625		}
 626		if (copy_from_user(&ling, optval, sizeof(ling))) {
 627			ret = -EFAULT;
 628			break;
 629		}
 630		if (!ling.l_onoff)
 631			sock_reset_flag(sk, SOCK_LINGER);
 632		else {
 633#if (BITS_PER_LONG == 32)
 634			if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
 635				sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
 636			else
 637#endif
 638				sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
 639			sock_set_flag(sk, SOCK_LINGER);
 640		}
 641		break;
 642
 643	case SO_BSDCOMPAT:
 644		sock_warn_obsolete_bsdism("setsockopt");
 645		break;
 646
 647	case SO_PASSCRED:
 648		if (valbool)
 649			set_bit(SOCK_PASSCRED, &sock->flags);
 650		else
 651			clear_bit(SOCK_PASSCRED, &sock->flags);
 652		break;
 653
 654	case SO_TIMESTAMP:
 655	case SO_TIMESTAMPNS:
 656		if (valbool)  {
 657			if (optname == SO_TIMESTAMP)
 658				sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
 659			else
 660				sock_set_flag(sk, SOCK_RCVTSTAMPNS);
 661			sock_set_flag(sk, SOCK_RCVTSTAMP);
 662			sock_enable_timestamp(sk, SOCK_TIMESTAMP);
 663		} else {
 664			sock_reset_flag(sk, SOCK_RCVTSTAMP);
 665			sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
 666		}
 667		break;
 668
 669	case SO_TIMESTAMPING:
 670		if (val & ~SOF_TIMESTAMPING_MASK) {
 671			ret = -EINVAL;
 672			break;
 673		}
 674		sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE,
 675				  val & SOF_TIMESTAMPING_TX_HARDWARE);
 676		sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE,
 677				  val & SOF_TIMESTAMPING_TX_SOFTWARE);
 678		sock_valbool_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE,
 679				  val & SOF_TIMESTAMPING_RX_HARDWARE);
 680		if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
 681			sock_enable_timestamp(sk,
 682					      SOCK_TIMESTAMPING_RX_SOFTWARE);
 683		else
 684			sock_disable_timestamp(sk,
 685					       SOCK_TIMESTAMPING_RX_SOFTWARE);
 686		sock_valbool_flag(sk, SOCK_TIMESTAMPING_SOFTWARE,
 687				  val & SOF_TIMESTAMPING_SOFTWARE);
 688		sock_valbool_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE,
 689				  val & SOF_TIMESTAMPING_SYS_HARDWARE);
 690		sock_valbool_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE,
 691				  val & SOF_TIMESTAMPING_RAW_HARDWARE);
 692		break;
 693
 694	case SO_RCVLOWAT:
 695		if (val < 0)
 696			val = INT_MAX;
 697		sk->sk_rcvlowat = val ? : 1;
 698		break;
 699
 700	case SO_RCVTIMEO:
 701		ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
 702		break;
 703
 704	case SO_SNDTIMEO:
 705		ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
 706		break;
 707
 708	case SO_ATTACH_FILTER:
 709		ret = -EINVAL;
 710		if (optlen == sizeof(struct sock_fprog)) {
 711			struct sock_fprog fprog;
 712
 713			ret = -EFAULT;
 714			if (copy_from_user(&fprog, optval, sizeof(fprog)))
 715				break;
 716
 717			ret = sk_attach_filter(&fprog, sk);
 718		}
 719		break;
 720
 721	case SO_DETACH_FILTER:
 722		ret = sk_detach_filter(sk);
 723		break;
 724
 725	case SO_PASSSEC:
 726		if (valbool)
 727			set_bit(SOCK_PASSSEC, &sock->flags);
 728		else
 729			clear_bit(SOCK_PASSSEC, &sock->flags);
 730		break;
 731	case SO_MARK:
 732		if (!capable(CAP_NET_ADMIN))
 733			ret = -EPERM;
 734		else
 735			sk->sk_mark = val;
 736		break;
 737
 738		/* We implement SO_SNDLOWAT etc. as not being
 739		   settable (1003.1g 5.3) */
 740	case SO_RXQ_OVFL:
 741		if (valbool)
 742			sock_set_flag(sk, SOCK_RXQ_OVFL);
 743		else
 744			sock_reset_flag(sk, SOCK_RXQ_OVFL);
 745		break;
 746	default:
 747		ret = -ENOPROTOOPT;
 748		break;
 749	}
 750	release_sock(sk);
 751	return ret;
 752}
 753EXPORT_SYMBOL(sock_setsockopt);
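/*
 * Usage sketch (illustrative, userspace side): SO_ATTACH_FILTER hands a
 * classic BPF program to sk_attach_filter() via struct sock_fprog. A
 * one-instruction filter that accepts every packet:
 *
 *	#include <linux/filter.h>
 *
 *	struct sock_filter code[] = {
 *		BPF_STMT(BPF_RET | BPF_K, 0xffffffff),	// accept whole packet
 *	};
 *	struct sock_fprog prog = { .len = 1, .filter = code };
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog));
 *
 * SO_DETACH_FILTER (the value of its int argument is ignored) removes it
 * again via sk_detach_filter().
 */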
 754
 755
 756void cred_to_ucred(struct pid *pid, const struct cred *cred,
 757		   struct ucred *ucred)
 758{
 759	ucred->pid = pid_vnr(pid);
 760	ucred->uid = ucred->gid = -1;
 761	if (cred) {
 762		struct user_namespace *current_ns = current_user_ns();
 763
 764		ucred->uid = user_ns_map_uid(current_ns, cred, cred->euid);
 765		ucred->gid = user_ns_map_gid(current_ns, cred, cred->egid);
 766	}
 767}
 768EXPORT_SYMBOL_GPL(cred_to_ucred);
 769
 770int sock_getsockopt(struct socket *sock, int level, int optname,
 771		    char __user *optval, int __user *optlen)
 772{
 773	struct sock *sk = sock->sk;
 774
 775	union {
 776		int val;
 777		struct linger ling;
 778		struct timeval tm;
 779	} v;
 780
 781	int lv = sizeof(int);
 782	int len;
 783
 784	if (get_user(len, optlen))
 785		return -EFAULT;
 786	if (len < 0)
 787		return -EINVAL;
 788
 789	memset(&v, 0, sizeof(v));
 790
 791	switch (optname) {
 792	case SO_DEBUG:
 793		v.val = sock_flag(sk, SOCK_DBG);
 794		break;
 795
 796	case SO_DONTROUTE:
 797		v.val = sock_flag(sk, SOCK_LOCALROUTE);
 798		break;
 799
 800	case SO_BROADCAST:
 801		v.val = !!sock_flag(sk, SOCK_BROADCAST);
 802		break;
 803
 804	case SO_SNDBUF:
 805		v.val = sk->sk_sndbuf;
 806		break;
 807
 808	case SO_RCVBUF:
 809		v.val = sk->sk_rcvbuf;
 810		break;
 811
 812	case SO_REUSEADDR:
 813		v.val = sk->sk_reuse;
 814		break;
 815
 816	case SO_KEEPALIVE:
 817		v.val = !!sock_flag(sk, SOCK_KEEPOPEN);
 818		break;
 819
 820	case SO_TYPE:
 821		v.val = sk->sk_type;
 822		break;
 823
 824	case SO_PROTOCOL:
 825		v.val = sk->sk_protocol;
 826		break;
 827
 828	case SO_DOMAIN:
 829		v.val = sk->sk_family;
 830		break;
 831
 832	case SO_ERROR:
 833		v.val = -sock_error(sk);
 834		if (v.val == 0)
 835			v.val = xchg(&sk->sk_err_soft, 0);
 836		break;
 837
 838	case SO_OOBINLINE:
 839		v.val = !!sock_flag(sk, SOCK_URGINLINE);
 840		break;
 841
 842	case SO_NO_CHECK:
 843		v.val = sk->sk_no_check;
 844		break;
 845
 846	case SO_PRIORITY:
 847		v.val = sk->sk_priority;
 848		break;
 849
 850	case SO_LINGER:
 851		lv		= sizeof(v.ling);
 852		v.ling.l_onoff	= !!sock_flag(sk, SOCK_LINGER);
 853		v.ling.l_linger	= sk->sk_lingertime / HZ;
 854		break;
 855
 856	case SO_BSDCOMPAT:
 857		sock_warn_obsolete_bsdism("getsockopt");
 858		break;
 859
 860	case SO_TIMESTAMP:
 861		v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
 862				!sock_flag(sk, SOCK_RCVTSTAMPNS);
 863		break;
 864
 865	case SO_TIMESTAMPNS:
 866		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
 867		break;
 868
 869	case SO_TIMESTAMPING:
 870		v.val = 0;
 871		if (sock_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE))
 872			v.val |= SOF_TIMESTAMPING_TX_HARDWARE;
 873		if (sock_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE))
 874			v.val |= SOF_TIMESTAMPING_TX_SOFTWARE;
 875		if (sock_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE))
 876			v.val |= SOF_TIMESTAMPING_RX_HARDWARE;
 877		if (sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE))
 878			v.val |= SOF_TIMESTAMPING_RX_SOFTWARE;
 879		if (sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE))
 880			v.val |= SOF_TIMESTAMPING_SOFTWARE;
 881		if (sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE))
 882			v.val |= SOF_TIMESTAMPING_SYS_HARDWARE;
 883		if (sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE))
 884			v.val |= SOF_TIMESTAMPING_RAW_HARDWARE;
 885		break;
 886
 887	case SO_RCVTIMEO:
 888		lv = sizeof(struct timeval);
 889		if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
 890			v.tm.tv_sec = 0;
 891			v.tm.tv_usec = 0;
 892		} else {
 893			v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
 894			v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
 895		}
 896		break;
 897
 898	case SO_SNDTIMEO:
 899		lv = sizeof(struct timeval);
 900		if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
 901			v.tm.tv_sec = 0;
 902			v.tm.tv_usec = 0;
 903		} else {
 904			v.tm.tv_sec = sk->sk_sndtimeo / HZ;
 905			v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
 906		}
 907		break;
 908
 909	case SO_RCVLOWAT:
 910		v.val = sk->sk_rcvlowat;
 911		break;
 912
 913	case SO_SNDLOWAT:
 914		v.val = 1;
 915		break;
 916
 917	case SO_PASSCRED:
 918		v.val = test_bit(SOCK_PASSCRED, &sock->flags) ? 1 : 0;
 919		break;
 920
 921	case SO_PEERCRED:
 922	{
 923		struct ucred peercred;
 924		if (len > sizeof(peercred))
 925			len = sizeof(peercred);
 926		cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
 927		if (copy_to_user(optval, &peercred, len))
 928			return -EFAULT;
 929		goto lenout;
 930	}
 931
 932	case SO_PEERNAME:
 933	{
 934		char address[128];
 935
 936		if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
 937			return -ENOTCONN;
 938		if (lv < len)
 939			return -EINVAL;
 940		if (copy_to_user(optval, address, len))
 941			return -EFAULT;
 942		goto lenout;
 943	}
 944
 945	/* Dubious BSD thing... Probably nobody even uses it, but
 946	 * the UNIX standard wants it for whatever reason... -DaveM
 947	 */
 948	case SO_ACCEPTCONN:
 949		v.val = sk->sk_state == TCP_LISTEN;
 950		break;
 951
 952	case SO_PASSSEC:
 953		v.val = test_bit(SOCK_PASSSEC, &sock->flags) ? 1 : 0;
 954		break;
 955
 956	case SO_PEERSEC:
 957		return security_socket_getpeersec_stream(sock, optval, optlen, len);
 958
 959	case SO_MARK:
 960		v.val = sk->sk_mark;
 961		break;
 962
 963	case SO_RXQ_OVFL:
 964		v.val = !!sock_flag(sk, SOCK_RXQ_OVFL);
 965		break;
 966
 967	default:
 968		return -ENOPROTOOPT;
 969	}
 970
 971	if (len > lv)
 972		len = lv;
 973	if (copy_to_user(optval, &v, len))
 974		return -EFAULT;
 975lenout:
 976	if (put_user(len, optlen))
 977		return -EFAULT;
 978	return 0;
 979}
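/*
 * Usage sketch (illustrative, userspace side): the doubling that
 * sock_setsockopt() applies to SO_RCVBUF/SO_SNDBUF is visible here,
 * since getsockopt() reports the value the kernel actually uses:
 *
 *	int val = 65536, out = 0;
 *	socklen_t len = sizeof(out);
 *	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, sizeof(val));
 *	getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &out, &len);
 *	// out == 131072, assuming 65536 <= sysctl_rmem_max
 */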
 980
 981/*
 982 * Initialize an sk_lock.
 983 *
 984 * (We also register the sk_lock with the lock validator.)
 985 */
 986static inline void sock_lock_init(struct sock *sk)
 987{
 988	sock_lock_init_class_and_name(sk,
 989			af_family_slock_key_strings[sk->sk_family],
 990			af_family_slock_keys + sk->sk_family,
 991			af_family_key_strings[sk->sk_family],
 992			af_family_keys + sk->sk_family);
 993}
 994
 995/*
 996 * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
 997 * even temporarily, because of RCU lookups. sk_node should also be left as is.
 998 * We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end.
 999 */
1000static void sock_copy(struct sock *nsk, const struct sock *osk)
1001{
1002#ifdef CONFIG_SECURITY_NETWORK
1003	void *sptr = nsk->sk_security;
1004#endif
1005	memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));
1006
1007	memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
1008	       osk->sk_prot->obj_size - offsetof(struct sock, sk_dontcopy_end));
1009
1010#ifdef CONFIG_SECURITY_NETWORK
1011	nsk->sk_security = sptr;
1012	security_sk_clone(osk, nsk);
1013#endif
1014}
1015
1016/*
1017 * caches using SLAB_DESTROY_BY_RCU should leave the .next pointer of nulls
1018 * nodes unmodified. Special care is taken when initializing the object to zero.
1019 */
1020static inline void sk_prot_clear_nulls(struct sock *sk, int size)
1021{
1022	if (offsetof(struct sock, sk_node.next) != 0)
1023		memset(sk, 0, offsetof(struct sock, sk_node.next));
1024	memset(&sk->sk_node.pprev, 0,
1025	       size - offsetof(struct sock, sk_node.pprev));
1026}
1027
1028void sk_prot_clear_portaddr_nulls(struct sock *sk, int size)
1029{
1030	unsigned long nulls1, nulls2;
1031
1032	nulls1 = offsetof(struct sock, __sk_common.skc_node.next);
1033	nulls2 = offsetof(struct sock, __sk_common.skc_portaddr_node.next);
1034	if (nulls1 > nulls2)
1035		swap(nulls1, nulls2);
1036
1037	if (nulls1 != 0)
1038		memset((char *)sk, 0, nulls1);
1039	memset((char *)sk + nulls1 + sizeof(void *), 0,
1040	       nulls2 - nulls1 - sizeof(void *));
1041	memset((char *)sk + nulls2 + sizeof(void *), 0,
1042	       size - nulls2 - sizeof(void *));
1043}
1044EXPORT_SYMBOL(sk_prot_clear_portaddr_nulls);
1045
1046static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
1047		int family)
1048{
1049	struct sock *sk;
1050	struct kmem_cache *slab;
1051
1052	slab = prot->slab;
1053	if (slab != NULL) {
1054		sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
1055		if (!sk)
1056			return sk;
1057		if (priority & __GFP_ZERO) {
1058			if (prot->clear_sk)
1059				prot->clear_sk(sk, prot->obj_size);
1060			else
1061				sk_prot_clear_nulls(sk, prot->obj_size);
1062		}
1063	} else
1064		sk = kmalloc(prot->obj_size, priority);
1065
1066	if (sk != NULL) {
1067		kmemcheck_annotate_bitfield(sk, flags);
1068
1069		if (security_sk_alloc(sk, family, priority))
1070			goto out_free;
1071
1072		if (!try_module_get(prot->owner))
1073			goto out_free_sec;
1074		sk_tx_queue_clear(sk);
1075	}
1076
1077	return sk;
1078
1079out_free_sec:
1080	security_sk_free(sk);
1081out_free:
1082	if (slab != NULL)
1083		kmem_cache_free(slab, sk);
1084	else
1085		kfree(sk);
1086	return NULL;
1087}
1088
1089static void sk_prot_free(struct proto *prot, struct sock *sk)
1090{
1091	struct kmem_cache *slab;
1092	struct module *owner;
1093
1094	owner = prot->owner;
1095	slab = prot->slab;
1096
1097	security_sk_free(sk);
1098	if (slab != NULL)
1099		kmem_cache_free(slab, sk);
1100	else
1101		kfree(sk);
1102	module_put(owner);
1103}
1104
1105#ifdef CONFIG_CGROUPS
1106void sock_update_classid(struct sock *sk)
1107{
1108	u32 classid;
1109
1110	rcu_read_lock();  /* doing current task, which cannot vanish. */
1111	classid = task_cls_classid(current);
1112	rcu_read_unlock();
1113	if (classid && classid != sk->sk_classid)
1114		sk->sk_classid = classid;
1115}
1116EXPORT_SYMBOL(sock_update_classid);
1117#endif
1118
1119/**
1120 *	sk_alloc - All socket objects are allocated here
1121 *	@net: the applicable net namespace
1122 *	@family: protocol family
1123 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1124 *	@prot: struct proto associated with this new sock instance
1125 */
1126struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
1127		      struct proto *prot)
1128{
1129	struct sock *sk;
1130
1131	sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
1132	if (sk) {
1133		sk->sk_family = family;
1134		/*
1135		 * See comment in struct sock definition to understand
1136		 * why we need sk_prot_creator -acme
1137		 */
1138		sk->sk_prot = sk->sk_prot_creator = prot;
1139		sock_lock_init(sk);
1140		sock_net_set(sk, get_net(net));
1141		atomic_set(&sk->sk_wmem_alloc, 1);
1142
1143		sock_update_classid(sk);
1144	}
1145
1146	return sk;
1147}
1148EXPORT_SYMBOL(sk_alloc);
1149
1150static void __sk_free(struct sock *sk)
1151{
1152	struct sk_filter *filter;
1153
1154	if (sk->sk_destruct)
1155		sk->sk_destruct(sk);
1156
1157	filter = rcu_dereference_check(sk->sk_filter,
1158				       atomic_read(&sk->sk_wmem_alloc) == 0);
1159	if (filter) {
1160		sk_filter_uncharge(sk, filter);
1161		rcu_assign_pointer(sk->sk_filter, NULL);
1162	}
1163
1164	sock_disable_timestamp(sk, SOCK_TIMESTAMP);
1165	sock_disable_timestamp(sk, SOCK_TIMESTAMPING_RX_SOFTWARE);
1166
1167	if (atomic_read(&sk->sk_omem_alloc))
1168		printk(KERN_DEBUG "%s: optmem leakage (%d bytes) detected.\n",
1169		       __func__, atomic_read(&sk->sk_omem_alloc));
1170
1171	if (sk->sk_peer_cred)
1172		put_cred(sk->sk_peer_cred);
1173	put_pid(sk->sk_peer_pid);
1174	put_net(sock_net(sk));
1175	sk_prot_free(sk->sk_prot_creator, sk);
1176}
1177
1178void sk_free(struct sock *sk)
1179{
1180	/*
1181	 * We subtract one from sk_wmem_alloc and can know if
1182	 * some packets are still in some tx queue.
1183	 * If not null, sock_wfree() will call __sk_free(sk) later
1184	 */
1185	if (atomic_dec_and_test(&sk->sk_wmem_alloc))
1186		__sk_free(sk);
1187}
1188EXPORT_SYMBOL(sk_free);
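/*
 * Lifecycle note: sk_alloc() starts sk_wmem_alloc at 1, with that extra
 * count standing for the socket itself. sk_free() only drops this count;
 * if packets are still in flight, the sock_wfree() of the last queued skb
 * is what finally runs __sk_free().
 */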
1189
1190/*
1191 * The last sock_put should drop the reference to sk->sk_net. It has already
1192 * been dropped in sk_change_net. Taking a reference to a stopping namespace
1193 * is not an option.
1194 * Instead, take a reference to the socket to remove it from the hash _alive_,
1195 * and after that destroy it in the context of init_net.
1196 */
1197void sk_release_kernel(struct sock *sk)
1198{
1199	if (sk == NULL || sk->sk_socket == NULL)
1200		return;
1201
1202	sock_hold(sk);
1203	sock_release(sk->sk_socket);
1204	release_net(sock_net(sk));
1205	sock_net_set(sk, get_net(&init_net));
1206	sock_put(sk);
1207}
1208EXPORT_SYMBOL(sk_release_kernel);
1209
1210struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
1211{
1212	struct sock *newsk;
1213
1214	newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
1215	if (newsk != NULL) {
1216		struct sk_filter *filter;
1217
1218		sock_copy(newsk, sk);
1219
1220		/* SANITY */
1221		get_net(sock_net(newsk));
1222		sk_node_init(&newsk->sk_node);
1223		sock_lock_init(newsk);
1224		bh_lock_sock(newsk);
1225		newsk->sk_backlog.head	= newsk->sk_backlog.tail = NULL;
1226		newsk->sk_backlog.len = 0;
1227
1228		atomic_set(&newsk->sk_rmem_alloc, 0);
1229		/*
1230		 * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
1231		 */
1232		atomic_set(&newsk->sk_wmem_alloc, 1);
1233		atomic_set(&newsk->sk_omem_alloc, 0);
1234		skb_queue_head_init(&newsk->sk_receive_queue);
1235		skb_queue_head_init(&newsk->sk_write_queue);
1236#ifdef CONFIG_NET_DMA
1237		skb_queue_head_init(&newsk->sk_async_wait_queue);
1238#endif
1239
1240		spin_lock_init(&newsk->sk_dst_lock);
1241		rwlock_init(&newsk->sk_callback_lock);
1242		lockdep_set_class_and_name(&newsk->sk_callback_lock,
1243				af_callback_keys + newsk->sk_family,
1244				af_family_clock_key_strings[newsk->sk_family]);
1245
1246		newsk->sk_dst_cache	= NULL;
1247		newsk->sk_wmem_queued	= 0;
1248		newsk->sk_forward_alloc = 0;
1249		newsk->sk_send_head	= NULL;
1250		newsk->sk_userlocks	= sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
1251
1252		sock_reset_flag(newsk, SOCK_DONE);
1253		skb_queue_head_init(&newsk->sk_error_queue);
1254
1255		filter = rcu_dereference_protected(newsk->sk_filter, 1);
1256		if (filter != NULL)
1257			sk_filter_charge(newsk, filter);
1258
1259		if (unlikely(xfrm_sk_clone_policy(newsk))) {
1260			/* It is still a raw copy of the parent, so invalidate
1261			 * the destructor and do a plain sk_free() */
1262			newsk->sk_destruct = NULL;
1263			sk_free(newsk);
1264			newsk = NULL;
1265			goto out;
1266		}
1267
1268		newsk->sk_err	   = 0;
1269		newsk->sk_priority = 0;
1270		/*
1271		 * Before updating sk_refcnt, we must commit prior changes to memory
1272		 * (Documentation/RCU/rculist_nulls.txt for details)
1273		 */
1274		smp_wmb();
1275		atomic_set(&newsk->sk_refcnt, 2);
1276
1277		/*
1278		 * Increment the counter in the same struct proto as the master
1279		 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
1280		 * is the same as sk->sk_prot->socks, as this field was copied
1281		 * with memcpy).
1282		 *
1283		 * This _changes_ the previous behaviour, where
1284		 * tcp_create_openreq_child always was incrementing the
1285		 * equivalent to tcp_prot->socks (inet_sock_nr), so this have
1286		 * to be taken into account in all callers. -acme
1287		 */
1288		sk_refcnt_debug_inc(newsk);
1289		sk_set_socket(newsk, NULL);
1290		newsk->sk_wq = NULL;
1291
1292		if (newsk->sk_prot->sockets_allocated)
1293			percpu_counter_inc(newsk->sk_prot->sockets_allocated);
1294
1295		if (sock_flag(newsk, SOCK_TIMESTAMP) ||
1296		    sock_flag(newsk, SOCK_TIMESTAMPING_RX_SOFTWARE))
1297			net_enable_timestamp();
1298	}
1299out:
1300	return newsk;
1301}
1302EXPORT_SYMBOL_GPL(sk_clone);
1303
1304void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
1305{
1306	__sk_dst_set(sk, dst);
1307	sk->sk_route_caps = dst->dev->features;
1308	if (sk->sk_route_caps & NETIF_F_GSO)
1309		sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
1310	sk->sk_route_caps &= ~sk->sk_route_nocaps;
1311	if (sk_can_gso(sk)) {
1312		if (dst->header_len) {
1313			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
1314		} else {
1315			sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
1316			sk->sk_gso_max_size = dst->dev->gso_max_size;
1317		}
1318	}
1319}
1320EXPORT_SYMBOL_GPL(sk_setup_caps);
1321
1322void __init sk_init(void)
1323{
1324	if (totalram_pages <= 4096) {
1325		sysctl_wmem_max = 32767;
1326		sysctl_rmem_max = 32767;
1327		sysctl_wmem_default = 32767;
1328		sysctl_rmem_default = 32767;
1329	} else if (totalram_pages >= 131072) {
1330		sysctl_wmem_max = 131071;
1331		sysctl_rmem_max = 131071;
1332	}
1333}
1334
1335/*
1336 *	Simple resource managers for sockets.
1337 */
1338
1339
1340/*
1341 * Write buffer destructor automatically called from kfree_skb.
1342 */
1343void sock_wfree(struct sk_buff *skb)
1344{
1345	struct sock *sk = skb->sk;
1346	unsigned int len = skb->truesize;
1347
1348	if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
1349		/*
1350		 * Keep a reference on sk_wmem_alloc, this will be released
1351		 * after sk_write_space() call
1352		 */
1353		atomic_sub(len - 1, &sk->sk_wmem_alloc);
1354		sk->sk_write_space(sk);
1355		len = 1;
1356	}
1357	/*
1358	 * if sk_wmem_alloc reaches 0, we must finish what sk_free()
1359	 * could not do because of in-flight packets
1360	 */
1361	if (atomic_sub_and_test(len, &sk->sk_wmem_alloc))
1362		__sk_free(sk);
1363}
1364EXPORT_SYMBOL(sock_wfree);
1365
1366/*
1367 * Read buffer destructor automatically called from kfree_skb.
1368 */
1369void sock_rfree(struct sk_buff *skb)
1370{
1371	struct sock *sk = skb->sk;
1372	unsigned int len = skb->truesize;
1373
1374	atomic_sub(len, &sk->sk_rmem_alloc);
1375	sk_mem_uncharge(sk, len);
1376}
1377EXPORT_SYMBOL(sock_rfree);
1378
1379
1380int sock_i_uid(struct sock *sk)
1381{
1382	int uid;
1383
1384	read_lock_bh(&sk->sk_callback_lock);
1385	uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0;
1386	read_unlock_bh(&sk->sk_callback_lock);
1387	return uid;
1388}
1389EXPORT_SYMBOL(sock_i_uid);
1390
1391unsigned long sock_i_ino(struct sock *sk)
1392{
1393	unsigned long ino;
1394
1395	read_lock_bh(&sk->sk_callback_lock);
1396	ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
1397	read_unlock_bh(&sk->sk_callback_lock);
1398	return ino;
1399}
1400EXPORT_SYMBOL(sock_i_ino);
1401
1402/*
1403 * Allocate a skb from the socket's send buffer.
1404 */
1405struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
1406			     gfp_t priority)
1407{
1408	if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
1409		struct sk_buff *skb = alloc_skb(size, priority);
1410		if (skb) {
1411			skb_set_owner_w(skb, sk);
1412			return skb;
1413		}
1414	}
1415	return NULL;
1416}
1417EXPORT_SYMBOL(sock_wmalloc);
1418
1419/*
1420 * Allocate a skb from the socket's receive buffer.
1421 */
1422struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
1423			     gfp_t priority)
1424{
1425	if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
1426		struct sk_buff *skb = alloc_skb(size, priority);
1427		if (skb) {
1428			skb_set_owner_r(skb, sk);
1429			return skb;
1430		}
1431	}
1432	return NULL;
1433}
1434
1435/*
1436 * Allocate a memory block from the socket's option memory buffer.
1437 */
1438void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
1439{
1440	if ((unsigned)size <= sysctl_optmem_max &&
1441	    atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
1442		void *mem;
1443		/* First do the add, to avoid the race if kmalloc
1444		 * might sleep.
1445		 */
1446		atomic_add(size, &sk->sk_omem_alloc);
1447		mem = kmalloc(size, priority);
1448		if (mem)
1449			return mem;
1450		atomic_sub(size, &sk->sk_omem_alloc);
1451	}
1452	return NULL;
1453}
1454EXPORT_SYMBOL(sock_kmalloc);
1455
1456/*
1457 * Free an option memory block.
1458 */
1459void sock_kfree_s(struct sock *sk, void *mem, int size)
1460{
1461	kfree(mem);
1462	atomic_sub(size, &sk->sk_omem_alloc);
1463}
1464EXPORT_SYMBOL(sock_kfree_s);
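/*
 * Usage sketch (illustrative): option memory is charged to
 * sk->sk_omem_alloc and capped by sysctl_optmem_max, so the caller passes
 * the same size to both halves:
 *
 *	void *buf = sock_kmalloc(sk, len, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOBUFS;
 *	...
 *	sock_kfree_s(sk, buf, len);
 */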
1465
1466/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
1467   I think these locks should be removed for datagram sockets.
1468 */
1469static long sock_wait_for_wmem(struct sock *sk, long timeo)
1470{
1471	DEFINE_WAIT(wait);
1472
1473	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
1474	for (;;) {
1475		if (!timeo)
1476			break;
1477		if (signal_pending(current))
1478			break;
1479		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1480		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1481		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
1482			break;
1483		if (sk->sk_shutdown & SEND_SHUTDOWN)
1484			break;
1485		if (sk->sk_err)
1486			break;
1487		timeo = schedule_timeout(timeo);
1488	}
1489	finish_wait(sk_sleep(sk), &wait);
1490	return timeo;
1491}
1492
1493
1494/*
1495 *	Generic send/receive buffer handlers
1496 */
1497
1498struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
1499				     unsigned long data_len, int noblock,
1500				     int *errcode)
1501{
1502	struct sk_buff *skb;
1503	gfp_t gfp_mask;
1504	long timeo;
1505	int err;
1506
1507	gfp_mask = sk->sk_allocation;
1508	if (gfp_mask & __GFP_WAIT)
1509		gfp_mask |= __GFP_REPEAT;
1510
1511	timeo = sock_sndtimeo(sk, noblock);
1512	while (1) {
1513		err = sock_error(sk);
1514		if (err != 0)
1515			goto failure;
1516
1517		err = -EPIPE;
1518		if (sk->sk_shutdown & SEND_SHUTDOWN)
1519			goto failure;
1520
1521		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
1522			skb = alloc_skb(header_len, gfp_mask);
1523			if (skb) {
1524				int npages;
1525				int i;
1526
1527				/* No pages, we're done... */
1528				if (!data_len)
1529					break;
1530
1531				npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
1532				skb->truesize += data_len;
1533				skb_shinfo(skb)->nr_frags = npages;
1534				for (i = 0; i < npages; i++) {
1535					struct page *page;
1536					skb_frag_t *frag;
1537
1538					page = alloc_pages(sk->sk_allocation, 0);
1539					if (!page) {
1540						err = -ENOBUFS;
1541						skb_shinfo(skb)->nr_frags = i;
1542						kfree_skb(skb);
1543						goto failure;
1544					}
1545
1546					frag = &skb_shinfo(skb)->frags[i];
1547					frag->page = page;
1548					frag->page_offset = 0;
1549					frag->size = (data_len >= PAGE_SIZE ?
1550						      PAGE_SIZE :
1551						      data_len);
1552					data_len -= PAGE_SIZE;
1553				}
1554
1555				/* Full success... */
1556				break;
1557			}
1558			err = -ENOBUFS;
1559			goto failure;
1560		}
1561		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
1562		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1563		err = -EAGAIN;
1564		if (!timeo)
1565			goto failure;
1566		if (signal_pending(current))
1567			goto interrupted;
1568		timeo = sock_wait_for_wmem(sk, timeo);
1569	}
1570
1571	skb_set_owner_w(skb, sk);
1572	return skb;
1573
1574interrupted:
1575	err = sock_intr_errno(timeo);
1576failure:
1577	*errcode = err;
1578	return NULL;
1579}
1580EXPORT_SYMBOL(sock_alloc_send_pskb);
1581
1582struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
1583				    int noblock, int *errcode)
1584{
1585	return sock_alloc_send_pskb(sk, size, 0, noblock, errcode);
1586}
1587EXPORT_SYMBOL(sock_alloc_send_skb);
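/*
 * Usage sketch (illustrative, for a hypothetical datagram sendmsg path):
 *
 *	skb = sock_alloc_send_skb(sk, len,
 *				  msg->msg_flags & MSG_DONTWAIT, &err);
 *	if (!skb)
 *		return err;	// -EAGAIN, -EPIPE, -EINTR, -ENOBUFS, ...
 *
 * The skb is charged to sk_wmem_alloc via skb_set_owner_w() and is
 * credited back by sock_wfree() when it is finally freed.
 */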
1588
1589static void __lock_sock(struct sock *sk)
1590	__releases(&sk->sk_lock.slock)
1591	__acquires(&sk->sk_lock.slock)
1592{
1593	DEFINE_WAIT(wait);
1594
1595	for (;;) {
1596		prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
1597					TASK_UNINTERRUPTIBLE);
1598		spin_unlock_bh(&sk->sk_lock.slock);
1599		schedule();
1600		spin_lock_bh(&sk->sk_lock.slock);
1601		if (!sock_owned_by_user(sk))
1602			break;
1603	}
1604	finish_wait(&sk->sk_lock.wq, &wait);
1605}
1606
1607static void __release_sock(struct sock *sk)
1608	__releases(&sk->sk_lock.slock)
1609	__acquires(&sk->sk_lock.slock)
1610{
1611	struct sk_buff *skb = sk->sk_backlog.head;
1612
1613	do {
1614		sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
1615		bh_unlock_sock(sk);
1616
1617		do {
1618			struct sk_buff *next = skb->next;
1619
1620			WARN_ON_ONCE(skb_dst_is_noref(skb));
1621			skb->next = NULL;
1622			sk_backlog_rcv(sk, skb);
1623
1624			/*
1625			 * We are in process context here with softirqs
1626			 * disabled, use cond_resched_softirq() to preempt.
1627			 * This is safe to do because we've taken the backlog
1628			 * queue private:
1629			 */
1630			cond_resched_softirq();
1631
1632			skb = next;
1633		} while (skb != NULL);
1634
1635		bh_lock_sock(sk);
1636	} while ((skb = sk->sk_backlog.head) != NULL);
1637
1638	/*
1639	 * Doing the zeroing here guarantees we cannot loop forever
1640	 * while a wild producer attempts to flood us.
1641	 */
1642	sk->sk_backlog.len = 0;
1643}
1644
1645/**
1646 * sk_wait_data - wait for data to arrive at sk_receive_queue
1647 * @sk:    sock to wait on
1648 * @timeo: for how long
1649 *
1650 * Now socket state including sk->sk_err is changed only under lock,
1651 * hence we may omit checks after joining wait queue.
1652 * We check receive queue before schedule() only as optimization;
1653 * it is very likely that release_sock() added new data.
1654 */
1655int sk_wait_data(struct sock *sk, long *timeo)
1656{
1657	int rc;
1658	DEFINE_WAIT(wait);
1659
1660	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1661	set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1662	rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
1663	clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1664	finish_wait(sk_sleep(sk), &wait);
1665	return rc;
1666}
1667EXPORT_SYMBOL(sk_wait_data);
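/*
 * Usage sketch (illustrative, inside a protocol's recvmsg with the socket
 * lock held):
 *
 *	long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 *
 *	while (skb_queue_empty(&sk->sk_receive_queue)) {
 *		if (!timeo)
 *			return -EAGAIN;
 *		if (signal_pending(current))
 *			return sock_intr_errno(timeo);
 *		sk_wait_data(sk, &timeo);
 *	}
 *
 * sk_wait_event() drops and re-takes the socket lock around schedule(),
 * which is why the checks above can rely on being under the lock.
 */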
1668
1669/**
1670 *	__sk_mem_schedule - increase sk_forward_alloc and memory_allocated
1671 *	@sk: socket
1672 *	@size: memory size to allocate
1673 *	@kind: allocation type
1674 *
1675 *	If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
1676 *	rmem allocation. This function assumes that protocols which have
1677 *	memory_pressure use sk_wmem_queued as write buffer accounting.
1678 */
1679int __sk_mem_schedule(struct sock *sk, int size, int kind)
1680{
1681	struct proto *prot = sk->sk_prot;
1682	int amt = sk_mem_pages(size);
1683	long allocated;
1684
1685	sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
1686	allocated = atomic_long_add_return(amt, prot->memory_allocated);
1687
1688	/* Under limit. */
1689	if (allocated <= prot->sysctl_mem[0]) {
1690		if (prot->memory_pressure && *prot->memory_pressure)
1691			*prot->memory_pressure = 0;
1692		return 1;
1693	}
1694
1695	/* Under pressure. */
1696	if (allocated > prot->sysctl_mem[1])
1697		if (prot->enter_memory_pressure)
1698			prot->enter_memory_pressure(sk);
1699
1700	/* Over hard limit. */
1701	if (allocated > prot->sysctl_mem[2])
1702		goto suppress_allocation;
1703
1704	/* guarantee minimum buffer size under pressure */
1705	if (kind == SK_MEM_RECV) {
1706		if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
1707			return 1;
1708	} else { /* SK_MEM_SEND */
1709		if (sk->sk_type == SOCK_STREAM) {
1710			if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
1711				return 1;
1712		} else if (atomic_read(&sk->sk_wmem_alloc) <
1713			   prot->sysctl_wmem[0])
1714				return 1;
1715	}
1716
1717	if (prot->memory_pressure) {
1718		int alloc;
1719
1720		if (!*prot->memory_pressure)
1721			return 1;
1722		alloc = percpu_counter_read_positive(prot->sockets_allocated);
1723		if (prot->sysctl_mem[2] > alloc *
1724		    sk_mem_pages(sk->sk_wmem_queued +
1725				 atomic_read(&sk->sk_rmem_alloc) +
1726				 sk->sk_forward_alloc))
1727			return 1;
1728	}
1729
1730suppress_allocation:
1731
1732	if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
1733		sk_stream_moderate_sndbuf(sk);
1734
1735		/* Fail only if socket is _under_ its sndbuf.
1736		 * In this case we cannot block, so that we have to fail.
1737		 */
1738		if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
1739			return 1;
1740	}
1741
1742	trace_sock_exceed_buf_limit(sk, prot, allocated);
1743
1744	/* Alas. Undo changes. */
1745	sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
1746	atomic_long_sub(amt, prot->memory_allocated);
1747	return 0;
1748}
1749EXPORT_SYMBOL(__sk_mem_schedule);
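/*
 * Worked example (illustrative, assuming 4 KiB pages, where
 * SK_MEM_QUANTUM == PAGE_SIZE): charging size = 3000 bytes gives
 * amt = sk_mem_pages(3000) = 1, so sk_forward_alloc grows by 4096 and
 * memory_allocated by one quantum; the ~1 KiB of slack is consumed by
 * later charges before another quantum is requested.
 */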
1750
1751/**
1752 *	__sk_mem_reclaim - reclaim memory_allocated
1753 *	@sk: socket
1754 */
1755void __sk_mem_reclaim(struct sock *sk)
1756{
1757	struct proto *prot = sk->sk_prot;
1758
1759	atomic_long_sub(sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT,
1760		   prot->memory_allocated);
1761	sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;
1762
1763	if (prot->memory_pressure && *prot->memory_pressure &&
1764	    (atomic_long_read(prot->memory_allocated) < prot->sysctl_mem[0]))
1765		*prot->memory_pressure = 0;
1766}
1767EXPORT_SYMBOL(__sk_mem_reclaim);
1768
1769
1770/*
1771 * Set of default routines for initialising struct proto_ops when
1772 * the protocol does not support a particular function. In certain
1773 * cases where it makes no sense for a protocol to have a "do nothing"
1774 * function, some default processing is provided.
1775 */
1776
1777int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
1778{
1779	return -EOPNOTSUPP;
1780}
1781EXPORT_SYMBOL(sock_no_bind);
1782
1783int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
1784		    int len, int flags)
1785{
1786	return -EOPNOTSUPP;
1787}
1788EXPORT_SYMBOL(sock_no_connect);
1789
1790int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
1791{
1792	return -EOPNOTSUPP;
1793}
1794EXPORT_SYMBOL(sock_no_socketpair);
1795
1796int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
1797{
1798	return -EOPNOTSUPP;
1799}
1800EXPORT_SYMBOL(sock_no_accept);
1801
1802int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
1803		    int *len, int peer)
1804{
1805	return -EOPNOTSUPP;
1806}
1807EXPORT_SYMBOL(sock_no_getname);
1808
1809unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
1810{
1811	return 0;
1812}
1813EXPORT_SYMBOL(sock_no_poll);
1814
1815int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1816{
1817	return -EOPNOTSUPP;
1818}
1819EXPORT_SYMBOL(sock_no_ioctl);
1820
1821int sock_no_listen(struct socket *sock, int backlog)
1822{
1823	return -EOPNOTSUPP;
1824}
1825EXPORT_SYMBOL(sock_no_listen);
1826
1827int sock_no_shutdown(struct socket *sock, int how)
1828{
1829	return -EOPNOTSUPP;
1830}
1831EXPORT_SYMBOL(sock_no_shutdown);
1832
1833int sock_no_setsockopt(struct socket *sock, int level, int optname,
1834		    char __user *optval, unsigned int optlen)
1835{
1836	return -EOPNOTSUPP;
1837}
1838EXPORT_SYMBOL(sock_no_setsockopt);
1839
1840int sock_no_getsockopt(struct socket *sock, int level, int optname,
1841		    char __user *optval, int __user *optlen)
1842{
1843	return -EOPNOTSUPP;
1844}
1845EXPORT_SYMBOL(sock_no_getsockopt);
1846
1847int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
1848		    size_t len)
1849{
1850	return -EOPNOTSUPP;
1851}
1852EXPORT_SYMBOL(sock_no_sendmsg);
1853
1854int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
1855		    size_t len, int flags)
1856{
1857	return -EOPNOTSUPP;
1858}
1859EXPORT_SYMBOL(sock_no_recvmsg);
1860
1861int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
1862{
1863	/* Mirror missing mmap method error code */
1864	return -ENODEV;
1865}
1866EXPORT_SYMBOL(sock_no_mmap);
1867
1868ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
1869{
1870	ssize_t res;
1871	struct msghdr msg = {.msg_flags = flags};
1872	struct kvec iov;
1873	char *kaddr = kmap(page);
1874	iov.iov_base = kaddr + offset;
1875	iov.iov_len = size;
1876	res = kernel_sendmsg(sock, &msg, &iov, 1, size);
1877	kunmap(page);
1878	return res;
1879}
1880EXPORT_SYMBOL(sock_no_sendpage);
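/*
 * Usage sketch (illustrative, for a hypothetical family that implements
 * only part of the API): unsupported slots point at the stubs above rather
 * than being left NULL, e.g.:
 *
 *	static const struct proto_ops example_ops = {
 *		.family		= PF_EXAMPLE,	// hypothetical family
 *		.owner		= THIS_MODULE,
 *		.listen		= sock_no_listen,
 *		.socketpair	= sock_no_socketpair,
 *		.mmap		= sock_no_mmap,
 *		.sendpage	= sock_no_sendpage,
 *	};
 */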
1881
1882/*
1883 *	Default Socket Callbacks
1884 */
1885
1886static void sock_def_wakeup(struct sock *sk)
1887{
1888	struct socket_wq *wq;
1889
1890	rcu_read_lock();
1891	wq = rcu_dereference(sk->sk_wq);
1892	if (wq_has_sleeper(wq))
1893		wake_up_interruptible_all(&wq->wait);
1894	rcu_read_unlock();
1895}
1896
1897static void sock_def_error_report(struct sock *sk)
1898{
1899	struct socket_wq *wq;
1900
1901	rcu_read_lock();
1902	wq = rcu_dereference(sk->sk_wq);
1903	if (wq_has_sleeper(wq))
1904		wake_up_interruptible_poll(&wq->wait, POLLERR);
1905	sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
1906	rcu_read_unlock();
1907}
1908
1909static void sock_def_readable(struct sock *sk, int len)
1910{
1911	struct socket_wq *wq;
1912
1913	rcu_read_lock();
1914	wq = rcu_dereference(sk->sk_wq);
1915	if (wq_has_sleeper(wq))
1916		wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLPRI |
1917						POLLRDNORM | POLLRDBAND);
1918	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
1919	rcu_read_unlock();
1920}
1921
1922static void sock_def_write_space(struct sock *sk)
1923{
1924	struct socket_wq *wq;
1925
1926	rcu_read_lock();
1927
1928	/* Do not wake up a writer until he can make "significant"
1929	 * progress.  --DaveM
1930	 */
1931	if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
1932		wq = rcu_dereference(sk->sk_wq);
1933		if (wq_has_sleeper(wq))
1934			wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
1935						POLLWRNORM | POLLWRBAND);
1936
1937		/* Should agree with poll, otherwise some programs break */
1938		if (sock_writeable(sk))
1939			sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
1940	}
1941
1942	rcu_read_unlock();
1943}
1944
1945static void sock_def_destruct(struct sock *sk)
1946{
1947	kfree(sk->sk_protinfo);
1948}

void sk_send_sigurg(struct sock *sk)
{
	if (sk->sk_socket && sk->sk_socket->file)
		if (send_sigurg(&sk->sk_socket->file->f_owner))
			sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
}
EXPORT_SYMBOL(sk_send_sigurg);

void sk_reset_timer(struct sock *sk, struct timer_list *timer,
		    unsigned long expires)
{
	if (!mod_timer(timer, expires))
		sock_hold(sk);
}
EXPORT_SYMBOL(sk_reset_timer);

void sk_stop_timer(struct sock *sk, struct timer_list *timer)
{
	if (timer_pending(timer) && del_timer(timer))
		__sock_put(sk);
}
EXPORT_SYMBOL(sk_stop_timer);
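
/*
 * Usage sketch (hypothetical "example_" names): sk_reset_timer() takes a
 * reference on the socket only when the timer was not already pending, and
 * sk_stop_timer() drops it only when a pending timer is actually deleted,
 * so the sock refcount stays balanced no matter how often the timer is
 * re-armed.
 */
#if 0
static void example_arm_retransmit(struct sock *sk)
{
	/* holds a ref iff the timer was idle */
	sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ);
}

static void example_cancel_retransmit(struct sock *sk)
{
	/* drops the ref iff a pending timer was removed */
	sk_stop_timer(sk, &sk->sk_timer);
}
#endif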

void sock_init_data(struct socket *sock, struct sock *sk)
{
	skb_queue_head_init(&sk->sk_receive_queue);
	skb_queue_head_init(&sk->sk_write_queue);
	skb_queue_head_init(&sk->sk_error_queue);
#ifdef CONFIG_NET_DMA
	skb_queue_head_init(&sk->sk_async_wait_queue);
#endif

	sk->sk_send_head	=	NULL;

	init_timer(&sk->sk_timer);

	sk->sk_allocation	=	GFP_KERNEL;
	sk->sk_rcvbuf		=	sysctl_rmem_default;
	sk->sk_sndbuf		=	sysctl_wmem_default;
	sk->sk_state		=	TCP_CLOSE;
	sk_set_socket(sk, sock);

	sock_set_flag(sk, SOCK_ZAPPED);

	if (sock) {
		sk->sk_type	=	sock->type;
		sk->sk_wq	=	sock->wq;
		sock->sk	=	sk;
	} else
		sk->sk_wq	=	NULL;

	spin_lock_init(&sk->sk_dst_lock);
	rwlock_init(&sk->sk_callback_lock);
	lockdep_set_class_and_name(&sk->sk_callback_lock,
			af_callback_keys + sk->sk_family,
			af_family_clock_key_strings[sk->sk_family]);

	sk->sk_state_change	=	sock_def_wakeup;
	sk->sk_data_ready	=	sock_def_readable;
	sk->sk_write_space	=	sock_def_write_space;
	sk->sk_error_report	=	sock_def_error_report;
	sk->sk_destruct		=	sock_def_destruct;

	sk->sk_sndmsg_page	=	NULL;
	sk->sk_sndmsg_off	=	0;

	sk->sk_peer_pid		=	NULL;
	sk->sk_peer_cred	=	NULL;
	sk->sk_write_pending	=	0;
	sk->sk_rcvlowat		=	1;
	sk->sk_rcvtimeo		=	MAX_SCHEDULE_TIMEOUT;
	sk->sk_sndtimeo		=	MAX_SCHEDULE_TIMEOUT;

	sk->sk_stamp = ktime_set(-1L, 0);

	/*
	 * Before updating sk_refcnt, we must commit prior changes to memory
	 * (Documentation/RCU/rculist_nulls.txt for details)
	 */
	smp_wmb();
	atomic_set(&sk->sk_refcnt, 1);
	atomic_set(&sk->sk_drops, 0);
}
EXPORT_SYMBOL(sock_init_data);
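
/*
 * Illustrative sketch (hypothetical "example_" names): a protocol's create
 * hook typically calls sock_init_data() right after sk_alloc() and may then
 * override individual sock_def_*() defaults installed above.
 */
#if 0
static int example_create(struct net *net, struct socket *sock, int protocol,
			  int kern)
{
	struct sock *sk;

	sk = sk_alloc(net, PF_INET, GFP_KERNEL, &example_proto);
	if (!sk)
		return -ENOBUFS;

	sock_init_data(sock, sk);		/* queues, timeouts, default callbacks */
	sk->sk_write_space = example_write_space;	/* override one default */
	return 0;
}
#endif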

void lock_sock_nested(struct sock *sk, int subclass)
{
	might_sleep();
	spin_lock_bh(&sk->sk_lock.slock);
	if (sk->sk_lock.owned)
		__lock_sock(sk);
	sk->sk_lock.owned = 1;
	spin_unlock(&sk->sk_lock.slock);
	/*
	 * The sk_lock has mutex_lock() semantics here:
	 */
	mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
	local_bh_enable();
}
EXPORT_SYMBOL(lock_sock_nested);

void release_sock(struct sock *sk)
{
	/*
	 * The sk_lock has mutex_unlock() semantics:
	 */
	mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);

	spin_lock_bh(&sk->sk_lock.slock);
	if (sk->sk_backlog.tail)
		__release_sock(sk);
	sk->sk_lock.owned = 0;
	if (waitqueue_active(&sk->sk_lock.wq))
		wake_up(&sk->sk_lock.wq);
	spin_unlock_bh(&sk->sk_lock.slock);
}
EXPORT_SYMBOL(release_sock);
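
/*
 * Usage sketch (hypothetical "example_" name): process context takes the
 * socket lock around state changes; packets arriving meanwhile are queued
 * on the backlog and replayed by release_sock() via __release_sock().
 */
#if 0
static void example_set_state(struct sock *sk, int state)
{
	lock_sock(sk);		/* expands to lock_sock_nested(sk, 0) */
	sk->sk_state = state;	/* serialized against softirq receivers */
	release_sock(sk);	/* flushes backlog, wakes lock waiters */
}
#endif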

/**
 * lock_sock_fast - fast version of lock_sock
 * @sk: socket
 *
 * This version should be used for very small sections, where the process
 * won't block.
 *
 * Returns false if the fast path is taken:
 *   sk_lock.slock locked, owned = 0, BH disabled
 * Returns true if the slow path is taken:
 *   sk_lock.slock unlocked, owned = 1, BH enabled
 */
bool lock_sock_fast(struct sock *sk)
{
	might_sleep();
	spin_lock_bh(&sk->sk_lock.slock);

	if (!sk->sk_lock.owned)
		/*
		 * Fast path: return with slock held and BH disabled;
		 * unlock_sock_fast() will undo both.
		 */
		return false;

	__lock_sock(sk);
	sk->sk_lock.owned = 1;
	spin_unlock(&sk->sk_lock.slock);
	/*
	 * The sk_lock has mutex_lock() semantics here:
	 */
	mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
	local_bh_enable();
	return true;
}
EXPORT_SYMBOL(lock_sock_fast);
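
/*
 * Usage sketch (hypothetical "example_" name): lock_sock_fast() must always
 * be paired with unlock_sock_fast(), passing back the returned slow-path
 * flag so the matching unlock variant runs.
 */
#if 0
static int example_peek_queue_len(struct sock *sk)
{
	bool slow = lock_sock_fast(sk);
	int len = skb_queue_len(&sk->sk_receive_queue);

	unlock_sock_fast(sk, slow);	/* spin_unlock_bh() or release_sock() */
	return len;
}
#endif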

int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
{
	struct timeval tv;
	if (!sock_flag(sk, SOCK_TIMESTAMP))
		sock_enable_timestamp(sk, SOCK_TIMESTAMP);
	tv = ktime_to_timeval(sk->sk_stamp);
	if (tv.tv_sec == -1)
		return -ENOENT;
	if (tv.tv_sec == 0) {
		sk->sk_stamp = ktime_get_real();
		tv = ktime_to_timeval(sk->sk_stamp);
	}
	return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
}
EXPORT_SYMBOL(sock_get_timestamp);

int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
{
	struct timespec ts;
	if (!sock_flag(sk, SOCK_TIMESTAMP))
		sock_enable_timestamp(sk, SOCK_TIMESTAMP);
	ts = ktime_to_timespec(sk->sk_stamp);
	if (ts.tv_sec == -1)
		return -ENOENT;
	if (ts.tv_sec == 0) {
		sk->sk_stamp = ktime_get_real();
		ts = ktime_to_timespec(sk->sk_stamp);
	}
	return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
}
EXPORT_SYMBOL(sock_get_timestampns);
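
/*
 * Usage sketch: these two helpers back the SIOCGSTAMP/SIOCGSTAMPNS ioctls
 * in most protocols' ioctl handlers (the switch below is illustrative;
 * "example_ioctl" is a hypothetical name).
 */
#if 0
static int example_ioctl(struct socket *sock, unsigned int cmd,
			 unsigned long arg)
{
	struct sock *sk = sock->sk;

	switch (cmd) {
	case SIOCGSTAMP:
		return sock_get_timestamp(sk, (struct timeval __user *)arg);
	case SIOCGSTAMPNS:
		return sock_get_timestampns(sk, (struct timespec __user *)arg);
	default:
		return -ENOIOCTLCMD;
	}
}
#endif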

void sock_enable_timestamp(struct sock *sk, int flag)
{
	if (!sock_flag(sk, flag)) {
		sock_set_flag(sk, flag);
		/*
		 * we just set one of the two flags which require net
		 * time stamping, but time stamping might have been on
		 * already because of the other one
		 */
		if (!sock_flag(sk,
				flag == SOCK_TIMESTAMP ?
				SOCK_TIMESTAMPING_RX_SOFTWARE :
				SOCK_TIMESTAMP))
			net_enable_timestamp();
	}
}

/*
 *	Get a socket option on a socket.
 *
 *	FIX: POSIX 1003.1g is very ambiguous here. It states that
 *	asynchronous errors should be reported by getsockopt. We assume
 *	this means if you specify SO_ERROR (otherwise what's the point of it).
 */
int sock_common_getsockopt(struct socket *sock, int level, int optname,
			   char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(sock_common_getsockopt);

#ifdef CONFIG_COMPAT
int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	if (sk->sk_prot->compat_getsockopt != NULL)
		return sk->sk_prot->compat_getsockopt(sk, level, optname,
						      optval, optlen);
	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(compat_sock_common_getsockopt);
#endif

int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
			struct msghdr *msg, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	int addr_len = 0;
	int err;

	err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
				   flags & ~MSG_DONTWAIT, &addr_len);
	if (err >= 0)
		msg->msg_namelen = addr_len;
	return err;
}
EXPORT_SYMBOL(sock_common_recvmsg);

/*
 *	Set socket options on an inet socket.
 */
int sock_common_setsockopt(struct socket *sock, int level, int optname,
			   char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;

	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(sock_common_setsockopt);

#ifdef CONFIG_COMPAT
int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;

	if (sk->sk_prot->compat_setsockopt != NULL)
		return sk->sk_prot->compat_setsockopt(sk, level, optname,
						      optval, optlen);
	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(compat_sock_common_setsockopt);
#endif
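
/*
 * Wiring sketch (hypothetical "example_" name): the sock_common_*() helpers
 * let a proto_ops table forward straight to the underlying struct proto,
 * as the INET families do.
 */
#if 0
static const struct proto_ops example_ops = {
	.owner		= THIS_MODULE,
	.setsockopt	= sock_common_setsockopt,
	.getsockopt	= sock_common_getsockopt,
	.recvmsg	= sock_common_recvmsg,
	/* real handlers would fill the remaining slots */
};
#endif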

void sk_common_release(struct sock *sk)
{
	if (sk->sk_prot->destroy)
		sk->sk_prot->destroy(sk);

	/*
	 * Observation: when sk_common_release() is called, processes have
	 * no access to the socket, but the network stack still does.
	 * Step one, detach it from networking:
	 *
	 * A. Remove from hash tables.
	 */

	sk->sk_prot->unhash(sk);

	/*
	 * At this point the socket cannot receive new packets, but it is
	 * possible that some packets are in flight because some CPU runs
	 * the receiver and did a hash table lookup before we unhashed the
	 * socket. They will reach the receive queue and will be purged by
	 * the socket destructor.
	 *
	 * Also we still have packets pending on the receive queue and
	 * probably our own packets waiting in device queues. sock_destroy()
	 * will drain the receive queue, but transmitted packets will delay
	 * socket destruction until the last reference is released.
	 */

	sock_orphan(sk);

	xfrm_sk_free_policy(sk);

	sk_refcnt_debug_release(sk);
	sock_put(sk);
}
EXPORT_SYMBOL(sk_common_release);
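
/*
 * Usage sketch (hypothetical "example_" name): datagram protocols commonly
 * point their struct proto ->close at a thin wrapper that ends in
 * sk_common_release(), which unhashes, orphans and drops the final ref.
 */
#if 0
static void example_close(struct sock *sk, long timeout)
{
	sk_common_release(sk);
}
#endif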

static DEFINE_RWLOCK(proto_list_lock);
static LIST_HEAD(proto_list);

#ifdef CONFIG_PROC_FS
#define PROTO_INUSE_NR	64	/* should be enough for the first time */
struct prot_inuse {
	int val[PROTO_INUSE_NR];
};

static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);

#ifdef CONFIG_NET_NS
void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
{
	__this_cpu_add(net->core.inuse->val[prot->inuse_idx], val);
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_add);

int sock_prot_inuse_get(struct net *net, struct proto *prot)
{
	int cpu, idx = prot->inuse_idx;
	int res = 0;

	for_each_possible_cpu(cpu)
		res += per_cpu_ptr(net->core.inuse, cpu)->val[idx];

	return res >= 0 ? res : 0;
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
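
/*
 * Usage sketch (hypothetical "example_" names): protocols bump the per-cpu
 * counter by +1/-1 from their hash/unhash paths; individual per-cpu sums
 * may transiently go negative, which is why the reader clamps at zero.
 */
#if 0
static void example_hash(struct sock *sk)
{
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
}

static void example_unhash(struct sock *sk)
{
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
}
#endif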

static int __net_init sock_inuse_init_net(struct net *net)
{
	net->core.inuse = alloc_percpu(struct prot_inuse);
	return net->core.inuse ? 0 : -ENOMEM;
}

static void __net_exit sock_inuse_exit_net(struct net *net)
{
	free_percpu(net->core.inuse);
}

static struct pernet_operations net_inuse_ops = {
	.init = sock_inuse_init_net,
	.exit = sock_inuse_exit_net,
};

static __init int net_inuse_init(void)
{
	if (register_pernet_subsys(&net_inuse_ops))
		panic("Cannot initialize net inuse counters");

	return 0;
}

core_initcall(net_inuse_init);
#else
static DEFINE_PER_CPU(struct prot_inuse, prot_inuse);

void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
{
	__this_cpu_add(prot_inuse.val[prot->inuse_idx], val);
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_add);

int sock_prot_inuse_get(struct net *net, struct proto *prot)
{
	int cpu, idx = prot->inuse_idx;
	int res = 0;

	for_each_possible_cpu(cpu)
		res += per_cpu(prot_inuse, cpu).val[idx];

	return res >= 0 ? res : 0;
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
#endif

static void assign_proto_idx(struct proto *prot)
{
	prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);

	if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
		printk(KERN_ERR "PROTO_INUSE_NR exhausted\n");
		return;
	}

	set_bit(prot->inuse_idx, proto_inuse_idx);
}

static void release_proto_idx(struct proto *prot)
{
	if (prot->inuse_idx != PROTO_INUSE_NR - 1)
		clear_bit(prot->inuse_idx, proto_inuse_idx);
}
#else
static inline void assign_proto_idx(struct proto *prot)
{
}

static inline void release_proto_idx(struct proto *prot)
{
}
#endif

int proto_register(struct proto *prot, int alloc_slab)
{
	if (alloc_slab) {
		prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
					SLAB_HWCACHE_ALIGN | prot->slab_flags,
					NULL);

		if (prot->slab == NULL) {
			printk(KERN_CRIT "%s: Can't create sock SLAB cache!\n",
			       prot->name);
			goto out;
		}

		if (prot->rsk_prot != NULL) {
			prot->rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s", prot->name);
			if (prot->rsk_prot->slab_name == NULL)
				goto out_free_sock_slab;

			prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name,
								 prot->rsk_prot->obj_size, 0,
								 SLAB_HWCACHE_ALIGN, NULL);

			if (prot->rsk_prot->slab == NULL) {
				printk(KERN_CRIT "%s: Can't create request sock SLAB cache!\n",
				       prot->name);
				goto out_free_request_sock_slab_name;
			}
		}

		if (prot->twsk_prot != NULL) {
			prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);

			if (prot->twsk_prot->twsk_slab_name == NULL)
				goto out_free_request_sock_slab;

			prot->twsk_prot->twsk_slab =
				kmem_cache_create(prot->twsk_prot->twsk_slab_name,
						  prot->twsk_prot->twsk_obj_size,
						  0,
						  SLAB_HWCACHE_ALIGN |
							prot->slab_flags,
						  NULL);
			if (prot->twsk_prot->twsk_slab == NULL)
				goto out_free_timewait_sock_slab_name;
		}
	}

	write_lock(&proto_list_lock);
	list_add(&prot->node, &proto_list);
	assign_proto_idx(prot);
	write_unlock(&proto_list_lock);
	return 0;

out_free_timewait_sock_slab_name:
	kfree(prot->twsk_prot->twsk_slab_name);
out_free_request_sock_slab:
	if (prot->rsk_prot && prot->rsk_prot->slab) {
		kmem_cache_destroy(prot->rsk_prot->slab);
		prot->rsk_prot->slab = NULL;
	}
out_free_request_sock_slab_name:
	if (prot->rsk_prot)
		kfree(prot->rsk_prot->slab_name);
out_free_sock_slab:
	kmem_cache_destroy(prot->slab);
	prot->slab = NULL;
out:
	return -ENOBUFS;
}
EXPORT_SYMBOL(proto_register);

void proto_unregister(struct proto *prot)
{
	write_lock(&proto_list_lock);
	release_proto_idx(prot);
	list_del(&prot->node);
	write_unlock(&proto_list_lock);

	if (prot->slab != NULL) {
		kmem_cache_destroy(prot->slab);
		prot->slab = NULL;
	}

	if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) {
		kmem_cache_destroy(prot->rsk_prot->slab);
		kfree(prot->rsk_prot->slab_name);
		prot->rsk_prot->slab = NULL;
	}

	if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
		kmem_cache_destroy(prot->twsk_prot->twsk_slab);
		kfree(prot->twsk_prot->twsk_slab_name);
		prot->twsk_prot->twsk_slab = NULL;
	}
}
EXPORT_SYMBOL(proto_unregister);
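
/*
 * Registration sketch (hypothetical "example_" names): a protocol module
 * registers its struct proto once at init, with alloc_slab=1 so sock
 * allocations come from a dedicated kmem cache, and unregisters on exit.
 */
#if 0
static struct proto example_proto = {
	.name		= "EXAMPLE",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct sock),	/* usually a larger protocol sock */
};

static int __init example_init(void)
{
	return proto_register(&example_proto, 1);
}

static void __exit example_exit(void)
{
	proto_unregister(&example_proto);
}
#endif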

#ifdef CONFIG_PROC_FS
static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(proto_list_lock)
{
	read_lock(&proto_list_lock);
	return seq_list_start_head(&proto_list, *pos);
}

static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_list_next(v, &proto_list, pos);
}

static void proto_seq_stop(struct seq_file *seq, void *v)
	__releases(proto_list_lock)
{
	read_unlock(&proto_list_lock);
}

static char proto_method_implemented(const void *method)
{
	return method == NULL ? 'n' : 'y';
}

static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
{
	seq_printf(seq, "%-9s %4u %6d  %6ld   %-3s %6u   %-3s  %-10s "
			"%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
		   proto->name,
		   proto->obj_size,
		   sock_prot_inuse_get(seq_file_net(seq), proto),
		   proto->memory_allocated != NULL ? atomic_long_read(proto->memory_allocated) : -1L,
		   proto->memory_pressure != NULL ? *proto->memory_pressure ? "yes" : "no" : "NI",
		   proto->max_header,
		   proto->slab == NULL ? "no" : "yes",
		   module_name(proto->owner),
		   proto_method_implemented(proto->close),
		   proto_method_implemented(proto->connect),
		   proto_method_implemented(proto->disconnect),
		   proto_method_implemented(proto->accept),
		   proto_method_implemented(proto->ioctl),
		   proto_method_implemented(proto->init),
		   proto_method_implemented(proto->destroy),
		   proto_method_implemented(proto->shutdown),
		   proto_method_implemented(proto->setsockopt),
		   proto_method_implemented(proto->getsockopt),
		   proto_method_implemented(proto->sendmsg),
		   proto_method_implemented(proto->recvmsg),
		   proto_method_implemented(proto->sendpage),
		   proto_method_implemented(proto->bind),
		   proto_method_implemented(proto->backlog_rcv),
		   proto_method_implemented(proto->hash),
		   proto_method_implemented(proto->unhash),
		   proto_method_implemented(proto->get_port),
		   proto_method_implemented(proto->enter_memory_pressure));
}

static int proto_seq_show(struct seq_file *seq, void *v)
{
	if (v == &proto_list)
		seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
			   "protocol",
			   "size",
			   "sockets",
			   "memory",
			   "press",
			   "maxhdr",
			   "slab",
			   "module",
			   "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
	else
		proto_seq_printf(seq, list_entry(v, struct proto, node));
	return 0;
}
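
/*
 * Note: the table above is what `cat /proc/net/protocols` prints: one
 * header line, then one row per registered proto, with y/n flags telling
 * which methods the protocol implements, in the column order listed in
 * proto_seq_printf() above.
 */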

static const struct seq_operations proto_seq_ops = {
	.start  = proto_seq_start,
	.next   = proto_seq_next,
	.stop   = proto_seq_stop,
	.show   = proto_seq_show,
};

static int proto_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &proto_seq_ops,
			    sizeof(struct seq_net_private));
}

static const struct file_operations proto_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= proto_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};

static __net_init int proto_init_net(struct net *net)
{
	if (!proc_net_fops_create(net, "protocols", S_IRUGO, &proto_seq_fops))
		return -ENOMEM;

	return 0;
}

static __net_exit void proto_exit_net(struct net *net)
{
	proc_net_remove(net, "protocols");
}

static __net_initdata struct pernet_operations proto_net_ops = {
	.init = proto_init_net,
	.exit = proto_exit_net,
};

static int __init proto_init(void)
{
	return register_pernet_subsys(&proto_net_ops);
}

subsys_initcall(proto_init);

#endif /* PROC_FS */