   1/*
   2 * INET		An implementation of the TCP/IP protocol suite for the LINUX
   3 *		operating system.  INET is implemented using the  BSD Socket
   4 *		interface as the means of communication with the user level.
   5 *
   6 *		Generic socket support routines. Memory allocators, socket lock/release
   7 *		handler for protocols to use and generic option handler.
   8 *
   9 *
  10 * Authors:	Ross Biro
  11 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  12 *		Florian La Roche, <flla@stud.uni-sb.de>
  13 *		Alan Cox, <A.Cox@swansea.ac.uk>
  14 *
  15 * Fixes:
  16 *		Alan Cox	: 	Numerous verify_area() problems
  17 *		Alan Cox	:	Connecting on a connecting socket
  18 *					now returns an error for tcp.
  19 *		Alan Cox	:	sock->protocol is set correctly.
  20 *					and is not sometimes left as 0.
  21 *		Alan Cox	:	connect handles icmp errors on a
  22 *					connect properly. Unfortunately there
  23 *					is a restart syscall nasty there. I
  24 *					can't match BSD without hacking the C
  25 *					library. Ideas urgently sought!
  26 *		Alan Cox	:	Disallow bind() to addresses that are
  27 *					not ours - especially broadcast ones!!
  28 *		Alan Cox	:	Socket 1024 _IS_ ok for users. (fencepost)
  29 *		Alan Cox	:	sock_wfree/sock_rfree don't destroy sockets,
  30 *					instead they leave that for the DESTROY timer.
  31 *		Alan Cox	:	Clean up error flag in accept
  32 *		Alan Cox	:	TCP ack handling is buggy, the DESTROY timer
  33 *					was buggy. Put a remove_sock() in the handler
  34 *					for memory when we hit 0. Also altered the timer
  35 *					code. The ACK stuff can wait and needs major
  36 *					TCP layer surgery.
  37 *		Alan Cox	:	Fixed TCP ack bug, removed remove sock
  38 *					and fixed timer/inet_bh race.
  39 *		Alan Cox	:	Added zapped flag for TCP
  40 *		Alan Cox	:	Move kfree_skb into skbuff.c and tidied up surplus code
  41 *		Alan Cox	:	for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
  42 *		Alan Cox	:	kfree_s calls now are kfree_skbmem so we can track skb resources
  43 *		Alan Cox	:	Supports socket option broadcast now as does udp. Packet and raw need fixing.
  44 *		Alan Cox	:	Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
  45 *		Rick Sladkey	:	Relaxed UDP rules for matching packets.
  46 *		C.E.Hawkins	:	IFF_PROMISC/SIOCGHWADDR support
  47 *	Pauline Middelink	:	identd support
  48 *		Alan Cox	:	Fixed connect() taking signals I think.
  49 *		Alan Cox	:	SO_LINGER supported
  50 *		Alan Cox	:	Error reporting fixes
  51 *		Anonymous	:	inet_create tidied up (sk->reuse setting)
  52 *		Alan Cox	:	inet sockets don't set sk->type!
  53 *		Alan Cox	:	Split socket option code
  54 *		Alan Cox	:	Callbacks
  55 *		Alan Cox	:	Nagle flag for Charles & Johannes stuff
  56 *		Alex		:	Removed restriction on inet fioctl
  57 *		Alan Cox	:	Splitting INET from NET core
  58 *		Alan Cox	:	Fixed bogus SO_TYPE handling in getsockopt()
  59 *		Adam Caldwell	:	Missing return in SO_DONTROUTE/SO_DEBUG code
  60 *		Alan Cox	:	Split IP from generic code
  61 *		Alan Cox	:	New kfree_skbmem()
  62 *		Alan Cox	:	Make SO_DEBUG superuser only.
  63 *		Alan Cox	:	Allow anyone to clear SO_DEBUG
  64 *					(compatibility fix)
  65 *		Alan Cox	:	Added optimistic memory grabbing for AF_UNIX throughput.
  66 *		Alan Cox	:	Allocator for a socket is settable.
  67 *		Alan Cox	:	SO_ERROR includes soft errors.
  68 *		Alan Cox	:	Allow NULL arguments on some SO_ opts
  69 *		Alan Cox	: 	Generic socket allocation to make hooks
  70 *					easier (suggested by Craig Metz).
  71 *		Michael Pall	:	SO_ERROR returns positive errno again
  72 *              Steve Whitehouse:       Added default destructor to free
  73 *                                      protocol private data.
  74 *              Steve Whitehouse:       Added various other default routines
  75 *                                      common to several socket families.
  76 *              Chris Evans     :       Call suser() check last on F_SETOWN
  77 *		Jay Schulist	:	Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
  78 *		Andi Kleen	:	Add sock_kmalloc()/sock_kfree_s()
  79 *		Andi Kleen	:	Fix write_space callback
  80 *		Chris Evans	:	Security fixes - signedness again
  81 *		Arnaldo C. Melo :       cleanups, use skb_queue_purge
  82 *
  83 * To Fix:
  84 *
  85 *
  86 *		This program is free software; you can redistribute it and/or
  87 *		modify it under the terms of the GNU General Public License
  88 *		as published by the Free Software Foundation; either version
  89 *		2 of the License, or (at your option) any later version.
  90 */
  91
  92#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  93
  94#include <linux/capability.h>
  95#include <linux/errno.h>
  96#include <linux/errqueue.h>
  97#include <linux/types.h>
  98#include <linux/socket.h>
  99#include <linux/in.h>
 100#include <linux/kernel.h>
 101#include <linux/module.h>
 102#include <linux/proc_fs.h>
 103#include <linux/seq_file.h>
 104#include <linux/sched.h>
 105#include <linux/timer.h>
 106#include <linux/string.h>
 107#include <linux/sockios.h>
 108#include <linux/net.h>
 109#include <linux/mm.h>
 110#include <linux/slab.h>
 111#include <linux/interrupt.h>
 112#include <linux/poll.h>
 113#include <linux/tcp.h>
 114#include <linux/init.h>
 115#include <linux/highmem.h>
 116#include <linux/user_namespace.h>
 117#include <linux/static_key.h>
 118#include <linux/memcontrol.h>
 119#include <linux/prefetch.h>
 120
 121#include <asm/uaccess.h>
 122
 123#include <linux/netdevice.h>
 124#include <net/protocol.h>
 125#include <linux/skbuff.h>
 126#include <net/net_namespace.h>
 127#include <net/request_sock.h>
 128#include <net/sock.h>
 129#include <linux/net_tstamp.h>
 130#include <net/xfrm.h>
 131#include <linux/ipsec.h>
 132#include <net/cls_cgroup.h>
 133#include <net/netprio_cgroup.h>
 134
 135#include <linux/filter.h>
 136
 137#include <trace/events/sock.h>
 138
 139#ifdef CONFIG_INET
 140#include <net/tcp.h>
 141#endif
 142
 143#include <net/busy_poll.h>
 144
 145static DEFINE_MUTEX(proto_list_mutex);
 146static LIST_HEAD(proto_list);
 147
 148/**
 149 * sk_ns_capable - General socket capability test
 150 * @sk: Socket to use a capability on or through
 151 * @user_ns: The user namespace of the capability to use
 152 * @cap: The capability to use
 153 *
 154 * Test to see if the opener of the socket had the capability @cap in the
 155 * user namespace @user_ns when the socket was created, and whether the
 156 * current process has it now.
 157 */
 158bool sk_ns_capable(const struct sock *sk,
 159		   struct user_namespace *user_ns, int cap)
 160{
 161	return file_ns_capable(sk->sk_socket->file, user_ns, cap) &&
 162		ns_capable(user_ns, cap);
 163}
 164EXPORT_SYMBOL(sk_ns_capable);
 165
 166/**
 167 * sk_capable - Socket global capability test
 168 * @sk: Socket to use a capability on or through
 169 * @cap: The global capability to use
 170 *
 171 * Test to see if the opener of the socket had the capability @cap in all
 172 * user namespaces when the socket was created, and whether the current
 173 * process has it now.
 174 */
 175bool sk_capable(const struct sock *sk, int cap)
 176{
 177	return sk_ns_capable(sk, &init_user_ns, cap);
 178}
 179EXPORT_SYMBOL(sk_capable);
 180
 181/**
 182 * sk_net_capable - Network namespace socket capability test
 183 * @sk: Socket to use a capability on or through
 184 * @cap: The capability to use
 185 *
 186 * Test to see if the opener of the socket had the capability @cap over the
 187 * network namespace the socket is a member of when the socket was created,
 188 * and whether the current process has it now.
 189 */
 190bool sk_net_capable(const struct sock *sk, int cap)
 191{
 192	return sk_ns_capable(sk, sock_net(sk)->user_ns, cap);
 193}
 194EXPORT_SYMBOL(sk_net_capable);
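/*
 * A minimal usage sketch (hypothetical handler, not from this file): the
 * helpers above gate privileged socket options on both the opener's
 * capabilities at socket-creation time and the current task's.
 */
#if 0	/* illustration only */
static int example_set_privileged_opt(struct sock *sk, int val)
{
	/* Both the socket's opener and the current task need CAP_NET_ADMIN
	 * in the socket's network namespace.
	 */
	if (!sk_net_capable(sk, CAP_NET_ADMIN))
		return -EPERM;

	sk->sk_priority = val;
	return 0;
}
#endif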
 195
 196
 197#ifdef CONFIG_MEMCG_KMEM
 198int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
 199{
 200	struct proto *proto;
 201	int ret = 0;
 202
 203	mutex_lock(&proto_list_mutex);
 204	list_for_each_entry(proto, &proto_list, node) {
 205		if (proto->init_cgroup) {
 206			ret = proto->init_cgroup(memcg, ss);
 207			if (ret)
 208				goto out;
 209		}
 210	}
 211
 212	mutex_unlock(&proto_list_mutex);
 213	return ret;
 214out:
 215	list_for_each_entry_continue_reverse(proto, &proto_list, node)
 216		if (proto->destroy_cgroup)
 217			proto->destroy_cgroup(memcg);
 218	mutex_unlock(&proto_list_mutex);
 219	return ret;
 220}
 221
 222void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg)
 223{
 224	struct proto *proto;
 225
 226	mutex_lock(&proto_list_mutex);
 227	list_for_each_entry_reverse(proto, &proto_list, node)
 228		if (proto->destroy_cgroup)
 229			proto->destroy_cgroup(memcg);
 230	mutex_unlock(&proto_list_mutex);
 231}
 232#endif
 233
 234/*
 235 * Each address family might have different locking rules, so we have
 236 * one slock key per address family:
 237 */
 238static struct lock_class_key af_family_keys[AF_MAX];
 239static struct lock_class_key af_family_slock_keys[AF_MAX];
 240
 241#if defined(CONFIG_MEMCG_KMEM)
 242struct static_key memcg_socket_limit_enabled;
 243EXPORT_SYMBOL(memcg_socket_limit_enabled);
 244#endif
 245
 246/*
 247 * Make lock validator output more readable. (we pre-construct these
 248 * strings build-time, so that runtime initialization of socket
 249 * locks is fast):
 250 */
 251static const char *const af_family_key_strings[AF_MAX+1] = {
 252  "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX"     , "sk_lock-AF_INET"     ,
 253  "sk_lock-AF_AX25"  , "sk_lock-AF_IPX"      , "sk_lock-AF_APPLETALK",
 254  "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE"   , "sk_lock-AF_ATMPVC"   ,
 255  "sk_lock-AF_X25"   , "sk_lock-AF_INET6"    , "sk_lock-AF_ROSE"     ,
 256  "sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI"  , "sk_lock-AF_SECURITY" ,
 257  "sk_lock-AF_KEY"   , "sk_lock-AF_NETLINK"  , "sk_lock-AF_PACKET"   ,
 258  "sk_lock-AF_ASH"   , "sk_lock-AF_ECONET"   , "sk_lock-AF_ATMSVC"   ,
 259  "sk_lock-AF_RDS"   , "sk_lock-AF_SNA"      , "sk_lock-AF_IRDA"     ,
 260  "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE"  , "sk_lock-AF_LLC"      ,
 261  "sk_lock-27"       , "sk_lock-28"          , "sk_lock-AF_CAN"      ,
 262  "sk_lock-AF_TIPC"  , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV"        ,
 263  "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN"     , "sk_lock-AF_PHONET"   ,
 264  "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG"      ,
 265  "sk_lock-AF_NFC"   , "sk_lock-AF_VSOCK"    , "sk_lock-AF_MAX"
 266};
 267static const char *const af_family_slock_key_strings[AF_MAX+1] = {
 268  "slock-AF_UNSPEC", "slock-AF_UNIX"     , "slock-AF_INET"     ,
 269  "slock-AF_AX25"  , "slock-AF_IPX"      , "slock-AF_APPLETALK",
 270  "slock-AF_NETROM", "slock-AF_BRIDGE"   , "slock-AF_ATMPVC"   ,
 271  "slock-AF_X25"   , "slock-AF_INET6"    , "slock-AF_ROSE"     ,
 272  "slock-AF_DECnet", "slock-AF_NETBEUI"  , "slock-AF_SECURITY" ,
 273  "slock-AF_KEY"   , "slock-AF_NETLINK"  , "slock-AF_PACKET"   ,
 274  "slock-AF_ASH"   , "slock-AF_ECONET"   , "slock-AF_ATMSVC"   ,
 275  "slock-AF_RDS"   , "slock-AF_SNA"      , "slock-AF_IRDA"     ,
 276  "slock-AF_PPPOX" , "slock-AF_WANPIPE"  , "slock-AF_LLC"      ,
 277  "slock-27"       , "slock-28"          , "slock-AF_CAN"      ,
 278  "slock-AF_TIPC"  , "slock-AF_BLUETOOTH", "slock-AF_IUCV"     ,
 279  "slock-AF_RXRPC" , "slock-AF_ISDN"     , "slock-AF_PHONET"   ,
 280  "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG"      ,
 281  "slock-AF_NFC"   , "slock-AF_VSOCK"    ,"slock-AF_MAX"
 282};
 283static const char *const af_family_clock_key_strings[AF_MAX+1] = {
 284  "clock-AF_UNSPEC", "clock-AF_UNIX"     , "clock-AF_INET"     ,
 285  "clock-AF_AX25"  , "clock-AF_IPX"      , "clock-AF_APPLETALK",
 286  "clock-AF_NETROM", "clock-AF_BRIDGE"   , "clock-AF_ATMPVC"   ,
 287  "clock-AF_X25"   , "clock-AF_INET6"    , "clock-AF_ROSE"     ,
 288  "clock-AF_DECnet", "clock-AF_NETBEUI"  , "clock-AF_SECURITY" ,
 289  "clock-AF_KEY"   , "clock-AF_NETLINK"  , "clock-AF_PACKET"   ,
 290  "clock-AF_ASH"   , "clock-AF_ECONET"   , "clock-AF_ATMSVC"   ,
 291  "clock-AF_RDS"   , "clock-AF_SNA"      , "clock-AF_IRDA"     ,
 292  "clock-AF_PPPOX" , "clock-AF_WANPIPE"  , "clock-AF_LLC"      ,
 293  "clock-27"       , "clock-28"          , "clock-AF_CAN"      ,
 294  "clock-AF_TIPC"  , "clock-AF_BLUETOOTH", "clock-AF_IUCV"     ,
 295  "clock-AF_RXRPC" , "clock-AF_ISDN"     , "clock-AF_PHONET"   ,
 296  "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG"      ,
 297  "clock-AF_NFC"   , "clock-AF_VSOCK"    , "clock-AF_MAX"
 298};
 299
 300/*
 301 * sk_callback_lock locking rules are per-address-family,
 302 * so split the lock classes by using a per-AF key:
 303 */
 304static struct lock_class_key af_callback_keys[AF_MAX];
 305
 306/* Take into consideration the size of the struct sk_buff overhead in the
 307 * determination of these values, since that is non-constant across
 308 * platforms.  This makes socket queueing behavior and performance
 309 * not depend upon such differences.
 310 */
 311#define _SK_MEM_PACKETS		256
 312#define _SK_MEM_OVERHEAD	SKB_TRUESIZE(256)
 313#define SK_WMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
 314#define SK_RMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
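/* For scale: with _SK_MEM_PACKETS == 256 the defaults below budget for
 * roughly 256 fully-padded 256-byte packets per socket; the absolute byte
 * value depends on SKB_TRUESIZE(), and therefore on the architecture.
 */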
 315
 316/* Run time adjustable parameters. */
 317__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
 318EXPORT_SYMBOL(sysctl_wmem_max);
 319__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
 320EXPORT_SYMBOL(sysctl_rmem_max);
 321__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
 322__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;
 323
 324/* Maximal space eaten by iovec or ancillary data plus some space */
 325int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
 326EXPORT_SYMBOL(sysctl_optmem_max);
 327
 328struct static_key memalloc_socks = STATIC_KEY_INIT_FALSE;
 329EXPORT_SYMBOL_GPL(memalloc_socks);
 330
 331/**
 332 * sk_set_memalloc - sets %SOCK_MEMALLOC
 333 * @sk: socket to set it on
 334 *
 335 * Set %SOCK_MEMALLOC on a socket for access to emergency reserves.
 336 * It's the responsibility of the admin to adjust min_free_kbytes
 337 * to meet the requirements
 338 */
 339void sk_set_memalloc(struct sock *sk)
 340{
 341	sock_set_flag(sk, SOCK_MEMALLOC);
 342	sk->sk_allocation |= __GFP_MEMALLOC;
 343	static_key_slow_inc(&memalloc_socks);
 344}
 345EXPORT_SYMBOL_GPL(sk_set_memalloc);
 346
 347void sk_clear_memalloc(struct sock *sk)
 348{
 349	sock_reset_flag(sk, SOCK_MEMALLOC);
 350	sk->sk_allocation &= ~__GFP_MEMALLOC;
 351	static_key_slow_dec(&memalloc_socks);
 352
 353	/*
 354	 * SOCK_MEMALLOC is allowed to ignore rmem limits to ensure forward
 355	 * progress of swapping. However, if SOCK_MEMALLOC is cleared while
 356	 * it has rmem allocations there is a risk that the user of the
 357	 * socket cannot make forward progress due to exceeding the rmem
 358	 * limits. By rights, sk_clear_memalloc() should only be called
 359	 * on sockets being torn down but warn and reset the accounting if
 360	 * that assumption breaks.
 361	 */
 362	if (WARN_ON(sk->sk_forward_alloc))
 363		sk_mem_reclaim(sk);
 364}
 365EXPORT_SYMBOL_GPL(sk_clear_memalloc);
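/*
 * A minimal usage sketch (hypothetical caller): a driver swapping over the
 * network would tag its transport socket so its traffic may dip into the
 * memalloc reserves, and clear the flag while tearing the socket down.
 */
#if 0	/* illustration only */
	sk_set_memalloc(sock->sk);	/* before swap I/O starts */
	/* ... socket carries swap traffic ... */
	sk_clear_memalloc(sock->sk);	/* on teardown, queues drained */
#endif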
 366
 367int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 368{
 369	int ret;
 370	unsigned long pflags = current->flags;
 371
 372	/* these should have been dropped before queueing */
 373	BUG_ON(!sock_flag(sk, SOCK_MEMALLOC));
 374
 375	current->flags |= PF_MEMALLOC;
 376	ret = sk->sk_backlog_rcv(sk, skb);
 377	tsk_restore_flags(current, pflags, PF_MEMALLOC);
 378
 379	return ret;
 380}
 381EXPORT_SYMBOL(__sk_backlog_rcv);
 382
 383static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
 384{
 385	struct timeval tv;
 386
 387	if (optlen < sizeof(tv))
 388		return -EINVAL;
 389	if (copy_from_user(&tv, optval, sizeof(tv)))
 390		return -EFAULT;
 391	if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
 392		return -EDOM;
 393
 394	if (tv.tv_sec < 0) {
 395		static int warned __read_mostly;
 396
 397		*timeo_p = 0;
 398		if (warned < 10 && net_ratelimit()) {
 399			warned++;
 400			pr_info("%s: `%s' (pid %d) tries to set negative timeout\n",
 401				__func__, current->comm, task_pid_nr(current));
 402		}
 403		return 0;
 404	}
 405	*timeo_p = MAX_SCHEDULE_TIMEOUT;
 406	if (tv.tv_sec == 0 && tv.tv_usec == 0)
 407		return 0;
 408	if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
 409		*timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
 410	return 0;
 411}
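/*
 * A worked example of the conversion above, assuming HZ == 100: for
 * tv = { .tv_sec = 2, .tv_usec = 500000 }, 1000000/HZ is 10000, so
 * *timeo_p = 2 * 100 + (500000 + 9999) / 10000 = 250 jiffies; the
 * microsecond part is rounded up to the next tick.
 */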
 412
 413static void sock_warn_obsolete_bsdism(const char *name)
 414{
 415	static int warned;
 416	static char warncomm[TASK_COMM_LEN];
 417	if (strcmp(warncomm, current->comm) && warned < 5) {
 418		strcpy(warncomm,  current->comm);
 419		pr_warn("process `%s' is using obsolete %s SO_BSDCOMPAT\n",
 420			warncomm, name);
 421		warned++;
 422	}
 423}
 424
 425#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))
 426
 427static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
 428{
 429	if (sk->sk_flags & flags) {
 430		sk->sk_flags &= ~flags;
 431		if (!(sk->sk_flags & SK_FLAGS_TIMESTAMP))
 432			net_disable_timestamp();
 433	}
 434}
 435
 436
 437int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 438{
 439	int err;
 440	int skb_len;
 441	unsigned long flags;
 442	struct sk_buff_head *list = &sk->sk_receive_queue;
 443
 444	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
 445		atomic_inc(&sk->sk_drops);
 446		trace_sock_rcvqueue_full(sk, skb);
 447		return -ENOMEM;
 448	}
 449
 450	err = sk_filter(sk, skb);
 451	if (err)
 452		return err;
 453
 454	if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
 455		atomic_inc(&sk->sk_drops);
 456		return -ENOBUFS;
 457	}
 458
 459	skb->dev = NULL;
 460	skb_set_owner_r(skb, sk);
 461
 462	/* Cache the SKB length before we tack it onto the receive
 463	 * queue.  Once it is added it no longer belongs to us and
 464	 * may be freed by other threads of control pulling packets
 465	 * from the queue.
 466	 */
 467	skb_len = skb->len;
 468
 469	/* We escape from the rcu protected region; make sure we don't leak
 470	 * a non-refcounted dst.
 471	 */
 472	skb_dst_force(skb);
 473
 474	spin_lock_irqsave(&list->lock, flags);
 475	skb->dropcount = atomic_read(&sk->sk_drops);
 476	__skb_queue_tail(list, skb);
 477	spin_unlock_irqrestore(&list->lock, flags);
 478
 479	if (!sock_flag(sk, SOCK_DEAD))
 480		sk->sk_data_ready(sk);
 481	return 0;
 482}
 483EXPORT_SYMBOL(sock_queue_rcv_skb);
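/*
 * A minimal sketch (hypothetical protocol) of the usual calling pattern:
 * on failure the caller still owns the skb and must free it itself.
 */
#if 0	/* illustration only */
static int example_proto_rcv(struct sock *sk, struct sk_buff *skb)
{
	if (sock_queue_rcv_skb(sk, skb) < 0) {
		kfree_skb(skb);
		return NET_RX_DROP;
	}
	return NET_RX_SUCCESS;
}
#endif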
 484
 485int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
 486{
 487	int rc = NET_RX_SUCCESS;
 488
 489	if (sk_filter(sk, skb))
 490		goto discard_and_relse;
 491
 492	skb->dev = NULL;
 493
 494	if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
 495		atomic_inc(&sk->sk_drops);
 496		goto discard_and_relse;
 497	}
 498	if (nested)
 499		bh_lock_sock_nested(sk);
 500	else
 501		bh_lock_sock(sk);
 502	if (!sock_owned_by_user(sk)) {
 503		/*
 504		 * trylock + unlock semantics:
 505		 */
 506		mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);
 507
 508		rc = sk_backlog_rcv(sk, skb);
 509
 510		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
 511	} else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
 512		bh_unlock_sock(sk);
 513		atomic_inc(&sk->sk_drops);
 514		goto discard_and_relse;
 515	}
 516
 517	bh_unlock_sock(sk);
 518out:
 519	sock_put(sk);
 520	return rc;
 521discard_and_relse:
 522	kfree_skb(skb);
 523	goto out;
 524}
 525EXPORT_SYMBOL(sk_receive_skb);
 526
 527struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
 528{
 529	struct dst_entry *dst = __sk_dst_get(sk);
 530
 531	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
 532		sk_tx_queue_clear(sk);
 533		RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
 534		dst_release(dst);
 535		return NULL;
 536	}
 537
 538	return dst;
 539}
 540EXPORT_SYMBOL(__sk_dst_check);
 541
 542struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
 543{
 544	struct dst_entry *dst = sk_dst_get(sk);
 545
 546	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
 547		sk_dst_reset(sk);
 548		dst_release(dst);
 549		return NULL;
 550	}
 551
 552	return dst;
 553}
 554EXPORT_SYMBOL(sk_dst_check);
 555
 556static int sock_setbindtodevice(struct sock *sk, char __user *optval,
 557				int optlen)
 558{
 559	int ret = -ENOPROTOOPT;
 560#ifdef CONFIG_NETDEVICES
 561	struct net *net = sock_net(sk);
 562	char devname[IFNAMSIZ];
 563	int index;
 564
 565	/* Sorry... */
 566	ret = -EPERM;
 567	if (!ns_capable(net->user_ns, CAP_NET_RAW))
 568		goto out;
 569
 570	ret = -EINVAL;
 571	if (optlen < 0)
 572		goto out;
 573
 574	/* Bind this socket to a particular device like "eth0",
 575	 * as specified in the passed interface name. If the
 576	 * name is "" or the option length is zero the socket
 577	 * is not bound.
 578	 */
 579	if (optlen > IFNAMSIZ - 1)
 580		optlen = IFNAMSIZ - 1;
 581	memset(devname, 0, sizeof(devname));
 582
 583	ret = -EFAULT;
 584	if (copy_from_user(devname, optval, optlen))
 585		goto out;
 586
 587	index = 0;
 588	if (devname[0] != '\0') {
 589		struct net_device *dev;
 590
 591		rcu_read_lock();
 592		dev = dev_get_by_name_rcu(net, devname);
 593		if (dev)
 594			index = dev->ifindex;
 595		rcu_read_unlock();
 596		ret = -ENODEV;
 597		if (!dev)
 598			goto out;
 599	}
 600
 601	lock_sock(sk);
 602	sk->sk_bound_dev_if = index;
 603	sk_dst_reset(sk);
 604	release_sock(sk);
 605
 606	ret = 0;
 607
 608out:
 609#endif
 610
 611	return ret;
 612}
 613
 614static int sock_getbindtodevice(struct sock *sk, char __user *optval,
 615				int __user *optlen, int len)
 616{
 617	int ret = -ENOPROTOOPT;
 618#ifdef CONFIG_NETDEVICES
 619	struct net *net = sock_net(sk);
 620	char devname[IFNAMSIZ];
 621
 622	if (sk->sk_bound_dev_if == 0) {
 623		len = 0;
 624		goto zero;
 625	}
 626
 627	ret = -EINVAL;
 628	if (len < IFNAMSIZ)
 629		goto out;
 630
 631	ret = netdev_get_name(net, devname, sk->sk_bound_dev_if);
 632	if (ret)
 633		goto out;
 634
 635	len = strlen(devname) + 1;
 636
 637	ret = -EFAULT;
 638	if (copy_to_user(optval, devname, len))
 639		goto out;
 640
 641zero:
 642	ret = -EFAULT;
 643	if (put_user(len, optlen))
 644		goto out;
 645
 646	ret = 0;
 647
 648out:
 649#endif
 650
 651	return ret;
 652}
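/*
 * Userspace view of the pair above (a sketch; "eth0" is just an example
 * interface name):
 */
#if 0	/* illustration only */
	char name[IFNAMSIZ];
	socklen_t len = sizeof(name);

	setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, "eth0", strlen("eth0") + 1);
	getsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, name, &len);
#endif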
 653
 654static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
 655{
 656	if (valbool)
 657		sock_set_flag(sk, bit);
 658	else
 659		sock_reset_flag(sk, bit);
 660}
 661
 662/*
 663 *	This is meant for all protocols to use and covers goings on
 664 *	at the socket level. Everything here is generic.
 665 */
 666
 667int sock_setsockopt(struct socket *sock, int level, int optname,
 668		    char __user *optval, unsigned int optlen)
 669{
 670	struct sock *sk = sock->sk;
 671	int val;
 672	int valbool;
 673	struct linger ling;
 674	int ret = 0;
 675
 676	/*
 677	 *	Options without arguments
 678	 */
 679
 680	if (optname == SO_BINDTODEVICE)
 681		return sock_setbindtodevice(sk, optval, optlen);
 682
 683	if (optlen < sizeof(int))
 684		return -EINVAL;
 685
 686	if (get_user(val, (int __user *)optval))
 687		return -EFAULT;
 688
 689	valbool = val ? 1 : 0;
 690
 691	lock_sock(sk);
 692
 693	switch (optname) {
 694	case SO_DEBUG:
 695		if (val && !capable(CAP_NET_ADMIN))
 696			ret = -EACCES;
 697		else
 698			sock_valbool_flag(sk, SOCK_DBG, valbool);
 699		break;
 700	case SO_REUSEADDR:
 701		sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
 702		break;
 703	case SO_REUSEPORT:
 704		sk->sk_reuseport = valbool;
 705		break;
 706	case SO_TYPE:
 707	case SO_PROTOCOL:
 708	case SO_DOMAIN:
 709	case SO_ERROR:
 710		ret = -ENOPROTOOPT;
 711		break;
 712	case SO_DONTROUTE:
 713		sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
 714		break;
 715	case SO_BROADCAST:
 716		sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
 717		break;
 718	case SO_SNDBUF:
 719		/* Don't return an error on this; BSD doesn't, and if
 720		 * you think about it, this is right. Otherwise apps
 721		 * would have to play 'guess the biggest size' games.
 722		 * RCVBUF/SNDBUF are treated in BSD as hints.
 723		 */
 724		val = min_t(u32, val, sysctl_wmem_max);
 725set_sndbuf:
 726		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
 727		sk->sk_sndbuf = max_t(u32, val * 2, SOCK_MIN_SNDBUF);
 728		/* Wake up sending tasks if we upped the value. */
 729		sk->sk_write_space(sk);
 730		break;
 731
 732	case SO_SNDBUFFORCE:
 733		if (!capable(CAP_NET_ADMIN)) {
 734			ret = -EPERM;
 735			break;
 736		}
 737		goto set_sndbuf;
 738
 739	case SO_RCVBUF:
 740		/* Don't return an error on this; BSD doesn't, and if
 741		 * you think about it, this is right. Otherwise apps
 742		 * would have to play 'guess the biggest size' games.
 743		 * RCVBUF/SNDBUF are treated in BSD as hints.
 744		 */
 745		val = min_t(u32, val, sysctl_rmem_max);
 746set_rcvbuf:
 747		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
 748		/*
 749		 * We double it on the way in to account for
 750		 * "struct sk_buff" etc. overhead.   Applications
 751		 * assume that the SO_RCVBUF setting they make will
 752		 * allow that much actual data to be received on that
 753		 * socket.
 754		 *
 755		 * Applications are unaware that "struct sk_buff" and
 756		 * other overheads allocate from the receive buffer
 757		 * during socket buffer allocation.
 758		 *
 759		 * And after considering the possible alternatives,
 760		 * returning the value we actually used in getsockopt
 761		 * is the most desirable behavior.
 762		 */
 763		sk->sk_rcvbuf = max_t(u32, val * 2, SOCK_MIN_RCVBUF);
 764		break;
 765
 766	case SO_RCVBUFFORCE:
 767		if (!capable(CAP_NET_ADMIN)) {
 768			ret = -EPERM;
 769			break;
 770		}
 771		goto set_rcvbuf;
 772
 773	case SO_KEEPALIVE:
 774#ifdef CONFIG_INET
 775		if (sk->sk_protocol == IPPROTO_TCP &&
 776		    sk->sk_type == SOCK_STREAM)
 777			tcp_set_keepalive(sk, valbool);
 778#endif
 779		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
 780		break;
 781
 782	case SO_OOBINLINE:
 783		sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
 784		break;
 785
 786	case SO_NO_CHECK:
 787		sk->sk_no_check = valbool;
 788		break;
 789
 790	case SO_PRIORITY:
 791		if ((val >= 0 && val <= 6) ||
 792		    ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
 793			sk->sk_priority = val;
 794		else
 795			ret = -EPERM;
 796		break;
 797
 798	case SO_LINGER:
 799		if (optlen < sizeof(ling)) {
 800			ret = -EINVAL;	/* 1003.1g */
 801			break;
 802		}
 803		if (copy_from_user(&ling, optval, sizeof(ling))) {
 804			ret = -EFAULT;
 805			break;
 806		}
 807		if (!ling.l_onoff)
 808			sock_reset_flag(sk, SOCK_LINGER);
 809		else {
 810#if (BITS_PER_LONG == 32)
 811			if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
 812				sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
 813			else
 814#endif
 815				sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
 816			sock_set_flag(sk, SOCK_LINGER);
 817		}
 818		break;
 819
 820	case SO_BSDCOMPAT:
 821		sock_warn_obsolete_bsdism("setsockopt");
 822		break;
 823
 824	case SO_PASSCRED:
 825		if (valbool)
 826			set_bit(SOCK_PASSCRED, &sock->flags);
 827		else
 828			clear_bit(SOCK_PASSCRED, &sock->flags);
 829		break;
 830
 831	case SO_TIMESTAMP:
 832	case SO_TIMESTAMPNS:
 833		if (valbool)  {
 834			if (optname == SO_TIMESTAMP)
 835				sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
 836			else
 837				sock_set_flag(sk, SOCK_RCVTSTAMPNS);
 838			sock_set_flag(sk, SOCK_RCVTSTAMP);
 839			sock_enable_timestamp(sk, SOCK_TIMESTAMP);
 840		} else {
 841			sock_reset_flag(sk, SOCK_RCVTSTAMP);
 842			sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
 843		}
 844		break;
 845
 846	case SO_TIMESTAMPING:
 847		if (val & ~SOF_TIMESTAMPING_MASK) {
 848			ret = -EINVAL;
 849			break;
 850		}
 851		sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE,
 852				  val & SOF_TIMESTAMPING_TX_HARDWARE);
 853		sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE,
 854				  val & SOF_TIMESTAMPING_TX_SOFTWARE);
 855		sock_valbool_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE,
 856				  val & SOF_TIMESTAMPING_RX_HARDWARE);
 857		if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
 858			sock_enable_timestamp(sk,
 859					      SOCK_TIMESTAMPING_RX_SOFTWARE);
 860		else
 861			sock_disable_timestamp(sk,
 862					       (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
 863		sock_valbool_flag(sk, SOCK_TIMESTAMPING_SOFTWARE,
 864				  val & SOF_TIMESTAMPING_SOFTWARE);
 865		sock_valbool_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE,
 866				  val & SOF_TIMESTAMPING_SYS_HARDWARE);
 867		sock_valbool_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE,
 868				  val & SOF_TIMESTAMPING_RAW_HARDWARE);
 869		break;
 870
 871	case SO_RCVLOWAT:
 872		if (val < 0)
 873			val = INT_MAX;
 874		sk->sk_rcvlowat = val ? : 1;
 875		break;
 876
 877	case SO_RCVTIMEO:
 878		ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
 879		break;
 880
 881	case SO_SNDTIMEO:
 882		ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
 883		break;
 884
 885	case SO_ATTACH_FILTER:
 886		ret = -EINVAL;
 887		if (optlen == sizeof(struct sock_fprog)) {
 888			struct sock_fprog fprog;
 889
 890			ret = -EFAULT;
 891			if (copy_from_user(&fprog, optval, sizeof(fprog)))
 892				break;
 893
 894			ret = sk_attach_filter(&fprog, sk);
 895		}
 896		break;
 897
 898	case SO_DETACH_FILTER:
 899		ret = sk_detach_filter(sk);
 900		break;
 901
 902	case SO_LOCK_FILTER:
 903		if (sock_flag(sk, SOCK_FILTER_LOCKED) && !valbool)
 904			ret = -EPERM;
 905		else
 906			sock_valbool_flag(sk, SOCK_FILTER_LOCKED, valbool);
 907		break;
 908
 909	case SO_PASSSEC:
 910		if (valbool)
 911			set_bit(SOCK_PASSSEC, &sock->flags);
 912		else
 913			clear_bit(SOCK_PASSSEC, &sock->flags);
 914		break;
 915	case SO_MARK:
 916		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
 917			ret = -EPERM;
 918		else
 919			sk->sk_mark = val;
 920		break;
 921
 922		/* We implement SO_SNDLOWAT etc. as not settable
 923		   (1003.1g 5.3). */
 924	case SO_RXQ_OVFL:
 925		sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
 926		break;
 927
 928	case SO_WIFI_STATUS:
 929		sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
 930		break;
 931
 932	case SO_PEEK_OFF:
 933		if (sock->ops->set_peek_off)
 934			ret = sock->ops->set_peek_off(sk, val);
 935		else
 936			ret = -EOPNOTSUPP;
 937		break;
 938
 939	case SO_NOFCS:
 940		sock_valbool_flag(sk, SOCK_NOFCS, valbool);
 941		break;
 942
 943	case SO_SELECT_ERR_QUEUE:
 944		sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool);
 945		break;
 946
 947#ifdef CONFIG_NET_RX_BUSY_POLL
 948	case SO_BUSY_POLL:
 949		/* allow unprivileged users to decrease the value */
 950		if ((val > sk->sk_ll_usec) && !capable(CAP_NET_ADMIN))
 951			ret = -EPERM;
 952		else {
 953			if (val < 0)
 954				ret = -EINVAL;
 955			else
 956				sk->sk_ll_usec = val;
 957		}
 958		break;
 959#endif
 960
 961	case SO_MAX_PACING_RATE:
 962		sk->sk_max_pacing_rate = val;
 963		sk->sk_pacing_rate = min(sk->sk_pacing_rate,
 964					 sk->sk_max_pacing_rate);
 965		break;
 966
 967	default:
 968		ret = -ENOPROTOOPT;
 969		break;
 970	}
 971	release_sock(sk);
 972	return ret;
 973}
 974EXPORT_SYMBOL(sock_setsockopt);
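/*
 * The doubling documented at SO_RCVBUF above is visible from userspace;
 * a sketch (assuming 65536 <= sysctl_rmem_max):
 */
#if 0	/* illustration only */
	int val = 65536, out;
	socklen_t len = sizeof(out);

	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, sizeof(val));
	getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &out, &len);
	/* out is now 131072, i.e. 2 * val */
#endif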
 975
 976
 977static void cred_to_ucred(struct pid *pid, const struct cred *cred,
 978			  struct ucred *ucred)
 979{
 980	ucred->pid = pid_vnr(pid);
 981	ucred->uid = ucred->gid = -1;
 982	if (cred) {
 983		struct user_namespace *current_ns = current_user_ns();
 984
 985		ucred->uid = from_kuid_munged(current_ns, cred->euid);
 986		ucred->gid = from_kgid_munged(current_ns, cred->egid);
 987	}
 988}
 989
 990int sock_getsockopt(struct socket *sock, int level, int optname,
 991		    char __user *optval, int __user *optlen)
 992{
 993	struct sock *sk = sock->sk;
 994
 995	union {
 996		int val;
 997		struct linger ling;
 998		struct timeval tm;
 999	} v;
1000
1001	int lv = sizeof(int);
1002	int len;
1003
1004	if (get_user(len, optlen))
1005		return -EFAULT;
1006	if (len < 0)
1007		return -EINVAL;
1008
1009	memset(&v, 0, sizeof(v));
1010
1011	switch (optname) {
1012	case SO_DEBUG:
1013		v.val = sock_flag(sk, SOCK_DBG);
1014		break;
1015
1016	case SO_DONTROUTE:
1017		v.val = sock_flag(sk, SOCK_LOCALROUTE);
1018		break;
1019
1020	case SO_BROADCAST:
1021		v.val = sock_flag(sk, SOCK_BROADCAST);
1022		break;
1023
1024	case SO_SNDBUF:
1025		v.val = sk->sk_sndbuf;
1026		break;
1027
1028	case SO_RCVBUF:
1029		v.val = sk->sk_rcvbuf;
1030		break;
1031
1032	case SO_REUSEADDR:
1033		v.val = sk->sk_reuse;
1034		break;
1035
1036	case SO_REUSEPORT:
1037		v.val = sk->sk_reuseport;
1038		break;
1039
1040	case SO_KEEPALIVE:
1041		v.val = sock_flag(sk, SOCK_KEEPOPEN);
1042		break;
1043
1044	case SO_TYPE:
1045		v.val = sk->sk_type;
1046		break;
1047
1048	case SO_PROTOCOL:
1049		v.val = sk->sk_protocol;
1050		break;
1051
1052	case SO_DOMAIN:
1053		v.val = sk->sk_family;
1054		break;
1055
1056	case SO_ERROR:
1057		v.val = -sock_error(sk);
1058		if (v.val == 0)
1059			v.val = xchg(&sk->sk_err_soft, 0);
1060		break;
1061
1062	case SO_OOBINLINE:
1063		v.val = sock_flag(sk, SOCK_URGINLINE);
1064		break;
1065
1066	case SO_NO_CHECK:
1067		v.val = sk->sk_no_check;
1068		break;
1069
1070	case SO_PRIORITY:
1071		v.val = sk->sk_priority;
1072		break;
1073
1074	case SO_LINGER:
1075		lv		= sizeof(v.ling);
1076		v.ling.l_onoff	= sock_flag(sk, SOCK_LINGER);
1077		v.ling.l_linger	= sk->sk_lingertime / HZ;
1078		break;
1079
1080	case SO_BSDCOMPAT:
1081		sock_warn_obsolete_bsdism("getsockopt");
1082		break;
1083
1084	case SO_TIMESTAMP:
1085		v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
1086				!sock_flag(sk, SOCK_RCVTSTAMPNS);
1087		break;
1088
1089	case SO_TIMESTAMPNS:
1090		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
1091		break;
1092
1093	case SO_TIMESTAMPING:
1094		v.val = 0;
1095		if (sock_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE))
1096			v.val |= SOF_TIMESTAMPING_TX_HARDWARE;
1097		if (sock_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE))
1098			v.val |= SOF_TIMESTAMPING_TX_SOFTWARE;
1099		if (sock_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE))
1100			v.val |= SOF_TIMESTAMPING_RX_HARDWARE;
1101		if (sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE))
1102			v.val |= SOF_TIMESTAMPING_RX_SOFTWARE;
1103		if (sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE))
1104			v.val |= SOF_TIMESTAMPING_SOFTWARE;
1105		if (sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE))
1106			v.val |= SOF_TIMESTAMPING_SYS_HARDWARE;
1107		if (sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE))
1108			v.val |= SOF_TIMESTAMPING_RAW_HARDWARE;
1109		break;
1110
1111	case SO_RCVTIMEO:
1112		lv = sizeof(struct timeval);
1113		if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
1114			v.tm.tv_sec = 0;
1115			v.tm.tv_usec = 0;
1116		} else {
1117			v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
1118			v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
1119		}
1120		break;
1121
1122	case SO_SNDTIMEO:
1123		lv = sizeof(struct timeval);
1124		if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
1125			v.tm.tv_sec = 0;
1126			v.tm.tv_usec = 0;
1127		} else {
1128			v.tm.tv_sec = sk->sk_sndtimeo / HZ;
1129			v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
1130		}
1131		break;
1132
1133	case SO_RCVLOWAT:
1134		v.val = sk->sk_rcvlowat;
1135		break;
1136
1137	case SO_SNDLOWAT:
1138		v.val = 1;
1139		break;
1140
1141	case SO_PASSCRED:
1142		v.val = !!test_bit(SOCK_PASSCRED, &sock->flags);
1143		break;
1144
1145	case SO_PEERCRED:
1146	{
1147		struct ucred peercred;
1148		if (len > sizeof(peercred))
1149			len = sizeof(peercred);
1150		cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
1151		if (copy_to_user(optval, &peercred, len))
1152			return -EFAULT;
1153		goto lenout;
1154	}
1155
1156	case SO_PEERNAME:
1157	{
1158		char address[128];
1159
1160		if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
1161			return -ENOTCONN;
1162		if (lv < len)
1163			return -EINVAL;
1164		if (copy_to_user(optval, address, len))
1165			return -EFAULT;
1166		goto lenout;
1167	}
1168
1169	/* Dubious BSD thing... Probably nobody even uses it, but
1170	 * the UNIX standard wants it for whatever reason... -DaveM
1171	 */
1172	case SO_ACCEPTCONN:
1173		v.val = sk->sk_state == TCP_LISTEN;
1174		break;
1175
1176	case SO_PASSSEC:
1177		v.val = !!test_bit(SOCK_PASSSEC, &sock->flags);
1178		break;
1179
1180	case SO_PEERSEC:
1181		return security_socket_getpeersec_stream(sock, optval, optlen, len);
1182
1183	case SO_MARK:
1184		v.val = sk->sk_mark;
1185		break;
1186
1187	case SO_RXQ_OVFL:
1188		v.val = sock_flag(sk, SOCK_RXQ_OVFL);
1189		break;
1190
1191	case SO_WIFI_STATUS:
1192		v.val = sock_flag(sk, SOCK_WIFI_STATUS);
1193		break;
1194
1195	case SO_PEEK_OFF:
1196		if (!sock->ops->set_peek_off)
1197			return -EOPNOTSUPP;
1198
1199		v.val = sk->sk_peek_off;
1200		break;
1201	case SO_NOFCS:
1202		v.val = sock_flag(sk, SOCK_NOFCS);
1203		break;
1204
1205	case SO_BINDTODEVICE:
1206		return sock_getbindtodevice(sk, optval, optlen, len);
1207
1208	case SO_GET_FILTER:
1209		len = sk_get_filter(sk, (struct sock_filter __user *)optval, len);
1210		if (len < 0)
1211			return len;
1212
1213		goto lenout;
1214
1215	case SO_LOCK_FILTER:
1216		v.val = sock_flag(sk, SOCK_FILTER_LOCKED);
1217		break;
1218
1219	case SO_BPF_EXTENSIONS:
1220		v.val = bpf_tell_extensions();
1221		break;
1222
1223	case SO_SELECT_ERR_QUEUE:
1224		v.val = sock_flag(sk, SOCK_SELECT_ERR_QUEUE);
1225		break;
1226
1227#ifdef CONFIG_NET_RX_BUSY_POLL
1228	case SO_BUSY_POLL:
1229		v.val = sk->sk_ll_usec;
1230		break;
1231#endif
1232
1233	case SO_MAX_PACING_RATE:
1234		v.val = sk->sk_max_pacing_rate;
1235		break;
1236
1237	default:
1238		return -ENOPROTOOPT;
1239	}
1240
1241	if (len > lv)
1242		len = lv;
1243	if (copy_to_user(optval, &v, len))
1244		return -EFAULT;
1245lenout:
1246	if (put_user(len, optlen))
1247		return -EFAULT;
1248	return 0;
1249}
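/*
 * A sketch of the classic non-blocking connect() pattern that relies on the
 * SO_ERROR read-and-clear semantics above:
 */
#if 0	/* illustration only */
	int err;
	socklen_t len = sizeof(err);

	/* after poll() reports POLLOUT on a connecting socket */
	getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len);
	if (err)
		errno = err;	/* connect() failed with this error */
#endif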
1250
1251/*
1252 * Initialize an sk_lock.
1253 *
1254 * (We also register the sk_lock with the lock validator.)
1255 */
1256static inline void sock_lock_init(struct sock *sk)
1257{
1258	sock_lock_init_class_and_name(sk,
1259			af_family_slock_key_strings[sk->sk_family],
1260			af_family_slock_keys + sk->sk_family,
1261			af_family_key_strings[sk->sk_family],
1262			af_family_keys + sk->sk_family);
1263}
1264
1265/*
1266 * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
 1267 * even temporarily, because of RCU lookups. sk_node should also be left as is.
 1268 * We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end.
1269 */
1270static void sock_copy(struct sock *nsk, const struct sock *osk)
1271{
1272#ifdef CONFIG_SECURITY_NETWORK
1273	void *sptr = nsk->sk_security;
1274#endif
1275	memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));
1276
1277	memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
1278	       osk->sk_prot->obj_size - offsetof(struct sock, sk_dontcopy_end));
1279
1280#ifdef CONFIG_SECURITY_NETWORK
1281	nsk->sk_security = sptr;
1282	security_sk_clone(osk, nsk);
1283#endif
1284}
1285
1286void sk_prot_clear_portaddr_nulls(struct sock *sk, int size)
1287{
1288	unsigned long nulls1, nulls2;
1289
1290	nulls1 = offsetof(struct sock, __sk_common.skc_node.next);
1291	nulls2 = offsetof(struct sock, __sk_common.skc_portaddr_node.next);
1292	if (nulls1 > nulls2)
1293		swap(nulls1, nulls2);
1294
1295	if (nulls1 != 0)
1296		memset((char *)sk, 0, nulls1);
1297	memset((char *)sk + nulls1 + sizeof(void *), 0,
1298	       nulls2 - nulls1 - sizeof(void *));
1299	memset((char *)sk + nulls2 + sizeof(void *), 0,
1300	       size - nulls2 - sizeof(void *));
1301}
1302EXPORT_SYMBOL(sk_prot_clear_portaddr_nulls);
1303
1304static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
1305		int family)
1306{
1307	struct sock *sk;
1308	struct kmem_cache *slab;
1309
1310	slab = prot->slab;
1311	if (slab != NULL) {
1312		sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
1313		if (!sk)
1314			return sk;
1315		if (priority & __GFP_ZERO) {
1316			if (prot->clear_sk)
1317				prot->clear_sk(sk, prot->obj_size);
1318			else
1319				sk_prot_clear_nulls(sk, prot->obj_size);
1320		}
1321	} else
1322		sk = kmalloc(prot->obj_size, priority);
1323
1324	if (sk != NULL) {
1325		kmemcheck_annotate_bitfield(sk, flags);
1326
1327		if (security_sk_alloc(sk, family, priority))
1328			goto out_free;
1329
1330		if (!try_module_get(prot->owner))
1331			goto out_free_sec;
1332		sk_tx_queue_clear(sk);
1333	}
1334
1335	return sk;
1336
1337out_free_sec:
1338	security_sk_free(sk);
1339out_free:
1340	if (slab != NULL)
1341		kmem_cache_free(slab, sk);
1342	else
1343		kfree(sk);
1344	return NULL;
1345}
1346
1347static void sk_prot_free(struct proto *prot, struct sock *sk)
1348{
1349	struct kmem_cache *slab;
1350	struct module *owner;
1351
1352	owner = prot->owner;
1353	slab = prot->slab;
1354
1355	security_sk_free(sk);
1356	if (slab != NULL)
1357		kmem_cache_free(slab, sk);
1358	else
1359		kfree(sk);
1360	module_put(owner);
1361}
1362
1363#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
1364void sock_update_netprioidx(struct sock *sk)
1365{
1366	if (in_interrupt())
1367		return;
1368
1369	sk->sk_cgrp_prioidx = task_netprioidx(current);
1370}
1371EXPORT_SYMBOL_GPL(sock_update_netprioidx);
1372#endif
1373
1374/**
1375 *	sk_alloc - All socket objects are allocated here
1376 *	@net: the applicable net namespace
1377 *	@family: protocol family
1378 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1379 *	@prot: struct proto associated with this new sock instance
1380 */
1381struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
1382		      struct proto *prot)
1383{
1384	struct sock *sk;
1385
1386	sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
1387	if (sk) {
1388		sk->sk_family = family;
1389		/*
1390		 * See comment in struct sock definition to understand
1391		 * why we need sk_prot_creator -acme
1392		 */
1393		sk->sk_prot = sk->sk_prot_creator = prot;
1394		sock_lock_init(sk);
1395		sock_net_set(sk, get_net(net));
1396		atomic_set(&sk->sk_wmem_alloc, 1);
1397
1398		sock_update_classid(sk);
1399		sock_update_netprioidx(sk);
1400	}
1401
1402	return sk;
1403}
1404EXPORT_SYMBOL(sk_alloc);
1405
1406static void __sk_free(struct sock *sk)
1407{
1408	struct sk_filter *filter;
1409
1410	if (sk->sk_destruct)
1411		sk->sk_destruct(sk);
1412
1413	filter = rcu_dereference_check(sk->sk_filter,
1414				       atomic_read(&sk->sk_wmem_alloc) == 0);
1415	if (filter) {
1416		sk_filter_uncharge(sk, filter);
1417		RCU_INIT_POINTER(sk->sk_filter, NULL);
1418	}
1419
1420	sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);
1421
1422	if (atomic_read(&sk->sk_omem_alloc))
1423		pr_debug("%s: optmem leakage (%d bytes) detected\n",
1424			 __func__, atomic_read(&sk->sk_omem_alloc));
1425
1426	if (sk->sk_peer_cred)
1427		put_cred(sk->sk_peer_cred);
1428	put_pid(sk->sk_peer_pid);
1429	put_net(sock_net(sk));
1430	sk_prot_free(sk->sk_prot_creator, sk);
1431}
1432
1433void sk_free(struct sock *sk)
1434{
1435	/*
1436	 * We subtract one from sk_wmem_alloc so we can tell whether
1437	 * some packets are still in some tx queue.
1438	 * If it does not drop to zero, sock_wfree() will call __sk_free(sk) later.
1439	 */
1440	if (atomic_dec_and_test(&sk->sk_wmem_alloc))
1441		__sk_free(sk);
1442}
1443EXPORT_SYMBOL(sk_free);
1444
1445/*
1446 * The last sock_put should drop the reference to sk->sk_net. It has already
1447 * been dropped in sk_change_net. Taking a reference to the stopping
1448 * namespace is not an option.
1449 * Take a reference to the socket to remove it from the hash while _alive_,
1450 * and after that destroy it in the context of init_net.
1451 */
1452void sk_release_kernel(struct sock *sk)
1453{
1454	if (sk == NULL || sk->sk_socket == NULL)
1455		return;
1456
1457	sock_hold(sk);
1458	sock_release(sk->sk_socket);
1459	release_net(sock_net(sk));
1460	sock_net_set(sk, get_net(&init_net));
1461	sock_put(sk);
1462}
1463EXPORT_SYMBOL(sk_release_kernel);
1464
1465static void sk_update_clone(const struct sock *sk, struct sock *newsk)
1466{
1467	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
1468		sock_update_memcg(newsk);
1469}
1470
1471/**
1472 *	sk_clone_lock - clone a socket, and lock its clone
1473 *	@sk: the socket to clone
1474 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1475 *
1476 *	Caller must unlock socket even in error path (bh_unlock_sock(newsk))
1477 */
1478struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
1479{
1480	struct sock *newsk;
1481
1482	newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
1483	if (newsk != NULL) {
1484		struct sk_filter *filter;
1485
1486		sock_copy(newsk, sk);
1487
1488		/* SANITY */
1489		get_net(sock_net(newsk));
1490		sk_node_init(&newsk->sk_node);
1491		sock_lock_init(newsk);
1492		bh_lock_sock(newsk);
1493		newsk->sk_backlog.head	= newsk->sk_backlog.tail = NULL;
1494		newsk->sk_backlog.len = 0;
1495
1496		atomic_set(&newsk->sk_rmem_alloc, 0);
1497		/*
1498		 * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
1499		 */
1500		atomic_set(&newsk->sk_wmem_alloc, 1);
1501		atomic_set(&newsk->sk_omem_alloc, 0);
1502		skb_queue_head_init(&newsk->sk_receive_queue);
1503		skb_queue_head_init(&newsk->sk_write_queue);
1504#ifdef CONFIG_NET_DMA
1505		skb_queue_head_init(&newsk->sk_async_wait_queue);
1506#endif
1507
1508		spin_lock_init(&newsk->sk_dst_lock);
1509		rwlock_init(&newsk->sk_callback_lock);
1510		lockdep_set_class_and_name(&newsk->sk_callback_lock,
1511				af_callback_keys + newsk->sk_family,
1512				af_family_clock_key_strings[newsk->sk_family]);
1513
1514		newsk->sk_dst_cache	= NULL;
1515		newsk->sk_wmem_queued	= 0;
1516		newsk->sk_forward_alloc = 0;
1517		newsk->sk_send_head	= NULL;
1518		newsk->sk_userlocks	= sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
1519
1520		sock_reset_flag(newsk, SOCK_DONE);
1521		skb_queue_head_init(&newsk->sk_error_queue);
1522
1523		filter = rcu_dereference_protected(newsk->sk_filter, 1);
1524		if (filter != NULL)
1525			sk_filter_charge(newsk, filter);
1526
1527		if (unlikely(xfrm_sk_clone_policy(newsk))) {
1528			/* It is still a raw copy of the parent, so invalidate
1529			 * the destructor and do a plain sk_free() */
1530			newsk->sk_destruct = NULL;
1531			bh_unlock_sock(newsk);
1532			sk_free(newsk);
1533			newsk = NULL;
1534			goto out;
1535		}
1536
1537		newsk->sk_err	   = 0;
1538		newsk->sk_priority = 0;
1539		/*
1540		 * Before updating sk_refcnt, we must commit prior changes to memory
1541		 * (Documentation/RCU/rculist_nulls.txt for details)
1542		 */
1543		smp_wmb();
1544		atomic_set(&newsk->sk_refcnt, 2);
1545
1546		/*
1547		 * Increment the counter in the same struct proto as the master
1548		 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
1549		 * is the same as sk->sk_prot->socks, as this field was copied
1550		 * with memcpy).
1551		 *
1552		 * This _changes_ the previous behaviour, where
1553		 * tcp_create_openreq_child always was incrementing the
1554 *		equivalent to tcp_prot->socks (inet_sock_nr), so this has
1555		 * to be taken into account in all callers. -acme
1556		 */
1557		sk_refcnt_debug_inc(newsk);
1558		sk_set_socket(newsk, NULL);
1559		newsk->sk_wq = NULL;
1560
1561		sk_update_clone(sk, newsk);
1562
1563		if (newsk->sk_prot->sockets_allocated)
1564			sk_sockets_allocated_inc(newsk);
1565
1566		if (newsk->sk_flags & SK_FLAGS_TIMESTAMP)
1567			net_enable_timestamp();
1568	}
1569out:
1570	return newsk;
1571}
1572EXPORT_SYMBOL_GPL(sk_clone_lock);
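/*
 * A minimal sketch of the locking contract documented above: on success the
 * clone comes back bh-locked and the caller must unlock it, including on its
 * own later error paths (compare inet_csk_clone_lock() and its callers).
 */
#if 0	/* illustration only */
	struct sock *newsk = sk_clone_lock(sk, GFP_ATOMIC);

	if (newsk) {
		/* ... protocol specific initialization ... */
		bh_unlock_sock(newsk);
	}
#endif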
1573
1574void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
1575{
1576	__sk_dst_set(sk, dst);
1577	sk->sk_route_caps = dst->dev->features;
1578	if (sk->sk_route_caps & NETIF_F_GSO)
1579		sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
1580	sk->sk_route_caps &= ~sk->sk_route_nocaps;
1581	if (sk_can_gso(sk)) {
1582		if (dst->header_len) {
1583			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
1584		} else {
1585			sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
1586			sk->sk_gso_max_size = dst->dev->gso_max_size;
1587			sk->sk_gso_max_segs = dst->dev->gso_max_segs;
1588		}
1589	}
1590}
1591EXPORT_SYMBOL_GPL(sk_setup_caps);
1592
1593/*
1594 *	Simple resource managers for sockets.
1595 */
1596
1597
1598/*
1599 * Write buffer destructor automatically called from kfree_skb.
1600 */
1601void sock_wfree(struct sk_buff *skb)
1602{
1603	struct sock *sk = skb->sk;
1604	unsigned int len = skb->truesize;
1605
1606	if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
1607		/*
1608		 * Keep a reference on sk_wmem_alloc, this will be released
1609		 * after sk_write_space() call
1610		 */
1611		atomic_sub(len - 1, &sk->sk_wmem_alloc);
1612		sk->sk_write_space(sk);
1613		len = 1;
1614	}
1615	/*
1616	 * if sk_wmem_alloc reaches 0, we must finish what sk_free()
1617	 * could not do because of in-flight packets
1618	 */
1619	if (atomic_sub_and_test(len, &sk->sk_wmem_alloc))
1620		__sk_free(sk);
1621}
1622EXPORT_SYMBOL(sock_wfree);
1623
1624void skb_orphan_partial(struct sk_buff *skb)
1625{
1626	/* TCP stack sets skb->ooo_okay based on sk_wmem_alloc,
1627	 * so we do not completely orphan the skb; instead we transfer all
1628	 * accounted bytes but one, to avoid unexpected reorders.
1629	 */
1630	if (skb->destructor == sock_wfree
1631#ifdef CONFIG_INET
1632	    || skb->destructor == tcp_wfree
1633#endif
1634		) {
1635		atomic_sub(skb->truesize - 1, &skb->sk->sk_wmem_alloc);
1636		skb->truesize = 1;
1637	} else {
1638		skb_orphan(skb);
1639	}
1640}
1641EXPORT_SYMBOL(skb_orphan_partial);
1642
1643/*
1644 * Read buffer destructor automatically called from kfree_skb.
1645 */
1646void sock_rfree(struct sk_buff *skb)
1647{
1648	struct sock *sk = skb->sk;
1649	unsigned int len = skb->truesize;
1650
1651	atomic_sub(len, &sk->sk_rmem_alloc);
1652	sk_mem_uncharge(sk, len);
1653}
1654EXPORT_SYMBOL(sock_rfree);
1655
1656void sock_edemux(struct sk_buff *skb)
1657{
1658	struct sock *sk = skb->sk;
1659
1660#ifdef CONFIG_INET
1661	if (sk->sk_state == TCP_TIME_WAIT)
1662		inet_twsk_put(inet_twsk(sk));
1663	else
1664#endif
1665		sock_put(sk);
1666}
1667EXPORT_SYMBOL(sock_edemux);
1668
1669kuid_t sock_i_uid(struct sock *sk)
1670{
1671	kuid_t uid;
1672
1673	read_lock_bh(&sk->sk_callback_lock);
1674	uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : GLOBAL_ROOT_UID;
1675	read_unlock_bh(&sk->sk_callback_lock);
1676	return uid;
1677}
1678EXPORT_SYMBOL(sock_i_uid);
1679
1680unsigned long sock_i_ino(struct sock *sk)
1681{
1682	unsigned long ino;
1683
1684	read_lock_bh(&sk->sk_callback_lock);
1685	ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
1686	read_unlock_bh(&sk->sk_callback_lock);
1687	return ino;
1688}
1689EXPORT_SYMBOL(sock_i_ino);
1690
1691/*
1692 * Allocate a skb from the socket's send buffer.
1693 */
1694struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
1695			     gfp_t priority)
1696{
1697	if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
1698		struct sk_buff *skb = alloc_skb(size, priority);
1699		if (skb) {
1700			skb_set_owner_w(skb, sk);
1701			return skb;
1702		}
1703	}
1704	return NULL;
1705}
1706EXPORT_SYMBOL(sock_wmalloc);
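/*
 * A sketch of a typical caller (hypothetical sizes): allocate a frame
 * charged to the send buffer without forcing past sk_sndbuf.
 */
#if 0	/* illustration only */
	struct sk_buff *skb = sock_wmalloc(sk, hdr_len + payload_len, 0,
					   GFP_ATOMIC);
	if (!skb)
		return -ENOBUFS;	/* send buffer is full */
#endif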
1707
1708/*
1709 * Allocate a memory block from the socket's option memory buffer.
1710 */
1711void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
1712{
1713	if ((unsigned int)size <= sysctl_optmem_max &&
1714	    atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
1715		void *mem;
1716		/* First do the add, to avoid the race if kmalloc
1717		 * might sleep.
1718		 */
1719		atomic_add(size, &sk->sk_omem_alloc);
1720		mem = kmalloc(size, priority);
1721		if (mem)
1722			return mem;
1723		atomic_sub(size, &sk->sk_omem_alloc);
1724	}
1725	return NULL;
1726}
1727EXPORT_SYMBOL(sock_kmalloc);
1728
1729/*
1730 * Free an option memory block.
1731 */
1732void sock_kfree_s(struct sock *sk, void *mem, int size)
1733{
1734	kfree(mem);
1735	atomic_sub(size, &sk->sk_omem_alloc);
1736}
1737EXPORT_SYMBOL(sock_kfree_s);
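/*
 * The two helpers above pair up; a sketch with a hypothetical option
 * structure (the size passed to sock_kfree_s() must match the allocation):
 */
#if 0	/* illustration only */
	struct example_opt *opt = sock_kmalloc(sk, sizeof(*opt), GFP_KERNEL);

	if (!opt)
		return -ENOBUFS;
	/* ... use opt ... */
	sock_kfree_s(sk, opt, sizeof(*opt));
#endif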
1738
1739/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
1740   I think these locks should be removed for datagram sockets.
1741 */
1742static long sock_wait_for_wmem(struct sock *sk, long timeo)
1743{
1744	DEFINE_WAIT(wait);
1745
1746	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
1747	for (;;) {
1748		if (!timeo)
1749			break;
1750		if (signal_pending(current))
1751			break;
1752		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1753		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1754		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
1755			break;
1756		if (sk->sk_shutdown & SEND_SHUTDOWN)
1757			break;
1758		if (sk->sk_err)
1759			break;
1760		timeo = schedule_timeout(timeo);
1761	}
1762	finish_wait(sk_sleep(sk), &wait);
1763	return timeo;
1764}
1765
1766
1767/*
1768 *	Generic send/receive buffer handlers
1769 */
1770
1771struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
1772				     unsigned long data_len, int noblock,
1773				     int *errcode, int max_page_order)
1774{
1775	struct sk_buff *skb = NULL;
1776	unsigned long chunk;
1777	gfp_t gfp_mask;
1778	long timeo;
1779	int err;
1780	int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
1781	struct page *page;
1782	int i;
1783
1784	err = -EMSGSIZE;
1785	if (npages > MAX_SKB_FRAGS)
1786		goto failure;
1787
1788	timeo = sock_sndtimeo(sk, noblock);
1789	while (!skb) {
1790		err = sock_error(sk);
1791		if (err != 0)
1792			goto failure;
1793
1794		err = -EPIPE;
1795		if (sk->sk_shutdown & SEND_SHUTDOWN)
1796			goto failure;
1797
1798		if (atomic_read(&sk->sk_wmem_alloc) >= sk->sk_sndbuf) {
1799			set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
1800			set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1801			err = -EAGAIN;
1802			if (!timeo)
1803				goto failure;
1804			if (signal_pending(current))
1805				goto interrupted;
1806			timeo = sock_wait_for_wmem(sk, timeo);
1807			continue;
1808		}
1809
1810		err = -ENOBUFS;
1811		gfp_mask = sk->sk_allocation;
1812		if (gfp_mask & __GFP_WAIT)
1813			gfp_mask |= __GFP_REPEAT;
1814
1815		skb = alloc_skb(header_len, gfp_mask);
1816		if (!skb)
1817			goto failure;
1818
1819		skb->truesize += data_len;
1820
1821		for (i = 0; npages > 0; i++) {
1822			int order = max_page_order;
1823
1824			while (order) {
1825				if (npages >= 1 << order) {
1826					page = alloc_pages(sk->sk_allocation |
1827							   __GFP_COMP |
1828							   __GFP_NOWARN |
1829							   __GFP_NORETRY,
1830							   order);
1831					if (page)
1832						goto fill_page;
1833				}
1834				order--;
1835			}
1836			page = alloc_page(sk->sk_allocation);
1837			if (!page)
1838				goto failure;
1839fill_page:
1840			chunk = min_t(unsigned long, data_len,
1841				      PAGE_SIZE << order);
1842			skb_fill_page_desc(skb, i, page, 0, chunk);
1843			data_len -= chunk;
1844			npages -= 1 << order;
1845		}
1846	}
1847
1848	skb_set_owner_w(skb, sk);
1849	return skb;
1850
1851interrupted:
1852	err = sock_intr_errno(timeo);
1853failure:
1854	kfree_skb(skb);
1855	*errcode = err;
1856	return NULL;
1857}
1858EXPORT_SYMBOL(sock_alloc_send_pskb);
1859
1860struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
1861				    int noblock, int *errcode)
1862{
1863	return sock_alloc_send_pskb(sk, size, 0, noblock, errcode, 0);
1864}
1865EXPORT_SYMBOL(sock_alloc_send_skb);
1866
1867/* On 32bit arches, an skb frag is limited to 2^15 */
1868#define SKB_FRAG_PAGE_ORDER	get_order(32768)
1869
1870/**
1871 * skb_page_frag_refill - check that a page_frag contains enough room
1872 * @sz: minimum size of the fragment we want to get
1873 * @pfrag: pointer to page_frag
1874 * @prio: priority for memory allocation
1875 *
1876 * Note: While this allocator tries to use high order pages, there is
1877 * no guarantee that allocations succeed. Therefore, @sz MUST be
 1878 * less than or equal to PAGE_SIZE.
1879 */
1880bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio)
1881{
1882	int order;
1883
1884	if (pfrag->page) {
1885		if (atomic_read(&pfrag->page->_count) == 1) {
1886			pfrag->offset = 0;
1887			return true;
1888		}
1889		if (pfrag->offset + sz <= pfrag->size)
1890			return true;
1891		put_page(pfrag->page);
1892	}
1893
1894	order = SKB_FRAG_PAGE_ORDER;
1895	do {
1896		gfp_t gfp = prio;
1897
1898		if (order)
1899			gfp |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY;
1900		pfrag->page = alloc_pages(gfp, order);
1901		if (likely(pfrag->page)) {
1902			pfrag->offset = 0;
1903			pfrag->size = PAGE_SIZE << order;
1904			return true;
1905		}
1906	} while (--order >= 0);
1907
1908	return false;
1909}
1910EXPORT_SYMBOL(skb_page_frag_refill);
1911
1912bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
1913{
1914	if (likely(skb_page_frag_refill(32U, pfrag, sk->sk_allocation)))
1915		return true;
1916
1917	sk_enter_memory_pressure(sk);
1918	sk_stream_moderate_sndbuf(sk);
1919	return false;
1920}
1921EXPORT_SYMBOL(sk_page_frag_refill);
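/*
 * A sketch of the usual sendmsg pattern built on the helper above: refill
 * the per-socket page_frag, copy into it at pfrag->offset, then advance
 * the offset by the number of bytes copied.
 */
#if 0	/* illustration only */
	struct page_frag *pfrag = sk_page_frag(sk);

	if (!sk_page_frag_refill(sk, pfrag))
		goto wait_for_memory;
	/* copy at most pfrag->size - pfrag->offset bytes into
	 * page_address(pfrag->page) + pfrag->offset, then
	 * pfrag->offset += copied;
	 */
#endif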
1922
1923static void __lock_sock(struct sock *sk)
1924	__releases(&sk->sk_lock.slock)
1925	__acquires(&sk->sk_lock.slock)
1926{
1927	DEFINE_WAIT(wait);
1928
1929	for (;;) {
1930		prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
1931					TASK_UNINTERRUPTIBLE);
1932		spin_unlock_bh(&sk->sk_lock.slock);
1933		schedule();
1934		spin_lock_bh(&sk->sk_lock.slock);
1935		if (!sock_owned_by_user(sk))
1936			break;
1937	}
1938	finish_wait(&sk->sk_lock.wq, &wait);
1939}
1940
1941static void __release_sock(struct sock *sk)
1942	__releases(&sk->sk_lock.slock)
1943	__acquires(&sk->sk_lock.slock)
1944{
1945	struct sk_buff *skb = sk->sk_backlog.head;
1946
1947	do {
1948		sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
1949		bh_unlock_sock(sk);
1950
1951		do {
1952			struct sk_buff *next = skb->next;
1953
1954			prefetch(next);
1955			WARN_ON_ONCE(skb_dst_is_noref(skb));
1956			skb->next = NULL;
1957			sk_backlog_rcv(sk, skb);
1958
1959			/*
1960			 * We are in process context here with softirqs
1961			 * disabled, use cond_resched_softirq() to preempt.
1962			 * This is safe to do because we have made the backlog
1963			 * queue private:
1964			 */
1965			cond_resched_softirq();
1966
1967			skb = next;
1968		} while (skb != NULL);
1969
1970		bh_lock_sock(sk);
1971	} while ((skb = sk->sk_backlog.head) != NULL);
1972
1973	/*
1974	 * Doing the zeroing here guarantees we cannot loop forever
1975	 * while a wild producer attempts to flood us.
1976	 */
1977	sk->sk_backlog.len = 0;
1978}
1979
1980/**
1981 * sk_wait_data - wait for data to arrive at sk_receive_queue
1982 * @sk:    sock to wait on
1983 * @timeo: for how long
1984 *
1985 * Now socket state including sk->sk_err is changed only under lock,
1986 * hence we may omit checks after joining wait queue.
1987 * We check the receive queue before schedule() only as an optimization;
1988 * it is very likely that release_sock() added new data.
1989 */
1990int sk_wait_data(struct sock *sk, long *timeo)
1991{
1992	int rc;
1993	DEFINE_WAIT(wait);
1994
1995	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1996	set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1997	rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
1998	clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1999	finish_wait(sk_sleep(sk), &wait);
2000	return rc;
2001}
2002EXPORT_SYMBOL(sk_wait_data);
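/*
 * Usage sketch, for illustration only (hypothetical helper). A protocol's
 * recvmsg() typically loops like this while holding the socket lock;
 * sk_wait_event() inside sk_wait_data() drops and retakes the lock around
 * schedule_timeout(), letting the backlog deliver fresh skbs.
 */
static int __maybe_unused example_wait_for_skb(struct sock *sk, int noblock)
{
	long timeo = sock_rcvtimeo(sk, noblock);

	while (skb_queue_empty(&sk->sk_receive_queue)) {
		if (sk->sk_err)
			return sock_error(sk);		/* pending error */
		if (!timeo)
			return -EAGAIN;			/* non-blocking */
		if (signal_pending(current))
			return sock_intr_errno(timeo);
		sk_wait_data(sk, &timeo);
	}
	return 0;
}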
2003
2004/**
2005 *	__sk_mem_schedule - increase sk_forward_alloc and memory_allocated
2006 *	@sk: socket
2007 *	@size: memory size to allocate
2008 *	@kind: allocation type
2009 *
2010 *	If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
2011 *	rmem allocation. This function assumes that protocols which have
2012 *	memory_pressure use sk_wmem_queued as write buffer accounting.
2013 */
2014int __sk_mem_schedule(struct sock *sk, int size, int kind)
2015{
2016	struct proto *prot = sk->sk_prot;
2017	int amt = sk_mem_pages(size);
2018	long allocated;
2019	int parent_status = UNDER_LIMIT;
2020
2021	sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
2022
2023	allocated = sk_memory_allocated_add(sk, amt, &parent_status);
2024
2025	/* Under limit. */
2026	if (parent_status == UNDER_LIMIT &&
2027			allocated <= sk_prot_mem_limits(sk, 0)) {
2028		sk_leave_memory_pressure(sk);
2029		return 1;
2030	}
2031
2032	/* Under pressure. (we or our parents) */
2033	if ((parent_status > SOFT_LIMIT) ||
2034			allocated > sk_prot_mem_limits(sk, 1))
2035		sk_enter_memory_pressure(sk);
2036
2037	/* Over hard limit (we or our parents) */
2038	if ((parent_status == OVER_LIMIT) ||
2039			(allocated > sk_prot_mem_limits(sk, 2)))
2040		goto suppress_allocation;
2041
2042	/* guarantee minimum buffer size under pressure */
2043	if (kind == SK_MEM_RECV) {
2044		if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
2045			return 1;
2046
2047	} else { /* SK_MEM_SEND */
2048		if (sk->sk_type == SOCK_STREAM) {
2049			if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
2050				return 1;
2051		} else if (atomic_read(&sk->sk_wmem_alloc) <
2052			   prot->sysctl_wmem[0])
2053				return 1;
2054	}
2055
2056	if (sk_has_memory_pressure(sk)) {
2057		int alloc;
2058
2059		if (!sk_under_memory_pressure(sk))
2060			return 1;
2061		alloc = sk_sockets_allocated_read_positive(sk);
2062		if (sk_prot_mem_limits(sk, 2) > alloc *
2063		    sk_mem_pages(sk->sk_wmem_queued +
2064				 atomic_read(&sk->sk_rmem_alloc) +
2065				 sk->sk_forward_alloc))
2066			return 1;
2067	}
2068
2069suppress_allocation:
2070
2071	if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
2072		sk_stream_moderate_sndbuf(sk);
2073
2074		/* Fail only if socket is _under_ its sndbuf.
2075		 * In this case we cannot block, so that we have to fail.
2076		 */
2077		if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
2078			return 1;
2079	}
2080
2081	trace_sock_exceed_buf_limit(sk, prot, allocated);
2082
2083	/* Alas. Undo changes. */
2084	sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
2085
2086	sk_memory_allocated_sub(sk, amt);
2087
2088	return 0;
2089}
2090EXPORT_SYMBOL(__sk_mem_schedule);
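/*
 * For illustration (hypothetical helper): callers normally go through the
 * sk_rmem_schedule()/sk_wmem_schedule() wrappers, which only fall back to
 * __sk_mem_schedule() when sk_forward_alloc cannot already cover the
 * request. Charges are rounded up to whole SK_MEM_QUANTUM (page) units,
 * e.g. sk_mem_pages(3000) == 1 with 4 KiB pages.
 */
static bool __maybe_unused example_charge_rmem(struct sock *sk,
					       struct sk_buff *skb)
{
	if (!sk_rmem_schedule(sk, skb, skb->truesize))
		return false;			/* over protocol limits */
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
	sk_mem_charge(sk, skb->truesize);	/* consume forward_alloc */
	return true;
}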
2091
2092/**
2093 *	__sk_mem_reclaim - reclaim memory_allocated
2094 *	@sk: socket
2095 */
2096void __sk_mem_reclaim(struct sock *sk)
2097{
2098	sk_memory_allocated_sub(sk,
2099				sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT);
2100	sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;
2101
2102	if (sk_under_memory_pressure(sk) &&
2103	    (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
2104		sk_leave_memory_pressure(sk);
2105}
2106EXPORT_SYMBOL(__sk_mem_reclaim);
2107
2108
2109/*
2110 * Set of default routines for initialising struct proto_ops when
2111 * the protocol does not support a particular function. In certain
2112 * cases where it makes no sense for a protocol to have a "do nothing"
2113 * function, some default processing is provided.
2114 */
2115
2116int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
2117{
2118	return -EOPNOTSUPP;
2119}
2120EXPORT_SYMBOL(sock_no_bind);
2121
2122int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
2123		    int len, int flags)
2124{
2125	return -EOPNOTSUPP;
2126}
2127EXPORT_SYMBOL(sock_no_connect);
2128
2129int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
2130{
2131	return -EOPNOTSUPP;
2132}
2133EXPORT_SYMBOL(sock_no_socketpair);
2134
2135int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
2136{
2137	return -EOPNOTSUPP;
2138}
2139EXPORT_SYMBOL(sock_no_accept);
2140
2141int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
2142		    int *len, int peer)
2143{
2144	return -EOPNOTSUPP;
2145}
2146EXPORT_SYMBOL(sock_no_getname);
2147
2148unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
2149{
2150	return 0;
2151}
2152EXPORT_SYMBOL(sock_no_poll);
2153
2154int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
2155{
2156	return -EOPNOTSUPP;
2157}
2158EXPORT_SYMBOL(sock_no_ioctl);
2159
2160int sock_no_listen(struct socket *sock, int backlog)
2161{
2162	return -EOPNOTSUPP;
2163}
2164EXPORT_SYMBOL(sock_no_listen);
2165
2166int sock_no_shutdown(struct socket *sock, int how)
2167{
2168	return -EOPNOTSUPP;
2169}
2170EXPORT_SYMBOL(sock_no_shutdown);
2171
2172int sock_no_setsockopt(struct socket *sock, int level, int optname,
2173		    char __user *optval, unsigned int optlen)
2174{
2175	return -EOPNOTSUPP;
2176}
2177EXPORT_SYMBOL(sock_no_setsockopt);
2178
2179int sock_no_getsockopt(struct socket *sock, int level, int optname,
2180		    char __user *optval, int __user *optlen)
2181{
2182	return -EOPNOTSUPP;
2183}
2184EXPORT_SYMBOL(sock_no_getsockopt);
2185
2186int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
2187		    size_t len)
2188{
2189	return -EOPNOTSUPP;
2190}
2191EXPORT_SYMBOL(sock_no_sendmsg);
2192
2193int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
2194		    size_t len, int flags)
2195{
2196	return -EOPNOTSUPP;
2197}
2198EXPORT_SYMBOL(sock_no_recvmsg);
2199
2200int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
2201{
2202	/* Mirror missing mmap method error code */
2203	return -ENODEV;
2204}
2205EXPORT_SYMBOL(sock_no_mmap);
2206
2207ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
2208{
2209	ssize_t res;
2210	struct msghdr msg = {.msg_flags = flags};
2211	struct kvec iov;
2212	char *kaddr = kmap(page);
2213	iov.iov_base = kaddr + offset;
2214	iov.iov_len = size;
2215	res = kernel_sendmsg(sock, &msg, &iov, 1, size);
2216	kunmap(page);
2217	return res;
2218}
2219EXPORT_SYMBOL(sock_no_sendpage);
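/*
 * For illustration only (all example_* names are hypothetical): a family
 * with no connection management can wire the stubs above directly into
 * its proto_ops, overriding only what it actually implements.
 */
#if 0
static const struct proto_ops example_ops = {
	.family		= AF_UNSPEC,		/* placeholder */
	.owner		= THIS_MODULE,
	.release	= example_release,
	.bind		= sock_no_bind,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= sock_no_getname,
	.poll		= datagram_poll,
	.ioctl		= sock_no_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= sock_no_setsockopt,
	.getsockopt	= sock_no_getsockopt,
	.sendmsg	= example_sendmsg,
	.recvmsg	= example_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage,
};
#endif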
2220
2221/*
2222 *	Default Socket Callbacks
2223 */
2224
2225static void sock_def_wakeup(struct sock *sk)
2226{
2227	struct socket_wq *wq;
2228
2229	rcu_read_lock();
2230	wq = rcu_dereference(sk->sk_wq);
2231	if (wq_has_sleeper(wq))
2232		wake_up_interruptible_all(&wq->wait);
2233	rcu_read_unlock();
2234}
2235
2236static void sock_def_error_report(struct sock *sk)
2237{
2238	struct socket_wq *wq;
2239
2240	rcu_read_lock();
2241	wq = rcu_dereference(sk->sk_wq);
2242	if (wq_has_sleeper(wq))
2243		wake_up_interruptible_poll(&wq->wait, POLLERR);
2244	sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
2245	rcu_read_unlock();
2246}
2247
2248static void sock_def_readable(struct sock *sk)
2249{
2250	struct socket_wq *wq;
2251
2252	rcu_read_lock();
2253	wq = rcu_dereference(sk->sk_wq);
2254	if (wq_has_sleeper(wq))
2255		wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLPRI |
2256						POLLRDNORM | POLLRDBAND);
2257	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
2258	rcu_read_unlock();
2259}
2260
2261static void sock_def_write_space(struct sock *sk)
2262{
2263	struct socket_wq *wq;
2264
2265	rcu_read_lock();
2266
2267	/* Do not wake up a writer until he can make "significant"
2268	 * progress.  --DaveM
2269	 */
2270	if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
2271		wq = rcu_dereference(sk->sk_wq);
2272		if (wq_has_sleeper(wq))
2273			wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
2274						POLLWRNORM | POLLWRBAND);
2275
2276		/* Should agree with poll, otherwise some programs break */
2277		if (sock_writeable(sk))
2278			sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
2279	}
2280
2281	rcu_read_unlock();
2282}
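/*
 * For illustration: the test above wakes writers only once at least half
 * of sk_sndbuf is free again (wmem_alloc * 2 <= sndbuf is the same as
 * wmem_alloc <= sndbuf / 2), which tracks the sock_writeable() condition
 * that poll() reports as POLLOUT.
 */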
2283
2284static void sock_def_destruct(struct sock *sk)
2285{
2286	kfree(sk->sk_protinfo);
2287}
2288
2289void sk_send_sigurg(struct sock *sk)
2290{
2291	if (sk->sk_socket && sk->sk_socket->file)
2292		if (send_sigurg(&sk->sk_socket->file->f_owner))
2293			sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
2294}
2295EXPORT_SYMBOL(sk_send_sigurg);
2296
2297void sk_reset_timer(struct sock *sk, struct timer_list* timer,
2298		    unsigned long expires)
2299{
2300	if (!mod_timer(timer, expires))
2301		sock_hold(sk);
2302}
2303EXPORT_SYMBOL(sk_reset_timer);
2304
2305void sk_stop_timer(struct sock *sk, struct timer_list* timer)
2306{
2307	if (del_timer(timer))
2308		__sock_put(sk);
2309}
2310EXPORT_SYMBOL(sk_stop_timer);
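/*
 * Usage sketch, for illustration only (hypothetical handler). Because
 * sk_reset_timer() takes a reference only when the timer was not already
 * pending, a callback owns exactly one reference and must drop it (or
 * re-arm the timer) before returning.
 */
static void __maybe_unused example_timer_handler(unsigned long data)
{
	struct sock *sk = (struct sock *)data;

	bh_lock_sock(sk);
	/* ... protocol work; possibly sk_reset_timer() to re-arm ... */
	bh_unlock_sock(sk);
	sock_put(sk);		/* drop the reference the timer held */
}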
2311
2312void sock_init_data(struct socket *sock, struct sock *sk)
2313{
2314	skb_queue_head_init(&sk->sk_receive_queue);
2315	skb_queue_head_init(&sk->sk_write_queue);
2316	skb_queue_head_init(&sk->sk_error_queue);
2317#ifdef CONFIG_NET_DMA
2318	skb_queue_head_init(&sk->sk_async_wait_queue);
2319#endif
2320
2321	sk->sk_send_head	=	NULL;
2322
2323	init_timer(&sk->sk_timer);
2324
2325	sk->sk_allocation	=	GFP_KERNEL;
2326	sk->sk_rcvbuf		=	sysctl_rmem_default;
2327	sk->sk_sndbuf		=	sysctl_wmem_default;
2328	sk->sk_state		=	TCP_CLOSE;
2329	sk_set_socket(sk, sock);
2330
2331	sock_set_flag(sk, SOCK_ZAPPED);
2332
2333	if (sock) {
2334		sk->sk_type	=	sock->type;
2335		sk->sk_wq	=	sock->wq;
2336		sock->sk	=	sk;
2337	} else
2338		sk->sk_wq	=	NULL;
2339
2340	spin_lock_init(&sk->sk_dst_lock);
2341	rwlock_init(&sk->sk_callback_lock);
2342	lockdep_set_class_and_name(&sk->sk_callback_lock,
2343			af_callback_keys + sk->sk_family,
2344			af_family_clock_key_strings[sk->sk_family]);
2345
2346	sk->sk_state_change	=	sock_def_wakeup;
2347	sk->sk_data_ready	=	sock_def_readable;
2348	sk->sk_write_space	=	sock_def_write_space;
2349	sk->sk_error_report	=	sock_def_error_report;
2350	sk->sk_destruct		=	sock_def_destruct;
2351
2352	sk->sk_frag.page	=	NULL;
2353	sk->sk_frag.offset	=	0;
2354	sk->sk_peek_off		=	-1;
2355
2356	sk->sk_peer_pid 	=	NULL;
2357	sk->sk_peer_cred	=	NULL;
2358	sk->sk_write_pending	=	0;
2359	sk->sk_rcvlowat		=	1;
2360	sk->sk_rcvtimeo		=	MAX_SCHEDULE_TIMEOUT;
2361	sk->sk_sndtimeo		=	MAX_SCHEDULE_TIMEOUT;
2362
2363	sk->sk_stamp = ktime_set(-1L, 0);
2364
2365#ifdef CONFIG_NET_RX_BUSY_POLL
2366	sk->sk_napi_id		=	0;
2367	sk->sk_ll_usec		=	sysctl_net_busy_read;
2368#endif
2369
2370	sk->sk_max_pacing_rate = ~0U;
2371	sk->sk_pacing_rate = ~0U;
2372	/*
2373	 * Before updating sk_refcnt, we must commit prior changes to memory
2374	 * (Documentation/RCU/rculist_nulls.txt for details)
2375	 */
2376	smp_wmb();
2377	atomic_set(&sk->sk_refcnt, 1);
2378	atomic_set(&sk->sk_drops, 0);
2379}
2380EXPORT_SYMBOL(sock_init_data);
2381
2382void lock_sock_nested(struct sock *sk, int subclass)
2383{
2384	might_sleep();
2385	spin_lock_bh(&sk->sk_lock.slock);
2386	if (sk->sk_lock.owned)
2387		__lock_sock(sk);
2388	sk->sk_lock.owned = 1;
2389	spin_unlock(&sk->sk_lock.slock);
2390	/*
2391	 * The sk_lock has mutex_lock() semantics here:
2392	 */
2393	mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
2394	local_bh_enable();
2395}
2396EXPORT_SYMBOL(lock_sock_nested);
2397
2398void release_sock(struct sock *sk)
2399{
2400	/*
2401	 * The sk_lock has mutex_unlock() semantics:
2402	 */
2403	mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
2404
2405	spin_lock_bh(&sk->sk_lock.slock);
2406	if (sk->sk_backlog.tail)
2407		__release_sock(sk);
2408
2409	/* Warning: release_cb() might need to release sk ownership,
2410	 * i.e. call sock_release_ownership(sk) before us.
2411	 */
2412	if (sk->sk_prot->release_cb)
2413		sk->sk_prot->release_cb(sk);
2414
2415	sock_release_ownership(sk);
2416	if (waitqueue_active(&sk->sk_lock.wq))
2417		wake_up(&sk->sk_lock.wq);
2418	spin_unlock_bh(&sk->sk_lock.slock);
2419}
2420EXPORT_SYMBOL(release_sock);
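/*
 * Usage sketch, for illustration only (hypothetical helper): the
 * canonical process-context pattern. While the lock is owned, softirq
 * input is diverted to the backlog, and release_sock() above replays it
 * through __release_sock() before waking other lockers.
 */
static void __maybe_unused example_locked_update(struct sock *sk, int val)
{
	lock_sock(sk);
	sk->sk_rcvlowat = val ? : 1;	/* mutate state under the lock */
	release_sock(sk);		/* also flushes the backlog */
}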
2421
2422/**
2423 * lock_sock_fast - fast version of lock_sock
2424 * @sk: socket
2425 *
2426 * This version should be used for very small sections, where the process won't block.
2427 * Return false if the fast path is taken:
2428 *   sk_lock.slock locked, owned = 0, BH disabled
2429 * Return true if the slow path is taken:
2430 *   sk_lock.slock unlocked, owned = 1, BH enabled
2431 */
2432bool lock_sock_fast(struct sock *sk)
2433{
2434	might_sleep();
2435	spin_lock_bh(&sk->sk_lock.slock);
2436
2437	if (!sk->sk_lock.owned)
2438		/*
2439		 * Note : We must disable BH
2440		 */
2441		return false;
2442
2443	__lock_sock(sk);
2444	sk->sk_lock.owned = 1;
2445	spin_unlock(&sk->sk_lock.slock);
2446	/*
2447	 * The sk_lock has mutex_lock() semantics here:
2448	 */
2449	mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
2450	local_bh_enable();
2451	return true;
2452}
2453EXPORT_SYMBOL(lock_sock_fast);
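/*
 * Usage sketch, for illustration only (hypothetical helper):
 * unlock_sock_fast(), a static inline from net/sock.h, undoes whichever
 * path was taken using the boolean returned here.
 */
static unsigned int __maybe_unused example_peek_rqueue(struct sock *sk)
{
	bool slow = lock_sock_fast(sk);
	unsigned int len = skb_queue_len(&sk->sk_receive_queue);

	unlock_sock_fast(sk, slow);
	return len;
}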
2454
2455int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
2456{
2457	struct timeval tv;
2458	if (!sock_flag(sk, SOCK_TIMESTAMP))
2459		sock_enable_timestamp(sk, SOCK_TIMESTAMP);
2460	tv = ktime_to_timeval(sk->sk_stamp);
2461	if (tv.tv_sec == -1)
2462		return -ENOENT;
2463	if (tv.tv_sec == 0) {
2464		sk->sk_stamp = ktime_get_real();
2465		tv = ktime_to_timeval(sk->sk_stamp);
2466	}
2467	return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
2468}
2469EXPORT_SYMBOL(sock_get_timestamp);
2470
2471int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
2472{
2473	struct timespec ts;
2474	if (!sock_flag(sk, SOCK_TIMESTAMP))
2475		sock_enable_timestamp(sk, SOCK_TIMESTAMP);
2476	ts = ktime_to_timespec(sk->sk_stamp);
2477	if (ts.tv_sec == -1)
2478		return -ENOENT;
2479	if (ts.tv_sec == 0) {
2480		sk->sk_stamp = ktime_get_real();
2481		ts = ktime_to_timespec(sk->sk_stamp);
2482	}
2483	return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
2484}
2485EXPORT_SYMBOL(sock_get_timestampns);
2486
2487void sock_enable_timestamp(struct sock *sk, int flag)
2488{
2489	if (!sock_flag(sk, flag)) {
2490		unsigned long previous_flags = sk->sk_flags;
2491
2492		sock_set_flag(sk, flag);
2493		/*
2494		 * we just set one of the two flags which require net
2495		 * time stamping, but time stamping might have been on
2496		 * already because of the other one
2497		 */
2498		if (!(previous_flags & SK_FLAGS_TIMESTAMP))
2499			net_enable_timestamp();
2500	}
2501}
2502
2503int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
2504		       int level, int type)
2505{
2506	struct sock_exterr_skb *serr;
2507	struct sk_buff *skb, *skb2;
2508	int copied, err;
2509
2510	err = -EAGAIN;
2511	skb = skb_dequeue(&sk->sk_error_queue);
2512	if (skb == NULL)
2513		goto out;
2514
2515	copied = skb->len;
2516	if (copied > len) {
2517		msg->msg_flags |= MSG_TRUNC;
2518		copied = len;
2519	}
2520	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
2521	if (err)
2522		goto out_free_skb;
2523
2524	sock_recv_timestamp(msg, sk, skb);
2525
2526	serr = SKB_EXT_ERR(skb);
2527	put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);
2528
2529	msg->msg_flags |= MSG_ERRQUEUE;
2530	err = copied;
2531
2532	/* Reset and regenerate socket error */
2533	spin_lock_bh(&sk->sk_error_queue.lock);
2534	sk->sk_err = 0;
2535	if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) {
2536		sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
2537		spin_unlock_bh(&sk->sk_error_queue.lock);
2538		sk->sk_error_report(sk);
2539	} else
2540		spin_unlock_bh(&sk->sk_error_queue.lock);
2541
2542out_free_skb:
2543	kfree_skb(skb);
2544out:
2545	return err;
2546}
2547EXPORT_SYMBOL(sock_recv_errqueue);
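/*
 * For illustration: a protocol recvmsg() serves the error queue before
 * normal data when MSG_ERRQUEUE is set; e.g. AF_PACKET forwards TX
 * timestamps roughly like this (snippet shown out of context):
 */
#if 0
	if (flags & MSG_ERRQUEUE)
		return sock_recv_errqueue(sk, msg, len,
					  SOL_PACKET, PACKET_TX_TIMESTAMP);
#endif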
2548
2549/*
2550 *	Get a socket option on a socket.
2551 *
2552 *	FIX: POSIX 1003.1g is very ambiguous here. It states that
2553 *	asynchronous errors should be reported by getsockopt. We assume
2554 *	this means if you specify SO_ERROR (otherwise what's the point of it).
2555 */
2556int sock_common_getsockopt(struct socket *sock, int level, int optname,
2557			   char __user *optval, int __user *optlen)
2558{
2559	struct sock *sk = sock->sk;
2560
2561	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
2562}
2563EXPORT_SYMBOL(sock_common_getsockopt);
2564
2565#ifdef CONFIG_COMPAT
2566int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
2567				  char __user *optval, int __user *optlen)
2568{
2569	struct sock *sk = sock->sk;
2570
2571	if (sk->sk_prot->compat_getsockopt != NULL)
2572		return sk->sk_prot->compat_getsockopt(sk, level, optname,
2573						      optval, optlen);
2574	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
2575}
2576EXPORT_SYMBOL(compat_sock_common_getsockopt);
2577#endif
2578
2579int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
2580			struct msghdr *msg, size_t size, int flags)
2581{
2582	struct sock *sk = sock->sk;
2583	int addr_len = 0;
2584	int err;
2585
2586	err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
2587				   flags & ~MSG_DONTWAIT, &addr_len);
2588	if (err >= 0)
2589		msg->msg_namelen = addr_len;
2590	return err;
2591}
2592EXPORT_SYMBOL(sock_common_recvmsg);
2593
2594/*
2595 *	Set socket options on an inet socket.
2596 */
2597int sock_common_setsockopt(struct socket *sock, int level, int optname,
2598			   char __user *optval, unsigned int optlen)
2599{
2600	struct sock *sk = sock->sk;
2601
2602	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
2603}
2604EXPORT_SYMBOL(sock_common_setsockopt);
2605
2606#ifdef CONFIG_COMPAT
2607int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
2608				  char __user *optval, unsigned int optlen)
2609{
2610	struct sock *sk = sock->sk;
2611
2612	if (sk->sk_prot->compat_setsockopt != NULL)
2613		return sk->sk_prot->compat_setsockopt(sk, level, optname,
2614						      optval, optlen);
2615	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
2616}
2617EXPORT_SYMBOL(compat_sock_common_setsockopt);
2618#endif
2619
2620void sk_common_release(struct sock *sk)
2621{
2622	if (sk->sk_prot->destroy)
2623		sk->sk_prot->destroy(sk);
2624
2625	/*
2626	 * Observation: when sk_common_release() is called, processes have
2627	 * no access to the socket, but the network stack still does.
2628	 * Step one, detach it from networking:
2629	 *
2630	 * A. Remove from hash tables.
2631	 */
2632
2633	sk->sk_prot->unhash(sk);
2634
2635	/*
2636	 * At this point the socket cannot receive new packets, but it is possible
2637	 * that some packets are in flight because some CPU is running the receiver
2638	 * and did the hash table lookup before we unhashed the socket. They will
2639	 * reach the receive queue and be purged by the socket destructor.
2640	 *
2641	 * Also we still have packets pending on the receive queue and probably
2642	 * our own packets waiting in device queues. sock_destroy will drain the
2643	 * receive queue, but transmitted packets will delay socket destruction
2644	 * until the last reference is released.
2645	 */
2646
2647	sock_orphan(sk);
2648
2649	xfrm_sk_free_policy(sk);
2650
2651	sk_refcnt_debug_release(sk);
2652
2653	if (sk->sk_frag.page) {
2654		put_page(sk->sk_frag.page);
2655		sk->sk_frag.page = NULL;
2656	}
2657
2658	sock_put(sk);
2659}
2660EXPORT_SYMBOL(sk_common_release);
2661
2662#ifdef CONFIG_PROC_FS
2663#define PROTO_INUSE_NR	64	/* should be enough for the first time */
2664struct prot_inuse {
2665	int val[PROTO_INUSE_NR];
2666};
2667
2668static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);
2669
2670#ifdef CONFIG_NET_NS
2671void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
2672{
2673	__this_cpu_add(net->core.inuse->val[prot->inuse_idx], val);
2674}
2675EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
2676
2677int sock_prot_inuse_get(struct net *net, struct proto *prot)
2678{
2679	int cpu, idx = prot->inuse_idx;
2680	int res = 0;
2681
2682	for_each_possible_cpu(cpu)
2683		res += per_cpu_ptr(net->core.inuse, cpu)->val[idx];
2684
2685	return res >= 0 ? res : 0;
2686}
2687EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
2688
2689static int __net_init sock_inuse_init_net(struct net *net)
2690{
2691	net->core.inuse = alloc_percpu(struct prot_inuse);
2692	return net->core.inuse ? 0 : -ENOMEM;
2693}
2694
2695static void __net_exit sock_inuse_exit_net(struct net *net)
2696{
2697	free_percpu(net->core.inuse);
2698}
2699
2700static struct pernet_operations net_inuse_ops = {
2701	.init = sock_inuse_init_net,
2702	.exit = sock_inuse_exit_net,
2703};
2704
2705static __init int net_inuse_init(void)
2706{
2707	if (register_pernet_subsys(&net_inuse_ops))
2708		panic("Cannot initialize net inuse counters");
2709
2710	return 0;
2711}
2712
2713core_initcall(net_inuse_init);
2714#else
2715static DEFINE_PER_CPU(struct prot_inuse, prot_inuse);
2716
2717void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
2718{
2719	__this_cpu_add(prot_inuse.val[prot->inuse_idx], val);
2720}
2721EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
2722
2723int sock_prot_inuse_get(struct net *net, struct proto *prot)
2724{
2725	int cpu, idx = prot->inuse_idx;
2726	int res = 0;
2727
2728	for_each_possible_cpu(cpu)
2729		res += per_cpu(prot_inuse, cpu).val[idx];
2730
2731	return res >= 0 ? res : 0;
2732}
2733EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
2734#endif
2735
2736static void assign_proto_idx(struct proto *prot)
2737{
2738	prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);
2739
2740	if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
2741		pr_err("PROTO_INUSE_NR exhausted\n");
2742		return;
2743	}
2744
2745	set_bit(prot->inuse_idx, proto_inuse_idx);
2746}
2747
2748static void release_proto_idx(struct proto *prot)
2749{
2750	if (prot->inuse_idx != PROTO_INUSE_NR - 1)
2751		clear_bit(prot->inuse_idx, proto_inuse_idx);
2752}
2753#else
2754static inline void assign_proto_idx(struct proto *prot)
2755{
2756}
2757
2758static inline void release_proto_idx(struct proto *prot)
2759{
2760}
2761#endif
2762
2763int proto_register(struct proto *prot, int alloc_slab)
2764{
2765	if (alloc_slab) {
2766		prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
2767					SLAB_HWCACHE_ALIGN | prot->slab_flags,
2768					NULL);
2769
2770		if (prot->slab == NULL) {
2771			pr_crit("%s: Can't create sock SLAB cache!\n",
2772				prot->name);
2773			goto out;
2774		}
2775
2776		if (prot->rsk_prot != NULL) {
2777			prot->rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s", prot->name);
2778			if (prot->rsk_prot->slab_name == NULL)
2779				goto out_free_sock_slab;
2780
2781			prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name,
2782								 prot->rsk_prot->obj_size, 0,
2783								 SLAB_HWCACHE_ALIGN, NULL);
2784
2785			if (prot->rsk_prot->slab == NULL) {
2786				pr_crit("%s: Can't create request sock SLAB cache!\n",
2787					prot->name);
2788				goto out_free_request_sock_slab_name;
2789			}
2790		}
2791
2792		if (prot->twsk_prot != NULL) {
2793			prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);
2794
2795			if (prot->twsk_prot->twsk_slab_name == NULL)
2796				goto out_free_request_sock_slab;
2797
2798			prot->twsk_prot->twsk_slab =
2799				kmem_cache_create(prot->twsk_prot->twsk_slab_name,
2800						  prot->twsk_prot->twsk_obj_size,
2801						  0,
2802						  SLAB_HWCACHE_ALIGN |
2803							prot->slab_flags,
2804						  NULL);
2805			if (prot->twsk_prot->twsk_slab == NULL)
2806				goto out_free_timewait_sock_slab_name;
2807		}
2808	}
2809
2810	mutex_lock(&proto_list_mutex);
2811	list_add(&prot->node, &proto_list);
2812	assign_proto_idx(prot);
2813	mutex_unlock(&proto_list_mutex);
2814	return 0;
2815
2816out_free_timewait_sock_slab_name:
2817	kfree(prot->twsk_prot->twsk_slab_name);
2818out_free_request_sock_slab:
2819	if (prot->rsk_prot && prot->rsk_prot->slab) {
2820		kmem_cache_destroy(prot->rsk_prot->slab);
2821		prot->rsk_prot->slab = NULL;
2822	}
2823out_free_request_sock_slab_name:
2824	if (prot->rsk_prot)
2825		kfree(prot->rsk_prot->slab_name);
2826out_free_sock_slab:
2827	kmem_cache_destroy(prot->slab);
2828	prot->slab = NULL;
2829out:
2830	return -ENOBUFS;
2831}
2832EXPORT_SYMBOL(proto_register);
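/*
 * Usage sketch, for illustration only (example_prot and struct
 * example_sock are hypothetical): a protocol registers once at module
 * init and unregisters on exit.
 */
#if 0
static struct proto example_prot = {
	.name		= "EXAMPLE",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct example_sock),
};

static int __init example_init(void)
{
	return proto_register(&example_prot, 1);	/* 1: create a slab */
}

static void __exit example_exit(void)
{
	proto_unregister(&example_prot);
}
#endif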
2833
2834void proto_unregister(struct proto *prot)
2835{
2836	mutex_lock(&proto_list_mutex);
2837	release_proto_idx(prot);
2838	list_del(&prot->node);
2839	mutex_unlock(&proto_list_mutex);
2840
2841	if (prot->slab != NULL) {
2842		kmem_cache_destroy(prot->slab);
2843		prot->slab = NULL;
2844	}
2845
2846	if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) {
2847		kmem_cache_destroy(prot->rsk_prot->slab);
2848		kfree(prot->rsk_prot->slab_name);
2849		prot->rsk_prot->slab = NULL;
2850	}
2851
2852	if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
2853		kmem_cache_destroy(prot->twsk_prot->twsk_slab);
2854		kfree(prot->twsk_prot->twsk_slab_name);
2855		prot->twsk_prot->twsk_slab = NULL;
2856	}
2857}
2858EXPORT_SYMBOL(proto_unregister);
2859
2860#ifdef CONFIG_PROC_FS
2861static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
2862	__acquires(proto_list_mutex)
2863{
2864	mutex_lock(&proto_list_mutex);
2865	return seq_list_start_head(&proto_list, *pos);
2866}
2867
2868static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2869{
2870	return seq_list_next(v, &proto_list, pos);
2871}
2872
2873static void proto_seq_stop(struct seq_file *seq, void *v)
2874	__releases(proto_list_mutex)
2875{
2876	mutex_unlock(&proto_list_mutex);
2877}
2878
2879static char proto_method_implemented(const void *method)
2880{
2881	return method == NULL ? 'n' : 'y';
2882}
2883static long sock_prot_memory_allocated(struct proto *proto)
2884{
2885	return proto->memory_allocated != NULL ? proto_memory_allocated(proto) : -1L;
2886}
2887
2888static char *sock_prot_memory_pressure(struct proto *proto)
2889{
2890	return proto->memory_pressure != NULL ?
2891	proto_memory_pressure(proto) ? "yes" : "no" : "NI";
2892}
2893
2894static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
2895{
2896
2897	seq_printf(seq, "%-9s %4u %6d  %6ld   %-3s %6u   %-3s  %-10s "
2898			"%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
2899		   proto->name,
2900		   proto->obj_size,
2901		   sock_prot_inuse_get(seq_file_net(seq), proto),
2902		   sock_prot_memory_allocated(proto),
2903		   sock_prot_memory_pressure(proto),
2904		   proto->max_header,
2905		   proto->slab == NULL ? "no" : "yes",
2906		   module_name(proto->owner),
2907		   proto_method_implemented(proto->close),
2908		   proto_method_implemented(proto->connect),
2909		   proto_method_implemented(proto->disconnect),
2910		   proto_method_implemented(proto->accept),
2911		   proto_method_implemented(proto->ioctl),
2912		   proto_method_implemented(proto->init),
2913		   proto_method_implemented(proto->destroy),
2914		   proto_method_implemented(proto->shutdown),
2915		   proto_method_implemented(proto->setsockopt),
2916		   proto_method_implemented(proto->getsockopt),
2917		   proto_method_implemented(proto->sendmsg),
2918		   proto_method_implemented(proto->recvmsg),
2919		   proto_method_implemented(proto->sendpage),
2920		   proto_method_implemented(proto->bind),
2921		   proto_method_implemented(proto->backlog_rcv),
2922		   proto_method_implemented(proto->hash),
2923		   proto_method_implemented(proto->unhash),
2924		   proto_method_implemented(proto->get_port),
2925		   proto_method_implemented(proto->enter_memory_pressure));
2926}
2927
2928static int proto_seq_show(struct seq_file *seq, void *v)
2929{
2930	if (v == &proto_list)
2931		seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
2932			   "protocol",
2933			   "size",
2934			   "sockets",
2935			   "memory",
2936			   "press",
2937			   "maxhdr",
2938			   "slab",
2939			   "module",
2940			   "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
2941	else
2942		proto_seq_printf(seq, list_entry(v, struct proto, node));
2943	return 0;
2944}
2945
2946static const struct seq_operations proto_seq_ops = {
2947	.start  = proto_seq_start,
2948	.next   = proto_seq_next,
2949	.stop   = proto_seq_stop,
2950	.show   = proto_seq_show,
2951};
2952
2953static int proto_seq_open(struct inode *inode, struct file *file)
2954{
2955	return seq_open_net(inode, file, &proto_seq_ops,
2956			    sizeof(struct seq_net_private));
2957}
2958
2959static const struct file_operations proto_seq_fops = {
2960	.owner		= THIS_MODULE,
2961	.open		= proto_seq_open,
2962	.read		= seq_read,
2963	.llseek		= seq_lseek,
2964	.release	= seq_release_net,
2965};
2966
2967static __net_init int proto_init_net(struct net *net)
2968{
2969	if (!proc_create("protocols", S_IRUGO, net->proc_net, &proto_seq_fops))
2970		return -ENOMEM;
2971
2972	return 0;
2973}
2974
2975static __net_exit void proto_exit_net(struct net *net)
2976{
2977	remove_proc_entry("protocols", net->proc_net);
2978}
2979
2980
2981static __net_initdata struct pernet_operations proto_net_ops = {
2982	.init = proto_init_net,
2983	.exit = proto_exit_net,
2984};
2985
2986static int __init proto_init(void)
2987{
2988	return register_pernet_subsys(&proto_net_ops);
2989}
2990
2991subsys_initcall(proto_init);
2992
2993#endif /* PROC_FS */
v3.5.6
   1/*
   2 * INET		An implementation of the TCP/IP protocol suite for the LINUX
   3 *		operating system.  INET is implemented using the  BSD Socket
   4 *		interface as the means of communication with the user level.
   5 *
   6 *		Generic socket support routines. Memory allocators, socket lock/release
   7 *		handler for protocols to use and generic option handler.
   8 *
   9 *
  10 * Authors:	Ross Biro
  11 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  12 *		Florian La Roche, <flla@stud.uni-sb.de>
  13 *		Alan Cox, <A.Cox@swansea.ac.uk>
  14 *
  15 * Fixes:
  16 *		Alan Cox	: 	Numerous verify_area() problems
  17 *		Alan Cox	:	Connecting on a connecting socket
  18 *					now returns an error for tcp.
  19 *		Alan Cox	:	sock->protocol is set correctly.
  20 *					and is not sometimes left as 0.
  21 *		Alan Cox	:	connect handles icmp errors on a
  22 *					connect properly. Unfortunately there
  23 *					is a restart syscall nasty there. I
  24 *					can't match BSD without hacking the C
  25 *					library. Ideas urgently sought!
  26 *		Alan Cox	:	Disallow bind() to addresses that are
  27 *					not ours - especially broadcast ones!!
  28 *		Alan Cox	:	Socket 1024 _IS_ ok for users. (fencepost)
  29 *		Alan Cox	:	sock_wfree/sock_rfree don't destroy sockets,
  30 *					instead they leave that for the DESTROY timer.
  31 *		Alan Cox	:	Clean up error flag in accept
  32 *		Alan Cox	:	TCP ack handling is buggy, the DESTROY timer
  33 *					was buggy. Put a remove_sock() in the handler
  34 *					for memory when we hit 0. Also altered the timer
  35 *					code. The ACK stuff can wait and needs major
  36 *					TCP layer surgery.
  37 *		Alan Cox	:	Fixed TCP ack bug, removed remove sock
  38 *					and fixed timer/inet_bh race.
  39 *		Alan Cox	:	Added zapped flag for TCP
  40 *		Alan Cox	:	Move kfree_skb into skbuff.c and tidied up surplus code
  41 *		Alan Cox	:	for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
  42 *		Alan Cox	:	kfree_s calls now are kfree_skbmem so we can track skb resources
  43 *		Alan Cox	:	Supports socket option broadcast now as does udp. Packet and raw need fixing.
  44 *		Alan Cox	:	Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
  45 *		Rick Sladkey	:	Relaxed UDP rules for matching packets.
  46 *		C.E.Hawkins	:	IFF_PROMISC/SIOCGHWADDR support
  47 *	Pauline Middelink	:	identd support
  48 *		Alan Cox	:	Fixed connect() taking signals I think.
  49 *		Alan Cox	:	SO_LINGER supported
  50 *		Alan Cox	:	Error reporting fixes
  51 *		Anonymous	:	inet_create tidied up (sk->reuse setting)
  52 *		Alan Cox	:	inet sockets don't set sk->type!
  53 *		Alan Cox	:	Split socket option code
  54 *		Alan Cox	:	Callbacks
  55 *		Alan Cox	:	Nagle flag for Charles & Johannes stuff
  56 *		Alex		:	Removed restriction on inet fioctl
  57 *		Alan Cox	:	Splitting INET from NET core
  58 *		Alan Cox	:	Fixed bogus SO_TYPE handling in getsockopt()
  59 *		Adam Caldwell	:	Missing return in SO_DONTROUTE/SO_DEBUG code
  60 *		Alan Cox	:	Split IP from generic code
  61 *		Alan Cox	:	New kfree_skbmem()
  62 *		Alan Cox	:	Make SO_DEBUG superuser only.
  63 *		Alan Cox	:	Allow anyone to clear SO_DEBUG
  64 *					(compatibility fix)
  65 *		Alan Cox	:	Added optimistic memory grabbing for AF_UNIX throughput.
  66 *		Alan Cox	:	Allocator for a socket is settable.
  67 *		Alan Cox	:	SO_ERROR includes soft errors.
  68 *		Alan Cox	:	Allow NULL arguments on some SO_ opts
  69 *		Alan Cox	: 	Generic socket allocation to make hooks
  70 *					easier (suggested by Craig Metz).
  71 *		Michael Pall	:	SO_ERROR returns positive errno again
  72 *              Steve Whitehouse:       Added default destructor to free
  73 *                                      protocol private data.
  74 *              Steve Whitehouse:       Added various other default routines
  75 *                                      common to several socket families.
  76 *              Chris Evans     :       Call suser() check last on F_SETOWN
  77 *		Jay Schulist	:	Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
  78 *		Andi Kleen	:	Add sock_kmalloc()/sock_kfree_s()
  79 *		Andi Kleen	:	Fix write_space callback
  80 *		Chris Evans	:	Security fixes - signedness again
  81 *		Arnaldo C. Melo :       cleanups, use skb_queue_purge
  82 *
  83 * To Fix:
  84 *
  85 *
  86 *		This program is free software; you can redistribute it and/or
  87 *		modify it under the terms of the GNU General Public License
  88 *		as published by the Free Software Foundation; either version
  89 *		2 of the License, or (at your option) any later version.
  90 */
  91
  92#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  93
  94#include <linux/capability.h>
  95#include <linux/errno.h>
 
  96#include <linux/types.h>
  97#include <linux/socket.h>
  98#include <linux/in.h>
  99#include <linux/kernel.h>
 100#include <linux/module.h>
 101#include <linux/proc_fs.h>
 102#include <linux/seq_file.h>
 103#include <linux/sched.h>
 104#include <linux/timer.h>
 105#include <linux/string.h>
 106#include <linux/sockios.h>
 107#include <linux/net.h>
 108#include <linux/mm.h>
 109#include <linux/slab.h>
 110#include <linux/interrupt.h>
 111#include <linux/poll.h>
 112#include <linux/tcp.h>
 113#include <linux/init.h>
 114#include <linux/highmem.h>
 115#include <linux/user_namespace.h>
 116#include <linux/static_key.h>
 117#include <linux/memcontrol.h>
 118#include <linux/prefetch.h>
 119
 120#include <asm/uaccess.h>
 121
 122#include <linux/netdevice.h>
 123#include <net/protocol.h>
 124#include <linux/skbuff.h>
 125#include <net/net_namespace.h>
 126#include <net/request_sock.h>
 127#include <net/sock.h>
 128#include <linux/net_tstamp.h>
 129#include <net/xfrm.h>
 130#include <linux/ipsec.h>
 131#include <net/cls_cgroup.h>
 132#include <net/netprio_cgroup.h>
 133
 134#include <linux/filter.h>
 135
 136#include <trace/events/sock.h>
 137
 138#ifdef CONFIG_INET
 139#include <net/tcp.h>
 140#endif
 141
 
 
 142static DEFINE_MUTEX(proto_list_mutex);
 143static LIST_HEAD(proto_list);
 144
 145#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 146int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
 147{
 148	struct proto *proto;
 149	int ret = 0;
 150
 151	mutex_lock(&proto_list_mutex);
 152	list_for_each_entry(proto, &proto_list, node) {
 153		if (proto->init_cgroup) {
 154			ret = proto->init_cgroup(memcg, ss);
 155			if (ret)
 156				goto out;
 157		}
 158	}
 159
 160	mutex_unlock(&proto_list_mutex);
 161	return ret;
 162out:
 163	list_for_each_entry_continue_reverse(proto, &proto_list, node)
 164		if (proto->destroy_cgroup)
 165			proto->destroy_cgroup(memcg);
 166	mutex_unlock(&proto_list_mutex);
 167	return ret;
 168}
 169
 170void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg)
 171{
 172	struct proto *proto;
 173
 174	mutex_lock(&proto_list_mutex);
 175	list_for_each_entry_reverse(proto, &proto_list, node)
 176		if (proto->destroy_cgroup)
 177			proto->destroy_cgroup(memcg);
 178	mutex_unlock(&proto_list_mutex);
 179}
 180#endif
 181
 182/*
 183 * Each address family might have different locking rules, so we have
 184 * one slock key per address family:
 185 */
 186static struct lock_class_key af_family_keys[AF_MAX];
 187static struct lock_class_key af_family_slock_keys[AF_MAX];
 188
 
 189struct static_key memcg_socket_limit_enabled;
 190EXPORT_SYMBOL(memcg_socket_limit_enabled);
 
 191
 192/*
 193 * Make lock validator output more readable. (we pre-construct these
 194 * strings build-time, so that runtime initialization of socket
 195 * locks is fast):
 196 */
 197static const char *const af_family_key_strings[AF_MAX+1] = {
 198  "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX"     , "sk_lock-AF_INET"     ,
 199  "sk_lock-AF_AX25"  , "sk_lock-AF_IPX"      , "sk_lock-AF_APPLETALK",
 200  "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE"   , "sk_lock-AF_ATMPVC"   ,
 201  "sk_lock-AF_X25"   , "sk_lock-AF_INET6"    , "sk_lock-AF_ROSE"     ,
 202  "sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI"  , "sk_lock-AF_SECURITY" ,
 203  "sk_lock-AF_KEY"   , "sk_lock-AF_NETLINK"  , "sk_lock-AF_PACKET"   ,
 204  "sk_lock-AF_ASH"   , "sk_lock-AF_ECONET"   , "sk_lock-AF_ATMSVC"   ,
 205  "sk_lock-AF_RDS"   , "sk_lock-AF_SNA"      , "sk_lock-AF_IRDA"     ,
 206  "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE"  , "sk_lock-AF_LLC"      ,
 207  "sk_lock-27"       , "sk_lock-28"          , "sk_lock-AF_CAN"      ,
 208  "sk_lock-AF_TIPC"  , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV"        ,
 209  "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN"     , "sk_lock-AF_PHONET"   ,
 210  "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG"      ,
 211  "sk_lock-AF_NFC"   , "sk_lock-AF_MAX"
 212};
 213static const char *const af_family_slock_key_strings[AF_MAX+1] = {
 214  "slock-AF_UNSPEC", "slock-AF_UNIX"     , "slock-AF_INET"     ,
 215  "slock-AF_AX25"  , "slock-AF_IPX"      , "slock-AF_APPLETALK",
 216  "slock-AF_NETROM", "slock-AF_BRIDGE"   , "slock-AF_ATMPVC"   ,
 217  "slock-AF_X25"   , "slock-AF_INET6"    , "slock-AF_ROSE"     ,
 218  "slock-AF_DECnet", "slock-AF_NETBEUI"  , "slock-AF_SECURITY" ,
 219  "slock-AF_KEY"   , "slock-AF_NETLINK"  , "slock-AF_PACKET"   ,
 220  "slock-AF_ASH"   , "slock-AF_ECONET"   , "slock-AF_ATMSVC"   ,
 221  "slock-AF_RDS"   , "slock-AF_SNA"      , "slock-AF_IRDA"     ,
 222  "slock-AF_PPPOX" , "slock-AF_WANPIPE"  , "slock-AF_LLC"      ,
 223  "slock-27"       , "slock-28"          , "slock-AF_CAN"      ,
 224  "slock-AF_TIPC"  , "slock-AF_BLUETOOTH", "slock-AF_IUCV"     ,
 225  "slock-AF_RXRPC" , "slock-AF_ISDN"     , "slock-AF_PHONET"   ,
 226  "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG"      ,
 227  "slock-AF_NFC"   , "slock-AF_MAX"
 228};
 229static const char *const af_family_clock_key_strings[AF_MAX+1] = {
 230  "clock-AF_UNSPEC", "clock-AF_UNIX"     , "clock-AF_INET"     ,
 231  "clock-AF_AX25"  , "clock-AF_IPX"      , "clock-AF_APPLETALK",
 232  "clock-AF_NETROM", "clock-AF_BRIDGE"   , "clock-AF_ATMPVC"   ,
 233  "clock-AF_X25"   , "clock-AF_INET6"    , "clock-AF_ROSE"     ,
 234  "clock-AF_DECnet", "clock-AF_NETBEUI"  , "clock-AF_SECURITY" ,
 235  "clock-AF_KEY"   , "clock-AF_NETLINK"  , "clock-AF_PACKET"   ,
 236  "clock-AF_ASH"   , "clock-AF_ECONET"   , "clock-AF_ATMSVC"   ,
 237  "clock-AF_RDS"   , "clock-AF_SNA"      , "clock-AF_IRDA"     ,
 238  "clock-AF_PPPOX" , "clock-AF_WANPIPE"  , "clock-AF_LLC"      ,
 239  "clock-27"       , "clock-28"          , "clock-AF_CAN"      ,
 240  "clock-AF_TIPC"  , "clock-AF_BLUETOOTH", "clock-AF_IUCV"     ,
 241  "clock-AF_RXRPC" , "clock-AF_ISDN"     , "clock-AF_PHONET"   ,
 242  "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG"      ,
 243  "clock-AF_NFC"   , "clock-AF_MAX"
 244};
 245
 246/*
 247 * sk_callback_lock locking rules are per-address-family,
 248 * so split the lock classes by using a per-AF key:
 249 */
 250static struct lock_class_key af_callback_keys[AF_MAX];
 251
 252/* Take into consideration the size of the struct sk_buff overhead in the
 253 * determination of these values, since that is non-constant across
 254 * platforms.  This makes socket queueing behavior and performance
 255 * not depend upon such differences.
 256 */
 257#define _SK_MEM_PACKETS		256
 258#define _SK_MEM_OVERHEAD	SKB_TRUESIZE(256)
 259#define SK_WMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
 260#define SK_RMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
 261
 262/* Run time adjustable parameters. */
 263__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
 264EXPORT_SYMBOL(sysctl_wmem_max);
 265__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
 266EXPORT_SYMBOL(sysctl_rmem_max);
 267__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
 268__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;
 269
 270/* Maximal space eaten by iovec or ancillary data plus some space */
 271int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
 272EXPORT_SYMBOL(sysctl_optmem_max);
 273
 274#if defined(CONFIG_CGROUPS)
 275#if !defined(CONFIG_NET_CLS_CGROUP)
 276int net_cls_subsys_id = -1;
 277EXPORT_SYMBOL_GPL(net_cls_subsys_id);
 278#endif
 279#if !defined(CONFIG_NETPRIO_CGROUP)
 280int net_prio_subsys_id = -1;
 281EXPORT_SYMBOL_GPL(net_prio_subsys_id);
 282#endif
 283#endif
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 284
 285static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
 286{
 287	struct timeval tv;
 288
 289	if (optlen < sizeof(tv))
 290		return -EINVAL;
 291	if (copy_from_user(&tv, optval, sizeof(tv)))
 292		return -EFAULT;
 293	if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
 294		return -EDOM;
 295
 296	if (tv.tv_sec < 0) {
 297		static int warned __read_mostly;
 298
 299		*timeo_p = 0;
 300		if (warned < 10 && net_ratelimit()) {
 301			warned++;
 302			pr_info("%s: `%s' (pid %d) tries to set negative timeout\n",
 303				__func__, current->comm, task_pid_nr(current));
 304		}
 305		return 0;
 306	}
 307	*timeo_p = MAX_SCHEDULE_TIMEOUT;
 308	if (tv.tv_sec == 0 && tv.tv_usec == 0)
 309		return 0;
 310	if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
 311		*timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
 312	return 0;
 313}
 314
 315static void sock_warn_obsolete_bsdism(const char *name)
 316{
 317	static int warned;
 318	static char warncomm[TASK_COMM_LEN];
 319	if (strcmp(warncomm, current->comm) && warned < 5) {
 320		strcpy(warncomm,  current->comm);
 321		pr_warn("process `%s' is using obsolete %s SO_BSDCOMPAT\n",
 322			warncomm, name);
 323		warned++;
 324	}
 325}
 326
 327#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))
 328
 329static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
 330{
 331	if (sk->sk_flags & flags) {
 332		sk->sk_flags &= ~flags;
 333		if (!(sk->sk_flags & SK_FLAGS_TIMESTAMP))
 334			net_disable_timestamp();
 335	}
 336}
 337
 338
 339int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 340{
 341	int err;
 342	int skb_len;
 343	unsigned long flags;
 344	struct sk_buff_head *list = &sk->sk_receive_queue;
 345
 346	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
 347		atomic_inc(&sk->sk_drops);
 348		trace_sock_rcvqueue_full(sk, skb);
 349		return -ENOMEM;
 350	}
 351
 352	err = sk_filter(sk, skb);
 353	if (err)
 354		return err;
 355
 356	if (!sk_rmem_schedule(sk, skb->truesize)) {
 357		atomic_inc(&sk->sk_drops);
 358		return -ENOBUFS;
 359	}
 360
 361	skb->dev = NULL;
 362	skb_set_owner_r(skb, sk);
 363
 364	/* Cache the SKB length before we tack it onto the receive
 365	 * queue.  Once it is added it no longer belongs to us and
 366	 * may be freed by other threads of control pulling packets
 367	 * from the queue.
 368	 */
 369	skb_len = skb->len;
 370
 371	/* we escape from rcu protected region, make sure we dont leak
 372	 * a norefcounted dst
 373	 */
 374	skb_dst_force(skb);
 375
 376	spin_lock_irqsave(&list->lock, flags);
 377	skb->dropcount = atomic_read(&sk->sk_drops);
 378	__skb_queue_tail(list, skb);
 379	spin_unlock_irqrestore(&list->lock, flags);
 380
 381	if (!sock_flag(sk, SOCK_DEAD))
 382		sk->sk_data_ready(sk, skb_len);
 383	return 0;
 384}
 385EXPORT_SYMBOL(sock_queue_rcv_skb);
 386
 387int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
 388{
 389	int rc = NET_RX_SUCCESS;
 390
 391	if (sk_filter(sk, skb))
 392		goto discard_and_relse;
 393
 394	skb->dev = NULL;
 395
 396	if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
 397		atomic_inc(&sk->sk_drops);
 398		goto discard_and_relse;
 399	}
 400	if (nested)
 401		bh_lock_sock_nested(sk);
 402	else
 403		bh_lock_sock(sk);
 404	if (!sock_owned_by_user(sk)) {
 405		/*
 406		 * trylock + unlock semantics:
 407		 */
 408		mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);
 409
 410		rc = sk_backlog_rcv(sk, skb);
 411
 412		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
 413	} else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
 414		bh_unlock_sock(sk);
 415		atomic_inc(&sk->sk_drops);
 416		goto discard_and_relse;
 417	}
 418
 419	bh_unlock_sock(sk);
 420out:
 421	sock_put(sk);
 422	return rc;
 423discard_and_relse:
 424	kfree_skb(skb);
 425	goto out;
 426}
 427EXPORT_SYMBOL(sk_receive_skb);
 428
 429void sk_reset_txq(struct sock *sk)
 430{
 431	sk_tx_queue_clear(sk);
 432}
 433EXPORT_SYMBOL(sk_reset_txq);
 434
 435struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
 436{
 437	struct dst_entry *dst = __sk_dst_get(sk);
 438
 439	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
 440		sk_tx_queue_clear(sk);
 441		RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
 442		dst_release(dst);
 443		return NULL;
 444	}
 445
 446	return dst;
 447}
 448EXPORT_SYMBOL(__sk_dst_check);
 449
 450struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
 451{
 452	struct dst_entry *dst = sk_dst_get(sk);
 453
 454	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
 455		sk_dst_reset(sk);
 456		dst_release(dst);
 457		return NULL;
 458	}
 459
 460	return dst;
 461}
 462EXPORT_SYMBOL(sk_dst_check);
 463
 464static int sock_bindtodevice(struct sock *sk, char __user *optval, int optlen)
 
 465{
 466	int ret = -ENOPROTOOPT;
 467#ifdef CONFIG_NETDEVICES
 468	struct net *net = sock_net(sk);
 469	char devname[IFNAMSIZ];
 470	int index;
 471
 472	/* Sorry... */
 473	ret = -EPERM;
 474	if (!capable(CAP_NET_RAW))
 475		goto out;
 476
 477	ret = -EINVAL;
 478	if (optlen < 0)
 479		goto out;
 480
 481	/* Bind this socket to a particular device like "eth0",
 482	 * as specified in the passed interface name. If the
 483	 * name is "" or the option length is zero the socket
 484	 * is not bound.
 485	 */
 486	if (optlen > IFNAMSIZ - 1)
 487		optlen = IFNAMSIZ - 1;
 488	memset(devname, 0, sizeof(devname));
 489
 490	ret = -EFAULT;
 491	if (copy_from_user(devname, optval, optlen))
 492		goto out;
 493
 494	index = 0;
 495	if (devname[0] != '\0') {
 496		struct net_device *dev;
 497
 498		rcu_read_lock();
 499		dev = dev_get_by_name_rcu(net, devname);
 500		if (dev)
 501			index = dev->ifindex;
 502		rcu_read_unlock();
 503		ret = -ENODEV;
 504		if (!dev)
 505			goto out;
 506	}
 507
 508	lock_sock(sk);
 509	sk->sk_bound_dev_if = index;
 510	sk_dst_reset(sk);
 511	release_sock(sk);
 512
 513	ret = 0;
 514
 515out:
 516#endif
 517
 518	return ret;
 519}
 520
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 521static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
 522{
 523	if (valbool)
 524		sock_set_flag(sk, bit);
 525	else
 526		sock_reset_flag(sk, bit);
 527}
 528
 529/*
 530 *	This is meant for all protocols to use and covers goings on
 531 *	at the socket level. Everything here is generic.
 532 */
 533
 534int sock_setsockopt(struct socket *sock, int level, int optname,
 535		    char __user *optval, unsigned int optlen)
 536{
 537	struct sock *sk = sock->sk;
 538	int val;
 539	int valbool;
 540	struct linger ling;
 541	int ret = 0;
 542
 543	/*
 544	 *	Options without arguments
 545	 */
 546
 547	if (optname == SO_BINDTODEVICE)
 548		return sock_bindtodevice(sk, optval, optlen);
 549
 550	if (optlen < sizeof(int))
 551		return -EINVAL;
 552
 553	if (get_user(val, (int __user *)optval))
 554		return -EFAULT;
 555
 556	valbool = val ? 1 : 0;
 557
 558	lock_sock(sk);
 559
 560	switch (optname) {
 561	case SO_DEBUG:
 562		if (val && !capable(CAP_NET_ADMIN))
 563			ret = -EACCES;
 564		else
 565			sock_valbool_flag(sk, SOCK_DBG, valbool);
 566		break;
 567	case SO_REUSEADDR:
 568		sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
 569		break;
 
 
 
 570	case SO_TYPE:
 571	case SO_PROTOCOL:
 572	case SO_DOMAIN:
 573	case SO_ERROR:
 574		ret = -ENOPROTOOPT;
 575		break;
 576	case SO_DONTROUTE:
 577		sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
 578		break;
 579	case SO_BROADCAST:
 580		sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
 581		break;
 582	case SO_SNDBUF:
 583		/* Don't error on this BSD doesn't and if you think
 584		 * about it this is right. Otherwise apps have to
 585		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
 586		 * are treated in BSD as hints
 587		 */
 588		val = min_t(u32, val, sysctl_wmem_max);
 589set_sndbuf:
 590		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
 591		sk->sk_sndbuf = max_t(u32, val * 2, SOCK_MIN_SNDBUF);
 592		/* Wake up sending tasks if we upped the value. */
 593		sk->sk_write_space(sk);
 594		break;
 595
 596	case SO_SNDBUFFORCE:
 597		if (!capable(CAP_NET_ADMIN)) {
 598			ret = -EPERM;
 599			break;
 600		}
 601		goto set_sndbuf;
 602
 603	case SO_RCVBUF:
 604		/* Don't error on this BSD doesn't and if you think
 605		 * about it this is right. Otherwise apps have to
 606		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
 607		 * are treated in BSD as hints
 608		 */
 609		val = min_t(u32, val, sysctl_rmem_max);
 610set_rcvbuf:
 611		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
 612		/*
 613		 * We double it on the way in to account for
 614		 * "struct sk_buff" etc. overhead.   Applications
 615		 * assume that the SO_RCVBUF setting they make will
 616		 * allow that much actual data to be received on that
 617		 * socket.
 618		 *
 619		 * Applications are unaware that "struct sk_buff" and
 620		 * other overheads allocate from the receive buffer
 621		 * during socket buffer allocation.
 622		 *
 623		 * And after considering the possible alternatives,
 624		 * returning the value we actually used in getsockopt
 625		 * is the most desirable behavior.
 626		 */
 627		sk->sk_rcvbuf = max_t(u32, val * 2, SOCK_MIN_RCVBUF);
 628		break;
 629
 630	case SO_RCVBUFFORCE:
 631		if (!capable(CAP_NET_ADMIN)) {
 632			ret = -EPERM;
 633			break;
 634		}
 635		goto set_rcvbuf;
 636
 637	case SO_KEEPALIVE:
 638#ifdef CONFIG_INET
 639		if (sk->sk_protocol == IPPROTO_TCP)
 
 640			tcp_set_keepalive(sk, valbool);
 641#endif
 642		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
 643		break;
 644
 645	case SO_OOBINLINE:
 646		sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
 647		break;
 648
 649	case SO_NO_CHECK:
 650		sk->sk_no_check = valbool;
 651		break;
 652
 653	case SO_PRIORITY:
 654		if ((val >= 0 && val <= 6) || capable(CAP_NET_ADMIN))
 
 655			sk->sk_priority = val;
 656		else
 657			ret = -EPERM;
 658		break;
 659
 660	case SO_LINGER:
 661		if (optlen < sizeof(ling)) {
 662			ret = -EINVAL;	/* 1003.1g */
 663			break;
 664		}
 665		if (copy_from_user(&ling, optval, sizeof(ling))) {
 666			ret = -EFAULT;
 667			break;
 668		}
 669		if (!ling.l_onoff)
 670			sock_reset_flag(sk, SOCK_LINGER);
 671		else {
 672#if (BITS_PER_LONG == 32)
 673			if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
 674				sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
 675			else
 676#endif
 677				sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
 678			sock_set_flag(sk, SOCK_LINGER);
 679		}
 680		break;
 681
 682	case SO_BSDCOMPAT:
 683		sock_warn_obsolete_bsdism("setsockopt");
 684		break;
 685
 686	case SO_PASSCRED:
 687		if (valbool)
 688			set_bit(SOCK_PASSCRED, &sock->flags);
 689		else
 690			clear_bit(SOCK_PASSCRED, &sock->flags);
 691		break;
 692
 693	case SO_TIMESTAMP:
 694	case SO_TIMESTAMPNS:
 695		if (valbool)  {
 696			if (optname == SO_TIMESTAMP)
 697				sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
 698			else
 699				sock_set_flag(sk, SOCK_RCVTSTAMPNS);
 700			sock_set_flag(sk, SOCK_RCVTSTAMP);
 701			sock_enable_timestamp(sk, SOCK_TIMESTAMP);
 702		} else {
 703			sock_reset_flag(sk, SOCK_RCVTSTAMP);
 704			sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
 705		}
 706		break;
 707
 708	case SO_TIMESTAMPING:
 709		if (val & ~SOF_TIMESTAMPING_MASK) {
 710			ret = -EINVAL;
 711			break;
 712		}
 713		sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE,
 714				  val & SOF_TIMESTAMPING_TX_HARDWARE);
 715		sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE,
 716				  val & SOF_TIMESTAMPING_TX_SOFTWARE);
 717		sock_valbool_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE,
 718				  val & SOF_TIMESTAMPING_RX_HARDWARE);
 719		if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
 720			sock_enable_timestamp(sk,
 721					      SOCK_TIMESTAMPING_RX_SOFTWARE);
 722		else
 723			sock_disable_timestamp(sk,
 724					       (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
 725		sock_valbool_flag(sk, SOCK_TIMESTAMPING_SOFTWARE,
 726				  val & SOF_TIMESTAMPING_SOFTWARE);
 727		sock_valbool_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE,
 728				  val & SOF_TIMESTAMPING_SYS_HARDWARE);
 729		sock_valbool_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE,
 730				  val & SOF_TIMESTAMPING_RAW_HARDWARE);
 731		break;
 732
 733	case SO_RCVLOWAT:
 734		if (val < 0)
 735			val = INT_MAX;
 736		sk->sk_rcvlowat = val ? : 1;
 737		break;
 738
 739	case SO_RCVTIMEO:
 740		ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
 741		break;
 742
 743	case SO_SNDTIMEO:
 744		ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
 745		break;
 746
 747	case SO_ATTACH_FILTER:
 748		ret = -EINVAL;
 749		if (optlen == sizeof(struct sock_fprog)) {
 750			struct sock_fprog fprog;
 751
 752			ret = -EFAULT;
 753			if (copy_from_user(&fprog, optval, sizeof(fprog)))
 754				break;
 755
 756			ret = sk_attach_filter(&fprog, sk);
 757		}
 758		break;
 759
 760	case SO_DETACH_FILTER:
 761		ret = sk_detach_filter(sk);
 762		break;
 763
 
 
 
 
 
 
 
 764	case SO_PASSSEC:
 765		if (valbool)
 766			set_bit(SOCK_PASSSEC, &sock->flags);
 767		else
 768			clear_bit(SOCK_PASSSEC, &sock->flags);
 769		break;
 770	case SO_MARK:
 771		if (!capable(CAP_NET_ADMIN))
 772			ret = -EPERM;
 773		else
 774			sk->sk_mark = val;
 775		break;
 776
 777		/* We implement the SO_SNDLOWAT etc to
 778		   not be settable (1003.1g 5.3) */
 779	case SO_RXQ_OVFL:
 780		sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
 781		break;
 782
 783	case SO_WIFI_STATUS:
 784		sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
 785		break;
 786
 787	case SO_PEEK_OFF:
 788		if (sock->ops->set_peek_off)
 789			sock->ops->set_peek_off(sk, val);
 790		else
 791			ret = -EOPNOTSUPP;
 792		break;
 793
 794	case SO_NOFCS:
 795		sock_valbool_flag(sk, SOCK_NOFCS, valbool);
 796		break;
 797
 798	default:
 799		ret = -ENOPROTOOPT;
 800		break;
 801	}
 802	release_sock(sk);
 803	return ret;
 804}
 805EXPORT_SYMBOL(sock_setsockopt);
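
/*
 * Usage sketch (illustrative, not part of this file): how the SO_LINGER
 * branch above is reached from userspace; fd is assumed to be a
 * connected TCP socket and 5 is an arbitrary linger timeout in seconds.
 *
 *	struct linger ling = { .l_onoff = 1, .l_linger = 5 };
 *
 *	if (setsockopt(fd, SOL_SOCKET, SO_LINGER, &ling, sizeof(ling)) < 0)
 *		perror("setsockopt(SO_LINGER)");
 */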
 806
 807
 808void cred_to_ucred(struct pid *pid, const struct cred *cred,
 809		   struct ucred *ucred)
 810{
 811	ucred->pid = pid_vnr(pid);
 812	ucred->uid = ucred->gid = -1;
 813	if (cred) {
 814		struct user_namespace *current_ns = current_user_ns();
 815
 816		ucred->uid = from_kuid(current_ns, cred->euid);
 817		ucred->gid = from_kgid(current_ns, cred->egid);
 818	}
 819}
 820EXPORT_SYMBOL_GPL(cred_to_ucred);
 821
 822int sock_getsockopt(struct socket *sock, int level, int optname,
 823		    char __user *optval, int __user *optlen)
 824{
 825	struct sock *sk = sock->sk;
 826
 827	union {
 828		int val;
 829		struct linger ling;
 830		struct timeval tm;
 831	} v;
 832
 833	int lv = sizeof(int);
 834	int len;
 835
 836	if (get_user(len, optlen))
 837		return -EFAULT;
 838	if (len < 0)
 839		return -EINVAL;
 840
 841	memset(&v, 0, sizeof(v));
 842
 843	switch (optname) {
 844	case SO_DEBUG:
 845		v.val = sock_flag(sk, SOCK_DBG);
 846		break;
 847
 848	case SO_DONTROUTE:
 849		v.val = sock_flag(sk, SOCK_LOCALROUTE);
 850		break;
 851
 852	case SO_BROADCAST:
 853		v.val = sock_flag(sk, SOCK_BROADCAST);
 854		break;
 855
 856	case SO_SNDBUF:
 857		v.val = sk->sk_sndbuf;
 858		break;
 859
 860	case SO_RCVBUF:
 861		v.val = sk->sk_rcvbuf;
 862		break;
 863
 864	case SO_REUSEADDR:
 865		v.val = sk->sk_reuse;
 866		break;
 867
 868	case SO_KEEPALIVE:
 869		v.val = sock_flag(sk, SOCK_KEEPOPEN);
 870		break;
 871
 872	case SO_TYPE:
 873		v.val = sk->sk_type;
 874		break;
 875
 876	case SO_PROTOCOL:
 877		v.val = sk->sk_protocol;
 878		break;
 879
 880	case SO_DOMAIN:
 881		v.val = sk->sk_family;
 882		break;
 883
 884	case SO_ERROR:
 885		v.val = -sock_error(sk);
 886		if (v.val == 0)
 887			v.val = xchg(&sk->sk_err_soft, 0);
 888		break;
 889
 890	case SO_OOBINLINE:
 891		v.val = sock_flag(sk, SOCK_URGINLINE);
 892		break;
 893
 894	case SO_NO_CHECK:
 895		v.val = sk->sk_no_check;
 896		break;
 897
 898	case SO_PRIORITY:
 899		v.val = sk->sk_priority;
 900		break;
 901
 902	case SO_LINGER:
 903		lv		= sizeof(v.ling);
 904		v.ling.l_onoff	= sock_flag(sk, SOCK_LINGER);
 905		v.ling.l_linger	= sk->sk_lingertime / HZ;
 906		break;
 907
 908	case SO_BSDCOMPAT:
 909		sock_warn_obsolete_bsdism("getsockopt");
 910		break;
 911
 912	case SO_TIMESTAMP:
 913		v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
 914				!sock_flag(sk, SOCK_RCVTSTAMPNS);
 915		break;
 916
 917	case SO_TIMESTAMPNS:
 918		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
 919		break;
 920
 921	case SO_TIMESTAMPING:
 922		v.val = 0;
 923		if (sock_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE))
 924			v.val |= SOF_TIMESTAMPING_TX_HARDWARE;
 925		if (sock_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE))
 926			v.val |= SOF_TIMESTAMPING_TX_SOFTWARE;
 927		if (sock_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE))
 928			v.val |= SOF_TIMESTAMPING_RX_HARDWARE;
 929		if (sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE))
 930			v.val |= SOF_TIMESTAMPING_RX_SOFTWARE;
 931		if (sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE))
 932			v.val |= SOF_TIMESTAMPING_SOFTWARE;
 933		if (sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE))
 934			v.val |= SOF_TIMESTAMPING_SYS_HARDWARE;
 935		if (sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE))
 936			v.val |= SOF_TIMESTAMPING_RAW_HARDWARE;
 937		break;
 938
 939	case SO_RCVTIMEO:
 940		lv = sizeof(struct timeval);
 941		if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
 942			v.tm.tv_sec = 0;
 943			v.tm.tv_usec = 0;
 944		} else {
 945			v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
 946			v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
 947		}
 948		break;
 949
 950	case SO_SNDTIMEO:
 951		lv = sizeof(struct timeval);
 952		if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
 953			v.tm.tv_sec = 0;
 954			v.tm.tv_usec = 0;
 955		} else {
 956			v.tm.tv_sec = sk->sk_sndtimeo / HZ;
 957			v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
 958		}
 959		break;
 960
 961	case SO_RCVLOWAT:
 962		v.val = sk->sk_rcvlowat;
 963		break;
 964
 965	case SO_SNDLOWAT:
 966		v.val = 1;
 967		break;
 968
 969	case SO_PASSCRED:
 970		v.val = !!test_bit(SOCK_PASSCRED, &sock->flags);
 971		break;
 972
 973	case SO_PEERCRED:
 974	{
 975		struct ucred peercred;
 976		if (len > sizeof(peercred))
 977			len = sizeof(peercred);
 978		cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
 979		if (copy_to_user(optval, &peercred, len))
 980			return -EFAULT;
 981		goto lenout;
 982	}
 983
 984	case SO_PEERNAME:
 985	{
 986		char address[128];
 987
 988		if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
 989			return -ENOTCONN;
 990		if (lv < len)
 991			return -EINVAL;
 992		if (copy_to_user(optval, address, len))
 993			return -EFAULT;
 994		goto lenout;
 995	}
 996
 997	/* Dubious BSD thing... Probably nobody even uses it, but
 998	 * the UNIX standard wants it for whatever reason... -DaveM
 999	 */
1000	case SO_ACCEPTCONN:
1001		v.val = sk->sk_state == TCP_LISTEN;
1002		break;
1003
1004	case SO_PASSSEC:
1005		v.val = !!test_bit(SOCK_PASSSEC, &sock->flags);
1006		break;
1007
1008	case SO_PEERSEC:
1009		return security_socket_getpeersec_stream(sock, optval, optlen, len);
1010
1011	case SO_MARK:
1012		v.val = sk->sk_mark;
1013		break;
1014
1015	case SO_RXQ_OVFL:
1016		v.val = sock_flag(sk, SOCK_RXQ_OVFL);
1017		break;
1018
1019	case SO_WIFI_STATUS:
1020		v.val = sock_flag(sk, SOCK_WIFI_STATUS);
1021		break;
1022
1023	case SO_PEEK_OFF:
1024		if (!sock->ops->set_peek_off)
1025			return -EOPNOTSUPP;
1026
1027		v.val = sk->sk_peek_off;
1028		break;
1029	case SO_NOFCS:
1030		v.val = sock_flag(sk, SOCK_NOFCS);
1031		break;
1032	default:
1033		return -ENOPROTOOPT;
1034	}
1035
1036	if (len > lv)
1037		len = lv;
1038	if (copy_to_user(optval, &v, len))
1039		return -EFAULT;
1040lenout:
1041	if (put_user(len, optlen))
1042		return -EFAULT;
1043	return 0;
1044}
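
/*
 * Usage sketch (illustrative, not part of this file): querying the
 * SO_PEERCRED case above from userspace on a connected AF_UNIX socket;
 * the kernel fills in the struct via cred_to_ucred().
 *
 *	struct ucred peer;
 *	socklen_t len = sizeof(peer);
 *
 *	if (getsockopt(fd, SOL_SOCKET, SO_PEERCRED, &peer, &len) == 0)
 *		printf("pid=%d uid=%d gid=%d\n", peer.pid, peer.uid, peer.gid);
 */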
1045
1046/*
1047 * Initialize an sk_lock.
1048 *
1049 * (We also register the sk_lock with the lock validator.)
1050 */
1051static inline void sock_lock_init(struct sock *sk)
1052{
1053	sock_lock_init_class_and_name(sk,
1054			af_family_slock_key_strings[sk->sk_family],
1055			af_family_slock_keys + sk->sk_family,
1056			af_family_key_strings[sk->sk_family],
1057			af_family_keys + sk->sk_family);
1058}
1059
1060/*
1061 * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
 1062 * even temporarily, because of RCU lookups. sk_node should also be left as-is.
 1063 * We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end.
1064 */
1065static void sock_copy(struct sock *nsk, const struct sock *osk)
1066{
1067#ifdef CONFIG_SECURITY_NETWORK
1068	void *sptr = nsk->sk_security;
1069#endif
1070	memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));
1071
1072	memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
1073	       osk->sk_prot->obj_size - offsetof(struct sock, sk_dontcopy_end));
1074
1075#ifdef CONFIG_SECURITY_NETWORK
1076	nsk->sk_security = sptr;
1077	security_sk_clone(osk, nsk);
1078#endif
1079}
1080
1081/*
 1082 * Caches using SLAB_DESTROY_BY_RCU should leave the .next pointer of nulls
 1083 * nodes unmodified. Special care is taken when initializing the object to zero.
1084 */
1085static inline void sk_prot_clear_nulls(struct sock *sk, int size)
1086{
1087	if (offsetof(struct sock, sk_node.next) != 0)
1088		memset(sk, 0, offsetof(struct sock, sk_node.next));
1089	memset(&sk->sk_node.pprev, 0,
1090	       size - offsetof(struct sock, sk_node.pprev));
1091}
1092
1093void sk_prot_clear_portaddr_nulls(struct sock *sk, int size)
1094{
1095	unsigned long nulls1, nulls2;
1096
1097	nulls1 = offsetof(struct sock, __sk_common.skc_node.next);
1098	nulls2 = offsetof(struct sock, __sk_common.skc_portaddr_node.next);
1099	if (nulls1 > nulls2)
1100		swap(nulls1, nulls2);
1101
1102	if (nulls1 != 0)
1103		memset((char *)sk, 0, nulls1);
1104	memset((char *)sk + nulls1 + sizeof(void *), 0,
1105	       nulls2 - nulls1 - sizeof(void *));
1106	memset((char *)sk + nulls2 + sizeof(void *), 0,
1107	       size - nulls2 - sizeof(void *));
1108}
1109EXPORT_SYMBOL(sk_prot_clear_portaddr_nulls);
1110
1111static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
1112		int family)
1113{
1114	struct sock *sk;
1115	struct kmem_cache *slab;
1116
1117	slab = prot->slab;
1118	if (slab != NULL) {
1119		sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
1120		if (!sk)
1121			return sk;
1122		if (priority & __GFP_ZERO) {
1123			if (prot->clear_sk)
1124				prot->clear_sk(sk, prot->obj_size);
1125			else
1126				sk_prot_clear_nulls(sk, prot->obj_size);
1127		}
1128	} else
1129		sk = kmalloc(prot->obj_size, priority);
1130
1131	if (sk != NULL) {
1132		kmemcheck_annotate_bitfield(sk, flags);
1133
1134		if (security_sk_alloc(sk, family, priority))
1135			goto out_free;
1136
1137		if (!try_module_get(prot->owner))
1138			goto out_free_sec;
1139		sk_tx_queue_clear(sk);
1140	}
1141
1142	return sk;
1143
1144out_free_sec:
1145	security_sk_free(sk);
1146out_free:
1147	if (slab != NULL)
1148		kmem_cache_free(slab, sk);
1149	else
1150		kfree(sk);
1151	return NULL;
1152}
1153
1154static void sk_prot_free(struct proto *prot, struct sock *sk)
1155{
1156	struct kmem_cache *slab;
1157	struct module *owner;
1158
1159	owner = prot->owner;
1160	slab = prot->slab;
1161
1162	security_sk_free(sk);
1163	if (slab != NULL)
1164		kmem_cache_free(slab, sk);
1165	else
1166		kfree(sk);
1167	module_put(owner);
1168}
1169
1170#ifdef CONFIG_CGROUPS
1171void sock_update_classid(struct sock *sk)
1172{
1173	u32 classid;
1174
1175	rcu_read_lock();  /* doing current task, which cannot vanish. */
1176	classid = task_cls_classid(current);
1177	rcu_read_unlock();
1178	if (classid && classid != sk->sk_classid)
1179		sk->sk_classid = classid;
1180}
1181EXPORT_SYMBOL(sock_update_classid);
1182
1183void sock_update_netprioidx(struct sock *sk)
1184{
1185	if (in_interrupt())
1186		return;
1187
1188	sk->sk_cgrp_prioidx = task_netprioidx(current);
1189}
1190EXPORT_SYMBOL_GPL(sock_update_netprioidx);
1191#endif
1192
1193/**
1194 *	sk_alloc - All socket objects are allocated here
1195 *	@net: the applicable net namespace
1196 *	@family: protocol family
1197 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1198 *	@prot: struct proto associated with this new sock instance
1199 */
1200struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
1201		      struct proto *prot)
1202{
1203	struct sock *sk;
1204
1205	sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
1206	if (sk) {
1207		sk->sk_family = family;
1208		/*
1209		 * See comment in struct sock definition to understand
1210		 * why we need sk_prot_creator -acme
1211		 */
1212		sk->sk_prot = sk->sk_prot_creator = prot;
1213		sock_lock_init(sk);
1214		sock_net_set(sk, get_net(net));
1215		atomic_set(&sk->sk_wmem_alloc, 1);
1216
1217		sock_update_classid(sk);
1218		sock_update_netprioidx(sk);
1219	}
1220
1221	return sk;
1222}
1223EXPORT_SYMBOL(sk_alloc);
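
/*
 * Usage sketch (illustrative only): the typical sequence in a protocol
 * family's ->create() handler; my_proto is a hypothetical struct proto
 * that has been registered with proto_register().
 *
 *	sk = sk_alloc(net, PF_INET, GFP_KERNEL, &my_proto);
 *	if (!sk)
 *		return -ENOBUFS;
 *	sock_init_data(sock, sk);
 */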
1224
1225static void __sk_free(struct sock *sk)
1226{
1227	struct sk_filter *filter;
1228
1229	if (sk->sk_destruct)
1230		sk->sk_destruct(sk);
1231
1232	filter = rcu_dereference_check(sk->sk_filter,
1233				       atomic_read(&sk->sk_wmem_alloc) == 0);
1234	if (filter) {
1235		sk_filter_uncharge(sk, filter);
1236		RCU_INIT_POINTER(sk->sk_filter, NULL);
1237	}
1238
1239	sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);
1240
1241	if (atomic_read(&sk->sk_omem_alloc))
1242		pr_debug("%s: optmem leakage (%d bytes) detected\n",
1243			 __func__, atomic_read(&sk->sk_omem_alloc));
1244
1245	if (sk->sk_peer_cred)
1246		put_cred(sk->sk_peer_cred);
1247	put_pid(sk->sk_peer_pid);
1248	put_net(sock_net(sk));
1249	sk_prot_free(sk->sk_prot_creator, sk);
1250}
1251
1252void sk_free(struct sock *sk)
1253{
1254	/*
 1255	 * We subtract one from sk_wmem_alloc to learn whether some packets
 1256	 * are still in a tx queue.
 1257	 * If the count is non-zero, sock_wfree() will call __sk_free(sk) later.
1258	 */
1259	if (atomic_dec_and_test(&sk->sk_wmem_alloc))
1260		__sk_free(sk);
1261}
1262EXPORT_SYMBOL(sk_free);
1263
1264/*
 1265 * The last sock_put should drop the reference to sk->sk_net. It has
 1266 * already been dropped in sk_change_net. Taking a reference to a stopping
 1267 * namespace is not an option.
 1268 * Instead, take a reference to the socket to remove it from the hash
 1269 * _alive_, and after that destroy it in the context of init_net.
1270 */
1271void sk_release_kernel(struct sock *sk)
1272{
1273	if (sk == NULL || sk->sk_socket == NULL)
1274		return;
1275
1276	sock_hold(sk);
1277	sock_release(sk->sk_socket);
1278	release_net(sock_net(sk));
1279	sock_net_set(sk, get_net(&init_net));
1280	sock_put(sk);
1281}
1282EXPORT_SYMBOL(sk_release_kernel);
1283
1284static void sk_update_clone(const struct sock *sk, struct sock *newsk)
1285{
1286	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
1287		sock_update_memcg(newsk);
1288}
1289
1290/**
1291 *	sk_clone_lock - clone a socket, and lock its clone
1292 *	@sk: the socket to clone
1293 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1294 *
1295 *	Caller must unlock socket even in error path (bh_unlock_sock(newsk))
1296 */
1297struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
1298{
1299	struct sock *newsk;
1300
1301	newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
1302	if (newsk != NULL) {
1303		struct sk_filter *filter;
1304
1305		sock_copy(newsk, sk);
1306
1307		/* SANITY */
1308		get_net(sock_net(newsk));
1309		sk_node_init(&newsk->sk_node);
1310		sock_lock_init(newsk);
1311		bh_lock_sock(newsk);
1312		newsk->sk_backlog.head	= newsk->sk_backlog.tail = NULL;
1313		newsk->sk_backlog.len = 0;
1314
1315		atomic_set(&newsk->sk_rmem_alloc, 0);
1316		/*
1317		 * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
1318		 */
1319		atomic_set(&newsk->sk_wmem_alloc, 1);
1320		atomic_set(&newsk->sk_omem_alloc, 0);
1321		skb_queue_head_init(&newsk->sk_receive_queue);
1322		skb_queue_head_init(&newsk->sk_write_queue);
1323#ifdef CONFIG_NET_DMA
1324		skb_queue_head_init(&newsk->sk_async_wait_queue);
1325#endif
1326
1327		spin_lock_init(&newsk->sk_dst_lock);
1328		rwlock_init(&newsk->sk_callback_lock);
1329		lockdep_set_class_and_name(&newsk->sk_callback_lock,
1330				af_callback_keys + newsk->sk_family,
1331				af_family_clock_key_strings[newsk->sk_family]);
1332
1333		newsk->sk_dst_cache	= NULL;
1334		newsk->sk_wmem_queued	= 0;
1335		newsk->sk_forward_alloc = 0;
1336		newsk->sk_send_head	= NULL;
1337		newsk->sk_userlocks	= sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
1338
1339		sock_reset_flag(newsk, SOCK_DONE);
1340		skb_queue_head_init(&newsk->sk_error_queue);
1341
1342		filter = rcu_dereference_protected(newsk->sk_filter, 1);
1343		if (filter != NULL)
1344			sk_filter_charge(newsk, filter);
1345
1346		if (unlikely(xfrm_sk_clone_policy(newsk))) {
 1347			/* It is still a raw copy of the parent, so invalidate
 1348			 * the destructor and do a plain sk_free() */
1349			newsk->sk_destruct = NULL;
1350			bh_unlock_sock(newsk);
1351			sk_free(newsk);
1352			newsk = NULL;
1353			goto out;
1354		}
1355
1356		newsk->sk_err	   = 0;
1357		newsk->sk_priority = 0;
1358		/*
1359		 * Before updating sk_refcnt, we must commit prior changes to memory
1360		 * (Documentation/RCU/rculist_nulls.txt for details)
1361		 */
1362		smp_wmb();
1363		atomic_set(&newsk->sk_refcnt, 2);
1364
1365		/*
1366		 * Increment the counter in the same struct proto as the master
1367		 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
1368		 * is the same as sk->sk_prot->socks, as this field was copied
1369		 * with memcpy).
1370		 *
1371		 * This _changes_ the previous behaviour, where
1372		 * tcp_create_openreq_child always was incrementing the
 1373 * equivalent to tcp_prot->socks (inet_sock_nr), so this has
1374		 * to be taken into account in all callers. -acme
1375		 */
1376		sk_refcnt_debug_inc(newsk);
1377		sk_set_socket(newsk, NULL);
1378		newsk->sk_wq = NULL;
1379
1380		sk_update_clone(sk, newsk);
1381
1382		if (newsk->sk_prot->sockets_allocated)
1383			sk_sockets_allocated_inc(newsk);
1384
1385		if (newsk->sk_flags & SK_FLAGS_TIMESTAMP)
1386			net_enable_timestamp();
1387	}
1388out:
1389	return newsk;
1390}
1391EXPORT_SYMBOL_GPL(sk_clone_lock);
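
/*
 * Usage sketch (illustrative only): as the kerneldoc above warns, the
 * clone comes back bh-locked, so the caller must unlock it on every
 * path, including after errors.
 *
 *	newsk = sk_clone_lock(sk, GFP_ATOMIC);
 *	if (newsk) {
 *		... protocol-specific setup ...
 *		bh_unlock_sock(newsk);
 *	}
 */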
1392
1393void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
1394{
1395	__sk_dst_set(sk, dst);
1396	sk->sk_route_caps = dst->dev->features;
1397	if (sk->sk_route_caps & NETIF_F_GSO)
1398		sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
1399	sk->sk_route_caps &= ~sk->sk_route_nocaps;
1400	if (sk_can_gso(sk)) {
1401		if (dst->header_len) {
1402			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
1403		} else {
1404			sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
1405			sk->sk_gso_max_size = dst->dev->gso_max_size;
1406			sk->sk_gso_max_segs = dst->dev->gso_max_segs;
1407		}
1408	}
1409}
1410EXPORT_SYMBOL_GPL(sk_setup_caps);
1411
1412void __init sk_init(void)
1413{
1414	if (totalram_pages <= 4096) {
1415		sysctl_wmem_max = 32767;
1416		sysctl_rmem_max = 32767;
1417		sysctl_wmem_default = 32767;
1418		sysctl_rmem_default = 32767;
1419	} else if (totalram_pages >= 131072) {
1420		sysctl_wmem_max = 131071;
1421		sysctl_rmem_max = 131071;
1422	}
1423}
1424
1425/*
1426 *	Simple resource managers for sockets.
1427 */
1428
1429
1430/*
1431 * Write buffer destructor automatically called from kfree_skb.
1432 */
1433void sock_wfree(struct sk_buff *skb)
1434{
1435	struct sock *sk = skb->sk;
1436	unsigned int len = skb->truesize;
1437
1438	if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
1439		/*
 1440		 * Keep a reference on sk_wmem_alloc; it will be released
 1441		 * after the sk_write_space() call.
1442		 */
1443		atomic_sub(len - 1, &sk->sk_wmem_alloc);
1444		sk->sk_write_space(sk);
1445		len = 1;
1446	}
1447	/*
1448	 * if sk_wmem_alloc reaches 0, we must finish what sk_free()
1449	 * could not do because of in-flight packets
1450	 */
1451	if (atomic_sub_and_test(len, &sk->sk_wmem_alloc))
1452		__sk_free(sk);
1453}
1454EXPORT_SYMBOL(sock_wfree);
1455
1456/*
1457 * Read buffer destructor automatically called from kfree_skb.
1458 */
1459void sock_rfree(struct sk_buff *skb)
1460{
1461	struct sock *sk = skb->sk;
1462	unsigned int len = skb->truesize;
1463
1464	atomic_sub(len, &sk->sk_rmem_alloc);
1465	sk_mem_uncharge(sk, len);
1466}
1467EXPORT_SYMBOL(sock_rfree);
1468
1469
1470int sock_i_uid(struct sock *sk)
1471{
1472	int uid;
1473
1474	read_lock_bh(&sk->sk_callback_lock);
1475	uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0;
1476	read_unlock_bh(&sk->sk_callback_lock);
1477	return uid;
1478}
1479EXPORT_SYMBOL(sock_i_uid);
1480
1481unsigned long sock_i_ino(struct sock *sk)
1482{
1483	unsigned long ino;
1484
1485	read_lock_bh(&sk->sk_callback_lock);
1486	ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
1487	read_unlock_bh(&sk->sk_callback_lock);
1488	return ino;
1489}
1490EXPORT_SYMBOL(sock_i_ino);
1491
1492/*
1493 * Allocate a skb from the socket's send buffer.
1494 */
1495struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
1496			     gfp_t priority)
1497{
1498	if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
1499		struct sk_buff *skb = alloc_skb(size, priority);
1500		if (skb) {
1501			skb_set_owner_w(skb, sk);
1502			return skb;
1503		}
1504	}
1505	return NULL;
1506}
1507EXPORT_SYMBOL(sock_wmalloc);
1508
1509/*
1510 * Allocate a skb from the socket's receive buffer.
1511 */
1512struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
1513			     gfp_t priority)
1514{
1515	if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
1516		struct sk_buff *skb = alloc_skb(size, priority);
1517		if (skb) {
1518			skb_set_owner_r(skb, sk);
1519			return skb;
1520		}
1521	}
1522	return NULL;
1523}
1524
1525/*
1526 * Allocate a memory block from the socket's option memory buffer.
1527 */
1528void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
1529{
1530	if ((unsigned int)size <= sysctl_optmem_max &&
1531	    atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
1532		void *mem;
1533		/* First do the add, to avoid the race if kmalloc
1534		 * might sleep.
1535		 */
1536		atomic_add(size, &sk->sk_omem_alloc);
1537		mem = kmalloc(size, priority);
1538		if (mem)
1539			return mem;
1540		atomic_sub(size, &sk->sk_omem_alloc);
1541	}
1542	return NULL;
1543}
1544EXPORT_SYMBOL(sock_kmalloc);
1545
1546/*
1547 * Free an option memory block.
1548 */
1549void sock_kfree_s(struct sock *sk, void *mem, int size)
1550{
1551	kfree(mem);
1552	atomic_sub(size, &sk->sk_omem_alloc);
1553}
1554EXPORT_SYMBOL(sock_kfree_s);
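
/*
 * Usage sketch (illustrative only): option memory is charged against
 * sk_omem_alloc, so every sock_kmalloc() must be paired with a
 * sock_kfree_s() of the same size; struct my_opt is hypothetical.
 *
 *	struct my_opt *opt = sock_kmalloc(sk, sizeof(*opt), GFP_KERNEL);
 *
 *	if (!opt)
 *		return -ENOBUFS;
 *	...
 *	sock_kfree_s(sk, opt, sizeof(*opt));
 */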
1555
1556/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
 1557   I think these locks should be removed for datagram sockets.
1558 */
1559static long sock_wait_for_wmem(struct sock *sk, long timeo)
1560{
1561	DEFINE_WAIT(wait);
1562
1563	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
1564	for (;;) {
1565		if (!timeo)
1566			break;
1567		if (signal_pending(current))
1568			break;
1569		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1570		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1571		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
1572			break;
1573		if (sk->sk_shutdown & SEND_SHUTDOWN)
1574			break;
1575		if (sk->sk_err)
1576			break;
1577		timeo = schedule_timeout(timeo);
1578	}
1579	finish_wait(sk_sleep(sk), &wait);
1580	return timeo;
1581}
1582
1583
1584/*
1585 *	Generic send/receive buffer handlers
1586 */
1587
1588struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
1589				     unsigned long data_len, int noblock,
1590				     int *errcode)
1591{
1592	struct sk_buff *skb;
1593	gfp_t gfp_mask;
1594	long timeo;
1595	int err;
1596	int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
1597
1598	err = -EMSGSIZE;
1599	if (npages > MAX_SKB_FRAGS)
1600		goto failure;
1601
1602	gfp_mask = sk->sk_allocation;
1603	if (gfp_mask & __GFP_WAIT)
1604		gfp_mask |= __GFP_REPEAT;
1605
1606	timeo = sock_sndtimeo(sk, noblock);
1607	while (1) {
1608		err = sock_error(sk);
1609		if (err != 0)
1610			goto failure;
1611
1612		err = -EPIPE;
1613		if (sk->sk_shutdown & SEND_SHUTDOWN)
1614			goto failure;
1615
1616		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
1617			skb = alloc_skb(header_len, gfp_mask);
1618			if (skb) {
1619				int i;
1620
1621				/* No pages, we're done... */
1622				if (!data_len)
1623					break;
1624
1625				skb->truesize += data_len;
1626				skb_shinfo(skb)->nr_frags = npages;
1627				for (i = 0; i < npages; i++) {
1628					struct page *page;
1629
1630					page = alloc_pages(sk->sk_allocation, 0);
1631					if (!page) {
1632						err = -ENOBUFS;
1633						skb_shinfo(skb)->nr_frags = i;
1634						kfree_skb(skb);
1635						goto failure;
1636					}
1637
1638					__skb_fill_page_desc(skb, i,
1639							page, 0,
1640							(data_len >= PAGE_SIZE ?
1641							 PAGE_SIZE :
1642							 data_len));
1643					data_len -= PAGE_SIZE;
1644				}
1645
1646				/* Full success... */
1647				break;
1648			}
1649			err = -ENOBUFS;
1650			goto failure;
1651		}
1652		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
1653		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1654		err = -EAGAIN;
1655		if (!timeo)
1656			goto failure;
1657		if (signal_pending(current))
1658			goto interrupted;
1659		timeo = sock_wait_for_wmem(sk, timeo);
1660	}
1661
1662	skb_set_owner_w(skb, sk);
1663	return skb;
1664
1665interrupted:
1666	err = sock_intr_errno(timeo);
1667failure:
1668	*errcode = err;
1669	return NULL;
1670}
1671EXPORT_SYMBOL(sock_alloc_send_pskb);
1672
1673struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
1674				    int noblock, int *errcode)
1675{
1676	return sock_alloc_send_pskb(sk, size, 0, noblock, errcode);
1677}
1678EXPORT_SYMBOL(sock_alloc_send_skb);
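
/*
 * Usage sketch (illustrative only): a datagram sendmsg() implementation
 * typically allocates its buffer this way; hlen and len are assumed
 * header and payload sizes, and the skb comes back already charged to
 * the socket via skb_set_owner_w().
 *
 *	skb = sock_alloc_send_skb(sk, hlen + len,
 *				  msg->msg_flags & MSG_DONTWAIT, &err);
 *	if (!skb)
 *		goto out;
 */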
1679
1680static void __lock_sock(struct sock *sk)
1681	__releases(&sk->sk_lock.slock)
1682	__acquires(&sk->sk_lock.slock)
1683{
1684	DEFINE_WAIT(wait);
1685
1686	for (;;) {
1687		prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
1688					TASK_UNINTERRUPTIBLE);
1689		spin_unlock_bh(&sk->sk_lock.slock);
1690		schedule();
1691		spin_lock_bh(&sk->sk_lock.slock);
1692		if (!sock_owned_by_user(sk))
1693			break;
1694	}
1695	finish_wait(&sk->sk_lock.wq, &wait);
1696}
1697
1698static void __release_sock(struct sock *sk)
1699	__releases(&sk->sk_lock.slock)
1700	__acquires(&sk->sk_lock.slock)
1701{
1702	struct sk_buff *skb = sk->sk_backlog.head;
1703
1704	do {
1705		sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
1706		bh_unlock_sock(sk);
1707
1708		do {
1709			struct sk_buff *next = skb->next;
1710
1711			prefetch(next);
1712			WARN_ON_ONCE(skb_dst_is_noref(skb));
1713			skb->next = NULL;
1714			sk_backlog_rcv(sk, skb);
1715
1716			/*
1717			 * We are in process context here with softirqs
1718			 * disabled, use cond_resched_softirq() to preempt.
1719			 * This is safe to do because we've taken the backlog
1720			 * queue private:
1721			 */
1722			cond_resched_softirq();
1723
1724			skb = next;
1725		} while (skb != NULL);
1726
1727		bh_lock_sock(sk);
1728	} while ((skb = sk->sk_backlog.head) != NULL);
1729
1730	/*
 1731	 * Doing the zeroing here guarantees we cannot loop forever
1732	 * while a wild producer attempts to flood us.
1733	 */
1734	sk->sk_backlog.len = 0;
1735}
1736
1737/**
1738 * sk_wait_data - wait for data to arrive at sk_receive_queue
1739 * @sk:    sock to wait on
1740 * @timeo: for how long
1741 *
1742 * Now socket state including sk->sk_err is changed only under lock,
1743 * hence we may omit checks after joining wait queue.
 1744 * We check the receive queue before schedule() only as an optimization;
1745 * it is very likely that release_sock() added new data.
1746 */
1747int sk_wait_data(struct sock *sk, long *timeo)
1748{
1749	int rc;
1750	DEFINE_WAIT(wait);
1751
1752	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1753	set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1754	rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
1755	clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1756	finish_wait(sk_sleep(sk), &wait);
1757	return rc;
1758}
1759EXPORT_SYMBOL(sk_wait_data);
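
/*
 * Usage sketch (illustrative only): a simplified blocking receive loop
 * built on sk_wait_data(); timeo is assumed to come from
 * sock_rcvtimeo(sk, noblock) and the socket lock to be held.
 *
 *	while (!(skb = skb_dequeue(&sk->sk_receive_queue))) {
 *		if (!timeo)
 *			return -EAGAIN;
 *		if (signal_pending(current))
 *			return sock_intr_errno(timeo);
 *		sk_wait_data(sk, &timeo);
 *	}
 */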
1760
1761/**
1762 *	__sk_mem_schedule - increase sk_forward_alloc and memory_allocated
1763 *	@sk: socket
1764 *	@size: memory size to allocate
1765 *	@kind: allocation type
1766 *
1767 *	If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
1768 *	rmem allocation. This function assumes that protocols which have
1769 *	memory_pressure use sk_wmem_queued as write buffer accounting.
1770 */
1771int __sk_mem_schedule(struct sock *sk, int size, int kind)
1772{
1773	struct proto *prot = sk->sk_prot;
1774	int amt = sk_mem_pages(size);
1775	long allocated;
1776	int parent_status = UNDER_LIMIT;
1777
1778	sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
1779
1780	allocated = sk_memory_allocated_add(sk, amt, &parent_status);
1781
1782	/* Under limit. */
1783	if (parent_status == UNDER_LIMIT &&
1784			allocated <= sk_prot_mem_limits(sk, 0)) {
1785		sk_leave_memory_pressure(sk);
1786		return 1;
1787	}
1788
1789	/* Under pressure. (we or our parents) */
1790	if ((parent_status > SOFT_LIMIT) ||
1791			allocated > sk_prot_mem_limits(sk, 1))
1792		sk_enter_memory_pressure(sk);
1793
1794	/* Over hard limit (we or our parents) */
1795	if ((parent_status == OVER_LIMIT) ||
1796			(allocated > sk_prot_mem_limits(sk, 2)))
1797		goto suppress_allocation;
1798
1799	/* guarantee minimum buffer size under pressure */
1800	if (kind == SK_MEM_RECV) {
1801		if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
1802			return 1;
1803
1804	} else { /* SK_MEM_SEND */
1805		if (sk->sk_type == SOCK_STREAM) {
1806			if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
1807				return 1;
1808		} else if (atomic_read(&sk->sk_wmem_alloc) <
1809			   prot->sysctl_wmem[0])
1810				return 1;
1811	}
1812
1813	if (sk_has_memory_pressure(sk)) {
1814		int alloc;
1815
1816		if (!sk_under_memory_pressure(sk))
1817			return 1;
1818		alloc = sk_sockets_allocated_read_positive(sk);
1819		if (sk_prot_mem_limits(sk, 2) > alloc *
1820		    sk_mem_pages(sk->sk_wmem_queued +
1821				 atomic_read(&sk->sk_rmem_alloc) +
1822				 sk->sk_forward_alloc))
1823			return 1;
1824	}
1825
1826suppress_allocation:
1827
1828	if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
1829		sk_stream_moderate_sndbuf(sk);
1830
1831		/* Fail only if socket is _under_ its sndbuf.
 1832		 * In this case we cannot block, so we have to fail.
1833		 */
1834		if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
1835			return 1;
1836	}
1837
1838	trace_sock_exceed_buf_limit(sk, prot, allocated);
1839
1840	/* Alas. Undo changes. */
1841	sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
1842
1843	sk_memory_allocated_sub(sk, amt);
1844
1845	return 0;
1846}
1847EXPORT_SYMBOL(__sk_mem_schedule);
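
/*
 * Worked example (illustrative only, assuming SK_MEM_QUANTUM == PAGE_SIZE
 * == 4096): charging size = 6000 bytes gives amt = sk_mem_pages(6000) = 2,
 * so sk_forward_alloc grows by 8192 and memory_allocated by 2 pages; the
 * unused 2192 bytes remain in sk_forward_alloc to satisfy later charges
 * without touching the shared counter.
 */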
1848
1849/**
 1850 *	__sk_mem_reclaim - reclaim memory_allocated
1851 *	@sk: socket
1852 */
1853void __sk_mem_reclaim(struct sock *sk)
1854{
1855	sk_memory_allocated_sub(sk,
1856				sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT);
1857	sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;
1858
1859	if (sk_under_memory_pressure(sk) &&
1860	    (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
1861		sk_leave_memory_pressure(sk);
1862}
1863EXPORT_SYMBOL(__sk_mem_reclaim);
1864
1865
1866/*
1867 * Set of default routines for initialising struct proto_ops when
1868 * the protocol does not support a particular function. In certain
1869 * cases where it makes no sense for a protocol to have a "do nothing"
1870 * function, some default processing is provided.
1871 */
1872
1873int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
1874{
1875	return -EOPNOTSUPP;
1876}
1877EXPORT_SYMBOL(sock_no_bind);
1878
1879int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
1880		    int len, int flags)
1881{
1882	return -EOPNOTSUPP;
1883}
1884EXPORT_SYMBOL(sock_no_connect);
1885
1886int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
1887{
1888	return -EOPNOTSUPP;
1889}
1890EXPORT_SYMBOL(sock_no_socketpair);
1891
1892int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
1893{
1894	return -EOPNOTSUPP;
1895}
1896EXPORT_SYMBOL(sock_no_accept);
1897
1898int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
1899		    int *len, int peer)
1900{
1901	return -EOPNOTSUPP;
1902}
1903EXPORT_SYMBOL(sock_no_getname);
1904
1905unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
1906{
1907	return 0;
1908}
1909EXPORT_SYMBOL(sock_no_poll);
1910
1911int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1912{
1913	return -EOPNOTSUPP;
1914}
1915EXPORT_SYMBOL(sock_no_ioctl);
1916
1917int sock_no_listen(struct socket *sock, int backlog)
1918{
1919	return -EOPNOTSUPP;
1920}
1921EXPORT_SYMBOL(sock_no_listen);
1922
1923int sock_no_shutdown(struct socket *sock, int how)
1924{
1925	return -EOPNOTSUPP;
1926}
1927EXPORT_SYMBOL(sock_no_shutdown);
1928
1929int sock_no_setsockopt(struct socket *sock, int level, int optname,
1930		    char __user *optval, unsigned int optlen)
1931{
1932	return -EOPNOTSUPP;
1933}
1934EXPORT_SYMBOL(sock_no_setsockopt);
1935
1936int sock_no_getsockopt(struct socket *sock, int level, int optname,
1937		    char __user *optval, int __user *optlen)
1938{
1939	return -EOPNOTSUPP;
1940}
1941EXPORT_SYMBOL(sock_no_getsockopt);
1942
1943int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
1944		    size_t len)
1945{
1946	return -EOPNOTSUPP;
1947}
1948EXPORT_SYMBOL(sock_no_sendmsg);
1949
1950int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
1951		    size_t len, int flags)
1952{
1953	return -EOPNOTSUPP;
1954}
1955EXPORT_SYMBOL(sock_no_recvmsg);
1956
1957int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
1958{
1959	/* Mirror missing mmap method error code */
1960	return -ENODEV;
1961}
1962EXPORT_SYMBOL(sock_no_mmap);
1963
1964ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
1965{
1966	ssize_t res;
1967	struct msghdr msg = {.msg_flags = flags};
1968	struct kvec iov;
1969	char *kaddr = kmap(page);
1970	iov.iov_base = kaddr + offset;
1971	iov.iov_len = size;
1972	res = kernel_sendmsg(sock, &msg, &iov, 1, size);
1973	kunmap(page);
1974	return res;
1975}
1976EXPORT_SYMBOL(sock_no_sendpage);
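
/*
 * Usage sketch (illustrative only): a hypothetical protocol wiring the
 * stubs above into its proto_ops for the calls it does not support.
 *
 *	static const struct proto_ops my_proto_ops = {
 *		.family		= PF_INET,
 *		.owner		= THIS_MODULE,
 *		.listen		= sock_no_listen,
 *		.accept		= sock_no_accept,
 *		.sendpage	= sock_no_sendpage,
 *		...
 *	};
 */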
1977
1978/*
1979 *	Default Socket Callbacks
1980 */
1981
1982static void sock_def_wakeup(struct sock *sk)
1983{
1984	struct socket_wq *wq;
1985
1986	rcu_read_lock();
1987	wq = rcu_dereference(sk->sk_wq);
1988	if (wq_has_sleeper(wq))
1989		wake_up_interruptible_all(&wq->wait);
1990	rcu_read_unlock();
1991}
1992
1993static void sock_def_error_report(struct sock *sk)
1994{
1995	struct socket_wq *wq;
1996
1997	rcu_read_lock();
1998	wq = rcu_dereference(sk->sk_wq);
1999	if (wq_has_sleeper(wq))
2000		wake_up_interruptible_poll(&wq->wait, POLLERR);
2001	sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
2002	rcu_read_unlock();
2003}
2004
2005static void sock_def_readable(struct sock *sk, int len)
2006{
2007	struct socket_wq *wq;
2008
2009	rcu_read_lock();
2010	wq = rcu_dereference(sk->sk_wq);
2011	if (wq_has_sleeper(wq))
2012		wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLPRI |
2013						POLLRDNORM | POLLRDBAND);
2014	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
2015	rcu_read_unlock();
2016}
2017
2018static void sock_def_write_space(struct sock *sk)
2019{
2020	struct socket_wq *wq;
2021
2022	rcu_read_lock();
2023
2024	/* Do not wake up a writer until he can make "significant"
2025	 * progress.  --DaveM
2026	 */
2027	if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
2028		wq = rcu_dereference(sk->sk_wq);
2029		if (wq_has_sleeper(wq))
2030			wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
2031						POLLWRNORM | POLLWRBAND);
2032
2033		/* Should agree with poll, otherwise some programs break */
2034		if (sock_writeable(sk))
2035			sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
2036	}
2037
2038	rcu_read_unlock();
2039}
2040
2041static void sock_def_destruct(struct sock *sk)
2042{
2043	kfree(sk->sk_protinfo);
2044}
2045
2046void sk_send_sigurg(struct sock *sk)
2047{
2048	if (sk->sk_socket && sk->sk_socket->file)
2049		if (send_sigurg(&sk->sk_socket->file->f_owner))
2050			sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
2051}
2052EXPORT_SYMBOL(sk_send_sigurg);
2053
2054void sk_reset_timer(struct sock *sk, struct timer_list* timer,
2055		    unsigned long expires)
2056{
2057	if (!mod_timer(timer, expires))
2058		sock_hold(sk);
2059}
2060EXPORT_SYMBOL(sk_reset_timer);
2061
2062void sk_stop_timer(struct sock *sk, struct timer_list* timer)
2063{
2064	if (timer_pending(timer) && del_timer(timer))
2065		__sock_put(sk);
2066}
2067EXPORT_SYMBOL(sk_stop_timer);
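
/*
 * Usage sketch (illustrative only): these wrappers keep a socket
 * reference held while the timer is pending; e.g. arming a one-second
 * protocol timer and later cancelling it (which drops the reference
 * if the timer had not fired yet):
 *
 *	sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ);
 *	...
 *	sk_stop_timer(sk, &sk->sk_timer);
 */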
2068
2069void sock_init_data(struct socket *sock, struct sock *sk)
2070{
2071	skb_queue_head_init(&sk->sk_receive_queue);
2072	skb_queue_head_init(&sk->sk_write_queue);
2073	skb_queue_head_init(&sk->sk_error_queue);
2074#ifdef CONFIG_NET_DMA
2075	skb_queue_head_init(&sk->sk_async_wait_queue);
2076#endif
2077
2078	sk->sk_send_head	=	NULL;
2079
2080	init_timer(&sk->sk_timer);
2081
2082	sk->sk_allocation	=	GFP_KERNEL;
2083	sk->sk_rcvbuf		=	sysctl_rmem_default;
2084	sk->sk_sndbuf		=	sysctl_wmem_default;
2085	sk->sk_state		=	TCP_CLOSE;
2086	sk_set_socket(sk, sock);
2087
2088	sock_set_flag(sk, SOCK_ZAPPED);
2089
2090	if (sock) {
2091		sk->sk_type	=	sock->type;
2092		sk->sk_wq	=	sock->wq;
2093		sock->sk	=	sk;
2094	} else
2095		sk->sk_wq	=	NULL;
2096
2097	spin_lock_init(&sk->sk_dst_lock);
2098	rwlock_init(&sk->sk_callback_lock);
2099	lockdep_set_class_and_name(&sk->sk_callback_lock,
2100			af_callback_keys + sk->sk_family,
2101			af_family_clock_key_strings[sk->sk_family]);
2102
2103	sk->sk_state_change	=	sock_def_wakeup;
2104	sk->sk_data_ready	=	sock_def_readable;
2105	sk->sk_write_space	=	sock_def_write_space;
2106	sk->sk_error_report	=	sock_def_error_report;
2107	sk->sk_destruct		=	sock_def_destruct;
2108
2109	sk->sk_sndmsg_page	=	NULL;
2110	sk->sk_sndmsg_off	=	0;
2111	sk->sk_peek_off		=	-1;
2112
2113	sk->sk_peer_pid 	=	NULL;
2114	sk->sk_peer_cred	=	NULL;
2115	sk->sk_write_pending	=	0;
2116	sk->sk_rcvlowat		=	1;
2117	sk->sk_rcvtimeo		=	MAX_SCHEDULE_TIMEOUT;
2118	sk->sk_sndtimeo		=	MAX_SCHEDULE_TIMEOUT;
2119
2120	sk->sk_stamp = ktime_set(-1L, 0);
2121
2122	/*
2123	 * Before updating sk_refcnt, we must commit prior changes to memory
2124	 * (Documentation/RCU/rculist_nulls.txt for details)
2125	 */
2126	smp_wmb();
2127	atomic_set(&sk->sk_refcnt, 1);
2128	atomic_set(&sk->sk_drops, 0);
2129}
2130EXPORT_SYMBOL(sock_init_data);
2131
2132void lock_sock_nested(struct sock *sk, int subclass)
2133{
2134	might_sleep();
2135	spin_lock_bh(&sk->sk_lock.slock);
2136	if (sk->sk_lock.owned)
2137		__lock_sock(sk);
2138	sk->sk_lock.owned = 1;
2139	spin_unlock(&sk->sk_lock.slock);
2140	/*
2141	 * The sk_lock has mutex_lock() semantics here:
2142	 */
2143	mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
2144	local_bh_enable();
2145}
2146EXPORT_SYMBOL(lock_sock_nested);
2147
2148void release_sock(struct sock *sk)
2149{
2150	/*
2151	 * The sk_lock has mutex_unlock() semantics:
2152	 */
2153	mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
2154
2155	spin_lock_bh(&sk->sk_lock.slock);
2156	if (sk->sk_backlog.tail)
2157		__release_sock(sk);
2158	sk->sk_lock.owned = 0;
2159	if (waitqueue_active(&sk->sk_lock.wq))
2160		wake_up(&sk->sk_lock.wq);
2161	spin_unlock_bh(&sk->sk_lock.slock);
2162}
2163EXPORT_SYMBOL(release_sock);
2164
2165/**
2166 * lock_sock_fast - fast version of lock_sock
2167 * @sk: socket
2168 *
 2169 * This version should be used for very small sections, where the process
 2170 * won't block. Returns false if the fast path is taken:
 2171 *   sk_lock.slock locked, owned = 0, BH disabled
 2172 * Returns true if the slow path is taken:
 2173 *   sk_lock.slock unlocked, owned = 1, BH enabled
2174 */
2175bool lock_sock_fast(struct sock *sk)
2176{
2177	might_sleep();
2178	spin_lock_bh(&sk->sk_lock.slock);
2179
2180	if (!sk->sk_lock.owned)
2181		/*
 2182		 * Note: the fast path returns with BH disabled
2183		 */
2184		return false;
2185
2186	__lock_sock(sk);
2187	sk->sk_lock.owned = 1;
2188	spin_unlock(&sk->sk_lock.slock);
2189	/*
2190	 * The sk_lock has mutex_lock() semantics here:
2191	 */
2192	mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
2193	local_bh_enable();
2194	return true;
2195}
2196EXPORT_SYMBOL(lock_sock_fast);
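
/*
 * Usage sketch (illustrative only): the return value must be passed to
 * unlock_sock_fast() so that the matching unlock path is taken.
 *
 *	bool slow = lock_sock_fast(sk);
 *
 *	... short critical section ...
 *
 *	unlock_sock_fast(sk, slow);
 */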
2197
2198int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
2199{
2200	struct timeval tv;
2201	if (!sock_flag(sk, SOCK_TIMESTAMP))
2202		sock_enable_timestamp(sk, SOCK_TIMESTAMP);
2203	tv = ktime_to_timeval(sk->sk_stamp);
2204	if (tv.tv_sec == -1)
2205		return -ENOENT;
2206	if (tv.tv_sec == 0) {
2207		sk->sk_stamp = ktime_get_real();
2208		tv = ktime_to_timeval(sk->sk_stamp);
2209	}
2210	return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
2211}
2212EXPORT_SYMBOL(sock_get_timestamp);
2213
2214int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
2215{
2216	struct timespec ts;
2217	if (!sock_flag(sk, SOCK_TIMESTAMP))
2218		sock_enable_timestamp(sk, SOCK_TIMESTAMP);
2219	ts = ktime_to_timespec(sk->sk_stamp);
2220	if (ts.tv_sec == -1)
2221		return -ENOENT;
2222	if (ts.tv_sec == 0) {
2223		sk->sk_stamp = ktime_get_real();
2224		ts = ktime_to_timespec(sk->sk_stamp);
2225	}
2226	return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
2227}
2228EXPORT_SYMBOL(sock_get_timestampns);
2229
2230void sock_enable_timestamp(struct sock *sk, int flag)
2231{
2232	if (!sock_flag(sk, flag)) {
2233		unsigned long previous_flags = sk->sk_flags;
2234
2235		sock_set_flag(sk, flag);
2236		/*
2237		 * we just set one of the two flags which require net
2238		 * time stamping, but time stamping might have been on
2239		 * already because of the other one
2240		 */
2241		if (!(previous_flags & SK_FLAGS_TIMESTAMP))
2242			net_enable_timestamp();
2243	}
2244}
2245
2246/*
 2247 *	Get a socket option on a socket.
2248 *
2249 *	FIX: POSIX 1003.1g is very ambiguous here. It states that
2250 *	asynchronous errors should be reported by getsockopt. We assume
 2251 *	this means if you specify SO_ERROR (otherwise what's the point of it).
2252 */
2253int sock_common_getsockopt(struct socket *sock, int level, int optname,
2254			   char __user *optval, int __user *optlen)
2255{
2256	struct sock *sk = sock->sk;
2257
2258	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
2259}
2260EXPORT_SYMBOL(sock_common_getsockopt);
2261
2262#ifdef CONFIG_COMPAT
2263int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
2264				  char __user *optval, int __user *optlen)
2265{
2266	struct sock *sk = sock->sk;
2267
2268	if (sk->sk_prot->compat_getsockopt != NULL)
2269		return sk->sk_prot->compat_getsockopt(sk, level, optname,
2270						      optval, optlen);
2271	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
2272}
2273EXPORT_SYMBOL(compat_sock_common_getsockopt);
2274#endif
2275
2276int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
2277			struct msghdr *msg, size_t size, int flags)
2278{
2279	struct sock *sk = sock->sk;
2280	int addr_len = 0;
2281	int err;
2282
2283	err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
2284				   flags & ~MSG_DONTWAIT, &addr_len);
2285	if (err >= 0)
2286		msg->msg_namelen = addr_len;
2287	return err;
2288}
2289EXPORT_SYMBOL(sock_common_recvmsg);
2290
2291/*
2292 *	Set socket options on an inet socket.
2293 */
2294int sock_common_setsockopt(struct socket *sock, int level, int optname,
2295			   char __user *optval, unsigned int optlen)
2296{
2297	struct sock *sk = sock->sk;
2298
2299	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
2300}
2301EXPORT_SYMBOL(sock_common_setsockopt);
2302
2303#ifdef CONFIG_COMPAT
2304int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
2305				  char __user *optval, unsigned int optlen)
2306{
2307	struct sock *sk = sock->sk;
2308
2309	if (sk->sk_prot->compat_setsockopt != NULL)
2310		return sk->sk_prot->compat_setsockopt(sk, level, optname,
2311						      optval, optlen);
2312	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
2313}
2314EXPORT_SYMBOL(compat_sock_common_setsockopt);
2315#endif
2316
2317void sk_common_release(struct sock *sk)
2318{
2319	if (sk->sk_prot->destroy)
2320		sk->sk_prot->destroy(sk);
2321
2322	/*
 2323	 * Observation: when sock_common_release is called, processes have
 2324	 * no access to the socket, but the net still does.
2325	 * Step one, detach it from networking:
2326	 *
2327	 * A. Remove from hash tables.
2328	 */
2329
2330	sk->sk_prot->unhash(sk);
2331
2332	/*
 2333	 * At this point the socket cannot receive new packets, but it is
 2334	 * possible that some packets are in flight because some CPU ran the
 2335	 * receiver and did a hash table lookup before we unhashed the socket.
 2336	 * They will reach the receive queue and be purged by the socket destructor.
 2337	 *
 2338	 * Also, we still have packets pending on the receive queue and, probably,
 2339	 * our own packets waiting in device queues. sock_destroy will drain the
 2340	 * receive queue, but transmitted packets will delay socket destruction
 2341	 * until the last reference is released.
2342	 */
2343
2344	sock_orphan(sk);
2345
2346	xfrm_sk_free_policy(sk);
2347
2348	sk_refcnt_debug_release(sk);
2349	sock_put(sk);
2350}
2351EXPORT_SYMBOL(sk_common_release);
2352
2353#ifdef CONFIG_PROC_FS
2354#define PROTO_INUSE_NR	64	/* should be enough for the first time */
2355struct prot_inuse {
2356	int val[PROTO_INUSE_NR];
2357};
2358
2359static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);
2360
2361#ifdef CONFIG_NET_NS
2362void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
2363{
2364	__this_cpu_add(net->core.inuse->val[prot->inuse_idx], val);
2365}
2366EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
2367
2368int sock_prot_inuse_get(struct net *net, struct proto *prot)
2369{
2370	int cpu, idx = prot->inuse_idx;
2371	int res = 0;
2372
2373	for_each_possible_cpu(cpu)
2374		res += per_cpu_ptr(net->core.inuse, cpu)->val[idx];
2375
2376	return res >= 0 ? res : 0;
2377}
2378EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
2379
2380static int __net_init sock_inuse_init_net(struct net *net)
2381{
2382	net->core.inuse = alloc_percpu(struct prot_inuse);
2383	return net->core.inuse ? 0 : -ENOMEM;
2384}
2385
2386static void __net_exit sock_inuse_exit_net(struct net *net)
2387{
2388	free_percpu(net->core.inuse);
2389}
2390
2391static struct pernet_operations net_inuse_ops = {
2392	.init = sock_inuse_init_net,
2393	.exit = sock_inuse_exit_net,
2394};
2395
2396static __init int net_inuse_init(void)
2397{
2398	if (register_pernet_subsys(&net_inuse_ops))
2399		panic("Cannot initialize net inuse counters");
2400
2401	return 0;
2402}
2403
2404core_initcall(net_inuse_init);
2405#else
2406static DEFINE_PER_CPU(struct prot_inuse, prot_inuse);
2407
2408void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
2409{
2410	__this_cpu_add(prot_inuse.val[prot->inuse_idx], val);
2411}
2412EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
2413
2414int sock_prot_inuse_get(struct net *net, struct proto *prot)
2415{
2416	int cpu, idx = prot->inuse_idx;
2417	int res = 0;
2418
2419	for_each_possible_cpu(cpu)
2420		res += per_cpu(prot_inuse, cpu).val[idx];
2421
2422	return res >= 0 ? res : 0;
2423}
2424EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
2425#endif
2426
2427static void assign_proto_idx(struct proto *prot)
2428{
2429	prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);
2430
2431	if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
2432		pr_err("PROTO_INUSE_NR exhausted\n");
2433		return;
2434	}
2435
2436	set_bit(prot->inuse_idx, proto_inuse_idx);
2437}
2438
2439static void release_proto_idx(struct proto *prot)
2440{
2441	if (prot->inuse_idx != PROTO_INUSE_NR - 1)
2442		clear_bit(prot->inuse_idx, proto_inuse_idx);
2443}
2444#else
2445static inline void assign_proto_idx(struct proto *prot)
2446{
2447}
2448
2449static inline void release_proto_idx(struct proto *prot)
2450{
2451}
2452#endif
2453
2454int proto_register(struct proto *prot, int alloc_slab)
2455{
2456	if (alloc_slab) {
2457		prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
2458					SLAB_HWCACHE_ALIGN | prot->slab_flags,
2459					NULL);
2460
2461		if (prot->slab == NULL) {
2462			pr_crit("%s: Can't create sock SLAB cache!\n",
2463				prot->name);
2464			goto out;
2465		}
2466
2467		if (prot->rsk_prot != NULL) {
2468			prot->rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s", prot->name);
2469			if (prot->rsk_prot->slab_name == NULL)
2470				goto out_free_sock_slab;
2471
2472			prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name,
2473								 prot->rsk_prot->obj_size, 0,
2474								 SLAB_HWCACHE_ALIGN, NULL);
2475
2476			if (prot->rsk_prot->slab == NULL) {
2477				pr_crit("%s: Can't create request sock SLAB cache!\n",
2478					prot->name);
2479				goto out_free_request_sock_slab_name;
2480			}
2481		}
2482
2483		if (prot->twsk_prot != NULL) {
2484			prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);
2485
2486			if (prot->twsk_prot->twsk_slab_name == NULL)
2487				goto out_free_request_sock_slab;
2488
2489			prot->twsk_prot->twsk_slab =
2490				kmem_cache_create(prot->twsk_prot->twsk_slab_name,
2491						  prot->twsk_prot->twsk_obj_size,
2492						  0,
2493						  SLAB_HWCACHE_ALIGN |
2494							prot->slab_flags,
2495						  NULL);
2496			if (prot->twsk_prot->twsk_slab == NULL)
2497				goto out_free_timewait_sock_slab_name;
2498		}
2499	}
2500
2501	mutex_lock(&proto_list_mutex);
2502	list_add(&prot->node, &proto_list);
2503	assign_proto_idx(prot);
2504	mutex_unlock(&proto_list_mutex);
2505	return 0;
2506
2507out_free_timewait_sock_slab_name:
2508	kfree(prot->twsk_prot->twsk_slab_name);
2509out_free_request_sock_slab:
2510	if (prot->rsk_prot && prot->rsk_prot->slab) {
2511		kmem_cache_destroy(prot->rsk_prot->slab);
2512		prot->rsk_prot->slab = NULL;
2513	}
2514out_free_request_sock_slab_name:
2515	if (prot->rsk_prot)
2516		kfree(prot->rsk_prot->slab_name);
2517out_free_sock_slab:
2518	kmem_cache_destroy(prot->slab);
2519	prot->slab = NULL;
2520out:
2521	return -ENOBUFS;
2522}
2523EXPORT_SYMBOL(proto_register);
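
/*
 * Usage sketch (illustrative only): a minimal hypothetical protocol
 * registering itself; obj_size tells sk_prot_alloc() how much memory
 * each socket needs, and the second argument requests a dedicated slab.
 *
 *	static struct proto my_proto = {
 *		.name		= "MYPROTO",
 *		.owner		= THIS_MODULE,
 *		.obj_size	= sizeof(struct my_sock),
 *	};
 *
 *	err = proto_register(&my_proto, 1);
 */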
2524
2525void proto_unregister(struct proto *prot)
2526{
2527	mutex_lock(&proto_list_mutex);
2528	release_proto_idx(prot);
2529	list_del(&prot->node);
2530	mutex_unlock(&proto_list_mutex);
2531
2532	if (prot->slab != NULL) {
2533		kmem_cache_destroy(prot->slab);
2534		prot->slab = NULL;
2535	}
2536
2537	if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) {
2538		kmem_cache_destroy(prot->rsk_prot->slab);
2539		kfree(prot->rsk_prot->slab_name);
2540		prot->rsk_prot->slab = NULL;
2541	}
2542
2543	if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
2544		kmem_cache_destroy(prot->twsk_prot->twsk_slab);
2545		kfree(prot->twsk_prot->twsk_slab_name);
2546		prot->twsk_prot->twsk_slab = NULL;
2547	}
2548}
2549EXPORT_SYMBOL(proto_unregister);
2550
2551#ifdef CONFIG_PROC_FS
2552static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
2553	__acquires(proto_list_mutex)
2554{
2555	mutex_lock(&proto_list_mutex);
2556	return seq_list_start_head(&proto_list, *pos);
2557}
2558
2559static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2560{
2561	return seq_list_next(v, &proto_list, pos);
2562}
2563
2564static void proto_seq_stop(struct seq_file *seq, void *v)
2565	__releases(proto_list_mutex)
2566{
2567	mutex_unlock(&proto_list_mutex);
2568}
2569
2570static char proto_method_implemented(const void *method)
2571{
2572	return method == NULL ? 'n' : 'y';
2573}
2574static long sock_prot_memory_allocated(struct proto *proto)
2575{
2576	return proto->memory_allocated != NULL ? proto_memory_allocated(proto) : -1L;
2577}
2578
2579static char *sock_prot_memory_pressure(struct proto *proto)
2580{
2581	return proto->memory_pressure != NULL ?
2582	proto_memory_pressure(proto) ? "yes" : "no" : "NI";
2583}
2584
2585static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
2586{
2587
2588	seq_printf(seq, "%-9s %4u %6d  %6ld   %-3s %6u   %-3s  %-10s "
2589			"%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
2590		   proto->name,
2591		   proto->obj_size,
2592		   sock_prot_inuse_get(seq_file_net(seq), proto),
2593		   sock_prot_memory_allocated(proto),
2594		   sock_prot_memory_pressure(proto),
2595		   proto->max_header,
2596		   proto->slab == NULL ? "no" : "yes",
2597		   module_name(proto->owner),
2598		   proto_method_implemented(proto->close),
2599		   proto_method_implemented(proto->connect),
2600		   proto_method_implemented(proto->disconnect),
2601		   proto_method_implemented(proto->accept),
2602		   proto_method_implemented(proto->ioctl),
2603		   proto_method_implemented(proto->init),
2604		   proto_method_implemented(proto->destroy),
2605		   proto_method_implemented(proto->shutdown),
2606		   proto_method_implemented(proto->setsockopt),
2607		   proto_method_implemented(proto->getsockopt),
2608		   proto_method_implemented(proto->sendmsg),
2609		   proto_method_implemented(proto->recvmsg),
2610		   proto_method_implemented(proto->sendpage),
2611		   proto_method_implemented(proto->bind),
2612		   proto_method_implemented(proto->backlog_rcv),
2613		   proto_method_implemented(proto->hash),
2614		   proto_method_implemented(proto->unhash),
2615		   proto_method_implemented(proto->get_port),
2616		   proto_method_implemented(proto->enter_memory_pressure));
2617}
2618
2619static int proto_seq_show(struct seq_file *seq, void *v)
2620{
2621	if (v == &proto_list)
2622		seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
2623			   "protocol",
2624			   "size",
2625			   "sockets",
2626			   "memory",
2627			   "press",
2628			   "maxhdr",
2629			   "slab",
2630			   "module",
2631			   "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
2632	else
2633		proto_seq_printf(seq, list_entry(v, struct proto, node));
2634	return 0;
2635}
2636
2637static const struct seq_operations proto_seq_ops = {
2638	.start  = proto_seq_start,
2639	.next   = proto_seq_next,
2640	.stop   = proto_seq_stop,
2641	.show   = proto_seq_show,
2642};
2643
2644static int proto_seq_open(struct inode *inode, struct file *file)
2645{
2646	return seq_open_net(inode, file, &proto_seq_ops,
2647			    sizeof(struct seq_net_private));
2648}
2649
2650static const struct file_operations proto_seq_fops = {
2651	.owner		= THIS_MODULE,
2652	.open		= proto_seq_open,
2653	.read		= seq_read,
2654	.llseek		= seq_lseek,
2655	.release	= seq_release_net,
2656};
2657
2658static __net_init int proto_init_net(struct net *net)
2659{
2660	if (!proc_net_fops_create(net, "protocols", S_IRUGO, &proto_seq_fops))
2661		return -ENOMEM;
2662
2663	return 0;
2664}
2665
2666static __net_exit void proto_exit_net(struct net *net)
2667{
2668	proc_net_remove(net, "protocols");
2669}
2670
2671
2672static __net_initdata struct pernet_operations proto_net_ops = {
2673	.init = proto_init_net,
2674	.exit = proto_exit_net,
2675};
2676
2677static int __init proto_init(void)
2678{
2679	return register_pernet_subsys(&proto_net_ops);
2680}
2681
2682subsys_initcall(proto_init);
2683
2684#endif /* PROC_FS */