/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic socket support routines. Memory allocators, socket lock/release
 *		handler for protocols to use and generic option handler.
 *
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Alan Cox, <A.Cox@swansea.ac.uk>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() problems
 *		Alan Cox	:	Connecting on a connecting socket
 *					now returns an error for tcp.
 *		Alan Cox	:	sock->protocol is set correctly.
 *					and is not sometimes left as 0.
 *		Alan Cox	:	connect handles icmp errors on a
 *					connect properly. Unfortunately there
 *					is a restart syscall nasty there. I
 *					can't match BSD without hacking the C
 *					library. Ideas urgently sought!
 *		Alan Cox	:	Disallow bind() to addresses that are
 *					not ours - especially broadcast ones!!
 *		Alan Cox	:	Socket 1024 _IS_ ok for users. (fencepost)
 *		Alan Cox	:	sock_wfree/sock_rfree don't destroy sockets,
 *					instead they leave that for the DESTROY timer.
 *		Alan Cox	:	Clean up error flag in accept
 *		Alan Cox	:	TCP ack handling is buggy, the DESTROY timer
 *					was buggy. Put a remove_sock() in the handler
 *					for memory when we hit 0. Also altered the timer
 *					code. The ACK stuff can wait and needs major
 *					TCP layer surgery.
 *		Alan Cox	:	Fixed TCP ack bug, removed remove sock
 *					and fixed timer/inet_bh race.
 *		Alan Cox	:	Added zapped flag for TCP
 *		Alan Cox	:	Move kfree_skb into skbuff.c and tidied up surplus code
 *		Alan Cox	:	for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
 *		Alan Cox	:	kfree_s calls now are kfree_skbmem so we can track skb resources
 *		Alan Cox	:	Supports socket option broadcast now as does udp. Packet and raw need fixing.
 *		Alan Cox	:	Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
 *		Rick Sladkey	:	Relaxed UDP rules for matching packets.
 *		C.E.Hawkins	:	IFF_PROMISC/SIOCGHWADDR support
 *		Pauline Middelink	:	identd support
 *		Alan Cox	:	Fixed connect() taking signals I think.
 *		Alan Cox	:	SO_LINGER supported
 *		Alan Cox	:	Error reporting fixes
 *		Anonymous	:	inet_create tidied up (sk->reuse setting)
 *		Alan Cox	:	inet sockets don't set sk->type!
 *		Alan Cox	:	Split socket option code
 *		Alan Cox	:	Callbacks
 *		Alan Cox	:	Nagle flag for Charles & Johannes stuff
 *		Alex		:	Removed restriction on inet fioctl
 *		Alan Cox	:	Splitting INET from NET core
 *		Alan Cox	:	Fixed bogus SO_TYPE handling in getsockopt()
 *		Adam Caldwell	:	Missing return in SO_DONTROUTE/SO_DEBUG code
 *		Alan Cox	:	Split IP from generic code
 *		Alan Cox	:	New kfree_skbmem()
 *		Alan Cox	:	Make SO_DEBUG superuser only.
 *		Alan Cox	:	Allow anyone to clear SO_DEBUG
 *					(compatibility fix)
 *		Alan Cox	:	Added optimistic memory grabbing for AF_UNIX throughput.
 *		Alan Cox	:	Allocator for a socket is settable.
 *		Alan Cox	:	SO_ERROR includes soft errors.
 *		Alan Cox	:	Allow NULL arguments on some SO_ opts
 *		Alan Cox	:	Generic socket allocation to make hooks
 *					easier (suggested by Craig Metz).
 *		Michael Pall	:	SO_ERROR returns positive errno again
 *		Steve Whitehouse:	Added default destructor to free
 *					protocol private data.
 *		Steve Whitehouse:	Added various other default routines
 *					common to several socket families.
 *		Chris Evans	:	Call suser() check last on F_SETOWN
 *		Jay Schulist	:	Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
 *		Andi Kleen	:	Add sock_kmalloc()/sock_kfree_s()
 *		Andi Kleen	:	Fix write_space callback
 *		Chris Evans	:	Security fixes - signedness again
 *		Arnaldo C. Melo :	cleanups, use skb_queue_purge
 *
 * To Fix:
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/errqueue.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/user_namespace.h>
#include <linux/static_key.h>
#include <linux/memcontrol.h>
#include <linux/prefetch.h>

#include <linux/uaccess.h>

#include <linux/netdevice.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <linux/net_tstamp.h>
#include <net/xfrm.h>
#include <linux/ipsec.h>
#include <net/cls_cgroup.h>
#include <net/netprio_cgroup.h>
#include <linux/sock_diag.h>

#include <linux/filter.h>
#include <net/sock_reuseport.h>

#include <trace/events/sock.h>

#include <net/tcp.h>
#include <net/busy_poll.h>

static DEFINE_MUTEX(proto_list_mutex);
static LIST_HEAD(proto_list);

static void sock_inuse_add(struct net *net, int val);

/**
 * sk_ns_capable - General socket capability test
 * @sk: Socket to use a capability on or through
 * @user_ns: The user namespace of the capability to use
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket had the capability @cap in the
 * user namespace @user_ns when the socket was created, and that the
 * current process has it as well.
 */
bool sk_ns_capable(const struct sock *sk,
		   struct user_namespace *user_ns, int cap)
{
	return file_ns_capable(sk->sk_socket->file, user_ns, cap) &&
		ns_capable(user_ns, cap);
}
EXPORT_SYMBOL(sk_ns_capable);

/**
 * sk_capable - Socket global capability test
 * @sk: Socket to use a capability on or through
 * @cap: The global capability to use
 *
 * Test to see if the opener of the socket had the capability @cap in all
 * user namespaces when the socket was created, and that the current
 * process has it as well.
 */
bool sk_capable(const struct sock *sk, int cap)
{
	return sk_ns_capable(sk, &init_user_ns, cap);
}
EXPORT_SYMBOL(sk_capable);

/**
 * sk_net_capable - Network namespace socket capability test
 * @sk: Socket to use a capability on or through
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket had the capability @cap over
 * the network namespace the socket is a member of when the socket was
 * created, and that the current process has it as well.
 */
bool sk_net_capable(const struct sock *sk, int cap)
{
	return sk_ns_capable(sk, sock_net(sk)->user_ns, cap);
}
EXPORT_SYMBOL(sk_net_capable);
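
/*
 * Usage sketch (illustrative, hypothetical caller — not from this file):
 * a protocol gating a privileged operation on the socket's own network
 * namespace would typically write
 *
 *	if (!sk_net_capable(sk, CAP_NET_ADMIN))
 *		return -EPERM;
 *
 * sk_capable() is the same test against the initial user namespace, and
 * sk_ns_capable() lets the caller pick the user namespace explicitly.
 */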

/*
 * Each address family might have different locking rules, so we have
 * one slock key per address family and separate keys for internal and
 * userspace sockets.
 */
static struct lock_class_key af_family_keys[AF_MAX];
static struct lock_class_key af_family_kern_keys[AF_MAX];
static struct lock_class_key af_family_slock_keys[AF_MAX];
static struct lock_class_key af_family_kern_slock_keys[AF_MAX];

/*
 * Make lock validator output more readable. (We pre-construct these
 * strings at build time, so that runtime initialization of socket
 * locks is fast.)
 */

#define _sock_locks(x)						  \
  x "AF_UNSPEC",	x "AF_UNIX"     ,	x "AF_INET"     , \
  x "AF_AX25"  ,	x "AF_IPX"      ,	x "AF_APPLETALK", \
  x "AF_NETROM",	x "AF_BRIDGE"   ,	x "AF_ATMPVC"   , \
  x "AF_X25"   ,	x "AF_INET6"    ,	x "AF_ROSE"     , \
  x "AF_DECnet",	x "AF_NETBEUI"  ,	x "AF_SECURITY" , \
  x "AF_KEY"   ,	x "AF_NETLINK"  ,	x "AF_PACKET"   , \
  x "AF_ASH"   ,	x "AF_ECONET"   ,	x "AF_ATMSVC"   , \
  x "AF_RDS"   ,	x "AF_SNA"      ,	x "AF_IRDA"     , \
  x "AF_PPPOX" ,	x "AF_WANPIPE"  ,	x "AF_LLC"      , \
  x "27"       ,	x "28"          ,	x "AF_CAN"      , \
  x "AF_TIPC"  ,	x "AF_BLUETOOTH",	x "AF_IUCV"     , \
  x "AF_RXRPC" ,	x "AF_ISDN"     ,	x "AF_PHONET"   , \
  x "AF_IEEE802154",	x "AF_CAIF"     ,	x "AF_ALG"      , \
  x "AF_NFC"   ,	x "AF_VSOCK"    ,	x "AF_KCM"      , \
  x "AF_QIPCRTR",	x "AF_SMC"      ,	x "AF_MAX"

static const char *const af_family_key_strings[AF_MAX+1] = {
	_sock_locks("sk_lock-")
};
static const char *const af_family_slock_key_strings[AF_MAX+1] = {
	_sock_locks("slock-")
};
static const char *const af_family_clock_key_strings[AF_MAX+1] = {
	_sock_locks("clock-")
};

static const char *const af_family_kern_key_strings[AF_MAX+1] = {
	_sock_locks("k-sk_lock-")
};
static const char *const af_family_kern_slock_key_strings[AF_MAX+1] = {
	_sock_locks("k-slock-")
};
static const char *const af_family_kern_clock_key_strings[AF_MAX+1] = {
	_sock_locks("k-clock-")
};
static const char *const af_family_rlock_key_strings[AF_MAX+1] = {
	"rlock-AF_UNSPEC", "rlock-AF_UNIX"     , "rlock-AF_INET"     ,
	"rlock-AF_AX25"  , "rlock-AF_IPX"      , "rlock-AF_APPLETALK",
	"rlock-AF_NETROM", "rlock-AF_BRIDGE"   , "rlock-AF_ATMPVC"   ,
	"rlock-AF_X25"   , "rlock-AF_INET6"    , "rlock-AF_ROSE"     ,
	"rlock-AF_DECnet", "rlock-AF_NETBEUI"  , "rlock-AF_SECURITY" ,
	"rlock-AF_KEY"   , "rlock-AF_NETLINK"  , "rlock-AF_PACKET"   ,
	"rlock-AF_ASH"   , "rlock-AF_ECONET"   , "rlock-AF_ATMSVC"   ,
	"rlock-AF_RDS"   , "rlock-AF_SNA"      , "rlock-AF_IRDA"     ,
	"rlock-AF_PPPOX" , "rlock-AF_WANPIPE"  , "rlock-AF_LLC"      ,
	"rlock-27"       , "rlock-28"          , "rlock-AF_CAN"      ,
	"rlock-AF_TIPC"  , "rlock-AF_BLUETOOTH", "rlock-AF_IUCV"     ,
	"rlock-AF_RXRPC" , "rlock-AF_ISDN"     , "rlock-AF_PHONET"   ,
	"rlock-AF_IEEE802154", "rlock-AF_CAIF" , "rlock-AF_ALG"      ,
	"rlock-AF_NFC"   , "rlock-AF_VSOCK"    , "rlock-AF_KCM"      ,
	"rlock-AF_QIPCRTR", "rlock-AF_SMC"     , "rlock-AF_MAX"
};
static const char *const af_family_wlock_key_strings[AF_MAX+1] = {
	"wlock-AF_UNSPEC", "wlock-AF_UNIX"     , "wlock-AF_INET"     ,
	"wlock-AF_AX25"  , "wlock-AF_IPX"      , "wlock-AF_APPLETALK",
	"wlock-AF_NETROM", "wlock-AF_BRIDGE"   , "wlock-AF_ATMPVC"   ,
	"wlock-AF_X25"   , "wlock-AF_INET6"    , "wlock-AF_ROSE"     ,
	"wlock-AF_DECnet", "wlock-AF_NETBEUI"  , "wlock-AF_SECURITY" ,
	"wlock-AF_KEY"   , "wlock-AF_NETLINK"  , "wlock-AF_PACKET"   ,
	"wlock-AF_ASH"   , "wlock-AF_ECONET"   , "wlock-AF_ATMSVC"   ,
	"wlock-AF_RDS"   , "wlock-AF_SNA"      , "wlock-AF_IRDA"     ,
	"wlock-AF_PPPOX" , "wlock-AF_WANPIPE"  , "wlock-AF_LLC"      ,
	"wlock-27"       , "wlock-28"          , "wlock-AF_CAN"      ,
	"wlock-AF_TIPC"  , "wlock-AF_BLUETOOTH", "wlock-AF_IUCV"     ,
	"wlock-AF_RXRPC" , "wlock-AF_ISDN"     , "wlock-AF_PHONET"   ,
	"wlock-AF_IEEE802154", "wlock-AF_CAIF" , "wlock-AF_ALG"      ,
	"wlock-AF_NFC"   , "wlock-AF_VSOCK"    , "wlock-AF_KCM"      ,
	"wlock-AF_QIPCRTR", "wlock-AF_SMC"     , "wlock-AF_MAX"
};
static const char *const af_family_elock_key_strings[AF_MAX+1] = {
	"elock-AF_UNSPEC", "elock-AF_UNIX"     , "elock-AF_INET"     ,
	"elock-AF_AX25"  , "elock-AF_IPX"      , "elock-AF_APPLETALK",
	"elock-AF_NETROM", "elock-AF_BRIDGE"   , "elock-AF_ATMPVC"   ,
	"elock-AF_X25"   , "elock-AF_INET6"    , "elock-AF_ROSE"     ,
	"elock-AF_DECnet", "elock-AF_NETBEUI"  , "elock-AF_SECURITY" ,
	"elock-AF_KEY"   , "elock-AF_NETLINK"  , "elock-AF_PACKET"   ,
	"elock-AF_ASH"   , "elock-AF_ECONET"   , "elock-AF_ATMSVC"   ,
	"elock-AF_RDS"   , "elock-AF_SNA"      , "elock-AF_IRDA"     ,
	"elock-AF_PPPOX" , "elock-AF_WANPIPE"  , "elock-AF_LLC"      ,
	"elock-27"       , "elock-28"          , "elock-AF_CAN"      ,
	"elock-AF_TIPC"  , "elock-AF_BLUETOOTH", "elock-AF_IUCV"     ,
	"elock-AF_RXRPC" , "elock-AF_ISDN"     , "elock-AF_PHONET"   ,
	"elock-AF_IEEE802154", "elock-AF_CAIF" , "elock-AF_ALG"      ,
	"elock-AF_NFC"   , "elock-AF_VSOCK"    , "elock-AF_KCM"      ,
	"elock-AF_QIPCRTR", "elock-AF_SMC"     , "elock-AF_MAX"
};

/*
 * sk_callback_lock and sk queues locking rules are per-address-family,
 * so split the lock classes by using a per-AF key:
 */
static struct lock_class_key af_callback_keys[AF_MAX];
static struct lock_class_key af_rlock_keys[AF_MAX];
static struct lock_class_key af_wlock_keys[AF_MAX];
static struct lock_class_key af_elock_keys[AF_MAX];
static struct lock_class_key af_kern_callback_keys[AF_MAX];

/* Run time adjustable parameters. */
__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
EXPORT_SYMBOL(sysctl_wmem_max);
__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
EXPORT_SYMBOL(sysctl_rmem_max);
__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;

/* Maximal space eaten by iovec or ancillary data plus some space */
int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
EXPORT_SYMBOL(sysctl_optmem_max);

int sysctl_tstamp_allow_data __read_mostly = 1;

struct static_key memalloc_socks = STATIC_KEY_INIT_FALSE;
EXPORT_SYMBOL_GPL(memalloc_socks);

/**
 * sk_set_memalloc - sets %SOCK_MEMALLOC
 * @sk: socket to set it on
 *
 * Set %SOCK_MEMALLOC on a socket for access to emergency reserves.
 * It's the responsibility of the admin to adjust min_free_kbytes
 * to meet the requirements.
 */
void sk_set_memalloc(struct sock *sk)
{
	sock_set_flag(sk, SOCK_MEMALLOC);
	sk->sk_allocation |= __GFP_MEMALLOC;
	static_key_slow_inc(&memalloc_socks);
}
EXPORT_SYMBOL_GPL(sk_set_memalloc);

void sk_clear_memalloc(struct sock *sk)
{
	sock_reset_flag(sk, SOCK_MEMALLOC);
	sk->sk_allocation &= ~__GFP_MEMALLOC;
	static_key_slow_dec(&memalloc_socks);

	/*
	 * SOCK_MEMALLOC is allowed to ignore rmem limits to ensure forward
	 * progress of swapping. SOCK_MEMALLOC may be cleared while
	 * it has rmem allocations due to the last swapfile being deactivated
	 * but there is a risk that the socket is unusable due to exceeding
	 * the rmem limits. Reclaim the reserves and obey rmem limits again.
	 */
	sk_mem_reclaim(sk);
}
EXPORT_SYMBOL_GPL(sk_clear_memalloc);
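
/*
 * Usage sketch (illustrative): a swap-over-network transport marks its
 * socket so allocations may dip into the emergency reserves, and clears
 * the flag again when the last swapfile backed by it goes away:
 *
 *	sk_set_memalloc(sock->sk);
 *	...
 *	sk_clear_memalloc(sock->sk);
 *
 * Real callers include the sunrpc transport code (swap over NFS).
 */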

int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	int ret;
	unsigned int noreclaim_flag;

	/* these should have been dropped before queueing */
	BUG_ON(!sock_flag(sk, SOCK_MEMALLOC));

	noreclaim_flag = memalloc_noreclaim_save();
	ret = sk->sk_backlog_rcv(sk, skb);
	memalloc_noreclaim_restore(noreclaim_flag);

	return ret;
}
EXPORT_SYMBOL(__sk_backlog_rcv);

static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
{
	struct timeval tv;

	if (optlen < sizeof(tv))
		return -EINVAL;
	if (copy_from_user(&tv, optval, sizeof(tv)))
		return -EFAULT;
	if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
		return -EDOM;

	if (tv.tv_sec < 0) {
		static int warned __read_mostly;

		*timeo_p = 0;
		if (warned < 10 && net_ratelimit()) {
			warned++;
			pr_info("%s: `%s' (pid %d) tries to set negative timeout\n",
				__func__, current->comm, task_pid_nr(current));
		}
		return 0;
	}
	*timeo_p = MAX_SCHEDULE_TIMEOUT;
	if (tv.tv_sec == 0 && tv.tv_usec == 0)
		return 0;
	if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
		*timeo_p = tv.tv_sec * HZ + DIV_ROUND_UP(tv.tv_usec, USEC_PER_SEC / HZ);
	return 0;
}
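
/*
 * For reference, the userspace side that lands in sock_set_timeout()
 * (a sketch, not kernel code):
 *
 *	struct timeval tv = { .tv_sec = 5, .tv_usec = 0 };
 *	setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
 *
 * A zero timeout maps to MAX_SCHEDULE_TIMEOUT (block forever); a negative
 * tv_sec is clamped to 0 with the ratelimited warning above.
 */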

static void sock_warn_obsolete_bsdism(const char *name)
{
	static int warned;
	static char warncomm[TASK_COMM_LEN];
	if (strcmp(warncomm, current->comm) && warned < 5) {
		strcpy(warncomm, current->comm);
		pr_warn("process `%s' is using obsolete %s SO_BSDCOMPAT\n",
			warncomm, name);
		warned++;
	}
}

static bool sock_needs_netstamp(const struct sock *sk)
{
	switch (sk->sk_family) {
	case AF_UNSPEC:
	case AF_UNIX:
		return false;
	default:
		return true;
	}
}

static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
{
	if (sk->sk_flags & flags) {
		sk->sk_flags &= ~flags;
		if (sock_needs_netstamp(sk) &&
		    !(sk->sk_flags & SK_FLAGS_TIMESTAMP))
			net_disable_timestamp();
	}
}


int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	unsigned long flags;
	struct sk_buff_head *list = &sk->sk_receive_queue;

	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
		atomic_inc(&sk->sk_drops);
		trace_sock_rcvqueue_full(sk, skb);
		return -ENOMEM;
	}

	if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
		atomic_inc(&sk->sk_drops);
		return -ENOBUFS;
	}

	skb->dev = NULL;
	skb_set_owner_r(skb, sk);

	/* we escape from the RCU protected region, make sure we don't leak
	 * a non-refcounted dst
	 */
	skb_dst_force(skb);

	spin_lock_irqsave(&list->lock, flags);
	sock_skb_set_dropcount(sk, skb);
	__skb_queue_tail(list, skb);
	spin_unlock_irqrestore(&list->lock, flags);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk);
	return 0;
}
EXPORT_SYMBOL(__sock_queue_rcv_skb);

int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err;

	err = sk_filter(sk, skb);
	if (err)
		return err;

	return __sock_queue_rcv_skb(sk, skb);
}
EXPORT_SYMBOL(sock_queue_rcv_skb);
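
/*
 * Typical protocol receive path (illustrative sketch): after looking up
 * the owning socket, a datagram protocol queues the skb and drops it on
 * failure, e.g.
 *
 *	if (sock_queue_rcv_skb(sk, skb) < 0) {
 *		kfree_skb(skb);
 *		return NET_RX_DROP;
 *	}
 *
 * sk_filter() runs first, so an attached BPF filter can still reject the
 * packet before it is charged to the receive queue.
 */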

int __sk_receive_skb(struct sock *sk, struct sk_buff *skb,
		     const int nested, unsigned int trim_cap, bool refcounted)
{
	int rc = NET_RX_SUCCESS;

	if (sk_filter_trim_cap(sk, skb, trim_cap))
		goto discard_and_relse;

	skb->dev = NULL;

	if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}
	if (nested)
		bh_lock_sock_nested(sk);
	else
		bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		/*
		 * trylock + unlock semantics:
		 */
		mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);

		rc = sk_backlog_rcv(sk, skb);

		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
	} else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
		bh_unlock_sock(sk);
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}

	bh_unlock_sock(sk);
out:
	if (refcounted)
		sock_put(sk);
	return rc;
discard_and_relse:
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(__sk_receive_skb);

struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = __sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_tx_queue_clear(sk);
		sk->sk_dst_pending_confirm = 0;
		RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(__sk_dst_check);

struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_dst_reset(sk);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(sk_dst_check);

static int sock_setbindtodevice(struct sock *sk, char __user *optval,
				int optlen)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);
	char devname[IFNAMSIZ];
	int index;

	/* Sorry... */
	ret = -EPERM;
	if (!ns_capable(net->user_ns, CAP_NET_RAW))
		goto out;

	ret = -EINVAL;
	if (optlen < 0)
		goto out;

	/* Bind this socket to a particular device like "eth0",
	 * as specified in the passed interface name. If the
	 * name is "" or the option length is zero the socket
	 * is not bound.
	 */
	if (optlen > IFNAMSIZ - 1)
		optlen = IFNAMSIZ - 1;
	memset(devname, 0, sizeof(devname));

	ret = -EFAULT;
	if (copy_from_user(devname, optval, optlen))
		goto out;

	index = 0;
	if (devname[0] != '\0') {
		struct net_device *dev;

		rcu_read_lock();
		dev = dev_get_by_name_rcu(net, devname);
		if (dev)
			index = dev->ifindex;
		rcu_read_unlock();
		ret = -ENODEV;
		if (!dev)
			goto out;
	}

	lock_sock(sk);
	sk->sk_bound_dev_if = index;
	sk_dst_reset(sk);
	release_sock(sk);

	ret = 0;

out:
#endif

	return ret;
}

static int sock_getbindtodevice(struct sock *sk, char __user *optval,
				int __user *optlen, int len)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);
	char devname[IFNAMSIZ];

	if (sk->sk_bound_dev_if == 0) {
		len = 0;
		goto zero;
	}

	ret = -EINVAL;
	if (len < IFNAMSIZ)
		goto out;

	ret = netdev_get_name(net, devname, sk->sk_bound_dev_if);
	if (ret)
		goto out;

	len = strlen(devname) + 1;

	ret = -EFAULT;
	if (copy_to_user(optval, devname, len))
		goto out;

zero:
	ret = -EFAULT;
	if (put_user(len, optlen))
		goto out;

	ret = 0;

out:
#endif

	return ret;
}

static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
{
	if (valbool)
		sock_set_flag(sk, bit);
	else
		sock_reset_flag(sk, bit);
}

bool sk_mc_loop(struct sock *sk)
{
	if (dev_recursion_level())
		return false;
	if (!sk)
		return true;
	switch (sk->sk_family) {
	case AF_INET:
		return inet_sk(sk)->mc_loop;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		return inet6_sk(sk)->mc_loop;
#endif
	}
	WARN_ON(1);
	return true;
}
EXPORT_SYMBOL(sk_mc_loop);

/*
 *	This is meant for all protocols to use and covers goings on
 *	at the socket level. Everything here is generic.
 */

int sock_setsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	int val;
	int valbool;
	struct linger ling;
	int ret = 0;

	/*
	 *	Options without arguments
	 */

	if (optname == SO_BINDTODEVICE)
		return sock_setbindtodevice(sk, optval, optlen);

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	valbool = val ? 1 : 0;

	lock_sock(sk);

	switch (optname) {
	case SO_DEBUG:
		if (val && !capable(CAP_NET_ADMIN))
			ret = -EACCES;
		else
			sock_valbool_flag(sk, SOCK_DBG, valbool);
		break;
	case SO_REUSEADDR:
		sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
		break;
	case SO_REUSEPORT:
		sk->sk_reuseport = valbool;
		break;
	case SO_TYPE:
	case SO_PROTOCOL:
	case SO_DOMAIN:
	case SO_ERROR:
		ret = -ENOPROTOOPT;
		break;
	case SO_DONTROUTE:
		sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
		break;
	case SO_BROADCAST:
		sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
		break;
	case SO_SNDBUF:
		/* Don't error on this; BSD doesn't, and if you think
		 * about it this is right. Otherwise apps have to
		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
		 * are treated in BSD as hints.
		 */
		val = min_t(u32, val, sysctl_wmem_max);
set_sndbuf:
		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
		/* Wake up sending tasks if we upped the value. */
		sk->sk_write_space(sk);
		break;

	case SO_SNDBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_sndbuf;

	case SO_RCVBUF:
		/* Don't error on this; BSD doesn't, and if you think
		 * about it this is right. Otherwise apps have to
		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
		 * are treated in BSD as hints.
		 */
		val = min_t(u32, val, sysctl_rmem_max);
set_rcvbuf:
		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
		/*
		 * We double it on the way in to account for
		 * "struct sk_buff" etc. overhead.   Applications
		 * assume that the SO_RCVBUF setting they make will
		 * allow that much actual data to be received on that
		 * socket.
		 *
		 * Applications are unaware that "struct sk_buff" and
		 * other overheads allocate from the receive buffer
		 * during socket buffer allocation.
		 *
		 * And after considering the possible alternatives,
		 * returning the value we actually used in getsockopt
		 * is the most desirable behavior.
		 */
		sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF);
		break;

	case SO_RCVBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_rcvbuf;

	case SO_KEEPALIVE:
		if (sk->sk_prot->keepalive)
			sk->sk_prot->keepalive(sk, valbool);
		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
		break;

	case SO_OOBINLINE:
		sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
		break;

	case SO_NO_CHECK:
		sk->sk_no_check_tx = valbool;
		break;

	case SO_PRIORITY:
		if ((val >= 0 && val <= 6) ||
		    ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
			sk->sk_priority = val;
		else
			ret = -EPERM;
		break;

	case SO_LINGER:
		if (optlen < sizeof(ling)) {
			ret = -EINVAL;	/* 1003.1g */
			break;
		}
		if (copy_from_user(&ling, optval, sizeof(ling))) {
			ret = -EFAULT;
			break;
		}
		if (!ling.l_onoff)
			sock_reset_flag(sk, SOCK_LINGER);
		else {
#if (BITS_PER_LONG == 32)
			if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
				sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
			else
#endif
				sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
			sock_set_flag(sk, SOCK_LINGER);
		}
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("setsockopt");
		break;

	case SO_PASSCRED:
		if (valbool)
			set_bit(SOCK_PASSCRED, &sock->flags);
		else
			clear_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_TIMESTAMP:
	case SO_TIMESTAMPNS:
		if (valbool) {
			if (optname == SO_TIMESTAMP)
				sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
			else
				sock_set_flag(sk, SOCK_RCVTSTAMPNS);
			sock_set_flag(sk, SOCK_RCVTSTAMP);
			sock_enable_timestamp(sk, SOCK_TIMESTAMP);
		} else {
			sock_reset_flag(sk, SOCK_RCVTSTAMP);
			sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
		}
		break;

	case SO_TIMESTAMPING:
		if (val & ~SOF_TIMESTAMPING_MASK) {
			ret = -EINVAL;
			break;
		}

		if (val & SOF_TIMESTAMPING_OPT_ID &&
		    !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) {
			if (sk->sk_protocol == IPPROTO_TCP &&
			    sk->sk_type == SOCK_STREAM) {
				if ((1 << sk->sk_state) &
				    (TCPF_CLOSE | TCPF_LISTEN)) {
					ret = -EINVAL;
					break;
				}
				sk->sk_tskey = tcp_sk(sk)->snd_una;
			} else {
				sk->sk_tskey = 0;
			}
		}

		if (val & SOF_TIMESTAMPING_OPT_STATS &&
		    !(val & SOF_TIMESTAMPING_OPT_TSONLY)) {
			ret = -EINVAL;
			break;
		}

		sk->sk_tsflags = val;
		if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
			sock_enable_timestamp(sk,
					      SOCK_TIMESTAMPING_RX_SOFTWARE);
		else
			sock_disable_timestamp(sk,
					       (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
		break;

	case SO_RCVLOWAT:
		if (val < 0)
			val = INT_MAX;
		sk->sk_rcvlowat = val ? : 1;
		break;

	case SO_RCVTIMEO:
		ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
		break;

	case SO_SNDTIMEO:
		ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
		break;

	case SO_ATTACH_FILTER:
		ret = -EINVAL;
		if (optlen == sizeof(struct sock_fprog)) {
			struct sock_fprog fprog;

			ret = -EFAULT;
			if (copy_from_user(&fprog, optval, sizeof(fprog)))
				break;

			ret = sk_attach_filter(&fprog, sk);
		}
		break;

	case SO_ATTACH_BPF:
		ret = -EINVAL;
		if (optlen == sizeof(u32)) {
			u32 ufd;

			ret = -EFAULT;
			if (copy_from_user(&ufd, optval, sizeof(ufd)))
				break;

			ret = sk_attach_bpf(ufd, sk);
		}
		break;

	case SO_ATTACH_REUSEPORT_CBPF:
		ret = -EINVAL;
		if (optlen == sizeof(struct sock_fprog)) {
			struct sock_fprog fprog;

			ret = -EFAULT;
			if (copy_from_user(&fprog, optval, sizeof(fprog)))
				break;

			ret = sk_reuseport_attach_filter(&fprog, sk);
		}
		break;

	case SO_ATTACH_REUSEPORT_EBPF:
		ret = -EINVAL;
		if (optlen == sizeof(u32)) {
			u32 ufd;

			ret = -EFAULT;
			if (copy_from_user(&ufd, optval, sizeof(ufd)))
				break;

			ret = sk_reuseport_attach_bpf(ufd, sk);
		}
		break;

	case SO_DETACH_FILTER:
		ret = sk_detach_filter(sk);
		break;

	case SO_LOCK_FILTER:
		if (sock_flag(sk, SOCK_FILTER_LOCKED) && !valbool)
			ret = -EPERM;
		else
			sock_valbool_flag(sk, SOCK_FILTER_LOCKED, valbool);
		break;

	case SO_PASSSEC:
		if (valbool)
			set_bit(SOCK_PASSSEC, &sock->flags);
		else
			clear_bit(SOCK_PASSSEC, &sock->flags);
		break;
	case SO_MARK:
		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
			ret = -EPERM;
		else
			sk->sk_mark = val;
		break;

	case SO_RXQ_OVFL:
		sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
		break;

	case SO_WIFI_STATUS:
		sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
		break;

	case SO_PEEK_OFF:
		if (sock->ops->set_peek_off)
			ret = sock->ops->set_peek_off(sk, val);
		else
			ret = -EOPNOTSUPP;
		break;

	case SO_NOFCS:
		sock_valbool_flag(sk, SOCK_NOFCS, valbool);
		break;

	case SO_SELECT_ERR_QUEUE:
		sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool);
		break;

#ifdef CONFIG_NET_RX_BUSY_POLL
	case SO_BUSY_POLL:
		/* allow unprivileged users to decrease the value */
		if ((val > sk->sk_ll_usec) && !capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else {
			if (val < 0)
				ret = -EINVAL;
			else
				sk->sk_ll_usec = val;
		}
		break;
#endif

	case SO_MAX_PACING_RATE:
		if (val != ~0U)
			cmpxchg(&sk->sk_pacing_status,
				SK_PACING_NONE,
				SK_PACING_NEEDED);
		sk->sk_max_pacing_rate = val;
		sk->sk_pacing_rate = min(sk->sk_pacing_rate,
					 sk->sk_max_pacing_rate);
		break;

	case SO_INCOMING_CPU:
		sk->sk_incoming_cpu = val;
		break;

	case SO_CNX_ADVICE:
		if (val == 1)
			dst_negative_advice(sk);
		break;

	case SO_ZEROCOPY:
		if (sk->sk_family == PF_INET || sk->sk_family == PF_INET6) {
			if (sk->sk_protocol != IPPROTO_TCP)
				ret = -ENOTSUPP;
		} else if (sk->sk_family != PF_RDS) {
			ret = -ENOTSUPP;
		}
		if (!ret) {
			if (val < 0 || val > 1)
				ret = -EINVAL;
			else
				sock_valbool_flag(sk, SOCK_ZEROCOPY, valbool);
		}
		break;

	default:
		ret = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);
	return ret;
}
EXPORT_SYMBOL(sock_setsockopt);
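
/*
 * Worked example of the SO_RCVBUF doubling above (illustrative, from the
 * userspace point of view): asking for 4096 bytes yields 8192, because
 * struct sk_buff overhead is accounted inside sk_rcvbuf:
 *
 *	int val = 4096, out;
 *	socklen_t len = sizeof(out);
 *	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, sizeof(val));
 *	getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &out, &len);
 *	out == 8192, assuming 4096 <= sysctl_rmem_max
 */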


static void cred_to_ucred(struct pid *pid, const struct cred *cred,
			  struct ucred *ucred)
{
	ucred->pid = pid_vnr(pid);
	ucred->uid = ucred->gid = -1;
	if (cred) {
		struct user_namespace *current_ns = current_user_ns();

		ucred->uid = from_kuid_munged(current_ns, cred->euid);
		ucred->gid = from_kgid_munged(current_ns, cred->egid);
	}
}

static int groups_to_user(gid_t __user *dst, const struct group_info *src)
{
	struct user_namespace *user_ns = current_user_ns();
	int i;

	for (i = 0; i < src->ngroups; i++)
		if (put_user(from_kgid_munged(user_ns, src->gid[i]), dst + i))
			return -EFAULT;

	return 0;
}

int sock_getsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	union {
		int val;
		u64 val64;
		struct linger ling;
		struct timeval tm;
	} v;

	int lv = sizeof(int);
	int len;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	memset(&v, 0, sizeof(v));

	switch (optname) {
	case SO_DEBUG:
		v.val = sock_flag(sk, SOCK_DBG);
		break;

	case SO_DONTROUTE:
		v.val = sock_flag(sk, SOCK_LOCALROUTE);
		break;

	case SO_BROADCAST:
		v.val = sock_flag(sk, SOCK_BROADCAST);
		break;

	case SO_SNDBUF:
		v.val = sk->sk_sndbuf;
		break;

	case SO_RCVBUF:
		v.val = sk->sk_rcvbuf;
		break;

	case SO_REUSEADDR:
		v.val = sk->sk_reuse;
		break;

	case SO_REUSEPORT:
		v.val = sk->sk_reuseport;
		break;

	case SO_KEEPALIVE:
		v.val = sock_flag(sk, SOCK_KEEPOPEN);
		break;

	case SO_TYPE:
		v.val = sk->sk_type;
		break;

	case SO_PROTOCOL:
		v.val = sk->sk_protocol;
		break;

	case SO_DOMAIN:
		v.val = sk->sk_family;
		break;

	case SO_ERROR:
		v.val = -sock_error(sk);
		if (v.val == 0)
			v.val = xchg(&sk->sk_err_soft, 0);
		break;

	case SO_OOBINLINE:
		v.val = sock_flag(sk, SOCK_URGINLINE);
		break;

	case SO_NO_CHECK:
		v.val = sk->sk_no_check_tx;
		break;

	case SO_PRIORITY:
		v.val = sk->sk_priority;
		break;

	case SO_LINGER:
		lv = sizeof(v.ling);
		v.ling.l_onoff = sock_flag(sk, SOCK_LINGER);
		v.ling.l_linger = sk->sk_lingertime / HZ;
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("getsockopt");
		break;

	case SO_TIMESTAMP:
		v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
				!sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPNS:
		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPING:
		v.val = sk->sk_tsflags;
		break;

	case SO_RCVTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * USEC_PER_SEC) / HZ;
		}
		break;

	case SO_SNDTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_sndtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * USEC_PER_SEC) / HZ;
		}
		break;

	case SO_RCVLOWAT:
		v.val = sk->sk_rcvlowat;
		break;

	case SO_SNDLOWAT:
		v.val = 1;
		break;

	case SO_PASSCRED:
		v.val = !!test_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_PEERCRED:
	{
		struct ucred peercred;
		if (len > sizeof(peercred))
			len = sizeof(peercred);
		cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
		if (copy_to_user(optval, &peercred, len))
			return -EFAULT;
		goto lenout;
	}

	case SO_PEERGROUPS:
	{
		int ret, n;

		if (!sk->sk_peer_cred)
			return -ENODATA;

		n = sk->sk_peer_cred->group_info->ngroups;
		if (len < n * sizeof(gid_t)) {
			len = n * sizeof(gid_t);
			return put_user(len, optlen) ? -EFAULT : -ERANGE;
		}
		len = n * sizeof(gid_t);

		ret = groups_to_user((gid_t __user *)optval,
				     sk->sk_peer_cred->group_info);
		if (ret)
			return ret;
		goto lenout;
	}

	case SO_PEERNAME:
	{
		char address[128];

		lv = sock->ops->getname(sock, (struct sockaddr *)address, 2);
		if (lv < 0)
			return -ENOTCONN;
		if (lv < len)
			return -EINVAL;
		if (copy_to_user(optval, address, len))
			return -EFAULT;
		goto lenout;
	}

	/* Dubious BSD thing... Probably nobody even uses it, but
	 * the UNIX standard wants it for whatever reason... -DaveM
	 */
	case SO_ACCEPTCONN:
		v.val = sk->sk_state == TCP_LISTEN;
		break;

	case SO_PASSSEC:
		v.val = !!test_bit(SOCK_PASSSEC, &sock->flags);
		break;

	case SO_PEERSEC:
		return security_socket_getpeersec_stream(sock, optval, optlen, len);

	case SO_MARK:
		v.val = sk->sk_mark;
		break;

	case SO_RXQ_OVFL:
		v.val = sock_flag(sk, SOCK_RXQ_OVFL);
		break;

	case SO_WIFI_STATUS:
		v.val = sock_flag(sk, SOCK_WIFI_STATUS);
		break;

	case SO_PEEK_OFF:
		if (!sock->ops->set_peek_off)
			return -EOPNOTSUPP;

		v.val = sk->sk_peek_off;
		break;
	case SO_NOFCS:
		v.val = sock_flag(sk, SOCK_NOFCS);
		break;

	case SO_BINDTODEVICE:
		return sock_getbindtodevice(sk, optval, optlen, len);

	case SO_GET_FILTER:
		len = sk_get_filter(sk, (struct sock_filter __user *)optval, len);
		if (len < 0)
			return len;

		goto lenout;

	case SO_LOCK_FILTER:
		v.val = sock_flag(sk, SOCK_FILTER_LOCKED);
		break;

	case SO_BPF_EXTENSIONS:
		v.val = bpf_tell_extensions();
		break;

	case SO_SELECT_ERR_QUEUE:
		v.val = sock_flag(sk, SOCK_SELECT_ERR_QUEUE);
		break;

#ifdef CONFIG_NET_RX_BUSY_POLL
	case SO_BUSY_POLL:
		v.val = sk->sk_ll_usec;
		break;
#endif

	case SO_MAX_PACING_RATE:
		v.val = sk->sk_max_pacing_rate;
		break;

	case SO_INCOMING_CPU:
		v.val = sk->sk_incoming_cpu;
		break;

	case SO_MEMINFO:
	{
		u32 meminfo[SK_MEMINFO_VARS];

		if (get_user(len, optlen))
			return -EFAULT;

		sk_get_meminfo(sk, meminfo);

		len = min_t(unsigned int, len, sizeof(meminfo));
		if (copy_to_user(optval, &meminfo, len))
			return -EFAULT;

		goto lenout;
	}

#ifdef CONFIG_NET_RX_BUSY_POLL
	case SO_INCOMING_NAPI_ID:
		v.val = READ_ONCE(sk->sk_napi_id);

		/* aggregate non-NAPI IDs down to 0 */
		if (v.val < MIN_NAPI_ID)
			v.val = 0;

		break;
#endif

	case SO_COOKIE:
		lv = sizeof(u64);
		if (len < lv)
			return -EINVAL;
		v.val64 = sock_gen_cookie(sk);
		break;

	case SO_ZEROCOPY:
		v.val = sock_flag(sk, SOCK_ZEROCOPY);
		break;

	default:
		/* We implement the SO_SNDLOWAT etc to not be settable
		 * (1003.1g 7).
		 */
		return -ENOPROTOOPT;
	}

	if (len > lv)
		len = lv;
	if (copy_to_user(optval, &v, len))
		return -EFAULT;
lenout:
	if (put_user(len, optlen))
		return -EFAULT;
	return 0;
}

/*
 * Initialize an sk_lock.
 *
 * (We also register the sk_lock with the lock validator.)
 */
static inline void sock_lock_init(struct sock *sk)
{
	if (sk->sk_kern_sock)
		sock_lock_init_class_and_name(
			sk,
			af_family_kern_slock_key_strings[sk->sk_family],
			af_family_kern_slock_keys + sk->sk_family,
			af_family_kern_key_strings[sk->sk_family],
			af_family_kern_keys + sk->sk_family);
	else
		sock_lock_init_class_and_name(
			sk,
			af_family_slock_key_strings[sk->sk_family],
			af_family_slock_keys + sk->sk_family,
			af_family_key_strings[sk->sk_family],
			af_family_keys + sk->sk_family);
}

/*
 * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
 * even temporarily, because of RCU lookups. sk_node should also be left as is.
 * We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end.
 */
static void sock_copy(struct sock *nsk, const struct sock *osk)
{
#ifdef CONFIG_SECURITY_NETWORK
	void *sptr = nsk->sk_security;
#endif
	memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));

	memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
	       osk->sk_prot->obj_size - offsetof(struct sock, sk_dontcopy_end));

#ifdef CONFIG_SECURITY_NETWORK
	nsk->sk_security = sptr;
	security_sk_clone(osk, nsk);
#endif
}

static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
				  int family)
{
	struct sock *sk;
	struct kmem_cache *slab;

	slab = prot->slab;
	if (slab != NULL) {
		sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
		if (!sk)
			return sk;
		if (priority & __GFP_ZERO)
			sk_prot_clear_nulls(sk, prot->obj_size);
	} else
		sk = kmalloc(prot->obj_size, priority);

	if (sk != NULL) {
		if (security_sk_alloc(sk, family, priority))
			goto out_free;

		if (!try_module_get(prot->owner))
			goto out_free_sec;
		sk_tx_queue_clear(sk);
	}

	return sk;

out_free_sec:
	security_sk_free(sk);
out_free:
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	return NULL;
}

static void sk_prot_free(struct proto *prot, struct sock *sk)
{
	struct kmem_cache *slab;
	struct module *owner;

	owner = prot->owner;
	slab = prot->slab;

	cgroup_sk_free(&sk->sk_cgrp_data);
	mem_cgroup_sk_free(sk);
	security_sk_free(sk);
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	module_put(owner);
}

/**
 *	sk_alloc - All socket objects are allocated here
 *	@net: the applicable net namespace
 *	@family: protocol family
 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *	@prot: struct proto associated with this new sock instance
 *	@kern: is this to be a kernel socket?
 */
struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
		      struct proto *prot, int kern)
{
	struct sock *sk;

	sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
	if (sk) {
		sk->sk_family = family;
		/*
		 * See comment in struct sock definition to understand
		 * why we need sk_prot_creator -acme
		 */
		sk->sk_prot = sk->sk_prot_creator = prot;
		sk->sk_kern_sock = kern;
		sock_lock_init(sk);
		sk->sk_net_refcnt = kern ? 0 : 1;
		if (likely(sk->sk_net_refcnt)) {
			get_net(net);
			sock_inuse_add(net, 1);
		}

		sock_net_set(sk, net);
		refcount_set(&sk->sk_wmem_alloc, 1);

		mem_cgroup_sk_alloc(sk);
		cgroup_sk_alloc(&sk->sk_cgrp_data);
		sock_update_classid(&sk->sk_cgrp_data);
		sock_update_netprioidx(&sk->sk_cgrp_data);
	}

	return sk;
}
EXPORT_SYMBOL(sk_alloc);
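
/*
 * Allocation sketch for a protocol family's ->create() hook (the my_proto
 * name below is hypothetical, for illustration only):
 *
 *	sk = sk_alloc(net, PF_INET, GFP_KERNEL, &my_proto, kern);
 *	if (!sk)
 *		return -ENOBUFS;
 *	sock_init_data(sock, sk);
 *
 * A zeroed sock followed by sock_init_data() is the pattern real families
 * such as inet_create() use before protocol-private initialization.
 */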

/* Sockets having SOCK_RCU_FREE will call this function after one RCU
 * grace period. This is the case for UDP sockets and TCP listeners.
 */
static void __sk_destruct(struct rcu_head *head)
{
	struct sock *sk = container_of(head, struct sock, sk_rcu);
	struct sk_filter *filter;

	if (sk->sk_destruct)
		sk->sk_destruct(sk);

	filter = rcu_dereference_check(sk->sk_filter,
				       refcount_read(&sk->sk_wmem_alloc) == 0);
	if (filter) {
		sk_filter_uncharge(sk, filter);
		RCU_INIT_POINTER(sk->sk_filter, NULL);
	}
	if (rcu_access_pointer(sk->sk_reuseport_cb))
		reuseport_detach_sock(sk);

	sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);

	if (atomic_read(&sk->sk_omem_alloc))
		pr_debug("%s: optmem leakage (%d bytes) detected\n",
			 __func__, atomic_read(&sk->sk_omem_alloc));

	if (sk->sk_frag.page) {
		put_page(sk->sk_frag.page);
		sk->sk_frag.page = NULL;
	}

	if (sk->sk_peer_cred)
		put_cred(sk->sk_peer_cred);
	put_pid(sk->sk_peer_pid);
	if (likely(sk->sk_net_refcnt))
		put_net(sock_net(sk));
	sk_prot_free(sk->sk_prot_creator, sk);
}

void sk_destruct(struct sock *sk)
{
	if (sock_flag(sk, SOCK_RCU_FREE))
		call_rcu(&sk->sk_rcu, __sk_destruct);
	else
		__sk_destruct(&sk->sk_rcu);
}

static void __sk_free(struct sock *sk)
{
	if (likely(sk->sk_net_refcnt))
		sock_inuse_add(sock_net(sk), -1);

	if (unlikely(sk->sk_net_refcnt && sock_diag_has_destroy_listeners(sk)))
		sock_diag_broadcast_destroy(sk);
	else
		sk_destruct(sk);
}

void sk_free(struct sock *sk)
{
	/*
	 * We subtract one from sk_wmem_alloc so we can tell whether
	 * some packets are still in some tx queue.
	 * If the count does not drop to zero, sock_wfree() will call
	 * __sk_free(sk) later.
	 */
	if (refcount_dec_and_test(&sk->sk_wmem_alloc))
		__sk_free(sk);
}
EXPORT_SYMBOL(sk_free);

static void sk_init_common(struct sock *sk)
{
	skb_queue_head_init(&sk->sk_receive_queue);
	skb_queue_head_init(&sk->sk_write_queue);
	skb_queue_head_init(&sk->sk_error_queue);

	rwlock_init(&sk->sk_callback_lock);
	lockdep_set_class_and_name(&sk->sk_receive_queue.lock,
				   af_rlock_keys + sk->sk_family,
				   af_family_rlock_key_strings[sk->sk_family]);
	lockdep_set_class_and_name(&sk->sk_write_queue.lock,
				   af_wlock_keys + sk->sk_family,
				   af_family_wlock_key_strings[sk->sk_family]);
	lockdep_set_class_and_name(&sk->sk_error_queue.lock,
				   af_elock_keys + sk->sk_family,
				   af_family_elock_key_strings[sk->sk_family]);
	lockdep_set_class_and_name(&sk->sk_callback_lock,
				   af_callback_keys + sk->sk_family,
				   af_family_clock_key_strings[sk->sk_family]);
}

/**
 *	sk_clone_lock - clone a socket, and lock its clone
 *	@sk: the socket to clone
 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *
 *	Caller must unlock socket even in error path (bh_unlock_sock(newsk))
 */
struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
{
	struct sock *newsk;
	bool is_charged = true;

	newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
	if (newsk != NULL) {
		struct sk_filter *filter;

		sock_copy(newsk, sk);

		newsk->sk_prot_creator = sk->sk_prot;

		/* SANITY */
		if (likely(newsk->sk_net_refcnt))
			get_net(sock_net(newsk));
		sk_node_init(&newsk->sk_node);
		sock_lock_init(newsk);
		bh_lock_sock(newsk);
		newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
		newsk->sk_backlog.len = 0;

		atomic_set(&newsk->sk_rmem_alloc, 0);
		/*
		 * sk_wmem_alloc is set to one (see sk_free() and sock_wfree())
		 */
		refcount_set(&newsk->sk_wmem_alloc, 1);
		atomic_set(&newsk->sk_omem_alloc, 0);
		sk_init_common(newsk);

		newsk->sk_dst_cache	= NULL;
		newsk->sk_dst_pending_confirm = 0;
		newsk->sk_wmem_queued	= 0;
		newsk->sk_forward_alloc = 0;
		atomic_set(&newsk->sk_drops, 0);
		newsk->sk_send_head	= NULL;
		newsk->sk_userlocks	= sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
		atomic_set(&newsk->sk_zckey, 0);

		sock_reset_flag(newsk, SOCK_DONE);
		mem_cgroup_sk_alloc(newsk);
		cgroup_sk_alloc(&newsk->sk_cgrp_data);

		rcu_read_lock();
		filter = rcu_dereference(sk->sk_filter);
		if (filter != NULL)
			/* though it's an empty new sock, the charging may fail
			 * if sysctl_optmem_max was changed between the creation
			 * of the original socket and this cloning
			 */
			is_charged = sk_filter_charge(newsk, filter);
		RCU_INIT_POINTER(newsk->sk_filter, filter);
		rcu_read_unlock();

		if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) {
			/* We need to make sure that we don't uncharge the new
			 * socket if we couldn't charge it in the first place
			 * as otherwise we uncharge the parent's filter.
			 */
			if (!is_charged)
				RCU_INIT_POINTER(newsk->sk_filter, NULL);
			sk_free_unlock_clone(newsk);
			newsk = NULL;
			goto out;
		}
		RCU_INIT_POINTER(newsk->sk_reuseport_cb, NULL);

		newsk->sk_err	   = 0;
		newsk->sk_err_soft = 0;
		newsk->sk_priority = 0;
		newsk->sk_incoming_cpu = raw_smp_processor_id();
		atomic64_set(&newsk->sk_cookie, 0);
		if (likely(newsk->sk_net_refcnt))
			sock_inuse_add(sock_net(newsk), 1);

		/*
		 * Before updating sk_refcnt, we must commit prior changes to memory
		 * (Documentation/RCU/rculist_nulls.txt for details)
		 */
		smp_wmb();
		refcount_set(&newsk->sk_refcnt, 2);

		/*
		 * Increment the counter in the same struct proto as the master
		 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, which
		 * is the same as sk->sk_prot->socks, as this field was copied
		 * with memcpy).
		 *
		 * This _changes_ the previous behaviour, where
		 * tcp_create_openreq_child always was incrementing the
		 * equivalent to tcp_prot->socks (inet_sock_nr), so this has
		 * to be taken into account in all callers. -acme
		 */
		sk_refcnt_debug_inc(newsk);
		sk_set_socket(newsk, NULL);
		newsk->sk_wq = NULL;

		if (newsk->sk_prot->sockets_allocated)
			sk_sockets_allocated_inc(newsk);

		if (sock_needs_netstamp(sk) &&
		    newsk->sk_flags & SK_FLAGS_TIMESTAMP)
			net_enable_timestamp();
	}
out:
	return newsk;
}
EXPORT_SYMBOL_GPL(sk_clone_lock);

void sk_free_unlock_clone(struct sock *sk)
{
	/* It is still a raw copy of the parent, so invalidate
	 * its destructor and do a plain sk_free() */
	sk->sk_destruct = NULL;
	bh_unlock_sock(sk);
	sk_free(sk);
}
EXPORT_SYMBOL_GPL(sk_free_unlock_clone);

void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
{
	u32 max_segs = 1;

	sk_dst_set(sk, dst);
	sk->sk_route_caps = dst->dev->features | sk->sk_route_forced_caps;
	if (sk->sk_route_caps & NETIF_F_GSO)
		sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
	sk->sk_route_caps &= ~sk->sk_route_nocaps;
	if (sk_can_gso(sk)) {
		if (dst->header_len && !xfrm_dst_offload_ok(dst)) {
			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
		} else {
			sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
			sk->sk_gso_max_size = dst->dev->gso_max_size;
			max_segs = max_t(u32, dst->dev->gso_max_segs, 1);
		}
	}
	sk->sk_gso_max_segs = max_segs;
}
EXPORT_SYMBOL_GPL(sk_setup_caps);

/*
 *	Simple resource managers for sockets.
 */


/*
 * Write buffer destructor automatically called from kfree_skb.
 */
void sock_wfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	unsigned int len = skb->truesize;

	if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
		/*
		 * Keep a reference on sk_wmem_alloc, this will be released
		 * after sk_write_space() call
		 */
		WARN_ON(refcount_sub_and_test(len - 1, &sk->sk_wmem_alloc));
		sk->sk_write_space(sk);
		len = 1;
	}
	/*
	 * if sk_wmem_alloc reaches 0, we must finish what sk_free()
	 * could not do because of in-flight packets
	 */
	if (refcount_sub_and_test(len, &sk->sk_wmem_alloc))
		__sk_free(sk);
}
EXPORT_SYMBOL(sock_wfree);

/* This variant of sock_wfree() is used by TCP,
 * since it sets SOCK_USE_WRITE_QUEUE.
 */
void __sock_wfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	if (refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc))
		__sk_free(sk);
}

void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
{
	skb_orphan(skb);
	skb->sk = sk;
#ifdef CONFIG_INET
	if (unlikely(!sk_fullsock(sk))) {
		skb->destructor = sock_edemux;
		sock_hold(sk);
		return;
	}
#endif
	skb->destructor = sock_wfree;
	skb_set_hash_from_sk(skb, sk);
	/*
	 * We used to take a refcount on sk, but the following operation
	 * is enough to guarantee sk_free() won't free this sock until
	 * all in-flight packets are completed
	 */
	refcount_add(skb->truesize, &sk->sk_wmem_alloc);
}
EXPORT_SYMBOL(skb_set_owner_w);

/* This helper is used by netem, as it can hold packets in its
 * delay queue. We want to allow the owner socket to send more
 * packets, as if they were already TX completed by a typical driver.
 * But we also want to keep skb->sk set because some packet schedulers
 * rely on it (sch_fq for example).
 */
void skb_orphan_partial(struct sk_buff *skb)
{
	if (skb_is_tcp_pure_ack(skb))
		return;

	if (skb->destructor == sock_wfree
#ifdef CONFIG_INET
	    || skb->destructor == tcp_wfree
#endif
		) {
		struct sock *sk = skb->sk;

		if (refcount_inc_not_zero(&sk->sk_refcnt)) {
			WARN_ON(refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc));
			skb->destructor = sock_efree;
		}
	} else {
		skb_orphan(skb);
	}
}
EXPORT_SYMBOL(skb_orphan_partial);

/*
 * Read buffer destructor automatically called from kfree_skb.
 */
void sock_rfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	unsigned int len = skb->truesize;

	atomic_sub(len, &sk->sk_rmem_alloc);
	sk_mem_uncharge(sk, len);
}
EXPORT_SYMBOL(sock_rfree);

/*
 * Buffer destructor for skbs that are not used directly in read or write
 * path, e.g. for error handler skbs. Automatically called from kfree_skb.
 */
void sock_efree(struct sk_buff *skb)
{
	sock_put(skb->sk);
}
EXPORT_SYMBOL(sock_efree);

kuid_t sock_i_uid(struct sock *sk)
{
	kuid_t uid;

	read_lock_bh(&sk->sk_callback_lock);
	uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : GLOBAL_ROOT_UID;
	read_unlock_bh(&sk->sk_callback_lock);
	return uid;
}
EXPORT_SYMBOL(sock_i_uid);

unsigned long sock_i_ino(struct sock *sk)
{
	unsigned long ino;

	read_lock_bh(&sk->sk_callback_lock);
	ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
	read_unlock_bh(&sk->sk_callback_lock);
	return ino;
}
EXPORT_SYMBOL(sock_i_ino);

/*
 * Allocate a skb from the socket's send buffer.
 */
struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
			     gfp_t priority)
{
	if (force || refcount_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
		struct sk_buff *skb = alloc_skb(size, priority);
		if (skb) {
			skb_set_owner_w(skb, sk);
			return skb;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(sock_wmalloc);

static void sock_ofree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	atomic_sub(skb->truesize, &sk->sk_omem_alloc);
}

struct sk_buff *sock_omalloc(struct sock *sk, unsigned long size,
			     gfp_t priority)
{
	struct sk_buff *skb;

	/* small safe race: SKB_TRUESIZE may differ from final skb->truesize */
	if (atomic_read(&sk->sk_omem_alloc) + SKB_TRUESIZE(size) >
	    sysctl_optmem_max)
		return NULL;

	skb = alloc_skb(size, priority);
	if (!skb)
		return NULL;

	atomic_add(skb->truesize, &sk->sk_omem_alloc);
	skb->sk = sk;
	skb->destructor = sock_ofree;
	return skb;
}

/*
 * Allocate a memory block from the socket's option memory buffer.
 */
void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
{
	if ((unsigned int)size <= sysctl_optmem_max &&
	    atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
		void *mem;
		/* First do the add, to avoid the race if kmalloc
		 * might sleep.
		 */
		atomic_add(size, &sk->sk_omem_alloc);
		mem = kmalloc(size, priority);
		if (mem)
			return mem;
		atomic_sub(size, &sk->sk_omem_alloc);
	}
	return NULL;
}
EXPORT_SYMBOL(sock_kmalloc);

/* Free an option memory block. Note, we actually want the inline
 * here as this allows gcc to detect the nullify and fold away the
 * condition entirely.
 */
static inline void __sock_kfree_s(struct sock *sk, void *mem, int size,
				  const bool nullify)
{
	if (WARN_ON_ONCE(!mem))
		return;
	if (nullify)
		kzfree(mem);
	else
		kfree(mem);
	atomic_sub(size, &sk->sk_omem_alloc);
}

void sock_kfree_s(struct sock *sk, void *mem, int size)
{
	__sock_kfree_s(sk, mem, size, false);
}
EXPORT_SYMBOL(sock_kfree_s);

void sock_kzfree_s(struct sock *sk, void *mem, int size)
{
	__sock_kfree_s(sk, mem, size, true);
}
EXPORT_SYMBOL(sock_kzfree_s);
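
/*
 * sock_kmalloc()/sock_kfree_s() allocations are charged against
 * sk_omem_alloc, so the caller must free with the same size it
 * allocated. A minimal sketch:
 *
 *	opt = sock_kmalloc(sk, optlen, GFP_KERNEL);
 *	if (!opt)
 *		return -ENOBUFS;
 *	...
 *	sock_kfree_s(sk, opt, optlen);
 *
 * sock_kzfree_s() is the variant for sensitive data that must be
 * zeroed before the memory is returned.
 */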

/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
 * I think these locks should be removed for datagram sockets.
 */
static long sock_wait_for_wmem(struct sock *sk, long timeo)
{
	DEFINE_WAIT(wait);

	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
	for (;;) {
		if (!timeo)
			break;
		if (signal_pending(current))
			break;
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		if (refcount_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
			break;
		if (sk->sk_shutdown & SEND_SHUTDOWN)
			break;
		if (sk->sk_err)
			break;
		timeo = schedule_timeout(timeo);
	}
	finish_wait(sk_sleep(sk), &wait);
	return timeo;
}


/*
 * Generic send/receive buffer handlers
 */

struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
				     unsigned long data_len, int noblock,
				     int *errcode, int max_page_order)
{
	struct sk_buff *skb;
	long timeo;
	int err;

	timeo = sock_sndtimeo(sk, noblock);
	for (;;) {
		err = sock_error(sk);
		if (err != 0)
			goto failure;

		err = -EPIPE;
		if (sk->sk_shutdown & SEND_SHUTDOWN)
			goto failure;

		if (sk_wmem_alloc_get(sk) < sk->sk_sndbuf)
			break;

		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		err = -EAGAIN;
		if (!timeo)
			goto failure;
		if (signal_pending(current))
			goto interrupted;
		timeo = sock_wait_for_wmem(sk, timeo);
	}
	skb = alloc_skb_with_frags(header_len, data_len, max_page_order,
				   errcode, sk->sk_allocation);
	if (skb)
		skb_set_owner_w(skb, sk);
	return skb;

interrupted:
	err = sock_intr_errno(timeo);
failure:
	*errcode = err;
	return NULL;
}
EXPORT_SYMBOL(sock_alloc_send_pskb);

struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
				    int noblock, int *errcode)
{
	return sock_alloc_send_pskb(sk, size, 0, noblock, errcode, 0);
}
EXPORT_SYMBOL(sock_alloc_send_skb);
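
/*
 * Illustrative sketch (not part of this file): a datagram sendmsg()
 * implementation might block for send buffer space like this:
 *
 *	skb = sock_alloc_send_skb(sk, len + hlen,
 *				  msg->msg_flags & MSG_DONTWAIT, &err);
 *	if (!skb)
 *		goto out;	// err holds -EAGAIN, -EPIPE, sock_error(), ...
 *
 * "len" and "hlen" are hypothetical caller-side values.
 */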

int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
		     struct sockcm_cookie *sockc)
{
	u32 tsflags;

	switch (cmsg->cmsg_type) {
	case SO_MARK:
		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
			return -EPERM;
		if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32)))
			return -EINVAL;
		sockc->mark = *(u32 *)CMSG_DATA(cmsg);
		break;
	case SO_TIMESTAMPING:
		if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32)))
			return -EINVAL;

		tsflags = *(u32 *)CMSG_DATA(cmsg);
		if (tsflags & ~SOF_TIMESTAMPING_TX_RECORD_MASK)
			return -EINVAL;

		sockc->tsflags &= ~SOF_TIMESTAMPING_TX_RECORD_MASK;
		sockc->tsflags |= tsflags;
		break;
	/* SCM_RIGHTS and SCM_CREDENTIALS are semantically in SOL_UNIX. */
	case SCM_RIGHTS:
	case SCM_CREDENTIALS:
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL(__sock_cmsg_send);

int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
		   struct sockcm_cookie *sockc)
{
	struct cmsghdr *cmsg;
	int ret;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;
		if (cmsg->cmsg_level != SOL_SOCKET)
			continue;
		ret = __sock_cmsg_send(sk, msg, cmsg, sockc);
		if (ret)
			return ret;
	}
	return 0;
}
EXPORT_SYMBOL(sock_cmsg_send);
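
/*
 * Illustrative sketch (not part of this file): a protocol sendmsg()
 * typically seeds a sockcm_cookie from socket defaults and then lets
 * SOL_SOCKET control messages override it:
 *
 *	struct sockcm_cookie sockc = { .tsflags = sk->sk_tsflags };
 *
 *	if (msg->msg_controllen) {
 *		err = sock_cmsg_send(sk, msg, &sockc);
 *		if (unlikely(err))
 *			goto out;
 *	}
 */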

static void sk_enter_memory_pressure(struct sock *sk)
{
	if (!sk->sk_prot->enter_memory_pressure)
		return;

	sk->sk_prot->enter_memory_pressure(sk);
}

static void sk_leave_memory_pressure(struct sock *sk)
{
	if (sk->sk_prot->leave_memory_pressure) {
		sk->sk_prot->leave_memory_pressure(sk);
	} else {
		unsigned long *memory_pressure = sk->sk_prot->memory_pressure;

		if (memory_pressure && *memory_pressure)
			*memory_pressure = 0;
	}
}

/* On 32bit arches, an skb frag is limited to 2^15 */
#define SKB_FRAG_PAGE_ORDER	get_order(32768)

/**
 * skb_page_frag_refill - check that a page_frag contains enough room
 * @sz: minimum size of the fragment we want to get
 * @pfrag: pointer to page_frag
 * @gfp: priority for memory allocation
 *
 * Note: While this allocator tries to use high order pages, there is
 * no guarantee that allocations succeed. Therefore, @sz MUST be
 * less than or equal to PAGE_SIZE.
 */
bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t gfp)
{
	if (pfrag->page) {
		if (page_ref_count(pfrag->page) == 1) {
			pfrag->offset = 0;
			return true;
		}
		if (pfrag->offset + sz <= pfrag->size)
			return true;
		put_page(pfrag->page);
	}

	pfrag->offset = 0;
	if (SKB_FRAG_PAGE_ORDER) {
		/* Avoid direct reclaim but allow kswapd to wake */
		pfrag->page = alloc_pages((gfp & ~__GFP_DIRECT_RECLAIM) |
					  __GFP_COMP | __GFP_NOWARN |
					  __GFP_NORETRY,
					  SKB_FRAG_PAGE_ORDER);
		if (likely(pfrag->page)) {
			pfrag->size = PAGE_SIZE << SKB_FRAG_PAGE_ORDER;
			return true;
		}
	}
	pfrag->page = alloc_page(gfp);
	if (likely(pfrag->page)) {
		pfrag->size = PAGE_SIZE;
		return true;
	}
	return false;
}
EXPORT_SYMBOL(skb_page_frag_refill);
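
/*
 * Illustrative sketch (not part of this file, assumes a lowmem page):
 * a caller copies into the refilled fragment and advances the offset
 * itself:
 *
 *	if (!skb_page_frag_refill(copy, pfrag, GFP_KERNEL))
 *		return -ENOMEM;
 *	memcpy(page_address(pfrag->page) + pfrag->offset, data, copy);
 *	pfrag->offset += copy;
 *
 * "copy" and "data" are hypothetical caller-side values; copy must not
 * exceed PAGE_SIZE, as noted above.
 */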

bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
{
	if (likely(skb_page_frag_refill(32U, pfrag, sk->sk_allocation)))
		return true;

	sk_enter_memory_pressure(sk);
	sk_stream_moderate_sndbuf(sk);
	return false;
}
EXPORT_SYMBOL(sk_page_frag_refill);

int sk_alloc_sg(struct sock *sk, int len, struct scatterlist *sg,
		int sg_start, int *sg_curr_index, unsigned int *sg_curr_size,
		int first_coalesce)
{
	int sg_curr = *sg_curr_index, use = 0, rc = 0;
	unsigned int size = *sg_curr_size;
	struct page_frag *pfrag;
	struct scatterlist *sge;

	len -= size;
	pfrag = sk_page_frag(sk);

	while (len > 0) {
		unsigned int orig_offset;

		if (!sk_page_frag_refill(sk, pfrag)) {
			rc = -ENOMEM;
			goto out;
		}

		use = min_t(int, len, pfrag->size - pfrag->offset);

		if (!sk_wmem_schedule(sk, use)) {
			rc = -ENOMEM;
			goto out;
		}

		sk_mem_charge(sk, use);
		size += use;
		orig_offset = pfrag->offset;
		pfrag->offset += use;

		/* coalesce into the previous entry, not sg[0] */
		sge = sg + sg_curr - 1;
		if (sg_curr > first_coalesce && sg_page(sge) == pfrag->page &&
		    sge->offset + sge->length == orig_offset) {
			sge->length += use;
		} else {
			sge = sg + sg_curr;
			sg_unmark_end(sge);
			sg_set_page(sge, pfrag->page, use, orig_offset);
			get_page(pfrag->page);
			sg_curr++;

			if (sg_curr == MAX_SKB_FRAGS)
				sg_curr = 0;

			if (sg_curr == sg_start) {
				rc = -ENOSPC;
				break;
			}
		}

		len -= use;
	}
out:
	*sg_curr_size = size;
	*sg_curr_index = sg_curr;
	return rc;
}
EXPORT_SYMBOL(sk_alloc_sg);

static void __lock_sock(struct sock *sk)
	__releases(&sk->sk_lock.slock)
	__acquires(&sk->sk_lock.slock)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
					  TASK_UNINTERRUPTIBLE);
		spin_unlock_bh(&sk->sk_lock.slock);
		schedule();
		spin_lock_bh(&sk->sk_lock.slock);
		if (!sock_owned_by_user(sk))
			break;
	}
	finish_wait(&sk->sk_lock.wq, &wait);
}

static void __release_sock(struct sock *sk)
	__releases(&sk->sk_lock.slock)
	__acquires(&sk->sk_lock.slock)
{
	struct sk_buff *skb, *next;

	while ((skb = sk->sk_backlog.head) != NULL) {
		sk->sk_backlog.head = sk->sk_backlog.tail = NULL;

		spin_unlock_bh(&sk->sk_lock.slock);

		do {
			next = skb->next;
			prefetch(next);
			WARN_ON_ONCE(skb_dst_is_noref(skb));
			skb->next = NULL;
			sk_backlog_rcv(sk, skb);

			cond_resched();

			skb = next;
		} while (skb != NULL);

		spin_lock_bh(&sk->sk_lock.slock);
	}

	/*
	 * Doing the zeroing here guarantees we cannot loop forever
	 * while a wild producer attempts to flood us.
	 */
	sk->sk_backlog.len = 0;
}

void __sk_flush_backlog(struct sock *sk)
{
	spin_lock_bh(&sk->sk_lock.slock);
	__release_sock(sk);
	spin_unlock_bh(&sk->sk_lock.slock);
}

/**
 * sk_wait_data - wait for data to arrive at sk_receive_queue
 * @sk: sock to wait on
 * @timeo: for how long
 * @skb: last skb seen on sk_receive_queue
 *
 * Now socket state including sk->sk_err is changed only under lock,
 * hence we may omit checks after joining the wait queue.
 * We check the receive queue before schedule() only as an optimization;
 * it is very likely that release_sock() added new data.
 */
int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	int rc;

	add_wait_queue(sk_sleep(sk), &wait);
	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	rc = sk_wait_event(sk, timeo, skb_peek_tail(&sk->sk_receive_queue) != skb, &wait);
	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	remove_wait_queue(sk_sleep(sk), &wait);
	return rc;
}
EXPORT_SYMBOL(sk_wait_data);
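
/*
 * Illustrative sketch (not part of this file): a recvmsg() loop built on
 * sk_wait_data(), called with the socket lock held:
 *
 *	while (!(skb = skb_peek(&sk->sk_receive_queue))) {
 *		if (!timeo)
 *			return -EAGAIN;
 *		if (signal_pending(current))
 *			return sock_intr_errno(timeo);
 *		sk_wait_data(sk, &timeo, NULL);
 *	}
 */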

/**
 * __sk_mem_raise_allocated - increase memory_allocated
 * @sk: socket
 * @size: memory size to allocate
 * @amt: pages to allocate
 * @kind: allocation type
 *
 * Similar to __sk_mem_schedule(), but does not update sk_forward_alloc
 */
int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind)
{
	struct proto *prot = sk->sk_prot;
	long allocated = sk_memory_allocated_add(sk, amt);

	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
	    !mem_cgroup_charge_skmem(sk->sk_memcg, amt))
		goto suppress_allocation;

	/* Under limit. */
	if (allocated <= sk_prot_mem_limits(sk, 0)) {
		sk_leave_memory_pressure(sk);
		return 1;
	}

	/* Under pressure. */
	if (allocated > sk_prot_mem_limits(sk, 1))
		sk_enter_memory_pressure(sk);

	/* Over hard limit. */
	if (allocated > sk_prot_mem_limits(sk, 2))
		goto suppress_allocation;

	/* guarantee minimum buffer size under pressure */
	if (kind == SK_MEM_RECV) {
		if (atomic_read(&sk->sk_rmem_alloc) < sk_get_rmem0(sk, prot))
			return 1;

	} else { /* SK_MEM_SEND */
		int wmem0 = sk_get_wmem0(sk, prot);

		if (sk->sk_type == SOCK_STREAM) {
			if (sk->sk_wmem_queued < wmem0)
				return 1;
		} else if (refcount_read(&sk->sk_wmem_alloc) < wmem0) {
			return 1;
		}
	}

	if (sk_has_memory_pressure(sk)) {
		int alloc;

		if (!sk_under_memory_pressure(sk))
			return 1;
		alloc = sk_sockets_allocated_read_positive(sk);
		if (sk_prot_mem_limits(sk, 2) > alloc *
		    sk_mem_pages(sk->sk_wmem_queued +
				 atomic_read(&sk->sk_rmem_alloc) +
				 sk->sk_forward_alloc))
			return 1;
	}

suppress_allocation:

	if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
		sk_stream_moderate_sndbuf(sk);

		/* Fail only if socket is _under_ its sndbuf.
		 * In this case we cannot block, so we have to fail.
		 */
		if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
			return 1;
	}

	trace_sock_exceed_buf_limit(sk, prot, allocated);

	sk_memory_allocated_sub(sk, amt);

	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
		mem_cgroup_uncharge_skmem(sk->sk_memcg, amt);

	return 0;
}
EXPORT_SYMBOL(__sk_mem_raise_allocated);

/**
 * __sk_mem_schedule - increase sk_forward_alloc and memory_allocated
 * @sk: socket
 * @size: memory size to allocate
 * @kind: allocation type
 *
 * If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
 * rmem allocation. This function assumes that protocols which have
 * memory_pressure use sk_wmem_queued as write buffer accounting.
 */
int __sk_mem_schedule(struct sock *sk, int size, int kind)
{
	int ret, amt = sk_mem_pages(size);

	sk->sk_forward_alloc += amt << SK_MEM_QUANTUM_SHIFT;
	ret = __sk_mem_raise_allocated(sk, size, amt, kind);
	if (!ret)
		sk->sk_forward_alloc -= amt << SK_MEM_QUANTUM_SHIFT;
	return ret;
}
EXPORT_SYMBOL(__sk_mem_schedule);
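
/*
 * Worked example (illustrative): with SK_MEM_QUANTUM == PAGE_SIZE == 4096,
 * a request for size == 6000 gives amt = sk_mem_pages(6000) = 2, so
 * sk_forward_alloc grows by 2 << SK_MEM_QUANTUM_SHIFT = 8192 bytes and
 * memory_allocated grows by 2 quanta; the 2192 surplus bytes stay in
 * sk_forward_alloc for later charges.
 */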

/**
 * __sk_mem_reduce_allocated - reclaim memory_allocated
 * @sk: socket
 * @amount: number of quanta
 *
 * Similar to __sk_mem_reclaim(), but does not update sk_forward_alloc
 */
void __sk_mem_reduce_allocated(struct sock *sk, int amount)
{
	sk_memory_allocated_sub(sk, amount);

	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
		mem_cgroup_uncharge_skmem(sk->sk_memcg, amount);

	if (sk_under_memory_pressure(sk) &&
	    (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
		sk_leave_memory_pressure(sk);
}
EXPORT_SYMBOL(__sk_mem_reduce_allocated);

/**
 * __sk_mem_reclaim - reclaim sk_forward_alloc and memory_allocated
 * @sk: socket
 * @amount: number of bytes (rounded down to a SK_MEM_QUANTUM multiple)
 */
void __sk_mem_reclaim(struct sock *sk, int amount)
{
	amount >>= SK_MEM_QUANTUM_SHIFT;
	sk->sk_forward_alloc -= amount << SK_MEM_QUANTUM_SHIFT;
	__sk_mem_reduce_allocated(sk, amount);
}
EXPORT_SYMBOL(__sk_mem_reclaim);

int sk_set_peek_off(struct sock *sk, int val)
{
	sk->sk_peek_off = val;
	return 0;
}
EXPORT_SYMBOL_GPL(sk_set_peek_off);

/*
 * Set of default routines for initialising struct proto_ops when
 * the protocol does not support a particular function. In certain
 * cases where it makes no sense for a protocol to have a "do nothing"
 * function, some default processing is provided.
 */

int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_bind);

int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
		    int len, int flags)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_connect);

int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_socketpair);

int sock_no_accept(struct socket *sock, struct socket *newsock, int flags,
		   bool kern)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_accept);

int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
		    int peer)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_getname);

__poll_t sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
{
	return 0;
}
EXPORT_SYMBOL(sock_no_poll);

int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_ioctl);

int sock_no_listen(struct socket *sock, int backlog)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_listen);

int sock_no_shutdown(struct socket *sock, int how)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_shutdown);

int sock_no_setsockopt(struct socket *sock, int level, int optname,
		       char __user *optval, unsigned int optlen)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_setsockopt);

int sock_no_getsockopt(struct socket *sock, int level, int optname,
		       char __user *optval, int __user *optlen)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_getsockopt);

int sock_no_sendmsg(struct socket *sock, struct msghdr *m, size_t len)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_sendmsg);

int sock_no_sendmsg_locked(struct sock *sk, struct msghdr *m, size_t len)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_sendmsg_locked);

int sock_no_recvmsg(struct socket *sock, struct msghdr *m, size_t len,
		    int flags)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_recvmsg);

int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
{
	/* Mirror missing mmap method error code */
	return -ENODEV;
}
EXPORT_SYMBOL(sock_no_mmap);

ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
{
	ssize_t res;
	struct msghdr msg = {.msg_flags = flags};
	struct kvec iov;
	char *kaddr = kmap(page);
	iov.iov_base = kaddr + offset;
	iov.iov_len = size;
	res = kernel_sendmsg(sock, &msg, &iov, 1, size);
	kunmap(page);
	return res;
}
EXPORT_SYMBOL(sock_no_sendpage);

ssize_t sock_no_sendpage_locked(struct sock *sk, struct page *page,
				int offset, size_t size, int flags)
{
	ssize_t res;
	struct msghdr msg = {.msg_flags = flags};
	struct kvec iov;
	char *kaddr = kmap(page);

	iov.iov_base = kaddr + offset;
	iov.iov_len = size;
	res = kernel_sendmsg_locked(sk, &msg, &iov, 1, size);
	kunmap(page);
	return res;
}
EXPORT_SYMBOL(sock_no_sendpage_locked);

/*
 * Default Socket Callbacks
 */

static void sock_def_wakeup(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_all(&wq->wait);
	rcu_read_unlock();
}

static void sock_def_error_report(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_poll(&wq->wait, EPOLLERR);
	sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
	rcu_read_unlock();
}

static void sock_def_readable(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN | EPOLLPRI |
						EPOLLRDNORM | EPOLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	rcu_read_unlock();
}

static void sock_def_write_space(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();

	/* Do not wake up a writer until he can make "significant"
	 * progress.  --DaveM
	 */
	if ((refcount_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
		wq = rcu_dereference(sk->sk_wq);
		if (skwq_has_sleeper(wq))
			wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
						EPOLLWRNORM | EPOLLWRBAND);

		/* Should agree with poll, otherwise some programs break */
		if (sock_writeable(sk))
			sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	}

	rcu_read_unlock();
}

static void sock_def_destruct(struct sock *sk)
{
}

void sk_send_sigurg(struct sock *sk)
{
	if (sk->sk_socket && sk->sk_socket->file)
		if (send_sigurg(&sk->sk_socket->file->f_owner))
			sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
}
EXPORT_SYMBOL(sk_send_sigurg);

void sk_reset_timer(struct sock *sk, struct timer_list *timer,
		    unsigned long expires)
{
	if (!mod_timer(timer, expires))
		sock_hold(sk);
}
EXPORT_SYMBOL(sk_reset_timer);

void sk_stop_timer(struct sock *sk, struct timer_list *timer)
{
	if (del_timer(timer))
		__sock_put(sk);
}
EXPORT_SYMBOL(sk_stop_timer);
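
/*
 * Illustrative sketch (not part of this file): the pair above keeps one
 * socket reference per pending timer, so the handler must drop it:
 *
 *	sk_reset_timer(sk, &sk->sk_timer, jiffies + delay);	// holds sk
 *	...
 *	static void example_timer(struct timer_list *t)	// hypothetical handler
 *	{
 *		struct sock *sk = from_timer(sk, t, sk_timer);
 *		...
 *		sock_put(sk);	// release the reference sk_reset_timer took
 *	}
 */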

void sock_init_data(struct socket *sock, struct sock *sk)
{
	sk_init_common(sk);
	sk->sk_send_head = NULL;

	timer_setup(&sk->sk_timer, NULL, 0);

	sk->sk_allocation = GFP_KERNEL;
	sk->sk_rcvbuf = sysctl_rmem_default;
	sk->sk_sndbuf = sysctl_wmem_default;
	sk->sk_state = TCP_CLOSE;
	sk_set_socket(sk, sock);

	sock_set_flag(sk, SOCK_ZAPPED);

	if (sock) {
		sk->sk_type = sock->type;
		sk->sk_wq = sock->wq;
		sock->sk = sk;
		sk->sk_uid = SOCK_INODE(sock)->i_uid;
	} else {
		sk->sk_wq = NULL;
		sk->sk_uid = make_kuid(sock_net(sk)->user_ns, 0);
	}

	rwlock_init(&sk->sk_callback_lock);
	if (sk->sk_kern_sock)
		lockdep_set_class_and_name(
			&sk->sk_callback_lock,
			af_kern_callback_keys + sk->sk_family,
			af_family_kern_clock_key_strings[sk->sk_family]);
	else
		lockdep_set_class_and_name(
			&sk->sk_callback_lock,
			af_callback_keys + sk->sk_family,
			af_family_clock_key_strings[sk->sk_family]);

	sk->sk_state_change = sock_def_wakeup;
	sk->sk_data_ready = sock_def_readable;
	sk->sk_write_space = sock_def_write_space;
	sk->sk_error_report = sock_def_error_report;
	sk->sk_destruct = sock_def_destruct;

	sk->sk_frag.page = NULL;
	sk->sk_frag.offset = 0;
	sk->sk_peek_off = -1;

	sk->sk_peer_pid = NULL;
	sk->sk_peer_cred = NULL;
	sk->sk_write_pending = 0;
	sk->sk_rcvlowat = 1;
	sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
	sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;

	sk->sk_stamp = SK_DEFAULT_STAMP;
	atomic_set(&sk->sk_zckey, 0);

#ifdef CONFIG_NET_RX_BUSY_POLL
	sk->sk_napi_id = 0;
	sk->sk_ll_usec = sysctl_net_busy_read;
#endif

	sk->sk_max_pacing_rate = ~0U;
	sk->sk_pacing_rate = ~0U;
	sk->sk_pacing_shift = 10;
	sk->sk_incoming_cpu = -1;
	/*
	 * Before updating sk_refcnt, we must commit prior changes to memory
	 * (Documentation/RCU/rculist_nulls.txt for details)
	 */
	smp_wmb();
	refcount_set(&sk->sk_refcnt, 1);
	atomic_set(&sk->sk_drops, 0);
}
EXPORT_SYMBOL(sock_init_data);

void lock_sock_nested(struct sock *sk, int subclass)
{
	might_sleep();
	spin_lock_bh(&sk->sk_lock.slock);
	if (sk->sk_lock.owned)
		__lock_sock(sk);
	sk->sk_lock.owned = 1;
	spin_unlock(&sk->sk_lock.slock);
	/*
	 * The sk_lock has mutex_lock() semantics here:
	 */
	mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
	local_bh_enable();
}
EXPORT_SYMBOL(lock_sock_nested);

void release_sock(struct sock *sk)
{
	spin_lock_bh(&sk->sk_lock.slock);
	if (sk->sk_backlog.tail)
		__release_sock(sk);

	/* Warning : release_cb() might need to release sk ownership,
	 * ie call sock_release_ownership(sk) before us.
	 */
	if (sk->sk_prot->release_cb)
		sk->sk_prot->release_cb(sk);

	sock_release_ownership(sk);
	if (waitqueue_active(&sk->sk_lock.wq))
		wake_up(&sk->sk_lock.wq);
	spin_unlock_bh(&sk->sk_lock.slock);
}
EXPORT_SYMBOL(release_sock);

/**
 * lock_sock_fast - fast version of lock_sock
 * @sk: socket
 *
 * This version should be used for very small sections, where the process
 * won't block. It returns false if the fast path was taken:
 *
 * sk_lock.slock locked, owned = 0, BH disabled
 *
 * and true if the slow path was taken:
 *
 * sk_lock.slock unlocked, owned = 1, BH enabled
 */
bool lock_sock_fast(struct sock *sk)
{
	might_sleep();
	spin_lock_bh(&sk->sk_lock.slock);

	if (!sk->sk_lock.owned)
		/*
		 * Note : We must disable BH
		 */
		return false;

	__lock_sock(sk);
	sk->sk_lock.owned = 1;
	spin_unlock(&sk->sk_lock.slock);
	/*
	 * The sk_lock has mutex_lock() semantics here:
	 */
	mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
	local_bh_enable();
	return true;
}
EXPORT_SYMBOL(lock_sock_fast);
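
/*
 * Illustrative sketch (not part of this file): the return value must be
 * handed back to unlock_sock_fast() so the matching unlock path runs:
 *
 *	bool slow = lock_sock_fast(sk);
 *	...		// short, non-blocking critical section
 *	unlock_sock_fast(sk, slow);
 */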

int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
{
	struct timeval tv;

	if (!sock_flag(sk, SOCK_TIMESTAMP))
		sock_enable_timestamp(sk, SOCK_TIMESTAMP);
	tv = ktime_to_timeval(sk->sk_stamp);
	if (tv.tv_sec == -1)
		return -ENOENT;
	if (tv.tv_sec == 0) {
		sk->sk_stamp = ktime_get_real();
		tv = ktime_to_timeval(sk->sk_stamp);
	}
	return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
}
EXPORT_SYMBOL(sock_get_timestamp);

int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
{
	struct timespec ts;

	if (!sock_flag(sk, SOCK_TIMESTAMP))
		sock_enable_timestamp(sk, SOCK_TIMESTAMP);
	ts = ktime_to_timespec(sk->sk_stamp);
	if (ts.tv_sec == -1)
		return -ENOENT;
	if (ts.tv_sec == 0) {
		sk->sk_stamp = ktime_get_real();
		ts = ktime_to_timespec(sk->sk_stamp);
	}
	return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
}
EXPORT_SYMBOL(sock_get_timestampns);

void sock_enable_timestamp(struct sock *sk, int flag)
{
	if (!sock_flag(sk, flag)) {
		unsigned long previous_flags = sk->sk_flags;

		sock_set_flag(sk, flag);
		/*
		 * we just set one of the two flags which require net
		 * time stamping, but time stamping might have been on
		 * already because of the other one
		 */
		if (sock_needs_netstamp(sk) &&
		    !(previous_flags & SK_FLAGS_TIMESTAMP))
			net_enable_timestamp();
	}
}

int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
		       int level, int type)
{
	struct sock_exterr_skb *serr;
	struct sk_buff *skb;
	int copied, err;

	err = -EAGAIN;
	skb = sock_dequeue_err_skb(sk);
	if (skb == NULL)
		goto out;

	copied = skb->len;
	if (copied > len) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}
	err = skb_copy_datagram_msg(skb, 0, msg, copied);
	if (err)
		goto out_free_skb;

	sock_recv_timestamp(msg, sk, skb);

	serr = SKB_EXT_ERR(skb);
	put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);

	msg->msg_flags |= MSG_ERRQUEUE;
	err = copied;

out_free_skb:
	kfree_skb(skb);
out:
	return err;
}
EXPORT_SYMBOL(sock_recv_errqueue);

/*
 * Get a socket option on a socket.
 *
 * FIX: POSIX 1003.1g is very ambiguous here. It states that
 * asynchronous errors should be reported by getsockopt. We assume
 * this means if you specify SO_ERROR (otherwise what's the point of it).
 */
int sock_common_getsockopt(struct socket *sock, int level, int optname,
			   char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(sock_common_getsockopt);

#ifdef CONFIG_COMPAT
int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	if (sk->sk_prot->compat_getsockopt != NULL)
		return sk->sk_prot->compat_getsockopt(sk, level, optname,
						      optval, optlen);
	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(compat_sock_common_getsockopt);
#endif

int sock_common_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
			int flags)
{
	struct sock *sk = sock->sk;
	int addr_len = 0;
	int err;

	err = sk->sk_prot->recvmsg(sk, msg, size, flags & MSG_DONTWAIT,
				   flags & ~MSG_DONTWAIT, &addr_len);
	if (err >= 0)
		msg->msg_namelen = addr_len;
	return err;
}
EXPORT_SYMBOL(sock_common_recvmsg);

/*
 * Set socket options on an inet socket.
 */
int sock_common_setsockopt(struct socket *sock, int level, int optname,
			   char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;

	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(sock_common_setsockopt);

#ifdef CONFIG_COMPAT
int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;

	if (sk->sk_prot->compat_setsockopt != NULL)
		return sk->sk_prot->compat_setsockopt(sk, level, optname,
						      optval, optlen);
	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(compat_sock_common_setsockopt);
#endif

void sk_common_release(struct sock *sk)
{
	if (sk->sk_prot->destroy)
		sk->sk_prot->destroy(sk);

	/*
	 * Observation: when sk_common_release is called, processes have
	 * no access to the socket, but the network stack still does.
	 *
	 * Step one, detach it from networking:
	 *
	 * A. Remove from hash tables.
	 */

	sk->sk_prot->unhash(sk);

	/*
	 * At this point the socket cannot receive new packets, but it is
	 * possible that some packets are in flight because some CPU runs
	 * the receiver and did the hash table lookup before we unhashed
	 * the socket. They will reach the receive queue and will be purged
	 * by the socket destructor.
	 *
	 * Also we still have packets pending on the receive queue and
	 * probably our own packets waiting in device queues. sock_destroy
	 * will drain the receive queue, but transmitted packets will delay
	 * socket destruction until the last reference is released.
	 */

	sock_orphan(sk);

	xfrm_sk_free_policy(sk);

	sk_refcnt_debug_release(sk);

	sock_put(sk);
}
EXPORT_SYMBOL(sk_common_release);

void sk_get_meminfo(const struct sock *sk, u32 *mem)
{
	memset(mem, 0, sizeof(*mem) * SK_MEMINFO_VARS);

	mem[SK_MEMINFO_RMEM_ALLOC] = sk_rmem_alloc_get(sk);
	mem[SK_MEMINFO_RCVBUF] = sk->sk_rcvbuf;
	mem[SK_MEMINFO_WMEM_ALLOC] = sk_wmem_alloc_get(sk);
	mem[SK_MEMINFO_SNDBUF] = sk->sk_sndbuf;
	mem[SK_MEMINFO_FWD_ALLOC] = sk->sk_forward_alloc;
	mem[SK_MEMINFO_WMEM_QUEUED] = sk->sk_wmem_queued;
	mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);
	mem[SK_MEMINFO_BACKLOG] = sk->sk_backlog.len;
	mem[SK_MEMINFO_DROPS] = atomic_read(&sk->sk_drops);
}

#ifdef CONFIG_PROC_FS
#define PROTO_INUSE_NR	64	/* should be enough for the first time */
struct prot_inuse {
	int val[PROTO_INUSE_NR];
};

static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);

void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
{
	__this_cpu_add(net->core.prot_inuse->val[prot->inuse_idx], val);
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_add);

int sock_prot_inuse_get(struct net *net, struct proto *prot)
{
	int cpu, idx = prot->inuse_idx;
	int res = 0;

	for_each_possible_cpu(cpu)
		res += per_cpu_ptr(net->core.prot_inuse, cpu)->val[idx];

	return res >= 0 ? res : 0;
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
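
/*
 * Illustrative sketch (not part of this file): protocols bump the
 * per-cpu counter when hashing and unhashing a socket, e.g.:
 *
 *	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);	// on hash
 *	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);	// on unhash
 *
 * and /proc/net/protocols sums the result via sock_prot_inuse_get().
 */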

static void sock_inuse_add(struct net *net, int val)
{
	this_cpu_add(*net->core.sock_inuse, val);
}

int sock_inuse_get(struct net *net)
{
	int cpu, res = 0;

	for_each_possible_cpu(cpu)
		res += *per_cpu_ptr(net->core.sock_inuse, cpu);

	return res;
}

EXPORT_SYMBOL_GPL(sock_inuse_get);

static int __net_init sock_inuse_init_net(struct net *net)
{
	net->core.prot_inuse = alloc_percpu(struct prot_inuse);
	if (net->core.prot_inuse == NULL)
		return -ENOMEM;

	net->core.sock_inuse = alloc_percpu(int);
	if (net->core.sock_inuse == NULL)
		goto out;

	return 0;

out:
	free_percpu(net->core.prot_inuse);
	return -ENOMEM;
}

static void __net_exit sock_inuse_exit_net(struct net *net)
{
	free_percpu(net->core.prot_inuse);
	free_percpu(net->core.sock_inuse);
}

static struct pernet_operations net_inuse_ops = {
	.init = sock_inuse_init_net,
	.exit = sock_inuse_exit_net,
};

static __init int net_inuse_init(void)
{
	if (register_pernet_subsys(&net_inuse_ops))
		panic("Cannot initialize net inuse counters");

	return 0;
}

core_initcall(net_inuse_init);

static void assign_proto_idx(struct proto *prot)
{
	prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);

	if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
		pr_err("PROTO_INUSE_NR exhausted\n");
		return;
	}

	set_bit(prot->inuse_idx, proto_inuse_idx);
}

static void release_proto_idx(struct proto *prot)
{
	if (prot->inuse_idx != PROTO_INUSE_NR - 1)
		clear_bit(prot->inuse_idx, proto_inuse_idx);
}
#else
static inline void assign_proto_idx(struct proto *prot)
{
}

static inline void release_proto_idx(struct proto *prot)
{
}

static void sock_inuse_add(struct net *net, int val)
{
}
#endif

static void req_prot_cleanup(struct request_sock_ops *rsk_prot)
{
	if (!rsk_prot)
		return;
	kfree(rsk_prot->slab_name);
	rsk_prot->slab_name = NULL;
	kmem_cache_destroy(rsk_prot->slab);
	rsk_prot->slab = NULL;
}

static int req_prot_init(const struct proto *prot)
{
	struct request_sock_ops *rsk_prot = prot->rsk_prot;

	if (!rsk_prot)
		return 0;

	rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s",
					prot->name);
	if (!rsk_prot->slab_name)
		return -ENOMEM;

	rsk_prot->slab = kmem_cache_create(rsk_prot->slab_name,
					   rsk_prot->obj_size, 0,
					   prot->slab_flags, NULL);

	if (!rsk_prot->slab) {
		pr_crit("%s: Can't create request sock SLAB cache!\n",
			prot->name);
		return -ENOMEM;
	}
	return 0;
}

int proto_register(struct proto *prot, int alloc_slab)
{
	if (alloc_slab) {
		prot->slab = kmem_cache_create_usercopy(prot->name,
					prot->obj_size, 0,
					SLAB_HWCACHE_ALIGN | prot->slab_flags,
					prot->useroffset, prot->usersize,
					NULL);

		if (prot->slab == NULL) {
			pr_crit("%s: Can't create sock SLAB cache!\n",
				prot->name);
			goto out;
		}

		if (req_prot_init(prot))
			goto out_free_request_sock_slab;

		if (prot->twsk_prot != NULL) {
			prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);

			if (prot->twsk_prot->twsk_slab_name == NULL)
				goto out_free_request_sock_slab;

			prot->twsk_prot->twsk_slab =
				kmem_cache_create(prot->twsk_prot->twsk_slab_name,
						  prot->twsk_prot->twsk_obj_size,
						  0,
						  prot->slab_flags,
						  NULL);
			if (prot->twsk_prot->twsk_slab == NULL)
				goto out_free_timewait_sock_slab_name;
		}
	}

	mutex_lock(&proto_list_mutex);
	list_add(&prot->node, &proto_list);
	assign_proto_idx(prot);
	mutex_unlock(&proto_list_mutex);
	return 0;

out_free_timewait_sock_slab_name:
	kfree(prot->twsk_prot->twsk_slab_name);
out_free_request_sock_slab:
	req_prot_cleanup(prot->rsk_prot);

	kmem_cache_destroy(prot->slab);
	prot->slab = NULL;
out:
	return -ENOBUFS;
}
EXPORT_SYMBOL(proto_register);
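
/*
 * Illustrative sketch (not part of this file): a module registers its
 * proto at init time and must unregister it on exit; "example_proto"
 * and "struct example_sock" are hypothetical:
 *
 *	static struct proto example_proto = {
 *		.name	  = "EXAMPLE",
 *		.owner	  = THIS_MODULE,
 *		.obj_size = sizeof(struct example_sock),
 *	};
 *
 *	err = proto_register(&example_proto, 1);	// 1: allocate a slab
 *	if (err)
 *		return err;
 *	...
 *	proto_unregister(&example_proto);
 */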

void proto_unregister(struct proto *prot)
{
	mutex_lock(&proto_list_mutex);
	release_proto_idx(prot);
	list_del(&prot->node);
	mutex_unlock(&proto_list_mutex);

	kmem_cache_destroy(prot->slab);
	prot->slab = NULL;

	req_prot_cleanup(prot->rsk_prot);

	if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
		kmem_cache_destroy(prot->twsk_prot->twsk_slab);
		kfree(prot->twsk_prot->twsk_slab_name);
		prot->twsk_prot->twsk_slab = NULL;
	}
}
EXPORT_SYMBOL(proto_unregister);

int sock_load_diag_module(int family, int protocol)
{
	if (!protocol) {
		if (!sock_is_registered(family))
			return -ENOENT;

		return request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
				      NETLINK_SOCK_DIAG, family);
	}

#ifdef CONFIG_INET
	if (family == AF_INET &&
	    !rcu_access_pointer(inet_protos[protocol]))
		return -ENOENT;
#endif

	return request_module("net-pf-%d-proto-%d-type-%d-%d", PF_NETLINK,
			      NETLINK_SOCK_DIAG, family, protocol);
}
EXPORT_SYMBOL(sock_load_diag_module);

#ifdef CONFIG_PROC_FS
static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(proto_list_mutex)
{
	mutex_lock(&proto_list_mutex);
	return seq_list_start_head(&proto_list, *pos);
}

static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_list_next(v, &proto_list, pos);
}

static void proto_seq_stop(struct seq_file *seq, void *v)
	__releases(proto_list_mutex)
{
	mutex_unlock(&proto_list_mutex);
}

static char proto_method_implemented(const void *method)
{
	return method == NULL ? 'n' : 'y';
}

static long sock_prot_memory_allocated(struct proto *proto)
{
	return proto->memory_allocated != NULL ? proto_memory_allocated(proto) : -1L;
}

static char *sock_prot_memory_pressure(struct proto *proto)
{
	return proto->memory_pressure != NULL ?
	proto_memory_pressure(proto) ? "yes" : "no" : "NI";
}

static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
{
	seq_printf(seq, "%-9s %4u %6d %6ld %-3s %6u %-3s %-10s "
			"%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
		   proto->name,
		   proto->obj_size,
		   sock_prot_inuse_get(seq_file_net(seq), proto),
		   sock_prot_memory_allocated(proto),
		   sock_prot_memory_pressure(proto),
		   proto->max_header,
		   proto->slab == NULL ? "no" : "yes",
		   module_name(proto->owner),
		   proto_method_implemented(proto->close),
		   proto_method_implemented(proto->connect),
		   proto_method_implemented(proto->disconnect),
		   proto_method_implemented(proto->accept),
		   proto_method_implemented(proto->ioctl),
		   proto_method_implemented(proto->init),
		   proto_method_implemented(proto->destroy),
		   proto_method_implemented(proto->shutdown),
		   proto_method_implemented(proto->setsockopt),
		   proto_method_implemented(proto->getsockopt),
		   proto_method_implemented(proto->sendmsg),
		   proto_method_implemented(proto->recvmsg),
		   proto_method_implemented(proto->sendpage),
		   proto_method_implemented(proto->bind),
		   proto_method_implemented(proto->backlog_rcv),
		   proto_method_implemented(proto->hash),
		   proto_method_implemented(proto->unhash),
		   proto_method_implemented(proto->get_port),
		   proto_method_implemented(proto->enter_memory_pressure));
}

static int proto_seq_show(struct seq_file *seq, void *v)
{
	if (v == &proto_list)
		seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
			   "protocol",
			   "size",
			   "sockets",
			   "memory",
			   "press",
			   "maxhdr",
			   "slab",
			   "module",
			   "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
	else
		proto_seq_printf(seq, list_entry(v, struct proto, node));
	return 0;
}

static const struct seq_operations proto_seq_ops = {
	.start = proto_seq_start,
	.next  = proto_seq_next,
	.stop  = proto_seq_stop,
	.show  = proto_seq_show,
};

static int proto_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &proto_seq_ops,
			    sizeof(struct seq_net_private));
}

static const struct file_operations proto_seq_fops = {
	.open    = proto_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};

static __net_init int proto_init_net(struct net *net)
{
	if (!proc_create("protocols", 0444, net->proc_net, &proto_seq_fops))
		return -ENOMEM;

	return 0;
}

static __net_exit void proto_exit_net(struct net *net)
{
	remove_proc_entry("protocols", net->proc_net);
}

static __net_initdata struct pernet_operations proto_net_ops = {
	.init = proto_init_net,
	.exit = proto_exit_net,
};

static int __init proto_init(void)
{
	return register_pernet_subsys(&proto_net_ops);
}

subsys_initcall(proto_init);

#endif /* PROC_FS */

#ifdef CONFIG_NET_RX_BUSY_POLL
bool sk_busy_loop_end(void *p, unsigned long start_time)
{
	struct sock *sk = p;

	return !skb_queue_empty(&sk->sk_receive_queue) ||
	       sk_busy_loop_timeout(sk, start_time);
}
EXPORT_SYMBOL(sk_busy_loop_end);
#endif /* CONFIG_NET_RX_BUSY_POLL */
1/*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * Generic socket support routines. Memory allocators, socket lock/release
7 * handler for protocols to use and generic option handler.
8 *
9 *
10 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Florian La Roche, <flla@stud.uni-sb.de>
13 * Alan Cox, <A.Cox@swansea.ac.uk>
14 *
15 * Fixes:
16 * Alan Cox : Numerous verify_area() problems
17 * Alan Cox : Connecting on a connecting socket
18 * now returns an error for tcp.
19 * Alan Cox : sock->protocol is set correctly.
20 * and is not sometimes left as 0.
21 * Alan Cox : connect handles icmp errors on a
22 * connect properly. Unfortunately there
23 * is a restart syscall nasty there. I
24 * can't match BSD without hacking the C
25 * library. Ideas urgently sought!
26 * Alan Cox : Disallow bind() to addresses that are
27 * not ours - especially broadcast ones!!
28 * Alan Cox : Socket 1024 _IS_ ok for users. (fencepost)
29 * Alan Cox : sock_wfree/sock_rfree don't destroy sockets,
30 * instead they leave that for the DESTROY timer.
31 * Alan Cox : Clean up error flag in accept
32 * Alan Cox : TCP ack handling is buggy, the DESTROY timer
33 * was buggy. Put a remove_sock() in the handler
34 * for memory when we hit 0. Also altered the timer
35 * code. The ACK stuff can wait and needs major
36 * TCP layer surgery.
37 * Alan Cox : Fixed TCP ack bug, removed remove sock
38 * and fixed timer/inet_bh race.
39 * Alan Cox : Added zapped flag for TCP
40 * Alan Cox : Move kfree_skb into skbuff.c and tidied up surplus code
41 * Alan Cox : for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
42 * Alan Cox : kfree_s calls now are kfree_skbmem so we can track skb resources
43 * Alan Cox : Supports socket option broadcast now as does udp. Packet and raw need fixing.
44 * Alan Cox : Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
45 * Rick Sladkey : Relaxed UDP rules for matching packets.
46 * C.E.Hawkins : IFF_PROMISC/SIOCGHWADDR support
47 * Pauline Middelink : identd support
48 * Alan Cox : Fixed connect() taking signals I think.
49 * Alan Cox : SO_LINGER supported
50 * Alan Cox : Error reporting fixes
51 * Anonymous : inet_create tidied up (sk->reuse setting)
52 * Alan Cox : inet sockets don't set sk->type!
53 * Alan Cox : Split socket option code
54 * Alan Cox : Callbacks
55 * Alan Cox : Nagle flag for Charles & Johannes stuff
56 * Alex : Removed restriction on inet fioctl
57 * Alan Cox : Splitting INET from NET core
58 * Alan Cox : Fixed bogus SO_TYPE handling in getsockopt()
59 * Adam Caldwell : Missing return in SO_DONTROUTE/SO_DEBUG code
60 * Alan Cox : Split IP from generic code
61 * Alan Cox : New kfree_skbmem()
62 * Alan Cox : Make SO_DEBUG superuser only.
63 * Alan Cox : Allow anyone to clear SO_DEBUG
64 * (compatibility fix)
65 * Alan Cox : Added optimistic memory grabbing for AF_UNIX throughput.
66 * Alan Cox : Allocator for a socket is settable.
67 * Alan Cox : SO_ERROR includes soft errors.
68 * Alan Cox : Allow NULL arguments on some SO_ opts
69 * Alan Cox : Generic socket allocation to make hooks
70 * easier (suggested by Craig Metz).
71 * Michael Pall : SO_ERROR returns positive errno again
72 * Steve Whitehouse: Added default destructor to free
73 * protocol private data.
74 * Steve Whitehouse: Added various other default routines
75 * common to several socket families.
76 * Chris Evans : Call suser() check last on F_SETOWN
77 * Jay Schulist : Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
78 * Andi Kleen : Add sock_kmalloc()/sock_kfree_s()
79 * Andi Kleen : Fix write_space callback
80 * Chris Evans : Security fixes - signedness again
81 * Arnaldo C. Melo : cleanups, use skb_queue_purge
82 *
83 * To Fix:
84 *
85 *
86 * This program is free software; you can redistribute it and/or
87 * modify it under the terms of the GNU General Public License
88 * as published by the Free Software Foundation; either version
89 * 2 of the License, or (at your option) any later version.
90 */
91
92#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
93
94#include <linux/capability.h>
95#include <linux/errno.h>
96#include <linux/types.h>
97#include <linux/socket.h>
98#include <linux/in.h>
99#include <linux/kernel.h>
100#include <linux/module.h>
101#include <linux/proc_fs.h>
102#include <linux/seq_file.h>
103#include <linux/sched.h>
104#include <linux/timer.h>
105#include <linux/string.h>
106#include <linux/sockios.h>
107#include <linux/net.h>
108#include <linux/mm.h>
109#include <linux/slab.h>
110#include <linux/interrupt.h>
111#include <linux/poll.h>
112#include <linux/tcp.h>
113#include <linux/init.h>
114#include <linux/highmem.h>
115#include <linux/user_namespace.h>
116#include <linux/static_key.h>
117#include <linux/memcontrol.h>
118#include <linux/prefetch.h>
119
120#include <asm/uaccess.h>
121
122#include <linux/netdevice.h>
123#include <net/protocol.h>
124#include <linux/skbuff.h>
125#include <net/net_namespace.h>
126#include <net/request_sock.h>
127#include <net/sock.h>
128#include <linux/net_tstamp.h>
129#include <net/xfrm.h>
130#include <linux/ipsec.h>
131#include <net/cls_cgroup.h>
132#include <net/netprio_cgroup.h>
133
134#include <linux/filter.h>
135
136#include <trace/events/sock.h>
137
138#ifdef CONFIG_INET
139#include <net/tcp.h>
140#endif
141
142static DEFINE_MUTEX(proto_list_mutex);
143static LIST_HEAD(proto_list);
144
145#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
146int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
147{
148 struct proto *proto;
149 int ret = 0;
150
151 mutex_lock(&proto_list_mutex);
152 list_for_each_entry(proto, &proto_list, node) {
153 if (proto->init_cgroup) {
154 ret = proto->init_cgroup(memcg, ss);
155 if (ret)
156 goto out;
157 }
158 }
159
160 mutex_unlock(&proto_list_mutex);
161 return ret;
162out:
163 list_for_each_entry_continue_reverse(proto, &proto_list, node)
164 if (proto->destroy_cgroup)
165 proto->destroy_cgroup(memcg);
166 mutex_unlock(&proto_list_mutex);
167 return ret;
168}
169
170void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg)
171{
172 struct proto *proto;
173
174 mutex_lock(&proto_list_mutex);
175 list_for_each_entry_reverse(proto, &proto_list, node)
176 if (proto->destroy_cgroup)
177 proto->destroy_cgroup(memcg);
178 mutex_unlock(&proto_list_mutex);
179}
180#endif
181
182/*
183 * Each address family might have different locking rules, so we have
184 * one slock key per address family:
185 */
186static struct lock_class_key af_family_keys[AF_MAX];
187static struct lock_class_key af_family_slock_keys[AF_MAX];
188
189struct static_key memcg_socket_limit_enabled;
190EXPORT_SYMBOL(memcg_socket_limit_enabled);
191
192/*
193 * Make lock validator output more readable. (we pre-construct these
194 * strings build-time, so that runtime initialization of socket
195 * locks is fast):
196 */
197static const char *const af_family_key_strings[AF_MAX+1] = {
198 "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX" , "sk_lock-AF_INET" ,
199 "sk_lock-AF_AX25" , "sk_lock-AF_IPX" , "sk_lock-AF_APPLETALK",
200 "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE" , "sk_lock-AF_ATMPVC" ,
201 "sk_lock-AF_X25" , "sk_lock-AF_INET6" , "sk_lock-AF_ROSE" ,
202 "sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI" , "sk_lock-AF_SECURITY" ,
203 "sk_lock-AF_KEY" , "sk_lock-AF_NETLINK" , "sk_lock-AF_PACKET" ,
204 "sk_lock-AF_ASH" , "sk_lock-AF_ECONET" , "sk_lock-AF_ATMSVC" ,
205 "sk_lock-AF_RDS" , "sk_lock-AF_SNA" , "sk_lock-AF_IRDA" ,
206 "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE" , "sk_lock-AF_LLC" ,
207 "sk_lock-27" , "sk_lock-28" , "sk_lock-AF_CAN" ,
208 "sk_lock-AF_TIPC" , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV" ,
209 "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN" , "sk_lock-AF_PHONET" ,
210 "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG" ,
211 "sk_lock-AF_NFC" , "sk_lock-AF_MAX"
212};
213static const char *const af_family_slock_key_strings[AF_MAX+1] = {
214 "slock-AF_UNSPEC", "slock-AF_UNIX" , "slock-AF_INET" ,
215 "slock-AF_AX25" , "slock-AF_IPX" , "slock-AF_APPLETALK",
216 "slock-AF_NETROM", "slock-AF_BRIDGE" , "slock-AF_ATMPVC" ,
217 "slock-AF_X25" , "slock-AF_INET6" , "slock-AF_ROSE" ,
218 "slock-AF_DECnet", "slock-AF_NETBEUI" , "slock-AF_SECURITY" ,
219 "slock-AF_KEY" , "slock-AF_NETLINK" , "slock-AF_PACKET" ,
220 "slock-AF_ASH" , "slock-AF_ECONET" , "slock-AF_ATMSVC" ,
221 "slock-AF_RDS" , "slock-AF_SNA" , "slock-AF_IRDA" ,
222 "slock-AF_PPPOX" , "slock-AF_WANPIPE" , "slock-AF_LLC" ,
223 "slock-27" , "slock-28" , "slock-AF_CAN" ,
224 "slock-AF_TIPC" , "slock-AF_BLUETOOTH", "slock-AF_IUCV" ,
225 "slock-AF_RXRPC" , "slock-AF_ISDN" , "slock-AF_PHONET" ,
226 "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG" ,
227 "slock-AF_NFC" , "slock-AF_MAX"
228};
229static const char *const af_family_clock_key_strings[AF_MAX+1] = {
230 "clock-AF_UNSPEC", "clock-AF_UNIX" , "clock-AF_INET" ,
231 "clock-AF_AX25" , "clock-AF_IPX" , "clock-AF_APPLETALK",
232 "clock-AF_NETROM", "clock-AF_BRIDGE" , "clock-AF_ATMPVC" ,
233 "clock-AF_X25" , "clock-AF_INET6" , "clock-AF_ROSE" ,
234 "clock-AF_DECnet", "clock-AF_NETBEUI" , "clock-AF_SECURITY" ,
235 "clock-AF_KEY" , "clock-AF_NETLINK" , "clock-AF_PACKET" ,
236 "clock-AF_ASH" , "clock-AF_ECONET" , "clock-AF_ATMSVC" ,
237 "clock-AF_RDS" , "clock-AF_SNA" , "clock-AF_IRDA" ,
238 "clock-AF_PPPOX" , "clock-AF_WANPIPE" , "clock-AF_LLC" ,
239 "clock-27" , "clock-28" , "clock-AF_CAN" ,
240 "clock-AF_TIPC" , "clock-AF_BLUETOOTH", "clock-AF_IUCV" ,
241 "clock-AF_RXRPC" , "clock-AF_ISDN" , "clock-AF_PHONET" ,
242 "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG" ,
243 "clock-AF_NFC" , "clock-AF_MAX"
244};
245
246/*
247 * sk_callback_lock locking rules are per-address-family,
248 * so split the lock classes by using a per-AF key:
249 */
250static struct lock_class_key af_callback_keys[AF_MAX];
251
252/* Take into consideration the size of the struct sk_buff overhead in the
253 * determination of these values, since that is non-constant across
254 * platforms. This makes socket queueing behavior and performance
255 * not depend upon such differences.
256 */
257#define _SK_MEM_PACKETS 256
258#define _SK_MEM_OVERHEAD SKB_TRUESIZE(256)
259#define SK_WMEM_MAX (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
260#define SK_RMEM_MAX (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
261
262/* Run time adjustable parameters. */
263__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
264EXPORT_SYMBOL(sysctl_wmem_max);
265__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
266EXPORT_SYMBOL(sysctl_rmem_max);
267__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
268__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;
269
270/* Maximal space eaten by iovec or ancillary data plus some space */
271int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
272EXPORT_SYMBOL(sysctl_optmem_max);
273
274#if defined(CONFIG_CGROUPS)
275#if !defined(CONFIG_NET_CLS_CGROUP)
276int net_cls_subsys_id = -1;
277EXPORT_SYMBOL_GPL(net_cls_subsys_id);
278#endif
279#if !defined(CONFIG_NETPRIO_CGROUP)
280int net_prio_subsys_id = -1;
281EXPORT_SYMBOL_GPL(net_prio_subsys_id);
282#endif
283#endif
284
285static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
286{
287 struct timeval tv;
288
289 if (optlen < sizeof(tv))
290 return -EINVAL;
291 if (copy_from_user(&tv, optval, sizeof(tv)))
292 return -EFAULT;
293 if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
294 return -EDOM;
295
296 if (tv.tv_sec < 0) {
297 static int warned __read_mostly;
298
299 *timeo_p = 0;
300 if (warned < 10 && net_ratelimit()) {
301 warned++;
302 pr_info("%s: `%s' (pid %d) tries to set negative timeout\n",
303 __func__, current->comm, task_pid_nr(current));
304 }
305 return 0;
306 }
307 *timeo_p = MAX_SCHEDULE_TIMEOUT;
308 if (tv.tv_sec == 0 && tv.tv_usec == 0)
309 return 0;
310 if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
311 *timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
312 return 0;
313}
314
315static void sock_warn_obsolete_bsdism(const char *name)
316{
317 static int warned;
318 static char warncomm[TASK_COMM_LEN];
319 if (strcmp(warncomm, current->comm) && warned < 5) {
320 strcpy(warncomm, current->comm);
321 pr_warn("process `%s' is using obsolete %s SO_BSDCOMPAT\n",
322 warncomm, name);
323 warned++;
324 }
325}
326
327#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))
328
329static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
330{
331 if (sk->sk_flags & flags) {
332 sk->sk_flags &= ~flags;
333 if (!(sk->sk_flags & SK_FLAGS_TIMESTAMP))
334 net_disable_timestamp();
335 }
336}
337
338
339int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
340{
341 int err;
342 int skb_len;
343 unsigned long flags;
344 struct sk_buff_head *list = &sk->sk_receive_queue;
345
346 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
347 atomic_inc(&sk->sk_drops);
348 trace_sock_rcvqueue_full(sk, skb);
349 return -ENOMEM;
350 }
351
352 err = sk_filter(sk, skb);
353 if (err)
354 return err;
355
356 if (!sk_rmem_schedule(sk, skb->truesize)) {
357 atomic_inc(&sk->sk_drops);
358 return -ENOBUFS;
359 }
360
361 skb->dev = NULL;
362 skb_set_owner_r(skb, sk);
363
364 /* Cache the SKB length before we tack it onto the receive
365 * queue. Once it is added it no longer belongs to us and
366 * may be freed by other threads of control pulling packets
367 * from the queue.
368 */
369 skb_len = skb->len;
370
371 /* we escape from rcu protected region, make sure we dont leak
372 * a norefcounted dst
373 */
374 skb_dst_force(skb);
375
376 spin_lock_irqsave(&list->lock, flags);
377 skb->dropcount = atomic_read(&sk->sk_drops);
378 __skb_queue_tail(list, skb);
379 spin_unlock_irqrestore(&list->lock, flags);
380
381 if (!sock_flag(sk, SOCK_DEAD))
382 sk->sk_data_ready(sk, skb_len);
383 return 0;
384}
385EXPORT_SYMBOL(sock_queue_rcv_skb);
386
387int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
388{
389 int rc = NET_RX_SUCCESS;
390
391 if (sk_filter(sk, skb))
392 goto discard_and_relse;
393
394 skb->dev = NULL;
395
396 if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
397 atomic_inc(&sk->sk_drops);
398 goto discard_and_relse;
399 }
400 if (nested)
401 bh_lock_sock_nested(sk);
402 else
403 bh_lock_sock(sk);
404 if (!sock_owned_by_user(sk)) {
405 /*
406 * trylock + unlock semantics:
407 */
408 mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);
409
410 rc = sk_backlog_rcv(sk, skb);
411
412 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
413 } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
414 bh_unlock_sock(sk);
415 atomic_inc(&sk->sk_drops);
416 goto discard_and_relse;
417 }
418
419 bh_unlock_sock(sk);
420out:
421 sock_put(sk);
422 return rc;
423discard_and_relse:
424 kfree_skb(skb);
425 goto out;
426}
427EXPORT_SYMBOL(sk_receive_skb);
428
429void sk_reset_txq(struct sock *sk)
430{
431 sk_tx_queue_clear(sk);
432}
433EXPORT_SYMBOL(sk_reset_txq);
434
435struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
436{
437 struct dst_entry *dst = __sk_dst_get(sk);
438
439 if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
440 sk_tx_queue_clear(sk);
441 RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
442 dst_release(dst);
443 return NULL;
444 }
445
446 return dst;
447}
448EXPORT_SYMBOL(__sk_dst_check);
449
450struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
451{
452 struct dst_entry *dst = sk_dst_get(sk);
453
454 if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
455 sk_dst_reset(sk);
456 dst_release(dst);
457 return NULL;
458 }
459
460 return dst;
461}
462EXPORT_SYMBOL(sk_dst_check);
463
464static int sock_bindtodevice(struct sock *sk, char __user *optval, int optlen)
465{
466 int ret = -ENOPROTOOPT;
467#ifdef CONFIG_NETDEVICES
468 struct net *net = sock_net(sk);
469 char devname[IFNAMSIZ];
470 int index;
471
472 /* Sorry... */
473 ret = -EPERM;
474 if (!capable(CAP_NET_RAW))
475 goto out;
476
477 ret = -EINVAL;
478 if (optlen < 0)
479 goto out;
480
481 /* Bind this socket to a particular device like "eth0",
482 * as specified in the passed interface name. If the
483 * name is "" or the option length is zero the socket
484 * is not bound.
485 */
486 if (optlen > IFNAMSIZ - 1)
487 optlen = IFNAMSIZ - 1;
488 memset(devname, 0, sizeof(devname));
489
490 ret = -EFAULT;
491 if (copy_from_user(devname, optval, optlen))
492 goto out;
493
494 index = 0;
495 if (devname[0] != '\0') {
496 struct net_device *dev;
497
498 rcu_read_lock();
499 dev = dev_get_by_name_rcu(net, devname);
500 if (dev)
501 index = dev->ifindex;
502 rcu_read_unlock();
503 ret = -ENODEV;
504 if (!dev)
505 goto out;
506 }
507
508 lock_sock(sk);
509 sk->sk_bound_dev_if = index;
510 sk_dst_reset(sk);
511 release_sock(sk);
512
513 ret = 0;
514
515out:
516#endif
517
518 return ret;
519}
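
/*
 * Illustrative userspace sketch of the option handled above (assumptions:
 * the caller has CAP_NET_RAW and "eth0" is a placeholder device name):
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <net/if.h>
 *
 *	int bind_to_eth0(int fd)
 *	{
 *		const char ifname[IFNAMSIZ] = "eth0";
 *
 *		return setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE,
 *				  ifname, strlen(ifname) + 1);
 *	}
 *
 * Passing an empty name (or a zero option length) unbinds the socket.
 */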
520
521static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
522{
523 if (valbool)
524 sock_set_flag(sk, bit);
525 else
526 sock_reset_flag(sk, bit);
527}
528
529/*
530 * This is meant for all protocols to use and covers goings on
531 * at the socket level. Everything here is generic.
532 */
533
534int sock_setsockopt(struct socket *sock, int level, int optname,
535 char __user *optval, unsigned int optlen)
536{
537 struct sock *sk = sock->sk;
538 int val;
539 int valbool;
540 struct linger ling;
541 int ret = 0;
542
543 /*
544 * Options without arguments
545 */
546
547 if (optname == SO_BINDTODEVICE)
548 return sock_bindtodevice(sk, optval, optlen);
549
550 if (optlen < sizeof(int))
551 return -EINVAL;
552
553 if (get_user(val, (int __user *)optval))
554 return -EFAULT;
555
556 valbool = val ? 1 : 0;
557
558 lock_sock(sk);
559
560 switch (optname) {
561 case SO_DEBUG:
562 if (val && !capable(CAP_NET_ADMIN))
563 ret = -EACCES;
564 else
565 sock_valbool_flag(sk, SOCK_DBG, valbool);
566 break;
567 case SO_REUSEADDR:
568 sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
569 break;
570 case SO_TYPE:
571 case SO_PROTOCOL:
572 case SO_DOMAIN:
573 case SO_ERROR:
574 ret = -ENOPROTOOPT;
575 break;
576 case SO_DONTROUTE:
577 sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
578 break;
579 case SO_BROADCAST:
580 sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
581 break;
582 case SO_SNDBUF:
583 /* Don't error on this; BSD doesn't, and if you think
584  * about it, this is right. Otherwise apps have to
585  * play 'guess the biggest size' games. RCVBUF/SNDBUF
586  * are treated in BSD as hints.
587  */
588 val = min_t(u32, val, sysctl_wmem_max);
589set_sndbuf:
590 sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
591 sk->sk_sndbuf = max_t(u32, val * 2, SOCK_MIN_SNDBUF);
592 /* Wake up sending tasks if we upped the value. */
593 sk->sk_write_space(sk);
594 break;
595
596 case SO_SNDBUFFORCE:
597 if (!capable(CAP_NET_ADMIN)) {
598 ret = -EPERM;
599 break;
600 }
601 goto set_sndbuf;
602
603 case SO_RCVBUF:
604 /* Don't error on this; BSD doesn't, and if you think
605  * about it, this is right. Otherwise apps have to
606  * play 'guess the biggest size' games. RCVBUF/SNDBUF
607  * are treated in BSD as hints.
608  */
609 val = min_t(u32, val, sysctl_rmem_max);
610set_rcvbuf:
611 sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
612 /*
613 * We double it on the way in to account for
614 * "struct sk_buff" etc. overhead. Applications
615 * assume that the SO_RCVBUF setting they make will
616 * allow that much actual data to be received on that
617 * socket.
618 *
619 * Applications are unaware that "struct sk_buff" and
620 * other overheads allocate from the receive buffer
621 * during socket buffer allocation.
622 *
623 * And after considering the possible alternatives,
624 * returning the value we actually used in getsockopt
625 * is the most desirable behavior.
626 */
627 sk->sk_rcvbuf = max_t(u32, val * 2, SOCK_MIN_RCVBUF);
628 break;
629
630 case SO_RCVBUFFORCE:
631 if (!capable(CAP_NET_ADMIN)) {
632 ret = -EPERM;
633 break;
634 }
635 goto set_rcvbuf;
636
637 case SO_KEEPALIVE:
638#ifdef CONFIG_INET
639 if (sk->sk_protocol == IPPROTO_TCP)
640 tcp_set_keepalive(sk, valbool);
641#endif
642 sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
643 break;
644
645 case SO_OOBINLINE:
646 sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
647 break;
648
649 case SO_NO_CHECK:
650 sk->sk_no_check = valbool;
651 break;
652
653 case SO_PRIORITY:
654 if ((val >= 0 && val <= 6) || capable(CAP_NET_ADMIN))
655 sk->sk_priority = val;
656 else
657 ret = -EPERM;
658 break;
659
660 case SO_LINGER:
661 if (optlen < sizeof(ling)) {
662 ret = -EINVAL; /* 1003.1g */
663 break;
664 }
665 if (copy_from_user(&ling, optval, sizeof(ling))) {
666 ret = -EFAULT;
667 break;
668 }
669 if (!ling.l_onoff)
670 sock_reset_flag(sk, SOCK_LINGER);
671 else {
672#if (BITS_PER_LONG == 32)
673 if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
674 sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
675 else
676#endif
677 sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
678 sock_set_flag(sk, SOCK_LINGER);
679 }
680 break;
681
682 case SO_BSDCOMPAT:
683 sock_warn_obsolete_bsdism("setsockopt");
684 break;
685
686 case SO_PASSCRED:
687 if (valbool)
688 set_bit(SOCK_PASSCRED, &sock->flags);
689 else
690 clear_bit(SOCK_PASSCRED, &sock->flags);
691 break;
692
693 case SO_TIMESTAMP:
694 case SO_TIMESTAMPNS:
695 if (valbool) {
696 if (optname == SO_TIMESTAMP)
697 sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
698 else
699 sock_set_flag(sk, SOCK_RCVTSTAMPNS);
700 sock_set_flag(sk, SOCK_RCVTSTAMP);
701 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
702 } else {
703 sock_reset_flag(sk, SOCK_RCVTSTAMP);
704 sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
705 }
706 break;
707
708 case SO_TIMESTAMPING:
709 if (val & ~SOF_TIMESTAMPING_MASK) {
710 ret = -EINVAL;
711 break;
712 }
713 sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE,
714 val & SOF_TIMESTAMPING_TX_HARDWARE);
715 sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE,
716 val & SOF_TIMESTAMPING_TX_SOFTWARE);
717 sock_valbool_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE,
718 val & SOF_TIMESTAMPING_RX_HARDWARE);
719 if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
720 sock_enable_timestamp(sk,
721 SOCK_TIMESTAMPING_RX_SOFTWARE);
722 else
723 sock_disable_timestamp(sk,
724 (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
725 sock_valbool_flag(sk, SOCK_TIMESTAMPING_SOFTWARE,
726 val & SOF_TIMESTAMPING_SOFTWARE);
727 sock_valbool_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE,
728 val & SOF_TIMESTAMPING_SYS_HARDWARE);
729 sock_valbool_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE,
730 val & SOF_TIMESTAMPING_RAW_HARDWARE);
731 break;
732
733 case SO_RCVLOWAT:
734 if (val < 0)
735 val = INT_MAX;
736 sk->sk_rcvlowat = val ? : 1;
737 break;
738
739 case SO_RCVTIMEO:
740 ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
741 break;
742
743 case SO_SNDTIMEO:
744 ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
745 break;
746
747 case SO_ATTACH_FILTER:
748 ret = -EINVAL;
749 if (optlen == sizeof(struct sock_fprog)) {
750 struct sock_fprog fprog;
751
752 ret = -EFAULT;
753 if (copy_from_user(&fprog, optval, sizeof(fprog)))
754 break;
755
756 ret = sk_attach_filter(&fprog, sk);
757 }
758 break;
759
760 case SO_DETACH_FILTER:
761 ret = sk_detach_filter(sk);
762 break;
763
764 case SO_PASSSEC:
765 if (valbool)
766 set_bit(SOCK_PASSSEC, &sock->flags);
767 else
768 clear_bit(SOCK_PASSSEC, &sock->flags);
769 break;
770 case SO_MARK:
771 if (!capable(CAP_NET_ADMIN))
772 ret = -EPERM;
773 else
774 sk->sk_mark = val;
775 break;
776
777 /* We implement SO_SNDLOWAT etc. as not
778    settable (1003.1g 5.3). */
779 case SO_RXQ_OVFL:
780 sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
781 break;
782
783 case SO_WIFI_STATUS:
784 sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
785 break;
786
787 case SO_PEEK_OFF:
788 if (sock->ops->set_peek_off)
789 sock->ops->set_peek_off(sk, val);
790 else
791 ret = -EOPNOTSUPP;
792 break;
793
794 case SO_NOFCS:
795 sock_valbool_flag(sk, SOCK_NOFCS, valbool);
796 break;
797
798 default:
799 ret = -ENOPROTOOPT;
800 break;
801 }
802 release_sock(sk);
803 return ret;
804}
805EXPORT_SYMBOL(sock_setsockopt);
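
/*
 * Illustrative userspace sketch of the SO_RCVBUF doubling described
 * above: the requested value is clamped to sysctl_rmem_max and doubled,
 * and getsockopt() reports the value actually in use.
 *
 *	#include <stdio.h>
 *	#include <sys/socket.h>
 *
 *	void show_rcvbuf(int fd)
 *	{
 *		int val = 65536;
 *		socklen_t len = sizeof(val);
 *
 *		setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, sizeof(val));
 *		getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, &len);
 *		printf("effective rcvbuf: %d\n", val);	// typically 131072
 *	}
 */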
806
807
808void cred_to_ucred(struct pid *pid, const struct cred *cred,
809 struct ucred *ucred)
810{
811 ucred->pid = pid_vnr(pid);
812 ucred->uid = ucred->gid = -1;
813 if (cred) {
814 struct user_namespace *current_ns = current_user_ns();
815
816 ucred->uid = from_kuid(current_ns, cred->euid);
817 ucred->gid = from_kgid(current_ns, cred->egid);
818 }
819}
820EXPORT_SYMBOL_GPL(cred_to_ucred);
821
822int sock_getsockopt(struct socket *sock, int level, int optname,
823 char __user *optval, int __user *optlen)
824{
825 struct sock *sk = sock->sk;
826
827 union {
828 int val;
829 struct linger ling;
830 struct timeval tm;
831 } v;
832
833 int lv = sizeof(int);
834 int len;
835
836 if (get_user(len, optlen))
837 return -EFAULT;
838 if (len < 0)
839 return -EINVAL;
840
841 memset(&v, 0, sizeof(v));
842
843 switch (optname) {
844 case SO_DEBUG:
845 v.val = sock_flag(sk, SOCK_DBG);
846 break;
847
848 case SO_DONTROUTE:
849 v.val = sock_flag(sk, SOCK_LOCALROUTE);
850 break;
851
852 case SO_BROADCAST:
853 v.val = sock_flag(sk, SOCK_BROADCAST);
854 break;
855
856 case SO_SNDBUF:
857 v.val = sk->sk_sndbuf;
858 break;
859
860 case SO_RCVBUF:
861 v.val = sk->sk_rcvbuf;
862 break;
863
864 case SO_REUSEADDR:
865 v.val = sk->sk_reuse;
866 break;
867
868 case SO_KEEPALIVE:
869 v.val = sock_flag(sk, SOCK_KEEPOPEN);
870 break;
871
872 case SO_TYPE:
873 v.val = sk->sk_type;
874 break;
875
876 case SO_PROTOCOL:
877 v.val = sk->sk_protocol;
878 break;
879
880 case SO_DOMAIN:
881 v.val = sk->sk_family;
882 break;
883
884 case SO_ERROR:
885 v.val = -sock_error(sk);
886 if (v.val == 0)
887 v.val = xchg(&sk->sk_err_soft, 0);
888 break;
889
890 case SO_OOBINLINE:
891 v.val = sock_flag(sk, SOCK_URGINLINE);
892 break;
893
894 case SO_NO_CHECK:
895 v.val = sk->sk_no_check;
896 break;
897
898 case SO_PRIORITY:
899 v.val = sk->sk_priority;
900 break;
901
902 case SO_LINGER:
903 lv = sizeof(v.ling);
904 v.ling.l_onoff = sock_flag(sk, SOCK_LINGER);
905 v.ling.l_linger = sk->sk_lingertime / HZ;
906 break;
907
908 case SO_BSDCOMPAT:
909 sock_warn_obsolete_bsdism("getsockopt");
910 break;
911
912 case SO_TIMESTAMP:
913 v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
914 !sock_flag(sk, SOCK_RCVTSTAMPNS);
915 break;
916
917 case SO_TIMESTAMPNS:
918 v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
919 break;
920
921 case SO_TIMESTAMPING:
922 v.val = 0;
923 if (sock_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE))
924 v.val |= SOF_TIMESTAMPING_TX_HARDWARE;
925 if (sock_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE))
926 v.val |= SOF_TIMESTAMPING_TX_SOFTWARE;
927 if (sock_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE))
928 v.val |= SOF_TIMESTAMPING_RX_HARDWARE;
929 if (sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE))
930 v.val |= SOF_TIMESTAMPING_RX_SOFTWARE;
931 if (sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE))
932 v.val |= SOF_TIMESTAMPING_SOFTWARE;
933 if (sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE))
934 v.val |= SOF_TIMESTAMPING_SYS_HARDWARE;
935 if (sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE))
936 v.val |= SOF_TIMESTAMPING_RAW_HARDWARE;
937 break;
938
939 case SO_RCVTIMEO:
940 lv = sizeof(struct timeval);
941 if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
942 v.tm.tv_sec = 0;
943 v.tm.tv_usec = 0;
944 } else {
945 v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
946 v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
947 }
948 break;
949
950 case SO_SNDTIMEO:
951 lv = sizeof(struct timeval);
952 if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
953 v.tm.tv_sec = 0;
954 v.tm.tv_usec = 0;
955 } else {
956 v.tm.tv_sec = sk->sk_sndtimeo / HZ;
957 v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
958 }
959 break;
960
961 case SO_RCVLOWAT:
962 v.val = sk->sk_rcvlowat;
963 break;
964
965 case SO_SNDLOWAT:
966 v.val = 1;
967 break;
968
969 case SO_PASSCRED:
970 v.val = !!test_bit(SOCK_PASSCRED, &sock->flags);
971 break;
972
973 case SO_PEERCRED:
974 {
975 struct ucred peercred;
976 if (len > sizeof(peercred))
977 len = sizeof(peercred);
978 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
979 if (copy_to_user(optval, &peercred, len))
980 return -EFAULT;
981 goto lenout;
982 }
983
984 case SO_PEERNAME:
985 {
986 char address[128];
987
988 if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
989 return -ENOTCONN;
990 if (lv < len)
991 return -EINVAL;
992 if (copy_to_user(optval, address, len))
993 return -EFAULT;
994 goto lenout;
995 }
996
997 /* Dubious BSD thing... Probably nobody even uses it, but
998 * the UNIX standard wants it for whatever reason... -DaveM
999 */
1000 case SO_ACCEPTCONN:
1001 v.val = sk->sk_state == TCP_LISTEN;
1002 break;
1003
1004 case SO_PASSSEC:
1005 v.val = !!test_bit(SOCK_PASSSEC, &sock->flags);
1006 break;
1007
1008 case SO_PEERSEC:
1009 return security_socket_getpeersec_stream(sock, optval, optlen, len);
1010
1011 case SO_MARK:
1012 v.val = sk->sk_mark;
1013 break;
1014
1015 case SO_RXQ_OVFL:
1016 v.val = sock_flag(sk, SOCK_RXQ_OVFL);
1017 break;
1018
1019 case SO_WIFI_STATUS:
1020 v.val = sock_flag(sk, SOCK_WIFI_STATUS);
1021 break;
1022
1023 case SO_PEEK_OFF:
1024 if (!sock->ops->set_peek_off)
1025 return -EOPNOTSUPP;
1026
1027 v.val = sk->sk_peek_off;
1028 break;
1029 case SO_NOFCS:
1030 v.val = sock_flag(sk, SOCK_NOFCS);
1031 break;
1032 default:
1033 return -ENOPROTOOPT;
1034 }
1035
1036 if (len > lv)
1037 len = lv;
1038 if (copy_to_user(optval, &v, len))
1039 return -EFAULT;
1040lenout:
1041 if (put_user(len, optlen))
1042 return -EFAULT;
1043 return 0;
1044}
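
/*
 * Illustrative userspace sketch (assumes _GNU_SOURCE for struct ucred;
 * meaningful on a connected AF_UNIX socket): reading the peer credentials
 * filled in by cred_to_ucred() above.
 *
 *	#define _GNU_SOURCE
 *	#include <stdio.h>
 *	#include <sys/socket.h>
 *
 *	void show_peer(int fd)
 *	{
 *		struct ucred peer;
 *		socklen_t len = sizeof(peer);
 *
 *		if (getsockopt(fd, SOL_SOCKET, SO_PEERCRED, &peer, &len) == 0)
 *			printf("pid=%d uid=%d gid=%d\n",
 *			       peer.pid, peer.uid, peer.gid);
 *	}
 */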
1045
1046/*
1047 * Initialize an sk_lock.
1048 *
1049 * (We also register the sk_lock with the lock validator.)
1050 */
1051static inline void sock_lock_init(struct sock *sk)
1052{
1053 sock_lock_init_class_and_name(sk,
1054 af_family_slock_key_strings[sk->sk_family],
1055 af_family_slock_keys + sk->sk_family,
1056 af_family_key_strings[sk->sk_family],
1057 af_family_keys + sk->sk_family);
1058}
1059
1060/*
1061 * Copy all fields from osk to nsk, but nsk->sk_refcnt must not change yet,
1062 * even temporarily, because of RCU lookups. sk_node should also be left as-is.
1063 * We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end.
1064 */
1065static void sock_copy(struct sock *nsk, const struct sock *osk)
1066{
1067#ifdef CONFIG_SECURITY_NETWORK
1068 void *sptr = nsk->sk_security;
1069#endif
1070 memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));
1071
1072 memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
1073 osk->sk_prot->obj_size - offsetof(struct sock, sk_dontcopy_end));
1074
1075#ifdef CONFIG_SECURITY_NETWORK
1076 nsk->sk_security = sptr;
1077 security_sk_clone(osk, nsk);
1078#endif
1079}
1080
1081/*
1082 * Caches using SLAB_DESTROY_BY_RCU should leave the .next pointer of nulls
1083 * nodes unmodified. Special care is taken when initializing the object to zero.
1084 */
1085static inline void sk_prot_clear_nulls(struct sock *sk, int size)
1086{
1087 if (offsetof(struct sock, sk_node.next) != 0)
1088 memset(sk, 0, offsetof(struct sock, sk_node.next));
1089 memset(&sk->sk_node.pprev, 0,
1090 size - offsetof(struct sock, sk_node.pprev));
1091}
1092
1093void sk_prot_clear_portaddr_nulls(struct sock *sk, int size)
1094{
1095 unsigned long nulls1, nulls2;
1096
1097 nulls1 = offsetof(struct sock, __sk_common.skc_node.next);
1098 nulls2 = offsetof(struct sock, __sk_common.skc_portaddr_node.next);
1099 if (nulls1 > nulls2)
1100 swap(nulls1, nulls2);
1101
1102 if (nulls1 != 0)
1103 memset((char *)sk, 0, nulls1);
1104 memset((char *)sk + nulls1 + sizeof(void *), 0,
1105 nulls2 - nulls1 - sizeof(void *));
1106 memset((char *)sk + nulls2 + sizeof(void *), 0,
1107 size - nulls2 - sizeof(void *));
1108}
1109EXPORT_SYMBOL(sk_prot_clear_portaddr_nulls);
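
/*
 * Worked picture of the helper above, with nulls1 < nulls2 after the
 * swap: the object is cleared in three chunks, skipping sizeof(void *)
 * bytes at each list-node 'next' pointer so that RCU readers walking a
 * nulls list never observe them zeroed:
 *
 *	[0 .................. nulls1)			zeroed
 *	[nulls1, nulls1 + sizeof(void *))		'next' preserved
 *	[nulls1 + sizeof(void *) ... nulls2)		zeroed
 *	[nulls2, nulls2 + sizeof(void *))		'next' preserved
 *	[nulls2 + sizeof(void *) ....... size)		zeroed
 */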
1110
1111static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
1112 int family)
1113{
1114 struct sock *sk;
1115 struct kmem_cache *slab;
1116
1117 slab = prot->slab;
1118 if (slab != NULL) {
1119 sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
1120 if (!sk)
1121 return sk;
1122 if (priority & __GFP_ZERO) {
1123 if (prot->clear_sk)
1124 prot->clear_sk(sk, prot->obj_size);
1125 else
1126 sk_prot_clear_nulls(sk, prot->obj_size);
1127 }
1128 } else
1129 sk = kmalloc(prot->obj_size, priority);
1130
1131 if (sk != NULL) {
1132 kmemcheck_annotate_bitfield(sk, flags);
1133
1134 if (security_sk_alloc(sk, family, priority))
1135 goto out_free;
1136
1137 if (!try_module_get(prot->owner))
1138 goto out_free_sec;
1139 sk_tx_queue_clear(sk);
1140 }
1141
1142 return sk;
1143
1144out_free_sec:
1145 security_sk_free(sk);
1146out_free:
1147 if (slab != NULL)
1148 kmem_cache_free(slab, sk);
1149 else
1150 kfree(sk);
1151 return NULL;
1152}
1153
1154static void sk_prot_free(struct proto *prot, struct sock *sk)
1155{
1156 struct kmem_cache *slab;
1157 struct module *owner;
1158
1159 owner = prot->owner;
1160 slab = prot->slab;
1161
1162 security_sk_free(sk);
1163 if (slab != NULL)
1164 kmem_cache_free(slab, sk);
1165 else
1166 kfree(sk);
1167 module_put(owner);
1168}
1169
1170#ifdef CONFIG_CGROUPS
1171void sock_update_classid(struct sock *sk)
1172{
1173 u32 classid;
1174
1175 rcu_read_lock(); /* doing current task, which cannot vanish. */
1176 classid = task_cls_classid(current);
1177 rcu_read_unlock();
1178 if (classid && classid != sk->sk_classid)
1179 sk->sk_classid = classid;
1180}
1181EXPORT_SYMBOL(sock_update_classid);
1182
1183void sock_update_netprioidx(struct sock *sk)
1184{
1185 if (in_interrupt())
1186 return;
1187
1188 sk->sk_cgrp_prioidx = task_netprioidx(current);
1189}
1190EXPORT_SYMBOL_GPL(sock_update_netprioidx);
1191#endif
1192
1193/**
1194 * sk_alloc - All socket objects are allocated here
1195 * @net: the applicable net namespace
1196 * @family: protocol family
1197 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1198 * @prot: struct proto associated with this new sock instance
1199 */
1200struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
1201 struct proto *prot)
1202{
1203 struct sock *sk;
1204
1205 sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
1206 if (sk) {
1207 sk->sk_family = family;
1208 /*
1209 * See comment in struct sock definition to understand
1210 * why we need sk_prot_creator -acme
1211 */
1212 sk->sk_prot = sk->sk_prot_creator = prot;
1213 sock_lock_init(sk);
1214 sock_net_set(sk, get_net(net));
1215 atomic_set(&sk->sk_wmem_alloc, 1);
1216
1217 sock_update_classid(sk);
1218 sock_update_netprioidx(sk);
1219 }
1220
1221 return sk;
1222}
1223EXPORT_SYMBOL(sk_alloc);
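
/*
 * Illustrative sketch (hypothetical protocol; PF_MYPROTO and
 * myproto_proto are placeholders): how a family's create() hook
 * typically pairs sk_alloc() with sock_init_data() further below.
 *
 *	static int myproto_create(struct net *net, struct socket *sock,
 *				  int protocol, int kern)
 *	{
 *		struct sock *sk;
 *
 *		sk = sk_alloc(net, PF_MYPROTO, GFP_KERNEL, &myproto_proto);
 *		if (!sk)
 *			return -ENOBUFS;
 *		sock_init_data(sock, sk);
 *		return 0;
 *	}
 */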
1224
1225static void __sk_free(struct sock *sk)
1226{
1227 struct sk_filter *filter;
1228
1229 if (sk->sk_destruct)
1230 sk->sk_destruct(sk);
1231
1232 filter = rcu_dereference_check(sk->sk_filter,
1233 atomic_read(&sk->sk_wmem_alloc) == 0);
1234 if (filter) {
1235 sk_filter_uncharge(sk, filter);
1236 RCU_INIT_POINTER(sk->sk_filter, NULL);
1237 }
1238
1239 sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);
1240
1241 if (atomic_read(&sk->sk_omem_alloc))
1242 pr_debug("%s: optmem leakage (%d bytes) detected\n",
1243 __func__, atomic_read(&sk->sk_omem_alloc));
1244
1245 if (sk->sk_peer_cred)
1246 put_cred(sk->sk_peer_cred);
1247 put_pid(sk->sk_peer_pid);
1248 put_net(sock_net(sk));
1249 sk_prot_free(sk->sk_prot_creator, sk);
1250}
1251
1252void sk_free(struct sock *sk)
1253{
1254 /*
1255 * We subtract one from sk_wmem_alloc so we can tell whether
1256 * some packets are still in some tx queue.
1257 * If the count is not zero, sock_wfree() will call __sk_free(sk) later.
1258 */
1259 if (atomic_dec_and_test(&sk->sk_wmem_alloc))
1260 __sk_free(sk);
1261}
1262EXPORT_SYMBOL(sk_free);
1263
1264/*
1265 * The last sock_put should drop a reference to sk->sk_net. That reference
1266 * has already been dropped in sk_change_net, and taking a reference to a
1267 * stopping namespace is not an option.
1268 * Take a reference to the socket to remove it from the hash while still
1269 * _alive_, and after that destroy it in the context of init_net.
1270 */
1271void sk_release_kernel(struct sock *sk)
1272{
1273 if (sk == NULL || sk->sk_socket == NULL)
1274 return;
1275
1276 sock_hold(sk);
1277 sock_release(sk->sk_socket);
1278 release_net(sock_net(sk));
1279 sock_net_set(sk, get_net(&init_net));
1280 sock_put(sk);
1281}
1282EXPORT_SYMBOL(sk_release_kernel);
1283
1284static void sk_update_clone(const struct sock *sk, struct sock *newsk)
1285{
1286 if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
1287 sock_update_memcg(newsk);
1288}
1289
1290/**
1291 * sk_clone_lock - clone a socket, and lock its clone
1292 * @sk: the socket to clone
1293 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1294 *
1295 * Caller must unlock socket even in error path (bh_unlock_sock(newsk))
1296 */
1297struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
1298{
1299 struct sock *newsk;
1300
1301 newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
1302 if (newsk != NULL) {
1303 struct sk_filter *filter;
1304
1305 sock_copy(newsk, sk);
1306
1307 /* SANITY */
1308 get_net(sock_net(newsk));
1309 sk_node_init(&newsk->sk_node);
1310 sock_lock_init(newsk);
1311 bh_lock_sock(newsk);
1312 newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
1313 newsk->sk_backlog.len = 0;
1314
1315 atomic_set(&newsk->sk_rmem_alloc, 0);
1316 /*
1317 * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
1318 */
1319 atomic_set(&newsk->sk_wmem_alloc, 1);
1320 atomic_set(&newsk->sk_omem_alloc, 0);
1321 skb_queue_head_init(&newsk->sk_receive_queue);
1322 skb_queue_head_init(&newsk->sk_write_queue);
1323#ifdef CONFIG_NET_DMA
1324 skb_queue_head_init(&newsk->sk_async_wait_queue);
1325#endif
1326
1327 spin_lock_init(&newsk->sk_dst_lock);
1328 rwlock_init(&newsk->sk_callback_lock);
1329 lockdep_set_class_and_name(&newsk->sk_callback_lock,
1330 af_callback_keys + newsk->sk_family,
1331 af_family_clock_key_strings[newsk->sk_family]);
1332
1333 newsk->sk_dst_cache = NULL;
1334 newsk->sk_wmem_queued = 0;
1335 newsk->sk_forward_alloc = 0;
1336 newsk->sk_send_head = NULL;
1337 newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
1338
1339 sock_reset_flag(newsk, SOCK_DONE);
1340 skb_queue_head_init(&newsk->sk_error_queue);
1341
1342 filter = rcu_dereference_protected(newsk->sk_filter, 1);
1343 if (filter != NULL)
1344 sk_filter_charge(newsk, filter);
1345
1346 if (unlikely(xfrm_sk_clone_policy(newsk))) {
1347 /* It is still a raw copy of the parent, so invalidate
1348  * the destructor and do a plain sk_free() */
1349 newsk->sk_destruct = NULL;
1350 bh_unlock_sock(newsk);
1351 sk_free(newsk);
1352 newsk = NULL;
1353 goto out;
1354 }
1355
1356 newsk->sk_err = 0;
1357 newsk->sk_priority = 0;
1358 /*
1359 * Before updating sk_refcnt, we must commit prior changes to memory
1360 * (Documentation/RCU/rculist_nulls.txt for details)
1361 */
1362 smp_wmb();
1363 atomic_set(&newsk->sk_refcnt, 2);
1364
1365 /*
1366 * Increment the counter in the same struct proto as the master
1367 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
1368 * is the same as sk->sk_prot->socks, as this field was copied
1369 * with memcpy).
1370 *
1371 * This _changes_ the previous behaviour, where
1372 * tcp_create_openreq_child was always incrementing the
1373 * equivalent of tcp_prot->socks (inet_sock_nr), so this has
1374 * to be taken into account in all callers. -acme
1375 */
1376 sk_refcnt_debug_inc(newsk);
1377 sk_set_socket(newsk, NULL);
1378 newsk->sk_wq = NULL;
1379
1380 sk_update_clone(sk, newsk);
1381
1382 if (newsk->sk_prot->sockets_allocated)
1383 sk_sockets_allocated_inc(newsk);
1384
1385 if (newsk->sk_flags & SK_FLAGS_TIMESTAMP)
1386 net_enable_timestamp();
1387 }
1388out:
1389 return newsk;
1390}
1391EXPORT_SYMBOL_GPL(sk_clone_lock);
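
/*
 * Hedged usage sketch for the above: the clone comes back bh-locked and
 * with a reference count of 2, so the caller must unlock it once its
 * protocol-specific setup is done:
 *
 *	newsk = sk_clone_lock(sk, GFP_ATOMIC);
 *	if (newsk) {
 *		// ... protocol-specific initialization of newsk ...
 *		bh_unlock_sock(newsk);
 *	}
 */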
1392
1393void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
1394{
1395 __sk_dst_set(sk, dst);
1396 sk->sk_route_caps = dst->dev->features;
1397 if (sk->sk_route_caps & NETIF_F_GSO)
1398 sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
1399 sk->sk_route_caps &= ~sk->sk_route_nocaps;
1400 if (sk_can_gso(sk)) {
1401 if (dst->header_len) {
1402 sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
1403 } else {
1404 sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
1405 sk->sk_gso_max_size = dst->dev->gso_max_size;
1406 sk->sk_gso_max_segs = dst->dev->gso_max_segs;
1407 }
1408 }
1409}
1410EXPORT_SYMBOL_GPL(sk_setup_caps);
1411
1412void __init sk_init(void)
1413{
1414 if (totalram_pages <= 4096) {
1415 sysctl_wmem_max = 32767;
1416 sysctl_rmem_max = 32767;
1417 sysctl_wmem_default = 32767;
1418 sysctl_rmem_default = 32767;
1419 } else if (totalram_pages >= 131072) {
1420 sysctl_wmem_max = 131071;
1421 sysctl_rmem_max = 131071;
1422 }
1423}
1424
1425/*
1426 * Simple resource managers for sockets.
1427 */
1428
1429
1430/*
1431 * Write buffer destructor automatically called from kfree_skb.
1432 */
1433void sock_wfree(struct sk_buff *skb)
1434{
1435 struct sock *sk = skb->sk;
1436 unsigned int len = skb->truesize;
1437
1438 if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
1439 /*
1440 * Keep a reference on sk_wmem_alloc; it will be released
1441 * after the sk_write_space() call.
1442 */
1443 atomic_sub(len - 1, &sk->sk_wmem_alloc);
1444 sk->sk_write_space(sk);
1445 len = 1;
1446 }
1447 /*
1448 * if sk_wmem_alloc reaches 0, we must finish what sk_free()
1449 * could not do because of in-flight packets
1450 */
1451 if (atomic_sub_and_test(len, &sk->sk_wmem_alloc))
1452 __sk_free(sk);
1453}
1454EXPORT_SYMBOL(sock_wfree);
1455
1456/*
1457 * Read buffer destructor automatically called from kfree_skb.
1458 */
1459void sock_rfree(struct sk_buff *skb)
1460{
1461 struct sock *sk = skb->sk;
1462 unsigned int len = skb->truesize;
1463
1464 atomic_sub(len, &sk->sk_rmem_alloc);
1465 sk_mem_uncharge(sk, len);
1466}
1467EXPORT_SYMBOL(sock_rfree);
1468
1469
1470int sock_i_uid(struct sock *sk)
1471{
1472 int uid;
1473
1474 read_lock_bh(&sk->sk_callback_lock);
1475 uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0;
1476 read_unlock_bh(&sk->sk_callback_lock);
1477 return uid;
1478}
1479EXPORT_SYMBOL(sock_i_uid);
1480
1481unsigned long sock_i_ino(struct sock *sk)
1482{
1483 unsigned long ino;
1484
1485 read_lock_bh(&sk->sk_callback_lock);
1486 ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
1487 read_unlock_bh(&sk->sk_callback_lock);
1488 return ino;
1489}
1490EXPORT_SYMBOL(sock_i_ino);
1491
1492/*
1493 * Allocate a skb from the socket's send buffer.
1494 */
1495struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
1496 gfp_t priority)
1497{
1498 if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
1499 struct sk_buff *skb = alloc_skb(size, priority);
1500 if (skb) {
1501 skb_set_owner_w(skb, sk);
1502 return skb;
1503 }
1504 }
1505 return NULL;
1506}
1507EXPORT_SYMBOL(sock_wmalloc);
1508
1509/*
1510 * Allocate a skb from the socket's receive buffer.
1511 */
1512struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
1513 gfp_t priority)
1514{
1515 if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
1516 struct sk_buff *skb = alloc_skb(size, priority);
1517 if (skb) {
1518 skb_set_owner_r(skb, sk);
1519 return skb;
1520 }
1521 }
1522 return NULL;
1523}
1524
1525/*
1526 * Allocate a memory block from the socket's option memory buffer.
1527 */
1528void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
1529{
1530 if ((unsigned int)size <= sysctl_optmem_max &&
1531 atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
1532 void *mem;
1533 /* First do the add, to avoid the race if kmalloc
1534 * might sleep.
1535 */
1536 atomic_add(size, &sk->sk_omem_alloc);
1537 mem = kmalloc(size, priority);
1538 if (mem)
1539 return mem;
1540 atomic_sub(size, &sk->sk_omem_alloc);
1541 }
1542 return NULL;
1543}
1544EXPORT_SYMBOL(sock_kmalloc);
1545
1546/*
1547 * Free an option memory block.
1548 */
1549void sock_kfree_s(struct sock *sk, void *mem, int size)
1550{
1551 kfree(mem);
1552 atomic_sub(size, &sk->sk_omem_alloc);
1553}
1554EXPORT_SYMBOL(sock_kfree_s);
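
/*
 * Illustrative pairing of the two option-memory helpers above (struct
 * my_opt is a placeholder). The size passed to sock_kfree_s() must match
 * the size charged to sk_omem_alloc by sock_kmalloc():
 *
 *	struct my_opt *opt = sock_kmalloc(sk, sizeof(*opt), GFP_KERNEL);
 *
 *	if (!opt)
 *		return -ENOBUFS;
 *	// ... use opt ...
 *	sock_kfree_s(sk, opt, sizeof(*opt));
 */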
1555
1556 /* This is almost wait_for_tcp_memory minus release_sock/lock_sock.
1557    I think these locks should be removed for datagram sockets.
1558 */
1559static long sock_wait_for_wmem(struct sock *sk, long timeo)
1560{
1561 DEFINE_WAIT(wait);
1562
1563 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
1564 for (;;) {
1565 if (!timeo)
1566 break;
1567 if (signal_pending(current))
1568 break;
1569 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1570 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1571 if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
1572 break;
1573 if (sk->sk_shutdown & SEND_SHUTDOWN)
1574 break;
1575 if (sk->sk_err)
1576 break;
1577 timeo = schedule_timeout(timeo);
1578 }
1579 finish_wait(sk_sleep(sk), &wait);
1580 return timeo;
1581}
1582
1583
1584/*
1585 * Generic send/receive buffer handlers
1586 */
1587
1588struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
1589 unsigned long data_len, int noblock,
1590 int *errcode)
1591{
1592 struct sk_buff *skb;
1593 gfp_t gfp_mask;
1594 long timeo;
1595 int err;
1596 int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
1597
1598 err = -EMSGSIZE;
1599 if (npages > MAX_SKB_FRAGS)
1600 goto failure;
1601
1602 gfp_mask = sk->sk_allocation;
1603 if (gfp_mask & __GFP_WAIT)
1604 gfp_mask |= __GFP_REPEAT;
1605
1606 timeo = sock_sndtimeo(sk, noblock);
1607 while (1) {
1608 err = sock_error(sk);
1609 if (err != 0)
1610 goto failure;
1611
1612 err = -EPIPE;
1613 if (sk->sk_shutdown & SEND_SHUTDOWN)
1614 goto failure;
1615
1616 if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
1617 skb = alloc_skb(header_len, gfp_mask);
1618 if (skb) {
1619 int i;
1620
1621 /* No pages, we're done... */
1622 if (!data_len)
1623 break;
1624
1625 skb->truesize += data_len;
1626 skb_shinfo(skb)->nr_frags = npages;
1627 for (i = 0; i < npages; i++) {
1628 struct page *page;
1629
1630 page = alloc_pages(sk->sk_allocation, 0);
1631 if (!page) {
1632 err = -ENOBUFS;
1633 skb_shinfo(skb)->nr_frags = i;
1634 kfree_skb(skb);
1635 goto failure;
1636 }
1637
1638 __skb_fill_page_desc(skb, i,
1639 page, 0,
1640 (data_len >= PAGE_SIZE ?
1641 PAGE_SIZE :
1642 data_len));
1643 data_len -= PAGE_SIZE;
1644 }
1645
1646 /* Full success... */
1647 break;
1648 }
1649 err = -ENOBUFS;
1650 goto failure;
1651 }
1652 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
1653 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1654 err = -EAGAIN;
1655 if (!timeo)
1656 goto failure;
1657 if (signal_pending(current))
1658 goto interrupted;
1659 timeo = sock_wait_for_wmem(sk, timeo);
1660 }
1661
1662 skb_set_owner_w(skb, sk);
1663 return skb;
1664
1665interrupted:
1666 err = sock_intr_errno(timeo);
1667failure:
1668 *errcode = err;
1669 return NULL;
1670}
1671EXPORT_SYMBOL(sock_alloc_send_pskb);
1672
1673struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
1674 int noblock, int *errcode)
1675{
1676 return sock_alloc_send_pskb(sk, size, 0, noblock, errcode);
1677}
1678EXPORT_SYMBOL(sock_alloc_send_skb);
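
/*
 * Illustrative sketch (hypothetical protocol; MY_HDR_LEN is a placeholder
 * header reservation): a datagram send path using sock_alloc_send_skb(),
 * which sleeps for sndbuf space within the socket's send timeout and
 * reports failure through errcode.
 *
 *	static int myproto_xmit(struct sock *sk, size_t len, int noblock)
 *	{
 *		struct sk_buff *skb;
 *		int err;
 *
 *		skb = sock_alloc_send_skb(sk, len + MY_HDR_LEN, noblock, &err);
 *		if (!skb)
 *			return err;	// -EAGAIN, -EPIPE, -ERESTARTSYS, ...
 *		// ... build headers, copy payload, hand off to the device ...
 *		return 0;
 *	}
 */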
1679
1680static void __lock_sock(struct sock *sk)
1681 __releases(&sk->sk_lock.slock)
1682 __acquires(&sk->sk_lock.slock)
1683{
1684 DEFINE_WAIT(wait);
1685
1686 for (;;) {
1687 prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
1688 TASK_UNINTERRUPTIBLE);
1689 spin_unlock_bh(&sk->sk_lock.slock);
1690 schedule();
1691 spin_lock_bh(&sk->sk_lock.slock);
1692 if (!sock_owned_by_user(sk))
1693 break;
1694 }
1695 finish_wait(&sk->sk_lock.wq, &wait);
1696}
1697
1698static void __release_sock(struct sock *sk)
1699 __releases(&sk->sk_lock.slock)
1700 __acquires(&sk->sk_lock.slock)
1701{
1702 struct sk_buff *skb = sk->sk_backlog.head;
1703
1704 do {
1705 sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
1706 bh_unlock_sock(sk);
1707
1708 do {
1709 struct sk_buff *next = skb->next;
1710
1711 prefetch(next);
1712 WARN_ON_ONCE(skb_dst_is_noref(skb));
1713 skb->next = NULL;
1714 sk_backlog_rcv(sk, skb);
1715
1716 /*
1717 * We are in process context here with softirqs
1718 * disabled, use cond_resched_softirq() to preempt.
1719 * This is safe to do because we've taken the backlog
1720 * queue private:
1721 */
1722 cond_resched_softirq();
1723
1724 skb = next;
1725 } while (skb != NULL);
1726
1727 bh_lock_sock(sk);
1728 } while ((skb = sk->sk_backlog.head) != NULL);
1729
1730 /*
1731 * Doing the zeroing here guarantees we cannot loop forever
1732 * while a wild producer attempts to flood us.
1733 */
1734 sk->sk_backlog.len = 0;
1735}
1736
1737/**
1738 * sk_wait_data - wait for data to arrive at sk_receive_queue
1739 * @sk: sock to wait on
1740 * @timeo: for how long
1741 *
1742 * Now socket state including sk->sk_err is changed only under lock,
1743 * hence we may omit checks after joining the wait queue.
1744 * We check the receive queue before schedule() only as an optimization;
1745 * it is very likely that release_sock() added new data.
1746 */
1747int sk_wait_data(struct sock *sk, long *timeo)
1748{
1749 int rc;
1750 DEFINE_WAIT(wait);
1751
1752 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1753 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1754 rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
1755 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1756 finish_wait(sk_sleep(sk), &wait);
1757 return rc;
1758}
1759EXPORT_SYMBOL(sk_wait_data);
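
/*
 * Hedged sketch of the canonical receive loop around sk_wait_data();
 * the caller holds the socket lock, which sk_wait_event() drops while
 * sleeping and re-takes before returning (error checks abbreviated):
 *
 *	long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 *
 *	while (skb_queue_empty(&sk->sk_receive_queue)) {
 *		if (!timeo)
 *			return -EAGAIN;
 *		if (signal_pending(current))
 *			return sock_intr_errno(timeo);
 *		sk_wait_data(sk, &timeo);
 *	}
 */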
1760
1761/**
1762 * __sk_mem_schedule - increase sk_forward_alloc and memory_allocated
1763 * @sk: socket
1764 * @size: memory size to allocate
1765 * @kind: allocation type
1766 *
1767 * If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
1768 * rmem allocation. This function assumes that protocols which have
1769 * memory_pressure use sk_wmem_queued as write buffer accounting.
1770 */
1771int __sk_mem_schedule(struct sock *sk, int size, int kind)
1772{
1773 struct proto *prot = sk->sk_prot;
1774 int amt = sk_mem_pages(size);
1775 long allocated;
1776 int parent_status = UNDER_LIMIT;
1777
1778 sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
1779
1780 allocated = sk_memory_allocated_add(sk, amt, &parent_status);
1781
1782 /* Under limit. */
1783 if (parent_status == UNDER_LIMIT &&
1784 allocated <= sk_prot_mem_limits(sk, 0)) {
1785 sk_leave_memory_pressure(sk);
1786 return 1;
1787 }
1788
1789 /* Under pressure. (we or our parents) */
1790 if ((parent_status > SOFT_LIMIT) ||
1791 allocated > sk_prot_mem_limits(sk, 1))
1792 sk_enter_memory_pressure(sk);
1793
1794 /* Over hard limit (we or our parents) */
1795 if ((parent_status == OVER_LIMIT) ||
1796 (allocated > sk_prot_mem_limits(sk, 2)))
1797 goto suppress_allocation;
1798
1799 /* guarantee minimum buffer size under pressure */
1800 if (kind == SK_MEM_RECV) {
1801 if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
1802 return 1;
1803
1804 } else { /* SK_MEM_SEND */
1805 if (sk->sk_type == SOCK_STREAM) {
1806 if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
1807 return 1;
1808 } else if (atomic_read(&sk->sk_wmem_alloc) <
1809 prot->sysctl_wmem[0])
1810 return 1;
1811 }
1812
1813 if (sk_has_memory_pressure(sk)) {
1814 int alloc;
1815
1816 if (!sk_under_memory_pressure(sk))
1817 return 1;
1818 alloc = sk_sockets_allocated_read_positive(sk);
1819 if (sk_prot_mem_limits(sk, 2) > alloc *
1820 sk_mem_pages(sk->sk_wmem_queued +
1821 atomic_read(&sk->sk_rmem_alloc) +
1822 sk->sk_forward_alloc))
1823 return 1;
1824 }
1825
1826suppress_allocation:
1827
1828 if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
1829 sk_stream_moderate_sndbuf(sk);
1830
1831 /* Fail only if socket is _under_ its sndbuf.
1832 * In this case we cannot block, so that we have to fail.
1833 */
1834 if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
1835 return 1;
1836 }
1837
1838 trace_sock_exceed_buf_limit(sk, prot, allocated);
1839
1840 /* Alas. Undo changes. */
1841 sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
1842
1843 sk_memory_allocated_sub(sk, amt);
1844
1845 return 0;
1846}
1847EXPORT_SYMBOL(__sk_mem_schedule);
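
/*
 * Worked example of the accounting above, assuming SK_MEM_QUANTUM is a
 * 4096-byte page: charging size = 6000 gives amt = sk_mem_pages(6000) = 2,
 * so sk_forward_alloc grows by 8192 bytes and memory_allocated by 2 pages.
 * The 2192 bytes not consumed stay in sk_forward_alloc as a prepaid
 * reserve; __sk_mem_reclaim() below returns whole unused quanta to the
 * global pool.
 */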
1848
1849/**
1850 * __sk_reclaim - reclaim memory_allocated
1851 * @sk: socket
1852 */
1853void __sk_mem_reclaim(struct sock *sk)
1854{
1855 sk_memory_allocated_sub(sk,
1856 sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT);
1857 sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;
1858
1859 if (sk_under_memory_pressure(sk) &&
1860 (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
1861 sk_leave_memory_pressure(sk);
1862}
1863EXPORT_SYMBOL(__sk_mem_reclaim);
1864
1865
1866/*
1867 * Set of default routines for initialising struct proto_ops when
1868 * the protocol does not support a particular function. In certain
1869 * cases where it makes no sense for a protocol to have a "do nothing"
1870 * function, some default processing is provided.
1871 */
1872
1873int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
1874{
1875 return -EOPNOTSUPP;
1876}
1877EXPORT_SYMBOL(sock_no_bind);
1878
1879int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
1880 int len, int flags)
1881{
1882 return -EOPNOTSUPP;
1883}
1884EXPORT_SYMBOL(sock_no_connect);
1885
1886int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
1887{
1888 return -EOPNOTSUPP;
1889}
1890EXPORT_SYMBOL(sock_no_socketpair);
1891
1892int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
1893{
1894 return -EOPNOTSUPP;
1895}
1896EXPORT_SYMBOL(sock_no_accept);
1897
1898int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
1899 int *len, int peer)
1900{
1901 return -EOPNOTSUPP;
1902}
1903EXPORT_SYMBOL(sock_no_getname);
1904
1905unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
1906{
1907 return 0;
1908}
1909EXPORT_SYMBOL(sock_no_poll);
1910
1911int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1912{
1913 return -EOPNOTSUPP;
1914}
1915EXPORT_SYMBOL(sock_no_ioctl);
1916
1917int sock_no_listen(struct socket *sock, int backlog)
1918{
1919 return -EOPNOTSUPP;
1920}
1921EXPORT_SYMBOL(sock_no_listen);
1922
1923int sock_no_shutdown(struct socket *sock, int how)
1924{
1925 return -EOPNOTSUPP;
1926}
1927EXPORT_SYMBOL(sock_no_shutdown);
1928
1929int sock_no_setsockopt(struct socket *sock, int level, int optname,
1930 char __user *optval, unsigned int optlen)
1931{
1932 return -EOPNOTSUPP;
1933}
1934EXPORT_SYMBOL(sock_no_setsockopt);
1935
1936int sock_no_getsockopt(struct socket *sock, int level, int optname,
1937 char __user *optval, int __user *optlen)
1938{
1939 return -EOPNOTSUPP;
1940}
1941EXPORT_SYMBOL(sock_no_getsockopt);
1942
1943int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
1944 size_t len)
1945{
1946 return -EOPNOTSUPP;
1947}
1948EXPORT_SYMBOL(sock_no_sendmsg);
1949
1950int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
1951 size_t len, int flags)
1952{
1953 return -EOPNOTSUPP;
1954}
1955EXPORT_SYMBOL(sock_no_recvmsg);
1956
1957int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
1958{
1959 /* Mirror missing mmap method error code */
1960 return -ENODEV;
1961}
1962EXPORT_SYMBOL(sock_no_mmap);
1963
1964ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
1965{
1966 ssize_t res;
1967 struct msghdr msg = {.msg_flags = flags};
1968 struct kvec iov;
1969 char *kaddr = kmap(page);
1970 iov.iov_base = kaddr + offset;
1971 iov.iov_len = size;
1972 res = kernel_sendmsg(sock, &msg, &iov, 1, size);
1973 kunmap(page);
1974 return res;
1975}
1976EXPORT_SYMBOL(sock_no_sendpage);
1977
1978/*
1979 * Default Socket Callbacks
1980 */
1981
1982static void sock_def_wakeup(struct sock *sk)
1983{
1984 struct socket_wq *wq;
1985
1986 rcu_read_lock();
1987 wq = rcu_dereference(sk->sk_wq);
1988 if (wq_has_sleeper(wq))
1989 wake_up_interruptible_all(&wq->wait);
1990 rcu_read_unlock();
1991}
1992
1993static void sock_def_error_report(struct sock *sk)
1994{
1995 struct socket_wq *wq;
1996
1997 rcu_read_lock();
1998 wq = rcu_dereference(sk->sk_wq);
1999 if (wq_has_sleeper(wq))
2000 wake_up_interruptible_poll(&wq->wait, POLLERR);
2001 sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
2002 rcu_read_unlock();
2003}
2004
2005static void sock_def_readable(struct sock *sk, int len)
2006{
2007 struct socket_wq *wq;
2008
2009 rcu_read_lock();
2010 wq = rcu_dereference(sk->sk_wq);
2011 if (wq_has_sleeper(wq))
2012 wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLPRI |
2013 POLLRDNORM | POLLRDBAND);
2014 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
2015 rcu_read_unlock();
2016}
2017
2018static void sock_def_write_space(struct sock *sk)
2019{
2020 struct socket_wq *wq;
2021
2022 rcu_read_lock();
2023
2024 /* Do not wake up a writer until he can make "significant"
2025 * progress. --DaveM
2026 */
2027 if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
2028 wq = rcu_dereference(sk->sk_wq);
2029 if (wq_has_sleeper(wq))
2030 wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
2031 POLLWRNORM | POLLWRBAND);
2032
2033 /* Should agree with poll, otherwise some programs break */
2034 if (sock_writeable(sk))
2035 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
2036 }
2037
2038 rcu_read_unlock();
2039}
2040
2041static void sock_def_destruct(struct sock *sk)
2042{
2043 kfree(sk->sk_protinfo);
2044}
2045
2046void sk_send_sigurg(struct sock *sk)
2047{
2048 if (sk->sk_socket && sk->sk_socket->file)
2049 if (send_sigurg(&sk->sk_socket->file->f_owner))
2050 sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
2051}
2052EXPORT_SYMBOL(sk_send_sigurg);
2053
2054void sk_reset_timer(struct sock *sk, struct timer_list* timer,
2055 unsigned long expires)
2056{
2057 if (!mod_timer(timer, expires))
2058 sock_hold(sk);
2059}
2060EXPORT_SYMBOL(sk_reset_timer);
2061
2062void sk_stop_timer(struct sock *sk, struct timer_list* timer)
2063{
2064 if (timer_pending(timer) && del_timer(timer))
2065 __sock_put(sk);
2066}
2067EXPORT_SYMBOL(sk_stop_timer);
2068
2069void sock_init_data(struct socket *sock, struct sock *sk)
2070{
2071 skb_queue_head_init(&sk->sk_receive_queue);
2072 skb_queue_head_init(&sk->sk_write_queue);
2073 skb_queue_head_init(&sk->sk_error_queue);
2074#ifdef CONFIG_NET_DMA
2075 skb_queue_head_init(&sk->sk_async_wait_queue);
2076#endif
2077
2078 sk->sk_send_head = NULL;
2079
2080 init_timer(&sk->sk_timer);
2081
2082 sk->sk_allocation = GFP_KERNEL;
2083 sk->sk_rcvbuf = sysctl_rmem_default;
2084 sk->sk_sndbuf = sysctl_wmem_default;
2085 sk->sk_state = TCP_CLOSE;
2086 sk_set_socket(sk, sock);
2087
2088 sock_set_flag(sk, SOCK_ZAPPED);
2089
2090 if (sock) {
2091 sk->sk_type = sock->type;
2092 sk->sk_wq = sock->wq;
2093 sock->sk = sk;
2094 } else
2095 sk->sk_wq = NULL;
2096
2097 spin_lock_init(&sk->sk_dst_lock);
2098 rwlock_init(&sk->sk_callback_lock);
2099 lockdep_set_class_and_name(&sk->sk_callback_lock,
2100 af_callback_keys + sk->sk_family,
2101 af_family_clock_key_strings[sk->sk_family]);
2102
2103 sk->sk_state_change = sock_def_wakeup;
2104 sk->sk_data_ready = sock_def_readable;
2105 sk->sk_write_space = sock_def_write_space;
2106 sk->sk_error_report = sock_def_error_report;
2107 sk->sk_destruct = sock_def_destruct;
2108
2109 sk->sk_sndmsg_page = NULL;
2110 sk->sk_sndmsg_off = 0;
2111 sk->sk_peek_off = -1;
2112
2113 sk->sk_peer_pid = NULL;
2114 sk->sk_peer_cred = NULL;
2115 sk->sk_write_pending = 0;
2116 sk->sk_rcvlowat = 1;
2117 sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
2118 sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
2119
2120 sk->sk_stamp = ktime_set(-1L, 0);
2121
2122 /*
2123 * Before updating sk_refcnt, we must commit prior changes to memory
2124 * (Documentation/RCU/rculist_nulls.txt for details)
2125 */
2126 smp_wmb();
2127 atomic_set(&sk->sk_refcnt, 1);
2128 atomic_set(&sk->sk_drops, 0);
2129}
2130EXPORT_SYMBOL(sock_init_data);
2131
2132void lock_sock_nested(struct sock *sk, int subclass)
2133{
2134 might_sleep();
2135 spin_lock_bh(&sk->sk_lock.slock);
2136 if (sk->sk_lock.owned)
2137 __lock_sock(sk);
2138 sk->sk_lock.owned = 1;
2139 spin_unlock(&sk->sk_lock.slock);
2140 /*
2141 * The sk_lock has mutex_lock() semantics here:
2142 */
2143 mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
2144 local_bh_enable();
2145}
2146EXPORT_SYMBOL(lock_sock_nested);
2147
2148void release_sock(struct sock *sk)
2149{
2150 /*
2151 * The sk_lock has mutex_unlock() semantics:
2152 */
2153 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
2154
2155 spin_lock_bh(&sk->sk_lock.slock);
2156 if (sk->sk_backlog.tail)
2157 __release_sock(sk);
2158 sk->sk_lock.owned = 0;
2159 if (waitqueue_active(&sk->sk_lock.wq))
2160 wake_up(&sk->sk_lock.wq);
2161 spin_unlock_bh(&sk->sk_lock.slock);
2162}
2163EXPORT_SYMBOL(release_sock);
2164
2165/**
2166 * lock_sock_fast - fast version of lock_sock
2167 * @sk: socket
2168 *
2169 * This version should be used for very small sections, where the process
2170 * won't block. Returns false if the fast path is taken:
2171 *   sk_lock.slock locked, owned = 0, BH disabled
2172 * Returns true if the slow path is taken:
2173 *   sk_lock.slock unlocked, owned = 1, BH enabled
2174 */
2175bool lock_sock_fast(struct sock *sk)
2176{
2177 might_sleep();
2178 spin_lock_bh(&sk->sk_lock.slock);
2179
2180 if (!sk->sk_lock.owned)
2181 /*
2182 * Note: fast path, we return with BH still disabled.
2183 */
2184 return false;
2185
2186 __lock_sock(sk);
2187 sk->sk_lock.owned = 1;
2188 spin_unlock(&sk->sk_lock.slock);
2189 /*
2190 * The sk_lock has mutex_lock() semantics here:
2191 */
2192 mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
2193 local_bh_enable();
2194 return true;
2195}
2196EXPORT_SYMBOL(lock_sock_fast);
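
/*
 * Canonical pairing for the fast lock above; the returned boolean records
 * which path was taken so unlock_sock_fast() can undo the right one:
 *
 *	bool slow = lock_sock_fast(sk);
 *
 *	// ... short, non-blocking critical section ...
 *	unlock_sock_fast(sk, slow);
 */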
2197
2198int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
2199{
2200 struct timeval tv;
2201 if (!sock_flag(sk, SOCK_TIMESTAMP))
2202 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
2203 tv = ktime_to_timeval(sk->sk_stamp);
2204 if (tv.tv_sec == -1)
2205 return -ENOENT;
2206 if (tv.tv_sec == 0) {
2207 sk->sk_stamp = ktime_get_real();
2208 tv = ktime_to_timeval(sk->sk_stamp);
2209 }
2210 return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
2211}
2212EXPORT_SYMBOL(sock_get_timestamp);
2213
2214int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
2215{
2216 struct timespec ts;
2217 if (!sock_flag(sk, SOCK_TIMESTAMP))
2218 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
2219 ts = ktime_to_timespec(sk->sk_stamp);
2220 if (ts.tv_sec == -1)
2221 return -ENOENT;
2222 if (ts.tv_sec == 0) {
2223 sk->sk_stamp = ktime_get_real();
2224 ts = ktime_to_timespec(sk->sk_stamp);
2225 }
2226 return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
2227}
2228EXPORT_SYMBOL(sock_get_timestampns);
2229
2230void sock_enable_timestamp(struct sock *sk, int flag)
2231{
2232 if (!sock_flag(sk, flag)) {
2233 unsigned long previous_flags = sk->sk_flags;
2234
2235 sock_set_flag(sk, flag);
2236 /*
2237 * we just set one of the two flags which require net
2238 * time stamping, but time stamping might have been on
2239 * already because of the other one
2240 */
2241 if (!(previous_flags & SK_FLAGS_TIMESTAMP))
2242 net_enable_timestamp();
2243 }
2244}
2245
2246/*
2247 * Get a socket option on a socket.
2248 *
2249 * FIX: POSIX 1003.1g is very ambiguous here. It states that
2250 * asynchronous errors should be reported by getsockopt. We assume
2251 * this means if you specify SO_ERROR (otherwise what's the point of it).
2252 */
2253int sock_common_getsockopt(struct socket *sock, int level, int optname,
2254 char __user *optval, int __user *optlen)
2255{
2256 struct sock *sk = sock->sk;
2257
2258 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
2259}
2260EXPORT_SYMBOL(sock_common_getsockopt);
2261
2262#ifdef CONFIG_COMPAT
2263int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
2264 char __user *optval, int __user *optlen)
2265{
2266 struct sock *sk = sock->sk;
2267
2268 if (sk->sk_prot->compat_getsockopt != NULL)
2269 return sk->sk_prot->compat_getsockopt(sk, level, optname,
2270 optval, optlen);
2271 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
2272}
2273EXPORT_SYMBOL(compat_sock_common_getsockopt);
2274#endif
2275
2276int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
2277 struct msghdr *msg, size_t size, int flags)
2278{
2279 struct sock *sk = sock->sk;
2280 int addr_len = 0;
2281 int err;
2282
2283 err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
2284 flags & ~MSG_DONTWAIT, &addr_len);
2285 if (err >= 0)
2286 msg->msg_namelen = addr_len;
2287 return err;
2288}
2289EXPORT_SYMBOL(sock_common_recvmsg);
2290
2291/*
2292 * Set socket options on an inet socket.
2293 */
2294int sock_common_setsockopt(struct socket *sock, int level, int optname,
2295 char __user *optval, unsigned int optlen)
2296{
2297 struct sock *sk = sock->sk;
2298
2299 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
2300}
2301EXPORT_SYMBOL(sock_common_setsockopt);
2302
2303#ifdef CONFIG_COMPAT
2304int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
2305 char __user *optval, unsigned int optlen)
2306{
2307 struct sock *sk = sock->sk;
2308
2309 if (sk->sk_prot->compat_setsockopt != NULL)
2310 return sk->sk_prot->compat_setsockopt(sk, level, optname,
2311 optval, optlen);
2312 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
2313}
2314EXPORT_SYMBOL(compat_sock_common_setsockopt);
2315#endif
2316
2317void sk_common_release(struct sock *sk)
2318{
2319 if (sk->sk_prot->destroy)
2320 sk->sk_prot->destroy(sk);
2321
2322 /*
2323 * Observation: when sk_common_release is called, processes have
2324 * no access to the socket, but the network stack still does.
2325 * Step one, detach it from networking:
2326 *
2327 * A. Remove it from the hash tables.
2328 */
2329
2330 sk->sk_prot->unhash(sk);
2331
2332 /*
2333 * At this point the socket cannot receive new packets, but it is
2334 * possible that some packets are in flight, because some CPU runs the
2335 * receiver and did the hash table lookup before we unhashed the socket.
2336 * They will reach the receive queue and be purged by the socket destructor.
2337 *
2338 * Also we still have packets pending on the receive queue and, probably,
2339 * our own packets waiting in device queues. sock_destroy will drain the
2340 * receive queue, but transmitted packets will delay socket destruction
2341 * until the last reference is released.
2342 */
2343
2344 sock_orphan(sk);
2345
2346 xfrm_sk_free_policy(sk);
2347
2348 sk_refcnt_debug_release(sk);
2349 sock_put(sk);
2350}
2351EXPORT_SYMBOL(sk_common_release);
2352
2353#ifdef CONFIG_PROC_FS
2354#define PROTO_INUSE_NR 64 /* should be enough for the first time */
2355struct prot_inuse {
2356 int val[PROTO_INUSE_NR];
2357};
2358
2359static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);
2360
2361#ifdef CONFIG_NET_NS
2362void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
2363{
2364 __this_cpu_add(net->core.inuse->val[prot->inuse_idx], val);
2365}
2366EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
2367
2368int sock_prot_inuse_get(struct net *net, struct proto *prot)
2369{
2370 int cpu, idx = prot->inuse_idx;
2371 int res = 0;
2372
2373 for_each_possible_cpu(cpu)
2374 res += per_cpu_ptr(net->core.inuse, cpu)->val[idx];
2375
2376 return res >= 0 ? res : 0;
2377}
2378EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
2379
2380static int __net_init sock_inuse_init_net(struct net *net)
2381{
2382 net->core.inuse = alloc_percpu(struct prot_inuse);
2383 return net->core.inuse ? 0 : -ENOMEM;
2384}
2385
2386static void __net_exit sock_inuse_exit_net(struct net *net)
2387{
2388 free_percpu(net->core.inuse);
2389}
2390
2391static struct pernet_operations net_inuse_ops = {
2392 .init = sock_inuse_init_net,
2393 .exit = sock_inuse_exit_net,
2394};
2395
2396static __init int net_inuse_init(void)
2397{
2398 if (register_pernet_subsys(&net_inuse_ops))
2399 panic("Cannot initialize net inuse counters");
2400
2401 return 0;
2402}
2403
2404core_initcall(net_inuse_init);
2405#else
2406static DEFINE_PER_CPU(struct prot_inuse, prot_inuse);
2407
2408void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
2409{
2410 __this_cpu_add(prot_inuse.val[prot->inuse_idx], val);
2411}
2412EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
2413
2414int sock_prot_inuse_get(struct net *net, struct proto *prot)
2415{
2416 int cpu, idx = prot->inuse_idx;
2417 int res = 0;
2418
2419 for_each_possible_cpu(cpu)
2420 res += per_cpu(prot_inuse, cpu).val[idx];
2421
2422 return res >= 0 ? res : 0;
2423}
2424EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
2425#endif
2426
2427static void assign_proto_idx(struct proto *prot)
2428{
2429 prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);
2430
2431 if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
2432 pr_err("PROTO_INUSE_NR exhausted\n");
2433 return;
2434 }
2435
2436 set_bit(prot->inuse_idx, proto_inuse_idx);
2437}
2438
2439static void release_proto_idx(struct proto *prot)
2440{
2441 if (prot->inuse_idx != PROTO_INUSE_NR - 1)
2442 clear_bit(prot->inuse_idx, proto_inuse_idx);
2443}
2444#else
2445static inline void assign_proto_idx(struct proto *prot)
2446{
2447}
2448
2449static inline void release_proto_idx(struct proto *prot)
2450{
2451}
2452#endif
2453
2454int proto_register(struct proto *prot, int alloc_slab)
2455{
2456 if (alloc_slab) {
2457 prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
2458 SLAB_HWCACHE_ALIGN | prot->slab_flags,
2459 NULL);
2460
2461 if (prot->slab == NULL) {
2462 pr_crit("%s: Can't create sock SLAB cache!\n",
2463 prot->name);
2464 goto out;
2465 }
2466
2467 if (prot->rsk_prot != NULL) {
2468 prot->rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s", prot->name);
2469 if (prot->rsk_prot->slab_name == NULL)
2470 goto out_free_sock_slab;
2471
2472 prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name,
2473 prot->rsk_prot->obj_size, 0,
2474 SLAB_HWCACHE_ALIGN, NULL);
2475
2476 if (prot->rsk_prot->slab == NULL) {
2477 pr_crit("%s: Can't create request sock SLAB cache!\n",
2478 prot->name);
2479 goto out_free_request_sock_slab_name;
2480 }
2481 }
2482
2483 if (prot->twsk_prot != NULL) {
2484 prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);
2485
2486 if (prot->twsk_prot->twsk_slab_name == NULL)
2487 goto out_free_request_sock_slab;
2488
2489 prot->twsk_prot->twsk_slab =
2490 kmem_cache_create(prot->twsk_prot->twsk_slab_name,
2491 prot->twsk_prot->twsk_obj_size,
2492 0,
2493 SLAB_HWCACHE_ALIGN |
2494 prot->slab_flags,
2495 NULL);
2496 if (prot->twsk_prot->twsk_slab == NULL)
2497 goto out_free_timewait_sock_slab_name;
2498 }
2499 }
2500
2501 mutex_lock(&proto_list_mutex);
2502 list_add(&prot->node, &proto_list);
2503 assign_proto_idx(prot);
2504 mutex_unlock(&proto_list_mutex);
2505 return 0;
2506
2507out_free_timewait_sock_slab_name:
2508 kfree(prot->twsk_prot->twsk_slab_name);
2509out_free_request_sock_slab:
2510 if (prot->rsk_prot && prot->rsk_prot->slab) {
2511 kmem_cache_destroy(prot->rsk_prot->slab);
2512 prot->rsk_prot->slab = NULL;
2513 }
2514out_free_request_sock_slab_name:
2515 if (prot->rsk_prot)
2516 kfree(prot->rsk_prot->slab_name);
2517out_free_sock_slab:
2518 kmem_cache_destroy(prot->slab);
2519 prot->slab = NULL;
2520out:
2521 return -ENOBUFS;
2522}
2523EXPORT_SYMBOL(proto_register);
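
/*
 * Illustrative sketch (hypothetical protocol; myproto_proto and struct
 * myproto_sock are placeholders) of registering a proto so that its
 * sockets are carved from a dedicated slab by sk_prot_alloc() above:
 *
 *	static struct proto myproto_proto = {
 *		.name     = "MYPROTO",
 *		.owner    = THIS_MODULE,
 *		.obj_size = sizeof(struct myproto_sock),
 *	};
 *
 *	static int __init myproto_init(void)
 *	{
 *		return proto_register(&myproto_proto, 1);	// alloc_slab
 *	}
 *
 * proto_unregister() below tears the caches down again.
 */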
2524
2525void proto_unregister(struct proto *prot)
2526{
2527 mutex_lock(&proto_list_mutex);
2528 release_proto_idx(prot);
2529 list_del(&prot->node);
2530 mutex_unlock(&proto_list_mutex);
2531
2532 if (prot->slab != NULL) {
2533 kmem_cache_destroy(prot->slab);
2534 prot->slab = NULL;
2535 }
2536
2537 if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) {
2538 kmem_cache_destroy(prot->rsk_prot->slab);
2539 kfree(prot->rsk_prot->slab_name);
2540 prot->rsk_prot->slab = NULL;
2541 }
2542
2543 if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
2544 kmem_cache_destroy(prot->twsk_prot->twsk_slab);
2545 kfree(prot->twsk_prot->twsk_slab_name);
2546 prot->twsk_prot->twsk_slab = NULL;
2547 }
2548}
2549EXPORT_SYMBOL(proto_unregister);
2550
2551#ifdef CONFIG_PROC_FS
2552static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
2553 __acquires(proto_list_mutex)
2554{
2555 mutex_lock(&proto_list_mutex);
2556 return seq_list_start_head(&proto_list, *pos);
2557}
2558
2559static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2560{
2561 return seq_list_next(v, &proto_list, pos);
2562}
2563
2564static void proto_seq_stop(struct seq_file *seq, void *v)
2565 __releases(proto_list_mutex)
2566{
2567 mutex_unlock(&proto_list_mutex);
2568}
2569
2570static char proto_method_implemented(const void *method)
2571{
2572 return method == NULL ? 'n' : 'y';
2573}
2574static long sock_prot_memory_allocated(struct proto *proto)
2575{
2576 return proto->memory_allocated != NULL ? proto_memory_allocated(proto) : -1L;
2577}
2578
2579static char *sock_prot_memory_pressure(struct proto *proto)
2580{
2581 return proto->memory_pressure != NULL ?
2582 proto_memory_pressure(proto) ? "yes" : "no" : "NI";
2583}
2584
2585static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
2586{
2587
2588 seq_printf(seq, "%-9s %4u %6d %6ld %-3s %6u %-3s %-10s "
2589 "%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
2590 proto->name,
2591 proto->obj_size,
2592 sock_prot_inuse_get(seq_file_net(seq), proto),
2593 sock_prot_memory_allocated(proto),
2594 sock_prot_memory_pressure(proto),
2595 proto->max_header,
2596 proto->slab == NULL ? "no" : "yes",
2597 module_name(proto->owner),
2598 proto_method_implemented(proto->close),
2599 proto_method_implemented(proto->connect),
2600 proto_method_implemented(proto->disconnect),
2601 proto_method_implemented(proto->accept),
2602 proto_method_implemented(proto->ioctl),
2603 proto_method_implemented(proto->init),
2604 proto_method_implemented(proto->destroy),
2605 proto_method_implemented(proto->shutdown),
2606 proto_method_implemented(proto->setsockopt),
2607 proto_method_implemented(proto->getsockopt),
2608 proto_method_implemented(proto->sendmsg),
2609 proto_method_implemented(proto->recvmsg),
2610 proto_method_implemented(proto->sendpage),
2611 proto_method_implemented(proto->bind),
2612 proto_method_implemented(proto->backlog_rcv),
2613 proto_method_implemented(proto->hash),
2614 proto_method_implemented(proto->unhash),
2615 proto_method_implemented(proto->get_port),
2616 proto_method_implemented(proto->enter_memory_pressure));
2617}
2618
2619static int proto_seq_show(struct seq_file *seq, void *v)
2620{
2621 if (v == &proto_list)
2622 seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
2623 "protocol",
2624 "size",
2625 "sockets",
2626 "memory",
2627 "press",
2628 "maxhdr",
2629 "slab",
2630 "module",
2631 "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
2632 else
2633 proto_seq_printf(seq, list_entry(v, struct proto, node));
2634 return 0;
2635}
2636
2637static const struct seq_operations proto_seq_ops = {
2638 .start = proto_seq_start,
2639 .next = proto_seq_next,
2640 .stop = proto_seq_stop,
2641 .show = proto_seq_show,
2642};
2643
2644static int proto_seq_open(struct inode *inode, struct file *file)
2645{
2646 return seq_open_net(inode, file, &proto_seq_ops,
2647 sizeof(struct seq_net_private));
2648}
2649
2650static const struct file_operations proto_seq_fops = {
2651 .owner = THIS_MODULE,
2652 .open = proto_seq_open,
2653 .read = seq_read,
2654 .llseek = seq_lseek,
2655 .release = seq_release_net,
2656};
2657
2658static __net_init int proto_init_net(struct net *net)
2659{
2660 if (!proc_net_fops_create(net, "protocols", S_IRUGO, &proto_seq_fops))
2661 return -ENOMEM;
2662
2663 return 0;
2664}
2665
2666static __net_exit void proto_exit_net(struct net *net)
2667{
2668 proc_net_remove(net, "protocols");
2669}
2670
2671
2672static __net_initdata struct pernet_operations proto_net_ops = {
2673 .init = proto_init_net,
2674 .exit = proto_exit_net,
2675};
2676
2677static int __init proto_init(void)
2678{
2679 return register_pernet_subsys(&proto_net_ops);
2680}
2681
2682subsys_initcall(proto_init);
2683
2684#endif /* PROC_FS */