/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic socket support routines. Memory allocators, socket lock/release
 *		handler for protocols to use and generic option handler.
 *
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Alan Cox, <A.Cox@swansea.ac.uk>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() problems
 *		Alan Cox	:	Connecting on a connecting socket
 *					now returns an error for tcp.
 *		Alan Cox	:	sock->protocol is set correctly.
 *					and is not sometimes left as 0.
 *		Alan Cox	:	connect handles icmp errors on a
 *					connect properly. Unfortunately there
 *					is a restart syscall nasty there. I
 *					can't match BSD without hacking the C
 *					library. Ideas urgently sought!
 *		Alan Cox	:	Disallow bind() to addresses that are
 *					not ours - especially broadcast ones!!
 *		Alan Cox	:	Socket 1024 _IS_ ok for users. (fencepost)
 *		Alan Cox	:	sock_wfree/sock_rfree don't destroy sockets,
 *					instead they leave that for the DESTROY timer.
 *		Alan Cox	:	Clean up error flag in accept
 *		Alan Cox	:	TCP ack handling is buggy, the DESTROY timer
 *					was buggy. Put a remove_sock() in the handler
 *					for memory when we hit 0. Also altered the timer
 *					code. The ACK stuff can wait and needs major
 *					TCP layer surgery.
 *		Alan Cox	:	Fixed TCP ack bug, removed remove sock
 *					and fixed timer/inet_bh race.
 *		Alan Cox	:	Added zapped flag for TCP
 *		Alan Cox	:	Move kfree_skb into skbuff.c and tidied up surplus code
 *		Alan Cox	:	for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
 *		Alan Cox	:	kfree_s calls now are kfree_skbmem so we can track skb resources
 *		Alan Cox	:	Supports socket option broadcast now as does udp. Packet and raw need fixing.
 *		Alan Cox	:	Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
 *		Rick Sladkey	:	Relaxed UDP rules for matching packets.
 *		C.E.Hawkins	:	IFF_PROMISC/SIOCGHWADDR support
 *		Pauline Middelink :	identd support
 *		Alan Cox	:	Fixed connect() taking signals I think.
 *		Alan Cox	:	SO_LINGER supported
 *		Alan Cox	:	Error reporting fixes
 *		Anonymous	:	inet_create tidied up (sk->reuse setting)
 *		Alan Cox	:	inet sockets don't set sk->type!
 *		Alan Cox	:	Split socket option code
 *		Alan Cox	:	Callbacks
 *		Alan Cox	:	Nagle flag for Charles & Johannes stuff
 *		Alex		:	Removed restriction on inet fioctl
 *		Alan Cox	:	Splitting INET from NET core
 *		Alan Cox	:	Fixed bogus SO_TYPE handling in getsockopt()
 *		Adam Caldwell	:	Missing return in SO_DONTROUTE/SO_DEBUG code
 *		Alan Cox	:	Split IP from generic code
 *		Alan Cox	:	New kfree_skbmem()
 *		Alan Cox	:	Make SO_DEBUG superuser only.
 *		Alan Cox	:	Allow anyone to clear SO_DEBUG
 *					(compatibility fix)
 *		Alan Cox	:	Added optimistic memory grabbing for AF_UNIX throughput.
 *		Alan Cox	:	Allocator for a socket is settable.
 *		Alan Cox	:	SO_ERROR includes soft errors.
 *		Alan Cox	:	Allow NULL arguments on some SO_ opts
 *		Alan Cox	:	Generic socket allocation to make hooks
 *					easier (suggested by Craig Metz).
 *		Michael Pall	:	SO_ERROR returns positive errno again
 *		Steve Whitehouse:	Added default destructor to free
 *					protocol private data.
 *		Steve Whitehouse:	Added various other default routines
 *					common to several socket families.
 *		Chris Evans	:	Call suser() check last on F_SETOWN
 *		Jay Schulist	:	Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
 *		Andi Kleen	:	Add sock_kmalloc()/sock_kfree_s()
 *		Andi Kleen	:	Fix write_space callback
 *		Chris Evans	:	Security fixes - signedness again
 *		Arnaldo C. Melo :	cleanups, use skb_queue_purge
 *
 * To Fix:
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/user_namespace.h>

#include <asm/uaccess.h>
#include <asm/system.h>

#include <linux/netdevice.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <linux/net_tstamp.h>
#include <net/xfrm.h>
#include <linux/ipsec.h>
#include <net/cls_cgroup.h>

#include <linux/filter.h>

#include <trace/events/sock.h>

#ifdef CONFIG_INET
#include <net/tcp.h>
#endif

/*
 * Each address family might have different locking rules, so we have
 * one slock key per address family:
 */
static struct lock_class_key af_family_keys[AF_MAX];
static struct lock_class_key af_family_slock_keys[AF_MAX];

/*
 * Make lock validator output more readable. (we pre-construct these
 * strings build-time, so that runtime initialization of socket
 * locks is fast):
 */
static const char *const af_family_key_strings[AF_MAX+1] = {
  "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX"     , "sk_lock-AF_INET"     ,
  "sk_lock-AF_AX25"  , "sk_lock-AF_IPX"      , "sk_lock-AF_APPLETALK",
  "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE"   , "sk_lock-AF_ATMPVC"   ,
  "sk_lock-AF_X25"   , "sk_lock-AF_INET6"    , "sk_lock-AF_ROSE"     ,
  "sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI"  , "sk_lock-AF_SECURITY" ,
  "sk_lock-AF_KEY"   , "sk_lock-AF_NETLINK"  , "sk_lock-AF_PACKET"   ,
  "sk_lock-AF_ASH"   , "sk_lock-AF_ECONET"   , "sk_lock-AF_ATMSVC"   ,
  "sk_lock-AF_RDS"   , "sk_lock-AF_SNA"      , "sk_lock-AF_IRDA"     ,
  "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE"  , "sk_lock-AF_LLC"      ,
  "sk_lock-27"       , "sk_lock-28"          , "sk_lock-AF_CAN"      ,
  "sk_lock-AF_TIPC"  , "sk_lock-AF_BLUETOOTH", "sk_lock-AF_IUCV"     ,
  "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN"     , "sk_lock-AF_PHONET"   ,
  "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG"      ,
  "sk_lock-AF_NFC"   , "sk_lock-AF_MAX"
};
static const char *const af_family_slock_key_strings[AF_MAX+1] = {
  "slock-AF_UNSPEC", "slock-AF_UNIX"     , "slock-AF_INET"     ,
  "slock-AF_AX25"  , "slock-AF_IPX"      , "slock-AF_APPLETALK",
  "slock-AF_NETROM", "slock-AF_BRIDGE"   , "slock-AF_ATMPVC"   ,
  "slock-AF_X25"   , "slock-AF_INET6"    , "slock-AF_ROSE"     ,
  "slock-AF_DECnet", "slock-AF_NETBEUI"  , "slock-AF_SECURITY" ,
  "slock-AF_KEY"   , "slock-AF_NETLINK"  , "slock-AF_PACKET"   ,
  "slock-AF_ASH"   , "slock-AF_ECONET"   , "slock-AF_ATMSVC"   ,
  "slock-AF_RDS"   , "slock-AF_SNA"      , "slock-AF_IRDA"     ,
  "slock-AF_PPPOX" , "slock-AF_WANPIPE"  , "slock-AF_LLC"      ,
  "slock-27"       , "slock-28"          , "slock-AF_CAN"      ,
  "slock-AF_TIPC"  , "slock-AF_BLUETOOTH", "slock-AF_IUCV"     ,
  "slock-AF_RXRPC" , "slock-AF_ISDN"     , "slock-AF_PHONET"   ,
  "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG"      ,
  "slock-AF_NFC"   , "slock-AF_MAX"
};
static const char *const af_family_clock_key_strings[AF_MAX+1] = {
  "clock-AF_UNSPEC", "clock-AF_UNIX"     , "clock-AF_INET"     ,
  "clock-AF_AX25"  , "clock-AF_IPX"      , "clock-AF_APPLETALK",
  "clock-AF_NETROM", "clock-AF_BRIDGE"   , "clock-AF_ATMPVC"   ,
  "clock-AF_X25"   , "clock-AF_INET6"    , "clock-AF_ROSE"     ,
  "clock-AF_DECnet", "clock-AF_NETBEUI"  , "clock-AF_SECURITY" ,
  "clock-AF_KEY"   , "clock-AF_NETLINK"  , "clock-AF_PACKET"   ,
  "clock-AF_ASH"   , "clock-AF_ECONET"   , "clock-AF_ATMSVC"   ,
  "clock-AF_RDS"   , "clock-AF_SNA"      , "clock-AF_IRDA"     ,
  "clock-AF_PPPOX" , "clock-AF_WANPIPE"  , "clock-AF_LLC"      ,
  "clock-27"       , "clock-28"          , "clock-AF_CAN"      ,
  "clock-AF_TIPC"  , "clock-AF_BLUETOOTH", "clock-AF_IUCV"     ,
  "clock-AF_RXRPC" , "clock-AF_ISDN"     , "clock-AF_PHONET"   ,
  "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG"      ,
  "clock-AF_NFC"   , "clock-AF_MAX"
};

/*
 * sk_callback_lock locking rules are per-address-family,
 * so split the lock classes by using a per-AF key:
 */
static struct lock_class_key af_callback_keys[AF_MAX];

/* Take into consideration the size of the struct sk_buff overhead in the
 * determination of these values, since that is non-constant across
 * platforms. This makes socket queueing behavior and performance
 * not depend upon such differences.
 */
#define _SK_MEM_PACKETS		256
#define _SK_MEM_OVERHEAD	(sizeof(struct sk_buff) + 256)
#define SK_WMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
#define SK_RMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
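
/*
 * Worked example (illustrative only; sizeof(struct sk_buff) varies by
 * architecture and config options): assuming sizeof(struct sk_buff) == 240
 * on a 64-bit build, _SK_MEM_OVERHEAD is 240 + 256 = 496 bytes, so
 * SK_WMEM_MAX and SK_RMEM_MAX come out to 496 * 256 = 126976 bytes
 * (~124 KiB) of queued skb truesize per socket by default.
 */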

/* Run time adjustable parameters. */
__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;

/* Maximal space eaten by iovec or ancillary data plus some space */
int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
EXPORT_SYMBOL(sysctl_optmem_max);

#if defined(CONFIG_CGROUPS) && !defined(CONFIG_NET_CLS_CGROUP)
int net_cls_subsys_id = -1;
EXPORT_SYMBOL_GPL(net_cls_subsys_id);
#endif

static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
{
	struct timeval tv;

	if (optlen < sizeof(tv))
		return -EINVAL;
	if (copy_from_user(&tv, optval, sizeof(tv)))
		return -EFAULT;
	if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
		return -EDOM;

	if (tv.tv_sec < 0) {
		static int warned __read_mostly;

		*timeo_p = 0;
		if (warned < 10 && net_ratelimit()) {
			warned++;
			printk(KERN_INFO "sock_set_timeout: `%s' (pid %d) "
			       "tries to set negative timeout\n",
			       current->comm, task_pid_nr(current));
		}
		return 0;
	}
	*timeo_p = MAX_SCHEDULE_TIMEOUT;
	if (tv.tv_sec == 0 && tv.tv_usec == 0)
		return 0;
	if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
		*timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
	return 0;
}
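
/*
 * Worked example of the timeval-to-jiffies conversion above (HZ is
 * configuration dependent): with HZ == 1000, a user timeout of
 * { .tv_sec = 2, .tv_usec = 500000 } yields
 *
 *	*timeo_p = 2 * 1000 + (500000 + 999) / 1000 = 2000 + 500 = 2500
 *
 * jiffies, i.e. the microsecond part is rounded up to the next tick.
 */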

static void sock_warn_obsolete_bsdism(const char *name)
{
	static int warned;
	static char warncomm[TASK_COMM_LEN];
	if (strcmp(warncomm, current->comm) && warned < 5) {
		strcpy(warncomm, current->comm);
		printk(KERN_WARNING "process `%s' is using obsolete "
		       "%s SO_BSDCOMPAT\n", warncomm, name);
		warned++;
	}
}

static void sock_disable_timestamp(struct sock *sk, int flag)
{
	if (sock_flag(sk, flag)) {
		sock_reset_flag(sk, flag);
		if (!sock_flag(sk, SOCK_TIMESTAMP) &&
		    !sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE)) {
			net_disable_timestamp();
		}
	}
}


int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err;
	int skb_len;
	unsigned long flags;
	struct sk_buff_head *list = &sk->sk_receive_queue;

	/* Cast sk->rcvbuf to unsigned... It's pointless, but reduces
	 * number of warnings when compiling with -W --ANK
	 */
	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned)sk->sk_rcvbuf) {
		atomic_inc(&sk->sk_drops);
		trace_sock_rcvqueue_full(sk, skb);
		return -ENOMEM;
	}

	err = sk_filter(sk, skb);
	if (err)
		return err;

	if (!sk_rmem_schedule(sk, skb->truesize)) {
		atomic_inc(&sk->sk_drops);
		return -ENOBUFS;
	}

	skb->dev = NULL;
	skb_set_owner_r(skb, sk);

	/* Cache the SKB length before we tack it onto the receive
	 * queue. Once it is added it no longer belongs to us and
	 * may be freed by other threads of control pulling packets
	 * from the queue.
	 */
	skb_len = skb->len;

	/* we escape from the rcu protected region, make sure we don't
	 * leak a non-refcounted dst
	 */
	skb_dst_force(skb);

	spin_lock_irqsave(&list->lock, flags);
	skb->dropcount = atomic_read(&sk->sk_drops);
	__skb_queue_tail(list, skb);
	spin_unlock_irqrestore(&list->lock, flags);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb_len);
	return 0;
}
EXPORT_SYMBOL(sock_queue_rcv_skb);
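
/*
 * Typical use of sock_queue_rcv_skb() (illustrative sketch of a
 * hypothetical protocol's delivery path, not code from this file):
 *
 *	static int proto_deliver(struct sock *sk, struct sk_buff *skb)
 *	{
 *		int err = sock_queue_rcv_skb(sk, skb);
 *
 *		if (err < 0)
 *			kfree_skb(skb);
 *		return err;
 *	}
 *
 * On failure the skb still belongs to the caller and must be freed.
 */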

int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
{
	int rc = NET_RX_SUCCESS;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	if (sk_rcvqueues_full(sk, skb)) {
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}
	if (nested)
		bh_lock_sock_nested(sk);
	else
		bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		/*
		 * trylock + unlock semantics:
		 */
		mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);

		rc = sk_backlog_rcv(sk, skb);

		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
	} else if (sk_add_backlog(sk, skb)) {
		bh_unlock_sock(sk);
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}

	bh_unlock_sock(sk);
out:
	sock_put(sk);
	return rc;
discard_and_relse:
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(sk_receive_skb);

void sk_reset_txq(struct sock *sk)
{
	sk_tx_queue_clear(sk);
}
EXPORT_SYMBOL(sk_reset_txq);

struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = __sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_tx_queue_clear(sk);
		rcu_assign_pointer(sk->sk_dst_cache, NULL);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(__sk_dst_check);

struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_dst_reset(sk);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(sk_dst_check);

static int sock_bindtodevice(struct sock *sk, char __user *optval, int optlen)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);
	char devname[IFNAMSIZ];
	int index;

	/* Sorry... */
	ret = -EPERM;
	if (!capable(CAP_NET_RAW))
		goto out;

	ret = -EINVAL;
	if (optlen < 0)
		goto out;

	/* Bind this socket to a particular device like "eth0",
	 * as specified in the passed interface name. If the
	 * name is "" or the option length is zero the socket
	 * is not bound.
	 */
	if (optlen > IFNAMSIZ - 1)
		optlen = IFNAMSIZ - 1;
	memset(devname, 0, sizeof(devname));

	ret = -EFAULT;
	if (copy_from_user(devname, optval, optlen))
		goto out;

	index = 0;
	if (devname[0] != '\0') {
		struct net_device *dev;

		rcu_read_lock();
		dev = dev_get_by_name_rcu(net, devname);
		if (dev)
			index = dev->ifindex;
		rcu_read_unlock();
		ret = -ENODEV;
		if (!dev)
			goto out;
	}

	lock_sock(sk);
	sk->sk_bound_dev_if = index;
	sk_dst_reset(sk);
	release_sock(sk);

	ret = 0;

out:
#endif

	return ret;
}
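
/*
 * Userspace view of the option handled above (illustrative sketch;
 * requires CAP_NET_RAW and an existing interface name):
 *
 *	char ifname[IFNAMSIZ] = "eth0";
 *
 *	if (setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE,
 *		       ifname, strlen(ifname)) < 0)
 *		perror("SO_BINDTODEVICE");
 *
 * Passing an empty string (or a zero length) unbinds the socket again.
 */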

static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
{
	if (valbool)
		sock_set_flag(sk, bit);
	else
		sock_reset_flag(sk, bit);
}

/*
 *	This is meant for all protocols to use and covers goings on
 *	at the socket level. Everything here is generic.
 */

int sock_setsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	int val;
	int valbool;
	struct linger ling;
	int ret = 0;

	/*
	 *	Options without arguments
	 */

	if (optname == SO_BINDTODEVICE)
		return sock_bindtodevice(sk, optval, optlen);

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	valbool = val ? 1 : 0;

	lock_sock(sk);

	switch (optname) {
	case SO_DEBUG:
		if (val && !capable(CAP_NET_ADMIN))
			ret = -EACCES;
		else
			sock_valbool_flag(sk, SOCK_DBG, valbool);
		break;
	case SO_REUSEADDR:
		sk->sk_reuse = valbool;
		break;
	case SO_TYPE:
	case SO_PROTOCOL:
	case SO_DOMAIN:
	case SO_ERROR:
		ret = -ENOPROTOOPT;
		break;
	case SO_DONTROUTE:
		sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
		break;
	case SO_BROADCAST:
		sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
		break;
	case SO_SNDBUF:
		/* Don't error on this: BSD doesn't, and if you think
		   about it this is right. Otherwise apps have to
		   play 'guess the biggest size' games. RCVBUF/SNDBUF
		   are treated in BSD as hints */

		if (val > sysctl_wmem_max)
			val = sysctl_wmem_max;
set_sndbuf:
		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		if ((val * 2) < SOCK_MIN_SNDBUF)
			sk->sk_sndbuf = SOCK_MIN_SNDBUF;
		else
			sk->sk_sndbuf = val * 2;

		/*
		 *	Wake up sending tasks if we
		 *	upped the value.
		 */
		sk->sk_write_space(sk);
		break;

	case SO_SNDBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_sndbuf;

	case SO_RCVBUF:
		/* Don't error on this: BSD doesn't, and if you think
		   about it this is right. Otherwise apps have to
		   play 'guess the biggest size' games. RCVBUF/SNDBUF
		   are treated in BSD as hints */

		if (val > sysctl_rmem_max)
			val = sysctl_rmem_max;
set_rcvbuf:
		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
		/*
		 * We double it on the way in to account for
		 * "struct sk_buff" etc. overhead. Applications
		 * assume that the SO_RCVBUF setting they make will
		 * allow that much actual data to be received on that
		 * socket.
		 *
		 * Applications are unaware that "struct sk_buff" and
		 * other overheads allocate from the receive buffer
		 * during socket buffer allocation.
		 *
		 * And after considering the possible alternatives,
		 * returning the value we actually used in getsockopt
		 * is the most desirable behavior.
		 */
		if ((val * 2) < SOCK_MIN_RCVBUF)
			sk->sk_rcvbuf = SOCK_MIN_RCVBUF;
		else
			sk->sk_rcvbuf = val * 2;
		break;

	case SO_RCVBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_rcvbuf;

	case SO_KEEPALIVE:
#ifdef CONFIG_INET
		if (sk->sk_protocol == IPPROTO_TCP)
			tcp_set_keepalive(sk, valbool);
#endif
		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
		break;

	case SO_OOBINLINE:
		sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
		break;

	case SO_NO_CHECK:
		sk->sk_no_check = valbool;
		break;

	case SO_PRIORITY:
		if ((val >= 0 && val <= 6) || capable(CAP_NET_ADMIN))
			sk->sk_priority = val;
		else
			ret = -EPERM;
		break;

	case SO_LINGER:
		if (optlen < sizeof(ling)) {
			ret = -EINVAL;	/* 1003.1g */
			break;
		}
		if (copy_from_user(&ling, optval, sizeof(ling))) {
			ret = -EFAULT;
			break;
		}
		if (!ling.l_onoff)
			sock_reset_flag(sk, SOCK_LINGER);
		else {
#if (BITS_PER_LONG == 32)
			if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
				sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
			else
#endif
				sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
			sock_set_flag(sk, SOCK_LINGER);
		}
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("setsockopt");
		break;

	case SO_PASSCRED:
		if (valbool)
			set_bit(SOCK_PASSCRED, &sock->flags);
		else
			clear_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_TIMESTAMP:
	case SO_TIMESTAMPNS:
		if (valbool) {
			if (optname == SO_TIMESTAMP)
				sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
			else
				sock_set_flag(sk, SOCK_RCVTSTAMPNS);
			sock_set_flag(sk, SOCK_RCVTSTAMP);
			sock_enable_timestamp(sk, SOCK_TIMESTAMP);
		} else {
			sock_reset_flag(sk, SOCK_RCVTSTAMP);
			sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
		}
		break;

	case SO_TIMESTAMPING:
		if (val & ~SOF_TIMESTAMPING_MASK) {
			ret = -EINVAL;
			break;
		}
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE,
				  val & SOF_TIMESTAMPING_TX_HARDWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE,
				  val & SOF_TIMESTAMPING_TX_SOFTWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE,
				  val & SOF_TIMESTAMPING_RX_HARDWARE);
		if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
			sock_enable_timestamp(sk,
					      SOCK_TIMESTAMPING_RX_SOFTWARE);
		else
			sock_disable_timestamp(sk,
					       SOCK_TIMESTAMPING_RX_SOFTWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_SOFTWARE,
				  val & SOF_TIMESTAMPING_SOFTWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE,
				  val & SOF_TIMESTAMPING_SYS_HARDWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE,
				  val & SOF_TIMESTAMPING_RAW_HARDWARE);
		break;

	case SO_RCVLOWAT:
		if (val < 0)
			val = INT_MAX;
		sk->sk_rcvlowat = val ? : 1;
		break;

	case SO_RCVTIMEO:
		ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
		break;

	case SO_SNDTIMEO:
		ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
		break;

	case SO_ATTACH_FILTER:
		ret = -EINVAL;
		if (optlen == sizeof(struct sock_fprog)) {
			struct sock_fprog fprog;

			ret = -EFAULT;
			if (copy_from_user(&fprog, optval, sizeof(fprog)))
				break;

			ret = sk_attach_filter(&fprog, sk);
		}
		break;

	case SO_DETACH_FILTER:
		ret = sk_detach_filter(sk);
		break;

	case SO_PASSSEC:
		if (valbool)
			set_bit(SOCK_PASSSEC, &sock->flags);
		else
			clear_bit(SOCK_PASSSEC, &sock->flags);
		break;
	case SO_MARK:
		if (!capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else
			sk->sk_mark = val;
		break;

		/* We implement the SO_SNDLOWAT etc to
		   not be settable (1003.1g 5.3) */
	case SO_RXQ_OVFL:
		if (valbool)
			sock_set_flag(sk, SOCK_RXQ_OVFL);
		else
			sock_reset_flag(sk, SOCK_RXQ_OVFL);
		break;
	default:
		ret = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);
	return ret;
}
EXPORT_SYMBOL(sock_setsockopt);
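
/*
 * Example of the SO_RCVBUF/SO_SNDBUF doubling described above
 * (illustrative userspace sketch):
 *
 *	int val = 65536, out;
 *	socklen_t len = sizeof(out);
 *
 *	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, sizeof(val));
 *	getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &out, &len);
 *
 * getsockopt() will report out == 131072 (2 * val), assuming val did
 * not exceed sysctl_rmem_max (/proc/sys/net/core/rmem_max).
 */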


void cred_to_ucred(struct pid *pid, const struct cred *cred,
		   struct ucred *ucred)
{
	ucred->pid = pid_vnr(pid);
	ucred->uid = ucred->gid = -1;
	if (cred) {
		struct user_namespace *current_ns = current_user_ns();

		ucred->uid = user_ns_map_uid(current_ns, cred, cred->euid);
		ucred->gid = user_ns_map_gid(current_ns, cred, cred->egid);
	}
}
EXPORT_SYMBOL_GPL(cred_to_ucred);

int sock_getsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	union {
		int val;
		struct linger ling;
		struct timeval tm;
	} v;

	int lv = sizeof(int);
	int len;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	memset(&v, 0, sizeof(v));

	switch (optname) {
	case SO_DEBUG:
		v.val = sock_flag(sk, SOCK_DBG);
		break;

	case SO_DONTROUTE:
		v.val = sock_flag(sk, SOCK_LOCALROUTE);
		break;

	case SO_BROADCAST:
		v.val = !!sock_flag(sk, SOCK_BROADCAST);
		break;

	case SO_SNDBUF:
		v.val = sk->sk_sndbuf;
		break;

	case SO_RCVBUF:
		v.val = sk->sk_rcvbuf;
		break;

	case SO_REUSEADDR:
		v.val = sk->sk_reuse;
		break;

	case SO_KEEPALIVE:
		v.val = !!sock_flag(sk, SOCK_KEEPOPEN);
		break;

	case SO_TYPE:
		v.val = sk->sk_type;
		break;

	case SO_PROTOCOL:
		v.val = sk->sk_protocol;
		break;

	case SO_DOMAIN:
		v.val = sk->sk_family;
		break;

	case SO_ERROR:
		v.val = -sock_error(sk);
		if (v.val == 0)
			v.val = xchg(&sk->sk_err_soft, 0);
		break;

	case SO_OOBINLINE:
		v.val = !!sock_flag(sk, SOCK_URGINLINE);
		break;

	case SO_NO_CHECK:
		v.val = sk->sk_no_check;
		break;

	case SO_PRIORITY:
		v.val = sk->sk_priority;
		break;

	case SO_LINGER:
		lv = sizeof(v.ling);
		v.ling.l_onoff = !!sock_flag(sk, SOCK_LINGER);
		v.ling.l_linger = sk->sk_lingertime / HZ;
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("getsockopt");
		break;

	case SO_TIMESTAMP:
		v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
				!sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPNS:
		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPING:
		v.val = 0;
		if (sock_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE))
			v.val |= SOF_TIMESTAMPING_TX_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE))
			v.val |= SOF_TIMESTAMPING_TX_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE))
			v.val |= SOF_TIMESTAMPING_RX_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE))
			v.val |= SOF_TIMESTAMPING_RX_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE))
			v.val |= SOF_TIMESTAMPING_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE))
			v.val |= SOF_TIMESTAMPING_SYS_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE))
			v.val |= SOF_TIMESTAMPING_RAW_HARDWARE;
		break;

	case SO_RCVTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_SNDTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_sndtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_RCVLOWAT:
		v.val = sk->sk_rcvlowat;
		break;

	case SO_SNDLOWAT:
		v.val = 1;
		break;

	case SO_PASSCRED:
		v.val = test_bit(SOCK_PASSCRED, &sock->flags) ? 1 : 0;
		break;

	case SO_PEERCRED:
	{
		struct ucred peercred;
		if (len > sizeof(peercred))
			len = sizeof(peercred);
		cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
		if (copy_to_user(optval, &peercred, len))
			return -EFAULT;
		goto lenout;
	}

	case SO_PEERNAME:
	{
		char address[128];

		if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
			return -ENOTCONN;
		if (lv < len)
			return -EINVAL;
		if (copy_to_user(optval, address, len))
			return -EFAULT;
		goto lenout;
	}

	/* Dubious BSD thing... Probably nobody even uses it, but
	 * the UNIX standard wants it for whatever reason... -DaveM
	 */
	case SO_ACCEPTCONN:
		v.val = sk->sk_state == TCP_LISTEN;
		break;

	case SO_PASSSEC:
		v.val = test_bit(SOCK_PASSSEC, &sock->flags) ? 1 : 0;
		break;

	case SO_PEERSEC:
		return security_socket_getpeersec_stream(sock, optval, optlen, len);

	case SO_MARK:
		v.val = sk->sk_mark;
		break;

	case SO_RXQ_OVFL:
		v.val = !!sock_flag(sk, SOCK_RXQ_OVFL);
		break;

	default:
		return -ENOPROTOOPT;
	}

	if (len > lv)
		len = lv;
	if (copy_to_user(optval, &v, len))
		return -EFAULT;
lenout:
	if (put_user(len, optlen))
		return -EFAULT;
	return 0;
}
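
/*
 * Example of reading SO_PEERCRED as handled above (illustrative
 * userspace sketch for a connected AF_UNIX socket):
 *
 *	struct ucred cr;
 *	socklen_t len = sizeof(cr);
 *
 *	if (getsockopt(fd, SOL_SOCKET, SO_PEERCRED, &cr, &len) == 0)
 *		printf("peer pid=%d uid=%d gid=%d\n",
 *		       cr.pid, cr.uid, cr.gid);
 */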

/*
 * Initialize an sk_lock.
 *
 * (We also register the sk_lock with the lock validator.)
 */
static inline void sock_lock_init(struct sock *sk)
{
	sock_lock_init_class_and_name(sk,
			af_family_slock_key_strings[sk->sk_family],
			af_family_slock_keys + sk->sk_family,
			af_family_key_strings[sk->sk_family],
			af_family_keys + sk->sk_family);
}

/*
 * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
 * even temporarily, because of RCU lookups. sk_node should also be left
 * as is. We must not copy fields between sk_dontcopy_begin and
 * sk_dontcopy_end.
 */
static void sock_copy(struct sock *nsk, const struct sock *osk)
{
#ifdef CONFIG_SECURITY_NETWORK
	void *sptr = nsk->sk_security;
#endif
	memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));

	memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
	       osk->sk_prot->obj_size - offsetof(struct sock, sk_dontcopy_end));

#ifdef CONFIG_SECURITY_NETWORK
	nsk->sk_security = sptr;
	security_sk_clone(osk, nsk);
#endif
}

/*
 * Caches using SLAB_DESTROY_BY_RCU should leave the .next pointer of
 * nulls nodes unmodified. Special care is taken when initializing the
 * object to zero.
 */
static inline void sk_prot_clear_nulls(struct sock *sk, int size)
{
	if (offsetof(struct sock, sk_node.next) != 0)
		memset(sk, 0, offsetof(struct sock, sk_node.next));
	memset(&sk->sk_node.pprev, 0,
	       size - offsetof(struct sock, sk_node.pprev));
}

void sk_prot_clear_portaddr_nulls(struct sock *sk, int size)
{
	unsigned long nulls1, nulls2;

	nulls1 = offsetof(struct sock, __sk_common.skc_node.next);
	nulls2 = offsetof(struct sock, __sk_common.skc_portaddr_node.next);
	if (nulls1 > nulls2)
		swap(nulls1, nulls2);

	if (nulls1 != 0)
		memset((char *)sk, 0, nulls1);
	memset((char *)sk + nulls1 + sizeof(void *), 0,
	       nulls2 - nulls1 - sizeof(void *));
	memset((char *)sk + nulls2 + sizeof(void *), 0,
	       size - nulls2 - sizeof(void *));
}
EXPORT_SYMBOL(sk_prot_clear_portaddr_nulls);
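
/*
 * Layout handled by the helper above (illustrative): with two nulls
 * list nodes embedded in struct sock, the object is cleared in three
 * pieces so that both ->next pointers keep their old (nulls) value:
 *
 *	[0 .............. skc_node.next)		cleared
 *	[skc_node.next]					preserved
 *	(skc_node.next ... portaddr_node.next)		cleared
 *	[skc_portaddr_node.next]			preserved
 *	(portaddr_node.next .......... obj_size)	cleared
 */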

static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
				  int family)
{
	struct sock *sk;
	struct kmem_cache *slab;

	slab = prot->slab;
	if (slab != NULL) {
		sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
		if (!sk)
			return sk;
		if (priority & __GFP_ZERO) {
			if (prot->clear_sk)
				prot->clear_sk(sk, prot->obj_size);
			else
				sk_prot_clear_nulls(sk, prot->obj_size);
		}
	} else
		sk = kmalloc(prot->obj_size, priority);

	if (sk != NULL) {
		kmemcheck_annotate_bitfield(sk, flags);

		if (security_sk_alloc(sk, family, priority))
			goto out_free;

		if (!try_module_get(prot->owner))
			goto out_free_sec;
		sk_tx_queue_clear(sk);
	}

	return sk;

out_free_sec:
	security_sk_free(sk);
out_free:
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	return NULL;
}

static void sk_prot_free(struct proto *prot, struct sock *sk)
{
	struct kmem_cache *slab;
	struct module *owner;

	owner = prot->owner;
	slab = prot->slab;

	security_sk_free(sk);
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	module_put(owner);
}

#ifdef CONFIG_CGROUPS
void sock_update_classid(struct sock *sk)
{
	u32 classid;

	rcu_read_lock();  /* doing current task, which cannot vanish. */
	classid = task_cls_classid(current);
	rcu_read_unlock();
	if (classid && classid != sk->sk_classid)
		sk->sk_classid = classid;
}
EXPORT_SYMBOL(sock_update_classid);
#endif

/**
 *	sk_alloc - All socket objects are allocated here
 *	@net: the applicable net namespace
 *	@family: protocol family
 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *	@prot: struct proto associated with this new sock instance
 */
struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
		      struct proto *prot)
{
	struct sock *sk;

	sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
	if (sk) {
		sk->sk_family = family;
		/*
		 * See comment in struct sock definition to understand
		 * why we need sk_prot_creator -acme
		 */
		sk->sk_prot = sk->sk_prot_creator = prot;
		sock_lock_init(sk);
		sock_net_set(sk, get_net(net));
		atomic_set(&sk->sk_wmem_alloc, 1);

		sock_update_classid(sk);
	}

	return sk;
}
EXPORT_SYMBOL(sk_alloc);
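
/*
 * Typical creation path using sk_alloc() (illustrative sketch of a
 * hypothetical protocol's ->create() handler; "PF_EXAMPLE" and
 * "example_proto" stand in for the caller's real family constant and
 * struct proto):
 *
 *	sk = sk_alloc(net, PF_EXAMPLE, GFP_KERNEL, &example_proto);
 *	if (!sk)
 *		return -ENOMEM;
 *	sock_init_data(sock, sk);
 */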

static void __sk_free(struct sock *sk)
{
	struct sk_filter *filter;

	if (sk->sk_destruct)
		sk->sk_destruct(sk);

	filter = rcu_dereference_check(sk->sk_filter,
				       atomic_read(&sk->sk_wmem_alloc) == 0);
	if (filter) {
		sk_filter_uncharge(sk, filter);
		rcu_assign_pointer(sk->sk_filter, NULL);
	}

	sock_disable_timestamp(sk, SOCK_TIMESTAMP);
	sock_disable_timestamp(sk, SOCK_TIMESTAMPING_RX_SOFTWARE);

	if (atomic_read(&sk->sk_omem_alloc))
		printk(KERN_DEBUG "%s: optmem leakage (%d bytes) detected.\n",
		       __func__, atomic_read(&sk->sk_omem_alloc));

	if (sk->sk_peer_cred)
		put_cred(sk->sk_peer_cred);
	put_pid(sk->sk_peer_pid);
	put_net(sock_net(sk));
	sk_prot_free(sk->sk_prot_creator, sk);
}

void sk_free(struct sock *sk)
{
	/*
	 * We subtract one from sk_wmem_alloc and can know if
	 * some packets are still in some tx queue.
	 * If not null, sock_wfree() will call __sk_free(sk) later
	 */
	if (atomic_dec_and_test(&sk->sk_wmem_alloc))
		__sk_free(sk);
}
EXPORT_SYMBOL(sk_free);

/*
 * The last sock_put should drop the reference to sk->sk_net. It has
 * already been dropped in sk_change_net. Taking a reference to the
 * stopping namespace is not an option.
 * Take a reference to the socket to remove it from the hash _alive_
 * and after that destroy it in the context of init_net.
 */
void sk_release_kernel(struct sock *sk)
{
	if (sk == NULL || sk->sk_socket == NULL)
		return;

	sock_hold(sk);
	sock_release(sk->sk_socket);
	release_net(sock_net(sk));
	sock_net_set(sk, get_net(&init_net));
	sock_put(sk);
}
EXPORT_SYMBOL(sk_release_kernel);

struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
{
	struct sock *newsk;

	newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
	if (newsk != NULL) {
		struct sk_filter *filter;

		sock_copy(newsk, sk);

		/* SANITY */
		get_net(sock_net(newsk));
		sk_node_init(&newsk->sk_node);
		sock_lock_init(newsk);
		bh_lock_sock(newsk);
		newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
		newsk->sk_backlog.len = 0;

		atomic_set(&newsk->sk_rmem_alloc, 0);
		/*
		 * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
		 */
		atomic_set(&newsk->sk_wmem_alloc, 1);
		atomic_set(&newsk->sk_omem_alloc, 0);
		skb_queue_head_init(&newsk->sk_receive_queue);
		skb_queue_head_init(&newsk->sk_write_queue);
#ifdef CONFIG_NET_DMA
		skb_queue_head_init(&newsk->sk_async_wait_queue);
#endif

		spin_lock_init(&newsk->sk_dst_lock);
		rwlock_init(&newsk->sk_callback_lock);
		lockdep_set_class_and_name(&newsk->sk_callback_lock,
				af_callback_keys + newsk->sk_family,
				af_family_clock_key_strings[newsk->sk_family]);

		newsk->sk_dst_cache = NULL;
		newsk->sk_wmem_queued = 0;
		newsk->sk_forward_alloc = 0;
		newsk->sk_send_head = NULL;
		newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;

		sock_reset_flag(newsk, SOCK_DONE);
		skb_queue_head_init(&newsk->sk_error_queue);

		filter = rcu_dereference_protected(newsk->sk_filter, 1);
		if (filter != NULL)
			sk_filter_charge(newsk, filter);

		if (unlikely(xfrm_sk_clone_policy(newsk))) {
			/* It is still a raw copy of the parent, so
			 * invalidate the destructor and do a plain
			 * sk_free() */
			newsk->sk_destruct = NULL;
			sk_free(newsk);
			newsk = NULL;
			goto out;
		}

		newsk->sk_err = 0;
		newsk->sk_priority = 0;
		/*
		 * Before updating sk_refcnt, we must commit prior changes to memory
		 * (Documentation/RCU/rculist_nulls.txt for details)
		 */
		smp_wmb();
		atomic_set(&newsk->sk_refcnt, 2);

		/*
		 * Increment the counter in the same struct proto as the master
		 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
		 * is the same as sk->sk_prot->socks, as this field was copied
		 * with memcpy).
		 *
		 * This _changes_ the previous behaviour, where
		 * tcp_create_openreq_child was always incrementing the
		 * equivalent to tcp_prot->socks (inet_sock_nr), so this has
		 * to be taken into account in all callers. -acme
		 */
		sk_refcnt_debug_inc(newsk);
		sk_set_socket(newsk, NULL);
		newsk->sk_wq = NULL;

		if (newsk->sk_prot->sockets_allocated)
			percpu_counter_inc(newsk->sk_prot->sockets_allocated);

		if (sock_flag(newsk, SOCK_TIMESTAMP) ||
		    sock_flag(newsk, SOCK_TIMESTAMPING_RX_SOFTWARE))
			net_enable_timestamp();
	}
out:
	return newsk;
}
EXPORT_SYMBOL_GPL(sk_clone);

void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
{
	__sk_dst_set(sk, dst);
	sk->sk_route_caps = dst->dev->features;
	if (sk->sk_route_caps & NETIF_F_GSO)
		sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
	sk->sk_route_caps &= ~sk->sk_route_nocaps;
	if (sk_can_gso(sk)) {
		if (dst->header_len) {
			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
		} else {
			sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
			sk->sk_gso_max_size = dst->dev->gso_max_size;
		}
	}
}
EXPORT_SYMBOL_GPL(sk_setup_caps);

void __init sk_init(void)
{
	if (totalram_pages <= 4096) {
		sysctl_wmem_max = 32767;
		sysctl_rmem_max = 32767;
		sysctl_wmem_default = 32767;
		sysctl_rmem_default = 32767;
	} else if (totalram_pages >= 131072) {
		sysctl_wmem_max = 131071;
		sysctl_rmem_max = 131071;
	}
}
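
/*
 * Worked example for the thresholds above (assuming 4 KiB pages):
 * totalram_pages <= 4096 corresponds to at most ~16 MiB of RAM, where
 * the defaults shrink to 32767 bytes; totalram_pages >= 131072
 * corresponds to >= ~512 MiB, where the maxima grow to 131071 bytes.
 */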

/*
 * Simple resource managers for sockets.
 */


/*
 * Write buffer destructor automatically called from kfree_skb.
 */
void sock_wfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	unsigned int len = skb->truesize;

	if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
		/*
		 * Keep a reference on sk_wmem_alloc, this will be released
		 * after sk_write_space() call
		 */
		atomic_sub(len - 1, &sk->sk_wmem_alloc);
		sk->sk_write_space(sk);
		len = 1;
	}
	/*
	 * if sk_wmem_alloc reaches 0, we must finish what sk_free()
	 * could not do because of in-flight packets
	 */
	if (atomic_sub_and_test(len, &sk->sk_wmem_alloc))
		__sk_free(sk);
}
EXPORT_SYMBOL(sock_wfree);

/*
 * Read buffer destructor automatically called from kfree_skb.
 */
void sock_rfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	unsigned int len = skb->truesize;

	atomic_sub(len, &sk->sk_rmem_alloc);
	sk_mem_uncharge(sk, len);
}
EXPORT_SYMBOL(sock_rfree);


int sock_i_uid(struct sock *sk)
{
	int uid;

	read_lock_bh(&sk->sk_callback_lock);
	uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0;
	read_unlock_bh(&sk->sk_callback_lock);
	return uid;
}
EXPORT_SYMBOL(sock_i_uid);

unsigned long sock_i_ino(struct sock *sk)
{
	unsigned long ino;

	read_lock_bh(&sk->sk_callback_lock);
	ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
	read_unlock_bh(&sk->sk_callback_lock);
	return ino;
}
EXPORT_SYMBOL(sock_i_ino);

/*
 * Allocate a skb from the socket's send buffer.
 */
struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
			     gfp_t priority)
{
	if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
		struct sk_buff *skb = alloc_skb(size, priority);
		if (skb) {
			skb_set_owner_w(skb, sk);
			return skb;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(sock_wmalloc);

/*
 * Allocate a skb from the socket's receive buffer.
 */
struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
			     gfp_t priority)
{
	if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
		struct sk_buff *skb = alloc_skb(size, priority);
		if (skb) {
			skb_set_owner_r(skb, sk);
			return skb;
		}
	}
	return NULL;
}

/*
 * Allocate a memory block from the socket's option memory buffer.
 */
void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
{
	if ((unsigned)size <= sysctl_optmem_max &&
	    atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
		void *mem;
		/* First do the add, to avoid the race if kmalloc
		 * might sleep.
		 */
		atomic_add(size, &sk->sk_omem_alloc);
		mem = kmalloc(size, priority);
		if (mem)
			return mem;
		atomic_sub(size, &sk->sk_omem_alloc);
	}
	return NULL;
}
EXPORT_SYMBOL(sock_kmalloc);

/*
 * Free an option memory block.
 */
void sock_kfree_s(struct sock *sk, void *mem, int size)
{
	kfree(mem);
	atomic_sub(size, &sk->sk_omem_alloc);
}
EXPORT_SYMBOL(sock_kfree_s);
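
/*
 * sock_kmalloc()/sock_kfree_s() are meant to be used as a pair so the
 * per-socket option memory accounting balances (illustrative sketch;
 * "struct foo" is a hypothetical caller-side type):
 *
 *	struct foo *opt = sock_kmalloc(sk, sizeof(*opt), GFP_KERNEL);
 *
 *	if (!opt)
 *		return -ENOBUFS;
 *	...
 *	sock_kfree_s(sk, opt, sizeof(*opt));
 *
 * The size passed to sock_kfree_s() must match the allocation, since
 * it is subtracted from sk_omem_alloc.
 */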

/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
   I think these locks should be removed for datagram sockets.
 */
static long sock_wait_for_wmem(struct sock *sk, long timeo)
{
	DEFINE_WAIT(wait);

	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
	for (;;) {
		if (!timeo)
			break;
		if (signal_pending(current))
			break;
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
			break;
		if (sk->sk_shutdown & SEND_SHUTDOWN)
			break;
		if (sk->sk_err)
			break;
		timeo = schedule_timeout(timeo);
	}
	finish_wait(sk_sleep(sk), &wait);
	return timeo;
}


/*
 *	Generic send/receive buffer handlers
 */

struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
				     unsigned long data_len, int noblock,
				     int *errcode)
{
	struct sk_buff *skb;
	gfp_t gfp_mask;
	long timeo;
	int err;

	gfp_mask = sk->sk_allocation;
	if (gfp_mask & __GFP_WAIT)
		gfp_mask |= __GFP_REPEAT;

	timeo = sock_sndtimeo(sk, noblock);
	while (1) {
		err = sock_error(sk);
		if (err != 0)
			goto failure;

		err = -EPIPE;
		if (sk->sk_shutdown & SEND_SHUTDOWN)
			goto failure;

		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
			skb = alloc_skb(header_len, gfp_mask);
			if (skb) {
				int npages;
				int i;

				/* No pages, we're done... */
				if (!data_len)
					break;

				npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
				skb->truesize += data_len;
				skb_shinfo(skb)->nr_frags = npages;
				for (i = 0; i < npages; i++) {
					struct page *page;
					skb_frag_t *frag;

					page = alloc_pages(sk->sk_allocation, 0);
					if (!page) {
						err = -ENOBUFS;
						skb_shinfo(skb)->nr_frags = i;
						kfree_skb(skb);
						goto failure;
					}

					frag = &skb_shinfo(skb)->frags[i];
					frag->page = page;
					frag->page_offset = 0;
					frag->size = (data_len >= PAGE_SIZE ?
						      PAGE_SIZE :
						      data_len);
					data_len -= PAGE_SIZE;
				}

				/* Full success... */
				break;
			}
			err = -ENOBUFS;
			goto failure;
		}
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		err = -EAGAIN;
		if (!timeo)
			goto failure;
		if (signal_pending(current))
			goto interrupted;
		timeo = sock_wait_for_wmem(sk, timeo);
	}

	skb_set_owner_w(skb, sk);
	return skb;

interrupted:
	err = sock_intr_errno(timeo);
failure:
	*errcode = err;
	return NULL;
}
EXPORT_SYMBOL(sock_alloc_send_pskb);

struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
				    int noblock, int *errcode)
{
	return sock_alloc_send_pskb(sk, size, 0, noblock, errcode);
}
EXPORT_SYMBOL(sock_alloc_send_skb);
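
/*
 * Typical sendmsg-side use of the helper above (illustrative sketch;
 * "reserve" stands for whatever headroom the caller needs):
 *
 *	skb = sock_alloc_send_skb(sk, len + reserve,
 *				  msg->msg_flags & MSG_DONTWAIT, &err);
 *	if (!skb)
 *		return err;
 *
 * On failure *errcode holds -EPIPE/-EAGAIN/-ENOBUFS, or the value of
 * sock_intr_errno() if a signal interrupted the wait.
 */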

static void __lock_sock(struct sock *sk)
	__releases(&sk->sk_lock.slock)
	__acquires(&sk->sk_lock.slock)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
					  TASK_UNINTERRUPTIBLE);
		spin_unlock_bh(&sk->sk_lock.slock);
		schedule();
		spin_lock_bh(&sk->sk_lock.slock);
		if (!sock_owned_by_user(sk))
			break;
	}
	finish_wait(&sk->sk_lock.wq, &wait);
}

static void __release_sock(struct sock *sk)
	__releases(&sk->sk_lock.slock)
	__acquires(&sk->sk_lock.slock)
{
	struct sk_buff *skb = sk->sk_backlog.head;

	do {
		sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
		bh_unlock_sock(sk);

		do {
			struct sk_buff *next = skb->next;

			WARN_ON_ONCE(skb_dst_is_noref(skb));
			skb->next = NULL;
			sk_backlog_rcv(sk, skb);

			/*
			 * We are in process context here with softirqs
			 * disabled, use cond_resched_softirq() to preempt.
			 * This is safe to do because we've taken the backlog
			 * queue private:
			 */
			cond_resched_softirq();

			skb = next;
		} while (skb != NULL);

		bh_lock_sock(sk);
	} while ((skb = sk->sk_backlog.head) != NULL);

	/*
	 * Doing the zeroing here guarantees we cannot loop forever
	 * while a wild producer attempts to flood us.
	 */
	sk->sk_backlog.len = 0;
}

/**
 * sk_wait_data - wait for data to arrive at sk_receive_queue
 * @sk: sock to wait on
 * @timeo: for how long
 *
 * Now socket state including sk->sk_err is changed only under the lock,
 * hence we may omit checks after joining the wait queue.
 * We check the receive queue before schedule() only as an optimization;
 * it is very likely that release_sock() added new data.
 */
int sk_wait_data(struct sock *sk, long *timeo)
{
	int rc;
	DEFINE_WAIT(wait);

	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
	set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
	clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	finish_wait(sk_sleep(sk), &wait);
	return rc;
}
EXPORT_SYMBOL(sk_wait_data);
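
/*
 * sk_wait_data() is meant to be called with the socket locked, in a
 * receive loop of roughly this shape (illustrative sketch):
 *
 *	lock_sock(sk);
 *	while (skb_queue_empty(&sk->sk_receive_queue)) {
 *		...check sk->sk_err, shutdown and !timeo here...
 *		sk_wait_data(sk, &timeo);
 *	}
 *	release_sock(sk);
 *
 * The release_sock() performed inside sk_wait_event() lets softirq
 * delivered data reach sk_receive_queue while the caller sleeps.
 */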

/**
 *	__sk_mem_schedule - increase sk_forward_alloc and memory_allocated
 *	@sk: socket
 *	@size: memory size to allocate
 *	@kind: allocation type
 *
 *	If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
 *	rmem allocation. This function assumes that protocols which have
 *	memory_pressure use sk_wmem_queued as write buffer accounting.
 */
int __sk_mem_schedule(struct sock *sk, int size, int kind)
{
	struct proto *prot = sk->sk_prot;
	int amt = sk_mem_pages(size);
	long allocated;

	sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
	allocated = atomic_long_add_return(amt, prot->memory_allocated);

	/* Under limit. */
	if (allocated <= prot->sysctl_mem[0]) {
		if (prot->memory_pressure && *prot->memory_pressure)
			*prot->memory_pressure = 0;
		return 1;
	}

	/* Under pressure. */
	if (allocated > prot->sysctl_mem[1])
		if (prot->enter_memory_pressure)
			prot->enter_memory_pressure(sk);

	/* Over hard limit. */
	if (allocated > prot->sysctl_mem[2])
		goto suppress_allocation;

	/* guarantee minimum buffer size under pressure */
	if (kind == SK_MEM_RECV) {
		if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
			return 1;
	} else { /* SK_MEM_SEND */
		if (sk->sk_type == SOCK_STREAM) {
			if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
				return 1;
		} else if (atomic_read(&sk->sk_wmem_alloc) <
			   prot->sysctl_wmem[0])
			return 1;
	}

	if (prot->memory_pressure) {
		int alloc;

		if (!*prot->memory_pressure)
			return 1;
		alloc = percpu_counter_read_positive(prot->sockets_allocated);
		if (prot->sysctl_mem[2] > alloc *
		    sk_mem_pages(sk->sk_wmem_queued +
				 atomic_read(&sk->sk_rmem_alloc) +
				 sk->sk_forward_alloc))
			return 1;
	}

suppress_allocation:

	if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
		sk_stream_moderate_sndbuf(sk);

		/* Fail only if socket is _under_ its sndbuf.
		 * In this case we cannot block, so we have to fail.
		 */
		if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
			return 1;
	}

	trace_sock_exceed_buf_limit(sk, prot, allocated);

	/* Alas. Undo changes. */
	sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
	atomic_long_sub(amt, prot->memory_allocated);
	return 0;
}
EXPORT_SYMBOL(__sk_mem_schedule);

/**
 *	__sk_mem_reclaim - reclaim memory_allocated
 *	@sk: socket
 */
void __sk_mem_reclaim(struct sock *sk)
{
	struct proto *prot = sk->sk_prot;

	atomic_long_sub(sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT,
			prot->memory_allocated);
	sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;

	if (prot->memory_pressure && *prot->memory_pressure &&
	    (atomic_long_read(prot->memory_allocated) < prot->sysctl_mem[0]))
		*prot->memory_pressure = 0;
}
EXPORT_SYMBOL(__sk_mem_reclaim);


/*
 * Set of default routines for initialising struct proto_ops when
 * the protocol does not support a particular function. In certain
 * cases where it makes no sense for a protocol to have a "do nothing"
 * function, some default processing is provided.
 */

int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_bind);

int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
		    int len, int flags)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_connect);

int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_socketpair);

int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_accept);

int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
		    int *len, int peer)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_getname);

unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
{
	return 0;
}
EXPORT_SYMBOL(sock_no_poll);

int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_ioctl);

int sock_no_listen(struct socket *sock, int backlog)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_listen);

int sock_no_shutdown(struct socket *sock, int how)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_shutdown);

int sock_no_setsockopt(struct socket *sock, int level, int optname,
		       char __user *optval, unsigned int optlen)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_setsockopt);

int sock_no_getsockopt(struct socket *sock, int level, int optname,
		       char __user *optval, int __user *optlen)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_getsockopt);

int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
		    size_t len)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_sendmsg);

int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
		    size_t len, int flags)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_recvmsg);

int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
{
	/* Mirror missing mmap method error code */
	return -ENODEV;
}
EXPORT_SYMBOL(sock_no_mmap);

ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
{
	ssize_t res;
	struct msghdr msg = {.msg_flags = flags};
	struct kvec iov;
	char *kaddr = kmap(page);
	iov.iov_base = kaddr + offset;
	iov.iov_len = size;
	res = kernel_sendmsg(sock, &msg, &iov, 1, size);
	kunmap(page);
	return res;
}
EXPORT_SYMBOL(sock_no_sendpage);

/*
 *	Default Socket Callbacks
 */

static void sock_def_wakeup(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_all(&wq->wait);
	rcu_read_unlock();
}

static void sock_def_error_report(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_poll(&wq->wait, POLLERR);
	sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
	rcu_read_unlock();
}

static void sock_def_readable(struct sock *sk, int len)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLPRI |
						POLLRDNORM | POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	rcu_read_unlock();
}

static void sock_def_write_space(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();

	/* Do not wake up a writer until he can make "significant"
	 * progress.  --DaveM
	 */
	if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
		wq = rcu_dereference(sk->sk_wq);
		if (wq_has_sleeper(wq))
			wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
						POLLWRNORM | POLLWRBAND);

		/* Should agree with poll, otherwise some programs break */
		if (sock_writeable(sk))
			sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	}

	rcu_read_unlock();
}

static void sock_def_destruct(struct sock *sk)
{
	kfree(sk->sk_protinfo);
}

void sk_send_sigurg(struct sock *sk)
{
	if (sk->sk_socket && sk->sk_socket->file)
		if (send_sigurg(&sk->sk_socket->file->f_owner))
			sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
}
EXPORT_SYMBOL(sk_send_sigurg);

void sk_reset_timer(struct sock *sk, struct timer_list *timer,
		    unsigned long expires)
{
	if (!mod_timer(timer, expires))
		sock_hold(sk);
}
EXPORT_SYMBOL(sk_reset_timer);

void sk_stop_timer(struct sock *sk, struct timer_list *timer)
{
	if (timer_pending(timer) && del_timer(timer))
		__sock_put(sk);
}
EXPORT_SYMBOL(sk_stop_timer);

void sock_init_data(struct socket *sock, struct sock *sk)
{
	skb_queue_head_init(&sk->sk_receive_queue);
	skb_queue_head_init(&sk->sk_write_queue);
	skb_queue_head_init(&sk->sk_error_queue);
#ifdef CONFIG_NET_DMA
	skb_queue_head_init(&sk->sk_async_wait_queue);
#endif

	sk->sk_send_head = NULL;

	init_timer(&sk->sk_timer);

	sk->sk_allocation = GFP_KERNEL;
	sk->sk_rcvbuf = sysctl_rmem_default;
	sk->sk_sndbuf = sysctl_wmem_default;
	sk->sk_state = TCP_CLOSE;
	sk_set_socket(sk, sock);

	sock_set_flag(sk, SOCK_ZAPPED);

	if (sock) {
		sk->sk_type = sock->type;
		sk->sk_wq = sock->wq;
		sock->sk = sk;
	} else
		sk->sk_wq = NULL;

	spin_lock_init(&sk->sk_dst_lock);
	rwlock_init(&sk->sk_callback_lock);
	lockdep_set_class_and_name(&sk->sk_callback_lock,
			af_callback_keys + sk->sk_family,
			af_family_clock_key_strings[sk->sk_family]);

	sk->sk_state_change = sock_def_wakeup;
	sk->sk_data_ready = sock_def_readable;
	sk->sk_write_space = sock_def_write_space;
	sk->sk_error_report = sock_def_error_report;
	sk->sk_destruct = sock_def_destruct;

	sk->sk_sndmsg_page = NULL;
	sk->sk_sndmsg_off = 0;

	sk->sk_peer_pid = NULL;
	sk->sk_peer_cred = NULL;
	sk->sk_write_pending = 0;
	sk->sk_rcvlowat = 1;
	sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
	sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;

	sk->sk_stamp = ktime_set(-1L, 0);

	/*
	 * Before updating sk_refcnt, we must commit prior changes to memory
	 * (Documentation/RCU/rculist_nulls.txt for details)
	 */
	smp_wmb();
	atomic_set(&sk->sk_refcnt, 1);
	atomic_set(&sk->sk_drops, 0);
}
EXPORT_SYMBOL(sock_init_data);
2034
2035void lock_sock_nested(struct sock *sk, int subclass)
2036{
2037 might_sleep();
2038 spin_lock_bh(&sk->sk_lock.slock);
2039 if (sk->sk_lock.owned)
2040 __lock_sock(sk);
2041 sk->sk_lock.owned = 1;
2042 spin_unlock(&sk->sk_lock.slock);
2043 /*
2044 * The sk_lock has mutex_lock() semantics here:
2045 */
2046 mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
2047 local_bh_enable();
2048}
2049EXPORT_SYMBOL(lock_sock_nested);
2050
2051void release_sock(struct sock *sk)
2052{
2053 /*
2054 * The sk_lock has mutex_unlock() semantics:
2055 */
2056 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
2057
2058 spin_lock_bh(&sk->sk_lock.slock);
2059 if (sk->sk_backlog.tail)
2060 __release_sock(sk);
2061 sk->sk_lock.owned = 0;
2062 if (waitqueue_active(&sk->sk_lock.wq))
2063 wake_up(&sk->sk_lock.wq);
2064 spin_unlock_bh(&sk->sk_lock.slock);
2065}
2066EXPORT_SYMBOL(release_sock);
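
/*
 * Note: __release_sock() above is where backlog processing happens; any
 * packets that softirq context queued with sk_add_backlog() while the lock
 * was owned are fed through sk_backlog_rcv() before ownership is dropped,
 * so input queued during the critical section is never lost.
 */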
2067
2068/**
2069 * lock_sock_fast - fast version of lock_sock
2070 * @sk: socket
2071 *
2072 * This version should be used for very small sections where the process won't block.
2073 * Returns false if the fast path is taken:
2074 *   sk_lock.slock locked, owned = 0, BH disabled
2075 * Returns true if the slow path is taken:
2076 *   sk_lock.slock unlocked, owned = 1, BH enabled
2077 */
2078bool lock_sock_fast(struct sock *sk)
2079{
2080 might_sleep();
2081 spin_lock_bh(&sk->sk_lock.slock);
2082
2083 if (!sk->sk_lock.owned)
2084 /*
2085 * Note: we return with BH disabled and the socket spinlock held
2086 */
2087 return false;
2088
2089 __lock_sock(sk);
2090 sk->sk_lock.owned = 1;
2091 spin_unlock(&sk->sk_lock.slock);
2092 /*
2093 * The sk_lock has mutex_lock() semantics here:
2094 */
2095 mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
2096 local_bh_enable();
2097 return true;
2098}
2099EXPORT_SYMBOL(lock_sock_fast);
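
/*
 * Typical caller pattern (a sketch; unlock_sock_fast() is the counterpart
 * that either unlocks the spinlock and re-enables BH, or calls
 * release_sock(), depending on which path was taken):
 *
 *	bool slow = lock_sock_fast(sk);
 *	... short, non-blocking critical section ...
 *	unlock_sock_fast(sk, slow);
 */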
2100
2101int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
2102{
2103 struct timeval tv;
2104 if (!sock_flag(sk, SOCK_TIMESTAMP))
2105 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
2106 tv = ktime_to_timeval(sk->sk_stamp);
2107 if (tv.tv_sec == -1)
2108 return -ENOENT;
2109 if (tv.tv_sec == 0) {
2110 sk->sk_stamp = ktime_get_real();
2111 tv = ktime_to_timeval(sk->sk_stamp);
2112 }
2113 return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
2114}
2115EXPORT_SYMBOL(sock_get_timestamp);
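
/*
 * Note: sk_stamp is initialised to ktime_set(-1L, 0) in sock_init_data(),
 * so a tv_sec of -1 above means no packet has ever been timestamped on this
 * socket and we report -ENOENT. A tv_sec of 0 is treated as "not yet
 * valid", so a fresh wall-clock timestamp is taken instead of returning
 * zero. sock_get_timestampns() below applies the same logic to a timespec.
 */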
2116
2117int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
2118{
2119 struct timespec ts;
2120 if (!sock_flag(sk, SOCK_TIMESTAMP))
2121 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
2122 ts = ktime_to_timespec(sk->sk_stamp);
2123 if (ts.tv_sec == -1)
2124 return -ENOENT;
2125 if (ts.tv_sec == 0) {
2126 sk->sk_stamp = ktime_get_real();
2127 ts = ktime_to_timespec(sk->sk_stamp);
2128 }
2129 return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
2130}
2131EXPORT_SYMBOL(sock_get_timestampns);
2132
2133void sock_enable_timestamp(struct sock *sk, int flag)
2134{
2135 if (!sock_flag(sk, flag)) {
2136 sock_set_flag(sk, flag);
2137 /*
2138 * we just set one of the two flags which require net
2139 * time stamping, but time stamping might have been on
2140 * already because of the other one
2141 */
2142 if (!sock_flag(sk,
2143 flag == SOCK_TIMESTAMP ?
2144 SOCK_TIMESTAMPING_RX_SOFTWARE :
2145 SOCK_TIMESTAMP))
2146 net_enable_timestamp();
2147 }
2148}
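
/*
 * net_enable_timestamp() maintains a global count of timestamp users; the
 * check above ensures each socket contributes at most one reference even
 * when both SOCK_TIMESTAMP and SOCK_TIMESTAMPING_RX_SOFTWARE end up set on
 * it, and the matching disable path only calls net_disable_timestamp()
 * once the last such flag is cleared.
 */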
2149
2150/*
2151 * Get a socket option on a socket.
2152 *
2153 * FIX: POSIX 1003.1g is very ambiguous here. It states that
2154 * asynchronous errors should be reported by getsockopt. We assume
2155 * this means if you specify SO_ERROR (otherwise what's the point of it).
2156 */
2157int sock_common_getsockopt(struct socket *sock, int level, int optname,
2158 char __user *optval, int __user *optlen)
2159{
2160 struct sock *sk = sock->sk;
2161
2162 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
2163}
2164EXPORT_SYMBOL(sock_common_getsockopt);
2165
2166#ifdef CONFIG_COMPAT
2167int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
2168 char __user *optval, int __user *optlen)
2169{
2170 struct sock *sk = sock->sk;
2171
2172 if (sk->sk_prot->compat_getsockopt != NULL)
2173 return sk->sk_prot->compat_getsockopt(sk, level, optname,
2174 optval, optlen);
2175 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
2176}
2177EXPORT_SYMBOL(compat_sock_common_getsockopt);
2178#endif
2179
2180int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
2181 struct msghdr *msg, size_t size, int flags)
2182{
2183 struct sock *sk = sock->sk;
2184 int addr_len = 0;
2185 int err;
2186
2187 err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
2188 flags & ~MSG_DONTWAIT, &addr_len);
2189 if (err >= 0)
2190 msg->msg_namelen = addr_len;
2191 return err;
2192}
2193EXPORT_SYMBOL(sock_common_recvmsg);
2194
2195/*
2196 * Set socket options on an inet socket.
2197 */
2198int sock_common_setsockopt(struct socket *sock, int level, int optname,
2199 char __user *optval, unsigned int optlen)
2200{
2201 struct sock *sk = sock->sk;
2202
2203 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
2204}
2205EXPORT_SYMBOL(sock_common_setsockopt);
2206
2207#ifdef CONFIG_COMPAT
2208int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
2209 char __user *optval, unsigned int optlen)
2210{
2211 struct sock *sk = sock->sk;
2212
2213 if (sk->sk_prot->compat_setsockopt != NULL)
2214 return sk->sk_prot->compat_setsockopt(sk, level, optname,
2215 optval, optlen);
2216 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
2217}
2218EXPORT_SYMBOL(compat_sock_common_setsockopt);
2219#endif
2220
2221void sk_common_release(struct sock *sk)
2222{
2223 if (sk->sk_prot->destroy)
2224 sk->sk_prot->destroy(sk);
2225
2226 /*
2227 * Observation: when sk_common_release() is called, processes have
2228 * no access to the socket, but the network stack still does.
2229 * Step one, detach it from networking:
2230 *
2231 * A. Remove from hash tables.
2232 */
2233
2234 sk->sk_prot->unhash(sk);
2235
2236 /*
2237 * At this point the socket cannot receive new packets, but it is possible
2238 * that some packets are still in flight because some CPU is running the
2239 * receiver and did the hash table lookup before we unhashed the socket.
2240 * They will reach the receive queue and be purged by the socket destructor.
2241 *
2242 * Also, we may still have packets pending on the receive queue and, probably,
2243 * our own packets waiting in device queues. sock_destroy will drain the
2244 * receive queue, but transmitted packets will delay socket destruction
2245 * until the last reference is released.
2246 */
2247
2248 sock_orphan(sk);
2249
2250 xfrm_sk_free_policy(sk);
2251
2252 sk_refcnt_debug_release(sk);
2253 sock_put(sk);
2254}
2255EXPORT_SYMBOL(sk_common_release);
2256
2257static DEFINE_RWLOCK(proto_list_lock);
2258static LIST_HEAD(proto_list);
2259
2260#ifdef CONFIG_PROC_FS
2261#define PROTO_INUSE_NR 64 /* should be enough for the first time */
2262struct prot_inuse {
2263 int val[PROTO_INUSE_NR];
2264};
2265
2266static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);
2267
2268#ifdef CONFIG_NET_NS
2269void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
2270{
2271 __this_cpu_add(net->core.inuse->val[prot->inuse_idx], val);
2272}
2273EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
2274
2275int sock_prot_inuse_get(struct net *net, struct proto *prot)
2276{
2277 int cpu, idx = prot->inuse_idx;
2278 int res = 0;
2279
2280 for_each_possible_cpu(cpu)
2281 res += per_cpu_ptr(net->core.inuse, cpu)->val[idx];
2282
2283 return res >= 0 ? res : 0;
2284}
2285EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
2286
2287static int __net_init sock_inuse_init_net(struct net *net)
2288{
2289 net->core.inuse = alloc_percpu(struct prot_inuse);
2290 return net->core.inuse ? 0 : -ENOMEM;
2291}
2292
2293static void __net_exit sock_inuse_exit_net(struct net *net)
2294{
2295 free_percpu(net->core.inuse);
2296}
2297
2298static struct pernet_operations net_inuse_ops = {
2299 .init = sock_inuse_init_net,
2300 .exit = sock_inuse_exit_net,
2301};
2302
2303static __init int net_inuse_init(void)
2304{
2305 if (register_pernet_subsys(&net_inuse_ops))
2306 panic("Cannot initialize net inuse counters");
2307
2308 return 0;
2309}
2310
2311core_initcall(net_inuse_init);
2312#else
2313static DEFINE_PER_CPU(struct prot_inuse, prot_inuse);
2314
2315void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
2316{
2317 __this_cpu_add(prot_inuse.val[prot->inuse_idx], val);
2318}
2319EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
2320
2321int sock_prot_inuse_get(struct net *net, struct proto *prot)
2322{
2323 int cpu, idx = prot->inuse_idx;
2324 int res = 0;
2325
2326 for_each_possible_cpu(cpu)
2327 res += per_cpu(prot_inuse, cpu).val[idx];
2328
2329 return res >= 0 ? res : 0;
2330}
2331EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
2332#endif
2333
2334static void assign_proto_idx(struct proto *prot)
2335{
2336 prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);
2337
2338 if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
2339 printk(KERN_ERR "PROTO_INUSE_NR exhausted\n");
2340 return;
2341 }
2342
2343 set_bit(prot->inuse_idx, proto_inuse_idx);
2344}
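
/*
 * Note on the index scheme above: find_first_zero_bit() picks a free slot
 * in the bitmap, but the last slot (PROTO_INUSE_NR - 1) is deliberately
 * never marked with set_bit(). It therefore acts as a shared overflow
 * index once the bitmap is exhausted, which is why release_proto_idx()
 * below refuses to clear it.
 */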
2345
2346static void release_proto_idx(struct proto *prot)
2347{
2348 if (prot->inuse_idx != PROTO_INUSE_NR - 1)
2349 clear_bit(prot->inuse_idx, proto_inuse_idx);
2350}
2351#else
2352static inline void assign_proto_idx(struct proto *prot)
2353{
2354}
2355
2356static inline void release_proto_idx(struct proto *prot)
2357{
2358}
2359#endif
2360
2361int proto_register(struct proto *prot, int alloc_slab)
2362{
2363 if (alloc_slab) {
2364 prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
2365 SLAB_HWCACHE_ALIGN | prot->slab_flags,
2366 NULL);
2367
2368 if (prot->slab == NULL) {
2369 printk(KERN_CRIT "%s: Can't create sock SLAB cache!\n",
2370 prot->name);
2371 goto out;
2372 }
2373
2374 if (prot->rsk_prot != NULL) {
2375 prot->rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s", prot->name);
2376 if (prot->rsk_prot->slab_name == NULL)
2377 goto out_free_sock_slab;
2378
2379 prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name,
2380 prot->rsk_prot->obj_size, 0,
2381 SLAB_HWCACHE_ALIGN, NULL);
2382
2383 if (prot->rsk_prot->slab == NULL) {
2384 printk(KERN_CRIT "%s: Can't create request sock SLAB cache!\n",
2385 prot->name);
2386 goto out_free_request_sock_slab_name;
2387 }
2388 }
2389
2390 if (prot->twsk_prot != NULL) {
2391 prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);
2392
2393 if (prot->twsk_prot->twsk_slab_name == NULL)
2394 goto out_free_request_sock_slab;
2395
2396 prot->twsk_prot->twsk_slab =
2397 kmem_cache_create(prot->twsk_prot->twsk_slab_name,
2398 prot->twsk_prot->twsk_obj_size,
2399 0,
2400 SLAB_HWCACHE_ALIGN |
2401 prot->slab_flags,
2402 NULL);
2403 if (prot->twsk_prot->twsk_slab == NULL)
2404 goto out_free_timewait_sock_slab_name;
2405 }
2406 }
2407
2408 write_lock(&proto_list_lock);
2409 list_add(&prot->node, &proto_list);
2410 assign_proto_idx(prot);
2411 write_unlock(&proto_list_lock);
2412 return 0;
2413
2414out_free_timewait_sock_slab_name:
2415 kfree(prot->twsk_prot->twsk_slab_name);
2416out_free_request_sock_slab:
2417 if (prot->rsk_prot && prot->rsk_prot->slab) {
2418 kmem_cache_destroy(prot->rsk_prot->slab);
2419 prot->rsk_prot->slab = NULL;
2420 }
2421out_free_request_sock_slab_name:
2422 if (prot->rsk_prot)
2423 kfree(prot->rsk_prot->slab_name);
2424out_free_sock_slab:
2425 kmem_cache_destroy(prot->slab);
2426 prot->slab = NULL;
2427out:
2428 return -ENOBUFS;
2429}
2430EXPORT_SYMBOL(proto_register);
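
/*
 * Registration sketch (hypothetical protocol; my_proto and my_sock are
 * illustrative names only). A protocol fills in a struct proto with at
 * least .name and .obj_size and registers it from its init path:
 *
 *	static struct proto my_proto = {
 *		.name		= "MYPROTO",
 *		.owner		= THIS_MODULE,
 *		.obj_size	= sizeof(struct my_sock),
 *	};
 *
 *	err = proto_register(&my_proto, 1);
 *
 * where the second argument requests a dedicated slab cache, and
 * proto_unregister(&my_proto) is called on the teardown path.
 */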
2431
2432void proto_unregister(struct proto *prot)
2433{
2434 write_lock(&proto_list_lock);
2435 release_proto_idx(prot);
2436 list_del(&prot->node);
2437 write_unlock(&proto_list_lock);
2438
2439 if (prot->slab != NULL) {
2440 kmem_cache_destroy(prot->slab);
2441 prot->slab = NULL;
2442 }
2443
2444 if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) {
2445 kmem_cache_destroy(prot->rsk_prot->slab);
2446 kfree(prot->rsk_prot->slab_name);
2447 prot->rsk_prot->slab = NULL;
2448 }
2449
2450 if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
2451 kmem_cache_destroy(prot->twsk_prot->twsk_slab);
2452 kfree(prot->twsk_prot->twsk_slab_name);
2453 prot->twsk_prot->twsk_slab = NULL;
2454 }
2455}
2456EXPORT_SYMBOL(proto_unregister);
2457
2458#ifdef CONFIG_PROC_FS
2459static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
2460 __acquires(proto_list_lock)
2461{
2462 read_lock(&proto_list_lock);
2463 return seq_list_start_head(&proto_list, *pos);
2464}
2465
2466static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2467{
2468 return seq_list_next(v, &proto_list, pos);
2469}
2470
2471static void proto_seq_stop(struct seq_file *seq, void *v)
2472 __releases(proto_list_lock)
2473{
2474 read_unlock(&proto_list_lock);
2475}
2476
2477static char proto_method_implemented(const void *method)
2478{
2479 return method == NULL ? 'n' : 'y';
2480}
2481
2482static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
2483{
2484 seq_printf(seq, "%-9s %4u %6d %6ld %-3s %6u %-3s %-10s "
2485 "%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
2486 proto->name,
2487 proto->obj_size,
2488 sock_prot_inuse_get(seq_file_net(seq), proto),
2489 proto->memory_allocated != NULL ? atomic_long_read(proto->memory_allocated) : -1L,
2490 proto->memory_pressure != NULL ? *proto->memory_pressure ? "yes" : "no" : "NI",
2491 proto->max_header,
2492 proto->slab == NULL ? "no" : "yes",
2493 module_name(proto->owner),
2494 proto_method_implemented(proto->close),
2495 proto_method_implemented(proto->connect),
2496 proto_method_implemented(proto->disconnect),
2497 proto_method_implemented(proto->accept),
2498 proto_method_implemented(proto->ioctl),
2499 proto_method_implemented(proto->init),
2500 proto_method_implemented(proto->destroy),
2501 proto_method_implemented(proto->shutdown),
2502 proto_method_implemented(proto->setsockopt),
2503 proto_method_implemented(proto->getsockopt),
2504 proto_method_implemented(proto->sendmsg),
2505 proto_method_implemented(proto->recvmsg),
2506 proto_method_implemented(proto->sendpage),
2507 proto_method_implemented(proto->bind),
2508 proto_method_implemented(proto->backlog_rcv),
2509 proto_method_implemented(proto->hash),
2510 proto_method_implemented(proto->unhash),
2511 proto_method_implemented(proto->get_port),
2512 proto_method_implemented(proto->enter_memory_pressure));
2513}
2514
2515static int proto_seq_show(struct seq_file *seq, void *v)
2516{
2517 if (v == &proto_list)
2518 seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
2519 "protocol",
2520 "size",
2521 "sockets",
2522 "memory",
2523 "press",
2524 "maxhdr",
2525 "slab",
2526 "module",
2527 "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
2528 else
2529 proto_seq_printf(seq, list_entry(v, struct proto, node));
2530 return 0;
2531}
2532
2533static const struct seq_operations proto_seq_ops = {
2534 .start = proto_seq_start,
2535 .next = proto_seq_next,
2536 .stop = proto_seq_stop,
2537 .show = proto_seq_show,
2538};
2539
2540static int proto_seq_open(struct inode *inode, struct file *file)
2541{
2542 return seq_open_net(inode, file, &proto_seq_ops,
2543 sizeof(struct seq_net_private));
2544}
2545
2546static const struct file_operations proto_seq_fops = {
2547 .owner = THIS_MODULE,
2548 .open = proto_seq_open,
2549 .read = seq_read,
2550 .llseek = seq_lseek,
2551 .release = seq_release_net,
2552};
2553
2554static __net_init int proto_init_net(struct net *net)
2555{
2556 if (!proc_net_fops_create(net, "protocols", S_IRUGO, &proto_seq_fops))
2557 return -ENOMEM;
2558
2559 return 0;
2560}
2561
2562static __net_exit void proto_exit_net(struct net *net)
2563{
2564 proc_net_remove(net, "protocols");
2565}
2566
2567
2568static __net_initdata struct pernet_operations proto_net_ops = {
2569 .init = proto_init_net,
2570 .exit = proto_exit_net,
2571};
2572
2573static int __init proto_init(void)
2574{
2575 return register_pernet_subsys(&proto_net_ops);
2576}
2577
2578subsys_initcall(proto_init);
2579
2580#endif /* PROC_FS */
91
92#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
93
94#include <linux/capability.h>
95#include <linux/errno.h>
96#include <linux/errqueue.h>
97#include <linux/types.h>
98#include <linux/socket.h>
99#include <linux/in.h>
100#include <linux/kernel.h>
101#include <linux/module.h>
102#include <linux/proc_fs.h>
103#include <linux/seq_file.h>
104#include <linux/sched.h>
105#include <linux/sched/mm.h>
106#include <linux/timer.h>
107#include <linux/string.h>
108#include <linux/sockios.h>
109#include <linux/net.h>
110#include <linux/mm.h>
111#include <linux/slab.h>
112#include <linux/interrupt.h>
113#include <linux/poll.h>
114#include <linux/tcp.h>
115#include <linux/init.h>
116#include <linux/highmem.h>
117#include <linux/user_namespace.h>
118#include <linux/static_key.h>
119#include <linux/memcontrol.h>
120#include <linux/prefetch.h>
121
122#include <linux/uaccess.h>
123
124#include <linux/netdevice.h>
125#include <net/protocol.h>
126#include <linux/skbuff.h>
127#include <net/net_namespace.h>
128#include <net/request_sock.h>
129#include <net/sock.h>
130#include <linux/net_tstamp.h>
131#include <net/xfrm.h>
132#include <linux/ipsec.h>
133#include <net/cls_cgroup.h>
134#include <net/netprio_cgroup.h>
135#include <linux/sock_diag.h>
136
137#include <linux/filter.h>
138#include <net/sock_reuseport.h>
139
140#include <trace/events/sock.h>
141
142#include <net/tcp.h>
143#include <net/busy_poll.h>
144
145static DEFINE_MUTEX(proto_list_mutex);
146static LIST_HEAD(proto_list);
147
148static void sock_inuse_add(struct net *net, int val);
149
150/**
151 * sk_ns_capable - General socket capability test
152 * @sk: Socket to use a capability on or through
153 * @user_ns: The user namespace of the capability to use
154 * @cap: The capability to use
155 *
156 * Test to see if the opener of the socket had the capability @cap when
157 * the socket was created and whether the current process has it in the
158 * user namespace @user_ns.
159 */
160bool sk_ns_capable(const struct sock *sk,
161 struct user_namespace *user_ns, int cap)
162{
163 return file_ns_capable(sk->sk_socket->file, user_ns, cap) &&
164 ns_capable(user_ns, cap);
165}
166EXPORT_SYMBOL(sk_ns_capable);
167
168/**
169 * sk_capable - Socket global capability test
170 * @sk: Socket to use a capability on or through
171 * @cap: The global capability to use
172 *
173 * Test to see if the opener of the socket had the capability @cap when
174 * the socket was created and whether the current process has it in all
175 * user namespaces (i.e. in the initial user namespace).
176 */
177bool sk_capable(const struct sock *sk, int cap)
178{
179 return sk_ns_capable(sk, &init_user_ns, cap);
180}
181EXPORT_SYMBOL(sk_capable);
182
183/**
184 * sk_net_capable - Network namespace socket capability test
185 * @sk: Socket to use a capability on or through
186 * @cap: The capability to use
187 *
188 * Test to see if the opener of the socket had the capability @cap when the
189 * socket was created and whether the current process has it over the network
190 * namespace the socket is a member of.
191 */
192bool sk_net_capable(const struct sock *sk, int cap)
193{
194 return sk_ns_capable(sk, sock_net(sk)->user_ns, cap);
195}
196EXPORT_SYMBOL(sk_net_capable);
197
198/*
199 * Each address family might have different locking rules, so we have
200 * one slock key per address family and separate keys for internal and
201 * userspace sockets.
202 */
203static struct lock_class_key af_family_keys[AF_MAX];
204static struct lock_class_key af_family_kern_keys[AF_MAX];
205static struct lock_class_key af_family_slock_keys[AF_MAX];
206static struct lock_class_key af_family_kern_slock_keys[AF_MAX];
207
208/*
209 * Make lock validator output more readable (we pre-construct these
210 * strings at build time, so that runtime initialization of socket
211 * locks is fast):
212 */
213
214#define _sock_locks(x) \
215 x "AF_UNSPEC", x "AF_UNIX" , x "AF_INET" , \
216 x "AF_AX25" , x "AF_IPX" , x "AF_APPLETALK", \
217 x "AF_NETROM", x "AF_BRIDGE" , x "AF_ATMPVC" , \
218 x "AF_X25" , x "AF_INET6" , x "AF_ROSE" , \
219 x "AF_DECnet", x "AF_NETBEUI" , x "AF_SECURITY" , \
220 x "AF_KEY" , x "AF_NETLINK" , x "AF_PACKET" , \
221 x "AF_ASH" , x "AF_ECONET" , x "AF_ATMSVC" , \
222 x "AF_RDS" , x "AF_SNA" , x "AF_IRDA" , \
223 x "AF_PPPOX" , x "AF_WANPIPE" , x "AF_LLC" , \
224 x "27" , x "28" , x "AF_CAN" , \
225 x "AF_TIPC" , x "AF_BLUETOOTH", x "IUCV" , \
226 x "AF_RXRPC" , x "AF_ISDN" , x "AF_PHONET" , \
227 x "AF_IEEE802154", x "AF_CAIF" , x "AF_ALG" , \
228 x "AF_NFC" , x "AF_VSOCK" , x "AF_KCM" , \
229 x "AF_QIPCRTR", x "AF_SMC" , x "AF_MAX"
230
231static const char *const af_family_key_strings[AF_MAX+1] = {
232 _sock_locks("sk_lock-")
233};
234static const char *const af_family_slock_key_strings[AF_MAX+1] = {
235 _sock_locks("slock-")
236};
237static const char *const af_family_clock_key_strings[AF_MAX+1] = {
238 _sock_locks("clock-")
239};
240
241static const char *const af_family_kern_key_strings[AF_MAX+1] = {
242 _sock_locks("k-sk_lock-")
243};
244static const char *const af_family_kern_slock_key_strings[AF_MAX+1] = {
245 _sock_locks("k-slock-")
246};
247static const char *const af_family_kern_clock_key_strings[AF_MAX+1] = {
248 _sock_locks("k-clock-")
249};
250static const char *const af_family_rlock_key_strings[AF_MAX+1] = {
251 "rlock-AF_UNSPEC", "rlock-AF_UNIX" , "rlock-AF_INET" ,
252 "rlock-AF_AX25" , "rlock-AF_IPX" , "rlock-AF_APPLETALK",
253 "rlock-AF_NETROM", "rlock-AF_BRIDGE" , "rlock-AF_ATMPVC" ,
254 "rlock-AF_X25" , "rlock-AF_INET6" , "rlock-AF_ROSE" ,
255 "rlock-AF_DECnet", "rlock-AF_NETBEUI" , "rlock-AF_SECURITY" ,
256 "rlock-AF_KEY" , "rlock-AF_NETLINK" , "rlock-AF_PACKET" ,
257 "rlock-AF_ASH" , "rlock-AF_ECONET" , "rlock-AF_ATMSVC" ,
258 "rlock-AF_RDS" , "rlock-AF_SNA" , "rlock-AF_IRDA" ,
259 "rlock-AF_PPPOX" , "rlock-AF_WANPIPE" , "rlock-AF_LLC" ,
260 "rlock-27" , "rlock-28" , "rlock-AF_CAN" ,
261 "rlock-AF_TIPC" , "rlock-AF_BLUETOOTH", "rlock-AF_IUCV" ,
262 "rlock-AF_RXRPC" , "rlock-AF_ISDN" , "rlock-AF_PHONET" ,
263 "rlock-AF_IEEE802154", "rlock-AF_CAIF" , "rlock-AF_ALG" ,
264 "rlock-AF_NFC" , "rlock-AF_VSOCK" , "rlock-AF_KCM" ,
265 "rlock-AF_QIPCRTR", "rlock-AF_SMC" , "rlock-AF_MAX"
266};
267static const char *const af_family_wlock_key_strings[AF_MAX+1] = {
268 "wlock-AF_UNSPEC", "wlock-AF_UNIX" , "wlock-AF_INET" ,
269 "wlock-AF_AX25" , "wlock-AF_IPX" , "wlock-AF_APPLETALK",
270 "wlock-AF_NETROM", "wlock-AF_BRIDGE" , "wlock-AF_ATMPVC" ,
271 "wlock-AF_X25" , "wlock-AF_INET6" , "wlock-AF_ROSE" ,
272 "wlock-AF_DECnet", "wlock-AF_NETBEUI" , "wlock-AF_SECURITY" ,
273 "wlock-AF_KEY" , "wlock-AF_NETLINK" , "wlock-AF_PACKET" ,
274 "wlock-AF_ASH" , "wlock-AF_ECONET" , "wlock-AF_ATMSVC" ,
275 "wlock-AF_RDS" , "wlock-AF_SNA" , "wlock-AF_IRDA" ,
276 "wlock-AF_PPPOX" , "wlock-AF_WANPIPE" , "wlock-AF_LLC" ,
277 "wlock-27" , "wlock-28" , "wlock-AF_CAN" ,
278 "wlock-AF_TIPC" , "wlock-AF_BLUETOOTH", "wlock-AF_IUCV" ,
279 "wlock-AF_RXRPC" , "wlock-AF_ISDN" , "wlock-AF_PHONET" ,
280 "wlock-AF_IEEE802154", "wlock-AF_CAIF" , "wlock-AF_ALG" ,
281 "wlock-AF_NFC" , "wlock-AF_VSOCK" , "wlock-AF_KCM" ,
282 "wlock-AF_QIPCRTR", "wlock-AF_SMC" , "wlock-AF_MAX"
283};
284static const char *const af_family_elock_key_strings[AF_MAX+1] = {
285 "elock-AF_UNSPEC", "elock-AF_UNIX" , "elock-AF_INET" ,
286 "elock-AF_AX25" , "elock-AF_IPX" , "elock-AF_APPLETALK",
287 "elock-AF_NETROM", "elock-AF_BRIDGE" , "elock-AF_ATMPVC" ,
288 "elock-AF_X25" , "elock-AF_INET6" , "elock-AF_ROSE" ,
289 "elock-AF_DECnet", "elock-AF_NETBEUI" , "elock-AF_SECURITY" ,
290 "elock-AF_KEY" , "elock-AF_NETLINK" , "elock-AF_PACKET" ,
291 "elock-AF_ASH" , "elock-AF_ECONET" , "elock-AF_ATMSVC" ,
292 "elock-AF_RDS" , "elock-AF_SNA" , "elock-AF_IRDA" ,
293 "elock-AF_PPPOX" , "elock-AF_WANPIPE" , "elock-AF_LLC" ,
294 "elock-27" , "elock-28" , "elock-AF_CAN" ,
295 "elock-AF_TIPC" , "elock-AF_BLUETOOTH", "elock-AF_IUCV" ,
296 "elock-AF_RXRPC" , "elock-AF_ISDN" , "elock-AF_PHONET" ,
297 "elock-AF_IEEE802154", "elock-AF_CAIF" , "elock-AF_ALG" ,
298 "elock-AF_NFC" , "elock-AF_VSOCK" , "elock-AF_KCM" ,
299 "elock-AF_QIPCRTR", "elock-AF_SMC" , "elock-AF_MAX"
300};
301
302/*
303 * sk_callback_lock and sk queues locking rules are per-address-family,
304 * so split the lock classes by using a per-AF key:
305 */
306static struct lock_class_key af_callback_keys[AF_MAX];
307static struct lock_class_key af_rlock_keys[AF_MAX];
308static struct lock_class_key af_wlock_keys[AF_MAX];
309static struct lock_class_key af_elock_keys[AF_MAX];
310static struct lock_class_key af_kern_callback_keys[AF_MAX];
311
312/* Run time adjustable parameters. */
313__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
314EXPORT_SYMBOL(sysctl_wmem_max);
315__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
316EXPORT_SYMBOL(sysctl_rmem_max);
317__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
318__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;
319
320/* Maximal space eaten by iovec or ancillary data plus some space */
321int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
322EXPORT_SYMBOL(sysctl_optmem_max);
323
324int sysctl_tstamp_allow_data __read_mostly = 1;
325
326struct static_key memalloc_socks = STATIC_KEY_INIT_FALSE;
327EXPORT_SYMBOL_GPL(memalloc_socks);
328
329/**
330 * sk_set_memalloc - sets %SOCK_MEMALLOC
331 * @sk: socket to set it on
332 *
333 * Set %SOCK_MEMALLOC on a socket for access to emergency reserves.
334 * It's the responsibility of the admin to adjust min_free_kbytes
335 * to meet the requirements.
336 */
337void sk_set_memalloc(struct sock *sk)
338{
339 sock_set_flag(sk, SOCK_MEMALLOC);
340 sk->sk_allocation |= __GFP_MEMALLOC;
341 static_key_slow_inc(&memalloc_socks);
342}
343EXPORT_SYMBOL_GPL(sk_set_memalloc);
344
345void sk_clear_memalloc(struct sock *sk)
346{
347 sock_reset_flag(sk, SOCK_MEMALLOC);
348 sk->sk_allocation &= ~__GFP_MEMALLOC;
349 static_key_slow_dec(&memalloc_socks);
350
351 /*
352 * SOCK_MEMALLOC is allowed to ignore rmem limits to ensure forward
353 * progress of swapping. SOCK_MEMALLOC may be cleared while
354 * it has rmem allocations due to the last swapfile being deactivated
355 * but there is a risk that the socket is unusable due to exceeding
356 * the rmem limits. Reclaim the reserves and obey rmem limits again.
357 */
358 sk_mem_reclaim(sk);
359}
360EXPORT_SYMBOL_GPL(sk_clear_memalloc);
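
/*
 * SOCK_MEMALLOC sockets sit on the memory-reclaim path (swap-over-NFS
 * style setups are the canonical example): flagging __GFP_MEMALLOC lets
 * their allocations dip into the emergency reserves so that reclaim
 * traffic can still make progress when the system is otherwise out of
 * memory.
 */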
361
362int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
363{
364 int ret;
365 unsigned int noreclaim_flag;
366
367 /* these should have been dropped before queueing */
368 BUG_ON(!sock_flag(sk, SOCK_MEMALLOC));
369
370 noreclaim_flag = memalloc_noreclaim_save();
371 ret = sk->sk_backlog_rcv(sk, skb);
372 memalloc_noreclaim_restore(noreclaim_flag);
373
374 return ret;
375}
376EXPORT_SYMBOL(__sk_backlog_rcv);
377
378static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
379{
380 struct timeval tv;
381
382 if (optlen < sizeof(tv))
383 return -EINVAL;
384 if (copy_from_user(&tv, optval, sizeof(tv)))
385 return -EFAULT;
386 if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
387 return -EDOM;
388
389 if (tv.tv_sec < 0) {
390 static int warned __read_mostly;
391
392 *timeo_p = 0;
393 if (warned < 10 && net_ratelimit()) {
394 warned++;
395 pr_info("%s: `%s' (pid %d) tries to set negative timeout\n",
396 __func__, current->comm, task_pid_nr(current));
397 }
398 return 0;
399 }
400 *timeo_p = MAX_SCHEDULE_TIMEOUT;
401 if (tv.tv_sec == 0 && tv.tv_usec == 0)
402 return 0;
403 if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
404 *timeo_p = tv.tv_sec * HZ + DIV_ROUND_UP(tv.tv_usec, USEC_PER_SEC / HZ);
405 return 0;
406}
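
/*
 * Worked example for the conversion above: with HZ == 1000, a user value
 * of { .tv_sec = 2, .tv_usec = 500000 } becomes
 * 2 * 1000 + DIV_ROUND_UP(500000, 1000000 / 1000) = 2500 jiffies; the
 * microsecond part is rounded up to the next tick, so a sub-tick value
 * still counts as one jiffy rather than vanishing.
 */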
407
408static void sock_warn_obsolete_bsdism(const char *name)
409{
410 static int warned;
411 static char warncomm[TASK_COMM_LEN];
412 if (strcmp(warncomm, current->comm) && warned < 5) {
413 strcpy(warncomm, current->comm);
414 pr_warn("process `%s' is using obsolete %s SO_BSDCOMPAT\n",
415 warncomm, name);
416 warned++;
417 }
418}
419
420static bool sock_needs_netstamp(const struct sock *sk)
421{
422 switch (sk->sk_family) {
423 case AF_UNSPEC:
424 case AF_UNIX:
425 return false;
426 default:
427 return true;
428 }
429}
430
431static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
432{
433 if (sk->sk_flags & flags) {
434 sk->sk_flags &= ~flags;
435 if (sock_needs_netstamp(sk) &&
436 !(sk->sk_flags & SK_FLAGS_TIMESTAMP))
437 net_disable_timestamp();
438 }
439}
440
441
442int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
443{
444 unsigned long flags;
445 struct sk_buff_head *list = &sk->sk_receive_queue;
446
447 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
448 atomic_inc(&sk->sk_drops);
449 trace_sock_rcvqueue_full(sk, skb);
450 return -ENOMEM;
451 }
452
453 if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
454 atomic_inc(&sk->sk_drops);
455 return -ENOBUFS;
456 }
457
458 skb->dev = NULL;
459 skb_set_owner_r(skb, sk);
460
461	/* We escape from the RCU-protected region, so make sure we don't leak
462	 * a non-refcounted dst.
463	 */
464 skb_dst_force(skb);
465
466 spin_lock_irqsave(&list->lock, flags);
467 sock_skb_set_dropcount(sk, skb);
468 __skb_queue_tail(list, skb);
469 spin_unlock_irqrestore(&list->lock, flags);
470
471 if (!sock_flag(sk, SOCK_DEAD))
472 sk->sk_data_ready(sk);
473 return 0;
474}
475EXPORT_SYMBOL(__sock_queue_rcv_skb);
476
477int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
478{
479 int err;
480
481 err = sk_filter(sk, skb);
482 if (err)
483 return err;
484
485 return __sock_queue_rcv_skb(sk, skb);
486}
487EXPORT_SYMBOL(sock_queue_rcv_skb);
488
489int __sk_receive_skb(struct sock *sk, struct sk_buff *skb,
490 const int nested, unsigned int trim_cap, bool refcounted)
491{
492 int rc = NET_RX_SUCCESS;
493
494 if (sk_filter_trim_cap(sk, skb, trim_cap))
495 goto discard_and_relse;
496
497 skb->dev = NULL;
498
499 if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
500 atomic_inc(&sk->sk_drops);
501 goto discard_and_relse;
502 }
503 if (nested)
504 bh_lock_sock_nested(sk);
505 else
506 bh_lock_sock(sk);
507 if (!sock_owned_by_user(sk)) {
508 /*
509 * trylock + unlock semantics:
510 */
511 mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);
512
513 rc = sk_backlog_rcv(sk, skb);
514
515 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
516 } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
517 bh_unlock_sock(sk);
518 atomic_inc(&sk->sk_drops);
519 goto discard_and_relse;
520 }
521
522 bh_unlock_sock(sk);
523out:
524 if (refcounted)
525 sock_put(sk);
526 return rc;
527discard_and_relse:
528 kfree_skb(skb);
529 goto out;
530}
531EXPORT_SYMBOL(__sk_receive_skb);
532
533struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
534{
535 struct dst_entry *dst = __sk_dst_get(sk);
536
537 if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
538 sk_tx_queue_clear(sk);
539 sk->sk_dst_pending_confirm = 0;
540 RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
541 dst_release(dst);
542 return NULL;
543 }
544
545 return dst;
546}
547EXPORT_SYMBOL(__sk_dst_check);
548
549struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
550{
551 struct dst_entry *dst = sk_dst_get(sk);
552
553 if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
554 sk_dst_reset(sk);
555 dst_release(dst);
556 return NULL;
557 }
558
559 return dst;
560}
561EXPORT_SYMBOL(sk_dst_check);
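
/*
 * The two checks above differ only in how the dst is obtained:
 * __sk_dst_check() works on the raw cached pointer and expects the caller
 * to hold the socket lock, while sk_dst_check() takes its own reference
 * via sk_dst_get() and needs no lock. Both release an obsolete route
 * whose ->check() callback fails for the given cookie and return NULL so
 * the caller re-routes.
 */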
562
563static int sock_setbindtodevice(struct sock *sk, char __user *optval,
564 int optlen)
565{
566 int ret = -ENOPROTOOPT;
567#ifdef CONFIG_NETDEVICES
568 struct net *net = sock_net(sk);
569 char devname[IFNAMSIZ];
570 int index;
571
572 /* Sorry... */
573 ret = -EPERM;
574 if (!ns_capable(net->user_ns, CAP_NET_RAW))
575 goto out;
576
577 ret = -EINVAL;
578 if (optlen < 0)
579 goto out;
580
581 /* Bind this socket to a particular device like "eth0",
582 * as specified in the passed interface name. If the
583 * name is "" or the option length is zero the socket
584 * is not bound.
585 */
586 if (optlen > IFNAMSIZ - 1)
587 optlen = IFNAMSIZ - 1;
588 memset(devname, 0, sizeof(devname));
589
590 ret = -EFAULT;
591 if (copy_from_user(devname, optval, optlen))
592 goto out;
593
594 index = 0;
595 if (devname[0] != '\0') {
596 struct net_device *dev;
597
598 rcu_read_lock();
599 dev = dev_get_by_name_rcu(net, devname);
600 if (dev)
601 index = dev->ifindex;
602 rcu_read_unlock();
603 ret = -ENODEV;
604 if (!dev)
605 goto out;
606 }
607
608 lock_sock(sk);
609 sk->sk_bound_dev_if = index;
610 sk_dst_reset(sk);
611 release_sock(sk);
612
613 ret = 0;
614
615out:
616#endif
617
618 return ret;
619}
620
621static int sock_getbindtodevice(struct sock *sk, char __user *optval,
622 int __user *optlen, int len)
623{
624 int ret = -ENOPROTOOPT;
625#ifdef CONFIG_NETDEVICES
626 struct net *net = sock_net(sk);
627 char devname[IFNAMSIZ];
628
629 if (sk->sk_bound_dev_if == 0) {
630 len = 0;
631 goto zero;
632 }
633
634 ret = -EINVAL;
635 if (len < IFNAMSIZ)
636 goto out;
637
638 ret = netdev_get_name(net, devname, sk->sk_bound_dev_if);
639 if (ret)
640 goto out;
641
642 len = strlen(devname) + 1;
643
644 ret = -EFAULT;
645 if (copy_to_user(optval, devname, len))
646 goto out;
647
648zero:
649 ret = -EFAULT;
650 if (put_user(len, optlen))
651 goto out;
652
653 ret = 0;
654
655out:
656#endif
657
658 return ret;
659}
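
/*
 * Userspace view of the two helpers above (a sketch; "eth0" is just an
 * illustrative interface name). Binding requires CAP_NET_RAW, and the
 * kernel stores the ifindex rather than the name:
 *
 *	setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, "eth0", strlen("eth0") + 1);
 *
 *	char ifname[IFNAMSIZ];
 *	socklen_t len = sizeof(ifname);
 *	getsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, ifname, &len);
 *
 * Passing an empty name (or a zero option length) unbinds the socket.
 */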
660
661static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
662{
663 if (valbool)
664 sock_set_flag(sk, bit);
665 else
666 sock_reset_flag(sk, bit);
667}
668
669bool sk_mc_loop(struct sock *sk)
670{
671 if (dev_recursion_level())
672 return false;
673 if (!sk)
674 return true;
675 switch (sk->sk_family) {
676 case AF_INET:
677 return inet_sk(sk)->mc_loop;
678#if IS_ENABLED(CONFIG_IPV6)
679 case AF_INET6:
680 return inet6_sk(sk)->mc_loop;
681#endif
682 }
683 WARN_ON(1);
684 return true;
685}
686EXPORT_SYMBOL(sk_mc_loop);
687
688/*
689 * This is meant for all protocols to use and covers goings on
690 * at the socket level. Everything here is generic.
691 */
692
693int sock_setsockopt(struct socket *sock, int level, int optname,
694 char __user *optval, unsigned int optlen)
695{
696 struct sock *sk = sock->sk;
697 int val;
698 int valbool;
699 struct linger ling;
700 int ret = 0;
701
702 /*
703 * Options without arguments
704 */
705
706 if (optname == SO_BINDTODEVICE)
707 return sock_setbindtodevice(sk, optval, optlen);
708
709 if (optlen < sizeof(int))
710 return -EINVAL;
711
712 if (get_user(val, (int __user *)optval))
713 return -EFAULT;
714
715 valbool = val ? 1 : 0;
716
717 lock_sock(sk);
718
719 switch (optname) {
720 case SO_DEBUG:
721 if (val && !capable(CAP_NET_ADMIN))
722 ret = -EACCES;
723 else
724 sock_valbool_flag(sk, SOCK_DBG, valbool);
725 break;
726 case SO_REUSEADDR:
727 sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
728 break;
729 case SO_REUSEPORT:
730 sk->sk_reuseport = valbool;
731 break;
732 case SO_TYPE:
733 case SO_PROTOCOL:
734 case SO_DOMAIN:
735 case SO_ERROR:
736 ret = -ENOPROTOOPT;
737 break;
738 case SO_DONTROUTE:
739 sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
740 break;
741 case SO_BROADCAST:
742 sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
743 break;
744 case SO_SNDBUF:
745		/* Don't error on this; BSD doesn't, and if you think
746		 * about it, this is right. Otherwise apps have to
747		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
748		 * are treated in BSD as hints.
749		 */
750 val = min_t(u32, val, sysctl_wmem_max);
751set_sndbuf:
752 sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
753 sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
754 /* Wake up sending tasks if we upped the value. */
755 sk->sk_write_space(sk);
756 break;
757
758 case SO_SNDBUFFORCE:
759 if (!capable(CAP_NET_ADMIN)) {
760 ret = -EPERM;
761 break;
762 }
763 goto set_sndbuf;
764
765 case SO_RCVBUF:
766		/* Don't error on this; BSD doesn't, and if you think
767		 * about it, this is right. Otherwise apps have to
768		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
769		 * are treated in BSD as hints.
770		 */
771 val = min_t(u32, val, sysctl_rmem_max);
772set_rcvbuf:
773 sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
774 /*
775 * We double it on the way in to account for
776 * "struct sk_buff" etc. overhead. Applications
777 * assume that the SO_RCVBUF setting they make will
778 * allow that much actual data to be received on that
779 * socket.
780 *
781 * Applications are unaware that "struct sk_buff" and
782 * other overheads allocate from the receive buffer
783 * during socket buffer allocation.
784 *
785 * And after considering the possible alternatives,
786 * returning the value we actually used in getsockopt
787 * is the most desirable behavior.
788 */
789 sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF);
790 break;
791
792 case SO_RCVBUFFORCE:
793 if (!capable(CAP_NET_ADMIN)) {
794 ret = -EPERM;
795 break;
796 }
797 goto set_rcvbuf;
798
799 case SO_KEEPALIVE:
800 if (sk->sk_prot->keepalive)
801 sk->sk_prot->keepalive(sk, valbool);
802 sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
803 break;
804
805 case SO_OOBINLINE:
806 sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
807 break;
808
809 case SO_NO_CHECK:
810 sk->sk_no_check_tx = valbool;
811 break;
812
813 case SO_PRIORITY:
814 if ((val >= 0 && val <= 6) ||
815 ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
816 sk->sk_priority = val;
817 else
818 ret = -EPERM;
819 break;
820
821 case SO_LINGER:
822 if (optlen < sizeof(ling)) {
823 ret = -EINVAL; /* 1003.1g */
824 break;
825 }
826 if (copy_from_user(&ling, optval, sizeof(ling))) {
827 ret = -EFAULT;
828 break;
829 }
830 if (!ling.l_onoff)
831 sock_reset_flag(sk, SOCK_LINGER);
832 else {
833#if (BITS_PER_LONG == 32)
834 if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
835 sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
836 else
837#endif
838 sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
839 sock_set_flag(sk, SOCK_LINGER);
840 }
841 break;
842
843 case SO_BSDCOMPAT:
844 sock_warn_obsolete_bsdism("setsockopt");
845 break;
846
847 case SO_PASSCRED:
848 if (valbool)
849 set_bit(SOCK_PASSCRED, &sock->flags);
850 else
851 clear_bit(SOCK_PASSCRED, &sock->flags);
852 break;
853
854 case SO_TIMESTAMP:
855 case SO_TIMESTAMPNS:
856 if (valbool) {
857 if (optname == SO_TIMESTAMP)
858 sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
859 else
860 sock_set_flag(sk, SOCK_RCVTSTAMPNS);
861 sock_set_flag(sk, SOCK_RCVTSTAMP);
862 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
863 } else {
864 sock_reset_flag(sk, SOCK_RCVTSTAMP);
865 sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
866 }
867 break;
868
869 case SO_TIMESTAMPING:
870 if (val & ~SOF_TIMESTAMPING_MASK) {
871 ret = -EINVAL;
872 break;
873 }
874
875 if (val & SOF_TIMESTAMPING_OPT_ID &&
876 !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) {
877 if (sk->sk_protocol == IPPROTO_TCP &&
878 sk->sk_type == SOCK_STREAM) {
879 if ((1 << sk->sk_state) &
880 (TCPF_CLOSE | TCPF_LISTEN)) {
881 ret = -EINVAL;
882 break;
883 }
884 sk->sk_tskey = tcp_sk(sk)->snd_una;
885 } else {
886 sk->sk_tskey = 0;
887 }
888 }
889
890 if (val & SOF_TIMESTAMPING_OPT_STATS &&
891 !(val & SOF_TIMESTAMPING_OPT_TSONLY)) {
892 ret = -EINVAL;
893 break;
894 }
895
896 sk->sk_tsflags = val;
897 if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
898 sock_enable_timestamp(sk,
899 SOCK_TIMESTAMPING_RX_SOFTWARE);
900 else
901 sock_disable_timestamp(sk,
902 (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
903 break;
904
905 case SO_RCVLOWAT:
906 if (val < 0)
907 val = INT_MAX;
908 sk->sk_rcvlowat = val ? : 1;
909 break;
910
911 case SO_RCVTIMEO:
912 ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
913 break;
914
915 case SO_SNDTIMEO:
916 ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
917 break;
918
919 case SO_ATTACH_FILTER:
920 ret = -EINVAL;
921 if (optlen == sizeof(struct sock_fprog)) {
922 struct sock_fprog fprog;
923
924 ret = -EFAULT;
925 if (copy_from_user(&fprog, optval, sizeof(fprog)))
926 break;
927
928 ret = sk_attach_filter(&fprog, sk);
929 }
930 break;
931
932 case SO_ATTACH_BPF:
933 ret = -EINVAL;
934 if (optlen == sizeof(u32)) {
935 u32 ufd;
936
937 ret = -EFAULT;
938 if (copy_from_user(&ufd, optval, sizeof(ufd)))
939 break;
940
941 ret = sk_attach_bpf(ufd, sk);
942 }
943 break;
944
945 case SO_ATTACH_REUSEPORT_CBPF:
946 ret = -EINVAL;
947 if (optlen == sizeof(struct sock_fprog)) {
948 struct sock_fprog fprog;
949
950 ret = -EFAULT;
951 if (copy_from_user(&fprog, optval, sizeof(fprog)))
952 break;
953
954 ret = sk_reuseport_attach_filter(&fprog, sk);
955 }
956 break;
957
958 case SO_ATTACH_REUSEPORT_EBPF:
959 ret = -EINVAL;
960 if (optlen == sizeof(u32)) {
961 u32 ufd;
962
963 ret = -EFAULT;
964 if (copy_from_user(&ufd, optval, sizeof(ufd)))
965 break;
966
967 ret = sk_reuseport_attach_bpf(ufd, sk);
968 }
969 break;
970
971 case SO_DETACH_FILTER:
972 ret = sk_detach_filter(sk);
973 break;
974
975 case SO_LOCK_FILTER:
976 if (sock_flag(sk, SOCK_FILTER_LOCKED) && !valbool)
977 ret = -EPERM;
978 else
979 sock_valbool_flag(sk, SOCK_FILTER_LOCKED, valbool);
980 break;
981
982 case SO_PASSSEC:
983 if (valbool)
984 set_bit(SOCK_PASSSEC, &sock->flags);
985 else
986 clear_bit(SOCK_PASSSEC, &sock->flags);
987 break;
988 case SO_MARK:
989 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
990 ret = -EPERM;
991 else
992 sk->sk_mark = val;
993 break;
994
995 case SO_RXQ_OVFL:
996 sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
997 break;
998
999 case SO_WIFI_STATUS:
1000 sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
1001 break;
1002
1003 case SO_PEEK_OFF:
1004 if (sock->ops->set_peek_off)
1005 ret = sock->ops->set_peek_off(sk, val);
1006 else
1007 ret = -EOPNOTSUPP;
1008 break;
1009
1010 case SO_NOFCS:
1011 sock_valbool_flag(sk, SOCK_NOFCS, valbool);
1012 break;
1013
1014 case SO_SELECT_ERR_QUEUE:
1015 sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool);
1016 break;
1017
1018#ifdef CONFIG_NET_RX_BUSY_POLL
1019 case SO_BUSY_POLL:
1020 /* allow unprivileged users to decrease the value */
1021 if ((val > sk->sk_ll_usec) && !capable(CAP_NET_ADMIN))
1022 ret = -EPERM;
1023 else {
1024 if (val < 0)
1025 ret = -EINVAL;
1026 else
1027 sk->sk_ll_usec = val;
1028 }
1029 break;
1030#endif
1031
1032 case SO_MAX_PACING_RATE:
1033 if (val != ~0U)
1034 cmpxchg(&sk->sk_pacing_status,
1035 SK_PACING_NONE,
1036 SK_PACING_NEEDED);
1037 sk->sk_max_pacing_rate = val;
1038 sk->sk_pacing_rate = min(sk->sk_pacing_rate,
1039 sk->sk_max_pacing_rate);
1040 break;
1041
1042 case SO_INCOMING_CPU:
1043 sk->sk_incoming_cpu = val;
1044 break;
1045
1046 case SO_CNX_ADVICE:
1047 if (val == 1)
1048 dst_negative_advice(sk);
1049 break;
1050
1051 case SO_ZEROCOPY:
1052 if (sk->sk_family == PF_INET || sk->sk_family == PF_INET6) {
1053 if (sk->sk_protocol != IPPROTO_TCP)
1054 ret = -ENOTSUPP;
1055 } else if (sk->sk_family != PF_RDS) {
1056 ret = -ENOTSUPP;
1057 }
1058 if (!ret) {
1059 if (val < 0 || val > 1)
1060 ret = -EINVAL;
1061 else
1062 sock_valbool_flag(sk, SOCK_ZEROCOPY, valbool);
1063 }
1064 break;
1065
1066 default:
1067 ret = -ENOPROTOOPT;
1068 break;
1069 }
1070 release_sock(sk);
1071 return ret;
1072}
1073EXPORT_SYMBOL(sock_setsockopt);
1074
1075
1076static void cred_to_ucred(struct pid *pid, const struct cred *cred,
1077 struct ucred *ucred)
1078{
1079 ucred->pid = pid_vnr(pid);
1080 ucred->uid = ucred->gid = -1;
1081 if (cred) {
1082 struct user_namespace *current_ns = current_user_ns();
1083
1084 ucred->uid = from_kuid_munged(current_ns, cred->euid);
1085 ucred->gid = from_kgid_munged(current_ns, cred->egid);
1086 }
1087}
1088
1089static int groups_to_user(gid_t __user *dst, const struct group_info *src)
1090{
1091 struct user_namespace *user_ns = current_user_ns();
1092 int i;
1093
1094 for (i = 0; i < src->ngroups; i++)
1095 if (put_user(from_kgid_munged(user_ns, src->gid[i]), dst + i))
1096 return -EFAULT;
1097
1098 return 0;
1099}
1100
1101int sock_getsockopt(struct socket *sock, int level, int optname,
1102 char __user *optval, int __user *optlen)
1103{
1104 struct sock *sk = sock->sk;
1105
1106 union {
1107 int val;
1108 u64 val64;
1109 struct linger ling;
1110 struct timeval tm;
1111 } v;
1112
1113 int lv = sizeof(int);
1114 int len;
1115
1116 if (get_user(len, optlen))
1117 return -EFAULT;
1118 if (len < 0)
1119 return -EINVAL;
1120
1121 memset(&v, 0, sizeof(v));
1122
1123 switch (optname) {
1124 case SO_DEBUG:
1125 v.val = sock_flag(sk, SOCK_DBG);
1126 break;
1127
1128 case SO_DONTROUTE:
1129 v.val = sock_flag(sk, SOCK_LOCALROUTE);
1130 break;
1131
1132 case SO_BROADCAST:
1133 v.val = sock_flag(sk, SOCK_BROADCAST);
1134 break;
1135
1136 case SO_SNDBUF:
1137 v.val = sk->sk_sndbuf;
1138 break;
1139
1140 case SO_RCVBUF:
1141 v.val = sk->sk_rcvbuf;
1142 break;
1143
1144 case SO_REUSEADDR:
1145 v.val = sk->sk_reuse;
1146 break;
1147
1148 case SO_REUSEPORT:
1149 v.val = sk->sk_reuseport;
1150 break;
1151
1152 case SO_KEEPALIVE:
1153 v.val = sock_flag(sk, SOCK_KEEPOPEN);
1154 break;
1155
1156 case SO_TYPE:
1157 v.val = sk->sk_type;
1158 break;
1159
1160 case SO_PROTOCOL:
1161 v.val = sk->sk_protocol;
1162 break;
1163
1164 case SO_DOMAIN:
1165 v.val = sk->sk_family;
1166 break;
1167
1168 case SO_ERROR:
1169 v.val = -sock_error(sk);
1170 if (v.val == 0)
1171 v.val = xchg(&sk->sk_err_soft, 0);
1172 break;
1173
1174 case SO_OOBINLINE:
1175 v.val = sock_flag(sk, SOCK_URGINLINE);
1176 break;
1177
1178 case SO_NO_CHECK:
1179 v.val = sk->sk_no_check_tx;
1180 break;
1181
1182 case SO_PRIORITY:
1183 v.val = sk->sk_priority;
1184 break;
1185
1186 case SO_LINGER:
1187 lv = sizeof(v.ling);
1188 v.ling.l_onoff = sock_flag(sk, SOCK_LINGER);
1189 v.ling.l_linger = sk->sk_lingertime / HZ;
1190 break;
1191
1192 case SO_BSDCOMPAT:
1193 sock_warn_obsolete_bsdism("getsockopt");
1194 break;
1195
1196 case SO_TIMESTAMP:
1197 v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
1198 !sock_flag(sk, SOCK_RCVTSTAMPNS);
1199 break;
1200
1201 case SO_TIMESTAMPNS:
1202 v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
1203 break;
1204
1205 case SO_TIMESTAMPING:
1206 v.val = sk->sk_tsflags;
1207 break;
1208
1209 case SO_RCVTIMEO:
1210 lv = sizeof(struct timeval);
1211 if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
1212 v.tm.tv_sec = 0;
1213 v.tm.tv_usec = 0;
1214 } else {
1215 v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
1216 v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * USEC_PER_SEC) / HZ;
1217 }
1218 break;
1219
1220 case SO_SNDTIMEO:
1221 lv = sizeof(struct timeval);
1222 if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
1223 v.tm.tv_sec = 0;
1224 v.tm.tv_usec = 0;
1225 } else {
1226 v.tm.tv_sec = sk->sk_sndtimeo / HZ;
1227 v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * USEC_PER_SEC) / HZ;
1228 }
1229 break;
1230
1231 case SO_RCVLOWAT:
1232 v.val = sk->sk_rcvlowat;
1233 break;
1234
1235 case SO_SNDLOWAT:
1236 v.val = 1;
1237 break;
1238
1239 case SO_PASSCRED:
1240 v.val = !!test_bit(SOCK_PASSCRED, &sock->flags);
1241 break;
1242
1243 case SO_PEERCRED:
1244 {
1245 struct ucred peercred;
1246 if (len > sizeof(peercred))
1247 len = sizeof(peercred);
1248 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
1249 if (copy_to_user(optval, &peercred, len))
1250 return -EFAULT;
1251 goto lenout;
1252 }
1253
1254 case SO_PEERGROUPS:
1255 {
1256 int ret, n;
1257
1258 if (!sk->sk_peer_cred)
1259 return -ENODATA;
1260
1261 n = sk->sk_peer_cred->group_info->ngroups;
1262 if (len < n * sizeof(gid_t)) {
1263 len = n * sizeof(gid_t);
1264 return put_user(len, optlen) ? -EFAULT : -ERANGE;
1265 }
1266 len = n * sizeof(gid_t);
1267
1268 ret = groups_to_user((gid_t __user *)optval,
1269 sk->sk_peer_cred->group_info);
1270 if (ret)
1271 return ret;
1272 goto lenout;
1273 }
1274
1275 case SO_PEERNAME:
1276 {
1277 char address[128];
1278
1279 lv = sock->ops->getname(sock, (struct sockaddr *)address, 2);
1280 if (lv < 0)
1281 return -ENOTCONN;
1282 if (lv < len)
1283 return -EINVAL;
1284 if (copy_to_user(optval, address, len))
1285 return -EFAULT;
1286 goto lenout;
1287 }
1288
1289 /* Dubious BSD thing... Probably nobody even uses it, but
1290 * the UNIX standard wants it for whatever reason... -DaveM
1291 */
1292 case SO_ACCEPTCONN:
1293 v.val = sk->sk_state == TCP_LISTEN;
1294 break;
1295
1296 case SO_PASSSEC:
1297 v.val = !!test_bit(SOCK_PASSSEC, &sock->flags);
1298 break;
1299
1300 case SO_PEERSEC:
1301 return security_socket_getpeersec_stream(sock, optval, optlen, len);
1302
1303 case SO_MARK:
1304 v.val = sk->sk_mark;
1305 break;
1306
1307 case SO_RXQ_OVFL:
1308 v.val = sock_flag(sk, SOCK_RXQ_OVFL);
1309 break;
1310
1311 case SO_WIFI_STATUS:
1312 v.val = sock_flag(sk, SOCK_WIFI_STATUS);
1313 break;
1314
1315 case SO_PEEK_OFF:
1316 if (!sock->ops->set_peek_off)
1317 return -EOPNOTSUPP;
1318
1319 v.val = sk->sk_peek_off;
1320 break;
1321 case SO_NOFCS:
1322 v.val = sock_flag(sk, SOCK_NOFCS);
1323 break;
1324
1325 case SO_BINDTODEVICE:
1326 return sock_getbindtodevice(sk, optval, optlen, len);
1327
1328 case SO_GET_FILTER:
1329 len = sk_get_filter(sk, (struct sock_filter __user *)optval, len);
1330 if (len < 0)
1331 return len;
1332
1333 goto lenout;
1334
1335 case SO_LOCK_FILTER:
1336 v.val = sock_flag(sk, SOCK_FILTER_LOCKED);
1337 break;
1338
1339 case SO_BPF_EXTENSIONS:
1340 v.val = bpf_tell_extensions();
1341 break;
1342
1343 case SO_SELECT_ERR_QUEUE:
1344 v.val = sock_flag(sk, SOCK_SELECT_ERR_QUEUE);
1345 break;
1346
1347#ifdef CONFIG_NET_RX_BUSY_POLL
1348 case SO_BUSY_POLL:
1349 v.val = sk->sk_ll_usec;
1350 break;
1351#endif
1352
1353 case SO_MAX_PACING_RATE:
1354 v.val = sk->sk_max_pacing_rate;
1355 break;
1356
1357 case SO_INCOMING_CPU:
1358 v.val = sk->sk_incoming_cpu;
1359 break;
1360
1361 case SO_MEMINFO:
1362 {
1363 u32 meminfo[SK_MEMINFO_VARS];
1364
1365 if (get_user(len, optlen))
1366 return -EFAULT;
1367
1368 sk_get_meminfo(sk, meminfo);
1369
1370 len = min_t(unsigned int, len, sizeof(meminfo));
1371 if (copy_to_user(optval, &meminfo, len))
1372 return -EFAULT;
1373
1374 goto lenout;
1375 }
1376
1377#ifdef CONFIG_NET_RX_BUSY_POLL
1378 case SO_INCOMING_NAPI_ID:
1379 v.val = READ_ONCE(sk->sk_napi_id);
1380
1381 /* aggregate non-NAPI IDs down to 0 */
1382 if (v.val < MIN_NAPI_ID)
1383 v.val = 0;
1384
1385 break;
1386#endif
1387
1388 case SO_COOKIE:
1389 lv = sizeof(u64);
1390 if (len < lv)
1391 return -EINVAL;
1392 v.val64 = sock_gen_cookie(sk);
1393 break;
1394
1395 case SO_ZEROCOPY:
1396 v.val = sock_flag(sk, SOCK_ZEROCOPY);
1397 break;
1398
1399 default:
1400 /* We implement the SO_SNDLOWAT etc to not be settable
1401 * (1003.1g 7).
1402 */
1403 return -ENOPROTOOPT;
1404 }
1405
1406 if (len > lv)
1407 len = lv;
1408 if (copy_to_user(optval, &v, len))
1409 return -EFAULT;
1410lenout:
1411 if (put_user(len, optlen))
1412 return -EFAULT;
1413 return 0;
1414}
1415
1416/*
1417 * Initialize an sk_lock.
1418 *
1419 * (We also register the sk_lock with the lock validator.)
1420 */
1421static inline void sock_lock_init(struct sock *sk)
1422{
1423 if (sk->sk_kern_sock)
1424 sock_lock_init_class_and_name(
1425 sk,
1426 af_family_kern_slock_key_strings[sk->sk_family],
1427 af_family_kern_slock_keys + sk->sk_family,
1428 af_family_kern_key_strings[sk->sk_family],
1429 af_family_kern_keys + sk->sk_family);
1430 else
1431 sock_lock_init_class_and_name(
1432 sk,
1433 af_family_slock_key_strings[sk->sk_family],
1434 af_family_slock_keys + sk->sk_family,
1435 af_family_key_strings[sk->sk_family],
1436 af_family_keys + sk->sk_family);
1437}
1438
1439/*
1440 * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
1441 * even temporarly, because of RCU lookups. sk_node should also be left as is.
1442 * We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end
1443 */
1444static void sock_copy(struct sock *nsk, const struct sock *osk)
1445{
1446#ifdef CONFIG_SECURITY_NETWORK
1447 void *sptr = nsk->sk_security;
1448#endif
1449 memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));
1450
1451 memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
1452 osk->sk_prot->obj_size - offsetof(struct sock, sk_dontcopy_end));
1453
1454#ifdef CONFIG_SECURITY_NETWORK
1455 nsk->sk_security = sptr;
1456 security_sk_clone(osk, nsk);
1457#endif
1458}
1459
1460static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
1461 int family)
1462{
1463 struct sock *sk;
1464 struct kmem_cache *slab;
1465
1466 slab = prot->slab;
1467 if (slab != NULL) {
1468 sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
1469 if (!sk)
1470 return sk;
1471 if (priority & __GFP_ZERO)
1472 sk_prot_clear_nulls(sk, prot->obj_size);
1473 } else
1474 sk = kmalloc(prot->obj_size, priority);
1475
1476 if (sk != NULL) {
1477 if (security_sk_alloc(sk, family, priority))
1478 goto out_free;
1479
1480 if (!try_module_get(prot->owner))
1481 goto out_free_sec;
1482 sk_tx_queue_clear(sk);
1483 }
1484
1485 return sk;
1486
1487out_free_sec:
1488 security_sk_free(sk);
1489out_free:
1490 if (slab != NULL)
1491 kmem_cache_free(slab, sk);
1492 else
1493 kfree(sk);
1494 return NULL;
1495}
1496
1497static void sk_prot_free(struct proto *prot, struct sock *sk)
1498{
1499 struct kmem_cache *slab;
1500 struct module *owner;
1501
1502 owner = prot->owner;
1503 slab = prot->slab;
1504
1505 cgroup_sk_free(&sk->sk_cgrp_data);
1506 mem_cgroup_sk_free(sk);
1507 security_sk_free(sk);
1508 if (slab != NULL)
1509 kmem_cache_free(slab, sk);
1510 else
1511 kfree(sk);
1512 module_put(owner);
1513}
1514
1515/**
1516 * sk_alloc - All socket objects are allocated here
1517 * @net: the applicable net namespace
1518 * @family: protocol family
1519 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1520 * @prot: struct proto associated with this new sock instance
1521 * @kern: is this to be a kernel socket?
1522 */
1523struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
1524 struct proto *prot, int kern)
1525{
1526 struct sock *sk;
1527
1528 sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
1529 if (sk) {
1530 sk->sk_family = family;
1531 /*
1532 * See comment in struct sock definition to understand
1533 * why we need sk_prot_creator -acme
1534 */
1535 sk->sk_prot = sk->sk_prot_creator = prot;
1536 sk->sk_kern_sock = kern;
1537 sock_lock_init(sk);
1538 sk->sk_net_refcnt = kern ? 0 : 1;
1539 if (likely(sk->sk_net_refcnt)) {
1540 get_net(net);
1541 sock_inuse_add(net, 1);
1542 }
1543
1544 sock_net_set(sk, net);
1545 refcount_set(&sk->sk_wmem_alloc, 1);
1546
1547 mem_cgroup_sk_alloc(sk);
1548 cgroup_sk_alloc(&sk->sk_cgrp_data);
1549 sock_update_classid(&sk->sk_cgrp_data);
1550 sock_update_netprioidx(&sk->sk_cgrp_data);
1551 }
1552
1553 return sk;
1554}
1555EXPORT_SYMBOL(sk_alloc);
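
/*
 * Illustrative sketch (not a caller in this file): a protocol's create
 * hook typically pairs sk_alloc() with sock_init_data(), and the socket
 * is eventually released with sk_free(), which drops the initial
 * sk_wmem_alloc reference taken above. "my_proto" is a hypothetical
 * struct proto used only for this example.
 *
 *	sk = sk_alloc(net, PF_INET, GFP_KERNEL, &my_proto, kern);
 *	if (!sk)
 *		return -ENOBUFS;
 *	sock_init_data(sock, sk);
 */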
1556
1557/* Sockets having SOCK_RCU_FREE will call this function after one RCU
1558 * grace period. This is the case for UDP sockets and TCP listeners.
1559 */
1560static void __sk_destruct(struct rcu_head *head)
1561{
1562 struct sock *sk = container_of(head, struct sock, sk_rcu);
1563 struct sk_filter *filter;
1564
1565 if (sk->sk_destruct)
1566 sk->sk_destruct(sk);
1567
1568 filter = rcu_dereference_check(sk->sk_filter,
1569 refcount_read(&sk->sk_wmem_alloc) == 0);
1570 if (filter) {
1571 sk_filter_uncharge(sk, filter);
1572 RCU_INIT_POINTER(sk->sk_filter, NULL);
1573 }
1574 if (rcu_access_pointer(sk->sk_reuseport_cb))
1575 reuseport_detach_sock(sk);
1576
1577 sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);
1578
1579 if (atomic_read(&sk->sk_omem_alloc))
1580 pr_debug("%s: optmem leakage (%d bytes) detected\n",
1581 __func__, atomic_read(&sk->sk_omem_alloc));
1582
1583 if (sk->sk_frag.page) {
1584 put_page(sk->sk_frag.page);
1585 sk->sk_frag.page = NULL;
1586 }
1587
1588 if (sk->sk_peer_cred)
1589 put_cred(sk->sk_peer_cred);
1590 put_pid(sk->sk_peer_pid);
1591 if (likely(sk->sk_net_refcnt))
1592 put_net(sock_net(sk));
1593 sk_prot_free(sk->sk_prot_creator, sk);
1594}
1595
1596void sk_destruct(struct sock *sk)
1597{
1598 if (sock_flag(sk, SOCK_RCU_FREE))
1599 call_rcu(&sk->sk_rcu, __sk_destruct);
1600 else
1601 __sk_destruct(&sk->sk_rcu);
1602}
1603
1604static void __sk_free(struct sock *sk)
1605{
1606 if (likely(sk->sk_net_refcnt))
1607 sock_inuse_add(sock_net(sk), -1);
1608
1609 if (unlikely(sk->sk_net_refcnt && sock_diag_has_destroy_listeners(sk)))
1610 sock_diag_broadcast_destroy(sk);
1611 else
1612 sk_destruct(sk);
1613}
1614
1615void sk_free(struct sock *sk)
1616{
1617 /*
1618 * We subtract one from sk_wmem_alloc so we can tell whether
1619 * some packets are still in some tx queue.
1620 * If not zero, sock_wfree() will call __sk_free(sk) later.
1621 */
1622 if (refcount_dec_and_test(&sk->sk_wmem_alloc))
1623 __sk_free(sk);
1624}
1625EXPORT_SYMBOL(sk_free);
1626
1627static void sk_init_common(struct sock *sk)
1628{
1629 skb_queue_head_init(&sk->sk_receive_queue);
1630 skb_queue_head_init(&sk->sk_write_queue);
1631 skb_queue_head_init(&sk->sk_error_queue);
1632
1633 rwlock_init(&sk->sk_callback_lock);
1634 lockdep_set_class_and_name(&sk->sk_receive_queue.lock,
1635 af_rlock_keys + sk->sk_family,
1636 af_family_rlock_key_strings[sk->sk_family]);
1637 lockdep_set_class_and_name(&sk->sk_write_queue.lock,
1638 af_wlock_keys + sk->sk_family,
1639 af_family_wlock_key_strings[sk->sk_family]);
1640 lockdep_set_class_and_name(&sk->sk_error_queue.lock,
1641 af_elock_keys + sk->sk_family,
1642 af_family_elock_key_strings[sk->sk_family]);
1643 lockdep_set_class_and_name(&sk->sk_callback_lock,
1644 af_callback_keys + sk->sk_family,
1645 af_family_clock_key_strings[sk->sk_family]);
1646}
1647
1648/**
1649 * sk_clone_lock - clone a socket, and lock its clone
1650 * @sk: the socket to clone
1651 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1652 *
1653 * Caller must unlock socket even in error path (bh_unlock_sock(newsk))
1654 */
1655struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
1656{
1657 struct sock *newsk;
1658 bool is_charged = true;
1659
1660 newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
1661 if (newsk != NULL) {
1662 struct sk_filter *filter;
1663
1664 sock_copy(newsk, sk);
1665
1666 newsk->sk_prot_creator = sk->sk_prot;
1667
1668 /* SANITY */
1669 if (likely(newsk->sk_net_refcnt))
1670 get_net(sock_net(newsk));
1671 sk_node_init(&newsk->sk_node);
1672 sock_lock_init(newsk);
1673 bh_lock_sock(newsk);
1674 newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
1675 newsk->sk_backlog.len = 0;
1676
1677 atomic_set(&newsk->sk_rmem_alloc, 0);
1678 /*
1679 * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
1680 */
1681 refcount_set(&newsk->sk_wmem_alloc, 1);
1682 atomic_set(&newsk->sk_omem_alloc, 0);
1683 sk_init_common(newsk);
1684
1685 newsk->sk_dst_cache = NULL;
1686 newsk->sk_dst_pending_confirm = 0;
1687 newsk->sk_wmem_queued = 0;
1688 newsk->sk_forward_alloc = 0;
1689 atomic_set(&newsk->sk_drops, 0);
1690 newsk->sk_send_head = NULL;
1691 newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
1692 atomic_set(&newsk->sk_zckey, 0);
1693
1694 sock_reset_flag(newsk, SOCK_DONE);
1695 mem_cgroup_sk_alloc(newsk);
1696 cgroup_sk_alloc(&newsk->sk_cgrp_data);
1697
1698 rcu_read_lock();
1699 filter = rcu_dereference(sk->sk_filter);
1700 if (filter != NULL)
1701 /* Though it's an empty new sock, the charging may fail
1702 * if sysctl_optmem_max was changed between creation of the
1703 * original socket and cloning.
1704 */
1705 is_charged = sk_filter_charge(newsk, filter);
1706 RCU_INIT_POINTER(newsk->sk_filter, filter);
1707 rcu_read_unlock();
1708
1709 if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) {
1710 /* We need to make sure that we don't uncharge the new
1711 * socket if we couldn't charge it in the first place,
1712 * as otherwise we would uncharge the parent's filter.
1713 */
1714 if (!is_charged)
1715 RCU_INIT_POINTER(newsk->sk_filter, NULL);
1716 sk_free_unlock_clone(newsk);
1717 newsk = NULL;
1718 goto out;
1719 }
1720 RCU_INIT_POINTER(newsk->sk_reuseport_cb, NULL);
1721
1722 newsk->sk_err = 0;
1723 newsk->sk_err_soft = 0;
1724 newsk->sk_priority = 0;
1725 newsk->sk_incoming_cpu = raw_smp_processor_id();
1726 atomic64_set(&newsk->sk_cookie, 0);
1727 if (likely(newsk->sk_net_refcnt))
1728 sock_inuse_add(sock_net(newsk), 1);
1729
1730 /*
1731 * Before updating sk_refcnt, we must commit prior changes to memory
1732 * (Documentation/RCU/rculist_nulls.txt for details)
1733 */
1734 smp_wmb();
1735 refcount_set(&newsk->sk_refcnt, 2);
1736
1737 /*
1738 * Increment the counter in the same struct proto as the master
1739 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
1740 * is the same as sk->sk_prot->socks, as this field was copied
1741 * with memcpy).
1742 *
1743 * This _changes_ the previous behaviour, where
1744 * tcp_create_openreq_child was always incrementing the
1745 * equivalent of tcp_prot->socks (inet_sock_nr), so this has
1746 * to be taken into account in all callers. -acme
1747 */
1748 sk_refcnt_debug_inc(newsk);
1749 sk_set_socket(newsk, NULL);
1750 newsk->sk_wq = NULL;
1751
1752 if (newsk->sk_prot->sockets_allocated)
1753 sk_sockets_allocated_inc(newsk);
1754
1755 if (sock_needs_netstamp(sk) &&
1756 newsk->sk_flags & SK_FLAGS_TIMESTAMP)
1757 net_enable_timestamp();
1758 }
1759out:
1760 return newsk;
1761}
1762EXPORT_SYMBOL_GPL(sk_clone_lock);
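
/*
 * Illustrative caller sketch (an assumption, not code from this file):
 * the clone is returned bh-locked with its refcount set to 2, so a
 * typical caller does:
 *
 *	newsk = sk_clone_lock(sk, GFP_ATOMIC);
 *	if (newsk) {
 *		... protocol-specific setup ...
 *		bh_unlock_sock(newsk);
 *	}
 */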
1763
1764void sk_free_unlock_clone(struct sock *sk)
1765{
1766 /* It is still a raw copy of the parent, so invalidate
1767 * the destructor and do a plain sk_free() */
1768 sk->sk_destruct = NULL;
1769 bh_unlock_sock(sk);
1770 sk_free(sk);
1771}
1772EXPORT_SYMBOL_GPL(sk_free_unlock_clone);
1773
1774void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
1775{
1776 u32 max_segs = 1;
1777
1778 sk_dst_set(sk, dst);
1779 sk->sk_route_caps = dst->dev->features | sk->sk_route_forced_caps;
1780 if (sk->sk_route_caps & NETIF_F_GSO)
1781 sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
1782 sk->sk_route_caps &= ~sk->sk_route_nocaps;
1783 if (sk_can_gso(sk)) {
1784 if (dst->header_len && !xfrm_dst_offload_ok(dst)) {
1785 sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
1786 } else {
1787 sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
1788 sk->sk_gso_max_size = dst->dev->gso_max_size;
1789 max_segs = max_t(u32, dst->dev->gso_max_segs, 1);
1790 }
1791 }
1792 sk->sk_gso_max_segs = max_segs;
1793}
1794EXPORT_SYMBOL_GPL(sk_setup_caps);
1795
1796/*
1797 * Simple resource managers for sockets.
1798 */
1799
1800
1801/*
1802 * Write buffer destructor automatically called from kfree_skb.
1803 */
1804void sock_wfree(struct sk_buff *skb)
1805{
1806 struct sock *sk = skb->sk;
1807 unsigned int len = skb->truesize;
1808
1809 if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
1810 /*
1811 * Keep a reference on sk_wmem_alloc; it will be released
1812 * after the sk_write_space() call.
1813 */
1814 WARN_ON(refcount_sub_and_test(len - 1, &sk->sk_wmem_alloc));
1815 sk->sk_write_space(sk);
1816 len = 1;
1817 }
1818 /*
1819 * If sk_wmem_alloc reaches 0, we must finish what sk_free()
1820 * could not do because of in-flight packets.
1821 */
1822 if (refcount_sub_and_test(len, &sk->sk_wmem_alloc))
1823 __sk_free(sk);
1824}
1825EXPORT_SYMBOL(sock_wfree);
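
/*
 * Worked note on the accounting above: for sockets without
 * SOCK_USE_WRITE_QUEUE we first drop truesize - 1, so sk_wmem_alloc
 * stays non-zero across the sk_write_space() callback, and only the
 * final single-unit decrement can reach zero and trigger __sk_free().
 */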
1826
1827/* This variant of sock_wfree() is used by TCP,
1828 * since it sets SOCK_USE_WRITE_QUEUE.
1829 */
1830void __sock_wfree(struct sk_buff *skb)
1831{
1832 struct sock *sk = skb->sk;
1833
1834 if (refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc))
1835 __sk_free(sk);
1836}
1837
1838void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
1839{
1840 skb_orphan(skb);
1841 skb->sk = sk;
1842#ifdef CONFIG_INET
1843 if (unlikely(!sk_fullsock(sk))) {
1844 skb->destructor = sock_edemux;
1845 sock_hold(sk);
1846 return;
1847 }
1848#endif
1849 skb->destructor = sock_wfree;
1850 skb_set_hash_from_sk(skb, sk);
1851 /*
1852 * We used to take a refcount on sk, but the following operation
1853 * is enough to guarantee sk_free() won't free this sock until
1854 * all in-flight packets are completed.
1855 */
1856 refcount_add(skb->truesize, &sk->sk_wmem_alloc);
1857}
1858EXPORT_SYMBOL(skb_set_owner_w);
1859
1860/* This helper is used by netem, as it can hold packets in its
1861 * delay queue. We want to allow the owner socket to send more
1862 * packets, as if they were already TX completed by a typical driver.
1863 * But we also want to keep skb->sk set because some packet schedulers
1864 * rely on it (sch_fq for example).
1865 */
1866void skb_orphan_partial(struct sk_buff *skb)
1867{
1868 if (skb_is_tcp_pure_ack(skb))
1869 return;
1870
1871 if (skb->destructor == sock_wfree
1872#ifdef CONFIG_INET
1873 || skb->destructor == tcp_wfree
1874#endif
1875 ) {
1876 struct sock *sk = skb->sk;
1877
1878 if (refcount_inc_not_zero(&sk->sk_refcnt)) {
1879 WARN_ON(refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc));
1880 skb->destructor = sock_efree;
1881 }
1882 } else {
1883 skb_orphan(skb);
1884 }
1885}
1886EXPORT_SYMBOL(skb_orphan_partial);
1887
1888/*
1889 * Read buffer destructor automatically called from kfree_skb.
1890 */
1891void sock_rfree(struct sk_buff *skb)
1892{
1893 struct sock *sk = skb->sk;
1894 unsigned int len = skb->truesize;
1895
1896 atomic_sub(len, &sk->sk_rmem_alloc);
1897 sk_mem_uncharge(sk, len);
1898}
1899EXPORT_SYMBOL(sock_rfree);
1900
1901/*
1902 * Buffer destructor for skbs that are not used directly in read or write
1903 * path, e.g. for error handler skbs. Automatically called from kfree_skb.
1904 */
1905void sock_efree(struct sk_buff *skb)
1906{
1907 sock_put(skb->sk);
1908}
1909EXPORT_SYMBOL(sock_efree);
1910
1911kuid_t sock_i_uid(struct sock *sk)
1912{
1913 kuid_t uid;
1914
1915 read_lock_bh(&sk->sk_callback_lock);
1916 uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : GLOBAL_ROOT_UID;
1917 read_unlock_bh(&sk->sk_callback_lock);
1918 return uid;
1919}
1920EXPORT_SYMBOL(sock_i_uid);
1921
1922unsigned long sock_i_ino(struct sock *sk)
1923{
1924 unsigned long ino;
1925
1926 read_lock_bh(&sk->sk_callback_lock);
1927 ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
1928 read_unlock_bh(&sk->sk_callback_lock);
1929 return ino;
1930}
1931EXPORT_SYMBOL(sock_i_ino);
1932
1933/*
1934 * Allocate a skb from the socket's send buffer.
1935 */
1936struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
1937 gfp_t priority)
1938{
1939 if (force || refcount_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
1940 struct sk_buff *skb = alloc_skb(size, priority);
1941 if (skb) {
1942 skb_set_owner_w(skb, sk);
1943 return skb;
1944 }
1945 }
1946 return NULL;
1947}
1948EXPORT_SYMBOL(sock_wmalloc);
1949
1950static void sock_ofree(struct sk_buff *skb)
1951{
1952 struct sock *sk = skb->sk;
1953
1954 atomic_sub(skb->truesize, &sk->sk_omem_alloc);
1955}
1956
1957struct sk_buff *sock_omalloc(struct sock *sk, unsigned long size,
1958 gfp_t priority)
1959{
1960 struct sk_buff *skb;
1961
1962 /* small safe race: SKB_TRUESIZE may differ from final skb->truesize */
1963 if (atomic_read(&sk->sk_omem_alloc) + SKB_TRUESIZE(size) >
1964 sysctl_optmem_max)
1965 return NULL;
1966
1967 skb = alloc_skb(size, priority);
1968 if (!skb)
1969 return NULL;
1970
1971 atomic_add(skb->truesize, &sk->sk_omem_alloc);
1972 skb->sk = sk;
1973 skb->destructor = sock_ofree;
1974 return skb;
1975}
1976
1977/*
1978 * Allocate a memory block from the socket's option memory buffer.
1979 */
1980void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
1981{
1982 if ((unsigned int)size <= sysctl_optmem_max &&
1983 atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
1984 void *mem;
1985 /* First do the add, to avoid the race if kmalloc
1986 * might sleep.
1987 */
1988 atomic_add(size, &sk->sk_omem_alloc);
1989 mem = kmalloc(size, priority);
1990 if (mem)
1991 return mem;
1992 atomic_sub(size, &sk->sk_omem_alloc);
1993 }
1994 return NULL;
1995}
1996EXPORT_SYMBOL(sock_kmalloc);
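
/*
 * Illustrative pairing (sizes are hypothetical): sk_omem_alloc is
 * charged by value rather than tracked per allocation, so the buffer
 * must be released with sock_kfree_s()/sock_kzfree_s() using the same
 * size that was allocated:
 *
 *	buf = sock_kmalloc(sk, 128, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOBUFS;
 *	...
 *	sock_kfree_s(sk, buf, 128);
 */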
1997
1998/* Free an option memory block. Note, we actually want the inline
1999 * here as this allows gcc to detect the nullify and fold away the
2000 * condition entirely.
2001 */
2002static inline void __sock_kfree_s(struct sock *sk, void *mem, int size,
2003 const bool nullify)
2004{
2005 if (WARN_ON_ONCE(!mem))
2006 return;
2007 if (nullify)
2008 kzfree(mem);
2009 else
2010 kfree(mem);
2011 atomic_sub(size, &sk->sk_omem_alloc);
2012}
2013
2014void sock_kfree_s(struct sock *sk, void *mem, int size)
2015{
2016 __sock_kfree_s(sk, mem, size, false);
2017}
2018EXPORT_SYMBOL(sock_kfree_s);
2019
2020void sock_kzfree_s(struct sock *sk, void *mem, int size)
2021{
2022 __sock_kfree_s(sk, mem, size, true);
2023}
2024EXPORT_SYMBOL(sock_kzfree_s);
2025
2026/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
2027 I think these locks should be removed for datagram sockets.
2028 */
2029static long sock_wait_for_wmem(struct sock *sk, long timeo)
2030{
2031 DEFINE_WAIT(wait);
2032
2033 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
2034 for (;;) {
2035 if (!timeo)
2036 break;
2037 if (signal_pending(current))
2038 break;
2039 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
2040 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
2041 if (refcount_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
2042 break;
2043 if (sk->sk_shutdown & SEND_SHUTDOWN)
2044 break;
2045 if (sk->sk_err)
2046 break;
2047 timeo = schedule_timeout(timeo);
2048 }
2049 finish_wait(sk_sleep(sk), &wait);
2050 return timeo;
2051}
2052
2053
2054/*
2055 * Generic send/receive buffer handlers
2056 */
2057
2058struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
2059 unsigned long data_len, int noblock,
2060 int *errcode, int max_page_order)
2061{
2062 struct sk_buff *skb;
2063 long timeo;
2064 int err;
2065
2066 timeo = sock_sndtimeo(sk, noblock);
2067 for (;;) {
2068 err = sock_error(sk);
2069 if (err != 0)
2070 goto failure;
2071
2072 err = -EPIPE;
2073 if (sk->sk_shutdown & SEND_SHUTDOWN)
2074 goto failure;
2075
2076 if (sk_wmem_alloc_get(sk) < sk->sk_sndbuf)
2077 break;
2078
2079 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
2080 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
2081 err = -EAGAIN;
2082 if (!timeo)
2083 goto failure;
2084 if (signal_pending(current))
2085 goto interrupted;
2086 timeo = sock_wait_for_wmem(sk, timeo);
2087 }
2088 skb = alloc_skb_with_frags(header_len, data_len, max_page_order,
2089 errcode, sk->sk_allocation);
2090 if (skb)
2091 skb_set_owner_w(skb, sk);
2092 return skb;
2093
2094interrupted:
2095 err = sock_intr_errno(timeo);
2096failure:
2097 *errcode = err;
2098 return NULL;
2099}
2100EXPORT_SYMBOL(sock_alloc_send_pskb);
2101
2102struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
2103 int noblock, int *errcode)
2104{
2105 return sock_alloc_send_pskb(sk, size, 0, noblock, errcode, 0);
2106}
2107EXPORT_SYMBOL(sock_alloc_send_skb);
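
/*
 * Illustrative use from a datagram sendmsg path (a sketch; error
 * handling and header sizing elided):
 *
 *	skb = sock_alloc_send_skb(sk, hlen + len,
 *				  msg->msg_flags & MSG_DONTWAIT, &err);
 *	if (!skb)
 *		return err;
 *
 * The returned skb is already charged to sk_wmem_alloc via
 * skb_set_owner_w().
 */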
2108
2109int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
2110 struct sockcm_cookie *sockc)
2111{
2112 u32 tsflags;
2113
2114 switch (cmsg->cmsg_type) {
2115 case SO_MARK:
2116 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
2117 return -EPERM;
2118 if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32)))
2119 return -EINVAL;
2120 sockc->mark = *(u32 *)CMSG_DATA(cmsg);
2121 break;
2122 case SO_TIMESTAMPING:
2123 if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32)))
2124 return -EINVAL;
2125
2126 tsflags = *(u32 *)CMSG_DATA(cmsg);
2127 if (tsflags & ~SOF_TIMESTAMPING_TX_RECORD_MASK)
2128 return -EINVAL;
2129
2130 sockc->tsflags &= ~SOF_TIMESTAMPING_TX_RECORD_MASK;
2131 sockc->tsflags |= tsflags;
2132 break;
2133 /* SCM_RIGHTS and SCM_CREDENTIALS are semantically in SOL_UNIX. */
2134 case SCM_RIGHTS:
2135 case SCM_CREDENTIALS:
2136 break;
2137 default:
2138 return -EINVAL;
2139 }
2140 return 0;
2141}
2142EXPORT_SYMBOL(__sock_cmsg_send);
2143
2144int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
2145 struct sockcm_cookie *sockc)
2146{
2147 struct cmsghdr *cmsg;
2148 int ret;
2149
2150 for_each_cmsghdr(cmsg, msg) {
2151 if (!CMSG_OK(msg, cmsg))
2152 return -EINVAL;
2153 if (cmsg->cmsg_level != SOL_SOCKET)
2154 continue;
2155 ret = __sock_cmsg_send(sk, msg, cmsg, sockc);
2156 if (ret)
2157 return ret;
2158 }
2159 return 0;
2160}
2161EXPORT_SYMBOL(sock_cmsg_send);
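
/*
 * Illustrative use from a protocol sendmsg handler (a sketch): the
 * cookie is usually seeded from the socket before per-message cmsg
 * overrides are applied.
 *
 *	struct sockcm_cookie sockc = { .tsflags = sk->sk_tsflags };
 *
 *	if (msg->msg_controllen) {
 *		err = sock_cmsg_send(sk, msg, &sockc);
 *		if (unlikely(err))
 *			return err;
 *	}
 */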
2162
2163static void sk_enter_memory_pressure(struct sock *sk)
2164{
2165 if (!sk->sk_prot->enter_memory_pressure)
2166 return;
2167
2168 sk->sk_prot->enter_memory_pressure(sk);
2169}
2170
2171static void sk_leave_memory_pressure(struct sock *sk)
2172{
2173 if (sk->sk_prot->leave_memory_pressure) {
2174 sk->sk_prot->leave_memory_pressure(sk);
2175 } else {
2176 unsigned long *memory_pressure = sk->sk_prot->memory_pressure;
2177
2178 if (memory_pressure && *memory_pressure)
2179 *memory_pressure = 0;
2180 }
2181}
2182
2183/* On 32-bit arches, an skb frag is limited to 2^15 bytes */
2184#define SKB_FRAG_PAGE_ORDER get_order(32768)
2185
2186/**
2187 * skb_page_frag_refill - check that a page_frag contains enough room
2188 * @sz: minimum size of the fragment we want to get
2189 * @pfrag: pointer to page_frag
2190 * @gfp: priority for memory allocation
2191 *
2192 * Note: While this allocator tries to use high order pages, there is
2193 * no guarantee that allocations succeed. Therefore, @sz MUST be
2194 * less than or equal to PAGE_SIZE.
2195 */
2196bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t gfp)
2197{
2198 if (pfrag->page) {
2199 if (page_ref_count(pfrag->page) == 1) {
2200 pfrag->offset = 0;
2201 return true;
2202 }
2203 if (pfrag->offset + sz <= pfrag->size)
2204 return true;
2205 put_page(pfrag->page);
2206 }
2207
2208 pfrag->offset = 0;
2209 if (SKB_FRAG_PAGE_ORDER) {
2210 /* Avoid direct reclaim but allow kswapd to wake */
2211 pfrag->page = alloc_pages((gfp & ~__GFP_DIRECT_RECLAIM) |
2212 __GFP_COMP | __GFP_NOWARN |
2213 __GFP_NORETRY,
2214 SKB_FRAG_PAGE_ORDER);
2215 if (likely(pfrag->page)) {
2216 pfrag->size = PAGE_SIZE << SKB_FRAG_PAGE_ORDER;
2217 return true;
2218 }
2219 }
2220 pfrag->page = alloc_page(gfp);
2221 if (likely(pfrag->page)) {
2222 pfrag->size = PAGE_SIZE;
2223 return true;
2224 }
2225 return false;
2226}
2227EXPORT_SYMBOL(skb_page_frag_refill);
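
/*
 * Note on the strategy above: the first attempt asks for an order-3
 * compound page (32KB with 4KB pages) with direct reclaim disabled, so
 * under memory pressure it fails fast and we fall back to a single
 * order-0 page. A page referenced only by us (page_ref_count == 1) is
 * simply rewound and reused.
 */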
2228
2229bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
2230{
2231 if (likely(skb_page_frag_refill(32U, pfrag, sk->sk_allocation)))
2232 return true;
2233
2234 sk_enter_memory_pressure(sk);
2235 sk_stream_moderate_sndbuf(sk);
2236 return false;
2237}
2238EXPORT_SYMBOL(sk_page_frag_refill);
2239
2240int sk_alloc_sg(struct sock *sk, int len, struct scatterlist *sg,
2241 int sg_start, int *sg_curr_index, unsigned int *sg_curr_size,
2242 int first_coalesce)
2243{
2244 int sg_curr = *sg_curr_index, use = 0, rc = 0;
2245 unsigned int size = *sg_curr_size;
2246 struct page_frag *pfrag;
2247 struct scatterlist *sge;
2248
2249 len -= size;
2250 pfrag = sk_page_frag(sk);
2251
2252 while (len > 0) {
2253 unsigned int orig_offset;
2254
2255 if (!sk_page_frag_refill(sk, pfrag)) {
2256 rc = -ENOMEM;
2257 goto out;
2258 }
2259
2260 use = min_t(int, len, pfrag->size - pfrag->offset);
2261
2262 if (!sk_wmem_schedule(sk, use)) {
2263 rc = -ENOMEM;
2264 goto out;
2265 }
2266
2267 sk_mem_charge(sk, use);
2268 size += use;
2269 orig_offset = pfrag->offset;
2270 pfrag->offset += use;
2271
2272 sge = sg + sg_curr - 1;
2273 if (sg_curr > first_coalesce && sg_page(sge) == pfrag->page &&
2274 sge->offset + sge->length == orig_offset) {
2275 sge->length += use;
2276 } else {
2277 sge = sg + sg_curr;
2278 sg_unmark_end(sge);
2279 sg_set_page(sge, pfrag->page, use, orig_offset);
2280 get_page(pfrag->page);
2281 sg_curr++;
2282
2283 if (sg_curr == MAX_SKB_FRAGS)
2284 sg_curr = 0;
2285
2286 if (sg_curr == sg_start) {
2287 rc = -ENOSPC;
2288 break;
2289 }
2290 }
2291
2292 len -= use;
2293 }
2294out:
2295 *sg_curr_size = size;
2296 *sg_curr_index = sg_curr;
2297 return rc;
2298}
2299EXPORT_SYMBOL(sk_alloc_sg);
2300
2301static void __lock_sock(struct sock *sk)
2302 __releases(&sk->sk_lock.slock)
2303 __acquires(&sk->sk_lock.slock)
2304{
2305 DEFINE_WAIT(wait);
2306
2307 for (;;) {
2308 prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
2309 TASK_UNINTERRUPTIBLE);
2310 spin_unlock_bh(&sk->sk_lock.slock);
2311 schedule();
2312 spin_lock_bh(&sk->sk_lock.slock);
2313 if (!sock_owned_by_user(sk))
2314 break;
2315 }
2316 finish_wait(&sk->sk_lock.wq, &wait);
2317}
2318
2319static void __release_sock(struct sock *sk)
2320 __releases(&sk->sk_lock.slock)
2321 __acquires(&sk->sk_lock.slock)
2322{
2323 struct sk_buff *skb, *next;
2324
2325 while ((skb = sk->sk_backlog.head) != NULL) {
2326 sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
2327
2328 spin_unlock_bh(&sk->sk_lock.slock);
2329
2330 do {
2331 next = skb->next;
2332 prefetch(next);
2333 WARN_ON_ONCE(skb_dst_is_noref(skb));
2334 skb->next = NULL;
2335 sk_backlog_rcv(sk, skb);
2336
2337 cond_resched();
2338
2339 skb = next;
2340 } while (skb != NULL);
2341
2342 spin_lock_bh(&sk->sk_lock.slock);
2343 }
2344
2345 /*
2346 * Doing the zeroing here guarantees we cannot loop forever
2347 * while a wild producer attempts to flood us.
2348 */
2349 sk->sk_backlog.len = 0;
2350}
2351
2352void __sk_flush_backlog(struct sock *sk)
2353{
2354 spin_lock_bh(&sk->sk_lock.slock);
2355 __release_sock(sk);
2356 spin_unlock_bh(&sk->sk_lock.slock);
2357}
2358
2359/**
2360 * sk_wait_data - wait for data to arrive at sk_receive_queue
2361 * @sk: sock to wait on
2362 * @timeo: for how long
2363 * @skb: last skb seen on sk_receive_queue
2364 *
2365 * Now socket state including sk->sk_err is changed only under the lock,
2366 * hence we may omit checks after joining the wait queue.
2367 * We check the receive queue before schedule() only as an optimization;
2368 * it is very likely that release_sock() added new data.
2369 */
2370int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb)
2371{
2372 DEFINE_WAIT_FUNC(wait, woken_wake_function);
2373 int rc;
2374
2375 add_wait_queue(sk_sleep(sk), &wait);
2376 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2377 rc = sk_wait_event(sk, timeo, skb_peek_tail(&sk->sk_receive_queue) != skb, &wait);
2378 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2379 remove_wait_queue(sk_sleep(sk), &wait);
2380 return rc;
2381}
2382EXPORT_SYMBOL(sk_wait_data);
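
/*
 * Illustrative blocking-receive loop (a sketch; callers hold the socket
 * lock, which sk_wait_event() releases and retakes around the sleep):
 *
 *	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 *	while (!(skb = skb_peek(&sk->sk_receive_queue))) {
 *		if (!timeo || signal_pending(current))
 *			break;
 *		sk_wait_data(sk, &timeo, NULL);
 *	}
 */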
2383
2384/**
2385 * __sk_mem_raise_allocated - increase memory_allocated
2386 * @sk: socket
2387 * @size: memory size to allocate
2388 * @amt: pages to allocate
2389 * @kind: allocation type
2390 *
2391 * Similar to __sk_mem_schedule(), but does not update sk_forward_alloc
2392 */
2393int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind)
2394{
2395 struct proto *prot = sk->sk_prot;
2396 long allocated = sk_memory_allocated_add(sk, amt);
2397
2398 if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
2399 !mem_cgroup_charge_skmem(sk->sk_memcg, amt))
2400 goto suppress_allocation;
2401
2402 /* Under limit. */
2403 if (allocated <= sk_prot_mem_limits(sk, 0)) {
2404 sk_leave_memory_pressure(sk);
2405 return 1;
2406 }
2407
2408 /* Under pressure. */
2409 if (allocated > sk_prot_mem_limits(sk, 1))
2410 sk_enter_memory_pressure(sk);
2411
2412 /* Over hard limit. */
2413 if (allocated > sk_prot_mem_limits(sk, 2))
2414 goto suppress_allocation;
2415
2416 /* guarantee minimum buffer size under pressure */
2417 if (kind == SK_MEM_RECV) {
2418 if (atomic_read(&sk->sk_rmem_alloc) < sk_get_rmem0(sk, prot))
2419 return 1;
2420
2421 } else { /* SK_MEM_SEND */
2422 int wmem0 = sk_get_wmem0(sk, prot);
2423
2424 if (sk->sk_type == SOCK_STREAM) {
2425 if (sk->sk_wmem_queued < wmem0)
2426 return 1;
2427 } else if (refcount_read(&sk->sk_wmem_alloc) < wmem0) {
2428 return 1;
2429 }
2430 }
2431
2432 if (sk_has_memory_pressure(sk)) {
2433 int alloc;
2434
2435 if (!sk_under_memory_pressure(sk))
2436 return 1;
2437 alloc = sk_sockets_allocated_read_positive(sk);
2438 if (sk_prot_mem_limits(sk, 2) > alloc *
2439 sk_mem_pages(sk->sk_wmem_queued +
2440 atomic_read(&sk->sk_rmem_alloc) +
2441 sk->sk_forward_alloc))
2442 return 1;
2443 }
2444
2445suppress_allocation:
2446
2447 if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
2448 sk_stream_moderate_sndbuf(sk);
2449
2450 /* Fail only if socket is _under_ its sndbuf.
2451 * In this case we cannot block, so we have to fail.
2452 */
2453 if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
2454 return 1;
2455 }
2456
2457 trace_sock_exceed_buf_limit(sk, prot, allocated);
2458
2459 sk_memory_allocated_sub(sk, amt);
2460
2461 if (mem_cgroup_sockets_enabled && sk->sk_memcg)
2462 mem_cgroup_uncharge_skmem(sk->sk_memcg, amt);
2463
2464 return 0;
2465}
2466EXPORT_SYMBOL(__sk_mem_raise_allocated);
2467
2468/**
2469 * __sk_mem_schedule - increase sk_forward_alloc and memory_allocated
2470 * @sk: socket
2471 * @size: memory size to allocate
2472 * @kind: allocation type
2473 *
2474 * If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
2475 * rmem allocation. This function assumes that protocols which have
2476 * memory_pressure use sk_wmem_queued as write buffer accounting.
2477 */
2478int __sk_mem_schedule(struct sock *sk, int size, int kind)
2479{
2480 int ret, amt = sk_mem_pages(size);
2481
2482 sk->sk_forward_alloc += amt << SK_MEM_QUANTUM_SHIFT;
2483 ret = __sk_mem_raise_allocated(sk, size, amt, kind);
2484 if (!ret)
2485 sk->sk_forward_alloc -= amt << SK_MEM_QUANTUM_SHIFT;
2486 return ret;
2487}
2488EXPORT_SYMBOL(__sk_mem_schedule);
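
/*
 * Worked example (assuming SK_MEM_QUANTUM == PAGE_SIZE == 4096): a
 * request of size = 6000 gives amt = sk_mem_pages(6000) = 2, so
 * sk_forward_alloc grows by 8192 bytes and memory_allocated by 2 pages;
 * the 2192 bytes beyond the request stay in sk_forward_alloc for
 * later charges.
 */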
2489
2490/**
2491 * __sk_mem_reduce_allocated - reclaim memory_allocated
2492 * @sk: socket
2493 * @amount: number of quanta
2494 *
2495 * Similar to __sk_mem_reclaim(), but does not update sk_forward_alloc
2496 */
2497void __sk_mem_reduce_allocated(struct sock *sk, int amount)
2498{
2499 sk_memory_allocated_sub(sk, amount);
2500
2501 if (mem_cgroup_sockets_enabled && sk->sk_memcg)
2502 mem_cgroup_uncharge_skmem(sk->sk_memcg, amount);
2503
2504 if (sk_under_memory_pressure(sk) &&
2505 (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
2506 sk_leave_memory_pressure(sk);
2507}
2508EXPORT_SYMBOL(__sk_mem_reduce_allocated);
2509
2510/**
2511 * __sk_mem_reclaim - reclaim sk_forward_alloc and memory_allocated
2512 * @sk: socket
2513 * @amount: number of bytes (rounded down to a SK_MEM_QUANTUM multiple)
2514 */
2515void __sk_mem_reclaim(struct sock *sk, int amount)
2516{
2517 amount >>= SK_MEM_QUANTUM_SHIFT;
2518 sk->sk_forward_alloc -= amount << SK_MEM_QUANTUM_SHIFT;
2519 __sk_mem_reduce_allocated(sk, amount);
2520}
2521EXPORT_SYMBOL(__sk_mem_reclaim);
2522
2523int sk_set_peek_off(struct sock *sk, int val)
2524{
2525 sk->sk_peek_off = val;
2526 return 0;
2527}
2528EXPORT_SYMBOL_GPL(sk_set_peek_off);
2529
2530/*
2531 * Set of default routines for initialising struct proto_ops when
2532 * the protocol does not support a particular function. In certain
2533 * cases where it makes no sense for a protocol to have a "do nothing"
2534 * function, some default processing is provided.
2535 */
2536
2537int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
2538{
2539 return -EOPNOTSUPP;
2540}
2541EXPORT_SYMBOL(sock_no_bind);
2542
2543int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
2544 int len, int flags)
2545{
2546 return -EOPNOTSUPP;
2547}
2548EXPORT_SYMBOL(sock_no_connect);
2549
2550int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
2551{
2552 return -EOPNOTSUPP;
2553}
2554EXPORT_SYMBOL(sock_no_socketpair);
2555
2556int sock_no_accept(struct socket *sock, struct socket *newsock, int flags,
2557 bool kern)
2558{
2559 return -EOPNOTSUPP;
2560}
2561EXPORT_SYMBOL(sock_no_accept);
2562
2563int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
2564 int peer)
2565{
2566 return -EOPNOTSUPP;
2567}
2568EXPORT_SYMBOL(sock_no_getname);
2569
2570__poll_t sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
2571{
2572 return 0;
2573}
2574EXPORT_SYMBOL(sock_no_poll);
2575
2576int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
2577{
2578 return -EOPNOTSUPP;
2579}
2580EXPORT_SYMBOL(sock_no_ioctl);
2581
2582int sock_no_listen(struct socket *sock, int backlog)
2583{
2584 return -EOPNOTSUPP;
2585}
2586EXPORT_SYMBOL(sock_no_listen);
2587
2588int sock_no_shutdown(struct socket *sock, int how)
2589{
2590 return -EOPNOTSUPP;
2591}
2592EXPORT_SYMBOL(sock_no_shutdown);
2593
2594int sock_no_setsockopt(struct socket *sock, int level, int optname,
2595 char __user *optval, unsigned int optlen)
2596{
2597 return -EOPNOTSUPP;
2598}
2599EXPORT_SYMBOL(sock_no_setsockopt);
2600
2601int sock_no_getsockopt(struct socket *sock, int level, int optname,
2602 char __user *optval, int __user *optlen)
2603{
2604 return -EOPNOTSUPP;
2605}
2606EXPORT_SYMBOL(sock_no_getsockopt);
2607
2608int sock_no_sendmsg(struct socket *sock, struct msghdr *m, size_t len)
2609{
2610 return -EOPNOTSUPP;
2611}
2612EXPORT_SYMBOL(sock_no_sendmsg);
2613
2614int sock_no_sendmsg_locked(struct sock *sk, struct msghdr *m, size_t len)
2615{
2616 return -EOPNOTSUPP;
2617}
2618EXPORT_SYMBOL(sock_no_sendmsg_locked);
2619
2620int sock_no_recvmsg(struct socket *sock, struct msghdr *m, size_t len,
2621 int flags)
2622{
2623 return -EOPNOTSUPP;
2624}
2625EXPORT_SYMBOL(sock_no_recvmsg);
2626
2627int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
2628{
2629 /* Mirror missing mmap method error code */
2630 return -ENODEV;
2631}
2632EXPORT_SYMBOL(sock_no_mmap);
2633
2634ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
2635{
2636 ssize_t res;
2637 struct msghdr msg = {.msg_flags = flags};
2638 struct kvec iov;
2639 char *kaddr = kmap(page);
2640 iov.iov_base = kaddr + offset;
2641 iov.iov_len = size;
2642 res = kernel_sendmsg(sock, &msg, &iov, 1, size);
2643 kunmap(page);
2644 return res;
2645}
2646EXPORT_SYMBOL(sock_no_sendpage);
2647
2648ssize_t sock_no_sendpage_locked(struct sock *sk, struct page *page,
2649 int offset, size_t size, int flags)
2650{
2651 ssize_t res;
2652 struct msghdr msg = {.msg_flags = flags};
2653 struct kvec iov;
2654 char *kaddr = kmap(page);
2655
2656 iov.iov_base = kaddr + offset;
2657 iov.iov_len = size;
2658 res = kernel_sendmsg_locked(sk, &msg, &iov, 1, size);
2659 kunmap(page);
2660 return res;
2661}
2662EXPORT_SYMBOL(sock_no_sendpage_locked);
2663
2664/*
2665 * Default Socket Callbacks
2666 */
2667
2668static void sock_def_wakeup(struct sock *sk)
2669{
2670 struct socket_wq *wq;
2671
2672 rcu_read_lock();
2673 wq = rcu_dereference(sk->sk_wq);
2674 if (skwq_has_sleeper(wq))
2675 wake_up_interruptible_all(&wq->wait);
2676 rcu_read_unlock();
2677}
2678
2679static void sock_def_error_report(struct sock *sk)
2680{
2681 struct socket_wq *wq;
2682
2683 rcu_read_lock();
2684 wq = rcu_dereference(sk->sk_wq);
2685 if (skwq_has_sleeper(wq))
2686 wake_up_interruptible_poll(&wq->wait, EPOLLERR);
2687 sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
2688 rcu_read_unlock();
2689}
2690
2691static void sock_def_readable(struct sock *sk)
2692{
2693 struct socket_wq *wq;
2694
2695 rcu_read_lock();
2696 wq = rcu_dereference(sk->sk_wq);
2697 if (skwq_has_sleeper(wq))
2698 wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN | EPOLLPRI |
2699 EPOLLRDNORM | EPOLLRDBAND);
2700 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
2701 rcu_read_unlock();
2702}
2703
2704static void sock_def_write_space(struct sock *sk)
2705{
2706 struct socket_wq *wq;
2707
2708 rcu_read_lock();
2709
2710 /* Do not wake up a writer until he can make "significant"
2711 * progress. --DaveM
2712 */
2713 if ((refcount_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
2714 wq = rcu_dereference(sk->sk_wq);
2715 if (skwq_has_sleeper(wq))
2716 wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
2717 EPOLLWRNORM | EPOLLWRBAND);
2718
2719 /* Should agree with poll, otherwise some programs break */
2720 if (sock_writeable(sk))
2721 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
2722 }
2723
2724 rcu_read_unlock();
2725}
2726
2727static void sock_def_destruct(struct sock *sk)
2728{
2729}
2730
2731void sk_send_sigurg(struct sock *sk)
2732{
2733 if (sk->sk_socket && sk->sk_socket->file)
2734 if (send_sigurg(&sk->sk_socket->file->f_owner))
2735 sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
2736}
2737EXPORT_SYMBOL(sk_send_sigurg);
2738
2739void sk_reset_timer(struct sock *sk, struct timer_list* timer,
2740 unsigned long expires)
2741{
2742 if (!mod_timer(timer, expires))
2743 sock_hold(sk);
2744}
2745EXPORT_SYMBOL(sk_reset_timer);
2746
2747void sk_stop_timer(struct sock *sk, struct timer_list* timer)
2748{
2749 if (del_timer(timer))
2750 __sock_put(sk);
2751}
2752EXPORT_SYMBOL(sk_stop_timer);
2753
2754void sock_init_data(struct socket *sock, struct sock *sk)
2755{
2756 sk_init_common(sk);
2757 sk->sk_send_head = NULL;
2758
2759 timer_setup(&sk->sk_timer, NULL, 0);
2760
2761 sk->sk_allocation = GFP_KERNEL;
2762 sk->sk_rcvbuf = sysctl_rmem_default;
2763 sk->sk_sndbuf = sysctl_wmem_default;
2764 sk->sk_state = TCP_CLOSE;
2765 sk_set_socket(sk, sock);
2766
2767 sock_set_flag(sk, SOCK_ZAPPED);
2768
2769 if (sock) {
2770 sk->sk_type = sock->type;
2771 sk->sk_wq = sock->wq;
2772 sock->sk = sk;
2773 sk->sk_uid = SOCK_INODE(sock)->i_uid;
2774 } else {
2775 sk->sk_wq = NULL;
2776 sk->sk_uid = make_kuid(sock_net(sk)->user_ns, 0);
2777 }
2778
2779 rwlock_init(&sk->sk_callback_lock);
2780 if (sk->sk_kern_sock)
2781 lockdep_set_class_and_name(
2782 &sk->sk_callback_lock,
2783 af_kern_callback_keys + sk->sk_family,
2784 af_family_kern_clock_key_strings[sk->sk_family]);
2785 else
2786 lockdep_set_class_and_name(
2787 &sk->sk_callback_lock,
2788 af_callback_keys + sk->sk_family,
2789 af_family_clock_key_strings[sk->sk_family]);
2790
2791 sk->sk_state_change = sock_def_wakeup;
2792 sk->sk_data_ready = sock_def_readable;
2793 sk->sk_write_space = sock_def_write_space;
2794 sk->sk_error_report = sock_def_error_report;
2795 sk->sk_destruct = sock_def_destruct;
2796
2797 sk->sk_frag.page = NULL;
2798 sk->sk_frag.offset = 0;
2799 sk->sk_peek_off = -1;
2800
2801 sk->sk_peer_pid = NULL;
2802 sk->sk_peer_cred = NULL;
2803 sk->sk_write_pending = 0;
2804 sk->sk_rcvlowat = 1;
2805 sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
2806 sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
2807
2808 sk->sk_stamp = SK_DEFAULT_STAMP;
2809 atomic_set(&sk->sk_zckey, 0);
2810
2811#ifdef CONFIG_NET_RX_BUSY_POLL
2812 sk->sk_napi_id = 0;
2813 sk->sk_ll_usec = sysctl_net_busy_read;
2814#endif
2815
2816 sk->sk_max_pacing_rate = ~0U;
2817 sk->sk_pacing_rate = ~0U;
2818 sk->sk_pacing_shift = 10;
2819 sk->sk_incoming_cpu = -1;
2820 /*
2821 * Before updating sk_refcnt, we must commit prior changes to memory
2822 * (Documentation/RCU/rculist_nulls.txt for details)
2823 */
2824 smp_wmb();
2825 refcount_set(&sk->sk_refcnt, 1);
2826 atomic_set(&sk->sk_drops, 0);
2827}
2828EXPORT_SYMBOL(sock_init_data);
2829
2830void lock_sock_nested(struct sock *sk, int subclass)
2831{
2832 might_sleep();
2833 spin_lock_bh(&sk->sk_lock.slock);
2834 if (sk->sk_lock.owned)
2835 __lock_sock(sk);
2836 sk->sk_lock.owned = 1;
2837 spin_unlock(&sk->sk_lock.slock);
2838 /*
2839 * The sk_lock has mutex_lock() semantics here:
2840 */
2841 mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
2842 local_bh_enable();
2843}
2844EXPORT_SYMBOL(lock_sock_nested);
2845
2846void release_sock(struct sock *sk)
2847{
2848 spin_lock_bh(&sk->sk_lock.slock);
2849 if (sk->sk_backlog.tail)
2850 __release_sock(sk);
2851
2852 /* Warning: release_cb() might need to release sk ownership,
2853 * i.e. call sock_release_ownership(sk) before us.
2854 */
2855 if (sk->sk_prot->release_cb)
2856 sk->sk_prot->release_cb(sk);
2857
2858 sock_release_ownership(sk);
2859 if (waitqueue_active(&sk->sk_lock.wq))
2860 wake_up(&sk->sk_lock.wq);
2861 spin_unlock_bh(&sk->sk_lock.slock);
2862}
2863EXPORT_SYMBOL(release_sock);
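
/*
 * Illustrative pairing (a sketch): process context brackets socket
 * state changes with the owner lock; anything queued to the backlog
 * in the meantime is processed by __release_sock() above.
 *
 *	lock_sock(sk);
 *	... modify socket state ...
 *	release_sock(sk);
 */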
2864
2865/**
2866 * lock_sock_fast - fast version of lock_sock
2867 * @sk: socket
2868 *
2869 * This version should be used for very small sections, where the process won't block.
2870 * Return false if the fast path is taken:
2871 *
2872 * sk_lock.slock locked, owned = 0, BH disabled
2873 *
2874 * Return true if the slow path is taken:
2875 *
2876 * sk_lock.slock unlocked, owned = 1, BH enabled
2877 */
2878bool lock_sock_fast(struct sock *sk)
2879{
2880 might_sleep();
2881 spin_lock_bh(&sk->sk_lock.slock);
2882
2883 if (!sk->sk_lock.owned)
2884 /*
2885 * Note : We must disable BH
2886 */
2887 return false;
2888
2889 __lock_sock(sk);
2890 sk->sk_lock.owned = 1;
2891 spin_unlock(&sk->sk_lock.slock);
2892 /*
2893 * The sk_lock has mutex_lock() semantics here:
2894 */
2895 mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
2896 local_bh_enable();
2897 return true;
2898}
2899EXPORT_SYMBOL(lock_sock_fast);
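
/*
 * Illustrative pairing (a sketch): the return value must be passed to
 * unlock_sock_fast() so it can undo whichever path was taken.
 *
 *	bool slow = lock_sock_fast(sk);
 *	... short critical section ...
 *	unlock_sock_fast(sk, slow);
 */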
2900
2901int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
2902{
2903 struct timeval tv;
2904 if (!sock_flag(sk, SOCK_TIMESTAMP))
2905 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
2906 tv = ktime_to_timeval(sk->sk_stamp);
2907 if (tv.tv_sec == -1)
2908 return -ENOENT;
2909 if (tv.tv_sec == 0) {
2910 sk->sk_stamp = ktime_get_real();
2911 tv = ktime_to_timeval(sk->sk_stamp);
2912 }
2913 return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
2914}
2915EXPORT_SYMBOL(sock_get_timestamp);
2916
2917int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
2918{
2919 struct timespec ts;
2920 if (!sock_flag(sk, SOCK_TIMESTAMP))
2921 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
2922 ts = ktime_to_timespec(sk->sk_stamp);
2923 if (ts.tv_sec == -1)
2924 return -ENOENT;
2925 if (ts.tv_sec == 0) {
2926 sk->sk_stamp = ktime_get_real();
2927 ts = ktime_to_timespec(sk->sk_stamp);
2928 }
2929 return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
2930}
2931EXPORT_SYMBOL(sock_get_timestampns);
2932
2933void sock_enable_timestamp(struct sock *sk, int flag)
2934{
2935 if (!sock_flag(sk, flag)) {
2936 unsigned long previous_flags = sk->sk_flags;
2937
2938 sock_set_flag(sk, flag);
2939 /*
2940 * We just set one of the two flags which require net
2941 * time stamping, but time stamping might have been on
2942 * already because of the other one.
2943 */
2944 if (sock_needs_netstamp(sk) &&
2945 !(previous_flags & SK_FLAGS_TIMESTAMP))
2946 net_enable_timestamp();
2947 }
2948}
2949
2950int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
2951 int level, int type)
2952{
2953 struct sock_exterr_skb *serr;
2954 struct sk_buff *skb;
2955 int copied, err;
2956
2957 err = -EAGAIN;
2958 skb = sock_dequeue_err_skb(sk);
2959 if (skb == NULL)
2960 goto out;
2961
2962 copied = skb->len;
2963 if (copied > len) {
2964 msg->msg_flags |= MSG_TRUNC;
2965 copied = len;
2966 }
2967 err = skb_copy_datagram_msg(skb, 0, msg, copied);
2968 if (err)
2969 goto out_free_skb;
2970
2971 sock_recv_timestamp(msg, sk, skb);
2972
2973 serr = SKB_EXT_ERR(skb);
2974 put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);
2975
2976 msg->msg_flags |= MSG_ERRQUEUE;
2977 err = copied;
2978
2979out_free_skb:
2980 kfree_skb(skb);
2981out:
2982 return err;
2983}
2984EXPORT_SYMBOL(sock_recv_errqueue);
2985
2986/*
2987 * Get a socket option on a socket.
2988 *
2989 * FIX: POSIX 1003.1g is very ambiguous here. It states that
2990 * asynchronous errors should be reported by getsockopt. We assume
2991 * this means if you specify SO_ERROR (otherwise what's the point of it).
2992 */
2993int sock_common_getsockopt(struct socket *sock, int level, int optname,
2994 char __user *optval, int __user *optlen)
2995{
2996 struct sock *sk = sock->sk;
2997
2998 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
2999}
3000EXPORT_SYMBOL(sock_common_getsockopt);
3001
3002#ifdef CONFIG_COMPAT
3003int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
3004 char __user *optval, int __user *optlen)
3005{
3006 struct sock *sk = sock->sk;
3007
3008 if (sk->sk_prot->compat_getsockopt != NULL)
3009 return sk->sk_prot->compat_getsockopt(sk, level, optname,
3010 optval, optlen);
3011 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
3012}
3013EXPORT_SYMBOL(compat_sock_common_getsockopt);
3014#endif
3015
3016int sock_common_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
3017 int flags)
3018{
3019 struct sock *sk = sock->sk;
3020 int addr_len = 0;
3021 int err;
3022
3023 err = sk->sk_prot->recvmsg(sk, msg, size, flags & MSG_DONTWAIT,
3024 flags & ~MSG_DONTWAIT, &addr_len);
3025 if (err >= 0)
3026 msg->msg_namelen = addr_len;
3027 return err;
3028}
3029EXPORT_SYMBOL(sock_common_recvmsg);
3030
3031/*
3032 * Set socket options on an inet socket.
3033 */
3034int sock_common_setsockopt(struct socket *sock, int level, int optname,
3035 char __user *optval, unsigned int optlen)
3036{
3037 struct sock *sk = sock->sk;
3038
3039 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
3040}
3041EXPORT_SYMBOL(sock_common_setsockopt);
3042
3043#ifdef CONFIG_COMPAT
3044int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
3045 char __user *optval, unsigned int optlen)
3046{
3047 struct sock *sk = sock->sk;
3048
3049 if (sk->sk_prot->compat_setsockopt != NULL)
3050 return sk->sk_prot->compat_setsockopt(sk, level, optname,
3051 optval, optlen);
3052 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
3053}
3054EXPORT_SYMBOL(compat_sock_common_setsockopt);
3055#endif
3056
3057void sk_common_release(struct sock *sk)
3058{
3059 if (sk->sk_prot->destroy)
3060 sk->sk_prot->destroy(sk);
3061
3062 /*
3063 * Observation: when sock_common_release is called, processes have
3064 * no access to the socket, but the network stack still does.
3065 * Step one, detach it from networking:
3066 *
3067 * A. Remove from hash tables.
3068 */
3069
3070 sk->sk_prot->unhash(sk);
3071
3072 /*
3073 * At this point the socket cannot receive new packets, but some may
3074 * be in flight because some CPU is running the receiver and did its
3075 * hash table lookup before we unhashed the socket. They will reach
3076 * the receive queue and will be purged by the socket destructor.
3077 *
3078 * Also, we still have packets pending on the receive queue and probably
3079 * our own packets waiting in device queues. sock_destroy will drain the
3080 * receive queue, but transmitted packets will delay socket destruction
3081 * until the last reference is released.
3082 */
3083
3084 sock_orphan(sk);
3085
3086 xfrm_sk_free_policy(sk);
3087
3088 sk_refcnt_debug_release(sk);
3089
3090 sock_put(sk);
3091}
3092EXPORT_SYMBOL(sk_common_release);
3093
3094void sk_get_meminfo(const struct sock *sk, u32 *mem)
3095{
3096 memset(mem, 0, sizeof(*mem) * SK_MEMINFO_VARS);
3097
3098 mem[SK_MEMINFO_RMEM_ALLOC] = sk_rmem_alloc_get(sk);
3099 mem[SK_MEMINFO_RCVBUF] = sk->sk_rcvbuf;
3100 mem[SK_MEMINFO_WMEM_ALLOC] = sk_wmem_alloc_get(sk);
3101 mem[SK_MEMINFO_SNDBUF] = sk->sk_sndbuf;
3102 mem[SK_MEMINFO_FWD_ALLOC] = sk->sk_forward_alloc;
3103 mem[SK_MEMINFO_WMEM_QUEUED] = sk->sk_wmem_queued;
3104 mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);
3105 mem[SK_MEMINFO_BACKLOG] = sk->sk_backlog.len;
3106 mem[SK_MEMINFO_DROPS] = atomic_read(&sk->sk_drops);
3107}
3108
3109#ifdef CONFIG_PROC_FS
3110#define PROTO_INUSE_NR 64 /* should be enough for the first time */
3111struct prot_inuse {
3112 int val[PROTO_INUSE_NR];
3113};
3114
3115static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);
3116
3117void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
3118{
3119 __this_cpu_add(net->core.prot_inuse->val[prot->inuse_idx], val);
3120}
3121EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
3122
3123int sock_prot_inuse_get(struct net *net, struct proto *prot)
3124{
3125 int cpu, idx = prot->inuse_idx;
3126 int res = 0;
3127
3128 for_each_possible_cpu(cpu)
3129 res += per_cpu_ptr(net->core.prot_inuse, cpu)->val[idx];
3130
3131 return res >= 0 ? res : 0;
3132}
3133EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
3134
3135static void sock_inuse_add(struct net *net, int val)
3136{
3137 this_cpu_add(*net->core.sock_inuse, val);
3138}
3139
3140int sock_inuse_get(struct net *net)
3141{
3142 int cpu, res = 0;
3143
3144 for_each_possible_cpu(cpu)
3145 res += *per_cpu_ptr(net->core.sock_inuse, cpu);
3146
3147 return res;
3148}
3149
3150EXPORT_SYMBOL_GPL(sock_inuse_get);
3151
3152static int __net_init sock_inuse_init_net(struct net *net)
3153{
3154 net->core.prot_inuse = alloc_percpu(struct prot_inuse);
3155 if (net->core.prot_inuse == NULL)
3156 return -ENOMEM;
3157
3158 net->core.sock_inuse = alloc_percpu(int);
3159 if (net->core.sock_inuse == NULL)
3160 goto out;
3161
3162 return 0;
3163
3164out:
3165 free_percpu(net->core.prot_inuse);
3166 return -ENOMEM;
3167}
3168
3169static void __net_exit sock_inuse_exit_net(struct net *net)
3170{
3171 free_percpu(net->core.prot_inuse);
3172 free_percpu(net->core.sock_inuse);
3173}
3174
3175static struct pernet_operations net_inuse_ops = {
3176 .init = sock_inuse_init_net,
3177 .exit = sock_inuse_exit_net,
3178};
3179
3180static __init int net_inuse_init(void)
3181{
3182 if (register_pernet_subsys(&net_inuse_ops))
3183 panic("Cannot initialize net inuse counters");
3184
3185 return 0;
3186}
3187
3188core_initcall(net_inuse_init);
3189
3190static void assign_proto_idx(struct proto *prot)
3191{
3192 prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);
3193
3194 if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
3195 pr_err("PROTO_INUSE_NR exhausted\n");
3196 return;
3197 }
3198
3199 set_bit(prot->inuse_idx, proto_inuse_idx);
3200}
3201
3202static void release_proto_idx(struct proto *prot)
3203{
3204 if (prot->inuse_idx != PROTO_INUSE_NR - 1)
3205 clear_bit(prot->inuse_idx, proto_inuse_idx);
3206}
3207#else
3208static inline void assign_proto_idx(struct proto *prot)
3209{
3210}
3211
3212static inline void release_proto_idx(struct proto *prot)
3213{
3214}
3215
3216static void sock_inuse_add(struct net *net, int val)
3217{
3218}
3219#endif
3220
3221static void req_prot_cleanup(struct request_sock_ops *rsk_prot)
3222{
3223 if (!rsk_prot)
3224 return;
3225 kfree(rsk_prot->slab_name);
3226 rsk_prot->slab_name = NULL;
3227 kmem_cache_destroy(rsk_prot->slab);
3228 rsk_prot->slab = NULL;
3229}
3230
3231static int req_prot_init(const struct proto *prot)
3232{
3233 struct request_sock_ops *rsk_prot = prot->rsk_prot;
3234
3235 if (!rsk_prot)
3236 return 0;
3237
3238 rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s",
3239 prot->name);
3240 if (!rsk_prot->slab_name)
3241 return -ENOMEM;
3242
3243 rsk_prot->slab = kmem_cache_create(rsk_prot->slab_name,
3244 rsk_prot->obj_size, 0,
3245 prot->slab_flags, NULL);
3246
3247 if (!rsk_prot->slab) {
3248 pr_crit("%s: Can't create request sock SLAB cache!\n",
3249 prot->name);
3250 return -ENOMEM;
3251 }
3252 return 0;
3253}
3254
3255int proto_register(struct proto *prot, int alloc_slab)
3256{
3257 if (alloc_slab) {
3258 prot->slab = kmem_cache_create_usercopy(prot->name,
3259 prot->obj_size, 0,
3260 SLAB_HWCACHE_ALIGN | prot->slab_flags,
3261 prot->useroffset, prot->usersize,
3262 NULL);
3263
3264 if (prot->slab == NULL) {
3265 pr_crit("%s: Can't create sock SLAB cache!\n",
3266 prot->name);
3267 goto out;
3268 }
3269
3270 if (req_prot_init(prot))
3271 goto out_free_request_sock_slab;
3272
3273 if (prot->twsk_prot != NULL) {
3274 prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);
3275
3276 if (prot->twsk_prot->twsk_slab_name == NULL)
3277 goto out_free_request_sock_slab;
3278
3279 prot->twsk_prot->twsk_slab =
3280 kmem_cache_create(prot->twsk_prot->twsk_slab_name,
3281 prot->twsk_prot->twsk_obj_size,
3282 0,
3283 prot->slab_flags,
3284 NULL);
3285 if (prot->twsk_prot->twsk_slab == NULL)
3286 goto out_free_timewait_sock_slab_name;
3287 }
3288 }
3289
3290 mutex_lock(&proto_list_mutex);
3291 list_add(&prot->node, &proto_list);
3292 assign_proto_idx(prot);
3293 mutex_unlock(&proto_list_mutex);
3294 return 0;
3295
3296out_free_timewait_sock_slab_name:
3297 kfree(prot->twsk_prot->twsk_slab_name);
3298out_free_request_sock_slab:
3299 req_prot_cleanup(prot->rsk_prot);
3300
3301 kmem_cache_destroy(prot->slab);
3302 prot->slab = NULL;
3303out:
3304 return -ENOBUFS;
3305}
3306EXPORT_SYMBOL(proto_register);
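
/*
 * Illustrative module-init use (a sketch; "my_proto" and "my_sock" are
 * hypothetical):
 *
 *	static struct proto my_proto = {
 *		.name	  = "MYPROTO",
 *		.owner	  = THIS_MODULE,
 *		.obj_size = sizeof(struct my_sock),
 *	};
 *
 *	err = proto_register(&my_proto, 1);
 *
 * with a matching proto_unregister(&my_proto) on module exit.
 */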
3307
3308void proto_unregister(struct proto *prot)
3309{
3310 mutex_lock(&proto_list_mutex);
3311 release_proto_idx(prot);
3312 list_del(&prot->node);
3313 mutex_unlock(&proto_list_mutex);
3314
3315 kmem_cache_destroy(prot->slab);
3316 prot->slab = NULL;
3317
3318 req_prot_cleanup(prot->rsk_prot);
3319
3320 if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
3321 kmem_cache_destroy(prot->twsk_prot->twsk_slab);
3322 kfree(prot->twsk_prot->twsk_slab_name);
3323 prot->twsk_prot->twsk_slab = NULL;
3324 }
3325}
3326EXPORT_SYMBOL(proto_unregister);
3327
3328int sock_load_diag_module(int family, int protocol)
3329{
3330 if (!protocol) {
3331 if (!sock_is_registered(family))
3332 return -ENOENT;
3333
3334 return request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
3335 NETLINK_SOCK_DIAG, family);
3336 }
3337
3338#ifdef CONFIG_INET
3339 if (family == AF_INET &&
3340 !rcu_access_pointer(inet_protos[protocol]))
3341 return -ENOENT;
3342#endif
3343
3344 return request_module("net-pf-%d-proto-%d-type-%d-%d", PF_NETLINK,
3345 NETLINK_SOCK_DIAG, family, protocol);
3346}
3347EXPORT_SYMBOL(sock_load_diag_module);
3348
3349#ifdef CONFIG_PROC_FS
3350static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
3351 __acquires(proto_list_mutex)
3352{
3353 mutex_lock(&proto_list_mutex);
3354 return seq_list_start_head(&proto_list, *pos);
3355}
3356
3357static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3358{
3359 return seq_list_next(v, &proto_list, pos);
3360}
3361
3362static void proto_seq_stop(struct seq_file *seq, void *v)
3363 __releases(proto_list_mutex)
3364{
3365 mutex_unlock(&proto_list_mutex);
3366}
3367
3368static char proto_method_implemented(const void *method)
3369{
3370 return method == NULL ? 'n' : 'y';
3371}
3372static long sock_prot_memory_allocated(struct proto *proto)
3373{
3374 return proto->memory_allocated != NULL ? proto_memory_allocated(proto) : -1L;
3375}
3376
3377static char *sock_prot_memory_pressure(struct proto *proto)
3378{
3379 return proto->memory_pressure != NULL ?
3380 proto_memory_pressure(proto) ? "yes" : "no" : "NI";
3381}
3382
3383static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
3384{
3385
3386 seq_printf(seq, "%-9s %4u %6d %6ld %-3s %6u %-3s %-10s "
3387 "%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
3388 proto->name,
3389 proto->obj_size,
3390 sock_prot_inuse_get(seq_file_net(seq), proto),
3391 sock_prot_memory_allocated(proto),
3392 sock_prot_memory_pressure(proto),
3393 proto->max_header,
3394 proto->slab == NULL ? "no" : "yes",
3395 module_name(proto->owner),
3396 proto_method_implemented(proto->close),
3397 proto_method_implemented(proto->connect),
3398 proto_method_implemented(proto->disconnect),
3399 proto_method_implemented(proto->accept),
3400 proto_method_implemented(proto->ioctl),
3401 proto_method_implemented(proto->init),
3402 proto_method_implemented(proto->destroy),
3403 proto_method_implemented(proto->shutdown),
3404 proto_method_implemented(proto->setsockopt),
3405 proto_method_implemented(proto->getsockopt),
3406 proto_method_implemented(proto->sendmsg),
3407 proto_method_implemented(proto->recvmsg),
3408 proto_method_implemented(proto->sendpage),
3409 proto_method_implemented(proto->bind),
3410 proto_method_implemented(proto->backlog_rcv),
3411 proto_method_implemented(proto->hash),
3412 proto_method_implemented(proto->unhash),
3413 proto_method_implemented(proto->get_port),
3414 proto_method_implemented(proto->enter_memory_pressure));
3415}
3416
3417static int proto_seq_show(struct seq_file *seq, void *v)
3418{
3419 if (v == &proto_list)
3420 seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
3421 "protocol",
3422 "size",
3423 "sockets",
3424 "memory",
3425 "press",
3426 "maxhdr",
3427 "slab",
3428 "module",
3429 "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
3430 else
3431 proto_seq_printf(seq, list_entry(v, struct proto, node));
3432 return 0;
3433}
3434
3435static const struct seq_operations proto_seq_ops = {
3436 .start = proto_seq_start,
3437 .next = proto_seq_next,
3438 .stop = proto_seq_stop,
3439 .show = proto_seq_show,
3440};
3441
3442static int proto_seq_open(struct inode *inode, struct file *file)
3443{
3444 return seq_open_net(inode, file, &proto_seq_ops,
3445 sizeof(struct seq_net_private));
3446}
3447
3448static const struct file_operations proto_seq_fops = {
3449 .open = proto_seq_open,
3450 .read = seq_read,
3451 .llseek = seq_lseek,
3452 .release = seq_release_net,
3453};
3454
3455static __net_init int proto_init_net(struct net *net)
3456{
3457 if (!proc_create("protocols", 0444, net->proc_net, &proto_seq_fops))
3458 return -ENOMEM;
3459
3460 return 0;
3461}
3462
3463static __net_exit void proto_exit_net(struct net *net)
3464{
3465 remove_proc_entry("protocols", net->proc_net);
3466}
3467
3468
3469static __net_initdata struct pernet_operations proto_net_ops = {
3470 .init = proto_init_net,
3471 .exit = proto_exit_net,
3472};
3473
3474static int __init proto_init(void)
3475{
3476 return register_pernet_subsys(&proto_net_ops);
3477}
3478
3479subsys_initcall(proto_init);
3480
3481#endif /* PROC_FS */
3482
3483#ifdef CONFIG_NET_RX_BUSY_POLL
3484bool sk_busy_loop_end(void *p, unsigned long start_time)
3485{
3486 struct sock *sk = p;
3487
3488 return !skb_queue_empty(&sk->sk_receive_queue) ||
3489 sk_busy_loop_timeout(sk, start_time);
3490}
3491EXPORT_SYMBOL(sk_busy_loop_end);
3492#endif /* CONFIG_NET_RX_BUSY_POLL */