1/*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * Generic socket support routines. Memory allocators, socket lock/release
7 * handler for protocols to use and generic option handler.
8 *
9 *
10 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Florian La Roche, <flla@stud.uni-sb.de>
13 * Alan Cox, <A.Cox@swansea.ac.uk>
14 *
15 * Fixes:
16 * Alan Cox : Numerous verify_area() problems
17 * Alan Cox : Connecting on a connecting socket
18 * now returns an error for tcp.
19 * Alan Cox : sock->protocol is set correctly.
20 * and is not sometimes left as 0.
21 * Alan Cox : connect handles icmp errors on a
22 * connect properly. Unfortunately there
23 * is a restart syscall nasty there. I
24 * can't match BSD without hacking the C
25 * library. Ideas urgently sought!
26 * Alan Cox : Disallow bind() to addresses that are
27 * not ours - especially broadcast ones!!
28 * Alan Cox : Socket 1024 _IS_ ok for users. (fencepost)
29 * Alan Cox : sock_wfree/sock_rfree don't destroy sockets,
30 * instead they leave that for the DESTROY timer.
31 * Alan Cox : Clean up error flag in accept
32 * Alan Cox : TCP ack handling is buggy, the DESTROY timer
33 * was buggy. Put a remove_sock() in the handler
34 * for memory when we hit 0. Also altered the timer
35 * code. The ACK stuff can wait and needs major
36 * TCP layer surgery.
37 * Alan Cox : Fixed TCP ack bug, removed remove sock
38 * and fixed timer/inet_bh race.
39 * Alan Cox : Added zapped flag for TCP
40 * Alan Cox : Move kfree_skb into skbuff.c and tidied up surplus code
41 * Alan Cox : for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
42 * Alan Cox : kfree_s calls now are kfree_skbmem so we can track skb resources
43 * Alan Cox : Supports socket option broadcast now as does udp. Packet and raw need fixing.
44 * Alan Cox : Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
45 * Rick Sladkey : Relaxed UDP rules for matching packets.
46 * C.E.Hawkins : IFF_PROMISC/SIOCGHWADDR support
47 * Pauline Middelink : identd support
48 * Alan Cox : Fixed connect() taking signals I think.
49 * Alan Cox : SO_LINGER supported
50 * Alan Cox : Error reporting fixes
51 * Anonymous : inet_create tidied up (sk->reuse setting)
52 * Alan Cox : inet sockets don't set sk->type!
53 * Alan Cox : Split socket option code
54 * Alan Cox : Callbacks
55 * Alan Cox : Nagle flag for Charles & Johannes stuff
56 * Alex : Removed restriction on inet fioctl
57 * Alan Cox : Splitting INET from NET core
58 * Alan Cox : Fixed bogus SO_TYPE handling in getsockopt()
59 * Adam Caldwell : Missing return in SO_DONTROUTE/SO_DEBUG code
60 * Alan Cox : Split IP from generic code
61 * Alan Cox : New kfree_skbmem()
62 * Alan Cox : Make SO_DEBUG superuser only.
63 * Alan Cox : Allow anyone to clear SO_DEBUG
64 * (compatibility fix)
65 * Alan Cox : Added optimistic memory grabbing for AF_UNIX throughput.
66 * Alan Cox : Allocator for a socket is settable.
67 * Alan Cox : SO_ERROR includes soft errors.
68 * Alan Cox : Allow NULL arguments on some SO_ opts
69 * Alan Cox : Generic socket allocation to make hooks
70 * easier (suggested by Craig Metz).
71 * Michael Pall : SO_ERROR returns positive errno again
72 * Steve Whitehouse: Added default destructor to free
73 * protocol private data.
74 * Steve Whitehouse: Added various other default routines
75 * common to several socket families.
76 * Chris Evans : Call suser() check last on F_SETOWN
77 * Jay Schulist : Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
78 * Andi Kleen : Add sock_kmalloc()/sock_kfree_s()
79 * Andi Kleen : Fix write_space callback
80 * Chris Evans : Security fixes - signedness again
81 * Arnaldo C. Melo : cleanups, use skb_queue_purge
82 *
83 * To Fix:
84 *
85 *
86 * This program is free software; you can redistribute it and/or
87 * modify it under the terms of the GNU General Public License
88 * as published by the Free Software Foundation; either version
89 * 2 of the License, or (at your option) any later version.
90 */
91
92#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
93
94#include <linux/capability.h>
95#include <linux/errno.h>
96#include <linux/errqueue.h>
97#include <linux/types.h>
98#include <linux/socket.h>
99#include <linux/in.h>
100#include <linux/kernel.h>
101#include <linux/module.h>
102#include <linux/proc_fs.h>
103#include <linux/seq_file.h>
104#include <linux/sched.h>
105#include <linux/timer.h>
106#include <linux/string.h>
107#include <linux/sockios.h>
108#include <linux/net.h>
109#include <linux/mm.h>
110#include <linux/slab.h>
111#include <linux/interrupt.h>
112#include <linux/poll.h>
113#include <linux/tcp.h>
114#include <linux/init.h>
115#include <linux/highmem.h>
116#include <linux/user_namespace.h>
117#include <linux/static_key.h>
118#include <linux/memcontrol.h>
119#include <linux/prefetch.h>
120
121#include <asm/uaccess.h>
122
123#include <linux/netdevice.h>
124#include <net/protocol.h>
125#include <linux/skbuff.h>
126#include <net/net_namespace.h>
127#include <net/request_sock.h>
128#include <net/sock.h>
129#include <linux/net_tstamp.h>
130#include <net/xfrm.h>
131#include <linux/ipsec.h>
132#include <net/cls_cgroup.h>
133#include <net/netprio_cgroup.h>
134
135#include <linux/filter.h>
136
137#include <trace/events/sock.h>
138
139#ifdef CONFIG_INET
140#include <net/tcp.h>
141#endif
142
143#include <net/busy_poll.h>
144
145static DEFINE_MUTEX(proto_list_mutex);
146static LIST_HEAD(proto_list);
147
148/**
149 * sk_ns_capable - General socket capability test
150 * @sk: Socket to use a capability on or through
151 * @user_ns: The user namespace of the capability to use
152 * @cap: The capability to use
153 *
 154 * Test to see if the opener of the socket had the capability @cap when
 155 * the socket was created and the current process has the capability
 156 * @cap in the user namespace @user_ns.
157 */
158bool sk_ns_capable(const struct sock *sk,
159 struct user_namespace *user_ns, int cap)
160{
161 return file_ns_capable(sk->sk_socket->file, user_ns, cap) &&
162 ns_capable(user_ns, cap);
163}
164EXPORT_SYMBOL(sk_ns_capable);
165
166/**
167 * sk_capable - Socket global capability test
168 * @sk: Socket to use a capability on or through
 169 * @cap: The global capability to use
 170 *
 171 * Test to see if the opener of the socket had the capability @cap when
 172 * the socket was created and the current process has the capability
 173 * @cap in all user namespaces.
174 */
175bool sk_capable(const struct sock *sk, int cap)
176{
177 return sk_ns_capable(sk, &init_user_ns, cap);
178}
179EXPORT_SYMBOL(sk_capable);
180
181/**
182 * sk_net_capable - Network namespace socket capability test
183 * @sk: Socket to use a capability on or through
184 * @cap: The capability to use
185 *
 186 * Test to see if the opener of the socket had the capability @cap when the
 187 * socket was created and the current process has the capability @cap over
 188 * the network namespace the socket is a member of.
189 */
190bool sk_net_capable(const struct sock *sk, int cap)
191{
192 return sk_ns_capable(sk, sock_net(sk)->user_ns, cap);
193}
194EXPORT_SYMBOL(sk_net_capable);
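
/* Illustrative sketch (not part of this file): how a protocol's option
 * handler might gate a privileged per-socket setting with the helpers
 * above. foo_set_privileged_opt() and foo_apply_opt() are hypothetical
 * names used only for illustration.
 *
 *	static int foo_set_privileged_opt(struct sock *sk, int val)
 *	{
 *		if (!sk_net_capable(sk, CAP_NET_ADMIN))
 *			return -EPERM;
 *		return foo_apply_opt(sk, val);
 *	}
 */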
195
196
197#ifdef CONFIG_MEMCG_KMEM
198int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
199{
200 struct proto *proto;
201 int ret = 0;
202
203 mutex_lock(&proto_list_mutex);
204 list_for_each_entry(proto, &proto_list, node) {
205 if (proto->init_cgroup) {
206 ret = proto->init_cgroup(memcg, ss);
207 if (ret)
208 goto out;
209 }
210 }
211
212 mutex_unlock(&proto_list_mutex);
213 return ret;
214out:
215 list_for_each_entry_continue_reverse(proto, &proto_list, node)
216 if (proto->destroy_cgroup)
217 proto->destroy_cgroup(memcg);
218 mutex_unlock(&proto_list_mutex);
219 return ret;
220}
221
222void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg)
223{
224 struct proto *proto;
225
226 mutex_lock(&proto_list_mutex);
227 list_for_each_entry_reverse(proto, &proto_list, node)
228 if (proto->destroy_cgroup)
229 proto->destroy_cgroup(memcg);
230 mutex_unlock(&proto_list_mutex);
231}
232#endif
233
234/*
235 * Each address family might have different locking rules, so we have
236 * one slock key per address family:
237 */
238static struct lock_class_key af_family_keys[AF_MAX];
239static struct lock_class_key af_family_slock_keys[AF_MAX];
240
241#if defined(CONFIG_MEMCG_KMEM)
242struct static_key memcg_socket_limit_enabled;
243EXPORT_SYMBOL(memcg_socket_limit_enabled);
244#endif
245
246/*
247 * Make lock validator output more readable. (we pre-construct these
 248 * strings at build time, so that runtime initialization of socket
249 * locks is fast):
250 */
251static const char *const af_family_key_strings[AF_MAX+1] = {
252 "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX" , "sk_lock-AF_INET" ,
253 "sk_lock-AF_AX25" , "sk_lock-AF_IPX" , "sk_lock-AF_APPLETALK",
254 "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE" , "sk_lock-AF_ATMPVC" ,
255 "sk_lock-AF_X25" , "sk_lock-AF_INET6" , "sk_lock-AF_ROSE" ,
256 "sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI" , "sk_lock-AF_SECURITY" ,
257 "sk_lock-AF_KEY" , "sk_lock-AF_NETLINK" , "sk_lock-AF_PACKET" ,
258 "sk_lock-AF_ASH" , "sk_lock-AF_ECONET" , "sk_lock-AF_ATMSVC" ,
259 "sk_lock-AF_RDS" , "sk_lock-AF_SNA" , "sk_lock-AF_IRDA" ,
260 "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE" , "sk_lock-AF_LLC" ,
261 "sk_lock-27" , "sk_lock-28" , "sk_lock-AF_CAN" ,
262 "sk_lock-AF_TIPC" , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV" ,
263 "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN" , "sk_lock-AF_PHONET" ,
264 "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG" ,
265 "sk_lock-AF_NFC" , "sk_lock-AF_VSOCK" , "sk_lock-AF_MAX"
266};
267static const char *const af_family_slock_key_strings[AF_MAX+1] = {
268 "slock-AF_UNSPEC", "slock-AF_UNIX" , "slock-AF_INET" ,
269 "slock-AF_AX25" , "slock-AF_IPX" , "slock-AF_APPLETALK",
270 "slock-AF_NETROM", "slock-AF_BRIDGE" , "slock-AF_ATMPVC" ,
271 "slock-AF_X25" , "slock-AF_INET6" , "slock-AF_ROSE" ,
272 "slock-AF_DECnet", "slock-AF_NETBEUI" , "slock-AF_SECURITY" ,
273 "slock-AF_KEY" , "slock-AF_NETLINK" , "slock-AF_PACKET" ,
274 "slock-AF_ASH" , "slock-AF_ECONET" , "slock-AF_ATMSVC" ,
275 "slock-AF_RDS" , "slock-AF_SNA" , "slock-AF_IRDA" ,
276 "slock-AF_PPPOX" , "slock-AF_WANPIPE" , "slock-AF_LLC" ,
277 "slock-27" , "slock-28" , "slock-AF_CAN" ,
278 "slock-AF_TIPC" , "slock-AF_BLUETOOTH", "slock-AF_IUCV" ,
279 "slock-AF_RXRPC" , "slock-AF_ISDN" , "slock-AF_PHONET" ,
280 "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG" ,
281 "slock-AF_NFC" , "slock-AF_VSOCK" ,"slock-AF_MAX"
282};
283static const char *const af_family_clock_key_strings[AF_MAX+1] = {
284 "clock-AF_UNSPEC", "clock-AF_UNIX" , "clock-AF_INET" ,
285 "clock-AF_AX25" , "clock-AF_IPX" , "clock-AF_APPLETALK",
286 "clock-AF_NETROM", "clock-AF_BRIDGE" , "clock-AF_ATMPVC" ,
287 "clock-AF_X25" , "clock-AF_INET6" , "clock-AF_ROSE" ,
288 "clock-AF_DECnet", "clock-AF_NETBEUI" , "clock-AF_SECURITY" ,
289 "clock-AF_KEY" , "clock-AF_NETLINK" , "clock-AF_PACKET" ,
290 "clock-AF_ASH" , "clock-AF_ECONET" , "clock-AF_ATMSVC" ,
291 "clock-AF_RDS" , "clock-AF_SNA" , "clock-AF_IRDA" ,
292 "clock-AF_PPPOX" , "clock-AF_WANPIPE" , "clock-AF_LLC" ,
293 "clock-27" , "clock-28" , "clock-AF_CAN" ,
294 "clock-AF_TIPC" , "clock-AF_BLUETOOTH", "clock-AF_IUCV" ,
295 "clock-AF_RXRPC" , "clock-AF_ISDN" , "clock-AF_PHONET" ,
296 "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG" ,
297 "clock-AF_NFC" , "clock-AF_VSOCK" , "clock-AF_MAX"
298};
299
300/*
301 * sk_callback_lock locking rules are per-address-family,
302 * so split the lock classes by using a per-AF key:
303 */
304static struct lock_class_key af_callback_keys[AF_MAX];
305
306/* Take into consideration the size of the struct sk_buff overhead in the
307 * determination of these values, since that is non-constant across
308 * platforms. This makes socket queueing behavior and performance
309 * not depend upon such differences.
310 */
311#define _SK_MEM_PACKETS 256
312#define _SK_MEM_OVERHEAD SKB_TRUESIZE(256)
313#define SK_WMEM_MAX (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
314#define SK_RMEM_MAX (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
315
316/* Run time adjustable parameters. */
317__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
318EXPORT_SYMBOL(sysctl_wmem_max);
319__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
320EXPORT_SYMBOL(sysctl_rmem_max);
321__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
322__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;
323
324/* Maximal space eaten by iovec or ancillary data plus some space */
325int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
326EXPORT_SYMBOL(sysctl_optmem_max);
327
328struct static_key memalloc_socks = STATIC_KEY_INIT_FALSE;
329EXPORT_SYMBOL_GPL(memalloc_socks);
330
331/**
332 * sk_set_memalloc - sets %SOCK_MEMALLOC
333 * @sk: socket to set it on
334 *
335 * Set %SOCK_MEMALLOC on a socket for access to emergency reserves.
336 * It's the responsibility of the admin to adjust min_free_kbytes
 337 * to meet the requirements.
338 */
339void sk_set_memalloc(struct sock *sk)
340{
341 sock_set_flag(sk, SOCK_MEMALLOC);
342 sk->sk_allocation |= __GFP_MEMALLOC;
343 static_key_slow_inc(&memalloc_socks);
344}
345EXPORT_SYMBOL_GPL(sk_set_memalloc);
346
347void sk_clear_memalloc(struct sock *sk)
348{
349 sock_reset_flag(sk, SOCK_MEMALLOC);
350 sk->sk_allocation &= ~__GFP_MEMALLOC;
351 static_key_slow_dec(&memalloc_socks);
352
353 /*
354 * SOCK_MEMALLOC is allowed to ignore rmem limits to ensure forward
355 * progress of swapping. However, if SOCK_MEMALLOC is cleared while
356 * it has rmem allocations there is a risk that the user of the
357 * socket cannot make forward progress due to exceeding the rmem
358 * limits. By rights, sk_clear_memalloc() should only be called
359 * on sockets being torn down but warn and reset the accounting if
360 * that assumption breaks.
361 */
362 if (WARN_ON(sk->sk_forward_alloc))
363 sk_mem_reclaim(sk);
364}
365EXPORT_SYMBOL_GPL(sk_clear_memalloc);
366
367int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
368{
369 int ret;
370 unsigned long pflags = current->flags;
371
372 /* these should have been dropped before queueing */
373 BUG_ON(!sock_flag(sk, SOCK_MEMALLOC));
374
375 current->flags |= PF_MEMALLOC;
376 ret = sk->sk_backlog_rcv(sk, skb);
377 tsk_restore_flags(current, pflags, PF_MEMALLOC);
378
379 return ret;
380}
381EXPORT_SYMBOL(__sk_backlog_rcv);
382
383static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
384{
385 struct timeval tv;
386
387 if (optlen < sizeof(tv))
388 return -EINVAL;
389 if (copy_from_user(&tv, optval, sizeof(tv)))
390 return -EFAULT;
391 if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
392 return -EDOM;
393
394 if (tv.tv_sec < 0) {
395 static int warned __read_mostly;
396
397 *timeo_p = 0;
398 if (warned < 10 && net_ratelimit()) {
399 warned++;
400 pr_info("%s: `%s' (pid %d) tries to set negative timeout\n",
401 __func__, current->comm, task_pid_nr(current));
402 }
403 return 0;
404 }
405 *timeo_p = MAX_SCHEDULE_TIMEOUT;
406 if (tv.tv_sec == 0 && tv.tv_usec == 0)
407 return 0;
408 if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
409 *timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
410 return 0;
411}
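
/* Illustrative user-space sketch (not kernel code): how the timeout parsed
 * above is normally supplied by an application. fd is assumed to be an
 * existing socket; includes and most error handling are omitted.
 *
 *	struct timeval tv = { .tv_sec = 5, .tv_usec = 0 };
 *
 *	if (setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)) < 0)
 *		perror("SO_RCVTIMEO");
 *
 * A tv_usec outside [0, 1000000) is rejected with EDOM, a negative tv_sec
 * is clamped to a zero (non-blocking) timeout, and {0, 0} means "wait
 * forever" (MAX_SCHEDULE_TIMEOUT).
 */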
412
413static void sock_warn_obsolete_bsdism(const char *name)
414{
415 static int warned;
416 static char warncomm[TASK_COMM_LEN];
417 if (strcmp(warncomm, current->comm) && warned < 5) {
418 strcpy(warncomm, current->comm);
419 pr_warn("process `%s' is using obsolete %s SO_BSDCOMPAT\n",
420 warncomm, name);
421 warned++;
422 }
423}
424
425#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))
426
427static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
428{
429 if (sk->sk_flags & flags) {
430 sk->sk_flags &= ~flags;
431 if (!(sk->sk_flags & SK_FLAGS_TIMESTAMP))
432 net_disable_timestamp();
433 }
434}
435
436
437int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
438{
439 int err;
440 int skb_len;
441 unsigned long flags;
442 struct sk_buff_head *list = &sk->sk_receive_queue;
443
444 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
445 atomic_inc(&sk->sk_drops);
446 trace_sock_rcvqueue_full(sk, skb);
447 return -ENOMEM;
448 }
449
450 err = sk_filter(sk, skb);
451 if (err)
452 return err;
453
454 if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
455 atomic_inc(&sk->sk_drops);
456 return -ENOBUFS;
457 }
458
459 skb->dev = NULL;
460 skb_set_owner_r(skb, sk);
461
462 /* Cache the SKB length before we tack it onto the receive
463 * queue. Once it is added it no longer belongs to us and
464 * may be freed by other threads of control pulling packets
465 * from the queue.
466 */
467 skb_len = skb->len;
468
 469	/* We escape from the RCU-protected region, so make sure we don't leak
 470	 * a non-refcounted dst.
471 */
472 skb_dst_force(skb);
473
474 spin_lock_irqsave(&list->lock, flags);
475 skb->dropcount = atomic_read(&sk->sk_drops);
476 __skb_queue_tail(list, skb);
477 spin_unlock_irqrestore(&list->lock, flags);
478
479 if (!sock_flag(sk, SOCK_DEAD))
480 sk->sk_data_ready(sk);
481 return 0;
482}
483EXPORT_SYMBOL(sock_queue_rcv_skb);
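
/* Illustrative sketch (not part of this file): the usual calling convention
 * for sock_queue_rcv_skb() in a protocol's receive path, modelled on how
 * raw/UDP-style receivers use it. foo_rcv_skb() is a hypothetical name; on
 * failure the caller still owns the skb and must free it.
 *
 *	static int foo_rcv_skb(struct sock *sk, struct sk_buff *skb)
 *	{
 *		if (sock_queue_rcv_skb(sk, skb) < 0) {
 *			kfree_skb(skb);
 *			return NET_RX_DROP;
 *		}
 *		return NET_RX_SUCCESS;
 *	}
 */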
484
485int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
486{
487 int rc = NET_RX_SUCCESS;
488
489 if (sk_filter(sk, skb))
490 goto discard_and_relse;
491
492 skb->dev = NULL;
493
494 if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
495 atomic_inc(&sk->sk_drops);
496 goto discard_and_relse;
497 }
498 if (nested)
499 bh_lock_sock_nested(sk);
500 else
501 bh_lock_sock(sk);
502 if (!sock_owned_by_user(sk)) {
503 /*
504 * trylock + unlock semantics:
505 */
506 mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);
507
508 rc = sk_backlog_rcv(sk, skb);
509
510 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
511 } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
512 bh_unlock_sock(sk);
513 atomic_inc(&sk->sk_drops);
514 goto discard_and_relse;
515 }
516
517 bh_unlock_sock(sk);
518out:
519 sock_put(sk);
520 return rc;
521discard_and_relse:
522 kfree_skb(skb);
523 goto out;
524}
525EXPORT_SYMBOL(sk_receive_skb);
526
527struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
528{
529 struct dst_entry *dst = __sk_dst_get(sk);
530
531 if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
532 sk_tx_queue_clear(sk);
533 RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
534 dst_release(dst);
535 return NULL;
536 }
537
538 return dst;
539}
540EXPORT_SYMBOL(__sk_dst_check);
541
542struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
543{
544 struct dst_entry *dst = sk_dst_get(sk);
545
546 if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
547 sk_dst_reset(sk);
548 dst_release(dst);
549 return NULL;
550 }
551
552 return dst;
553}
554EXPORT_SYMBOL(sk_dst_check);
555
556static int sock_setbindtodevice(struct sock *sk, char __user *optval,
557 int optlen)
558{
559 int ret = -ENOPROTOOPT;
560#ifdef CONFIG_NETDEVICES
561 struct net *net = sock_net(sk);
562 char devname[IFNAMSIZ];
563 int index;
564
565 /* Sorry... */
566 ret = -EPERM;
567 if (!ns_capable(net->user_ns, CAP_NET_RAW))
568 goto out;
569
570 ret = -EINVAL;
571 if (optlen < 0)
572 goto out;
573
574 /* Bind this socket to a particular device like "eth0",
575 * as specified in the passed interface name. If the
576 * name is "" or the option length is zero the socket
577 * is not bound.
578 */
579 if (optlen > IFNAMSIZ - 1)
580 optlen = IFNAMSIZ - 1;
581 memset(devname, 0, sizeof(devname));
582
583 ret = -EFAULT;
584 if (copy_from_user(devname, optval, optlen))
585 goto out;
586
587 index = 0;
588 if (devname[0] != '\0') {
589 struct net_device *dev;
590
591 rcu_read_lock();
592 dev = dev_get_by_name_rcu(net, devname);
593 if (dev)
594 index = dev->ifindex;
595 rcu_read_unlock();
596 ret = -ENODEV;
597 if (!dev)
598 goto out;
599 }
600
601 lock_sock(sk);
602 sk->sk_bound_dev_if = index;
603 sk_dst_reset(sk);
604 release_sock(sk);
605
606 ret = 0;
607
608out:
609#endif
610
611 return ret;
612}
613
614static int sock_getbindtodevice(struct sock *sk, char __user *optval,
615 int __user *optlen, int len)
616{
617 int ret = -ENOPROTOOPT;
618#ifdef CONFIG_NETDEVICES
619 struct net *net = sock_net(sk);
620 char devname[IFNAMSIZ];
621
622 if (sk->sk_bound_dev_if == 0) {
623 len = 0;
624 goto zero;
625 }
626
627 ret = -EINVAL;
628 if (len < IFNAMSIZ)
629 goto out;
630
631 ret = netdev_get_name(net, devname, sk->sk_bound_dev_if);
632 if (ret)
633 goto out;
634
635 len = strlen(devname) + 1;
636
637 ret = -EFAULT;
638 if (copy_to_user(optval, devname, len))
639 goto out;
640
641zero:
642 ret = -EFAULT;
643 if (put_user(len, optlen))
644 goto out;
645
646 ret = 0;
647
648out:
649#endif
650
651 return ret;
652}
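
/* Illustrative user-space sketch (not kernel code): binding a socket to a
 * device by name, as handled above. fd is assumed to be an existing socket
 * and the caller to have CAP_NET_RAW; "eth0" is only an example name.
 *
 *	const char *ifname = "eth0";
 *
 *	if (setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE,
 *		       ifname, strlen(ifname)) < 0)
 *		perror("SO_BINDTODEVICE");
 *
 * Passing an empty name (or a zero option length) removes the binding.
 */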
653
654static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
655{
656 if (valbool)
657 sock_set_flag(sk, bit);
658 else
659 sock_reset_flag(sk, bit);
660}
661
662/*
663 * This is meant for all protocols to use and covers goings on
664 * at the socket level. Everything here is generic.
665 */
666
667int sock_setsockopt(struct socket *sock, int level, int optname,
668 char __user *optval, unsigned int optlen)
669{
670 struct sock *sk = sock->sk;
671 int val;
672 int valbool;
673 struct linger ling;
674 int ret = 0;
675
676 /*
677 * Options without arguments
678 */
679
680 if (optname == SO_BINDTODEVICE)
681 return sock_setbindtodevice(sk, optval, optlen);
682
683 if (optlen < sizeof(int))
684 return -EINVAL;
685
686 if (get_user(val, (int __user *)optval))
687 return -EFAULT;
688
689 valbool = val ? 1 : 0;
690
691 lock_sock(sk);
692
693 switch (optname) {
694 case SO_DEBUG:
695 if (val && !capable(CAP_NET_ADMIN))
696 ret = -EACCES;
697 else
698 sock_valbool_flag(sk, SOCK_DBG, valbool);
699 break;
700 case SO_REUSEADDR:
701 sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
702 break;
703 case SO_REUSEPORT:
704 sk->sk_reuseport = valbool;
705 break;
706 case SO_TYPE:
707 case SO_PROTOCOL:
708 case SO_DOMAIN:
709 case SO_ERROR:
710 ret = -ENOPROTOOPT;
711 break;
712 case SO_DONTROUTE:
713 sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
714 break;
715 case SO_BROADCAST:
716 sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
717 break;
718 case SO_SNDBUF:
 719		/* Don't return an error here; BSD doesn't, and if you
 720		 * think about it this is right. Otherwise apps have to
 721		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
 722		 * are treated in BSD as hints.
 723		 */
724 val = min_t(u32, val, sysctl_wmem_max);
725set_sndbuf:
726 sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
727 sk->sk_sndbuf = max_t(u32, val * 2, SOCK_MIN_SNDBUF);
728 /* Wake up sending tasks if we upped the value. */
729 sk->sk_write_space(sk);
730 break;
731
732 case SO_SNDBUFFORCE:
733 if (!capable(CAP_NET_ADMIN)) {
734 ret = -EPERM;
735 break;
736 }
737 goto set_sndbuf;
738
739 case SO_RCVBUF:
 740		/* Don't return an error here; BSD doesn't, and if you
 741		 * think about it this is right. Otherwise apps have to
 742		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
 743		 * are treated in BSD as hints (see the user-space example
 744		 * after this function).
 745		 */
745 val = min_t(u32, val, sysctl_rmem_max);
746set_rcvbuf:
747 sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
748 /*
749 * We double it on the way in to account for
750 * "struct sk_buff" etc. overhead. Applications
751 * assume that the SO_RCVBUF setting they make will
752 * allow that much actual data to be received on that
753 * socket.
754 *
755 * Applications are unaware that "struct sk_buff" and
756 * other overheads allocate from the receive buffer
757 * during socket buffer allocation.
758 *
759 * And after considering the possible alternatives,
760 * returning the value we actually used in getsockopt
761 * is the most desirable behavior.
762 */
763 sk->sk_rcvbuf = max_t(u32, val * 2, SOCK_MIN_RCVBUF);
764 break;
765
766 case SO_RCVBUFFORCE:
767 if (!capable(CAP_NET_ADMIN)) {
768 ret = -EPERM;
769 break;
770 }
771 goto set_rcvbuf;
772
773 case SO_KEEPALIVE:
774#ifdef CONFIG_INET
775 if (sk->sk_protocol == IPPROTO_TCP &&
776 sk->sk_type == SOCK_STREAM)
777 tcp_set_keepalive(sk, valbool);
778#endif
779 sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
780 break;
781
782 case SO_OOBINLINE:
783 sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
784 break;
785
786 case SO_NO_CHECK:
787 sk->sk_no_check = valbool;
788 break;
789
790 case SO_PRIORITY:
791 if ((val >= 0 && val <= 6) ||
792 ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
793 sk->sk_priority = val;
794 else
795 ret = -EPERM;
796 break;
797
798 case SO_LINGER:
799 if (optlen < sizeof(ling)) {
800 ret = -EINVAL; /* 1003.1g */
801 break;
802 }
803 if (copy_from_user(&ling, optval, sizeof(ling))) {
804 ret = -EFAULT;
805 break;
806 }
807 if (!ling.l_onoff)
808 sock_reset_flag(sk, SOCK_LINGER);
809 else {
810#if (BITS_PER_LONG == 32)
811 if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
812 sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
813 else
814#endif
815 sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
816 sock_set_flag(sk, SOCK_LINGER);
817 }
818 break;
819
820 case SO_BSDCOMPAT:
821 sock_warn_obsolete_bsdism("setsockopt");
822 break;
823
824 case SO_PASSCRED:
825 if (valbool)
826 set_bit(SOCK_PASSCRED, &sock->flags);
827 else
828 clear_bit(SOCK_PASSCRED, &sock->flags);
829 break;
830
831 case SO_TIMESTAMP:
832 case SO_TIMESTAMPNS:
833 if (valbool) {
834 if (optname == SO_TIMESTAMP)
835 sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
836 else
837 sock_set_flag(sk, SOCK_RCVTSTAMPNS);
838 sock_set_flag(sk, SOCK_RCVTSTAMP);
839 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
840 } else {
841 sock_reset_flag(sk, SOCK_RCVTSTAMP);
842 sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
843 }
844 break;
845
846 case SO_TIMESTAMPING:
847 if (val & ~SOF_TIMESTAMPING_MASK) {
848 ret = -EINVAL;
849 break;
850 }
851 sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE,
852 val & SOF_TIMESTAMPING_TX_HARDWARE);
853 sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE,
854 val & SOF_TIMESTAMPING_TX_SOFTWARE);
855 sock_valbool_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE,
856 val & SOF_TIMESTAMPING_RX_HARDWARE);
857 if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
858 sock_enable_timestamp(sk,
859 SOCK_TIMESTAMPING_RX_SOFTWARE);
860 else
861 sock_disable_timestamp(sk,
862 (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
863 sock_valbool_flag(sk, SOCK_TIMESTAMPING_SOFTWARE,
864 val & SOF_TIMESTAMPING_SOFTWARE);
865 sock_valbool_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE,
866 val & SOF_TIMESTAMPING_SYS_HARDWARE);
867 sock_valbool_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE,
868 val & SOF_TIMESTAMPING_RAW_HARDWARE);
869 break;
870
871 case SO_RCVLOWAT:
872 if (val < 0)
873 val = INT_MAX;
874 sk->sk_rcvlowat = val ? : 1;
875 break;
876
877 case SO_RCVTIMEO:
878 ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
879 break;
880
881 case SO_SNDTIMEO:
882 ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
883 break;
884
885 case SO_ATTACH_FILTER:
886 ret = -EINVAL;
887 if (optlen == sizeof(struct sock_fprog)) {
888 struct sock_fprog fprog;
889
890 ret = -EFAULT;
891 if (copy_from_user(&fprog, optval, sizeof(fprog)))
892 break;
893
894 ret = sk_attach_filter(&fprog, sk);
895 }
896 break;
897
898 case SO_DETACH_FILTER:
899 ret = sk_detach_filter(sk);
900 break;
901
902 case SO_LOCK_FILTER:
903 if (sock_flag(sk, SOCK_FILTER_LOCKED) && !valbool)
904 ret = -EPERM;
905 else
906 sock_valbool_flag(sk, SOCK_FILTER_LOCKED, valbool);
907 break;
908
909 case SO_PASSSEC:
910 if (valbool)
911 set_bit(SOCK_PASSSEC, &sock->flags);
912 else
913 clear_bit(SOCK_PASSSEC, &sock->flags);
914 break;
915 case SO_MARK:
916 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
917 ret = -EPERM;
918 else
919 sk->sk_mark = val;
920 break;
921
922 /* We implement the SO_SNDLOWAT etc to
923 not be settable (1003.1g 5.3) */
924 case SO_RXQ_OVFL:
925 sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
926 break;
927
928 case SO_WIFI_STATUS:
929 sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
930 break;
931
932 case SO_PEEK_OFF:
933 if (sock->ops->set_peek_off)
934 ret = sock->ops->set_peek_off(sk, val);
935 else
936 ret = -EOPNOTSUPP;
937 break;
938
939 case SO_NOFCS:
940 sock_valbool_flag(sk, SOCK_NOFCS, valbool);
941 break;
942
943 case SO_SELECT_ERR_QUEUE:
944 sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool);
945 break;
946
947#ifdef CONFIG_NET_RX_BUSY_POLL
948 case SO_BUSY_POLL:
949 /* allow unprivileged users to decrease the value */
950 if ((val > sk->sk_ll_usec) && !capable(CAP_NET_ADMIN))
951 ret = -EPERM;
952 else {
953 if (val < 0)
954 ret = -EINVAL;
955 else
956 sk->sk_ll_usec = val;
957 }
958 break;
959#endif
960
961 case SO_MAX_PACING_RATE:
962 sk->sk_max_pacing_rate = val;
963 sk->sk_pacing_rate = min(sk->sk_pacing_rate,
964 sk->sk_max_pacing_rate);
965 break;
966
967 default:
968 ret = -ENOPROTOOPT;
969 break;
970 }
971 release_sock(sk);
972 return ret;
973}
974EXPORT_SYMBOL(sock_setsockopt);
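
/* Illustrative user-space sketch (not kernel code): the SO_RCVBUF/SO_SNDBUF
 * doubling described above is visible to applications, because getsockopt()
 * returns the value the kernel actually uses. fd is assumed to be an
 * existing socket; includes and error handling are omitted.
 *
 *	int val = 65536, out;
 *	socklen_t len = sizeof(out);
 *
 *	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, sizeof(val));
 *	getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &out, &len);
 *	printf("asked for %d, kernel uses %d\n", val, out);
 *
 * With default sysctls "out" is roughly twice "val"; the request is first
 * capped at sysctl_rmem_max unless SO_RCVBUFFORCE (CAP_NET_ADMIN) is used.
 */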
975
976
977static void cred_to_ucred(struct pid *pid, const struct cred *cred,
978 struct ucred *ucred)
979{
980 ucred->pid = pid_vnr(pid);
981 ucred->uid = ucred->gid = -1;
982 if (cred) {
983 struct user_namespace *current_ns = current_user_ns();
984
985 ucred->uid = from_kuid_munged(current_ns, cred->euid);
986 ucred->gid = from_kgid_munged(current_ns, cred->egid);
987 }
988}
989
990int sock_getsockopt(struct socket *sock, int level, int optname,
991 char __user *optval, int __user *optlen)
992{
993 struct sock *sk = sock->sk;
994
995 union {
996 int val;
997 struct linger ling;
998 struct timeval tm;
999 } v;
1000
1001 int lv = sizeof(int);
1002 int len;
1003
1004 if (get_user(len, optlen))
1005 return -EFAULT;
1006 if (len < 0)
1007 return -EINVAL;
1008
1009 memset(&v, 0, sizeof(v));
1010
1011 switch (optname) {
1012 case SO_DEBUG:
1013 v.val = sock_flag(sk, SOCK_DBG);
1014 break;
1015
1016 case SO_DONTROUTE:
1017 v.val = sock_flag(sk, SOCK_LOCALROUTE);
1018 break;
1019
1020 case SO_BROADCAST:
1021 v.val = sock_flag(sk, SOCK_BROADCAST);
1022 break;
1023
1024 case SO_SNDBUF:
1025 v.val = sk->sk_sndbuf;
1026 break;
1027
1028 case SO_RCVBUF:
1029 v.val = sk->sk_rcvbuf;
1030 break;
1031
1032 case SO_REUSEADDR:
1033 v.val = sk->sk_reuse;
1034 break;
1035
1036 case SO_REUSEPORT:
1037 v.val = sk->sk_reuseport;
1038 break;
1039
1040 case SO_KEEPALIVE:
1041 v.val = sock_flag(sk, SOCK_KEEPOPEN);
1042 break;
1043
1044 case SO_TYPE:
1045 v.val = sk->sk_type;
1046 break;
1047
1048 case SO_PROTOCOL:
1049 v.val = sk->sk_protocol;
1050 break;
1051
1052 case SO_DOMAIN:
1053 v.val = sk->sk_family;
1054 break;
1055
1056 case SO_ERROR:
1057 v.val = -sock_error(sk);
1058 if (v.val == 0)
1059 v.val = xchg(&sk->sk_err_soft, 0);
1060 break;
1061
1062 case SO_OOBINLINE:
1063 v.val = sock_flag(sk, SOCK_URGINLINE);
1064 break;
1065
1066 case SO_NO_CHECK:
1067 v.val = sk->sk_no_check;
1068 break;
1069
1070 case SO_PRIORITY:
1071 v.val = sk->sk_priority;
1072 break;
1073
1074 case SO_LINGER:
1075 lv = sizeof(v.ling);
1076 v.ling.l_onoff = sock_flag(sk, SOCK_LINGER);
1077 v.ling.l_linger = sk->sk_lingertime / HZ;
1078 break;
1079
1080 case SO_BSDCOMPAT:
1081 sock_warn_obsolete_bsdism("getsockopt");
1082 break;
1083
1084 case SO_TIMESTAMP:
1085 v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
1086 !sock_flag(sk, SOCK_RCVTSTAMPNS);
1087 break;
1088
1089 case SO_TIMESTAMPNS:
1090 v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
1091 break;
1092
1093 case SO_TIMESTAMPING:
1094 v.val = 0;
1095 if (sock_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE))
1096 v.val |= SOF_TIMESTAMPING_TX_HARDWARE;
1097 if (sock_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE))
1098 v.val |= SOF_TIMESTAMPING_TX_SOFTWARE;
1099 if (sock_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE))
1100 v.val |= SOF_TIMESTAMPING_RX_HARDWARE;
1101 if (sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE))
1102 v.val |= SOF_TIMESTAMPING_RX_SOFTWARE;
1103 if (sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE))
1104 v.val |= SOF_TIMESTAMPING_SOFTWARE;
1105 if (sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE))
1106 v.val |= SOF_TIMESTAMPING_SYS_HARDWARE;
1107 if (sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE))
1108 v.val |= SOF_TIMESTAMPING_RAW_HARDWARE;
1109 break;
1110
1111 case SO_RCVTIMEO:
1112 lv = sizeof(struct timeval);
1113 if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
1114 v.tm.tv_sec = 0;
1115 v.tm.tv_usec = 0;
1116 } else {
1117 v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
1118 v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
1119 }
1120 break;
1121
1122 case SO_SNDTIMEO:
1123 lv = sizeof(struct timeval);
1124 if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
1125 v.tm.tv_sec = 0;
1126 v.tm.tv_usec = 0;
1127 } else {
1128 v.tm.tv_sec = sk->sk_sndtimeo / HZ;
1129 v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
1130 }
1131 break;
1132
1133 case SO_RCVLOWAT:
1134 v.val = sk->sk_rcvlowat;
1135 break;
1136
1137 case SO_SNDLOWAT:
1138 v.val = 1;
1139 break;
1140
1141 case SO_PASSCRED:
1142 v.val = !!test_bit(SOCK_PASSCRED, &sock->flags);
1143 break;
1144
1145 case SO_PEERCRED:
1146 {
1147 struct ucred peercred;
1148 if (len > sizeof(peercred))
1149 len = sizeof(peercred);
1150 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
1151 if (copy_to_user(optval, &peercred, len))
1152 return -EFAULT;
1153 goto lenout;
1154 }
1155
1156 case SO_PEERNAME:
1157 {
1158 char address[128];
1159
1160 if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
1161 return -ENOTCONN;
1162 if (lv < len)
1163 return -EINVAL;
1164 if (copy_to_user(optval, address, len))
1165 return -EFAULT;
1166 goto lenout;
1167 }
1168
1169 /* Dubious BSD thing... Probably nobody even uses it, but
1170 * the UNIX standard wants it for whatever reason... -DaveM
1171 */
1172 case SO_ACCEPTCONN:
1173 v.val = sk->sk_state == TCP_LISTEN;
1174 break;
1175
1176 case SO_PASSSEC:
1177 v.val = !!test_bit(SOCK_PASSSEC, &sock->flags);
1178 break;
1179
1180 case SO_PEERSEC:
1181 return security_socket_getpeersec_stream(sock, optval, optlen, len);
1182
1183 case SO_MARK:
1184 v.val = sk->sk_mark;
1185 break;
1186
1187 case SO_RXQ_OVFL:
1188 v.val = sock_flag(sk, SOCK_RXQ_OVFL);
1189 break;
1190
1191 case SO_WIFI_STATUS:
1192 v.val = sock_flag(sk, SOCK_WIFI_STATUS);
1193 break;
1194
1195 case SO_PEEK_OFF:
1196 if (!sock->ops->set_peek_off)
1197 return -EOPNOTSUPP;
1198
1199 v.val = sk->sk_peek_off;
1200 break;
1201 case SO_NOFCS:
1202 v.val = sock_flag(sk, SOCK_NOFCS);
1203 break;
1204
1205 case SO_BINDTODEVICE:
1206 return sock_getbindtodevice(sk, optval, optlen, len);
1207
1208 case SO_GET_FILTER:
1209 len = sk_get_filter(sk, (struct sock_filter __user *)optval, len);
1210 if (len < 0)
1211 return len;
1212
1213 goto lenout;
1214
1215 case SO_LOCK_FILTER:
1216 v.val = sock_flag(sk, SOCK_FILTER_LOCKED);
1217 break;
1218
1219 case SO_BPF_EXTENSIONS:
1220 v.val = bpf_tell_extensions();
1221 break;
1222
1223 case SO_SELECT_ERR_QUEUE:
1224 v.val = sock_flag(sk, SOCK_SELECT_ERR_QUEUE);
1225 break;
1226
1227#ifdef CONFIG_NET_RX_BUSY_POLL
1228 case SO_BUSY_POLL:
1229 v.val = sk->sk_ll_usec;
1230 break;
1231#endif
1232
1233 case SO_MAX_PACING_RATE:
1234 v.val = sk->sk_max_pacing_rate;
1235 break;
1236
1237 default:
1238 return -ENOPROTOOPT;
1239 }
1240
1241 if (len > lv)
1242 len = lv;
1243 if (copy_to_user(optval, &v, len))
1244 return -EFAULT;
1245lenout:
1246 if (put_user(len, optlen))
1247 return -EFAULT;
1248 return 0;
1249}
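
/* Illustrative user-space sketch (not kernel code): reading the peer
 * credentials that cred_to_ucred() fills in above. fd is assumed to be a
 * connected AF_UNIX socket; includes and error handling are omitted.
 *
 *	struct ucred peer;
 *	socklen_t len = sizeof(peer);
 *
 *	if (getsockopt(fd, SOL_SOCKET, SO_PEERCRED, &peer, &len) == 0)
 *		printf("peer pid=%ld uid=%ld gid=%ld\n",
 *		       (long)peer.pid, (long)peer.uid, (long)peer.gid);
 */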
1250
1251/*
1252 * Initialize an sk_lock.
1253 *
1254 * (We also register the sk_lock with the lock validator.)
1255 */
1256static inline void sock_lock_init(struct sock *sk)
1257{
1258 sock_lock_init_class_and_name(sk,
1259 af_family_slock_key_strings[sk->sk_family],
1260 af_family_slock_keys + sk->sk_family,
1261 af_family_key_strings[sk->sk_family],
1262 af_family_keys + sk->sk_family);
1263}
1264
1265/*
1266 * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
 1267 * even temporarily, because of RCU lookups. sk_node should also be left as is.
 1268 * We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end.
1269 */
1270static void sock_copy(struct sock *nsk, const struct sock *osk)
1271{
1272#ifdef CONFIG_SECURITY_NETWORK
1273 void *sptr = nsk->sk_security;
1274#endif
1275 memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));
1276
1277 memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
1278 osk->sk_prot->obj_size - offsetof(struct sock, sk_dontcopy_end));
1279
1280#ifdef CONFIG_SECURITY_NETWORK
1281 nsk->sk_security = sptr;
1282 security_sk_clone(osk, nsk);
1283#endif
1284}
1285
1286void sk_prot_clear_portaddr_nulls(struct sock *sk, int size)
1287{
1288 unsigned long nulls1, nulls2;
1289
1290 nulls1 = offsetof(struct sock, __sk_common.skc_node.next);
1291 nulls2 = offsetof(struct sock, __sk_common.skc_portaddr_node.next);
1292 if (nulls1 > nulls2)
1293 swap(nulls1, nulls2);
1294
1295 if (nulls1 != 0)
1296 memset((char *)sk, 0, nulls1);
1297 memset((char *)sk + nulls1 + sizeof(void *), 0,
1298 nulls2 - nulls1 - sizeof(void *));
1299 memset((char *)sk + nulls2 + sizeof(void *), 0,
1300 size - nulls2 - sizeof(void *));
1301}
1302EXPORT_SYMBOL(sk_prot_clear_portaddr_nulls);
1303
1304static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
1305 int family)
1306{
1307 struct sock *sk;
1308 struct kmem_cache *slab;
1309
1310 slab = prot->slab;
1311 if (slab != NULL) {
1312 sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
1313 if (!sk)
1314 return sk;
1315 if (priority & __GFP_ZERO) {
1316 if (prot->clear_sk)
1317 prot->clear_sk(sk, prot->obj_size);
1318 else
1319 sk_prot_clear_nulls(sk, prot->obj_size);
1320 }
1321 } else
1322 sk = kmalloc(prot->obj_size, priority);
1323
1324 if (sk != NULL) {
1325 kmemcheck_annotate_bitfield(sk, flags);
1326
1327 if (security_sk_alloc(sk, family, priority))
1328 goto out_free;
1329
1330 if (!try_module_get(prot->owner))
1331 goto out_free_sec;
1332 sk_tx_queue_clear(sk);
1333 }
1334
1335 return sk;
1336
1337out_free_sec:
1338 security_sk_free(sk);
1339out_free:
1340 if (slab != NULL)
1341 kmem_cache_free(slab, sk);
1342 else
1343 kfree(sk);
1344 return NULL;
1345}
1346
1347static void sk_prot_free(struct proto *prot, struct sock *sk)
1348{
1349 struct kmem_cache *slab;
1350 struct module *owner;
1351
1352 owner = prot->owner;
1353 slab = prot->slab;
1354
1355 security_sk_free(sk);
1356 if (slab != NULL)
1357 kmem_cache_free(slab, sk);
1358 else
1359 kfree(sk);
1360 module_put(owner);
1361}
1362
1363#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
1364void sock_update_netprioidx(struct sock *sk)
1365{
1366 if (in_interrupt())
1367 return;
1368
1369 sk->sk_cgrp_prioidx = task_netprioidx(current);
1370}
1371EXPORT_SYMBOL_GPL(sock_update_netprioidx);
1372#endif
1373
1374/**
1375 * sk_alloc - All socket objects are allocated here
1376 * @net: the applicable net namespace
1377 * @family: protocol family
1378 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1379 * @prot: struct proto associated with this new sock instance
1380 */
1381struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
1382 struct proto *prot)
1383{
1384 struct sock *sk;
1385
1386 sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
1387 if (sk) {
1388 sk->sk_family = family;
1389 /*
1390 * See comment in struct sock definition to understand
1391 * why we need sk_prot_creator -acme
1392 */
1393 sk->sk_prot = sk->sk_prot_creator = prot;
1394 sock_lock_init(sk);
1395 sock_net_set(sk, get_net(net));
1396 atomic_set(&sk->sk_wmem_alloc, 1);
1397
1398 sock_update_classid(sk);
1399 sock_update_netprioidx(sk);
1400 }
1401
1402 return sk;
1403}
1404EXPORT_SYMBOL(sk_alloc);
1405
1406static void __sk_free(struct sock *sk)
1407{
1408 struct sk_filter *filter;
1409
1410 if (sk->sk_destruct)
1411 sk->sk_destruct(sk);
1412
1413 filter = rcu_dereference_check(sk->sk_filter,
1414 atomic_read(&sk->sk_wmem_alloc) == 0);
1415 if (filter) {
1416 sk_filter_uncharge(sk, filter);
1417 RCU_INIT_POINTER(sk->sk_filter, NULL);
1418 }
1419
1420 sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);
1421
1422 if (atomic_read(&sk->sk_omem_alloc))
1423 pr_debug("%s: optmem leakage (%d bytes) detected\n",
1424 __func__, atomic_read(&sk->sk_omem_alloc));
1425
1426 if (sk->sk_peer_cred)
1427 put_cred(sk->sk_peer_cred);
1428 put_pid(sk->sk_peer_pid);
1429 put_net(sock_net(sk));
1430 sk_prot_free(sk->sk_prot_creator, sk);
1431}
1432
1433void sk_free(struct sock *sk)
1434{
1435 /*
 1436	 * We subtract one from sk_wmem_alloc so we can tell whether
 1437	 * some packets are still in some tx queue.
 1438	 * If the result is not zero, sock_wfree() will call __sk_free(sk) later.
1439 */
1440 if (atomic_dec_and_test(&sk->sk_wmem_alloc))
1441 __sk_free(sk);
1442}
1443EXPORT_SYMBOL(sk_free);
1444
1445/*
 1446 * The last sock_put should drop the reference to sk->sk_net. It has already
 1447 * been dropped in sk_change_net, and taking a reference to a namespace that
 1448 * is being stopped is not an option.
 1449 * Instead, take a reference to the socket to remove it from the hash while
 1450 * it is still _alive_, and then destroy it in the context of init_net.
1451 */
1452void sk_release_kernel(struct sock *sk)
1453{
1454 if (sk == NULL || sk->sk_socket == NULL)
1455 return;
1456
1457 sock_hold(sk);
1458 sock_release(sk->sk_socket);
1459 release_net(sock_net(sk));
1460 sock_net_set(sk, get_net(&init_net));
1461 sock_put(sk);
1462}
1463EXPORT_SYMBOL(sk_release_kernel);
1464
1465static void sk_update_clone(const struct sock *sk, struct sock *newsk)
1466{
1467 if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
1468 sock_update_memcg(newsk);
1469}
1470
1471/**
1472 * sk_clone_lock - clone a socket, and lock its clone
1473 * @sk: the socket to clone
1474 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1475 *
1476 * Caller must unlock socket even in error path (bh_unlock_sock(newsk))
1477 */
1478struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
1479{
1480 struct sock *newsk;
1481
1482 newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
1483 if (newsk != NULL) {
1484 struct sk_filter *filter;
1485
1486 sock_copy(newsk, sk);
1487
1488 /* SANITY */
1489 get_net(sock_net(newsk));
1490 sk_node_init(&newsk->sk_node);
1491 sock_lock_init(newsk);
1492 bh_lock_sock(newsk);
1493 newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
1494 newsk->sk_backlog.len = 0;
1495
1496 atomic_set(&newsk->sk_rmem_alloc, 0);
1497 /*
1498 * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
1499 */
1500 atomic_set(&newsk->sk_wmem_alloc, 1);
1501 atomic_set(&newsk->sk_omem_alloc, 0);
1502 skb_queue_head_init(&newsk->sk_receive_queue);
1503 skb_queue_head_init(&newsk->sk_write_queue);
1504#ifdef CONFIG_NET_DMA
1505 skb_queue_head_init(&newsk->sk_async_wait_queue);
1506#endif
1507
1508 spin_lock_init(&newsk->sk_dst_lock);
1509 rwlock_init(&newsk->sk_callback_lock);
1510 lockdep_set_class_and_name(&newsk->sk_callback_lock,
1511 af_callback_keys + newsk->sk_family,
1512 af_family_clock_key_strings[newsk->sk_family]);
1513
1514 newsk->sk_dst_cache = NULL;
1515 newsk->sk_wmem_queued = 0;
1516 newsk->sk_forward_alloc = 0;
1517 newsk->sk_send_head = NULL;
1518 newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
1519
1520 sock_reset_flag(newsk, SOCK_DONE);
1521 skb_queue_head_init(&newsk->sk_error_queue);
1522
1523 filter = rcu_dereference_protected(newsk->sk_filter, 1);
1524 if (filter != NULL)
1525 sk_filter_charge(newsk, filter);
1526
1527 if (unlikely(xfrm_sk_clone_policy(newsk))) {
 1528			/* It is still a raw copy of the parent, so invalidate
 1529			 * the destructor and do a plain sk_free() */
1530 newsk->sk_destruct = NULL;
1531 bh_unlock_sock(newsk);
1532 sk_free(newsk);
1533 newsk = NULL;
1534 goto out;
1535 }
1536
1537 newsk->sk_err = 0;
1538 newsk->sk_priority = 0;
1539 /*
1540 * Before updating sk_refcnt, we must commit prior changes to memory
1541 * (Documentation/RCU/rculist_nulls.txt for details)
1542 */
1543 smp_wmb();
1544 atomic_set(&newsk->sk_refcnt, 2);
1545
1546 /*
1547 * Increment the counter in the same struct proto as the master
1548 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
1549 * is the same as sk->sk_prot->socks, as this field was copied
1550 * with memcpy).
1551 *
1552 * This _changes_ the previous behaviour, where
 1553		 * tcp_create_openreq_child always incremented the
 1554		 * equivalent of tcp_prot->socks (inet_sock_nr), so this has
 1555		 * to be taken into account in all callers. -acme
1556 */
1557 sk_refcnt_debug_inc(newsk);
1558 sk_set_socket(newsk, NULL);
1559 newsk->sk_wq = NULL;
1560
1561 sk_update_clone(sk, newsk);
1562
1563 if (newsk->sk_prot->sockets_allocated)
1564 sk_sockets_allocated_inc(newsk);
1565
1566 if (newsk->sk_flags & SK_FLAGS_TIMESTAMP)
1567 net_enable_timestamp();
1568 }
1569out:
1570 return newsk;
1571}
1572EXPORT_SYMBOL_GPL(sk_clone_lock);
1573
1574void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
1575{
1576 __sk_dst_set(sk, dst);
1577 sk->sk_route_caps = dst->dev->features;
1578 if (sk->sk_route_caps & NETIF_F_GSO)
1579 sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
1580 sk->sk_route_caps &= ~sk->sk_route_nocaps;
1581 if (sk_can_gso(sk)) {
1582 if (dst->header_len) {
1583 sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
1584 } else {
1585 sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
1586 sk->sk_gso_max_size = dst->dev->gso_max_size;
1587 sk->sk_gso_max_segs = dst->dev->gso_max_segs;
1588 }
1589 }
1590}
1591EXPORT_SYMBOL_GPL(sk_setup_caps);
1592
1593/*
1594 * Simple resource managers for sockets.
1595 */
1596
1597
1598/*
1599 * Write buffer destructor automatically called from kfree_skb.
1600 */
1601void sock_wfree(struct sk_buff *skb)
1602{
1603 struct sock *sk = skb->sk;
1604 unsigned int len = skb->truesize;
1605
1606 if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
1607 /*
 1608		 * Keep a reference on sk_wmem_alloc; it will be released
 1609		 * after the sk_write_space() call.
1610 */
1611 atomic_sub(len - 1, &sk->sk_wmem_alloc);
1612 sk->sk_write_space(sk);
1613 len = 1;
1614 }
1615 /*
1616 * if sk_wmem_alloc reaches 0, we must finish what sk_free()
1617 * could not do because of in-flight packets
1618 */
1619 if (atomic_sub_and_test(len, &sk->sk_wmem_alloc))
1620 __sk_free(sk);
1621}
1622EXPORT_SYMBOL(sock_wfree);
1623
1624void skb_orphan_partial(struct sk_buff *skb)
1625{
1626 /* TCP stack sets skb->ooo_okay based on sk_wmem_alloc,
 1627	 * so we do not completely orphan the skb, but transfer all
1628 * accounted bytes but one, to avoid unexpected reorders.
1629 */
1630 if (skb->destructor == sock_wfree
1631#ifdef CONFIG_INET
1632 || skb->destructor == tcp_wfree
1633#endif
1634 ) {
1635 atomic_sub(skb->truesize - 1, &skb->sk->sk_wmem_alloc);
1636 skb->truesize = 1;
1637 } else {
1638 skb_orphan(skb);
1639 }
1640}
1641EXPORT_SYMBOL(skb_orphan_partial);
1642
1643/*
1644 * Read buffer destructor automatically called from kfree_skb.
1645 */
1646void sock_rfree(struct sk_buff *skb)
1647{
1648 struct sock *sk = skb->sk;
1649 unsigned int len = skb->truesize;
1650
1651 atomic_sub(len, &sk->sk_rmem_alloc);
1652 sk_mem_uncharge(sk, len);
1653}
1654EXPORT_SYMBOL(sock_rfree);
1655
1656void sock_edemux(struct sk_buff *skb)
1657{
1658 struct sock *sk = skb->sk;
1659
1660#ifdef CONFIG_INET
1661 if (sk->sk_state == TCP_TIME_WAIT)
1662 inet_twsk_put(inet_twsk(sk));
1663 else
1664#endif
1665 sock_put(sk);
1666}
1667EXPORT_SYMBOL(sock_edemux);
1668
1669kuid_t sock_i_uid(struct sock *sk)
1670{
1671 kuid_t uid;
1672
1673 read_lock_bh(&sk->sk_callback_lock);
1674 uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : GLOBAL_ROOT_UID;
1675 read_unlock_bh(&sk->sk_callback_lock);
1676 return uid;
1677}
1678EXPORT_SYMBOL(sock_i_uid);
1679
1680unsigned long sock_i_ino(struct sock *sk)
1681{
1682 unsigned long ino;
1683
1684 read_lock_bh(&sk->sk_callback_lock);
1685 ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
1686 read_unlock_bh(&sk->sk_callback_lock);
1687 return ino;
1688}
1689EXPORT_SYMBOL(sock_i_ino);
1690
1691/*
1692 * Allocate a skb from the socket's send buffer.
1693 */
1694struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
1695 gfp_t priority)
1696{
1697 if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
1698 struct sk_buff *skb = alloc_skb(size, priority);
1699 if (skb) {
1700 skb_set_owner_w(skb, sk);
1701 return skb;
1702 }
1703 }
1704 return NULL;
1705}
1706EXPORT_SYMBOL(sock_wmalloc);
1707
1708/*
1709 * Allocate a memory block from the socket's option memory buffer.
1710 */
1711void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
1712{
1713 if ((unsigned int)size <= sysctl_optmem_max &&
1714 atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
1715 void *mem;
 1716		/* First do the add, to avoid a race in case kmalloc
 1717		 * sleeps.
1718 */
1719 atomic_add(size, &sk->sk_omem_alloc);
1720 mem = kmalloc(size, priority);
1721 if (mem)
1722 return mem;
1723 atomic_sub(size, &sk->sk_omem_alloc);
1724 }
1725 return NULL;
1726}
1727EXPORT_SYMBOL(sock_kmalloc);
1728
1729/*
1730 * Free an option memory block.
1731 */
1732void sock_kfree_s(struct sock *sk, void *mem, int size)
1733{
1734 kfree(mem);
1735 atomic_sub(size, &sk->sk_omem_alloc);
1736}
1737EXPORT_SYMBOL(sock_kfree_s);
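
/* Illustrative sketch (not part of this file): the intended pairing of
 * sock_kmalloc()/sock_kfree_s() for per-socket option data charged against
 * sk_omem_alloc. struct foo_opts is hypothetical.
 *
 *	struct foo_opts *opts;
 *
 *	opts = sock_kmalloc(sk, sizeof(*opts), GFP_KERNEL);
 *	if (!opts)
 *		return -ENOBUFS;
 *	memset(opts, 0, sizeof(*opts));
 *
 * and later, when the socket no longer needs the data:
 *
 *	sock_kfree_s(sk, opts, sizeof(*opts));
 */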
1738
1739/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
 1740    I think these locks should be removed for datagram sockets.
1741 */
1742static long sock_wait_for_wmem(struct sock *sk, long timeo)
1743{
1744 DEFINE_WAIT(wait);
1745
1746 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
1747 for (;;) {
1748 if (!timeo)
1749 break;
1750 if (signal_pending(current))
1751 break;
1752 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1753 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1754 if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
1755 break;
1756 if (sk->sk_shutdown & SEND_SHUTDOWN)
1757 break;
1758 if (sk->sk_err)
1759 break;
1760 timeo = schedule_timeout(timeo);
1761 }
1762 finish_wait(sk_sleep(sk), &wait);
1763 return timeo;
1764}
1765
1766
1767/*
1768 * Generic send/receive buffer handlers
1769 */
1770
1771struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
1772 unsigned long data_len, int noblock,
1773 int *errcode, int max_page_order)
1774{
1775 struct sk_buff *skb = NULL;
1776 unsigned long chunk;
1777 gfp_t gfp_mask;
1778 long timeo;
1779 int err;
1780 int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
1781 struct page *page;
1782 int i;
1783
1784 err = -EMSGSIZE;
1785 if (npages > MAX_SKB_FRAGS)
1786 goto failure;
1787
1788 timeo = sock_sndtimeo(sk, noblock);
1789 while (!skb) {
1790 err = sock_error(sk);
1791 if (err != 0)
1792 goto failure;
1793
1794 err = -EPIPE;
1795 if (sk->sk_shutdown & SEND_SHUTDOWN)
1796 goto failure;
1797
1798 if (atomic_read(&sk->sk_wmem_alloc) >= sk->sk_sndbuf) {
1799 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
1800 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1801 err = -EAGAIN;
1802 if (!timeo)
1803 goto failure;
1804 if (signal_pending(current))
1805 goto interrupted;
1806 timeo = sock_wait_for_wmem(sk, timeo);
1807 continue;
1808 }
1809
1810 err = -ENOBUFS;
1811 gfp_mask = sk->sk_allocation;
1812 if (gfp_mask & __GFP_WAIT)
1813 gfp_mask |= __GFP_REPEAT;
1814
1815 skb = alloc_skb(header_len, gfp_mask);
1816 if (!skb)
1817 goto failure;
1818
1819 skb->truesize += data_len;
1820
1821 for (i = 0; npages > 0; i++) {
1822 int order = max_page_order;
1823
1824 while (order) {
1825 if (npages >= 1 << order) {
1826 page = alloc_pages(sk->sk_allocation |
1827 __GFP_COMP |
1828 __GFP_NOWARN |
1829 __GFP_NORETRY,
1830 order);
1831 if (page)
1832 goto fill_page;
1833 }
1834 order--;
1835 }
1836 page = alloc_page(sk->sk_allocation);
1837 if (!page)
1838 goto failure;
1839fill_page:
1840 chunk = min_t(unsigned long, data_len,
1841 PAGE_SIZE << order);
1842 skb_fill_page_desc(skb, i, page, 0, chunk);
1843 data_len -= chunk;
1844 npages -= 1 << order;
1845 }
1846 }
1847
1848 skb_set_owner_w(skb, sk);
1849 return skb;
1850
1851interrupted:
1852 err = sock_intr_errno(timeo);
1853failure:
1854 kfree_skb(skb);
1855 *errcode = err;
1856 return NULL;
1857}
1858EXPORT_SYMBOL(sock_alloc_send_pskb);
1859
1860struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
1861 int noblock, int *errcode)
1862{
1863 return sock_alloc_send_pskb(sk, size, 0, noblock, errcode, 0);
1864}
1865EXPORT_SYMBOL(sock_alloc_send_skb);
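
/* Illustrative sketch (not part of this file): the common way a datagram
 * protocol's sendmsg() obtains an skb, with send-buffer accounting and
 * blocking behaviour handled by the helpers above. "hlen" (headroom) and
 * "err" are assumed to exist in the caller.
 *
 *	skb = sock_alloc_send_skb(sk, len + hlen,
 *				  msg->msg_flags & MSG_DONTWAIT, &err);
 *	if (!skb)
 *		goto out;
 *	skb_reserve(skb, hlen);
 */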
1866
1867/* On 32bit arches, an skb frag is limited to 2^15 */
1868#define SKB_FRAG_PAGE_ORDER get_order(32768)
1869
1870/**
1871 * skb_page_frag_refill - check that a page_frag contains enough room
1872 * @sz: minimum size of the fragment we want to get
1873 * @pfrag: pointer to page_frag
1874 * @prio: priority for memory allocation
1875 *
1876 * Note: While this allocator tries to use high order pages, there is
1877 * no guarantee that allocations succeed. Therefore, @sz MUST be
 1878 * less than or equal to PAGE_SIZE.
1879 */
1880bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio)
1881{
1882 int order;
1883
1884 if (pfrag->page) {
1885 if (atomic_read(&pfrag->page->_count) == 1) {
1886 pfrag->offset = 0;
1887 return true;
1888 }
1889 if (pfrag->offset + sz <= pfrag->size)
1890 return true;
1891 put_page(pfrag->page);
1892 }
1893
1894 order = SKB_FRAG_PAGE_ORDER;
1895 do {
1896 gfp_t gfp = prio;
1897
1898 if (order)
1899 gfp |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY;
1900 pfrag->page = alloc_pages(gfp, order);
1901 if (likely(pfrag->page)) {
1902 pfrag->offset = 0;
1903 pfrag->size = PAGE_SIZE << order;
1904 return true;
1905 }
1906 } while (--order >= 0);
1907
1908 return false;
1909}
1910EXPORT_SYMBOL(skb_page_frag_refill);
1911
1912bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
1913{
1914 if (likely(skb_page_frag_refill(32U, pfrag, sk->sk_allocation)))
1915 return true;
1916
1917 sk_enter_memory_pressure(sk);
1918 sk_stream_moderate_sndbuf(sk);
1919 return false;
1920}
1921EXPORT_SYMBOL(sk_page_frag_refill);
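
/* Illustrative sketch (not part of this file): how a sendmsg()-style path
 * typically uses the per-socket page_frag together with
 * sk_page_frag_refill(); "copy" and the wait_for_memory label are assumed
 * to exist in the caller.
 *
 *	struct page_frag *pfrag = sk_page_frag(sk);
 *
 *	if (!sk_page_frag_refill(sk, pfrag))
 *		goto wait_for_memory;
 *	copy = min_t(int, copy, pfrag->size - pfrag->offset);
 *
 * after which the caller copies "copy" bytes into pfrag->page at
 * pfrag->offset (e.g. via skb_copy_to_page_nocache()), attaches the page to
 * an skb and advances pfrag->offset.
 */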
1922
1923static void __lock_sock(struct sock *sk)
1924 __releases(&sk->sk_lock.slock)
1925 __acquires(&sk->sk_lock.slock)
1926{
1927 DEFINE_WAIT(wait);
1928
1929 for (;;) {
1930 prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
1931 TASK_UNINTERRUPTIBLE);
1932 spin_unlock_bh(&sk->sk_lock.slock);
1933 schedule();
1934 spin_lock_bh(&sk->sk_lock.slock);
1935 if (!sock_owned_by_user(sk))
1936 break;
1937 }
1938 finish_wait(&sk->sk_lock.wq, &wait);
1939}
1940
1941static void __release_sock(struct sock *sk)
1942 __releases(&sk->sk_lock.slock)
1943 __acquires(&sk->sk_lock.slock)
1944{
1945 struct sk_buff *skb = sk->sk_backlog.head;
1946
1947 do {
1948 sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
1949 bh_unlock_sock(sk);
1950
1951 do {
1952 struct sk_buff *next = skb->next;
1953
1954 prefetch(next);
1955 WARN_ON_ONCE(skb_dst_is_noref(skb));
1956 skb->next = NULL;
1957 sk_backlog_rcv(sk, skb);
1958
1959 /*
1960 * We are in process context here with softirqs
1961 * disabled, use cond_resched_softirq() to preempt.
1962 * This is safe to do because we've taken the backlog
1963 * queue private:
1964 */
1965 cond_resched_softirq();
1966
1967 skb = next;
1968 } while (skb != NULL);
1969
1970 bh_lock_sock(sk);
1971 } while ((skb = sk->sk_backlog.head) != NULL);
1972
1973 /*
 1974	 * Doing the zeroing here guarantees we cannot loop forever
1975 * while a wild producer attempts to flood us.
1976 */
1977 sk->sk_backlog.len = 0;
1978}
1979
1980/**
1981 * sk_wait_data - wait for data to arrive at sk_receive_queue
1982 * @sk: sock to wait on
1983 * @timeo: for how long
1984 *
 1985 * Now socket state including sk->sk_err is changed only under the lock,
 1986 * hence we may omit checks after joining the wait queue.
 1987 * We check the receive queue before schedule() only as an optimization;
 1988 * it is very likely that release_sock() added new data.
1989 */
1990int sk_wait_data(struct sock *sk, long *timeo)
1991{
1992 int rc;
1993 DEFINE_WAIT(wait);
1994
1995 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1996 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1997 rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
1998 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1999 finish_wait(sk_sleep(sk), &wait);
2000 return rc;
2001}
2002EXPORT_SYMBOL(sk_wait_data);
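
/* Illustrative sketch (not part of this file): a typical blocking receive
 * loop built on sk_wait_data(); the socket is assumed to be locked by the
 * caller (lock_sock()), as the comment above requires, and "flags" to be
 * the recvmsg flags.
 *
 *	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 *	while (!(skb = skb_dequeue(&sk->sk_receive_queue))) {
 *		if (!timeo)
 *			return -EAGAIN;
 *		if (signal_pending(current))
 *			return sock_intr_errno(timeo);
 *		sk_wait_data(sk, &timeo);
 *	}
 */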
2003
2004/**
2005 * __sk_mem_schedule - increase sk_forward_alloc and memory_allocated
2006 * @sk: socket
2007 * @size: memory size to allocate
2008 * @kind: allocation type
2009 *
2010 * If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
2011 * rmem allocation. This function assumes that protocols which have
2012 * memory_pressure use sk_wmem_queued as write buffer accounting.
2013 */
2014int __sk_mem_schedule(struct sock *sk, int size, int kind)
2015{
2016 struct proto *prot = sk->sk_prot;
2017 int amt = sk_mem_pages(size);
2018 long allocated;
2019 int parent_status = UNDER_LIMIT;
2020
2021 sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
2022
2023 allocated = sk_memory_allocated_add(sk, amt, &parent_status);
2024
2025 /* Under limit. */
2026 if (parent_status == UNDER_LIMIT &&
2027 allocated <= sk_prot_mem_limits(sk, 0)) {
2028 sk_leave_memory_pressure(sk);
2029 return 1;
2030 }
2031
2032 /* Under pressure. (we or our parents) */
2033 if ((parent_status > SOFT_LIMIT) ||
2034 allocated > sk_prot_mem_limits(sk, 1))
2035 sk_enter_memory_pressure(sk);
2036
2037 /* Over hard limit (we or our parents) */
2038 if ((parent_status == OVER_LIMIT) ||
2039 (allocated > sk_prot_mem_limits(sk, 2)))
2040 goto suppress_allocation;
2041
2042 /* guarantee minimum buffer size under pressure */
2043 if (kind == SK_MEM_RECV) {
2044 if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
2045 return 1;
2046
2047 } else { /* SK_MEM_SEND */
2048 if (sk->sk_type == SOCK_STREAM) {
2049 if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
2050 return 1;
2051 } else if (atomic_read(&sk->sk_wmem_alloc) <
2052 prot->sysctl_wmem[0])
2053 return 1;
2054 }
2055
2056 if (sk_has_memory_pressure(sk)) {
2057 int alloc;
2058
2059 if (!sk_under_memory_pressure(sk))
2060 return 1;
2061 alloc = sk_sockets_allocated_read_positive(sk);
2062 if (sk_prot_mem_limits(sk, 2) > alloc *
2063 sk_mem_pages(sk->sk_wmem_queued +
2064 atomic_read(&sk->sk_rmem_alloc) +
2065 sk->sk_forward_alloc))
2066 return 1;
2067 }
2068
2069suppress_allocation:
2070
2071 if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
2072 sk_stream_moderate_sndbuf(sk);
2073
2074 /* Fail only if socket is _under_ its sndbuf.
2075	 * In this case we cannot block, so we have to fail.
2076 */
2077 if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
2078 return 1;
2079 }
2080
2081 trace_sock_exceed_buf_limit(sk, prot, allocated);
2082
2083 /* Alas. Undo changes. */
2084 sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
2085
2086 sk_memory_allocated_sub(sk, amt);
2087
2088 return 0;
2089}
2090EXPORT_SYMBOL(__sk_mem_schedule);
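
/*
 * Example (illustrative sketch, not taken from this file): protocols do not
 * usually call __sk_mem_schedule() directly; they go through the
 * sk_wmem_schedule()/sk_rmem_schedule() helpers from <net/sock.h>.  A stream
 * send path might account a freshly queued skb roughly like this (the
 * queueing step is a simplified stand-in, error handling trimmed):
 *
 *	if (!sk_wmem_schedule(sk, skb->truesize))
 *		return -ENOBUFS;
 *
 *	__skb_queue_tail(&sk->sk_write_queue, skb);
 *	sk->sk_wmem_queued += skb->truesize;
 *	sk_mem_charge(sk, skb->truesize);
 *
 * sk_mem_charge() then consumes the sk_forward_alloc quota that
 * __sk_mem_schedule() extended above.
 */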
2091
2092/**
2093 * __sk_mem_reclaim - reclaim memory_allocated
2094 * @sk: socket
2095 */
2096void __sk_mem_reclaim(struct sock *sk)
2097{
2098 sk_memory_allocated_sub(sk,
2099 sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT);
2100 sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;
2101
2102 if (sk_under_memory_pressure(sk) &&
2103 (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
2104 sk_leave_memory_pressure(sk);
2105}
2106EXPORT_SYMBOL(__sk_mem_reclaim);
2107
2108
2109/*
2110 * Set of default routines for initialising struct proto_ops when
2111 * the protocol does not support a particular function. In certain
2112 * cases where it makes no sense for a protocol to have a "do nothing"
2113 * function, some default processing is provided.
2114 */
2115
2116int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
2117{
2118 return -EOPNOTSUPP;
2119}
2120EXPORT_SYMBOL(sock_no_bind);
2121
2122int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
2123 int len, int flags)
2124{
2125 return -EOPNOTSUPP;
2126}
2127EXPORT_SYMBOL(sock_no_connect);
2128
2129int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
2130{
2131 return -EOPNOTSUPP;
2132}
2133EXPORT_SYMBOL(sock_no_socketpair);
2134
2135int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
2136{
2137 return -EOPNOTSUPP;
2138}
2139EXPORT_SYMBOL(sock_no_accept);
2140
2141int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
2142 int *len, int peer)
2143{
2144 return -EOPNOTSUPP;
2145}
2146EXPORT_SYMBOL(sock_no_getname);
2147
2148unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
2149{
2150 return 0;
2151}
2152EXPORT_SYMBOL(sock_no_poll);
2153
2154int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
2155{
2156 return -EOPNOTSUPP;
2157}
2158EXPORT_SYMBOL(sock_no_ioctl);
2159
2160int sock_no_listen(struct socket *sock, int backlog)
2161{
2162 return -EOPNOTSUPP;
2163}
2164EXPORT_SYMBOL(sock_no_listen);
2165
2166int sock_no_shutdown(struct socket *sock, int how)
2167{
2168 return -EOPNOTSUPP;
2169}
2170EXPORT_SYMBOL(sock_no_shutdown);
2171
2172int sock_no_setsockopt(struct socket *sock, int level, int optname,
2173 char __user *optval, unsigned int optlen)
2174{
2175 return -EOPNOTSUPP;
2176}
2177EXPORT_SYMBOL(sock_no_setsockopt);
2178
2179int sock_no_getsockopt(struct socket *sock, int level, int optname,
2180 char __user *optval, int __user *optlen)
2181{
2182 return -EOPNOTSUPP;
2183}
2184EXPORT_SYMBOL(sock_no_getsockopt);
2185
2186int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
2187 size_t len)
2188{
2189 return -EOPNOTSUPP;
2190}
2191EXPORT_SYMBOL(sock_no_sendmsg);
2192
2193int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
2194 size_t len, int flags)
2195{
2196 return -EOPNOTSUPP;
2197}
2198EXPORT_SYMBOL(sock_no_recvmsg);
2199
2200int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
2201{
2202 /* Mirror missing mmap method error code */
2203 return -ENODEV;
2204}
2205EXPORT_SYMBOL(sock_no_mmap);
2206
2207ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
2208{
2209 ssize_t res;
2210 struct msghdr msg = {.msg_flags = flags};
2211 struct kvec iov;
2212 char *kaddr = kmap(page);
2213 iov.iov_base = kaddr + offset;
2214 iov.iov_len = size;
2215 res = kernel_sendmsg(sock, &msg, &iov, 1, size);
2216 kunmap(page);
2217 return res;
2218}
2219EXPORT_SYMBOL(sock_no_sendpage);
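
/*
 * Example (illustrative sketch, not taken from this file): a minimal
 * proto_ops table for a connectionless family that plugs in the sock_no_*()
 * stubs for everything it does not implement.  PF_EXAMPLE and the
 * example_*() handlers are hypothetical names:
 *
 *	static const struct proto_ops example_dgram_ops = {
 *		.family		= PF_EXAMPLE,
 *		.owner		= THIS_MODULE,
 *		.release	= example_release,
 *		.bind		= example_bind,
 *		.connect	= sock_no_connect,
 *		.socketpair	= sock_no_socketpair,
 *		.accept		= sock_no_accept,
 *		.getname	= example_getname,
 *		.poll		= datagram_poll,
 *		.ioctl		= sock_no_ioctl,
 *		.listen		= sock_no_listen,
 *		.shutdown	= sock_no_shutdown,
 *		.setsockopt	= sock_common_setsockopt,
 *		.getsockopt	= sock_common_getsockopt,
 *		.sendmsg	= example_sendmsg,
 *		.recvmsg	= sock_common_recvmsg,
 *		.mmap		= sock_no_mmap,
 *		.sendpage	= sock_no_sendpage,
 *	};
 */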
2220
2221/*
2222 * Default Socket Callbacks
2223 */
2224
2225static void sock_def_wakeup(struct sock *sk)
2226{
2227 struct socket_wq *wq;
2228
2229 rcu_read_lock();
2230 wq = rcu_dereference(sk->sk_wq);
2231 if (wq_has_sleeper(wq))
2232 wake_up_interruptible_all(&wq->wait);
2233 rcu_read_unlock();
2234}
2235
2236static void sock_def_error_report(struct sock *sk)
2237{
2238 struct socket_wq *wq;
2239
2240 rcu_read_lock();
2241 wq = rcu_dereference(sk->sk_wq);
2242 if (wq_has_sleeper(wq))
2243 wake_up_interruptible_poll(&wq->wait, POLLERR);
2244 sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
2245 rcu_read_unlock();
2246}
2247
2248static void sock_def_readable(struct sock *sk)
2249{
2250 struct socket_wq *wq;
2251
2252 rcu_read_lock();
2253 wq = rcu_dereference(sk->sk_wq);
2254 if (wq_has_sleeper(wq))
2255 wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLPRI |
2256 POLLRDNORM | POLLRDBAND);
2257 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
2258 rcu_read_unlock();
2259}
2260
2261static void sock_def_write_space(struct sock *sk)
2262{
2263 struct socket_wq *wq;
2264
2265 rcu_read_lock();
2266
2267 /* Do not wake up a writer until he can make "significant"
2268 * progress. --DaveM
2269 */
2270 if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
2271 wq = rcu_dereference(sk->sk_wq);
2272 if (wq_has_sleeper(wq))
2273 wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
2274 POLLWRNORM | POLLWRBAND);
2275
2276 /* Should agree with poll, otherwise some programs break */
2277 if (sock_writeable(sk))
2278 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
2279 }
2280
2281 rcu_read_unlock();
2282}
2283
2284static void sock_def_destruct(struct sock *sk)
2285{
2286 kfree(sk->sk_protinfo);
2287}
2288
2289void sk_send_sigurg(struct sock *sk)
2290{
2291 if (sk->sk_socket && sk->sk_socket->file)
2292 if (send_sigurg(&sk->sk_socket->file->f_owner))
2293 sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
2294}
2295EXPORT_SYMBOL(sk_send_sigurg);
2296
2297void sk_reset_timer(struct sock *sk, struct timer_list* timer,
2298 unsigned long expires)
2299{
2300 if (!mod_timer(timer, expires))
2301 sock_hold(sk);
2302}
2303EXPORT_SYMBOL(sk_reset_timer);
2304
2305void sk_stop_timer(struct sock *sk, struct timer_list* timer)
2306{
2307 if (del_timer(timer))
2308 __sock_put(sk);
2309}
2310EXPORT_SYMBOL(sk_stop_timer);
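
/*
 * Example (illustrative sketch, not taken from this file): arming a
 * per-socket timer.  sk_reset_timer() takes a reference on the socket when
 * it arms a previously idle timer, so the handler must drop that reference
 * with sock_put() when it is done, and teardown paths use sk_stop_timer()
 * to both delete a pending timer and release its reference.  The names
 * example_timer() and example_handle_timeout() are hypothetical, and the
 * old-style "unsigned long data" callback matches the init_timer() usage
 * in this file:
 *
 *	setup_timer(&sk->sk_timer, example_timer, (unsigned long)sk);
 *	sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ);
 *
 *	static void example_timer(unsigned long data)
 *	{
 *		struct sock *sk = (struct sock *)data;
 *
 *		bh_lock_sock(sk);
 *		example_handle_timeout(sk);
 *		bh_unlock_sock(sk);
 *		sock_put(sk);
 *	}
 */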
2311
2312void sock_init_data(struct socket *sock, struct sock *sk)
2313{
2314 skb_queue_head_init(&sk->sk_receive_queue);
2315 skb_queue_head_init(&sk->sk_write_queue);
2316 skb_queue_head_init(&sk->sk_error_queue);
2317#ifdef CONFIG_NET_DMA
2318 skb_queue_head_init(&sk->sk_async_wait_queue);
2319#endif
2320
2321 sk->sk_send_head = NULL;
2322
2323 init_timer(&sk->sk_timer);
2324
2325 sk->sk_allocation = GFP_KERNEL;
2326 sk->sk_rcvbuf = sysctl_rmem_default;
2327 sk->sk_sndbuf = sysctl_wmem_default;
2328 sk->sk_state = TCP_CLOSE;
2329 sk_set_socket(sk, sock);
2330
2331 sock_set_flag(sk, SOCK_ZAPPED);
2332
2333 if (sock) {
2334 sk->sk_type = sock->type;
2335 sk->sk_wq = sock->wq;
2336 sock->sk = sk;
2337 } else
2338 sk->sk_wq = NULL;
2339
2340 spin_lock_init(&sk->sk_dst_lock);
2341 rwlock_init(&sk->sk_callback_lock);
2342 lockdep_set_class_and_name(&sk->sk_callback_lock,
2343 af_callback_keys + sk->sk_family,
2344 af_family_clock_key_strings[sk->sk_family]);
2345
2346 sk->sk_state_change = sock_def_wakeup;
2347 sk->sk_data_ready = sock_def_readable;
2348 sk->sk_write_space = sock_def_write_space;
2349 sk->sk_error_report = sock_def_error_report;
2350 sk->sk_destruct = sock_def_destruct;
2351
2352 sk->sk_frag.page = NULL;
2353 sk->sk_frag.offset = 0;
2354 sk->sk_peek_off = -1;
2355
2356 sk->sk_peer_pid = NULL;
2357 sk->sk_peer_cred = NULL;
2358 sk->sk_write_pending = 0;
2359 sk->sk_rcvlowat = 1;
2360 sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
2361 sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
2362
2363 sk->sk_stamp = ktime_set(-1L, 0);
2364
2365#ifdef CONFIG_NET_RX_BUSY_POLL
2366 sk->sk_napi_id = 0;
2367 sk->sk_ll_usec = sysctl_net_busy_read;
2368#endif
2369
2370 sk->sk_max_pacing_rate = ~0U;
2371 sk->sk_pacing_rate = ~0U;
2372 /*
2373 * Before updating sk_refcnt, we must commit prior changes to memory
2374 * (Documentation/RCU/rculist_nulls.txt for details)
2375 */
2376 smp_wmb();
2377 atomic_set(&sk->sk_refcnt, 1);
2378 atomic_set(&sk->sk_drops, 0);
2379}
2380EXPORT_SYMBOL(sock_init_data);
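
/*
 * Example (illustrative sketch, not taken from this file): an address
 * family's create() hook typically allocates the sock, runs
 * sock_init_data() to set up the queues and default callbacks above, and
 * then overrides whatever it needs.  PF_EXAMPLE, example_proto and the
 * example_*() callbacks are hypothetical, and the four-argument sk_alloc()
 * prototype contemporary with this file is assumed:
 *
 *	static int example_create(struct net *net, struct socket *sock,
 *				  int protocol, int kern)
 *	{
 *		struct sock *sk;
 *
 *		sk = sk_alloc(net, PF_EXAMPLE, GFP_KERNEL, &example_proto);
 *		if (!sk)
 *			return -ENOMEM;
 *
 *		sock_init_data(sock, sk);
 *
 *		sk->sk_protocol = protocol;
 *		sk->sk_destruct = example_destruct;
 *		sk->sk_data_ready = example_data_ready;
 *		return 0;
 *	}
 */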
2381
2382void lock_sock_nested(struct sock *sk, int subclass)
2383{
2384 might_sleep();
2385 spin_lock_bh(&sk->sk_lock.slock);
2386 if (sk->sk_lock.owned)
2387 __lock_sock(sk);
2388 sk->sk_lock.owned = 1;
2389 spin_unlock(&sk->sk_lock.slock);
2390 /*
2391 * The sk_lock has mutex_lock() semantics here:
2392 */
2393 mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
2394 local_bh_enable();
2395}
2396EXPORT_SYMBOL(lock_sock_nested);
2397
2398void release_sock(struct sock *sk)
2399{
2400 /*
2401 * The sk_lock has mutex_unlock() semantics:
2402 */
2403 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
2404
2405 spin_lock_bh(&sk->sk_lock.slock);
2406 if (sk->sk_backlog.tail)
2407 __release_sock(sk);
2408
2409 /* Warning : release_cb() might need to release sk ownership,
2410	 * i.e. call sock_release_ownership(sk) before us.
2411 */
2412 if (sk->sk_prot->release_cb)
2413 sk->sk_prot->release_cb(sk);
2414
2415 sock_release_ownership(sk);
2416 if (waitqueue_active(&sk->sk_lock.wq))
2417 wake_up(&sk->sk_lock.wq);
2418 spin_unlock_bh(&sk->sk_lock.slock);
2419}
2420EXPORT_SYMBOL(release_sock);
2421
2422/**
2423 * lock_sock_fast - fast version of lock_sock
2424 * @sk: socket
2425 *
2426 * This version should be used for very small sections, where the process
2427 * won't block. Returns false if the fast path is taken:
2428 *   sk_lock.slock locked, owned = 0, BH disabled
2429 * Returns true if the slow path is taken:
2430 *   sk_lock.slock unlocked, owned = 1, BH enabled
2431 */
2432bool lock_sock_fast(struct sock *sk)
2433{
2434 might_sleep();
2435 spin_lock_bh(&sk->sk_lock.slock);
2436
2437 if (!sk->sk_lock.owned)
2438 /*
2439		 * Note : the fast path returns with sk_lock.slock held and BH disabled
2440 */
2441 return false;
2442
2443 __lock_sock(sk);
2444 sk->sk_lock.owned = 1;
2445 spin_unlock(&sk->sk_lock.slock);
2446 /*
2447 * The sk_lock has mutex_lock() semantics here:
2448 */
2449 mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
2450 local_bh_enable();
2451 return true;
2452}
2453EXPORT_SYMBOL(lock_sock_fast);
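
/*
 * Example (illustrative sketch, not taken from this file): the intended
 * calling pattern pairs lock_sock_fast() with unlock_sock_fast() from
 * <net/sock.h>, passing back the slow/fast indication.  Keep the critical
 * section short and non-blocking, e.g.:
 *
 *	bool slow = lock_sock_fast(sk);
 *
 *	skb_queue_purge(&sk->sk_error_queue);
 *	unlock_sock_fast(sk, slow);
 */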
2454
2455int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
2456{
2457 struct timeval tv;
2458 if (!sock_flag(sk, SOCK_TIMESTAMP))
2459 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
2460 tv = ktime_to_timeval(sk->sk_stamp);
2461 if (tv.tv_sec == -1)
2462 return -ENOENT;
2463 if (tv.tv_sec == 0) {
2464 sk->sk_stamp = ktime_get_real();
2465 tv = ktime_to_timeval(sk->sk_stamp);
2466 }
2467 return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
2468}
2469EXPORT_SYMBOL(sock_get_timestamp);
2470
2471int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
2472{
2473 struct timespec ts;
2474 if (!sock_flag(sk, SOCK_TIMESTAMP))
2475 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
2476 ts = ktime_to_timespec(sk->sk_stamp);
2477 if (ts.tv_sec == -1)
2478 return -ENOENT;
2479 if (ts.tv_sec == 0) {
2480 sk->sk_stamp = ktime_get_real();
2481 ts = ktime_to_timespec(sk->sk_stamp);
2482 }
2483 return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
2484}
2485EXPORT_SYMBOL(sock_get_timestampns);
2486
2487void sock_enable_timestamp(struct sock *sk, int flag)
2488{
2489 if (!sock_flag(sk, flag)) {
2490 unsigned long previous_flags = sk->sk_flags;
2491
2492 sock_set_flag(sk, flag);
2493 /*
2494 * we just set one of the two flags which require net
2495 * time stamping, but time stamping might have been on
2496 * already because of the other one
2497 */
2498 if (!(previous_flags & SK_FLAGS_TIMESTAMP))
2499 net_enable_timestamp();
2500 }
2501}
2502
2503int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
2504 int level, int type)
2505{
2506 struct sock_exterr_skb *serr;
2507 struct sk_buff *skb, *skb2;
2508 int copied, err;
2509
2510 err = -EAGAIN;
2511 skb = skb_dequeue(&sk->sk_error_queue);
2512 if (skb == NULL)
2513 goto out;
2514
2515 copied = skb->len;
2516 if (copied > len) {
2517 msg->msg_flags |= MSG_TRUNC;
2518 copied = len;
2519 }
2520 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
2521 if (err)
2522 goto out_free_skb;
2523
2524 sock_recv_timestamp(msg, sk, skb);
2525
2526 serr = SKB_EXT_ERR(skb);
2527 put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);
2528
2529 msg->msg_flags |= MSG_ERRQUEUE;
2530 err = copied;
2531
2532 /* Reset and regenerate socket error */
2533 spin_lock_bh(&sk->sk_error_queue.lock);
2534 sk->sk_err = 0;
2535 if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) {
2536 sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
2537 spin_unlock_bh(&sk->sk_error_queue.lock);
2538 sk->sk_error_report(sk);
2539 } else
2540 spin_unlock_bh(&sk->sk_error_queue.lock);
2541
2542out_free_skb:
2543 kfree_skb(skb);
2544out:
2545 return err;
2546}
2547EXPORT_SYMBOL(sock_recv_errqueue);
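
/*
 * Example (illustrative sketch, not taken from this file): a protocol's
 * recvmsg() implementation can hand MSG_ERRQUEUE requests straight to
 * sock_recv_errqueue().  SOL_EXAMPLE and EXAMPLE_RECVERR stand in for the
 * protocol's own cmsg level/type and are hypothetical:
 *
 *	if (flags & MSG_ERRQUEUE)
 *		return sock_recv_errqueue(sk, msg, len,
 *					  SOL_EXAMPLE, EXAMPLE_RECVERR);
 */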
2548
2549/*
2550 * Get a socket option on a socket.
2551 *
2552 * FIX: POSIX 1003.1g is very ambiguous here. It states that
2553 * asynchronous errors should be reported by getsockopt. We assume
2554 * this means if you specify SO_ERROR (otherwise what's the point of it).
2555 */
2556int sock_common_getsockopt(struct socket *sock, int level, int optname,
2557 char __user *optval, int __user *optlen)
2558{
2559 struct sock *sk = sock->sk;
2560
2561 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
2562}
2563EXPORT_SYMBOL(sock_common_getsockopt);
2564
2565#ifdef CONFIG_COMPAT
2566int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
2567 char __user *optval, int __user *optlen)
2568{
2569 struct sock *sk = sock->sk;
2570
2571 if (sk->sk_prot->compat_getsockopt != NULL)
2572 return sk->sk_prot->compat_getsockopt(sk, level, optname,
2573 optval, optlen);
2574 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
2575}
2576EXPORT_SYMBOL(compat_sock_common_getsockopt);
2577#endif
2578
2579int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
2580 struct msghdr *msg, size_t size, int flags)
2581{
2582 struct sock *sk = sock->sk;
2583 int addr_len = 0;
2584 int err;
2585
2586 err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
2587 flags & ~MSG_DONTWAIT, &addr_len);
2588 if (err >= 0)
2589 msg->msg_namelen = addr_len;
2590 return err;
2591}
2592EXPORT_SYMBOL(sock_common_recvmsg);
2593
2594/*
2595 * Set socket options on an inet socket.
2596 */
2597int sock_common_setsockopt(struct socket *sock, int level, int optname,
2598 char __user *optval, unsigned int optlen)
2599{
2600 struct sock *sk = sock->sk;
2601
2602 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
2603}
2604EXPORT_SYMBOL(sock_common_setsockopt);
2605
2606#ifdef CONFIG_COMPAT
2607int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
2608 char __user *optval, unsigned int optlen)
2609{
2610 struct sock *sk = sock->sk;
2611
2612 if (sk->sk_prot->compat_setsockopt != NULL)
2613 return sk->sk_prot->compat_setsockopt(sk, level, optname,
2614 optval, optlen);
2615 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
2616}
2617EXPORT_SYMBOL(compat_sock_common_setsockopt);
2618#endif
2619
2620void sk_common_release(struct sock *sk)
2621{
2622 if (sk->sk_prot->destroy)
2623 sk->sk_prot->destroy(sk);
2624
2625 /*
2626	 * Observation: when sk_common_release() is called, processes have
2627	 * no access to the socket, but the network stack still does.
2628 * Step one, detach it from networking:
2629 *
2630 * A. Remove from hash tables.
2631 */
2632
2633 sk->sk_prot->unhash(sk);
2634
2635 /*
2636	 * At this point the socket cannot receive new packets, but some may
2637	 * still be in flight because another CPU is running the receive path
2638	 * and did its hash table lookup before we unhashed the socket. They
2639	 * will reach the receive queue and be purged by the socket destructor.
2640	 *
2641	 * Also, we may still have packets pending on the receive queue and,
2642	 * probably, our own packets waiting in device queues. sock_destroy
2643	 * will drain the receive queue, but transmitted packets will delay
2644	 * socket destruction until the last reference is released.
2645 */
2646
2647 sock_orphan(sk);
2648
2649 xfrm_sk_free_policy(sk);
2650
2651 sk_refcnt_debug_release(sk);
2652
2653 if (sk->sk_frag.page) {
2654 put_page(sk->sk_frag.page);
2655 sk->sk_frag.page = NULL;
2656 }
2657
2658 sock_put(sk);
2659}
2660EXPORT_SYMBOL(sk_common_release);
2661
2662#ifdef CONFIG_PROC_FS
2663#define PROTO_INUSE_NR 64 /* should be enough for the first time */
2664struct prot_inuse {
2665 int val[PROTO_INUSE_NR];
2666};
2667
2668static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);
2669
2670#ifdef CONFIG_NET_NS
2671void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
2672{
2673 __this_cpu_add(net->core.inuse->val[prot->inuse_idx], val);
2674}
2675EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
2676
2677int sock_prot_inuse_get(struct net *net, struct proto *prot)
2678{
2679 int cpu, idx = prot->inuse_idx;
2680 int res = 0;
2681
2682 for_each_possible_cpu(cpu)
2683 res += per_cpu_ptr(net->core.inuse, cpu)->val[idx];
2684
2685 return res >= 0 ? res : 0;
2686}
2687EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
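
/*
 * Example (illustrative sketch, not taken from this file): protocols keep
 * the /proc/net/protocols "sockets" column accurate by bumping this counter
 * from their hash/unhash callbacks, typically with the protocol's hash lock
 * held (BH disabled).  The example_*() names are hypothetical and the
 * lookup-table manipulation is omitted:
 *
 *	static void example_hash(struct sock *sk)
 *	{
 *		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
 *	}
 *
 *	static void example_unhash(struct sock *sk)
 *	{
 *		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
 *	}
 */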
2688
2689static int __net_init sock_inuse_init_net(struct net *net)
2690{
2691 net->core.inuse = alloc_percpu(struct prot_inuse);
2692 return net->core.inuse ? 0 : -ENOMEM;
2693}
2694
2695static void __net_exit sock_inuse_exit_net(struct net *net)
2696{
2697 free_percpu(net->core.inuse);
2698}
2699
2700static struct pernet_operations net_inuse_ops = {
2701 .init = sock_inuse_init_net,
2702 .exit = sock_inuse_exit_net,
2703};
2704
2705static __init int net_inuse_init(void)
2706{
2707 if (register_pernet_subsys(&net_inuse_ops))
2708 panic("Cannot initialize net inuse counters");
2709
2710 return 0;
2711}
2712
2713core_initcall(net_inuse_init);
2714#else
2715static DEFINE_PER_CPU(struct prot_inuse, prot_inuse);
2716
2717void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
2718{
2719 __this_cpu_add(prot_inuse.val[prot->inuse_idx], val);
2720}
2721EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
2722
2723int sock_prot_inuse_get(struct net *net, struct proto *prot)
2724{
2725 int cpu, idx = prot->inuse_idx;
2726 int res = 0;
2727
2728 for_each_possible_cpu(cpu)
2729 res += per_cpu(prot_inuse, cpu).val[idx];
2730
2731 return res >= 0 ? res : 0;
2732}
2733EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
2734#endif
2735
2736static void assign_proto_idx(struct proto *prot)
2737{
2738 prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);
2739
2740 if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
2741 pr_err("PROTO_INUSE_NR exhausted\n");
2742 return;
2743 }
2744
2745 set_bit(prot->inuse_idx, proto_inuse_idx);
2746}
2747
2748static void release_proto_idx(struct proto *prot)
2749{
2750 if (prot->inuse_idx != PROTO_INUSE_NR - 1)
2751 clear_bit(prot->inuse_idx, proto_inuse_idx);
2752}
2753#else
2754static inline void assign_proto_idx(struct proto *prot)
2755{
2756}
2757
2758static inline void release_proto_idx(struct proto *prot)
2759{
2760}
2761#endif
2762
2763int proto_register(struct proto *prot, int alloc_slab)
2764{
2765 if (alloc_slab) {
2766 prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
2767 SLAB_HWCACHE_ALIGN | prot->slab_flags,
2768 NULL);
2769
2770 if (prot->slab == NULL) {
2771 pr_crit("%s: Can't create sock SLAB cache!\n",
2772 prot->name);
2773 goto out;
2774 }
2775
2776 if (prot->rsk_prot != NULL) {
2777 prot->rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s", prot->name);
2778 if (prot->rsk_prot->slab_name == NULL)
2779 goto out_free_sock_slab;
2780
2781 prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name,
2782 prot->rsk_prot->obj_size, 0,
2783 SLAB_HWCACHE_ALIGN, NULL);
2784
2785 if (prot->rsk_prot->slab == NULL) {
2786 pr_crit("%s: Can't create request sock SLAB cache!\n",
2787 prot->name);
2788 goto out_free_request_sock_slab_name;
2789 }
2790 }
2791
2792 if (prot->twsk_prot != NULL) {
2793 prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);
2794
2795 if (prot->twsk_prot->twsk_slab_name == NULL)
2796 goto out_free_request_sock_slab;
2797
2798 prot->twsk_prot->twsk_slab =
2799 kmem_cache_create(prot->twsk_prot->twsk_slab_name,
2800 prot->twsk_prot->twsk_obj_size,
2801 0,
2802 SLAB_HWCACHE_ALIGN |
2803 prot->slab_flags,
2804 NULL);
2805 if (prot->twsk_prot->twsk_slab == NULL)
2806 goto out_free_timewait_sock_slab_name;
2807 }
2808 }
2809
2810 mutex_lock(&proto_list_mutex);
2811 list_add(&prot->node, &proto_list);
2812 assign_proto_idx(prot);
2813 mutex_unlock(&proto_list_mutex);
2814 return 0;
2815
2816out_free_timewait_sock_slab_name:
2817 kfree(prot->twsk_prot->twsk_slab_name);
2818out_free_request_sock_slab:
2819 if (prot->rsk_prot && prot->rsk_prot->slab) {
2820 kmem_cache_destroy(prot->rsk_prot->slab);
2821 prot->rsk_prot->slab = NULL;
2822 }
2823out_free_request_sock_slab_name:
2824 if (prot->rsk_prot)
2825 kfree(prot->rsk_prot->slab_name);
2826out_free_sock_slab:
2827 kmem_cache_destroy(prot->slab);
2828 prot->slab = NULL;
2829out:
2830 return -ENOBUFS;
2831}
2832EXPORT_SYMBOL(proto_register);
2833
2834void proto_unregister(struct proto *prot)
2835{
2836 mutex_lock(&proto_list_mutex);
2837 release_proto_idx(prot);
2838 list_del(&prot->node);
2839 mutex_unlock(&proto_list_mutex);
2840
2841 if (prot->slab != NULL) {
2842 kmem_cache_destroy(prot->slab);
2843 prot->slab = NULL;
2844 }
2845
2846 if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) {
2847 kmem_cache_destroy(prot->rsk_prot->slab);
2848 kfree(prot->rsk_prot->slab_name);
2849 prot->rsk_prot->slab = NULL;
2850 }
2851
2852 if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
2853 kmem_cache_destroy(prot->twsk_prot->twsk_slab);
2854 kfree(prot->twsk_prot->twsk_slab_name);
2855 prot->twsk_prot->twsk_slab = NULL;
2856 }
2857}
2858EXPORT_SYMBOL(proto_unregister);
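
/*
 * Example (illustrative sketch, not taken from this file): a protocol
 * module registers its struct proto on load and unregisters it on unload.
 * struct example_sock and the "EXAMPLE" name are hypothetical:
 *
 *	static struct proto example_proto = {
 *		.name		= "EXAMPLE",
 *		.owner		= THIS_MODULE,
 *		.obj_size	= sizeof(struct example_sock),
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return proto_register(&example_proto, 1);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		proto_unregister(&example_proto);
 *	}
 *
 * Passing alloc_slab == 1 asks proto_register() to create the per-protocol
 * kmem cache that sk_alloc() will then allocate sockets from.
 */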
2859
2860#ifdef CONFIG_PROC_FS
2861static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
2862 __acquires(proto_list_mutex)
2863{
2864 mutex_lock(&proto_list_mutex);
2865 return seq_list_start_head(&proto_list, *pos);
2866}
2867
2868static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2869{
2870 return seq_list_next(v, &proto_list, pos);
2871}
2872
2873static void proto_seq_stop(struct seq_file *seq, void *v)
2874 __releases(proto_list_mutex)
2875{
2876 mutex_unlock(&proto_list_mutex);
2877}
2878
2879static char proto_method_implemented(const void *method)
2880{
2881 return method == NULL ? 'n' : 'y';
2882}
2883static long sock_prot_memory_allocated(struct proto *proto)
2884{
2885 return proto->memory_allocated != NULL ? proto_memory_allocated(proto) : -1L;
2886}
2887
2888static char *sock_prot_memory_pressure(struct proto *proto)
2889{
2890 return proto->memory_pressure != NULL ?
2891 proto_memory_pressure(proto) ? "yes" : "no" : "NI";
2892}
2893
2894static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
2895{
2896
2897 seq_printf(seq, "%-9s %4u %6d %6ld %-3s %6u %-3s %-10s "
2898 "%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
2899 proto->name,
2900 proto->obj_size,
2901 sock_prot_inuse_get(seq_file_net(seq), proto),
2902 sock_prot_memory_allocated(proto),
2903 sock_prot_memory_pressure(proto),
2904 proto->max_header,
2905 proto->slab == NULL ? "no" : "yes",
2906 module_name(proto->owner),
2907 proto_method_implemented(proto->close),
2908 proto_method_implemented(proto->connect),
2909 proto_method_implemented(proto->disconnect),
2910 proto_method_implemented(proto->accept),
2911 proto_method_implemented(proto->ioctl),
2912 proto_method_implemented(proto->init),
2913 proto_method_implemented(proto->destroy),
2914 proto_method_implemented(proto->shutdown),
2915 proto_method_implemented(proto->setsockopt),
2916 proto_method_implemented(proto->getsockopt),
2917 proto_method_implemented(proto->sendmsg),
2918 proto_method_implemented(proto->recvmsg),
2919 proto_method_implemented(proto->sendpage),
2920 proto_method_implemented(proto->bind),
2921 proto_method_implemented(proto->backlog_rcv),
2922 proto_method_implemented(proto->hash),
2923 proto_method_implemented(proto->unhash),
2924 proto_method_implemented(proto->get_port),
2925 proto_method_implemented(proto->enter_memory_pressure));
2926}
2927
2928static int proto_seq_show(struct seq_file *seq, void *v)
2929{
2930 if (v == &proto_list)
2931 seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
2932 "protocol",
2933 "size",
2934 "sockets",
2935 "memory",
2936 "press",
2937 "maxhdr",
2938 "slab",
2939 "module",
2940 "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
2941 else
2942 proto_seq_printf(seq, list_entry(v, struct proto, node));
2943 return 0;
2944}
2945
2946static const struct seq_operations proto_seq_ops = {
2947 .start = proto_seq_start,
2948 .next = proto_seq_next,
2949 .stop = proto_seq_stop,
2950 .show = proto_seq_show,
2951};
2952
2953static int proto_seq_open(struct inode *inode, struct file *file)
2954{
2955 return seq_open_net(inode, file, &proto_seq_ops,
2956 sizeof(struct seq_net_private));
2957}
2958
2959static const struct file_operations proto_seq_fops = {
2960 .owner = THIS_MODULE,
2961 .open = proto_seq_open,
2962 .read = seq_read,
2963 .llseek = seq_lseek,
2964 .release = seq_release_net,
2965};
2966
2967static __net_init int proto_init_net(struct net *net)
2968{
2969 if (!proc_create("protocols", S_IRUGO, net->proc_net, &proto_seq_fops))
2970 return -ENOMEM;
2971
2972 return 0;
2973}
2974
2975static __net_exit void proto_exit_net(struct net *net)
2976{
2977 remove_proc_entry("protocols", net->proc_net);
2978}
2979
2980
2981static __net_initdata struct pernet_operations proto_net_ops = {
2982 .init = proto_init_net,
2983 .exit = proto_exit_net,
2984};
2985
2986static int __init proto_init(void)
2987{
2988 return register_pernet_subsys(&proto_net_ops);
2989}
2990
2991subsys_initcall(proto_init);
2992
2993#endif /* CONFIG_PROC_FS */
91
92#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
93
94#include <linux/capability.h>
95#include <linux/errno.h>
96#include <linux/errqueue.h>
97#include <linux/types.h>
98#include <linux/socket.h>
99#include <linux/in.h>
100#include <linux/kernel.h>
101#include <linux/module.h>
102#include <linux/proc_fs.h>
103#include <linux/seq_file.h>
104#include <linux/sched.h>
105#include <linux/sched/mm.h>
106#include <linux/timer.h>
107#include <linux/string.h>
108#include <linux/sockios.h>
109#include <linux/net.h>
110#include <linux/mm.h>
111#include <linux/slab.h>
112#include <linux/interrupt.h>
113#include <linux/poll.h>
114#include <linux/tcp.h>
115#include <linux/init.h>
116#include <linux/highmem.h>
117#include <linux/user_namespace.h>
118#include <linux/static_key.h>
119#include <linux/memcontrol.h>
120#include <linux/prefetch.h>
121
122#include <linux/uaccess.h>
123
124#include <linux/netdevice.h>
125#include <net/protocol.h>
126#include <linux/skbuff.h>
127#include <net/net_namespace.h>
128#include <net/request_sock.h>
129#include <net/sock.h>
130#include <linux/net_tstamp.h>
131#include <net/xfrm.h>
132#include <linux/ipsec.h>
133#include <net/cls_cgroup.h>
134#include <net/netprio_cgroup.h>
135#include <linux/sock_diag.h>
136
137#include <linux/filter.h>
138#include <net/sock_reuseport.h>
139
140#include <trace/events/sock.h>
141
142#include <net/tcp.h>
143#include <net/busy_poll.h>
144
145static DEFINE_MUTEX(proto_list_mutex);
146static LIST_HEAD(proto_list);
147
148static void sock_inuse_add(struct net *net, int val);
149
150/**
151 * sk_ns_capable - General socket capability test
152 * @sk: Socket to use a capability on or through
153 * @user_ns: The user namespace of the capability to use
154 * @cap: The capability to use
155 *
156 * Test to see if the opener of the socket had the capability @cap in the
157 * user namespace @user_ns when the socket was created and that the
158 * current process has it as well.
159 */
160bool sk_ns_capable(const struct sock *sk,
161 struct user_namespace *user_ns, int cap)
162{
163 return file_ns_capable(sk->sk_socket->file, user_ns, cap) &&
164 ns_capable(user_ns, cap);
165}
166EXPORT_SYMBOL(sk_ns_capable);
167
168/**
169 * sk_capable - Socket global capability test
170 * @sk: Socket to use a capability on or through
171 * @cap: The global capability to use
172 *
173 * Test to see if the opener of the socket had the capability @cap in all
174 * user namespaces when the socket was created and that the current
175 * process has it as well.
176 */
177bool sk_capable(const struct sock *sk, int cap)
178{
179 return sk_ns_capable(sk, &init_user_ns, cap);
180}
181EXPORT_SYMBOL(sk_capable);
182
183/**
184 * sk_net_capable - Network namespace socket capability test
185 * @sk: Socket to use a capability on or through
186 * @cap: The capability to use
187 *
188 * Test to see if the opener of the socket had the capability @cap over the
189 * network namespace the socket is a member of when the socket was created
190 * and that the current process has it as well.
191 */
192bool sk_net_capable(const struct sock *sk, int cap)
193{
194 return sk_ns_capable(sk, sock_net(sk)->user_ns, cap);
195}
196EXPORT_SYMBOL(sk_net_capable);
197
198/*
199 * Each address family might have different locking rules, so we have
200 * one slock key per address family and separate keys for internal and
201 * userspace sockets.
202 */
203static struct lock_class_key af_family_keys[AF_MAX];
204static struct lock_class_key af_family_kern_keys[AF_MAX];
205static struct lock_class_key af_family_slock_keys[AF_MAX];
206static struct lock_class_key af_family_kern_slock_keys[AF_MAX];
207
208/*
209 * Make lock validator output more readable. (we pre-construct these
210 * strings build-time, so that runtime initialization of socket
211 * locks is fast):
212 */
213
214#define _sock_locks(x) \
215 x "AF_UNSPEC", x "AF_UNIX" , x "AF_INET" , \
216 x "AF_AX25" , x "AF_IPX" , x "AF_APPLETALK", \
217 x "AF_NETROM", x "AF_BRIDGE" , x "AF_ATMPVC" , \
218 x "AF_X25" , x "AF_INET6" , x "AF_ROSE" , \
219 x "AF_DECnet", x "AF_NETBEUI" , x "AF_SECURITY" , \
220 x "AF_KEY" , x "AF_NETLINK" , x "AF_PACKET" , \
221 x "AF_ASH" , x "AF_ECONET" , x "AF_ATMSVC" , \
222 x "AF_RDS" , x "AF_SNA" , x "AF_IRDA" , \
223 x "AF_PPPOX" , x "AF_WANPIPE" , x "AF_LLC" , \
224 x "27" , x "28" , x "AF_CAN" , \
225 x "AF_TIPC" , x "AF_BLUETOOTH", x "IUCV" , \
226 x "AF_RXRPC" , x "AF_ISDN" , x "AF_PHONET" , \
227 x "AF_IEEE802154", x "AF_CAIF" , x "AF_ALG" , \
228 x "AF_NFC" , x "AF_VSOCK" , x "AF_KCM" , \
229 x "AF_QIPCRTR", x "AF_SMC" , x "AF_MAX"
230
231static const char *const af_family_key_strings[AF_MAX+1] = {
232 _sock_locks("sk_lock-")
233};
234static const char *const af_family_slock_key_strings[AF_MAX+1] = {
235 _sock_locks("slock-")
236};
237static const char *const af_family_clock_key_strings[AF_MAX+1] = {
238 _sock_locks("clock-")
239};
240
241static const char *const af_family_kern_key_strings[AF_MAX+1] = {
242 _sock_locks("k-sk_lock-")
243};
244static const char *const af_family_kern_slock_key_strings[AF_MAX+1] = {
245 _sock_locks("k-slock-")
246};
247static const char *const af_family_kern_clock_key_strings[AF_MAX+1] = {
248 _sock_locks("k-clock-")
249};
250static const char *const af_family_rlock_key_strings[AF_MAX+1] = {
251 "rlock-AF_UNSPEC", "rlock-AF_UNIX" , "rlock-AF_INET" ,
252 "rlock-AF_AX25" , "rlock-AF_IPX" , "rlock-AF_APPLETALK",
253 "rlock-AF_NETROM", "rlock-AF_BRIDGE" , "rlock-AF_ATMPVC" ,
254 "rlock-AF_X25" , "rlock-AF_INET6" , "rlock-AF_ROSE" ,
255 "rlock-AF_DECnet", "rlock-AF_NETBEUI" , "rlock-AF_SECURITY" ,
256 "rlock-AF_KEY" , "rlock-AF_NETLINK" , "rlock-AF_PACKET" ,
257 "rlock-AF_ASH" , "rlock-AF_ECONET" , "rlock-AF_ATMSVC" ,
258 "rlock-AF_RDS" , "rlock-AF_SNA" , "rlock-AF_IRDA" ,
259 "rlock-AF_PPPOX" , "rlock-AF_WANPIPE" , "rlock-AF_LLC" ,
260 "rlock-27" , "rlock-28" , "rlock-AF_CAN" ,
261 "rlock-AF_TIPC" , "rlock-AF_BLUETOOTH", "rlock-AF_IUCV" ,
262 "rlock-AF_RXRPC" , "rlock-AF_ISDN" , "rlock-AF_PHONET" ,
263 "rlock-AF_IEEE802154", "rlock-AF_CAIF" , "rlock-AF_ALG" ,
264 "rlock-AF_NFC" , "rlock-AF_VSOCK" , "rlock-AF_KCM" ,
265 "rlock-AF_QIPCRTR", "rlock-AF_SMC" , "rlock-AF_MAX"
266};
267static const char *const af_family_wlock_key_strings[AF_MAX+1] = {
268 "wlock-AF_UNSPEC", "wlock-AF_UNIX" , "wlock-AF_INET" ,
269 "wlock-AF_AX25" , "wlock-AF_IPX" , "wlock-AF_APPLETALK",
270 "wlock-AF_NETROM", "wlock-AF_BRIDGE" , "wlock-AF_ATMPVC" ,
271 "wlock-AF_X25" , "wlock-AF_INET6" , "wlock-AF_ROSE" ,
272 "wlock-AF_DECnet", "wlock-AF_NETBEUI" , "wlock-AF_SECURITY" ,
273 "wlock-AF_KEY" , "wlock-AF_NETLINK" , "wlock-AF_PACKET" ,
274 "wlock-AF_ASH" , "wlock-AF_ECONET" , "wlock-AF_ATMSVC" ,
275 "wlock-AF_RDS" , "wlock-AF_SNA" , "wlock-AF_IRDA" ,
276 "wlock-AF_PPPOX" , "wlock-AF_WANPIPE" , "wlock-AF_LLC" ,
277 "wlock-27" , "wlock-28" , "wlock-AF_CAN" ,
278 "wlock-AF_TIPC" , "wlock-AF_BLUETOOTH", "wlock-AF_IUCV" ,
279 "wlock-AF_RXRPC" , "wlock-AF_ISDN" , "wlock-AF_PHONET" ,
280 "wlock-AF_IEEE802154", "wlock-AF_CAIF" , "wlock-AF_ALG" ,
281 "wlock-AF_NFC" , "wlock-AF_VSOCK" , "wlock-AF_KCM" ,
282 "wlock-AF_QIPCRTR", "wlock-AF_SMC" , "wlock-AF_MAX"
283};
284static const char *const af_family_elock_key_strings[AF_MAX+1] = {
285 "elock-AF_UNSPEC", "elock-AF_UNIX" , "elock-AF_INET" ,
286 "elock-AF_AX25" , "elock-AF_IPX" , "elock-AF_APPLETALK",
287 "elock-AF_NETROM", "elock-AF_BRIDGE" , "elock-AF_ATMPVC" ,
288 "elock-AF_X25" , "elock-AF_INET6" , "elock-AF_ROSE" ,
289 "elock-AF_DECnet", "elock-AF_NETBEUI" , "elock-AF_SECURITY" ,
290 "elock-AF_KEY" , "elock-AF_NETLINK" , "elock-AF_PACKET" ,
291 "elock-AF_ASH" , "elock-AF_ECONET" , "elock-AF_ATMSVC" ,
292 "elock-AF_RDS" , "elock-AF_SNA" , "elock-AF_IRDA" ,
293 "elock-AF_PPPOX" , "elock-AF_WANPIPE" , "elock-AF_LLC" ,
294 "elock-27" , "elock-28" , "elock-AF_CAN" ,
295 "elock-AF_TIPC" , "elock-AF_BLUETOOTH", "elock-AF_IUCV" ,
296 "elock-AF_RXRPC" , "elock-AF_ISDN" , "elock-AF_PHONET" ,
297 "elock-AF_IEEE802154", "elock-AF_CAIF" , "elock-AF_ALG" ,
298 "elock-AF_NFC" , "elock-AF_VSOCK" , "elock-AF_KCM" ,
299 "elock-AF_QIPCRTR", "elock-AF_SMC" , "elock-AF_MAX"
300};
301
302/*
303 * sk_callback_lock and sk queues locking rules are per-address-family,
304 * so split the lock classes by using a per-AF key:
305 */
306static struct lock_class_key af_callback_keys[AF_MAX];
307static struct lock_class_key af_rlock_keys[AF_MAX];
308static struct lock_class_key af_wlock_keys[AF_MAX];
309static struct lock_class_key af_elock_keys[AF_MAX];
310static struct lock_class_key af_kern_callback_keys[AF_MAX];
311
312/* Run time adjustable parameters. */
313__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
314EXPORT_SYMBOL(sysctl_wmem_max);
315__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
316EXPORT_SYMBOL(sysctl_rmem_max);
317__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
318__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;
319
320/* Maximal space eaten by iovec or ancillary data plus some space */
321int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
322EXPORT_SYMBOL(sysctl_optmem_max);
323
324int sysctl_tstamp_allow_data __read_mostly = 1;
325
326struct static_key memalloc_socks = STATIC_KEY_INIT_FALSE;
327EXPORT_SYMBOL_GPL(memalloc_socks);
328
329/**
330 * sk_set_memalloc - sets %SOCK_MEMALLOC
331 * @sk: socket to set it on
332 *
333 * Set %SOCK_MEMALLOC on a socket for access to emergency reserves.
334 * It's the responsibility of the admin to adjust min_free_kbytes
335 * to meet the requirements.
336 */
337void sk_set_memalloc(struct sock *sk)
338{
339 sock_set_flag(sk, SOCK_MEMALLOC);
340 sk->sk_allocation |= __GFP_MEMALLOC;
341 static_key_slow_inc(&memalloc_socks);
342}
343EXPORT_SYMBOL_GPL(sk_set_memalloc);
344
345void sk_clear_memalloc(struct sock *sk)
346{
347 sock_reset_flag(sk, SOCK_MEMALLOC);
348 sk->sk_allocation &= ~__GFP_MEMALLOC;
349 static_key_slow_dec(&memalloc_socks);
350
351 /*
352 * SOCK_MEMALLOC is allowed to ignore rmem limits to ensure forward
353 * progress of swapping. SOCK_MEMALLOC may be cleared while
354 * it has rmem allocations due to the last swapfile being deactivated
355 * but there is a risk that the socket is unusable due to exceeding
356 * the rmem limits. Reclaim the reserves and obey rmem limits again.
357 */
358 sk_mem_reclaim(sk);
359}
360EXPORT_SYMBOL_GPL(sk_clear_memalloc);
361
362int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
363{
364 int ret;
365 unsigned int noreclaim_flag;
366
367 /* these should have been dropped before queueing */
368 BUG_ON(!sock_flag(sk, SOCK_MEMALLOC));
369
370 noreclaim_flag = memalloc_noreclaim_save();
371 ret = sk->sk_backlog_rcv(sk, skb);
372 memalloc_noreclaim_restore(noreclaim_flag);
373
374 return ret;
375}
376EXPORT_SYMBOL(__sk_backlog_rcv);
377
378static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
379{
380 struct timeval tv;
381
382 if (optlen < sizeof(tv))
383 return -EINVAL;
384 if (copy_from_user(&tv, optval, sizeof(tv)))
385 return -EFAULT;
386 if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
387 return -EDOM;
388
389 if (tv.tv_sec < 0) {
390 static int warned __read_mostly;
391
392 *timeo_p = 0;
393 if (warned < 10 && net_ratelimit()) {
394 warned++;
395 pr_info("%s: `%s' (pid %d) tries to set negative timeout\n",
396 __func__, current->comm, task_pid_nr(current));
397 }
398 return 0;
399 }
400 *timeo_p = MAX_SCHEDULE_TIMEOUT;
401 if (tv.tv_sec == 0 && tv.tv_usec == 0)
402 return 0;
403 if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
404 *timeo_p = tv.tv_sec * HZ + DIV_ROUND_UP(tv.tv_usec, USEC_PER_SEC / HZ);
405 return 0;
406}
407
408static void sock_warn_obsolete_bsdism(const char *name)
409{
410 static int warned;
411 static char warncomm[TASK_COMM_LEN];
412 if (strcmp(warncomm, current->comm) && warned < 5) {
413 strcpy(warncomm, current->comm);
414 pr_warn("process `%s' is using obsolete %s SO_BSDCOMPAT\n",
415 warncomm, name);
416 warned++;
417 }
418}
419
420static bool sock_needs_netstamp(const struct sock *sk)
421{
422 switch (sk->sk_family) {
423 case AF_UNSPEC:
424 case AF_UNIX:
425 return false;
426 default:
427 return true;
428 }
429}
430
431static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
432{
433 if (sk->sk_flags & flags) {
434 sk->sk_flags &= ~flags;
435 if (sock_needs_netstamp(sk) &&
436 !(sk->sk_flags & SK_FLAGS_TIMESTAMP))
437 net_disable_timestamp();
438 }
439}
440
441
442int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
443{
444 unsigned long flags;
445 struct sk_buff_head *list = &sk->sk_receive_queue;
446
447 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
448 atomic_inc(&sk->sk_drops);
449 trace_sock_rcvqueue_full(sk, skb);
450 return -ENOMEM;
451 }
452
453 if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
454 atomic_inc(&sk->sk_drops);
455 return -ENOBUFS;
456 }
457
458 skb->dev = NULL;
459 skb_set_owner_r(skb, sk);
460
461	/* we escape from the RCU protected region, make sure we don't leak
462	 * a non-refcounted dst
463 */
464 skb_dst_force(skb);
465
466 spin_lock_irqsave(&list->lock, flags);
467 sock_skb_set_dropcount(sk, skb);
468 __skb_queue_tail(list, skb);
469 spin_unlock_irqrestore(&list->lock, flags);
470
471 if (!sock_flag(sk, SOCK_DEAD))
472 sk->sk_data_ready(sk);
473 return 0;
474}
475EXPORT_SYMBOL(__sock_queue_rcv_skb);
476
477int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
478{
479 int err;
480
481 err = sk_filter(sk, skb);
482 if (err)
483 return err;
484
485 return __sock_queue_rcv_skb(sk, skb);
486}
487EXPORT_SYMBOL(sock_queue_rcv_skb);
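
/*
 * Example (illustrative sketch, not taken from this file): a protocol's
 * delivery path queues an skb to the matching socket and frees the skb
 * itself on failure, since sock_queue_rcv_skb() does not consume it when it
 * returns an error (filter drop, rcvbuf full or rmem schedule failure):
 *
 *	if (sock_queue_rcv_skb(sk, skb) < 0) {
 *		kfree_skb(skb);
 *		return NET_RX_DROP;
 *	}
 *	return NET_RX_SUCCESS;
 */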
488
489int __sk_receive_skb(struct sock *sk, struct sk_buff *skb,
490 const int nested, unsigned int trim_cap, bool refcounted)
491{
492 int rc = NET_RX_SUCCESS;
493
494 if (sk_filter_trim_cap(sk, skb, trim_cap))
495 goto discard_and_relse;
496
497 skb->dev = NULL;
498
499 if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
500 atomic_inc(&sk->sk_drops);
501 goto discard_and_relse;
502 }
503 if (nested)
504 bh_lock_sock_nested(sk);
505 else
506 bh_lock_sock(sk);
507 if (!sock_owned_by_user(sk)) {
508 /*
509 * trylock + unlock semantics:
510 */
511 mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);
512
513 rc = sk_backlog_rcv(sk, skb);
514
515 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
516 } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
517 bh_unlock_sock(sk);
518 atomic_inc(&sk->sk_drops);
519 goto discard_and_relse;
520 }
521
522 bh_unlock_sock(sk);
523out:
524 if (refcounted)
525 sock_put(sk);
526 return rc;
527discard_and_relse:
528 kfree_skb(skb);
529 goto out;
530}
531EXPORT_SYMBOL(__sk_receive_skb);
532
533struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
534{
535 struct dst_entry *dst = __sk_dst_get(sk);
536
537 if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
538 sk_tx_queue_clear(sk);
539 sk->sk_dst_pending_confirm = 0;
540 RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
541 dst_release(dst);
542 return NULL;
543 }
544
545 return dst;
546}
547EXPORT_SYMBOL(__sk_dst_check);
548
549struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
550{
551 struct dst_entry *dst = sk_dst_get(sk);
552
553 if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
554 sk_dst_reset(sk);
555 dst_release(dst);
556 return NULL;
557 }
558
559 return dst;
560}
561EXPORT_SYMBOL(sk_dst_check);
562
563static int sock_setbindtodevice(struct sock *sk, char __user *optval,
564 int optlen)
565{
566 int ret = -ENOPROTOOPT;
567#ifdef CONFIG_NETDEVICES
568 struct net *net = sock_net(sk);
569 char devname[IFNAMSIZ];
570 int index;
571
572 /* Sorry... */
573 ret = -EPERM;
574 if (!ns_capable(net->user_ns, CAP_NET_RAW))
575 goto out;
576
577 ret = -EINVAL;
578 if (optlen < 0)
579 goto out;
580
581 /* Bind this socket to a particular device like "eth0",
582 * as specified in the passed interface name. If the
583 * name is "" or the option length is zero the socket
584 * is not bound.
585 */
586 if (optlen > IFNAMSIZ - 1)
587 optlen = IFNAMSIZ - 1;
588 memset(devname, 0, sizeof(devname));
589
590 ret = -EFAULT;
591 if (copy_from_user(devname, optval, optlen))
592 goto out;
593
594 index = 0;
595 if (devname[0] != '\0') {
596 struct net_device *dev;
597
598 rcu_read_lock();
599 dev = dev_get_by_name_rcu(net, devname);
600 if (dev)
601 index = dev->ifindex;
602 rcu_read_unlock();
603 ret = -ENODEV;
604 if (!dev)
605 goto out;
606 }
607
608 lock_sock(sk);
609 sk->sk_bound_dev_if = index;
610 sk_dst_reset(sk);
611 release_sock(sk);
612
613 ret = 0;
614
615out:
616#endif
617
618 return ret;
619}
620
621static int sock_getbindtodevice(struct sock *sk, char __user *optval,
622 int __user *optlen, int len)
623{
624 int ret = -ENOPROTOOPT;
625#ifdef CONFIG_NETDEVICES
626 struct net *net = sock_net(sk);
627 char devname[IFNAMSIZ];
628
629 if (sk->sk_bound_dev_if == 0) {
630 len = 0;
631 goto zero;
632 }
633
634 ret = -EINVAL;
635 if (len < IFNAMSIZ)
636 goto out;
637
638 ret = netdev_get_name(net, devname, sk->sk_bound_dev_if);
639 if (ret)
640 goto out;
641
642 len = strlen(devname) + 1;
643
644 ret = -EFAULT;
645 if (copy_to_user(optval, devname, len))
646 goto out;
647
648zero:
649 ret = -EFAULT;
650 if (put_user(len, optlen))
651 goto out;
652
653 ret = 0;
654
655out:
656#endif
657
658 return ret;
659}
660
661static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
662{
663 if (valbool)
664 sock_set_flag(sk, bit);
665 else
666 sock_reset_flag(sk, bit);
667}
668
669bool sk_mc_loop(struct sock *sk)
670{
671 if (dev_recursion_level())
672 return false;
673 if (!sk)
674 return true;
675 switch (sk->sk_family) {
676 case AF_INET:
677 return inet_sk(sk)->mc_loop;
678#if IS_ENABLED(CONFIG_IPV6)
679 case AF_INET6:
680 return inet6_sk(sk)->mc_loop;
681#endif
682 }
683 WARN_ON(1);
684 return true;
685}
686EXPORT_SYMBOL(sk_mc_loop);
687
688/*
689 * This is meant for all protocols to use and covers goings on
690 * at the socket level. Everything here is generic.
691 */
692
693int sock_setsockopt(struct socket *sock, int level, int optname,
694 char __user *optval, unsigned int optlen)
695{
696 struct sock *sk = sock->sk;
697 int val;
698 int valbool;
699 struct linger ling;
700 int ret = 0;
701
702 /*
703 * Options without arguments
704 */
705
706 if (optname == SO_BINDTODEVICE)
707 return sock_setbindtodevice(sk, optval, optlen);
708
709 if (optlen < sizeof(int))
710 return -EINVAL;
711
712 if (get_user(val, (int __user *)optval))
713 return -EFAULT;
714
715 valbool = val ? 1 : 0;
716
717 lock_sock(sk);
718
719 switch (optname) {
720 case SO_DEBUG:
721 if (val && !capable(CAP_NET_ADMIN))
722 ret = -EACCES;
723 else
724 sock_valbool_flag(sk, SOCK_DBG, valbool);
725 break;
726 case SO_REUSEADDR:
727 sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
728 break;
729 case SO_REUSEPORT:
730 sk->sk_reuseport = valbool;
731 break;
732 case SO_TYPE:
733 case SO_PROTOCOL:
734 case SO_DOMAIN:
735 case SO_ERROR:
736 ret = -ENOPROTOOPT;
737 break;
738 case SO_DONTROUTE:
739 sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
740 break;
741 case SO_BROADCAST:
742 sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
743 break;
744 case SO_SNDBUF:
745		/* Don't error on this; BSD doesn't, and if you think
746		 * about it this is right. Otherwise apps have to
747		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
748		 * are treated in BSD as hints.
749 */
750 val = min_t(u32, val, sysctl_wmem_max);
751set_sndbuf:
752 sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
753 sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
754 /* Wake up sending tasks if we upped the value. */
755 sk->sk_write_space(sk);
756 break;
757
758 case SO_SNDBUFFORCE:
759 if (!capable(CAP_NET_ADMIN)) {
760 ret = -EPERM;
761 break;
762 }
763 goto set_sndbuf;
764
765 case SO_RCVBUF:
766		/* Don't error on this; BSD doesn't, and if you think
767		 * about it this is right. Otherwise apps have to
768		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
769		 * are treated in BSD as hints.
770 */
771 val = min_t(u32, val, sysctl_rmem_max);
772set_rcvbuf:
773 sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
774 /*
775 * We double it on the way in to account for
776 * "struct sk_buff" etc. overhead. Applications
777 * assume that the SO_RCVBUF setting they make will
778 * allow that much actual data to be received on that
779 * socket.
780 *
781 * Applications are unaware that "struct sk_buff" and
782 * other overheads allocate from the receive buffer
783 * during socket buffer allocation.
784 *
785 * And after considering the possible alternatives,
786 * returning the value we actually used in getsockopt
787 * is the most desirable behavior.
788 */
789 sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF);
790 break;
791
792 case SO_RCVBUFFORCE:
793 if (!capable(CAP_NET_ADMIN)) {
794 ret = -EPERM;
795 break;
796 }
797 goto set_rcvbuf;
798
799 case SO_KEEPALIVE:
800 if (sk->sk_prot->keepalive)
801 sk->sk_prot->keepalive(sk, valbool);
802 sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
803 break;
804
805 case SO_OOBINLINE:
806 sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
807 break;
808
809 case SO_NO_CHECK:
810 sk->sk_no_check_tx = valbool;
811 break;
812
813 case SO_PRIORITY:
814 if ((val >= 0 && val <= 6) ||
815 ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
816 sk->sk_priority = val;
817 else
818 ret = -EPERM;
819 break;
820
821 case SO_LINGER:
822 if (optlen < sizeof(ling)) {
823 ret = -EINVAL; /* 1003.1g */
824 break;
825 }
826 if (copy_from_user(&ling, optval, sizeof(ling))) {
827 ret = -EFAULT;
828 break;
829 }
830 if (!ling.l_onoff)
831 sock_reset_flag(sk, SOCK_LINGER);
832 else {
833#if (BITS_PER_LONG == 32)
834 if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
835 sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
836 else
837#endif
838 sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
839 sock_set_flag(sk, SOCK_LINGER);
840 }
841 break;
842
843 case SO_BSDCOMPAT:
844 sock_warn_obsolete_bsdism("setsockopt");
845 break;
846
847 case SO_PASSCRED:
848 if (valbool)
849 set_bit(SOCK_PASSCRED, &sock->flags);
850 else
851 clear_bit(SOCK_PASSCRED, &sock->flags);
852 break;
853
854 case SO_TIMESTAMP:
855 case SO_TIMESTAMPNS:
856 if (valbool) {
857 if (optname == SO_TIMESTAMP)
858 sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
859 else
860 sock_set_flag(sk, SOCK_RCVTSTAMPNS);
861 sock_set_flag(sk, SOCK_RCVTSTAMP);
862 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
863 } else {
864 sock_reset_flag(sk, SOCK_RCVTSTAMP);
865 sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
866 }
867 break;
868
869 case SO_TIMESTAMPING:
870 if (val & ~SOF_TIMESTAMPING_MASK) {
871 ret = -EINVAL;
872 break;
873 }
874
875 if (val & SOF_TIMESTAMPING_OPT_ID &&
876 !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) {
877 if (sk->sk_protocol == IPPROTO_TCP &&
878 sk->sk_type == SOCK_STREAM) {
879 if ((1 << sk->sk_state) &
880 (TCPF_CLOSE | TCPF_LISTEN)) {
881 ret = -EINVAL;
882 break;
883 }
884 sk->sk_tskey = tcp_sk(sk)->snd_una;
885 } else {
886 sk->sk_tskey = 0;
887 }
888 }
889
890 if (val & SOF_TIMESTAMPING_OPT_STATS &&
891 !(val & SOF_TIMESTAMPING_OPT_TSONLY)) {
892 ret = -EINVAL;
893 break;
894 }
895
896 sk->sk_tsflags = val;
897 if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
898 sock_enable_timestamp(sk,
899 SOCK_TIMESTAMPING_RX_SOFTWARE);
900 else
901 sock_disable_timestamp(sk,
902 (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
903 break;
904
905 case SO_RCVLOWAT:
906 if (val < 0)
907 val = INT_MAX;
908 sk->sk_rcvlowat = val ? : 1;
909 break;
910
911 case SO_RCVTIMEO:
912 ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
913 break;
914
915 case SO_SNDTIMEO:
916 ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
917 break;
918
919 case SO_ATTACH_FILTER:
920 ret = -EINVAL;
921 if (optlen == sizeof(struct sock_fprog)) {
922 struct sock_fprog fprog;
923
924 ret = -EFAULT;
925 if (copy_from_user(&fprog, optval, sizeof(fprog)))
926 break;
927
928 ret = sk_attach_filter(&fprog, sk);
929 }
930 break;
931
932 case SO_ATTACH_BPF:
933 ret = -EINVAL;
934 if (optlen == sizeof(u32)) {
935 u32 ufd;
936
937 ret = -EFAULT;
938 if (copy_from_user(&ufd, optval, sizeof(ufd)))
939 break;
940
941 ret = sk_attach_bpf(ufd, sk);
942 }
943 break;
944
945 case SO_ATTACH_REUSEPORT_CBPF:
946 ret = -EINVAL;
947 if (optlen == sizeof(struct sock_fprog)) {
948 struct sock_fprog fprog;
949
950 ret = -EFAULT;
951 if (copy_from_user(&fprog, optval, sizeof(fprog)))
952 break;
953
954 ret = sk_reuseport_attach_filter(&fprog, sk);
955 }
956 break;
957
958 case SO_ATTACH_REUSEPORT_EBPF:
959 ret = -EINVAL;
960 if (optlen == sizeof(u32)) {
961 u32 ufd;
962
963 ret = -EFAULT;
964 if (copy_from_user(&ufd, optval, sizeof(ufd)))
965 break;
966
967 ret = sk_reuseport_attach_bpf(ufd, sk);
968 }
969 break;
970
971 case SO_DETACH_FILTER:
972 ret = sk_detach_filter(sk);
973 break;
974
975 case SO_LOCK_FILTER:
976 if (sock_flag(sk, SOCK_FILTER_LOCKED) && !valbool)
977 ret = -EPERM;
978 else
979 sock_valbool_flag(sk, SOCK_FILTER_LOCKED, valbool);
980 break;
981
982 case SO_PASSSEC:
983 if (valbool)
984 set_bit(SOCK_PASSSEC, &sock->flags);
985 else
986 clear_bit(SOCK_PASSSEC, &sock->flags);
987 break;
988 case SO_MARK:
989 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
990 ret = -EPERM;
991 else
992 sk->sk_mark = val;
993 break;
994
995 case SO_RXQ_OVFL:
996 sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
997 break;
998
999 case SO_WIFI_STATUS:
1000 sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
1001 break;
1002
1003 case SO_PEEK_OFF:
1004 if (sock->ops->set_peek_off)
1005 ret = sock->ops->set_peek_off(sk, val);
1006 else
1007 ret = -EOPNOTSUPP;
1008 break;
1009
1010 case SO_NOFCS:
1011 sock_valbool_flag(sk, SOCK_NOFCS, valbool);
1012 break;
1013
1014 case SO_SELECT_ERR_QUEUE:
1015 sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool);
1016 break;
1017
1018#ifdef CONFIG_NET_RX_BUSY_POLL
1019 case SO_BUSY_POLL:
1020 /* allow unprivileged users to decrease the value */
1021 if ((val > sk->sk_ll_usec) && !capable(CAP_NET_ADMIN))
1022 ret = -EPERM;
1023 else {
1024 if (val < 0)
1025 ret = -EINVAL;
1026 else
1027 sk->sk_ll_usec = val;
1028 }
1029 break;
1030#endif
1031
1032 case SO_MAX_PACING_RATE:
1033 if (val != ~0U)
1034 cmpxchg(&sk->sk_pacing_status,
1035 SK_PACING_NONE,
1036 SK_PACING_NEEDED);
1037 sk->sk_max_pacing_rate = val;
1038 sk->sk_pacing_rate = min(sk->sk_pacing_rate,
1039 sk->sk_max_pacing_rate);
1040 break;
1041
1042 case SO_INCOMING_CPU:
1043 sk->sk_incoming_cpu = val;
1044 break;
1045
1046 case SO_CNX_ADVICE:
1047 if (val == 1)
1048 dst_negative_advice(sk);
1049 break;
1050
1051 case SO_ZEROCOPY:
1052 if (sk->sk_family == PF_INET || sk->sk_family == PF_INET6) {
1053 if (sk->sk_protocol != IPPROTO_TCP)
1054 ret = -ENOTSUPP;
1055 } else if (sk->sk_family != PF_RDS) {
1056 ret = -ENOTSUPP;
1057 }
1058 if (!ret) {
1059 if (val < 0 || val > 1)
1060 ret = -EINVAL;
1061 else
1062 sock_valbool_flag(sk, SOCK_ZEROCOPY, valbool);
1063 }
1064 break;
1065
1066 default:
1067 ret = -ENOPROTOOPT;
1068 break;
1069 }
1070 release_sock(sk);
1071 return ret;
1072}
1073EXPORT_SYMBOL(sock_setsockopt);
1074
1075
1076static void cred_to_ucred(struct pid *pid, const struct cred *cred,
1077 struct ucred *ucred)
1078{
1079 ucred->pid = pid_vnr(pid);
1080 ucred->uid = ucred->gid = -1;
1081 if (cred) {
1082 struct user_namespace *current_ns = current_user_ns();
1083
1084 ucred->uid = from_kuid_munged(current_ns, cred->euid);
1085 ucred->gid = from_kgid_munged(current_ns, cred->egid);
1086 }
1087}
1088
1089static int groups_to_user(gid_t __user *dst, const struct group_info *src)
1090{
1091 struct user_namespace *user_ns = current_user_ns();
1092 int i;
1093
1094 for (i = 0; i < src->ngroups; i++)
1095 if (put_user(from_kgid_munged(user_ns, src->gid[i]), dst + i))
1096 return -EFAULT;
1097
1098 return 0;
1099}
1100
1101int sock_getsockopt(struct socket *sock, int level, int optname,
1102 char __user *optval, int __user *optlen)
1103{
1104 struct sock *sk = sock->sk;
1105
1106 union {
1107 int val;
1108 u64 val64;
1109 struct linger ling;
1110 struct timeval tm;
1111 } v;
1112
1113 int lv = sizeof(int);
1114 int len;
1115
1116 if (get_user(len, optlen))
1117 return -EFAULT;
1118 if (len < 0)
1119 return -EINVAL;
1120
1121 memset(&v, 0, sizeof(v));
1122
1123 switch (optname) {
1124 case SO_DEBUG:
1125 v.val = sock_flag(sk, SOCK_DBG);
1126 break;
1127
1128 case SO_DONTROUTE:
1129 v.val = sock_flag(sk, SOCK_LOCALROUTE);
1130 break;
1131
1132 case SO_BROADCAST:
1133 v.val = sock_flag(sk, SOCK_BROADCAST);
1134 break;
1135
1136 case SO_SNDBUF:
1137 v.val = sk->sk_sndbuf;
1138 break;
1139
1140 case SO_RCVBUF:
1141 v.val = sk->sk_rcvbuf;
1142 break;
1143
1144 case SO_REUSEADDR:
1145 v.val = sk->sk_reuse;
1146 break;
1147
1148 case SO_REUSEPORT:
1149 v.val = sk->sk_reuseport;
1150 break;
1151
1152 case SO_KEEPALIVE:
1153 v.val = sock_flag(sk, SOCK_KEEPOPEN);
1154 break;
1155
1156 case SO_TYPE:
1157 v.val = sk->sk_type;
1158 break;
1159
1160 case SO_PROTOCOL:
1161 v.val = sk->sk_protocol;
1162 break;
1163
1164 case SO_DOMAIN:
1165 v.val = sk->sk_family;
1166 break;
1167
1168 case SO_ERROR:
1169 v.val = -sock_error(sk);
1170 if (v.val == 0)
1171 v.val = xchg(&sk->sk_err_soft, 0);
1172 break;
1173
1174 case SO_OOBINLINE:
1175 v.val = sock_flag(sk, SOCK_URGINLINE);
1176 break;
1177
1178 case SO_NO_CHECK:
1179 v.val = sk->sk_no_check_tx;
1180 break;
1181
1182 case SO_PRIORITY:
1183 v.val = sk->sk_priority;
1184 break;
1185
1186 case SO_LINGER:
1187 lv = sizeof(v.ling);
1188 v.ling.l_onoff = sock_flag(sk, SOCK_LINGER);
1189 v.ling.l_linger = sk->sk_lingertime / HZ;
1190 break;
1191
1192 case SO_BSDCOMPAT:
1193 sock_warn_obsolete_bsdism("getsockopt");
1194 break;
1195
1196 case SO_TIMESTAMP:
1197 v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
1198 !sock_flag(sk, SOCK_RCVTSTAMPNS);
1199 break;
1200
1201 case SO_TIMESTAMPNS:
1202 v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
1203 break;
1204
1205 case SO_TIMESTAMPING:
1206 v.val = sk->sk_tsflags;
1207 break;
1208
1209 case SO_RCVTIMEO:
1210 lv = sizeof(struct timeval);
1211 if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
1212 v.tm.tv_sec = 0;
1213 v.tm.tv_usec = 0;
1214 } else {
1215 v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
1216 v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * USEC_PER_SEC) / HZ;
1217 }
1218 break;
1219
1220 case SO_SNDTIMEO:
1221 lv = sizeof(struct timeval);
1222 if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
1223 v.tm.tv_sec = 0;
1224 v.tm.tv_usec = 0;
1225 } else {
1226 v.tm.tv_sec = sk->sk_sndtimeo / HZ;
1227 v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * USEC_PER_SEC) / HZ;
1228 }
1229 break;
1230
1231 case SO_RCVLOWAT:
1232 v.val = sk->sk_rcvlowat;
1233 break;
1234
1235 case SO_SNDLOWAT:
1236 v.val = 1;
1237 break;
1238
1239 case SO_PASSCRED:
1240 v.val = !!test_bit(SOCK_PASSCRED, &sock->flags);
1241 break;
1242
1243 case SO_PEERCRED:
1244 {
1245 struct ucred peercred;
1246 if (len > sizeof(peercred))
1247 len = sizeof(peercred);
1248 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
1249 if (copy_to_user(optval, &peercred, len))
1250 return -EFAULT;
1251 goto lenout;
1252 }
1253
1254 case SO_PEERGROUPS:
1255 {
1256 int ret, n;
1257
1258 if (!sk->sk_peer_cred)
1259 return -ENODATA;
1260
1261 n = sk->sk_peer_cred->group_info->ngroups;
1262 if (len < n * sizeof(gid_t)) {
1263 len = n * sizeof(gid_t);
1264 return put_user(len, optlen) ? -EFAULT : -ERANGE;
1265 }
1266 len = n * sizeof(gid_t);
1267
1268 ret = groups_to_user((gid_t __user *)optval,
1269 sk->sk_peer_cred->group_info);
1270 if (ret)
1271 return ret;
1272 goto lenout;
1273 }
1274
1275 case SO_PEERNAME:
1276 {
1277 char address[128];
1278
1279 lv = sock->ops->getname(sock, (struct sockaddr *)address, 2);
1280 if (lv < 0)
1281 return -ENOTCONN;
1282 if (lv < len)
1283 return -EINVAL;
1284 if (copy_to_user(optval, address, len))
1285 return -EFAULT;
1286 goto lenout;
1287 }
1288
1289 /* Dubious BSD thing... Probably nobody even uses it, but
1290 * the UNIX standard wants it for whatever reason... -DaveM
1291 */
1292 case SO_ACCEPTCONN:
1293 v.val = sk->sk_state == TCP_LISTEN;
1294 break;
1295
1296 case SO_PASSSEC:
1297 v.val = !!test_bit(SOCK_PASSSEC, &sock->flags);
1298 break;
1299
1300 case SO_PEERSEC:
1301 return security_socket_getpeersec_stream(sock, optval, optlen, len);
1302
1303 case SO_MARK:
1304 v.val = sk->sk_mark;
1305 break;
1306
1307 case SO_RXQ_OVFL:
1308 v.val = sock_flag(sk, SOCK_RXQ_OVFL);
1309 break;
1310
1311 case SO_WIFI_STATUS:
1312 v.val = sock_flag(sk, SOCK_WIFI_STATUS);
1313 break;
1314
1315 case SO_PEEK_OFF:
1316 if (!sock->ops->set_peek_off)
1317 return -EOPNOTSUPP;
1318
1319 v.val = sk->sk_peek_off;
1320 break;
1321 case SO_NOFCS:
1322 v.val = sock_flag(sk, SOCK_NOFCS);
1323 break;
1324
1325 case SO_BINDTODEVICE:
1326 return sock_getbindtodevice(sk, optval, optlen, len);
1327
1328 case SO_GET_FILTER:
1329 len = sk_get_filter(sk, (struct sock_filter __user *)optval, len);
1330 if (len < 0)
1331 return len;
1332
1333 goto lenout;
1334
1335 case SO_LOCK_FILTER:
1336 v.val = sock_flag(sk, SOCK_FILTER_LOCKED);
1337 break;
1338
1339 case SO_BPF_EXTENSIONS:
1340 v.val = bpf_tell_extensions();
1341 break;
1342
1343 case SO_SELECT_ERR_QUEUE:
1344 v.val = sock_flag(sk, SOCK_SELECT_ERR_QUEUE);
1345 break;
1346
1347#ifdef CONFIG_NET_RX_BUSY_POLL
1348 case SO_BUSY_POLL:
1349 v.val = sk->sk_ll_usec;
1350 break;
1351#endif
1352
1353 case SO_MAX_PACING_RATE:
1354 v.val = sk->sk_max_pacing_rate;
1355 break;
1356
1357 case SO_INCOMING_CPU:
1358 v.val = sk->sk_incoming_cpu;
1359 break;
1360
1361 case SO_MEMINFO:
1362 {
1363 u32 meminfo[SK_MEMINFO_VARS];
1364
1365 if (get_user(len, optlen))
1366 return -EFAULT;
1367
1368 sk_get_meminfo(sk, meminfo);
1369
1370 len = min_t(unsigned int, len, sizeof(meminfo));
1371 if (copy_to_user(optval, &meminfo, len))
1372 return -EFAULT;
1373
1374 goto lenout;
1375 }
1376
1377#ifdef CONFIG_NET_RX_BUSY_POLL
1378 case SO_INCOMING_NAPI_ID:
1379 v.val = READ_ONCE(sk->sk_napi_id);
1380
1381 /* aggregate non-NAPI IDs down to 0 */
1382 if (v.val < MIN_NAPI_ID)
1383 v.val = 0;
1384
1385 break;
1386#endif
1387
1388 case SO_COOKIE:
1389 lv = sizeof(u64);
1390 if (len < lv)
1391 return -EINVAL;
1392 v.val64 = sock_gen_cookie(sk);
1393 break;
1394
1395 case SO_ZEROCOPY:
1396 v.val = sock_flag(sk, SOCK_ZEROCOPY);
1397 break;
1398
1399 default:
1400 /* We implement the SO_SNDLOWAT etc. to not be settable
1401 * (1003.1g 7).
1402 */
1403 return -ENOPROTOOPT;
1404 }
1405
1406 if (len > lv)
1407 len = lv;
1408 if (copy_to_user(optval, &v, len))
1409 return -EFAULT;
1410lenout:
1411 if (put_user(len, optlen))
1412 return -EFAULT;
1413 return 0;
1414}
1415
1416/*
1417 * Initialize an sk_lock.
1418 *
1419 * (We also register the sk_lock with the lock validator.)
1420 */
1421static inline void sock_lock_init(struct sock *sk)
1422{
1423 if (sk->sk_kern_sock)
1424 sock_lock_init_class_and_name(
1425 sk,
1426 af_family_kern_slock_key_strings[sk->sk_family],
1427 af_family_kern_slock_keys + sk->sk_family,
1428 af_family_kern_key_strings[sk->sk_family],
1429 af_family_kern_keys + sk->sk_family);
1430 else
1431 sock_lock_init_class_and_name(
1432 sk,
1433 af_family_slock_key_strings[sk->sk_family],
1434 af_family_slock_keys + sk->sk_family,
1435 af_family_key_strings[sk->sk_family],
1436 af_family_keys + sk->sk_family);
1437}
1438
1439/*
1440 * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
1441 * even temporarily, because of RCU lookups. sk_node should also be left as is.
1442 * We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end
1443 */
1444static void sock_copy(struct sock *nsk, const struct sock *osk)
1445{
1446#ifdef CONFIG_SECURITY_NETWORK
1447 void *sptr = nsk->sk_security;
1448#endif
1449 memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));
1450
1451 memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
1452 osk->sk_prot->obj_size - offsetof(struct sock, sk_dontcopy_end));
1453
1454#ifdef CONFIG_SECURITY_NETWORK
1455 nsk->sk_security = sptr;
1456 security_sk_clone(osk, nsk);
1457#endif
1458}
1459
1460static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
1461 int family)
1462{
1463 struct sock *sk;
1464 struct kmem_cache *slab;
1465
1466 slab = prot->slab;
1467 if (slab != NULL) {
1468 sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
1469 if (!sk)
1470 return sk;
1471 if (priority & __GFP_ZERO)
1472 sk_prot_clear_nulls(sk, prot->obj_size);
1473 } else
1474 sk = kmalloc(prot->obj_size, priority);
1475
1476 if (sk != NULL) {
1477 if (security_sk_alloc(sk, family, priority))
1478 goto out_free;
1479
1480 if (!try_module_get(prot->owner))
1481 goto out_free_sec;
1482 sk_tx_queue_clear(sk);
1483 }
1484
1485 return sk;
1486
1487out_free_sec:
1488 security_sk_free(sk);
1489out_free:
1490 if (slab != NULL)
1491 kmem_cache_free(slab, sk);
1492 else
1493 kfree(sk);
1494 return NULL;
1495}
1496
1497static void sk_prot_free(struct proto *prot, struct sock *sk)
1498{
1499 struct kmem_cache *slab;
1500 struct module *owner;
1501
1502 owner = prot->owner;
1503 slab = prot->slab;
1504
1505 cgroup_sk_free(&sk->sk_cgrp_data);
1506 mem_cgroup_sk_free(sk);
1507 security_sk_free(sk);
1508 if (slab != NULL)
1509 kmem_cache_free(slab, sk);
1510 else
1511 kfree(sk);
1512 module_put(owner);
1513}
1514
1515/**
1516 * sk_alloc - All socket objects are allocated here
1517 * @net: the applicable net namespace
1518 * @family: protocol family
1519 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1520 * @prot: struct proto associated with this new sock instance
1521 * @kern: is this to be a kernel socket?
1522 */
1523struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
1524 struct proto *prot, int kern)
1525{
1526 struct sock *sk;
1527
1528 sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
1529 if (sk) {
1530 sk->sk_family = family;
1531 /*
1532 * See comment in struct sock definition to understand
1533 * why we need sk_prot_creator -acme
1534 */
1535 sk->sk_prot = sk->sk_prot_creator = prot;
1536 sk->sk_kern_sock = kern;
1537 sock_lock_init(sk);
1538 sk->sk_net_refcnt = kern ? 0 : 1;
1539 if (likely(sk->sk_net_refcnt)) {
1540 get_net(net);
1541 sock_inuse_add(net, 1);
1542 }
1543
1544 sock_net_set(sk, net);
1545 refcount_set(&sk->sk_wmem_alloc, 1);
1546
1547 mem_cgroup_sk_alloc(sk);
1548 cgroup_sk_alloc(&sk->sk_cgrp_data);
1549 sock_update_classid(&sk->sk_cgrp_data);
1550 sock_update_netprioidx(&sk->sk_cgrp_data);
1551 }
1552
1553 return sk;
1554}
1555EXPORT_SYMBOL(sk_alloc);
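/*
 * Illustrative sketch (not part of this file): a protocol's ->create()
 * handler typically pairs sk_alloc() with sock_init_data() and drops the
 * sock with sk_free() on its error path.  "my_proto" and "err" are assumed
 * names used for the example only.
 *
 *	struct sock *sk = sk_alloc(net, PF_INET, GFP_KERNEL, &my_proto, kern);
 *
 *	if (!sk)
 *		return -ENOBUFS;
 *	sock_init_data(sock, sk);
 *	...
 *	if (err) {
 *		sk_free(sk);
 *		return err;
 *	}
 */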
1556
1557/* Sockets having SOCK_RCU_FREE will call this function after one RCU
1558 * grace period. This is the case for UDP sockets and TCP listeners.
1559 */
1560static void __sk_destruct(struct rcu_head *head)
1561{
1562 struct sock *sk = container_of(head, struct sock, sk_rcu);
1563 struct sk_filter *filter;
1564
1565 if (sk->sk_destruct)
1566 sk->sk_destruct(sk);
1567
1568 filter = rcu_dereference_check(sk->sk_filter,
1569 refcount_read(&sk->sk_wmem_alloc) == 0);
1570 if (filter) {
1571 sk_filter_uncharge(sk, filter);
1572 RCU_INIT_POINTER(sk->sk_filter, NULL);
1573 }
1574 if (rcu_access_pointer(sk->sk_reuseport_cb))
1575 reuseport_detach_sock(sk);
1576
1577 sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);
1578
1579 if (atomic_read(&sk->sk_omem_alloc))
1580 pr_debug("%s: optmem leakage (%d bytes) detected\n",
1581 __func__, atomic_read(&sk->sk_omem_alloc));
1582
1583 if (sk->sk_frag.page) {
1584 put_page(sk->sk_frag.page);
1585 sk->sk_frag.page = NULL;
1586 }
1587
1588 if (sk->sk_peer_cred)
1589 put_cred(sk->sk_peer_cred);
1590 put_pid(sk->sk_peer_pid);
1591 if (likely(sk->sk_net_refcnt))
1592 put_net(sock_net(sk));
1593 sk_prot_free(sk->sk_prot_creator, sk);
1594}
1595
1596void sk_destruct(struct sock *sk)
1597{
1598 if (sock_flag(sk, SOCK_RCU_FREE))
1599 call_rcu(&sk->sk_rcu, __sk_destruct);
1600 else
1601 __sk_destruct(&sk->sk_rcu);
1602}
1603
1604static void __sk_free(struct sock *sk)
1605{
1606 if (likely(sk->sk_net_refcnt))
1607 sock_inuse_add(sock_net(sk), -1);
1608
1609 if (unlikely(sk->sk_net_refcnt && sock_diag_has_destroy_listeners(sk)))
1610 sock_diag_broadcast_destroy(sk);
1611 else
1612 sk_destruct(sk);
1613}
1614
1615void sk_free(struct sock *sk)
1616{
1617 /*
1618 * We subtract one from sk_wmem_alloc so we can tell whether
1619 * some packets are still in a tx queue.
1620 * If the count is not zero, sock_wfree() will call __sk_free(sk) later.
1621 */
1622 if (refcount_dec_and_test(&sk->sk_wmem_alloc))
1623 __sk_free(sk);
1624}
1625EXPORT_SYMBOL(sk_free);
1626
1627static void sk_init_common(struct sock *sk)
1628{
1629 skb_queue_head_init(&sk->sk_receive_queue);
1630 skb_queue_head_init(&sk->sk_write_queue);
1631 skb_queue_head_init(&sk->sk_error_queue);
1632
1633 rwlock_init(&sk->sk_callback_lock);
1634 lockdep_set_class_and_name(&sk->sk_receive_queue.lock,
1635 af_rlock_keys + sk->sk_family,
1636 af_family_rlock_key_strings[sk->sk_family]);
1637 lockdep_set_class_and_name(&sk->sk_write_queue.lock,
1638 af_wlock_keys + sk->sk_family,
1639 af_family_wlock_key_strings[sk->sk_family]);
1640 lockdep_set_class_and_name(&sk->sk_error_queue.lock,
1641 af_elock_keys + sk->sk_family,
1642 af_family_elock_key_strings[sk->sk_family]);
1643 lockdep_set_class_and_name(&sk->sk_callback_lock,
1644 af_callback_keys + sk->sk_family,
1645 af_family_clock_key_strings[sk->sk_family]);
1646}
1647
1648/**
1649 * sk_clone_lock - clone a socket, and lock its clone
1650 * @sk: the socket to clone
1651 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1652 *
1653 * Caller must unlock socket even in error path (bh_unlock_sock(newsk))
1654 */
1655struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
1656{
1657 struct sock *newsk;
1658 bool is_charged = true;
1659
1660 newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
1661 if (newsk != NULL) {
1662 struct sk_filter *filter;
1663
1664 sock_copy(newsk, sk);
1665
1666 newsk->sk_prot_creator = sk->sk_prot;
1667
1668 /* SANITY */
1669 if (likely(newsk->sk_net_refcnt))
1670 get_net(sock_net(newsk));
1671 sk_node_init(&newsk->sk_node);
1672 sock_lock_init(newsk);
1673 bh_lock_sock(newsk);
1674 newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
1675 newsk->sk_backlog.len = 0;
1676
1677 atomic_set(&newsk->sk_rmem_alloc, 0);
1678 /*
1679 * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
1680 */
1681 refcount_set(&newsk->sk_wmem_alloc, 1);
1682 atomic_set(&newsk->sk_omem_alloc, 0);
1683 sk_init_common(newsk);
1684
1685 newsk->sk_dst_cache = NULL;
1686 newsk->sk_dst_pending_confirm = 0;
1687 newsk->sk_wmem_queued = 0;
1688 newsk->sk_forward_alloc = 0;
1689 atomic_set(&newsk->sk_drops, 0);
1690 newsk->sk_send_head = NULL;
1691 newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
1692 atomic_set(&newsk->sk_zckey, 0);
1693
1694 sock_reset_flag(newsk, SOCK_DONE);
1695 mem_cgroup_sk_alloc(newsk);
1696 cgroup_sk_alloc(&newsk->sk_cgrp_data);
1697
1698 rcu_read_lock();
1699 filter = rcu_dereference(sk->sk_filter);
1700 if (filter != NULL)
1701 /* though it's an empty new sock, the charging may fail
1702 * if sysctl_optmem_max was changed between the creation of
1703 * the original socket and this cloning
1704 */
1705 is_charged = sk_filter_charge(newsk, filter);
1706 RCU_INIT_POINTER(newsk->sk_filter, filter);
1707 rcu_read_unlock();
1708
1709 if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) {
1710 /* We need to make sure that we don't uncharge the new
1711 * socket if we couldn't charge it in the first place
1712 * as otherwise we uncharge the parent's filter.
1713 */
1714 if (!is_charged)
1715 RCU_INIT_POINTER(newsk->sk_filter, NULL);
1716 sk_free_unlock_clone(newsk);
1717 newsk = NULL;
1718 goto out;
1719 }
1720 RCU_INIT_POINTER(newsk->sk_reuseport_cb, NULL);
1721
1722 newsk->sk_err = 0;
1723 newsk->sk_err_soft = 0;
1724 newsk->sk_priority = 0;
1725 newsk->sk_incoming_cpu = raw_smp_processor_id();
1726 atomic64_set(&newsk->sk_cookie, 0);
1727 if (likely(newsk->sk_net_refcnt))
1728 sock_inuse_add(sock_net(newsk), 1);
1729
1730 /*
1731 * Before updating sk_refcnt, we must commit prior changes to memory
1732 * (Documentation/RCU/rculist_nulls.txt for details)
1733 */
1734 smp_wmb();
1735 refcount_set(&newsk->sk_refcnt, 2);
1736
1737 /*
1738 * Increment the counter in the same struct proto as the master
1739 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
1740 * is the same as sk->sk_prot->socks, as this field was copied
1741 * with memcpy).
1742 *
1743 * This _changes_ the previous behaviour, where
1744 * tcp_create_openreq_child was always incrementing the
1745 * equivalent of tcp_prot->socks (inet_sock_nr), so this has
1746 * to be taken into account by all callers. -acme
1747 */
1748 sk_refcnt_debug_inc(newsk);
1749 sk_set_socket(newsk, NULL);
1750 newsk->sk_wq = NULL;
1751
1752 if (newsk->sk_prot->sockets_allocated)
1753 sk_sockets_allocated_inc(newsk);
1754
1755 if (sock_needs_netstamp(sk) &&
1756 newsk->sk_flags & SK_FLAGS_TIMESTAMP)
1757 net_enable_timestamp();
1758 }
1759out:
1760 return newsk;
1761}
1762EXPORT_SYMBOL_GPL(sk_clone_lock);
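/*
 * Illustrative sketch (not part of this file): a caller of sk_clone_lock()
 * owns the bh lock on the returned clone and must drop it itself, even on
 * its own error path:
 *
 *	newsk = sk_clone_lock(sk, GFP_ATOMIC);
 *	if (newsk) {
 *		... protocol specific setup of newsk ...
 *		bh_unlock_sock(newsk);
 *	}
 *
 * When that setup decides the clone is unusable, sk_free_unlock_clone()
 * below unlocks and frees it in one step.
 */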
1763
1764void sk_free_unlock_clone(struct sock *sk)
1765{
1766 /* It is still a raw copy of the parent, so invalidate
1767 * the destructor and do a plain sk_free() */
1768 sk->sk_destruct = NULL;
1769 bh_unlock_sock(sk);
1770 sk_free(sk);
1771}
1772EXPORT_SYMBOL_GPL(sk_free_unlock_clone);
1773
1774void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
1775{
1776 u32 max_segs = 1;
1777
1778 sk_dst_set(sk, dst);
1779 sk->sk_route_caps = dst->dev->features | sk->sk_route_forced_caps;
1780 if (sk->sk_route_caps & NETIF_F_GSO)
1781 sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
1782 sk->sk_route_caps &= ~sk->sk_route_nocaps;
1783 if (sk_can_gso(sk)) {
1784 if (dst->header_len && !xfrm_dst_offload_ok(dst)) {
1785 sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
1786 } else {
1787 sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
1788 sk->sk_gso_max_size = dst->dev->gso_max_size;
1789 max_segs = max_t(u32, dst->dev->gso_max_segs, 1);
1790 }
1791 }
1792 sk->sk_gso_max_segs = max_segs;
1793}
1794EXPORT_SYMBOL_GPL(sk_setup_caps);
1795
1796/*
1797 * Simple resource managers for sockets.
1798 */
1799
1800
1801/*
1802 * Write buffer destructor automatically called from kfree_skb.
1803 */
1804void sock_wfree(struct sk_buff *skb)
1805{
1806 struct sock *sk = skb->sk;
1807 unsigned int len = skb->truesize;
1808
1809 if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
1810 /*
1811 * Keep a reference on sk_wmem_alloc; it will be released
1812 * after the sk_write_space() call.
1813 */
1814 WARN_ON(refcount_sub_and_test(len - 1, &sk->sk_wmem_alloc));
1815 sk->sk_write_space(sk);
1816 len = 1;
1817 }
1818 /*
1819 * if sk_wmem_alloc reaches 0, we must finish what sk_free()
1820 * could not do because of in-flight packets
1821 */
1822 if (refcount_sub_and_test(len, &sk->sk_wmem_alloc))
1823 __sk_free(sk);
1824}
1825EXPORT_SYMBOL(sock_wfree);
1826
1827/* This variant of sock_wfree() is used by TCP,
1828 * since it sets SOCK_USE_WRITE_QUEUE.
1829 */
1830void __sock_wfree(struct sk_buff *skb)
1831{
1832 struct sock *sk = skb->sk;
1833
1834 if (refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc))
1835 __sk_free(sk);
1836}
1837
1838void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
1839{
1840 skb_orphan(skb);
1841 skb->sk = sk;
1842#ifdef CONFIG_INET
1843 if (unlikely(!sk_fullsock(sk))) {
1844 skb->destructor = sock_edemux;
1845 sock_hold(sk);
1846 return;
1847 }
1848#endif
1849 skb->destructor = sock_wfree;
1850 skb_set_hash_from_sk(skb, sk);
1851 /*
1852 * We used to take a refcount on sk, but the following operation
1853 * is enough to guarantee sk_free() won't free this sock until
1854 * all in-flight packets have completed.
1855 */
1856 refcount_add(skb->truesize, &sk->sk_wmem_alloc);
1857}
1858EXPORT_SYMBOL(skb_set_owner_w);
1859
1860/* This helper is used by netem, as it can hold packets in its
1861 * delay queue. We want to allow the owner socket to send more
1862 * packets, as if they were already TX completed by a typical driver.
1863 * But we also want to keep skb->sk set because some packet schedulers
1864 * rely on it (sch_fq for example).
1865 */
1866void skb_orphan_partial(struct sk_buff *skb)
1867{
1868 if (skb_is_tcp_pure_ack(skb))
1869 return;
1870
1871 if (skb->destructor == sock_wfree
1872#ifdef CONFIG_INET
1873 || skb->destructor == tcp_wfree
1874#endif
1875 ) {
1876 struct sock *sk = skb->sk;
1877
1878 if (refcount_inc_not_zero(&sk->sk_refcnt)) {
1879 WARN_ON(refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc));
1880 skb->destructor = sock_efree;
1881 }
1882 } else {
1883 skb_orphan(skb);
1884 }
1885}
1886EXPORT_SYMBOL(skb_orphan_partial);
1887
1888/*
1889 * Read buffer destructor automatically called from kfree_skb.
1890 */
1891void sock_rfree(struct sk_buff *skb)
1892{
1893 struct sock *sk = skb->sk;
1894 unsigned int len = skb->truesize;
1895
1896 atomic_sub(len, &sk->sk_rmem_alloc);
1897 sk_mem_uncharge(sk, len);
1898}
1899EXPORT_SYMBOL(sock_rfree);
1900
1901/*
1902 * Buffer destructor for skbs that are not used directly in read or write
1903 * path, e.g. for error handler skbs. Automatically called from kfree_skb.
1904 */
1905void sock_efree(struct sk_buff *skb)
1906{
1907 sock_put(skb->sk);
1908}
1909EXPORT_SYMBOL(sock_efree);
1910
1911kuid_t sock_i_uid(struct sock *sk)
1912{
1913 kuid_t uid;
1914
1915 read_lock_bh(&sk->sk_callback_lock);
1916 uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : GLOBAL_ROOT_UID;
1917 read_unlock_bh(&sk->sk_callback_lock);
1918 return uid;
1919}
1920EXPORT_SYMBOL(sock_i_uid);
1921
1922unsigned long sock_i_ino(struct sock *sk)
1923{
1924 unsigned long ino;
1925
1926 read_lock_bh(&sk->sk_callback_lock);
1927 ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
1928 read_unlock_bh(&sk->sk_callback_lock);
1929 return ino;
1930}
1931EXPORT_SYMBOL(sock_i_ino);
1932
1933/*
1934 * Allocate a skb from the socket's send buffer.
1935 */
1936struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
1937 gfp_t priority)
1938{
1939 if (force || refcount_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
1940 struct sk_buff *skb = alloc_skb(size, priority);
1941 if (skb) {
1942 skb_set_owner_w(skb, sk);
1943 return skb;
1944 }
1945 }
1946 return NULL;
1947}
1948EXPORT_SYMBOL(sock_wmalloc);
1949
1950static void sock_ofree(struct sk_buff *skb)
1951{
1952 struct sock *sk = skb->sk;
1953
1954 atomic_sub(skb->truesize, &sk->sk_omem_alloc);
1955}
1956
1957struct sk_buff *sock_omalloc(struct sock *sk, unsigned long size,
1958 gfp_t priority)
1959{
1960 struct sk_buff *skb;
1961
1962 /* small safe race: SKB_TRUESIZE may differ from final skb->truesize */
1963 if (atomic_read(&sk->sk_omem_alloc) + SKB_TRUESIZE(size) >
1964 sysctl_optmem_max)
1965 return NULL;
1966
1967 skb = alloc_skb(size, priority);
1968 if (!skb)
1969 return NULL;
1970
1971 atomic_add(skb->truesize, &sk->sk_omem_alloc);
1972 skb->sk = sk;
1973 skb->destructor = sock_ofree;
1974 return skb;
1975}
1976
1977/*
1978 * Allocate a memory block from the socket's option memory buffer.
1979 */
1980void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
1981{
1982 if ((unsigned int)size <= sysctl_optmem_max &&
1983 atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
1984 void *mem;
1985 /* First do the add, to avoid the race if kmalloc
1986 * might sleep.
1987 */
1988 atomic_add(size, &sk->sk_omem_alloc);
1989 mem = kmalloc(size, priority);
1990 if (mem)
1991 return mem;
1992 atomic_sub(size, &sk->sk_omem_alloc);
1993 }
1994 return NULL;
1995}
1996EXPORT_SYMBOL(sock_kmalloc);
1997
1998/* Free an option memory block. Note, we actually want the inline
1999 * here as this allows gcc to detect the nullify and fold away the
2000 * condition entirely.
2001 */
2002static inline void __sock_kfree_s(struct sock *sk, void *mem, int size,
2003 const bool nullify)
2004{
2005 if (WARN_ON_ONCE(!mem))
2006 return;
2007 if (nullify)
2008 kzfree(mem);
2009 else
2010 kfree(mem);
2011 atomic_sub(size, &sk->sk_omem_alloc);
2012}
2013
2014void sock_kfree_s(struct sock *sk, void *mem, int size)
2015{
2016 __sock_kfree_s(sk, mem, size, false);
2017}
2018EXPORT_SYMBOL(sock_kfree_s);
2019
2020void sock_kzfree_s(struct sock *sk, void *mem, int size)
2021{
2022 __sock_kfree_s(sk, mem, size, true);
2023}
2024EXPORT_SYMBOL(sock_kzfree_s);
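/*
 * Illustrative sketch (not part of this file): option memory is accounted
 * in sk_omem_alloc, so an allocation and its free must quote the same size.
 * "my_opt" and "len" are assumed names used for the example only.
 *
 *	struct my_opt *opt = sock_kmalloc(sk, len, GFP_KERNEL);
 *
 *	if (!opt)
 *		return -ENOBUFS;
 *	...
 *	sock_kfree_s(sk, opt, len);   (use sock_kzfree_s() for key material)
 */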
2025
2026/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
2027 * I think these locks should be removed for datagram sockets.
2028 */
2029static long sock_wait_for_wmem(struct sock *sk, long timeo)
2030{
2031 DEFINE_WAIT(wait);
2032
2033 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
2034 for (;;) {
2035 if (!timeo)
2036 break;
2037 if (signal_pending(current))
2038 break;
2039 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
2040 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
2041 if (refcount_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
2042 break;
2043 if (sk->sk_shutdown & SEND_SHUTDOWN)
2044 break;
2045 if (sk->sk_err)
2046 break;
2047 timeo = schedule_timeout(timeo);
2048 }
2049 finish_wait(sk_sleep(sk), &wait);
2050 return timeo;
2051}
2052
2053
2054/*
2055 * Generic send/receive buffer handlers
2056 */
2057
2058struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
2059 unsigned long data_len, int noblock,
2060 int *errcode, int max_page_order)
2061{
2062 struct sk_buff *skb;
2063 long timeo;
2064 int err;
2065
2066 timeo = sock_sndtimeo(sk, noblock);
2067 for (;;) {
2068 err = sock_error(sk);
2069 if (err != 0)
2070 goto failure;
2071
2072 err = -EPIPE;
2073 if (sk->sk_shutdown & SEND_SHUTDOWN)
2074 goto failure;
2075
2076 if (sk_wmem_alloc_get(sk) < sk->sk_sndbuf)
2077 break;
2078
2079 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
2080 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
2081 err = -EAGAIN;
2082 if (!timeo)
2083 goto failure;
2084 if (signal_pending(current))
2085 goto interrupted;
2086 timeo = sock_wait_for_wmem(sk, timeo);
2087 }
2088 skb = alloc_skb_with_frags(header_len, data_len, max_page_order,
2089 errcode, sk->sk_allocation);
2090 if (skb)
2091 skb_set_owner_w(skb, sk);
2092 return skb;
2093
2094interrupted:
2095 err = sock_intr_errno(timeo);
2096failure:
2097 *errcode = err;
2098 return NULL;
2099}
2100EXPORT_SYMBOL(sock_alloc_send_pskb);
2101
2102struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
2103 int noblock, int *errcode)
2104{
2105 return sock_alloc_send_pskb(sk, size, 0, noblock, errcode, 0);
2106}
2107EXPORT_SYMBOL(sock_alloc_send_skb);
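/*
 * Illustrative sketch (not part of this file): a typical datagram sendmsg
 * implementation allocates its skb through sock_alloc_send_skb() so that
 * sndbuf limits, blocking and SOCK_NOSPACE signalling are handled here.
 * "hlen" is an assumed name used for the example only.
 *
 *	skb = sock_alloc_send_skb(sk, hlen + len,
 *				  msg->msg_flags & MSG_DONTWAIT, &err);
 *	if (!skb)
 *		goto out;
 *	skb_reserve(skb, hlen);
 *	err = memcpy_from_msg(skb_put(skb, len), msg, len);
 */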
2108
2109int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
2110 struct sockcm_cookie *sockc)
2111{
2112 u32 tsflags;
2113
2114 switch (cmsg->cmsg_type) {
2115 case SO_MARK:
2116 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
2117 return -EPERM;
2118 if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32)))
2119 return -EINVAL;
2120 sockc->mark = *(u32 *)CMSG_DATA(cmsg);
2121 break;
2122 case SO_TIMESTAMPING:
2123 if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32)))
2124 return -EINVAL;
2125
2126 tsflags = *(u32 *)CMSG_DATA(cmsg);
2127 if (tsflags & ~SOF_TIMESTAMPING_TX_RECORD_MASK)
2128 return -EINVAL;
2129
2130 sockc->tsflags &= ~SOF_TIMESTAMPING_TX_RECORD_MASK;
2131 sockc->tsflags |= tsflags;
2132 break;
2133 /* SCM_RIGHTS and SCM_CREDENTIALS are semantically in SOL_UNIX. */
2134 case SCM_RIGHTS:
2135 case SCM_CREDENTIALS:
2136 break;
2137 default:
2138 return -EINVAL;
2139 }
2140 return 0;
2141}
2142EXPORT_SYMBOL(__sock_cmsg_send);
2143
2144int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
2145 struct sockcm_cookie *sockc)
2146{
2147 struct cmsghdr *cmsg;
2148 int ret;
2149
2150 for_each_cmsghdr(cmsg, msg) {
2151 if (!CMSG_OK(msg, cmsg))
2152 return -EINVAL;
2153 if (cmsg->cmsg_level != SOL_SOCKET)
2154 continue;
2155 ret = __sock_cmsg_send(sk, msg, cmsg, sockc);
2156 if (ret)
2157 return ret;
2158 }
2159 return 0;
2160}
2161EXPORT_SYMBOL(sock_cmsg_send);
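/*
 * Illustrative userspace sketch (not part of this file): the SOL_SOCKET
 * control messages parsed above can be attached to an individual sendmsg()
 * call, e.g. to set a per-packet mark (needs CAP_NET_ADMIN):
 *
 *	char cbuf[CMSG_SPACE(sizeof(uint32_t))] = {};
 *	struct msghdr msg = { .msg_control = cbuf,
 *			      .msg_controllen = sizeof(cbuf) };
 *	struct cmsghdr *cm = CMSG_FIRSTHDR(&msg);
 *	uint32_t mark = 42;
 *
 *	cm->cmsg_level = SOL_SOCKET;
 *	cm->cmsg_type  = SO_MARK;
 *	cm->cmsg_len   = CMSG_LEN(sizeof(mark));
 *	memcpy(CMSG_DATA(cm), &mark, sizeof(mark));
 *	(iovec setup omitted; sendmsg(fd, &msg, 0) then reaches this code)
 */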
2162
2163static void sk_enter_memory_pressure(struct sock *sk)
2164{
2165 if (!sk->sk_prot->enter_memory_pressure)
2166 return;
2167
2168 sk->sk_prot->enter_memory_pressure(sk);
2169}
2170
2171static void sk_leave_memory_pressure(struct sock *sk)
2172{
2173 if (sk->sk_prot->leave_memory_pressure) {
2174 sk->sk_prot->leave_memory_pressure(sk);
2175 } else {
2176 unsigned long *memory_pressure = sk->sk_prot->memory_pressure;
2177
2178 if (memory_pressure && *memory_pressure)
2179 *memory_pressure = 0;
2180 }
2181}
2182
2183/* On 32bit arches, an skb frag is limited to 2^15 */
2184#define SKB_FRAG_PAGE_ORDER get_order(32768)
2185
2186/**
2187 * skb_page_frag_refill - check that a page_frag contains enough room
2188 * @sz: minimum size of the fragment we want to get
2189 * @pfrag: pointer to page_frag
2190 * @gfp: priority for memory allocation
2191 *
2192 * Note: While this allocator tries to use high order pages, there is
2193 * no guarantee that allocations succeed. Therefore, @sz MUST be
2194 * less than or equal to PAGE_SIZE.
2195 */
2196bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t gfp)
2197{
2198 if (pfrag->page) {
2199 if (page_ref_count(pfrag->page) == 1) {
2200 pfrag->offset = 0;
2201 return true;
2202 }
2203 if (pfrag->offset + sz <= pfrag->size)
2204 return true;
2205 put_page(pfrag->page);
2206 }
2207
2208 pfrag->offset = 0;
2209 if (SKB_FRAG_PAGE_ORDER) {
2210 /* Avoid direct reclaim but allow kswapd to wake */
2211 pfrag->page = alloc_pages((gfp & ~__GFP_DIRECT_RECLAIM) |
2212 __GFP_COMP | __GFP_NOWARN |
2213 __GFP_NORETRY,
2214 SKB_FRAG_PAGE_ORDER);
2215 if (likely(pfrag->page)) {
2216 pfrag->size = PAGE_SIZE << SKB_FRAG_PAGE_ORDER;
2217 return true;
2218 }
2219 }
2220 pfrag->page = alloc_page(gfp);
2221 if (likely(pfrag->page)) {
2222 pfrag->size = PAGE_SIZE;
2223 return true;
2224 }
2225 return false;
2226}
2227EXPORT_SYMBOL(skb_page_frag_refill);
2228
2229bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
2230{
2231 if (likely(skb_page_frag_refill(32U, pfrag, sk->sk_allocation)))
2232 return true;
2233
2234 sk_enter_memory_pressure(sk);
2235 sk_stream_moderate_sndbuf(sk);
2236 return false;
2237}
2238EXPORT_SYMBOL(sk_page_frag_refill);
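/*
 * Illustrative sketch (not part of this file): senders use the per-socket
 * page_frag as a staging area; after a successful refill they may copy up
 * to (pfrag->size - pfrag->offset) bytes at pfrag->offset and then advance
 * the offset.  "copy" is an assumed name used for the example only.
 *
 *	struct page_frag *pfrag = sk_page_frag(sk);
 *	int copy;
 *
 *	if (!sk_page_frag_refill(sk, pfrag))
 *		return -ENOMEM;
 *	copy = min_t(int, len, pfrag->size - pfrag->offset);
 *	if (copy_from_iter(page_address(pfrag->page) + pfrag->offset,
 *			   copy, &msg->msg_iter) != copy)
 *		return -EFAULT;
 *	pfrag->offset += copy;
 */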
2239
2240int sk_alloc_sg(struct sock *sk, int len, struct scatterlist *sg,
2241 int sg_start, int *sg_curr_index, unsigned int *sg_curr_size,
2242 int first_coalesce)
2243{
2244 int sg_curr = *sg_curr_index, use = 0, rc = 0;
2245 unsigned int size = *sg_curr_size;
2246 struct page_frag *pfrag;
2247 struct scatterlist *sge;
2248
2249 len -= size;
2250 pfrag = sk_page_frag(sk);
2251
2252 while (len > 0) {
2253 unsigned int orig_offset;
2254
2255 if (!sk_page_frag_refill(sk, pfrag)) {
2256 rc = -ENOMEM;
2257 goto out;
2258 }
2259
2260 use = min_t(int, len, pfrag->size - pfrag->offset);
2261
2262 if (!sk_wmem_schedule(sk, use)) {
2263 rc = -ENOMEM;
2264 goto out;
2265 }
2266
2267 sk_mem_charge(sk, use);
2268 size += use;
2269 orig_offset = pfrag->offset;
2270 pfrag->offset += use;
2271
2272 sge = sg + sg_curr - 1;
2273 if (sg_curr > first_coalesce && sg_page(sg) == pfrag->page &&
2274 sg->offset + sg->length == orig_offset) {
2275 sg->length += use;
2276 } else {
2277 sge = sg + sg_curr;
2278 sg_unmark_end(sge);
2279 sg_set_page(sge, pfrag->page, use, orig_offset);
2280 get_page(pfrag->page);
2281 sg_curr++;
2282
2283 if (sg_curr == MAX_SKB_FRAGS)
2284 sg_curr = 0;
2285
2286 if (sg_curr == sg_start) {
2287 rc = -ENOSPC;
2288 break;
2289 }
2290 }
2291
2292 len -= use;
2293 }
2294out:
2295 *sg_curr_size = size;
2296 *sg_curr_index = sg_curr;
2297 return rc;
2298}
2299EXPORT_SYMBOL(sk_alloc_sg);
2300
2301static void __lock_sock(struct sock *sk)
2302 __releases(&sk->sk_lock.slock)
2303 __acquires(&sk->sk_lock.slock)
2304{
2305 DEFINE_WAIT(wait);
2306
2307 for (;;) {
2308 prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
2309 TASK_UNINTERRUPTIBLE);
2310 spin_unlock_bh(&sk->sk_lock.slock);
2311 schedule();
2312 spin_lock_bh(&sk->sk_lock.slock);
2313 if (!sock_owned_by_user(sk))
2314 break;
2315 }
2316 finish_wait(&sk->sk_lock.wq, &wait);
2317}
2318
2319static void __release_sock(struct sock *sk)
2320 __releases(&sk->sk_lock.slock)
2321 __acquires(&sk->sk_lock.slock)
2322{
2323 struct sk_buff *skb, *next;
2324
2325 while ((skb = sk->sk_backlog.head) != NULL) {
2326 sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
2327
2328 spin_unlock_bh(&sk->sk_lock.slock);
2329
2330 do {
2331 next = skb->next;
2332 prefetch(next);
2333 WARN_ON_ONCE(skb_dst_is_noref(skb));
2334 skb->next = NULL;
2335 sk_backlog_rcv(sk, skb);
2336
2337 cond_resched();
2338
2339 skb = next;
2340 } while (skb != NULL);
2341
2342 spin_lock_bh(&sk->sk_lock.slock);
2343 }
2344
2345 /*
2346 * Doing the zeroing here guarantees we cannot loop forever
2347 * while a wild producer attempts to flood us.
2348 */
2349 sk->sk_backlog.len = 0;
2350}
2351
2352void __sk_flush_backlog(struct sock *sk)
2353{
2354 spin_lock_bh(&sk->sk_lock.slock);
2355 __release_sock(sk);
2356 spin_unlock_bh(&sk->sk_lock.slock);
2357}
2358
2359/**
2360 * sk_wait_data - wait for data to arrive at sk_receive_queue
2361 * @sk: sock to wait on
2362 * @timeo: for how long
2363 * @skb: last skb seen on sk_receive_queue
2364 *
2365 * Now socket state, including sk->sk_err, is changed only under the
2366 * socket lock, hence we may omit checks after joining the wait queue.
2367 * We check the receive queue before schedule() only as an optimization;
2368 * it is very likely that release_sock() added new data.
2369 */
2370int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb)
2371{
2372 DEFINE_WAIT_FUNC(wait, woken_wake_function);
2373 int rc;
2374
2375 add_wait_queue(sk_sleep(sk), &wait);
2376 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2377 rc = sk_wait_event(sk, timeo, skb_peek_tail(&sk->sk_receive_queue) != skb, &wait);
2378 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2379 remove_wait_queue(sk_sleep(sk), &wait);
2380 return rc;
2381}
2382EXPORT_SYMBOL(sk_wait_data);
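/*
 * Illustrative sketch (not part of this file) of a typical caller: a
 * stream-style recvmsg loops with the socket lock held until data shows
 * up, the timeout expires or a signal is pending.
 *
 *	long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 *
 *	lock_sock(sk);
 *	while (!(skb = skb_peek(&sk->sk_receive_queue))) {
 *		if (!timeo || signal_pending(current))
 *			break;
 *		sk_wait_data(sk, &timeo, NULL);
 *	}
 *	release_sock(sk);
 */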
2383
2384/**
2385 * __sk_mem_raise_allocated - increase memory_allocated
2386 * @sk: socket
2387 * @size: memory size to allocate
2388 * @amt: pages to allocate
2389 * @kind: allocation type
2390 *
2391 * Similar to __sk_mem_schedule(), but does not update sk_forward_alloc
2392 */
2393int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind)
2394{
2395 struct proto *prot = sk->sk_prot;
2396 long allocated = sk_memory_allocated_add(sk, amt);
2397
2398 if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
2399 !mem_cgroup_charge_skmem(sk->sk_memcg, amt))
2400 goto suppress_allocation;
2401
2402 /* Under limit. */
2403 if (allocated <= sk_prot_mem_limits(sk, 0)) {
2404 sk_leave_memory_pressure(sk);
2405 return 1;
2406 }
2407
2408 /* Under pressure. */
2409 if (allocated > sk_prot_mem_limits(sk, 1))
2410 sk_enter_memory_pressure(sk);
2411
2412 /* Over hard limit. */
2413 if (allocated > sk_prot_mem_limits(sk, 2))
2414 goto suppress_allocation;
2415
2416 /* guarantee minimum buffer size under pressure */
2417 if (kind == SK_MEM_RECV) {
2418 if (atomic_read(&sk->sk_rmem_alloc) < sk_get_rmem0(sk, prot))
2419 return 1;
2420
2421 } else { /* SK_MEM_SEND */
2422 int wmem0 = sk_get_wmem0(sk, prot);
2423
2424 if (sk->sk_type == SOCK_STREAM) {
2425 if (sk->sk_wmem_queued < wmem0)
2426 return 1;
2427 } else if (refcount_read(&sk->sk_wmem_alloc) < wmem0) {
2428 return 1;
2429 }
2430 }
2431
2432 if (sk_has_memory_pressure(sk)) {
2433 int alloc;
2434
2435 if (!sk_under_memory_pressure(sk))
2436 return 1;
2437 alloc = sk_sockets_allocated_read_positive(sk);
2438 if (sk_prot_mem_limits(sk, 2) > alloc *
2439 sk_mem_pages(sk->sk_wmem_queued +
2440 atomic_read(&sk->sk_rmem_alloc) +
2441 sk->sk_forward_alloc))
2442 return 1;
2443 }
2444
2445suppress_allocation:
2446
2447 if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
2448 sk_stream_moderate_sndbuf(sk);
2449
2450 /* Fail only if socket is _under_ its sndbuf.
2451 * In this case we cannot block, so we have to fail.
2452 */
2453 if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
2454 return 1;
2455 }
2456
2457 trace_sock_exceed_buf_limit(sk, prot, allocated);
2458
2459 sk_memory_allocated_sub(sk, amt);
2460
2461 if (mem_cgroup_sockets_enabled && sk->sk_memcg)
2462 mem_cgroup_uncharge_skmem(sk->sk_memcg, amt);
2463
2464 return 0;
2465}
2466EXPORT_SYMBOL(__sk_mem_raise_allocated);
2467
2468/**
2469 * __sk_mem_schedule - increase sk_forward_alloc and memory_allocated
2470 * @sk: socket
2471 * @size: memory size to allocate
2472 * @kind: allocation type
2473 *
2474 * If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
2475 * rmem allocation. This function assumes that protocols which have
2476 * memory_pressure use sk_wmem_queued as write buffer accounting.
2477 */
2478int __sk_mem_schedule(struct sock *sk, int size, int kind)
2479{
2480 int ret, amt = sk_mem_pages(size);
2481
2482 sk->sk_forward_alloc += amt << SK_MEM_QUANTUM_SHIFT;
2483 ret = __sk_mem_raise_allocated(sk, size, amt, kind);
2484 if (!ret)
2485 sk->sk_forward_alloc -= amt << SK_MEM_QUANTUM_SHIFT;
2486 return ret;
2487}
2488EXPORT_SYMBOL(__sk_mem_schedule);
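/*
 * Illustrative arithmetic (not part of this file): sk_mem_pages() rounds
 * the request up to whole SK_MEM_QUANTUM (one page) units.  With a 4K
 * quantum, size = 6000 gives amt = 2, so sk_forward_alloc grows by 8192
 * bytes and memory_allocated by 2 quanta; the unused remainder stays in
 * sk_forward_alloc and covers later sk_mem_charge() calls.
 */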
2489
2490/**
2491 * __sk_mem_reduce_allocated - reclaim memory_allocated
2492 * @sk: socket
2493 * @amount: number of quanta
2494 *
2495 * Similar to __sk_mem_reclaim(), but does not update sk_forward_alloc
2496 */
2497void __sk_mem_reduce_allocated(struct sock *sk, int amount)
2498{
2499 sk_memory_allocated_sub(sk, amount);
2500
2501 if (mem_cgroup_sockets_enabled && sk->sk_memcg)
2502 mem_cgroup_uncharge_skmem(sk->sk_memcg, amount);
2503
2504 if (sk_under_memory_pressure(sk) &&
2505 (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
2506 sk_leave_memory_pressure(sk);
2507}
2508EXPORT_SYMBOL(__sk_mem_reduce_allocated);
2509
2510/**
2511 * __sk_mem_reclaim - reclaim sk_forward_alloc and memory_allocated
2512 * @sk: socket
2513 * @amount: number of bytes (rounded down to a SK_MEM_QUANTUM multiple)
2514 */
2515void __sk_mem_reclaim(struct sock *sk, int amount)
2516{
2517 amount >>= SK_MEM_QUANTUM_SHIFT;
2518 sk->sk_forward_alloc -= amount << SK_MEM_QUANTUM_SHIFT;
2519 __sk_mem_reduce_allocated(sk, amount);
2520}
2521EXPORT_SYMBOL(__sk_mem_reclaim);
2522
2523int sk_set_peek_off(struct sock *sk, int val)
2524{
2525 sk->sk_peek_off = val;
2526 return 0;
2527}
2528EXPORT_SYMBOL_GPL(sk_set_peek_off);
2529
2530/*
2531 * Set of default routines for initialising struct proto_ops when
2532 * the protocol does not support a particular function. In certain
2533 * cases where it makes no sense for a protocol to have a "do nothing"
2534 * function, some default processing is provided.
2535 */
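/*
 * Illustrative sketch (not part of this file): a protocol that does not
 * implement, say, listen/accept simply points the corresponding proto_ops
 * members at these stubs.  "my_proto_ops" and the elided members are
 * assumed names used for the example only.
 *
 *	static const struct proto_ops my_proto_ops = {
 *		.owner		= THIS_MODULE,
 *		.listen		= sock_no_listen,
 *		.accept		= sock_no_accept,
 *		.socketpair	= sock_no_socketpair,
 *		.mmap		= sock_no_mmap,
 *		...
 *	};
 */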
2536
2537int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
2538{
2539 return -EOPNOTSUPP;
2540}
2541EXPORT_SYMBOL(sock_no_bind);
2542
2543int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
2544 int len, int flags)
2545{
2546 return -EOPNOTSUPP;
2547}
2548EXPORT_SYMBOL(sock_no_connect);
2549
2550int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
2551{
2552 return -EOPNOTSUPP;
2553}
2554EXPORT_SYMBOL(sock_no_socketpair);
2555
2556int sock_no_accept(struct socket *sock, struct socket *newsock, int flags,
2557 bool kern)
2558{
2559 return -EOPNOTSUPP;
2560}
2561EXPORT_SYMBOL(sock_no_accept);
2562
2563int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
2564 int peer)
2565{
2566 return -EOPNOTSUPP;
2567}
2568EXPORT_SYMBOL(sock_no_getname);
2569
2570__poll_t sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
2571{
2572 return 0;
2573}
2574EXPORT_SYMBOL(sock_no_poll);
2575
2576int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
2577{
2578 return -EOPNOTSUPP;
2579}
2580EXPORT_SYMBOL(sock_no_ioctl);
2581
2582int sock_no_listen(struct socket *sock, int backlog)
2583{
2584 return -EOPNOTSUPP;
2585}
2586EXPORT_SYMBOL(sock_no_listen);
2587
2588int sock_no_shutdown(struct socket *sock, int how)
2589{
2590 return -EOPNOTSUPP;
2591}
2592EXPORT_SYMBOL(sock_no_shutdown);
2593
2594int sock_no_setsockopt(struct socket *sock, int level, int optname,
2595 char __user *optval, unsigned int optlen)
2596{
2597 return -EOPNOTSUPP;
2598}
2599EXPORT_SYMBOL(sock_no_setsockopt);
2600
2601int sock_no_getsockopt(struct socket *sock, int level, int optname,
2602 char __user *optval, int __user *optlen)
2603{
2604 return -EOPNOTSUPP;
2605}
2606EXPORT_SYMBOL(sock_no_getsockopt);
2607
2608int sock_no_sendmsg(struct socket *sock, struct msghdr *m, size_t len)
2609{
2610 return -EOPNOTSUPP;
2611}
2612EXPORT_SYMBOL(sock_no_sendmsg);
2613
2614int sock_no_sendmsg_locked(struct sock *sk, struct msghdr *m, size_t len)
2615{
2616 return -EOPNOTSUPP;
2617}
2618EXPORT_SYMBOL(sock_no_sendmsg_locked);
2619
2620int sock_no_recvmsg(struct socket *sock, struct msghdr *m, size_t len,
2621 int flags)
2622{
2623 return -EOPNOTSUPP;
2624}
2625EXPORT_SYMBOL(sock_no_recvmsg);
2626
2627int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
2628{
2629 /* Mirror missing mmap method error code */
2630 return -ENODEV;
2631}
2632EXPORT_SYMBOL(sock_no_mmap);
2633
2634ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
2635{
2636 ssize_t res;
2637 struct msghdr msg = {.msg_flags = flags};
2638 struct kvec iov;
2639 char *kaddr = kmap(page);
2640 iov.iov_base = kaddr + offset;
2641 iov.iov_len = size;
2642 res = kernel_sendmsg(sock, &msg, &iov, 1, size);
2643 kunmap(page);
2644 return res;
2645}
2646EXPORT_SYMBOL(sock_no_sendpage);
2647
2648ssize_t sock_no_sendpage_locked(struct sock *sk, struct page *page,
2649 int offset, size_t size, int flags)
2650{
2651 ssize_t res;
2652 struct msghdr msg = {.msg_flags = flags};
2653 struct kvec iov;
2654 char *kaddr = kmap(page);
2655
2656 iov.iov_base = kaddr + offset;
2657 iov.iov_len = size;
2658 res = kernel_sendmsg_locked(sk, &msg, &iov, 1, size);
2659 kunmap(page);
2660 return res;
2661}
2662EXPORT_SYMBOL(sock_no_sendpage_locked);
2663
2664/*
2665 * Default Socket Callbacks
2666 */
2667
2668static void sock_def_wakeup(struct sock *sk)
2669{
2670 struct socket_wq *wq;
2671
2672 rcu_read_lock();
2673 wq = rcu_dereference(sk->sk_wq);
2674 if (skwq_has_sleeper(wq))
2675 wake_up_interruptible_all(&wq->wait);
2676 rcu_read_unlock();
2677}
2678
2679static void sock_def_error_report(struct sock *sk)
2680{
2681 struct socket_wq *wq;
2682
2683 rcu_read_lock();
2684 wq = rcu_dereference(sk->sk_wq);
2685 if (skwq_has_sleeper(wq))
2686 wake_up_interruptible_poll(&wq->wait, EPOLLERR);
2687 sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
2688 rcu_read_unlock();
2689}
2690
2691static void sock_def_readable(struct sock *sk)
2692{
2693 struct socket_wq *wq;
2694
2695 rcu_read_lock();
2696 wq = rcu_dereference(sk->sk_wq);
2697 if (skwq_has_sleeper(wq))
2698 wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN | EPOLLPRI |
2699 EPOLLRDNORM | EPOLLRDBAND);
2700 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
2701 rcu_read_unlock();
2702}
2703
2704static void sock_def_write_space(struct sock *sk)
2705{
2706 struct socket_wq *wq;
2707
2708 rcu_read_lock();
2709
2710 /* Do not wake up a writer until he can make "significant"
2711 * progress. --DaveM
2712 */
2713 if ((refcount_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
2714 wq = rcu_dereference(sk->sk_wq);
2715 if (skwq_has_sleeper(wq))
2716 wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
2717 EPOLLWRNORM | EPOLLWRBAND);
2718
2719 /* Should agree with poll, otherwise some programs break */
2720 if (sock_writeable(sk))
2721 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
2722 }
2723
2724 rcu_read_unlock();
2725}
2726
2727static void sock_def_destruct(struct sock *sk)
2728{
2729}
2730
2731void sk_send_sigurg(struct sock *sk)
2732{
2733 if (sk->sk_socket && sk->sk_socket->file)
2734 if (send_sigurg(&sk->sk_socket->file->f_owner))
2735 sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
2736}
2737EXPORT_SYMBOL(sk_send_sigurg);
2738
2739void sk_reset_timer(struct sock *sk, struct timer_list* timer,
2740 unsigned long expires)
2741{
2742 if (!mod_timer(timer, expires))
2743 sock_hold(sk);
2744}
2745EXPORT_SYMBOL(sk_reset_timer);
2746
2747void sk_stop_timer(struct sock *sk, struct timer_list* timer)
2748{
2749 if (del_timer(timer))
2750 __sock_put(sk);
2751}
2752EXPORT_SYMBOL(sk_stop_timer);
2753
2754void sock_init_data(struct socket *sock, struct sock *sk)
2755{
2756 sk_init_common(sk);
2757 sk->sk_send_head = NULL;
2758
2759 timer_setup(&sk->sk_timer, NULL, 0);
2760
2761 sk->sk_allocation = GFP_KERNEL;
2762 sk->sk_rcvbuf = sysctl_rmem_default;
2763 sk->sk_sndbuf = sysctl_wmem_default;
2764 sk->sk_state = TCP_CLOSE;
2765 sk_set_socket(sk, sock);
2766
2767 sock_set_flag(sk, SOCK_ZAPPED);
2768
2769 if (sock) {
2770 sk->sk_type = sock->type;
2771 sk->sk_wq = sock->wq;
2772 sock->sk = sk;
2773 sk->sk_uid = SOCK_INODE(sock)->i_uid;
2774 } else {
2775 sk->sk_wq = NULL;
2776 sk->sk_uid = make_kuid(sock_net(sk)->user_ns, 0);
2777 }
2778
2779 rwlock_init(&sk->sk_callback_lock);
2780 if (sk->sk_kern_sock)
2781 lockdep_set_class_and_name(
2782 &sk->sk_callback_lock,
2783 af_kern_callback_keys + sk->sk_family,
2784 af_family_kern_clock_key_strings[sk->sk_family]);
2785 else
2786 lockdep_set_class_and_name(
2787 &sk->sk_callback_lock,
2788 af_callback_keys + sk->sk_family,
2789 af_family_clock_key_strings[sk->sk_family]);
2790
2791 sk->sk_state_change = sock_def_wakeup;
2792 sk->sk_data_ready = sock_def_readable;
2793 sk->sk_write_space = sock_def_write_space;
2794 sk->sk_error_report = sock_def_error_report;
2795 sk->sk_destruct = sock_def_destruct;
2796
2797 sk->sk_frag.page = NULL;
2798 sk->sk_frag.offset = 0;
2799 sk->sk_peek_off = -1;
2800
2801 sk->sk_peer_pid = NULL;
2802 sk->sk_peer_cred = NULL;
2803 sk->sk_write_pending = 0;
2804 sk->sk_rcvlowat = 1;
2805 sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
2806 sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
2807
2808 sk->sk_stamp = SK_DEFAULT_STAMP;
2809 atomic_set(&sk->sk_zckey, 0);
2810
2811#ifdef CONFIG_NET_RX_BUSY_POLL
2812 sk->sk_napi_id = 0;
2813 sk->sk_ll_usec = sysctl_net_busy_read;
2814#endif
2815
2816 sk->sk_max_pacing_rate = ~0U;
2817 sk->sk_pacing_rate = ~0U;
2818 sk->sk_pacing_shift = 10;
2819 sk->sk_incoming_cpu = -1;
2820 /*
2821 * Before updating sk_refcnt, we must commit prior changes to memory
2822 * (Documentation/RCU/rculist_nulls.txt for details)
2823 */
2824 smp_wmb();
2825 refcount_set(&sk->sk_refcnt, 1);
2826 atomic_set(&sk->sk_drops, 0);
2827}
2828EXPORT_SYMBOL(sock_init_data);
2829
2830void lock_sock_nested(struct sock *sk, int subclass)
2831{
2832 might_sleep();
2833 spin_lock_bh(&sk->sk_lock.slock);
2834 if (sk->sk_lock.owned)
2835 __lock_sock(sk);
2836 sk->sk_lock.owned = 1;
2837 spin_unlock(&sk->sk_lock.slock);
2838 /*
2839 * The sk_lock has mutex_lock() semantics here:
2840 */
2841 mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
2842 local_bh_enable();
2843}
2844EXPORT_SYMBOL(lock_sock_nested);
2845
2846void release_sock(struct sock *sk)
2847{
2848 spin_lock_bh(&sk->sk_lock.slock);
2849 if (sk->sk_backlog.tail)
2850 __release_sock(sk);
2851
2852 /* Warning: release_cb() might need to release sk ownership,
2853 * i.e. call sock_release_ownership(sk) before us.
2854 */
2855 if (sk->sk_prot->release_cb)
2856 sk->sk_prot->release_cb(sk);
2857
2858 sock_release_ownership(sk);
2859 if (waitqueue_active(&sk->sk_lock.wq))
2860 wake_up(&sk->sk_lock.wq);
2861 spin_unlock_bh(&sk->sk_lock.slock);
2862}
2863EXPORT_SYMBOL(release_sock);
2864
2865/**
2866 * lock_sock_fast - fast version of lock_sock
2867 * @sk: socket
2868 *
2869 * This version should be used for very small sections, where the process won't block.
2870 * Returns false if the fast path is taken:
2871 *
2872 * sk_lock.slock locked, owned = 0, BH disabled
2873 *
2874 * Returns true if the slow path is taken:
2875 *
2876 * sk_lock.slock unlocked, owned = 1, BH enabled
2877 */
2878bool lock_sock_fast(struct sock *sk)
2879{
2880 might_sleep();
2881 spin_lock_bh(&sk->sk_lock.slock);
2882
2883 if (!sk->sk_lock.owned)
2884 /*
2885 * Note : We must disable BH
2886 */
2887 return false;
2888
2889 __lock_sock(sk);
2890 sk->sk_lock.owned = 1;
2891 spin_unlock(&sk->sk_lock.slock);
2892 /*
2893 * The sk_lock has mutex_lock() semantics here:
2894 */
2895 mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
2896 local_bh_enable();
2897 return true;
2898}
2899EXPORT_SYMBOL(lock_sock_fast);
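/*
 * Illustrative sketch (not part of this file): the boolean returned above
 * must be handed back to unlock_sock_fast() so the matching unlock path
 * (spin unlock vs. release_sock) is taken:
 *
 *	bool slow = lock_sock_fast(sk);
 *
 *	... short, non-blocking section ...
 *	unlock_sock_fast(sk, slow);
 */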
2900
2901int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
2902{
2903 struct timeval tv;
2904 if (!sock_flag(sk, SOCK_TIMESTAMP))
2905 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
2906 tv = ktime_to_timeval(sk->sk_stamp);
2907 if (tv.tv_sec == -1)
2908 return -ENOENT;
2909 if (tv.tv_sec == 0) {
2910 sk->sk_stamp = ktime_get_real();
2911 tv = ktime_to_timeval(sk->sk_stamp);
2912 }
2913 return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
2914}
2915EXPORT_SYMBOL(sock_get_timestamp);
2916
2917int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
2918{
2919 struct timespec ts;
2920 if (!sock_flag(sk, SOCK_TIMESTAMP))
2921 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
2922 ts = ktime_to_timespec(sk->sk_stamp);
2923 if (ts.tv_sec == -1)
2924 return -ENOENT;
2925 if (ts.tv_sec == 0) {
2926 sk->sk_stamp = ktime_get_real();
2927 ts = ktime_to_timespec(sk->sk_stamp);
2928 }
2929 return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
2930}
2931EXPORT_SYMBOL(sock_get_timestampns);
2932
2933void sock_enable_timestamp(struct sock *sk, int flag)
2934{
2935 if (!sock_flag(sk, flag)) {
2936 unsigned long previous_flags = sk->sk_flags;
2937
2938 sock_set_flag(sk, flag);
2939 /*
2940 * we just set one of the two flags which require net
2941 * time stamping, but time stamping might have been on
2942 * already because of the other one
2943 */
2944 if (sock_needs_netstamp(sk) &&
2945 !(previous_flags & SK_FLAGS_TIMESTAMP))
2946 net_enable_timestamp();
2947 }
2948}
2949
2950int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
2951 int level, int type)
2952{
2953 struct sock_exterr_skb *serr;
2954 struct sk_buff *skb;
2955 int copied, err;
2956
2957 err = -EAGAIN;
2958 skb = sock_dequeue_err_skb(sk);
2959 if (skb == NULL)
2960 goto out;
2961
2962 copied = skb->len;
2963 if (copied > len) {
2964 msg->msg_flags |= MSG_TRUNC;
2965 copied = len;
2966 }
2967 err = skb_copy_datagram_msg(skb, 0, msg, copied);
2968 if (err)
2969 goto out_free_skb;
2970
2971 sock_recv_timestamp(msg, sk, skb);
2972
2973 serr = SKB_EXT_ERR(skb);
2974 put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);
2975
2976 msg->msg_flags |= MSG_ERRQUEUE;
2977 err = copied;
2978
2979out_free_skb:
2980 kfree_skb(skb);
2981out:
2982 return err;
2983}
2984EXPORT_SYMBOL(sock_recv_errqueue);
2985
2986/*
2987 * Get a socket option on a socket.
2988 *
2989 * FIX: POSIX 1003.1g is very ambiguous here. It states that
2990 * asynchronous errors should be reported by getsockopt. We assume
2991 * this means if you specify SO_ERROR (otherwise what's the point of it).
2992 */
2993int sock_common_getsockopt(struct socket *sock, int level, int optname,
2994 char __user *optval, int __user *optlen)
2995{
2996 struct sock *sk = sock->sk;
2997
2998 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
2999}
3000EXPORT_SYMBOL(sock_common_getsockopt);
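/*
 * Illustrative userspace sketch (not part of this file): after poll()
 * reports POLLERR, or a non-blocking connect() completes, the pending
 * asynchronous error is typically fetched and cleared like this:
 *
 *	int err = 0;
 *	socklen_t elen = sizeof(err);
 *
 *	if (getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &elen) == 0 && err)
 *		errno = err;
 */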
3001
3002#ifdef CONFIG_COMPAT
3003int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
3004 char __user *optval, int __user *optlen)
3005{
3006 struct sock *sk = sock->sk;
3007
3008 if (sk->sk_prot->compat_getsockopt != NULL)
3009 return sk->sk_prot->compat_getsockopt(sk, level, optname,
3010 optval, optlen);
3011 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
3012}
3013EXPORT_SYMBOL(compat_sock_common_getsockopt);
3014#endif
3015
3016int sock_common_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
3017 int flags)
3018{
3019 struct sock *sk = sock->sk;
3020 int addr_len = 0;
3021 int err;
3022
3023 err = sk->sk_prot->recvmsg(sk, msg, size, flags & MSG_DONTWAIT,
3024 flags & ~MSG_DONTWAIT, &addr_len);
3025 if (err >= 0)
3026 msg->msg_namelen = addr_len;
3027 return err;
3028}
3029EXPORT_SYMBOL(sock_common_recvmsg);
3030
3031/*
3032 * Set socket options on an inet socket.
3033 */
3034int sock_common_setsockopt(struct socket *sock, int level, int optname,
3035 char __user *optval, unsigned int optlen)
3036{
3037 struct sock *sk = sock->sk;
3038
3039 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
3040}
3041EXPORT_SYMBOL(sock_common_setsockopt);
3042
3043#ifdef CONFIG_COMPAT
3044int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
3045 char __user *optval, unsigned int optlen)
3046{
3047 struct sock *sk = sock->sk;
3048
3049 if (sk->sk_prot->compat_setsockopt != NULL)
3050 return sk->sk_prot->compat_setsockopt(sk, level, optname,
3051 optval, optlen);
3052 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
3053}
3054EXPORT_SYMBOL(compat_sock_common_setsockopt);
3055#endif
3056
3057void sk_common_release(struct sock *sk)
3058{
3059 if (sk->sk_prot->destroy)
3060 sk->sk_prot->destroy(sk);
3061
3062 /*
3063 * Observation: when sock_common_release is called, processes have
3064 * no access to the socket, but the network stack still does.
3065 * Step one, detach it from networking:
3066 *
3067 * A. Remove from hash tables.
3068 */
3069
3070 sk->sk_prot->unhash(sk);
3071
3072 /*
3073 * At this point the socket cannot receive new packets, but it is
3074 * possible that some packets are in flight because some CPU runs the
3075 * receiver and did a hash table lookup before we unhashed the socket.
3076 * They will reach the receive queue and be purged by the socket destructor.
3077 *
3078 * Also we still have packets pending on the receive queue and probably
3079 * our own packets waiting in device queues. sock_destroy will drain the
3080 * receive queue, but transmitted packets will delay socket destruction
3081 * until the last reference is released.
3082 */
3083
3084 sock_orphan(sk);
3085
3086 xfrm_sk_free_policy(sk);
3087
3088 sk_refcnt_debug_release(sk);
3089
3090 sock_put(sk);
3091}
3092EXPORT_SYMBOL(sk_common_release);
3093
void sk_get_meminfo(const struct sock *sk, u32 *mem)
{
        memset(mem, 0, sizeof(*mem) * SK_MEMINFO_VARS);

        mem[SK_MEMINFO_RMEM_ALLOC] = sk_rmem_alloc_get(sk);
        mem[SK_MEMINFO_RCVBUF] = sk->sk_rcvbuf;
        mem[SK_MEMINFO_WMEM_ALLOC] = sk_wmem_alloc_get(sk);
        mem[SK_MEMINFO_SNDBUF] = sk->sk_sndbuf;
        mem[SK_MEMINFO_FWD_ALLOC] = sk->sk_forward_alloc;
        mem[SK_MEMINFO_WMEM_QUEUED] = sk->sk_wmem_queued;
        mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);
        mem[SK_MEMINFO_BACKLOG] = sk->sk_backlog.len;
        mem[SK_MEMINFO_DROPS] = atomic_read(&sk->sk_drops);
}

#ifdef CONFIG_PROC_FS
#define PROTO_INUSE_NR  64      /* should be enough for now */
struct prot_inuse {
        int val[PROTO_INUSE_NR];
};

static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);

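/*
 * Per-protocol socket counters: each registered protocol gets a slot in a
 * per-cpu, per-netns array indexed by prot->inuse_idx. Updates are cheap
 * per-cpu increments; reads sum the slot over all possible CPUs.
 */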
void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
{
        __this_cpu_add(net->core.prot_inuse->val[prot->inuse_idx], val);
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_add);

int sock_prot_inuse_get(struct net *net, struct proto *prot)
{
        int cpu, idx = prot->inuse_idx;
        int res = 0;

        for_each_possible_cpu(cpu)
                res += per_cpu_ptr(net->core.prot_inuse, cpu)->val[idx];

        return res >= 0 ? res : 0;
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_get);

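/*
 * Total number of sockets in a netns, regardless of protocol. Maintained
 * the same way: per-cpu deltas that are summed on read.
 */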
static void sock_inuse_add(struct net *net, int val)
{
        this_cpu_add(*net->core.sock_inuse, val);
}

int sock_inuse_get(struct net *net)
{
        int cpu, res = 0;

        for_each_possible_cpu(cpu)
                res += *per_cpu_ptr(net->core.sock_inuse, cpu);

        return res;
}
EXPORT_SYMBOL_GPL(sock_inuse_get);

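/*
 * Allocate and free the per-netns backing storage for the counters above.
 * Registered as a pernet subsystem, so every network namespace gets its own
 * set on creation.
 */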
static int __net_init sock_inuse_init_net(struct net *net)
{
        net->core.prot_inuse = alloc_percpu(struct prot_inuse);
        if (net->core.prot_inuse == NULL)
                return -ENOMEM;

        net->core.sock_inuse = alloc_percpu(int);
        if (net->core.sock_inuse == NULL)
                goto out;

        return 0;

out:
        free_percpu(net->core.prot_inuse);
        return -ENOMEM;
}

static void __net_exit sock_inuse_exit_net(struct net *net)
{
        free_percpu(net->core.prot_inuse);
        free_percpu(net->core.sock_inuse);
}

static struct pernet_operations net_inuse_ops = {
        .init = sock_inuse_init_net,
        .exit = sock_inuse_exit_net,
};

static __init int net_inuse_init(void)
{
        if (register_pernet_subsys(&net_inuse_ops))
                panic("Cannot initialize net inuse counters");

        return 0;
}

core_initcall(net_inuse_init);

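/*
 * Hand each protocol a free slot in the inuse bitmap. The last bit is kept
 * as an overflow marker: when all other slots are taken, the protocol is
 * left with index PROTO_INUSE_NR - 1, which is never set in the bitmap and
 * therefore never released.
 */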
static void assign_proto_idx(struct proto *prot)
{
        prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);

        if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
                pr_err("PROTO_INUSE_NR exhausted\n");
                return;
        }

        set_bit(prot->inuse_idx, proto_inuse_idx);
}

static void release_proto_idx(struct proto *prot)
{
        if (prot->inuse_idx != PROTO_INUSE_NR - 1)
                clear_bit(prot->inuse_idx, proto_inuse_idx);
}
#else
static inline void assign_proto_idx(struct proto *prot)
{
}

static inline void release_proto_idx(struct proto *prot)
{
}

static void sock_inuse_add(struct net *net, int val)
{
}
#endif

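/*
 * Optional request_sock cache: connection-oriented protocols (e.g. TCP)
 * provide rsk_prot, and a dedicated "request_sock_<proto>" slab is created
 * for their request sockets.
 */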
static void req_prot_cleanup(struct request_sock_ops *rsk_prot)
{
        if (!rsk_prot)
                return;
        kfree(rsk_prot->slab_name);
        rsk_prot->slab_name = NULL;
        kmem_cache_destroy(rsk_prot->slab);
        rsk_prot->slab = NULL;
}

static int req_prot_init(const struct proto *prot)
{
        struct request_sock_ops *rsk_prot = prot->rsk_prot;

        if (!rsk_prot)
                return 0;

        rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s",
                                        prot->name);
        if (!rsk_prot->slab_name)
                return -ENOMEM;

        rsk_prot->slab = kmem_cache_create(rsk_prot->slab_name,
                                           rsk_prot->obj_size, 0,
                                           prot->slab_flags, NULL);

        if (!rsk_prot->slab) {
                pr_crit("%s: Can't create request sock SLAB cache!\n",
                        prot->name);
                return -ENOMEM;
        }
        return 0;
}

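/*
 * Register a transport protocol with the socket layer. When @alloc_slab is
 * set, dedicated slab caches are created for the protocol's sockets and,
 * where provided, for its request and timewait sockets. A rough sketch of a
 * caller (the "foo" names and field values are purely illustrative, not
 * taken from any real protocol):
 *
 *      static struct proto foo_prot = {
 *              .name     = "FOO",
 *              .owner    = THIS_MODULE,
 *              .obj_size = sizeof(struct foo_sock),
 *      };
 *
 *      static int __init foo_init(void)
 *      {
 *              return proto_register(&foo_prot, 1);
 *      }
 *
 * Returns 0 on success or -ENOBUFS if any of the caches could not be set up.
 */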
int proto_register(struct proto *prot, int alloc_slab)
{
        if (alloc_slab) {
                prot->slab = kmem_cache_create_usercopy(prot->name,
                                        prot->obj_size, 0,
                                        SLAB_HWCACHE_ALIGN | prot->slab_flags,
                                        prot->useroffset, prot->usersize,
                                        NULL);

                if (prot->slab == NULL) {
                        pr_crit("%s: Can't create sock SLAB cache!\n",
                                prot->name);
                        goto out;
                }

                if (req_prot_init(prot))
                        goto out_free_request_sock_slab;

                if (prot->twsk_prot != NULL) {
                        prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);

                        if (prot->twsk_prot->twsk_slab_name == NULL)
                                goto out_free_request_sock_slab;

                        prot->twsk_prot->twsk_slab =
                                kmem_cache_create(prot->twsk_prot->twsk_slab_name,
                                                  prot->twsk_prot->twsk_obj_size,
                                                  0,
                                                  prot->slab_flags,
                                                  NULL);
                        if (prot->twsk_prot->twsk_slab == NULL)
                                goto out_free_timewait_sock_slab_name;
                }
        }

        mutex_lock(&proto_list_mutex);
        list_add(&prot->node, &proto_list);
        assign_proto_idx(prot);
        mutex_unlock(&proto_list_mutex);
        return 0;

out_free_timewait_sock_slab_name:
        kfree(prot->twsk_prot->twsk_slab_name);
out_free_request_sock_slab:
        req_prot_cleanup(prot->rsk_prot);

        kmem_cache_destroy(prot->slab);
        prot->slab = NULL;
out:
        return -ENOBUFS;
}
EXPORT_SYMBOL(proto_register);

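/*
 * Remove a protocol from the list, release its inuse slot and destroy any
 * slab caches that proto_register() created for it.
 */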
void proto_unregister(struct proto *prot)
{
        mutex_lock(&proto_list_mutex);
        release_proto_idx(prot);
        list_del(&prot->node);
        mutex_unlock(&proto_list_mutex);

        kmem_cache_destroy(prot->slab);
        prot->slab = NULL;

        req_prot_cleanup(prot->rsk_prot);

        if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
                kmem_cache_destroy(prot->twsk_prot->twsk_slab);
                kfree(prot->twsk_prot->twsk_slab_name);
                prot->twsk_prot->twsk_slab = NULL;
        }
}
EXPORT_SYMBOL(proto_unregister);

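/*
 * Request the appropriate sock_diag module through its netlink module alias,
 * after checking that the address family (and, for AF_INET, the transport
 * protocol) is actually present, so we don't load modules for nothing.
 */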
int sock_load_diag_module(int family, int protocol)
{
        if (!protocol) {
                if (!sock_is_registered(family))
                        return -ENOENT;

                return request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
                                      NETLINK_SOCK_DIAG, family);
        }

#ifdef CONFIG_INET
        if (family == AF_INET &&
            !rcu_access_pointer(inet_protos[protocol]))
                return -ENOENT;
#endif

        return request_module("net-pf-%d-proto-%d-type-%d-%d", PF_NETLINK,
                              NETLINK_SOCK_DIAG, family, protocol);
}
EXPORT_SYMBOL(sock_load_diag_module);

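/*
 * /proc/net/protocols: one line per registered protocol, dumped while
 * holding proto_list_mutex.
 */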
#ifdef CONFIG_PROC_FS
static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(proto_list_mutex)
{
        mutex_lock(&proto_list_mutex);
        return seq_list_start_head(&proto_list, *pos);
}

static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        return seq_list_next(v, &proto_list, pos);
}

static void proto_seq_stop(struct seq_file *seq, void *v)
        __releases(proto_list_mutex)
{
        mutex_unlock(&proto_list_mutex);
}

static char proto_method_implemented(const void *method)
{
        return method == NULL ? 'n' : 'y';
}

static long sock_prot_memory_allocated(struct proto *proto)
{
        return proto->memory_allocated != NULL ? proto_memory_allocated(proto) : -1L;
}

static char *sock_prot_memory_pressure(struct proto *proto)
{
        return proto->memory_pressure != NULL ?
               proto_memory_pressure(proto) ? "yes" : "no" : "NI";
}

static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
{
        seq_printf(seq, "%-9s %4u %6d %6ld %-3s %6u %-3s %-10s "
                        "%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
                   proto->name,
                   proto->obj_size,
                   sock_prot_inuse_get(seq_file_net(seq), proto),
                   sock_prot_memory_allocated(proto),
                   sock_prot_memory_pressure(proto),
                   proto->max_header,
                   proto->slab == NULL ? "no" : "yes",
                   module_name(proto->owner),
                   proto_method_implemented(proto->close),
                   proto_method_implemented(proto->connect),
                   proto_method_implemented(proto->disconnect),
                   proto_method_implemented(proto->accept),
                   proto_method_implemented(proto->ioctl),
                   proto_method_implemented(proto->init),
                   proto_method_implemented(proto->destroy),
                   proto_method_implemented(proto->shutdown),
                   proto_method_implemented(proto->setsockopt),
                   proto_method_implemented(proto->getsockopt),
                   proto_method_implemented(proto->sendmsg),
                   proto_method_implemented(proto->recvmsg),
                   proto_method_implemented(proto->sendpage),
                   proto_method_implemented(proto->bind),
                   proto_method_implemented(proto->backlog_rcv),
                   proto_method_implemented(proto->hash),
                   proto_method_implemented(proto->unhash),
                   proto_method_implemented(proto->get_port),
                   proto_method_implemented(proto->enter_memory_pressure));
}

static int proto_seq_show(struct seq_file *seq, void *v)
{
        if (v == &proto_list)
                seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
                           "protocol",
                           "size",
                           "sockets",
                           "memory",
                           "press",
                           "maxhdr",
                           "slab",
                           "module",
                           "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
        else
                proto_seq_printf(seq, list_entry(v, struct proto, node));
        return 0;
}

static const struct seq_operations proto_seq_ops = {
        .start = proto_seq_start,
        .next = proto_seq_next,
        .stop = proto_seq_stop,
        .show = proto_seq_show,
};

static int proto_seq_open(struct inode *inode, struct file *file)
{
        return seq_open_net(inode, file, &proto_seq_ops,
                            sizeof(struct seq_net_private));
}

static const struct file_operations proto_seq_fops = {
        .open = proto_seq_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release_net,
};

static __net_init int proto_init_net(struct net *net)
{
        if (!proc_create("protocols", 0444, net->proc_net, &proto_seq_fops))
                return -ENOMEM;

        return 0;
}

static __net_exit void proto_exit_net(struct net *net)
{
        remove_proc_entry("protocols", net->proc_net);
}

static __net_initdata struct pernet_operations proto_net_ops = {
        .init = proto_init_net,
        .exit = proto_exit_net,
};

static int __init proto_init(void)
{
        return register_pernet_subsys(&proto_net_ops);
}

subsys_initcall(proto_init);

#endif /* PROC_FS */

#ifdef CONFIG_NET_RX_BUSY_POLL
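/*
 * Busy-poll loop-end callback: stop polling as soon as data has arrived on
 * the receive queue or the socket's busy-poll time limit has elapsed.
 */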
bool sk_busy_loop_end(void *p, unsigned long start_time)
{
        struct sock *sk = p;

        return !skb_queue_empty(&sk->sk_receive_queue) ||
               sk_busy_loop_timeout(sk, start_time);
}
EXPORT_SYMBOL(sk_busy_loop_end);
#endif /* CONFIG_NET_RX_BUSY_POLL */