1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * INET An implementation of the TCP/IP protocol suite for the LINUX
4 * operating system. INET is implemented using the BSD Socket
5 * interface as the means of communication with the user level.
6 *
7 * PF_INET protocol family socket handler.
8 *
9 * Authors: Ross Biro
10 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
11 * Florian La Roche, <flla@stud.uni-sb.de>
12 * Alan Cox, <A.Cox@swansea.ac.uk>
13 *
14 * Changes (see also sock.c)
15 *
16 * piggy,
17 * Karl Knutson : Socket protocol table
18 * A.N.Kuznetsov : Socket death error in accept().
19 * John Richardson : Fix non blocking error in connect()
20 * so sockets that fail to connect
21 * don't return -EINPROGRESS.
22 * Alan Cox : Asynchronous I/O support
23 * Alan Cox : Keep correct socket pointer on sock
24 * structures
25 * when accept() ed
26 * Alan Cox : Semantics of SO_LINGER aren't state
27 * moved to close when you look carefully.
28 * With this fixed and the accept bug fixed
29 * some RPC stuff seems happier.
30 * Niibe Yutaka : 4.4BSD style write async I/O
31 * Alan Cox,
32 * Tony Gale : Fixed reuse semantics.
33 * Alan Cox : bind() shouldn't abort existing but dead
34 * sockets. Stops FTP netin:.. I hope.
35 * Alan Cox : bind() works correctly for RAW sockets.
36 * Note that FreeBSD at least was broken
37 * in this respect so be careful with
38 * compatibility tests...
39 * Alan Cox : routing cache support
40 * Alan Cox : memzero the socket structure for
41 * compactness.
42 * Matt Day : nonblock connect error handler
43 * Alan Cox : Allow large numbers of pending sockets
44 * (eg for big web sites), but only if
45 * specifically application requested.
46 * Alan Cox : New buffering throughout IP. Used
47 * dumbly.
48 * Alan Cox : New buffering now used smartly.
49 * Alan Cox : BSD rather than common sense
50 * interpretation of listen.
51 * Germano Caronni : Assorted small races.
52 * Alan Cox : sendmsg/recvmsg basic support.
53 * Alan Cox : Only sendmsg/recvmsg now supported.
54 * Alan Cox : Locked down bind (see security list).
55 * Alan Cox : Loosened bind a little.
56 * Mike McLagan : ADD/DEL DLCI Ioctls
57 * Willy Konynenberg : Transparent proxying support.
58 * David S. Miller : New socket lookup architecture.
59 * Some other random speedups.
60 * Cyrus Durgin : Cleaned up file for kmod hacks.
61 * Andi Kleen : Fix inet_stream_connect TCP race.
62 */
63
64#define pr_fmt(fmt) "IPv4: " fmt
65
66#include <linux/err.h>
67#include <linux/errno.h>
68#include <linux/types.h>
69#include <linux/socket.h>
70#include <linux/in.h>
71#include <linux/kernel.h>
72#include <linux/kmod.h>
73#include <linux/sched.h>
74#include <linux/timer.h>
75#include <linux/string.h>
76#include <linux/sockios.h>
77#include <linux/net.h>
78#include <linux/capability.h>
79#include <linux/fcntl.h>
80#include <linux/mm.h>
81#include <linux/interrupt.h>
82#include <linux/stat.h>
83#include <linux/init.h>
84#include <linux/poll.h>
85#include <linux/netfilter_ipv4.h>
86#include <linux/random.h>
87#include <linux/slab.h>
88
89#include <linux/uaccess.h>
90
91#include <linux/inet.h>
92#include <linux/igmp.h>
93#include <linux/inetdevice.h>
94#include <linux/netdevice.h>
95#include <net/checksum.h>
96#include <net/ip.h>
97#include <net/protocol.h>
98#include <net/arp.h>
99#include <net/route.h>
100#include <net/ip_fib.h>
101#include <net/inet_connection_sock.h>
102#include <net/gro.h>
103#include <net/gso.h>
104#include <net/tcp.h>
105#include <net/udp.h>
106#include <net/udplite.h>
107#include <net/ping.h>
108#include <linux/skbuff.h>
109#include <net/sock.h>
110#include <net/raw.h>
111#include <net/icmp.h>
112#include <net/inet_common.h>
113#include <net/ip_tunnels.h>
114#include <net/xfrm.h>
115#include <net/net_namespace.h>
116#include <net/secure_seq.h>
117#ifdef CONFIG_IP_MROUTE
118#include <linux/mroute.h>
119#endif
120#include <net/l3mdev.h>
121#include <net/compat.h>
122#include <net/rps.h>
123
124#include <trace/events/sock.h>
125
126/* The inetsw table contains everything that inet_create needs to
127 * build a new socket.
128 */
129static struct list_head inetsw[SOCK_MAX];
130static DEFINE_SPINLOCK(inetsw_lock);
131
132/* New destruction routine */
133
134void inet_sock_destruct(struct sock *sk)
135{
136 struct inet_sock *inet = inet_sk(sk);
137
138 __skb_queue_purge(&sk->sk_receive_queue);
139 __skb_queue_purge(&sk->sk_error_queue);
140
141 sk_mem_reclaim_final(sk);
142
143 if (sk->sk_type == SOCK_STREAM && sk->sk_state != TCP_CLOSE) {
144 pr_err("Attempt to release TCP socket in state %d %p\n",
145 sk->sk_state, sk);
146 return;
147 }
148 if (!sock_flag(sk, SOCK_DEAD)) {
149 pr_err("Attempt to release alive inet socket %p\n", sk);
150 return;
151 }
152
153 WARN_ON_ONCE(atomic_read(&sk->sk_rmem_alloc));
154 WARN_ON_ONCE(refcount_read(&sk->sk_wmem_alloc));
155 WARN_ON_ONCE(sk->sk_wmem_queued);
156 WARN_ON_ONCE(sk_forward_alloc_get(sk));
157
158 kfree(rcu_dereference_protected(inet->inet_opt, 1));
159 dst_release(rcu_dereference_protected(sk->sk_dst_cache, 1));
160 dst_release(rcu_dereference_protected(sk->sk_rx_dst, 1));
161}
162EXPORT_SYMBOL(inet_sock_destruct);
163
164/*
165 * The routines beyond this point handle the behaviour of an AF_INET
 166 * socket object. Mostly they punt to the subprotocols of IP to do
167 * the work.
168 */
169
170/*
171 * Automatically bind an unbound socket.
172 */
173
174static int inet_autobind(struct sock *sk)
175{
176 struct inet_sock *inet;
177 /* We may need to bind the socket. */
178 lock_sock(sk);
179 inet = inet_sk(sk);
180 if (!inet->inet_num) {
181 if (sk->sk_prot->get_port(sk, 0)) {
182 release_sock(sk);
183 return -EAGAIN;
184 }
185 inet->inet_sport = htons(inet->inet_num);
186 }
187 release_sock(sk);
188 return 0;
189}
190
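/*
 * Move a locked socket into the listening state. Shared helper for
 * inet_listen() below and for callers that already hold the socket lock;
 * if the socket is already listening, only the backlog is adjusted.
 */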
191int __inet_listen_sk(struct sock *sk, int backlog)
192{
193 unsigned char old_state = sk->sk_state;
194 int err, tcp_fastopen;
195
196 if (!((1 << old_state) & (TCPF_CLOSE | TCPF_LISTEN)))
197 return -EINVAL;
198
199 WRITE_ONCE(sk->sk_max_ack_backlog, backlog);
200 /* Really, if the socket is already in listen state
201 * we can only allow the backlog to be adjusted.
202 */
203 if (old_state != TCP_LISTEN) {
204 /* Enable TFO w/o requiring TCP_FASTOPEN socket option.
205 * Note that only TCP sockets (SOCK_STREAM) will reach here.
 206 * Also the fastopen backlog may already have been set via the option
207 * because the socket was in TCP_LISTEN state previously but
208 * was shutdown() rather than close().
209 */
210 tcp_fastopen = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen);
211 if ((tcp_fastopen & TFO_SERVER_WO_SOCKOPT1) &&
212 (tcp_fastopen & TFO_SERVER_ENABLE) &&
213 !inet_csk(sk)->icsk_accept_queue.fastopenq.max_qlen) {
214 fastopen_queue_tune(sk, backlog);
215 tcp_fastopen_init_key_once(sock_net(sk));
216 }
217
218 err = inet_csk_listen_start(sk);
219 if (err)
220 return err;
221
222 tcp_call_bpf(sk, BPF_SOCK_OPS_TCP_LISTEN_CB, 0, NULL);
223 }
224 return 0;
225}
226
227/*
228 * Move a socket into listening state.
229 */
230int inet_listen(struct socket *sock, int backlog)
231{
232 struct sock *sk = sock->sk;
233 int err = -EINVAL;
234
235 lock_sock(sk);
236
237 if (sock->state != SS_UNCONNECTED || sock->type != SOCK_STREAM)
238 goto out;
239
240 err = __inet_listen_sk(sk, backlog);
241
242out:
243 release_sock(sk);
244 return err;
245}
246EXPORT_SYMBOL(inet_listen);
247
248/*
249 * Create an inet socket.
250 */
251
252static int inet_create(struct net *net, struct socket *sock, int protocol,
253 int kern)
254{
255 struct sock *sk;
256 struct inet_protosw *answer;
257 struct inet_sock *inet;
258 struct proto *answer_prot;
259 unsigned char answer_flags;
260 int try_loading_module = 0;
261 int err;
262
263 if (protocol < 0 || protocol >= IPPROTO_MAX)
264 return -EINVAL;
265
266 sock->state = SS_UNCONNECTED;
267
268 /* Look for the requested type/protocol pair. */
269lookup_protocol:
270 err = -ESOCKTNOSUPPORT;
271 rcu_read_lock();
272 list_for_each_entry_rcu(answer, &inetsw[sock->type], list) {
273
274 err = 0;
275 /* Check the non-wild match. */
276 if (protocol == answer->protocol) {
277 if (protocol != IPPROTO_IP)
278 break;
279 } else {
280 /* Check for the two wild cases. */
281 if (IPPROTO_IP == protocol) {
282 protocol = answer->protocol;
283 break;
284 }
285 if (IPPROTO_IP == answer->protocol)
286 break;
287 }
288 err = -EPROTONOSUPPORT;
289 }
290
291 if (unlikely(err)) {
292 if (try_loading_module < 2) {
293 rcu_read_unlock();
294 /*
295 * Be more specific, e.g. net-pf-2-proto-132-type-1
296 * (net-pf-PF_INET-proto-IPPROTO_SCTP-type-SOCK_STREAM)
297 */
298 if (++try_loading_module == 1)
299 request_module("net-pf-%d-proto-%d-type-%d",
300 PF_INET, protocol, sock->type);
301 /*
302 * Fall back to generic, e.g. net-pf-2-proto-132
303 * (net-pf-PF_INET-proto-IPPROTO_SCTP)
304 */
305 else
306 request_module("net-pf-%d-proto-%d",
307 PF_INET, protocol);
308 goto lookup_protocol;
309 } else
310 goto out_rcu_unlock;
311 }
312
313 err = -EPERM;
314 if (sock->type == SOCK_RAW && !kern &&
315 !ns_capable(net->user_ns, CAP_NET_RAW))
316 goto out_rcu_unlock;
317
318 sock->ops = answer->ops;
319 answer_prot = answer->prot;
320 answer_flags = answer->flags;
321 rcu_read_unlock();
322
323 WARN_ON(!answer_prot->slab);
324
325 err = -ENOMEM;
326 sk = sk_alloc(net, PF_INET, GFP_KERNEL, answer_prot, kern);
327 if (!sk)
328 goto out;
329
330 err = 0;
331 if (INET_PROTOSW_REUSE & answer_flags)
332 sk->sk_reuse = SK_CAN_REUSE;
333
334 if (INET_PROTOSW_ICSK & answer_flags)
335 inet_init_csk_locks(sk);
336
337 inet = inet_sk(sk);
338 inet_assign_bit(IS_ICSK, sk, INET_PROTOSW_ICSK & answer_flags);
339
340 inet_clear_bit(NODEFRAG, sk);
341
342 if (SOCK_RAW == sock->type) {
343 inet->inet_num = protocol;
344 if (IPPROTO_RAW == protocol)
345 inet_set_bit(HDRINCL, sk);
346 }
347
348 if (READ_ONCE(net->ipv4.sysctl_ip_no_pmtu_disc))
349 inet->pmtudisc = IP_PMTUDISC_DONT;
350 else
351 inet->pmtudisc = IP_PMTUDISC_WANT;
352
353 atomic_set(&inet->inet_id, 0);
354
355 sock_init_data(sock, sk);
356
357 sk->sk_destruct = inet_sock_destruct;
358 sk->sk_protocol = protocol;
359 sk->sk_backlog_rcv = sk->sk_prot->backlog_rcv;
360 sk->sk_txrehash = READ_ONCE(net->core.sysctl_txrehash);
361
362 inet->uc_ttl = -1;
363 inet_set_bit(MC_LOOP, sk);
364 inet->mc_ttl = 1;
365 inet_set_bit(MC_ALL, sk);
366 inet->mc_index = 0;
367 inet->mc_list = NULL;
368 inet->rcv_tos = 0;
369
370 if (inet->inet_num) {
 371 /* It assumes that any protocol which allows
 372 * the user to assign a number at socket
 373 * creation time automatically
 374 * shares that number.
 375 */
376 inet->inet_sport = htons(inet->inet_num);
377 /* Add to protocol hash chains. */
378 err = sk->sk_prot->hash(sk);
379 if (err)
380 goto out_sk_release;
381 }
382
383 if (sk->sk_prot->init) {
384 err = sk->sk_prot->init(sk);
385 if (err)
386 goto out_sk_release;
387 }
388
389 if (!kern) {
390 err = BPF_CGROUP_RUN_PROG_INET_SOCK(sk);
391 if (err)
392 goto out_sk_release;
393 }
394out:
395 return err;
396out_rcu_unlock:
397 rcu_read_unlock();
398 goto out;
399out_sk_release:
400 sk_common_release(sk);
401 sock->sk = NULL;
402 goto out;
403}
404
405
406/*
407 * The peer socket should always be NULL (or else). When we call this
408 * function we are destroying the object and from then on nobody
409 * should refer to it.
410 */
411int inet_release(struct socket *sock)
412{
413 struct sock *sk = sock->sk;
414
415 if (sk) {
416 long timeout;
417
418 if (!sk->sk_kern_sock)
419 BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk);
420
421 /* Applications forget to leave groups before exiting */
422 ip_mc_drop_socket(sk);
423
424 /* If linger is set, we don't return until the close
425 * is complete. Otherwise we return immediately. The
 426 * actual closing is done the same either way.
427 *
428 * If the close is due to the process exiting, we never
429 * linger..
430 */
431 timeout = 0;
432 if (sock_flag(sk, SOCK_LINGER) &&
433 !(current->flags & PF_EXITING))
434 timeout = sk->sk_lingertime;
435 sk->sk_prot->close(sk, timeout);
436 sock->sk = NULL;
437 }
438 return 0;
439}
440EXPORT_SYMBOL(inet_release);
441
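/*
 * Bind helper shared by inet_bind() and kernel callers that already have
 * the struct sock. Protocols with a private bind (e.g. raw sockets) are
 * dispatched directly; otherwise the cgroup BPF bind hook runs and then
 * __inet_bind() does the real work under the socket lock.
 */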
442int inet_bind_sk(struct sock *sk, struct sockaddr *uaddr, int addr_len)
443{
444 u32 flags = BIND_WITH_LOCK;
445 int err;
446
447 /* If the socket has its own bind function then use it. (RAW) */
448 if (sk->sk_prot->bind) {
449 return sk->sk_prot->bind(sk, uaddr, addr_len);
450 }
451 if (addr_len < sizeof(struct sockaddr_in))
452 return -EINVAL;
453
454 /* BPF prog is run before any checks are done so that if the prog
455 * changes context in a wrong way it will be caught.
456 */
457 err = BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, &addr_len,
458 CGROUP_INET4_BIND, &flags);
459 if (err)
460 return err;
461
462 return __inet_bind(sk, uaddr, addr_len, flags);
463}
464
465int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
466{
467 return inet_bind_sk(sock->sk, uaddr, addr_len);
468}
469EXPORT_SYMBOL(inet_bind);
470
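/*
 * Core bind implementation: validates the address family and the local
 * address, enforces the privileged-port capability check, then records
 * rcv_saddr/saddr and lets the protocol pick or verify the local port.
 */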
471int __inet_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len,
472 u32 flags)
473{
474 struct sockaddr_in *addr = (struct sockaddr_in *)uaddr;
475 struct inet_sock *inet = inet_sk(sk);
476 struct net *net = sock_net(sk);
477 unsigned short snum;
478 int chk_addr_ret;
479 u32 tb_id = RT_TABLE_LOCAL;
480 int err;
481
482 if (addr->sin_family != AF_INET) {
483 /* Compatibility games : accept AF_UNSPEC (mapped to AF_INET)
484 * only if s_addr is INADDR_ANY.
485 */
486 err = -EAFNOSUPPORT;
487 if (addr->sin_family != AF_UNSPEC ||
488 addr->sin_addr.s_addr != htonl(INADDR_ANY))
489 goto out;
490 }
491
492 tb_id = l3mdev_fib_table_by_index(net, sk->sk_bound_dev_if) ? : tb_id;
493 chk_addr_ret = inet_addr_type_table(net, addr->sin_addr.s_addr, tb_id);
494
 495 /* Not specified by any standard per se, however it breaks too
496 * many applications when removed. It is unfortunate since
497 * allowing applications to make a non-local bind solves
498 * several problems with systems using dynamic addressing.
499 * (ie. your servers still start up even if your ISDN link
500 * is temporarily down)
501 */
502 err = -EADDRNOTAVAIL;
503 if (!inet_addr_valid_or_nonlocal(net, inet, addr->sin_addr.s_addr,
504 chk_addr_ret))
505 goto out;
506
507 snum = ntohs(addr->sin_port);
508 err = -EACCES;
509 if (!(flags & BIND_NO_CAP_NET_BIND_SERVICE) &&
510 snum && inet_port_requires_bind_service(net, snum) &&
511 !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE))
512 goto out;
513
514 /* We keep a pair of addresses. rcv_saddr is the one
515 * used by hash lookups, and saddr is used for transmit.
516 *
517 * In the BSD API these are the same except where it
518 * would be illegal to use them (multicast/broadcast) in
519 * which case the sending device address is used.
520 */
521 if (flags & BIND_WITH_LOCK)
522 lock_sock(sk);
523
524 /* Check these errors (active socket, double bind). */
525 err = -EINVAL;
526 if (sk->sk_state != TCP_CLOSE || inet->inet_num)
527 goto out_release_sock;
528
529 inet->inet_rcv_saddr = inet->inet_saddr = addr->sin_addr.s_addr;
530 if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
531 inet->inet_saddr = 0; /* Use device */
532
533 /* Make sure we are allowed to bind here. */
534 if (snum || !(inet_test_bit(BIND_ADDRESS_NO_PORT, sk) ||
535 (flags & BIND_FORCE_ADDRESS_NO_PORT))) {
536 err = sk->sk_prot->get_port(sk, snum);
537 if (err) {
538 inet->inet_saddr = inet->inet_rcv_saddr = 0;
539 goto out_release_sock;
540 }
541 if (!(flags & BIND_FROM_BPF)) {
542 err = BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk);
543 if (err) {
544 inet->inet_saddr = inet->inet_rcv_saddr = 0;
545 if (sk->sk_prot->put_port)
546 sk->sk_prot->put_port(sk);
547 goto out_release_sock;
548 }
549 }
550 }
551
552 if (inet->inet_rcv_saddr)
553 sk->sk_userlocks |= SOCK_BINDADDR_LOCK;
554 if (snum)
555 sk->sk_userlocks |= SOCK_BINDPORT_LOCK;
556 inet->inet_sport = htons(inet->inet_num);
557 inet->inet_daddr = 0;
558 inet->inet_dport = 0;
559 sk_dst_reset(sk);
560 err = 0;
561out_release_sock:
562 if (flags & BIND_WITH_LOCK)
563 release_sock(sk);
564out:
565 return err;
566}
567
568int inet_dgram_connect(struct socket *sock, struct sockaddr *uaddr,
569 int addr_len, int flags)
570{
571 struct sock *sk = sock->sk;
572 const struct proto *prot;
573 int err;
574
575 if (addr_len < sizeof(uaddr->sa_family))
576 return -EINVAL;
577
578 /* IPV6_ADDRFORM can change sk->sk_prot under us. */
579 prot = READ_ONCE(sk->sk_prot);
580
581 if (uaddr->sa_family == AF_UNSPEC)
582 return prot->disconnect(sk, flags);
583
584 if (BPF_CGROUP_PRE_CONNECT_ENABLED(sk)) {
585 err = prot->pre_connect(sk, uaddr, addr_len);
586 if (err)
587 return err;
588 }
589
590 if (data_race(!inet_sk(sk)->inet_num) && inet_autobind(sk))
591 return -EAGAIN;
592 return prot->connect(sk, uaddr, addr_len);
593}
594EXPORT_SYMBOL(inet_dgram_connect);
595
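/*
 * Sleep until a pending connect leaves SYN_SENT/SYN_RECV, a signal
 * arrives or the timeout expires. The socket lock is dropped while
 * sleeping; the remaining timeout is returned (0 means it expired).
 */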
596static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias)
597{
598 DEFINE_WAIT_FUNC(wait, woken_wake_function);
599
600 add_wait_queue(sk_sleep(sk), &wait);
601 sk->sk_write_pending += writebias;
602
603 /* Basic assumption: if someone sets sk->sk_err, he _must_
604 * change state of the socket from TCP_SYN_*.
 605 * Connect() does not allow getting error notifications
606 * without closing the socket.
607 */
608 while ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
609 release_sock(sk);
610 timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
611 lock_sock(sk);
612 if (signal_pending(current) || !timeo)
613 break;
614 }
615 remove_wait_queue(sk_sleep(sk), &wait);
616 sk->sk_write_pending -= writebias;
617 return timeo;
618}
619
620/*
621 * Connect to a remote host. There is regrettably still a little
622 * TCP 'magic' in here.
623 */
624int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
625 int addr_len, int flags, int is_sendmsg)
626{
627 struct sock *sk = sock->sk;
628 int err;
629 long timeo;
630
631 /*
632 * uaddr can be NULL and addr_len can be 0 if:
633 * sk is a TCP fastopen active socket and
634 * TCP_FASTOPEN_CONNECT sockopt is set and
635 * we already have a valid cookie for this socket.
636 * In this case, user can call write() after connect().
637 * write() will invoke tcp_sendmsg_fastopen() which calls
638 * __inet_stream_connect().
639 */
640 if (uaddr) {
641 if (addr_len < sizeof(uaddr->sa_family))
642 return -EINVAL;
643
644 if (uaddr->sa_family == AF_UNSPEC) {
645 sk->sk_disconnects++;
646 err = sk->sk_prot->disconnect(sk, flags);
647 sock->state = err ? SS_DISCONNECTING : SS_UNCONNECTED;
648 goto out;
649 }
650 }
651
652 switch (sock->state) {
653 default:
654 err = -EINVAL;
655 goto out;
656 case SS_CONNECTED:
657 err = -EISCONN;
658 goto out;
659 case SS_CONNECTING:
660 if (inet_test_bit(DEFER_CONNECT, sk))
661 err = is_sendmsg ? -EINPROGRESS : -EISCONN;
662 else
663 err = -EALREADY;
664 /* Fall out of switch with err, set for this state */
665 break;
666 case SS_UNCONNECTED:
667 err = -EISCONN;
668 if (sk->sk_state != TCP_CLOSE)
669 goto out;
670
671 if (BPF_CGROUP_PRE_CONNECT_ENABLED(sk)) {
672 err = sk->sk_prot->pre_connect(sk, uaddr, addr_len);
673 if (err)
674 goto out;
675 }
676
677 err = sk->sk_prot->connect(sk, uaddr, addr_len);
678 if (err < 0)
679 goto out;
680
681 sock->state = SS_CONNECTING;
682
683 if (!err && inet_test_bit(DEFER_CONNECT, sk))
684 goto out;
685
686 /* Just entered SS_CONNECTING state; the only
687 * difference is that return value in non-blocking
688 * case is EINPROGRESS, rather than EALREADY.
689 */
690 err = -EINPROGRESS;
691 break;
692 }
693
694 timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
695
696 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
697 int writebias = (sk->sk_protocol == IPPROTO_TCP) &&
698 tcp_sk(sk)->fastopen_req &&
699 tcp_sk(sk)->fastopen_req->data ? 1 : 0;
700 int dis = sk->sk_disconnects;
701
702 /* Error code is set above */
703 if (!timeo || !inet_wait_for_connect(sk, timeo, writebias))
704 goto out;
705
706 err = sock_intr_errno(timeo);
707 if (signal_pending(current))
708 goto out;
709
710 if (dis != sk->sk_disconnects) {
711 err = -EPIPE;
712 goto out;
713 }
714 }
715
716 /* Connection was closed by RST, timeout, ICMP error
717 * or another process disconnected us.
718 */
719 if (sk->sk_state == TCP_CLOSE)
720 goto sock_error;
721
722 /* sk->sk_err may be not zero now, if RECVERR was ordered by user
723 * and error was received after socket entered established state.
724 * Hence, it is handled normally after connect() return successfully.
725 */
726
727 sock->state = SS_CONNECTED;
728 err = 0;
729out:
730 return err;
731
732sock_error:
733 err = sock_error(sk) ? : -ECONNABORTED;
734 sock->state = SS_UNCONNECTED;
735 sk->sk_disconnects++;
736 if (sk->sk_prot->disconnect(sk, flags))
737 sock->state = SS_DISCONNECTING;
738 goto out;
739}
740EXPORT_SYMBOL(__inet_stream_connect);
741
742int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
743 int addr_len, int flags)
744{
745 int err;
746
747 lock_sock(sock->sk);
748 err = __inet_stream_connect(sock, uaddr, addr_len, flags, 0);
749 release_sock(sock->sk);
750 return err;
751}
752EXPORT_SYMBOL(inet_stream_connect);
753
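/*
 * Graft a freshly accepted sock onto the new socket, propagate the
 * zerocopy flag and mark the socket connected. The caller holds the
 * lock on @newsk.
 */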
754void __inet_accept(struct socket *sock, struct socket *newsock, struct sock *newsk)
755{
756 sock_rps_record_flow(newsk);
757 WARN_ON(!((1 << newsk->sk_state) &
758 (TCPF_ESTABLISHED | TCPF_SYN_RECV |
759 TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2 |
760 TCPF_CLOSING | TCPF_CLOSE_WAIT |
761 TCPF_CLOSE)));
762
763 if (test_bit(SOCK_SUPPORT_ZC, &sock->flags))
764 set_bit(SOCK_SUPPORT_ZC, &newsock->flags);
765 sock_graft(newsk, newsock);
766
767 newsock->state = SS_CONNECTED;
768}
769
770/*
771 * Accept a pending connection. The TCP layer now gives BSD semantics.
772 */
773
774int inet_accept(struct socket *sock, struct socket *newsock,
775 struct proto_accept_arg *arg)
776{
777 struct sock *sk1 = sock->sk, *sk2;
778
779 /* IPV6_ADDRFORM can change sk->sk_prot under us. */
780 arg->err = -EINVAL;
781 sk2 = READ_ONCE(sk1->sk_prot)->accept(sk1, arg);
782 if (!sk2)
783 return arg->err;
784
785 lock_sock(sk2);
786 __inet_accept(sock, newsock, sk2);
787 release_sock(sk2);
788 return 0;
789}
790EXPORT_SYMBOL(inet_accept);
791
792/*
793 * This does both peername and sockname.
794 */
795int inet_getname(struct socket *sock, struct sockaddr *uaddr,
796 int peer)
797{
798 struct sock *sk = sock->sk;
799 struct inet_sock *inet = inet_sk(sk);
800 DECLARE_SOCKADDR(struct sockaddr_in *, sin, uaddr);
801 int sin_addr_len = sizeof(*sin);
802
803 sin->sin_family = AF_INET;
804 lock_sock(sk);
805 if (peer) {
806 if (!inet->inet_dport ||
807 (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)) &&
808 peer == 1)) {
809 release_sock(sk);
810 return -ENOTCONN;
811 }
812 sin->sin_port = inet->inet_dport;
813 sin->sin_addr.s_addr = inet->inet_daddr;
814 BPF_CGROUP_RUN_SA_PROG(sk, (struct sockaddr *)sin, &sin_addr_len,
815 CGROUP_INET4_GETPEERNAME);
816 } else {
817 __be32 addr = inet->inet_rcv_saddr;
818 if (!addr)
819 addr = inet->inet_saddr;
820 sin->sin_port = inet->inet_sport;
821 sin->sin_addr.s_addr = addr;
822 BPF_CGROUP_RUN_SA_PROG(sk, (struct sockaddr *)sin, &sin_addr_len,
823 CGROUP_INET4_GETSOCKNAME);
824 }
825 release_sock(sk);
826 memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
827 return sin_addr_len;
828}
829EXPORT_SYMBOL(inet_getname);
830
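/*
 * Record the RPS flow and, unless the protocol opts out, autobind a
 * socket that has no local port yet before sending.
 */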
831int inet_send_prepare(struct sock *sk)
832{
833 sock_rps_record_flow(sk);
834
835 /* We may need to bind the socket. */
836 if (data_race(!inet_sk(sk)->inet_num) && !sk->sk_prot->no_autobind &&
837 inet_autobind(sk))
838 return -EAGAIN;
839
840 return 0;
841}
842EXPORT_SYMBOL_GPL(inet_send_prepare);
843
844int inet_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
845{
846 struct sock *sk = sock->sk;
847
848 if (unlikely(inet_send_prepare(sk)))
849 return -EAGAIN;
850
851 return INDIRECT_CALL_2(sk->sk_prot->sendmsg, tcp_sendmsg, udp_sendmsg,
852 sk, msg, size);
853}
854EXPORT_SYMBOL(inet_sendmsg);
855
856void inet_splice_eof(struct socket *sock)
857{
858 const struct proto *prot;
859 struct sock *sk = sock->sk;
860
861 if (unlikely(inet_send_prepare(sk)))
862 return;
863
864 /* IPV6_ADDRFORM can change sk->sk_prot under us. */
865 prot = READ_ONCE(sk->sk_prot);
866 if (prot->splice_eof)
867 prot->splice_eof(sock);
868}
869EXPORT_SYMBOL_GPL(inet_splice_eof);
870
871INDIRECT_CALLABLE_DECLARE(int udp_recvmsg(struct sock *, struct msghdr *,
872 size_t, int, int *));
873int inet_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
874 int flags)
875{
876 struct sock *sk = sock->sk;
877 int addr_len = 0;
878 int err;
879
880 if (likely(!(flags & MSG_ERRQUEUE)))
881 sock_rps_record_flow(sk);
882
883 err = INDIRECT_CALL_2(sk->sk_prot->recvmsg, tcp_recvmsg, udp_recvmsg,
884 sk, msg, size, flags, &addr_len);
885 if (err >= 0)
886 msg->msg_namelen = addr_len;
887 return err;
888}
889EXPORT_SYMBOL(inet_recvmsg);
890
891int inet_shutdown(struct socket *sock, int how)
892{
893 struct sock *sk = sock->sk;
894 int err = 0;
895
896 /* This should really check to make sure
897 * the socket is a TCP socket. (WHY AC...)
898 */
 899 how++; /* maps 0->1, 1->2, 2->3; this has the advantage that
 900 bit 1 then means shut down receives and
 901 bit 2 shut down sends */
902 if ((how & ~SHUTDOWN_MASK) || !how) /* MAXINT->0 */
903 return -EINVAL;
904
905 lock_sock(sk);
906 if (sock->state == SS_CONNECTING) {
907 if ((1 << sk->sk_state) &
908 (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_CLOSE))
909 sock->state = SS_DISCONNECTING;
910 else
911 sock->state = SS_CONNECTED;
912 }
913
914 switch (sk->sk_state) {
915 case TCP_CLOSE:
916 err = -ENOTCONN;
917 /* Hack to wake up other listeners, who can poll for
918 EPOLLHUP, even on eg. unconnected UDP sockets -- RR */
919 fallthrough;
920 default:
921 WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | how);
922 if (sk->sk_prot->shutdown)
923 sk->sk_prot->shutdown(sk, how);
924 break;
925
926 /* Remaining two branches are temporary solution for missing
927 * close() in multithreaded environment. It is _not_ a good idea,
928 * but we have no choice until close() is repaired at VFS level.
929 */
930 case TCP_LISTEN:
931 if (!(how & RCV_SHUTDOWN))
932 break;
933 fallthrough;
934 case TCP_SYN_SENT:
935 err = sk->sk_prot->disconnect(sk, O_NONBLOCK);
936 sock->state = err ? SS_DISCONNECTING : SS_UNCONNECTED;
937 break;
938 }
939
940 /* Wake up anyone sleeping in poll. */
941 sk->sk_state_change(sk);
942 release_sock(sk);
943 return err;
944}
945EXPORT_SYMBOL(inet_shutdown);
946
947/*
948 * ioctl() calls you can issue on an INET socket. Most of these are
949 * device configuration and stuff and very rarely used. Some ioctls
950 * pass on to the socket itself.
951 *
952 * NOTE: I like the idea of a module for the config stuff. ie ifconfig
 953 * loads the devconfigure module, does its configuring and unloads it.
954 * There's a good 20K of config code hanging around the kernel.
955 */
956
957int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
958{
959 struct sock *sk = sock->sk;
960 int err = 0;
961 struct net *net = sock_net(sk);
962 void __user *p = (void __user *)arg;
963 struct ifreq ifr;
964 struct rtentry rt;
965
966 switch (cmd) {
967 case SIOCADDRT:
968 case SIOCDELRT:
969 if (copy_from_user(&rt, p, sizeof(struct rtentry)))
970 return -EFAULT;
971 err = ip_rt_ioctl(net, cmd, &rt);
972 break;
973 case SIOCRTMSG:
974 err = -EINVAL;
975 break;
976 case SIOCDARP:
977 case SIOCGARP:
978 case SIOCSARP:
979 err = arp_ioctl(net, cmd, (void __user *)arg);
980 break;
981 case SIOCGIFADDR:
982 case SIOCGIFBRDADDR:
983 case SIOCGIFNETMASK:
984 case SIOCGIFDSTADDR:
985 case SIOCGIFPFLAGS:
986 if (get_user_ifreq(&ifr, NULL, p))
987 return -EFAULT;
988 err = devinet_ioctl(net, cmd, &ifr);
989 if (!err && put_user_ifreq(&ifr, p))
990 err = -EFAULT;
991 break;
992
993 case SIOCSIFADDR:
994 case SIOCSIFBRDADDR:
995 case SIOCSIFNETMASK:
996 case SIOCSIFDSTADDR:
997 case SIOCSIFPFLAGS:
998 case SIOCSIFFLAGS:
999 if (get_user_ifreq(&ifr, NULL, p))
1000 return -EFAULT;
1001 err = devinet_ioctl(net, cmd, &ifr);
1002 break;
1003 default:
1004 if (sk->sk_prot->ioctl)
1005 err = sk_ioctl(sk, cmd, (void __user *)arg);
1006 else
1007 err = -ENOIOCTLCMD;
1008 break;
1009 }
1010 return err;
1011}
1012EXPORT_SYMBOL(inet_ioctl);
1013
1014#ifdef CONFIG_COMPAT
1015static int inet_compat_routing_ioctl(struct sock *sk, unsigned int cmd,
1016 struct compat_rtentry __user *ur)
1017{
1018 compat_uptr_t rtdev;
1019 struct rtentry rt;
1020
1021 if (copy_from_user(&rt.rt_dst, &ur->rt_dst,
1022 3 * sizeof(struct sockaddr)) ||
1023 get_user(rt.rt_flags, &ur->rt_flags) ||
1024 get_user(rt.rt_metric, &ur->rt_metric) ||
1025 get_user(rt.rt_mtu, &ur->rt_mtu) ||
1026 get_user(rt.rt_window, &ur->rt_window) ||
1027 get_user(rt.rt_irtt, &ur->rt_irtt) ||
1028 get_user(rtdev, &ur->rt_dev))
1029 return -EFAULT;
1030
1031 rt.rt_dev = compat_ptr(rtdev);
1032 return ip_rt_ioctl(sock_net(sk), cmd, &rt);
1033}
1034
1035static int inet_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1036{
1037 void __user *argp = compat_ptr(arg);
1038 struct sock *sk = sock->sk;
1039
1040 switch (cmd) {
1041 case SIOCADDRT:
1042 case SIOCDELRT:
1043 return inet_compat_routing_ioctl(sk, cmd, argp);
1044 default:
1045 if (!sk->sk_prot->compat_ioctl)
1046 return -ENOIOCTLCMD;
1047 return sk->sk_prot->compat_ioctl(sk, cmd, arg);
1048 }
1049}
1050#endif /* CONFIG_COMPAT */
1051
1052const struct proto_ops inet_stream_ops = {
1053 .family = PF_INET,
1054 .owner = THIS_MODULE,
1055 .release = inet_release,
1056 .bind = inet_bind,
1057 .connect = inet_stream_connect,
1058 .socketpair = sock_no_socketpair,
1059 .accept = inet_accept,
1060 .getname = inet_getname,
1061 .poll = tcp_poll,
1062 .ioctl = inet_ioctl,
1063 .gettstamp = sock_gettstamp,
1064 .listen = inet_listen,
1065 .shutdown = inet_shutdown,
1066 .setsockopt = sock_common_setsockopt,
1067 .getsockopt = sock_common_getsockopt,
1068 .sendmsg = inet_sendmsg,
1069 .recvmsg = inet_recvmsg,
1070#ifdef CONFIG_MMU
1071 .mmap = tcp_mmap,
1072#endif
1073 .splice_eof = inet_splice_eof,
1074 .splice_read = tcp_splice_read,
1075 .set_peek_off = sk_set_peek_off,
1076 .read_sock = tcp_read_sock,
1077 .read_skb = tcp_read_skb,
1078 .sendmsg_locked = tcp_sendmsg_locked,
1079 .peek_len = tcp_peek_len,
1080#ifdef CONFIG_COMPAT
1081 .compat_ioctl = inet_compat_ioctl,
1082#endif
1083 .set_rcvlowat = tcp_set_rcvlowat,
1084};
1085EXPORT_SYMBOL(inet_stream_ops);
1086
1087const struct proto_ops inet_dgram_ops = {
1088 .family = PF_INET,
1089 .owner = THIS_MODULE,
1090 .release = inet_release,
1091 .bind = inet_bind,
1092 .connect = inet_dgram_connect,
1093 .socketpair = sock_no_socketpair,
1094 .accept = sock_no_accept,
1095 .getname = inet_getname,
1096 .poll = udp_poll,
1097 .ioctl = inet_ioctl,
1098 .gettstamp = sock_gettstamp,
1099 .listen = sock_no_listen,
1100 .shutdown = inet_shutdown,
1101 .setsockopt = sock_common_setsockopt,
1102 .getsockopt = sock_common_getsockopt,
1103 .sendmsg = inet_sendmsg,
1104 .read_skb = udp_read_skb,
1105 .recvmsg = inet_recvmsg,
1106 .mmap = sock_no_mmap,
1107 .splice_eof = inet_splice_eof,
1108 .set_peek_off = udp_set_peek_off,
1109#ifdef CONFIG_COMPAT
1110 .compat_ioctl = inet_compat_ioctl,
1111#endif
1112};
1113EXPORT_SYMBOL(inet_dgram_ops);
1114
1115/*
1116 * For SOCK_RAW sockets; should be the same as inet_dgram_ops but without
1117 * udp_poll
1118 */
1119static const struct proto_ops inet_sockraw_ops = {
1120 .family = PF_INET,
1121 .owner = THIS_MODULE,
1122 .release = inet_release,
1123 .bind = inet_bind,
1124 .connect = inet_dgram_connect,
1125 .socketpair = sock_no_socketpair,
1126 .accept = sock_no_accept,
1127 .getname = inet_getname,
1128 .poll = datagram_poll,
1129 .ioctl = inet_ioctl,
1130 .gettstamp = sock_gettstamp,
1131 .listen = sock_no_listen,
1132 .shutdown = inet_shutdown,
1133 .setsockopt = sock_common_setsockopt,
1134 .getsockopt = sock_common_getsockopt,
1135 .sendmsg = inet_sendmsg,
1136 .recvmsg = inet_recvmsg,
1137 .mmap = sock_no_mmap,
1138 .splice_eof = inet_splice_eof,
1139#ifdef CONFIG_COMPAT
1140 .compat_ioctl = inet_compat_ioctl,
1141#endif
1142};
1143
1144static const struct net_proto_family inet_family_ops = {
1145 .family = PF_INET,
1146 .create = inet_create,
1147 .owner = THIS_MODULE,
1148};
1149
1150/* Upon startup we insert all the elements in inetsw_array[] into
1151 * the linked list inetsw.
1152 */
1153static struct inet_protosw inetsw_array[] =
1154{
1155 {
1156 .type = SOCK_STREAM,
1157 .protocol = IPPROTO_TCP,
1158 .prot = &tcp_prot,
1159 .ops = &inet_stream_ops,
1160 .flags = INET_PROTOSW_PERMANENT |
1161 INET_PROTOSW_ICSK,
1162 },
1163
1164 {
1165 .type = SOCK_DGRAM,
1166 .protocol = IPPROTO_UDP,
1167 .prot = &udp_prot,
1168 .ops = &inet_dgram_ops,
1169 .flags = INET_PROTOSW_PERMANENT,
1170 },
1171
1172 {
1173 .type = SOCK_DGRAM,
1174 .protocol = IPPROTO_ICMP,
1175 .prot = &ping_prot,
1176 .ops = &inet_sockraw_ops,
1177 .flags = INET_PROTOSW_REUSE,
1178 },
1179
1180 {
1181 .type = SOCK_RAW,
1182 .protocol = IPPROTO_IP, /* wild card */
1183 .prot = &raw_prot,
1184 .ops = &inet_sockraw_ops,
1185 .flags = INET_PROTOSW_REUSE,
1186 }
1187};
1188
1189#define INETSW_ARRAY_LEN ARRAY_SIZE(inetsw_array)
1190
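/*
 * Register a protocol switch entry. Permanent entries keep their place
 * at the head of the per-type list and cannot be overridden.
 */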
1191void inet_register_protosw(struct inet_protosw *p)
1192{
1193 struct list_head *lh;
1194 struct inet_protosw *answer;
1195 int protocol = p->protocol;
1196 struct list_head *last_perm;
1197
1198 spin_lock_bh(&inetsw_lock);
1199
1200 if (p->type >= SOCK_MAX)
1201 goto out_illegal;
1202
1203 /* If we are trying to override a permanent protocol, bail. */
1204 last_perm = &inetsw[p->type];
1205 list_for_each(lh, &inetsw[p->type]) {
1206 answer = list_entry(lh, struct inet_protosw, list);
1207 /* Check only the non-wild match. */
1208 if ((INET_PROTOSW_PERMANENT & answer->flags) == 0)
1209 break;
1210 if (protocol == answer->protocol)
1211 goto out_permanent;
1212 last_perm = lh;
1213 }
1214
1215 /* Add the new entry after the last permanent entry if any, so that
1216 * the new entry does not override a permanent entry when matched with
1217 * a wild-card protocol. But it is allowed to override any existing
1218 * non-permanent entry. This means that when we remove this entry, the
1219 * system automatically returns to the old behavior.
1220 */
1221 list_add_rcu(&p->list, last_perm);
1222out:
1223 spin_unlock_bh(&inetsw_lock);
1224
1225 return;
1226
1227out_permanent:
1228 pr_err("Attempt to override permanent protocol %d\n", protocol);
1229 goto out;
1230
1231out_illegal:
1232 pr_err("Ignoring attempt to register invalid socket type %d\n",
1233 p->type);
1234 goto out;
1235}
1236EXPORT_SYMBOL(inet_register_protosw);
1237
1238void inet_unregister_protosw(struct inet_protosw *p)
1239{
1240 if (INET_PROTOSW_PERMANENT & p->flags) {
1241 pr_err("Attempt to unregister permanent protocol %d\n",
1242 p->protocol);
1243 } else {
1244 spin_lock_bh(&inetsw_lock);
1245 list_del_rcu(&p->list);
1246 spin_unlock_bh(&inetsw_lock);
1247
1248 synchronize_net();
1249 }
1250}
1251EXPORT_SYMBOL(inet_unregister_protosw);
1252
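/*
 * Re-route the connection and, if the preferred source address changed,
 * rehash the socket under the new address. Only used from
 * inet_sk_rebuild_header() when the ip_dynaddr sysctl permits it.
 */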
1253static int inet_sk_reselect_saddr(struct sock *sk)
1254{
1255 struct inet_sock *inet = inet_sk(sk);
1256 __be32 old_saddr = inet->inet_saddr;
1257 __be32 daddr = inet->inet_daddr;
1258 struct flowi4 *fl4;
1259 struct rtable *rt;
1260 __be32 new_saddr;
1261 struct ip_options_rcu *inet_opt;
1262 int err;
1263
1264 inet_opt = rcu_dereference_protected(inet->inet_opt,
1265 lockdep_sock_is_held(sk));
1266 if (inet_opt && inet_opt->opt.srr)
1267 daddr = inet_opt->opt.faddr;
1268
1269 /* Query new route. */
1270 fl4 = &inet->cork.fl.u.ip4;
1271 rt = ip_route_connect(fl4, daddr, 0, sk->sk_bound_dev_if,
1272 sk->sk_protocol, inet->inet_sport,
1273 inet->inet_dport, sk);
1274 if (IS_ERR(rt))
1275 return PTR_ERR(rt);
1276
1277 new_saddr = fl4->saddr;
1278
1279 if (new_saddr == old_saddr) {
1280 sk_setup_caps(sk, &rt->dst);
1281 return 0;
1282 }
1283
1284 err = inet_bhash2_update_saddr(sk, &new_saddr, AF_INET);
1285 if (err) {
1286 ip_rt_put(rt);
1287 return err;
1288 }
1289
1290 sk_setup_caps(sk, &rt->dst);
1291
1292 if (READ_ONCE(sock_net(sk)->ipv4.sysctl_ip_dynaddr) > 1) {
1293 pr_info("%s(): shifting inet->saddr from %pI4 to %pI4\n",
1294 __func__, &old_saddr, &new_saddr);
1295 }
1296
1297 /*
1298 * XXX The only one ugly spot where we need to
1299 * XXX really change the sockets identity after
1300 * XXX it has entered the hashes. -DaveM
1301 *
1302 * Besides that, it does not check for connection
1303 * uniqueness. Wait for troubles.
1304 */
1305 return __sk_prot_rehash(sk);
1306}
1307
1308int inet_sk_rebuild_header(struct sock *sk)
1309{
1310 struct rtable *rt = dst_rtable(__sk_dst_check(sk, 0));
1311 struct inet_sock *inet = inet_sk(sk);
1312 __be32 daddr;
1313 struct ip_options_rcu *inet_opt;
1314 struct flowi4 *fl4;
1315 int err;
1316
1317 /* Route is OK, nothing to do. */
1318 if (rt)
1319 return 0;
1320
1321 /* Reroute. */
1322 rcu_read_lock();
1323 inet_opt = rcu_dereference(inet->inet_opt);
1324 daddr = inet->inet_daddr;
1325 if (inet_opt && inet_opt->opt.srr)
1326 daddr = inet_opt->opt.faddr;
1327 rcu_read_unlock();
1328 fl4 = &inet->cork.fl.u.ip4;
1329 rt = ip_route_output_ports(sock_net(sk), fl4, sk, daddr, inet->inet_saddr,
1330 inet->inet_dport, inet->inet_sport,
1331 sk->sk_protocol, ip_sock_rt_tos(sk),
1332 sk->sk_bound_dev_if);
1333 if (!IS_ERR(rt)) {
1334 err = 0;
1335 sk_setup_caps(sk, &rt->dst);
1336 } else {
1337 err = PTR_ERR(rt);
1338
1339 /* Routing failed... */
1340 sk->sk_route_caps = 0;
1341 /*
 1342 * Other protocols have to map their equivalent state to TCP_SYN_SENT.
1343 * DCCP maps its DCCP_REQUESTING state to TCP_SYN_SENT. -acme
1344 */
1345 if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_ip_dynaddr) ||
1346 sk->sk_state != TCP_SYN_SENT ||
1347 (sk->sk_userlocks & SOCK_BINDADDR_LOCK) ||
1348 (err = inet_sk_reselect_saddr(sk)) != 0)
1349 WRITE_ONCE(sk->sk_err_soft, -err);
1350 }
1351
1352 return err;
1353}
1354EXPORT_SYMBOL(inet_sk_rebuild_header);
1355
1356void inet_sk_set_state(struct sock *sk, int state)
1357{
1358 trace_inet_sock_set_state(sk, sk->sk_state, state);
1359 sk->sk_state = state;
1360}
1361EXPORT_SYMBOL(inet_sk_set_state);
1362
1363void inet_sk_state_store(struct sock *sk, int newstate)
1364{
1365 trace_inet_sock_set_state(sk, sk->sk_state, newstate);
1366 smp_store_release(&sk->sk_state, newstate);
1367}
1368
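/*
 * GSO segmentation for IPv4: pull the IP header, hand the payload to the
 * per-protocol gso_segment callback, then rebuild id, frag_off, tot_len
 * and the header checksum on every resulting segment.
 */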
1369struct sk_buff *inet_gso_segment(struct sk_buff *skb,
1370 netdev_features_t features)
1371{
1372 bool udpfrag = false, fixedid = false, gso_partial, encap;
1373 struct sk_buff *segs = ERR_PTR(-EINVAL);
1374 const struct net_offload *ops;
1375 unsigned int offset = 0;
1376 struct iphdr *iph;
1377 int proto, tot_len;
1378 int nhoff;
1379 int ihl;
1380 int id;
1381
1382 skb_reset_network_header(skb);
1383 nhoff = skb_network_header(skb) - skb_mac_header(skb);
1384 if (unlikely(!pskb_may_pull(skb, sizeof(*iph))))
1385 goto out;
1386
1387 iph = ip_hdr(skb);
1388 ihl = iph->ihl * 4;
1389 if (ihl < sizeof(*iph))
1390 goto out;
1391
1392 id = ntohs(iph->id);
1393 proto = iph->protocol;
1394
1395 /* Warning: after this point, iph might be no longer valid */
1396 if (unlikely(!pskb_may_pull(skb, ihl)))
1397 goto out;
1398 __skb_pull(skb, ihl);
1399
1400 encap = SKB_GSO_CB(skb)->encap_level > 0;
1401 if (encap)
1402 features &= skb->dev->hw_enc_features;
1403 SKB_GSO_CB(skb)->encap_level += ihl;
1404
1405 skb_reset_transport_header(skb);
1406
1407 segs = ERR_PTR(-EPROTONOSUPPORT);
1408
1409 if (!skb->encapsulation || encap) {
1410 udpfrag = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP);
1411 fixedid = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID);
1412
1413 /* fixed ID is invalid if DF bit is not set */
1414 if (fixedid && !(ip_hdr(skb)->frag_off & htons(IP_DF)))
1415 goto out;
1416 }
1417
1418 ops = rcu_dereference(inet_offloads[proto]);
1419 if (likely(ops && ops->callbacks.gso_segment)) {
1420 segs = ops->callbacks.gso_segment(skb, features);
1421 if (!segs)
1422 skb->network_header = skb_mac_header(skb) + nhoff - skb->head;
1423 }
1424
1425 if (IS_ERR_OR_NULL(segs))
1426 goto out;
1427
1428 gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);
1429
1430 skb = segs;
1431 do {
1432 iph = (struct iphdr *)(skb_mac_header(skb) + nhoff);
1433 if (udpfrag) {
1434 iph->frag_off = htons(offset >> 3);
1435 if (skb->next)
1436 iph->frag_off |= htons(IP_MF);
1437 offset += skb->len - nhoff - ihl;
1438 tot_len = skb->len - nhoff;
1439 } else if (skb_is_gso(skb)) {
1440 if (!fixedid) {
1441 iph->id = htons(id);
1442 id += skb_shinfo(skb)->gso_segs;
1443 }
1444
1445 if (gso_partial)
1446 tot_len = skb_shinfo(skb)->gso_size +
1447 SKB_GSO_CB(skb)->data_offset +
1448 skb->head - (unsigned char *)iph;
1449 else
1450 tot_len = skb->len - nhoff;
1451 } else {
1452 if (!fixedid)
1453 iph->id = htons(id++);
1454 tot_len = skb->len - nhoff;
1455 }
1456 iph->tot_len = htons(tot_len);
1457 ip_send_check(iph);
1458 if (encap)
1459 skb_reset_inner_headers(skb);
1460 skb->network_header = (u8 *)iph - skb->head;
1461 skb_reset_mac_len(skb);
1462 } while ((skb = skb->next));
1463
1464out:
1465 return segs;
1466}
1467
1468static struct sk_buff *ipip_gso_segment(struct sk_buff *skb,
1469 netdev_features_t features)
1470{
1471 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_IPXIP4))
1472 return ERR_PTR(-EINVAL);
1473
1474 return inet_gso_segment(skb, features);
1475}
1476
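/*
 * GRO receive for IPv4: only plain 20-byte headers with a valid checksum
 * and no fragmentation are aggregated; flows are matched on protocol and
 * addresses before the per-protocol gro_receive callback is invoked.
 */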
1477struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff *skb)
1478{
1479 const struct net_offload *ops;
1480 struct sk_buff *pp = NULL;
1481 const struct iphdr *iph;
1482 struct sk_buff *p;
1483 unsigned int hlen;
1484 unsigned int off;
1485 int flush = 1;
1486 int proto;
1487
1488 off = skb_gro_offset(skb);
1489 hlen = off + sizeof(*iph);
1490 iph = skb_gro_header(skb, hlen, off);
1491 if (unlikely(!iph))
1492 goto out;
1493
1494 proto = iph->protocol;
1495
1496 ops = rcu_dereference(inet_offloads[proto]);
1497 if (!ops || !ops->callbacks.gro_receive)
1498 goto out;
1499
1500 if (*(u8 *)iph != 0x45)
1501 goto out;
1502
1503 if (ip_is_fragment(iph))
1504 goto out;
1505
1506 if (unlikely(ip_fast_csum((u8 *)iph, 5)))
1507 goto out;
1508
1509 NAPI_GRO_CB(skb)->proto = proto;
1510 flush = (u16)((ntohl(*(__be32 *)iph) ^ skb_gro_len(skb)) | (ntohl(*(__be32 *)&iph->id) & ~IP_DF));
1511
1512 list_for_each_entry(p, head, list) {
1513 struct iphdr *iph2;
1514
1515 if (!NAPI_GRO_CB(p)->same_flow)
1516 continue;
1517
1518 iph2 = (struct iphdr *)(p->data + off);
1519 /* The above works because, with the exception of the top
 1520 * (innermost) layer, we only aggregate pkts with the same
1521 * hdr length so all the hdrs we'll need to verify will start
1522 * at the same offset.
1523 */
1524 if ((iph->protocol ^ iph2->protocol) |
1525 ((__force u32)iph->saddr ^ (__force u32)iph2->saddr) |
1526 ((__force u32)iph->daddr ^ (__force u32)iph2->daddr)) {
1527 NAPI_GRO_CB(p)->same_flow = 0;
1528 continue;
1529 }
1530 }
1531
1532 NAPI_GRO_CB(skb)->flush |= flush;
1533 NAPI_GRO_CB(skb)->network_offsets[NAPI_GRO_CB(skb)->encap_mark] = off;
1534
1535 /* Note : No need to call skb_gro_postpull_rcsum() here,
 1536 * as we already checked that the checksum over the ipv4 header was 0
1537 */
1538 skb_gro_pull(skb, sizeof(*iph));
1539 skb_set_transport_header(skb, skb_gro_offset(skb));
1540
1541 pp = indirect_call_gro_receive(tcp4_gro_receive, udp4_gro_receive,
1542 ops->callbacks.gro_receive, head, skb);
1543
1544out:
1545 skb_gro_flush_final(skb, pp, flush);
1546
1547 return pp;
1548}
1549
1550static struct sk_buff *ipip_gro_receive(struct list_head *head,
1551 struct sk_buff *skb)
1552{
1553 if (NAPI_GRO_CB(skb)->encap_mark) {
1554 NAPI_GRO_CB(skb)->flush = 1;
1555 return NULL;
1556 }
1557
1558 NAPI_GRO_CB(skb)->encap_mark = 1;
1559
1560 return inet_gro_receive(head, skb);
1561}
1562
1563#define SECONDS_PER_DAY 86400
1564
1565/* inet_current_timestamp - Return IP network timestamp
1566 *
1567 * Return milliseconds since midnight in network byte order.
1568 */
1569__be32 inet_current_timestamp(void)
1570{
1571 u32 secs;
1572 u32 msecs;
1573 struct timespec64 ts;
1574
1575 ktime_get_real_ts64(&ts);
1576
1577 /* Get secs since midnight. */
1578 (void)div_u64_rem(ts.tv_sec, SECONDS_PER_DAY, &secs);
1579 /* Convert to msecs. */
1580 msecs = secs * MSEC_PER_SEC;
1581 /* Convert nsec to msec. */
1582 msecs += (u32)ts.tv_nsec / NSEC_PER_MSEC;
1583
1584 /* Convert to network byte order. */
1585 return htonl(msecs);
1586}
1587EXPORT_SYMBOL(inet_current_timestamp);
1588
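/*
 * Dispatch MSG_ERRQUEUE reads to the IPv4 or IPv6 error handler based on
 * the socket's current family; sk_family is read once because it can
 * change under us (IPV6_ADDRFORM).
 */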
1589int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
1590{
1591 unsigned int family = READ_ONCE(sk->sk_family);
1592
1593 if (family == AF_INET)
1594 return ip_recv_error(sk, msg, len, addr_len);
1595#if IS_ENABLED(CONFIG_IPV6)
1596 if (family == AF_INET6)
1597 return pingv6_ops.ipv6_recv_error(sk, msg, len, addr_len);
1598#endif
1599 return -EINVAL;
1600}
1601EXPORT_SYMBOL(inet_recv_error);
1602
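/*
 * Finish GRO for an aggregated IPv4 packet: fix up tot_len and the header
 * checksum, then let the per-protocol gro_complete callback patch the
 * transport header.
 */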
1603int inet_gro_complete(struct sk_buff *skb, int nhoff)
1604{
1605 struct iphdr *iph = (struct iphdr *)(skb->data + nhoff);
1606 const struct net_offload *ops;
1607 __be16 totlen = iph->tot_len;
1608 int proto = iph->protocol;
1609 int err = -ENOSYS;
1610
1611 if (skb->encapsulation) {
1612 skb_set_inner_protocol(skb, cpu_to_be16(ETH_P_IP));
1613 skb_set_inner_network_header(skb, nhoff);
1614 }
1615
1616 iph_set_totlen(iph, skb->len - nhoff);
1617 csum_replace2(&iph->check, totlen, iph->tot_len);
1618
1619 ops = rcu_dereference(inet_offloads[proto]);
1620 if (WARN_ON(!ops || !ops->callbacks.gro_complete))
1621 goto out;
1622
1623 /* Only need to add sizeof(*iph) to get to the next hdr below
 1624 * because any hdr with options will have been flushed in
1625 * inet_gro_receive().
1626 */
1627 err = INDIRECT_CALL_2(ops->callbacks.gro_complete,
1628 tcp4_gro_complete, udp4_gro_complete,
1629 skb, nhoff + sizeof(*iph));
1630
1631out:
1632 return err;
1633}
1634
1635static int ipip_gro_complete(struct sk_buff *skb, int nhoff)
1636{
1637 skb->encapsulation = 1;
1638 skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP4;
1639 return inet_gro_complete(skb, nhoff);
1640}
1641
1642int inet_ctl_sock_create(struct sock **sk, unsigned short family,
1643 unsigned short type, unsigned char protocol,
1644 struct net *net)
1645{
1646 struct socket *sock;
1647 int rc = sock_create_kern(net, family, type, protocol, &sock);
1648
1649 if (rc == 0) {
1650 *sk = sock->sk;
1651 (*sk)->sk_allocation = GFP_ATOMIC;
1652 (*sk)->sk_use_task_frag = false;
1653 /*
1654 * Unhash it so that IP input processing does not even see it,
1655 * we do not wish this socket to see incoming packets.
1656 */
1657 (*sk)->sk_prot->unhash(*sk);
1658 }
1659 return rc;
1660}
1661EXPORT_SYMBOL_GPL(inet_ctl_sock_create);
1662
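/* Sum a per-cpu SNMP counter at byte offset @offt across all possible CPUs. */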
1663unsigned long snmp_fold_field(void __percpu *mib, int offt)
1664{
1665 unsigned long res = 0;
1666 int i;
1667
1668 for_each_possible_cpu(i)
1669 res += snmp_get_cpu_field(mib, i, offt);
1670 return res;
1671}
1672EXPORT_SYMBOL_GPL(snmp_fold_field);
1673
1674#if BITS_PER_LONG==32
1675
1676u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offt,
1677 size_t syncp_offset)
1678{
1679 void *bhptr;
1680 struct u64_stats_sync *syncp;
1681 u64 v;
1682 unsigned int start;
1683
1684 bhptr = per_cpu_ptr(mib, cpu);
1685 syncp = (struct u64_stats_sync *)(bhptr + syncp_offset);
1686 do {
1687 start = u64_stats_fetch_begin(syncp);
1688 v = *(((u64 *)bhptr) + offt);
1689 } while (u64_stats_fetch_retry(syncp, start));
1690
1691 return v;
1692}
1693EXPORT_SYMBOL_GPL(snmp_get_cpu_field64);
1694
1695u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_offset)
1696{
1697 u64 res = 0;
1698 int cpu;
1699
1700 for_each_possible_cpu(cpu) {
1701 res += snmp_get_cpu_field64(mib, cpu, offt, syncp_offset);
1702 }
1703 return res;
1704}
1705EXPORT_SYMBOL_GPL(snmp_fold_field64);
1706#endif
1707
1708#ifdef CONFIG_IP_MULTICAST
1709static const struct net_protocol igmp_protocol = {
1710 .handler = igmp_rcv,
1711};
1712#endif
1713
1714static const struct net_protocol icmp_protocol = {
1715 .handler = icmp_rcv,
1716 .err_handler = icmp_err,
1717 .no_policy = 1,
1718};
1719
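/* Allocate and initialise the per-netns SNMP MIB counters, unwinding on failure. */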
1720static __net_init int ipv4_mib_init_net(struct net *net)
1721{
1722 int i;
1723
1724 net->mib.tcp_statistics = alloc_percpu(struct tcp_mib);
1725 if (!net->mib.tcp_statistics)
1726 goto err_tcp_mib;
1727 net->mib.ip_statistics = alloc_percpu(struct ipstats_mib);
1728 if (!net->mib.ip_statistics)
1729 goto err_ip_mib;
1730
1731 for_each_possible_cpu(i) {
1732 struct ipstats_mib *af_inet_stats;
1733 af_inet_stats = per_cpu_ptr(net->mib.ip_statistics, i);
1734 u64_stats_init(&af_inet_stats->syncp);
1735 }
1736
1737 net->mib.net_statistics = alloc_percpu(struct linux_mib);
1738 if (!net->mib.net_statistics)
1739 goto err_net_mib;
1740 net->mib.udp_statistics = alloc_percpu(struct udp_mib);
1741 if (!net->mib.udp_statistics)
1742 goto err_udp_mib;
1743 net->mib.udplite_statistics = alloc_percpu(struct udp_mib);
1744 if (!net->mib.udplite_statistics)
1745 goto err_udplite_mib;
1746 net->mib.icmp_statistics = alloc_percpu(struct icmp_mib);
1747 if (!net->mib.icmp_statistics)
1748 goto err_icmp_mib;
1749 net->mib.icmpmsg_statistics = kzalloc(sizeof(struct icmpmsg_mib),
1750 GFP_KERNEL);
1751 if (!net->mib.icmpmsg_statistics)
1752 goto err_icmpmsg_mib;
1753
1754 tcp_mib_init(net);
1755 return 0;
1756
1757err_icmpmsg_mib:
1758 free_percpu(net->mib.icmp_statistics);
1759err_icmp_mib:
1760 free_percpu(net->mib.udplite_statistics);
1761err_udplite_mib:
1762 free_percpu(net->mib.udp_statistics);
1763err_udp_mib:
1764 free_percpu(net->mib.net_statistics);
1765err_net_mib:
1766 free_percpu(net->mib.ip_statistics);
1767err_ip_mib:
1768 free_percpu(net->mib.tcp_statistics);
1769err_tcp_mib:
1770 return -ENOMEM;
1771}
1772
1773static __net_exit void ipv4_mib_exit_net(struct net *net)
1774{
1775 kfree(net->mib.icmpmsg_statistics);
1776 free_percpu(net->mib.icmp_statistics);
1777 free_percpu(net->mib.udplite_statistics);
1778 free_percpu(net->mib.udp_statistics);
1779 free_percpu(net->mib.net_statistics);
1780 free_percpu(net->mib.ip_statistics);
1781 free_percpu(net->mib.tcp_statistics);
1782#ifdef CONFIG_MPTCP
1783 /* allocated on demand, see mptcp_init_sock() */
1784 free_percpu(net->mib.mptcp_statistics);
1785#endif
1786}
1787
1788static __net_initdata struct pernet_operations ipv4_mib_ops = {
1789 .init = ipv4_mib_init_net,
1790 .exit = ipv4_mib_exit_net,
1791};
1792
1793static int __init init_ipv4_mibs(void)
1794{
1795 return register_pernet_subsys(&ipv4_mib_ops);
1796}
1797
1798static __net_init int inet_init_net(struct net *net)
1799{
1800 /*
1801 * Set defaults for local port range
1802 */
1803 net->ipv4.ip_local_ports.range = 60999u << 16 | 32768u;
1804
1805 seqlock_init(&net->ipv4.ping_group_range.lock);
1806 /*
1807 * Sane defaults - nobody may create ping sockets.
1808 * Boot scripts should set this to distro-specific group.
1809 */
1810 net->ipv4.ping_group_range.range[0] = make_kgid(&init_user_ns, 1);
1811 net->ipv4.ping_group_range.range[1] = make_kgid(&init_user_ns, 0);
1812
1813 /* Default values for sysctl-controlled parameters.
 1814 * We set them here, in case sysctl support is not compiled in.
1815 */
1816 net->ipv4.sysctl_ip_default_ttl = IPDEFTTL;
1817 net->ipv4.sysctl_ip_fwd_update_priority = 1;
1818 net->ipv4.sysctl_ip_dynaddr = 0;
1819 net->ipv4.sysctl_ip_early_demux = 1;
1820 net->ipv4.sysctl_udp_early_demux = 1;
1821 net->ipv4.sysctl_tcp_early_demux = 1;
1822 net->ipv4.sysctl_nexthop_compat_mode = 1;
1823#ifdef CONFIG_SYSCTL
1824 net->ipv4.sysctl_ip_prot_sock = PROT_SOCK;
1825#endif
1826
1827 /* Some igmp sysctl, whose values are always used */
1828 net->ipv4.sysctl_igmp_max_memberships = 20;
1829 net->ipv4.sysctl_igmp_max_msf = 10;
1830 /* IGMP reports for link-local multicast groups are enabled by default */
1831 net->ipv4.sysctl_igmp_llm_reports = 1;
1832 net->ipv4.sysctl_igmp_qrv = 2;
1833
1834 net->ipv4.sysctl_fib_notify_on_flag_change = 0;
1835
1836 return 0;
1837}
1838
1839static __net_initdata struct pernet_operations af_inet_ops = {
1840 .init = inet_init_net,
1841};
1842
1843static int __init init_inet_pernet_ops(void)
1844{
1845 return register_pernet_subsys(&af_inet_ops);
1846}
1847
1848static int ipv4_proc_init(void);
1849
1850/*
1851 * IP protocol layer initialiser
1852 */
1853
1854
1855static const struct net_offload ipip_offload = {
1856 .callbacks = {
1857 .gso_segment = ipip_gso_segment,
1858 .gro_receive = ipip_gro_receive,
1859 .gro_complete = ipip_gro_complete,
1860 },
1861};
1862
1863static int __init ipip_offload_init(void)
1864{
1865 return inet_add_offload(&ipip_offload, IPPROTO_IPIP);
1866}
1867
1868static int __init ipv4_offload_init(void)
1869{
1870 /*
1871 * Add offloads
1872 */
1873 if (udpv4_offload_init() < 0)
1874 pr_crit("%s: Cannot add UDP protocol offload\n", __func__);
1875 if (tcpv4_offload_init() < 0)
1876 pr_crit("%s: Cannot add TCP protocol offload\n", __func__);
1877 if (ipip_offload_init() < 0)
1878 pr_crit("%s: Cannot add IPIP protocol offload\n", __func__);
1879
1880 net_hotdata.ip_packet_offload = (struct packet_offload) {
1881 .type = cpu_to_be16(ETH_P_IP),
1882 .callbacks = {
1883 .gso_segment = inet_gso_segment,
1884 .gro_receive = inet_gro_receive,
1885 .gro_complete = inet_gro_complete,
1886 },
1887 };
1888 dev_add_offload(&net_hotdata.ip_packet_offload);
1889 return 0;
1890}
1891
1892fs_initcall(ipv4_offload_init);
1893
1894static struct packet_type ip_packet_type __read_mostly = {
1895 .type = cpu_to_be16(ETH_P_IP),
1896 .func = ip_rcv,
1897 .list_func = ip_list_rcv,
1898};
1899
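/*
 * Main IPv4 initialiser: registers the core protos (TCP/UDP/RAW/ping),
 * the PF_INET socket family, the base IP protocol handlers and the
 * per-netns and /proc infrastructure.
 */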
1900static int __init inet_init(void)
1901{
1902 struct inet_protosw *q;
1903 struct list_head *r;
1904 int rc;
1905
1906 sock_skb_cb_check_size(sizeof(struct inet_skb_parm));
1907
1908 raw_hashinfo_init(&raw_v4_hashinfo);
1909
1910 rc = proto_register(&tcp_prot, 1);
1911 if (rc)
1912 goto out;
1913
1914 rc = proto_register(&udp_prot, 1);
1915 if (rc)
1916 goto out_unregister_tcp_proto;
1917
1918 rc = proto_register(&raw_prot, 1);
1919 if (rc)
1920 goto out_unregister_udp_proto;
1921
1922 rc = proto_register(&ping_prot, 1);
1923 if (rc)
1924 goto out_unregister_raw_proto;
1925
1926 /*
1927 * Tell SOCKET that we are alive...
1928 */
1929
1930 (void)sock_register(&inet_family_ops);
1931
1932#ifdef CONFIG_SYSCTL
1933 ip_static_sysctl_init();
1934#endif
1935
1936 /*
1937 * Add all the base protocols.
1938 */
1939
1940 if (inet_add_protocol(&icmp_protocol, IPPROTO_ICMP) < 0)
1941 pr_crit("%s: Cannot add ICMP protocol\n", __func__);
1942
1943 net_hotdata.udp_protocol = (struct net_protocol) {
1944 .handler = udp_rcv,
1945 .err_handler = udp_err,
1946 .no_policy = 1,
1947 };
1948 if (inet_add_protocol(&net_hotdata.udp_protocol, IPPROTO_UDP) < 0)
1949 pr_crit("%s: Cannot add UDP protocol\n", __func__);
1950
1951 net_hotdata.tcp_protocol = (struct net_protocol) {
1952 .handler = tcp_v4_rcv,
1953 .err_handler = tcp_v4_err,
1954 .no_policy = 1,
1955 .icmp_strict_tag_validation = 1,
1956 };
1957 if (inet_add_protocol(&net_hotdata.tcp_protocol, IPPROTO_TCP) < 0)
1958 pr_crit("%s: Cannot add TCP protocol\n", __func__);
1959#ifdef CONFIG_IP_MULTICAST
1960 if (inet_add_protocol(&igmp_protocol, IPPROTO_IGMP) < 0)
1961 pr_crit("%s: Cannot add IGMP protocol\n", __func__);
1962#endif
1963
1964 /* Register the socket-side information for inet_create. */
1965 for (r = &inetsw[0]; r < &inetsw[SOCK_MAX]; ++r)
1966 INIT_LIST_HEAD(r);
1967
1968 for (q = inetsw_array; q < &inetsw_array[INETSW_ARRAY_LEN]; ++q)
1969 inet_register_protosw(q);
1970
1971 /*
1972 * Set the ARP module up
1973 */
1974
1975 arp_init();
1976
1977 /*
1978 * Set the IP module up
1979 */
1980
1981 ip_init();
1982
1983 /* Initialise per-cpu ipv4 mibs */
1984 if (init_ipv4_mibs())
1985 panic("%s: Cannot init ipv4 mibs\n", __func__);
1986
1987 /* Setup TCP slab cache for open requests. */
1988 tcp_init();
1989
1990 /* Setup UDP memory threshold */
1991 udp_init();
1992
1993 /* Add UDP-Lite (RFC 3828) */
1994 udplite4_register();
1995
1996 raw_init();
1997
1998 ping_init();
1999
2000 /*
2001 * Set the ICMP layer up
2002 */
2003
2004 if (icmp_init() < 0)
2005 panic("Failed to create the ICMP control socket.\n");
2006
2007 /*
2008 * Initialise the multicast router
2009 */
2010#if defined(CONFIG_IP_MROUTE)
2011 if (ip_mr_init())
2012 pr_crit("%s: Cannot init ipv4 mroute\n", __func__);
2013#endif
2014
2015 if (init_inet_pernet_ops())
2016 pr_crit("%s: Cannot init ipv4 inet pernet ops\n", __func__);
2017
2018 ipv4_proc_init();
2019
2020 ipfrag_init();
2021
2022 dev_add_pack(&ip_packet_type);
2023
2024 ip_tunnel_core_init();
2025
2026 rc = 0;
2027out:
2028 return rc;
2029out_unregister_raw_proto:
2030 proto_unregister(&raw_prot);
2031out_unregister_udp_proto:
2032 proto_unregister(&udp_prot);
2033out_unregister_tcp_proto:
2034 proto_unregister(&tcp_prot);
2035 goto out;
2036}
2037
2038fs_initcall(inet_init);

/* ------------------------------------------------------------------------ */

#ifdef CONFIG_PROC_FS
static int __init ipv4_proc_init(void)
{
        int rc = 0;

        if (raw_proc_init())
                goto out_raw;
        if (tcp4_proc_init())
                goto out_tcp;
        if (udp4_proc_init())
                goto out_udp;
        if (ping_proc_init())
                goto out_ping;
        if (ip_misc_proc_init())
                goto out_misc;
out:
        return rc;
out_misc:
        ping_proc_exit();
out_ping:
        udp4_proc_exit();
out_udp:
        tcp4_proc_exit();
out_tcp:
        raw_proc_exit();
out_raw:
        rc = -ENOMEM;
        goto out;
}

#else /* CONFIG_PROC_FS */
static int __init ipv4_proc_init(void)
{
        return 0;
}
#endif /* CONFIG_PROC_FS */